diff --git a/.gitattributes b/.gitattributes index 852eec4d638..9d638481a84 100644 --- a/.gitattributes +++ b/.gitattributes @@ -25,6 +25,7 @@ pcre/testdata/greppatN4 -text *.MYD binary *.MYI binary *.class binary +*.jar binary *.c diff=cpp *.h diff=cpp diff --git a/.gitignore b/.gitignore index 718befa962d..932c666b74a 100644 --- a/.gitignore +++ b/.gitignore @@ -220,6 +220,8 @@ storage/tokudb/PerconaFT/tools/tokudb_load storage/tokudb/PerconaFT/tools/tokuftdump storage/tokudb/PerconaFT/tools/tokuft_logprint storage/tokudb/PerconaFT/xz/ +storage/tokudb/tokudb.cnf +storage/tokudb/tokudb.conf strings/conf_to_src support-files/MySQL-shared-compat.spec support-files/binary-configure diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh index 721ed3a4c45..e9bede9c746 100755 --- a/BUILD/SETUP.sh +++ b/BUILD/SETUP.sh @@ -140,7 +140,7 @@ elif [ "x$warning_mode" = "xmaintainer" ]; then debug_extra_cflags="-g3" else # Both C and C++ warnings - warnings="-Wall -Wextra -Wunused -Wwrite-strings -Wno-uninitialized" + warnings="-Wall -Wextra -Wunused -Wwrite-strings -Wno-uninitialized -Wno-strict-aliasing" # For more warnings, uncomment the following line # warnings="$warnings -Wshadow" diff --git a/BUILD/compile-alpha b/BUILD/compile-alpha deleted file mode 100755 index 45f72eb90e1..00000000000 --- a/BUILD/compile-alpha +++ /dev/null @@ -1,24 +0,0 @@ -#! /bin/sh - -# Copyright (C) 2000, 2002 MySQL AB -# Use is subject to license terms -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -path=`dirname $0` -. "$path/SETUP.sh" - -extra_flags="$alpha_cflags $fast_cflags" -extra_configs="$alpha_configs $static_link" - -. "$path/FINISH.sh" diff --git a/BUILD/compile-alpha-debug b/BUILD/compile-alpha-debug deleted file mode 100755 index 2d8869227dc..00000000000 --- a/BUILD/compile-alpha-debug +++ /dev/null @@ -1,12 +0,0 @@ -#! /bin/sh - -/bin/rm -f */.deps/*.P */*.o -make -k maintainer-clean -/bin/rm -f */.deps/*.P */*.o -/bin/rm -f */.deps/*.P config.cache storage/innobase/config.cache mysql-*.tar.gz - -path=`dirname $0` -. "$path/autorun.sh" - -CFLAGS=-O1 CC=gcc CXX=g++ CXXFLAGS="-O1 -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --with-debug --with-extra-charsets=complex --without-extra-tools -make diff --git a/BUILD/compile-amd64-debug-wsrep b/BUILD/compile-amd64-debug-wsrep old mode 100644 new mode 100755 diff --git a/BUILD/compile-amd64-wsrep b/BUILD/compile-amd64-wsrep old mode 100644 new mode 100755 diff --git a/BUILD/compile-ia64-debug-max b/BUILD/compile-ia64-debug-max deleted file mode 100755 index 508cadf73e2..00000000000 --- a/BUILD/compile-ia64-debug-max +++ /dev/null @@ -1,25 +0,0 @@ -#! /bin/sh - -# Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 
-# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -gmake -k maintainer-clean || true -/bin/rm -f */.deps/*.P config.cache storage/innobase/config.cache - -path=`dirname $0` -. "$path/autorun.sh" - -CC=ecc CFLAGS="-w1 -DEXTRA_DEBUG -DSAFE_MUTEX -O2" CXX=ecc CXXFLAGS="-w1 -DEXTRA_DEBUG -DSAFE_MUTEX -O2" ./configure --prefix=/usr/local/mysql --with-extra-charsets=complex --enable-thread-safe-client --with-mysqld-ldflags=-all-static --with-client-ldflags=-all-static --with-debug --with-innodb --with-embedded-server --with-archive-storage-engine -gmake diff --git a/BUILD/compile-innodb b/BUILD/compile-innodb deleted file mode 100755 index fa791282b28..00000000000 --- a/BUILD/compile-innodb +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/sh -# -# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free Software -# Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, -# Fifth Floor, Boston, MA 02110-1301 USA -# - -# we assume this script is in storage/innobase/ - -MYSQL_ROOT="$(dirname ${0})/../.." - -cd ${MYSQL_ROOT} - -cmake -DWITH_INNOBASE_STORAGE_ENGINE:BOOL=ON -make -j$(nproc) diff --git a/BUILD/compile-pentium-debug-all b/BUILD/compile-pentium-debug-all deleted file mode 100755 index 9ed5bf6b2cd..00000000000 --- a/BUILD/compile-pentium-debug-all +++ /dev/null @@ -1,10 +0,0 @@ -#! /bin/sh - -path=`dirname $0` -set -- "$@" --with-debug=full -. "$path/SETUP.sh" - -extra_flags="$pentium_cflags $debug_cflags" -extra_configs="$pentium_configs $debug_configs $all_configs $error_inject --with-experimental-collations $disable_64_bit_plugins" - -. "$path/FINISH.sh" diff --git a/BUILD/compile-pentium-debug-max-no-embedded b/BUILD/compile-pentium-debug-max-no-embedded deleted file mode 100755 index d8bc896f89c..00000000000 --- a/BUILD/compile-pentium-debug-max-no-embedded +++ /dev/null @@ -1,25 +0,0 @@ -#! /bin/sh - -# Copyright (c) 2004-2006 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -path=`dirname $0` -. "$path/SETUP.sh" - -extra_flags="$pentium_cflags $debug_cflags" -extra_configs="$pentium_configs $debug_configs $max_no_embedded_configs $disable_64_bit_plugins" - -. "$path/FINISH.sh" diff --git a/BUILD/compile-pentium-debug-wsrep b/BUILD/compile-pentium-debug-wsrep deleted file mode 100644 index 6528ed77f95..00000000000 --- a/BUILD/compile-pentium-debug-wsrep +++ /dev/null @@ -1,12 +0,0 @@ -#! /bin/sh -x - -path=`dirname $0` -set -- "$@" --with-debug=full -. "$path/SETUP.sh" - -extra_flags="$pentium_cflags $debug_cflags -g -O0 $wsrep_cflags" -c_warnings="$c_warnings $debug_extra_warnings" -cxx_warnings="$cxx_warnings $debug_extra_warnings" -extra_configs="$pentium_configs $debug_configs $wsrep_configs --with-wsrep $disable_64_bit_plugins" - -. "$path/FINISH.sh" diff --git a/BUILD/compile-pentium-debug-yassl b/BUILD/compile-pentium-debug-yassl deleted file mode 100755 index e8cfff6cb07..00000000000 --- a/BUILD/compile-pentium-debug-yassl +++ /dev/null @@ -1,26 +0,0 @@ -#! /bin/sh - -# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -path=`dirname $0` -. "$path/SETUP.sh" - -extra_flags="$pentium_cflags $debug_cflags" -extra_configs="$pentium_configs $debug_configs" - -extra_configs="$extra_configs --with-debug --with-ssl" - -. 
"$path/FINISH.sh" diff --git a/BUILD/compile-pentium-icc-yassl b/BUILD/compile-pentium-icc-yassl old mode 100644 new mode 100755 diff --git a/BUILD/compile-pentium b/BUILD/compile-pentium32 similarity index 100% rename from BUILD/compile-pentium rename to BUILD/compile-pentium32 diff --git a/BUILD/compile-pentium-cybozu b/BUILD/compile-pentium32-cybozu similarity index 100% rename from BUILD/compile-pentium-cybozu rename to BUILD/compile-pentium32-cybozu diff --git a/BUILD/compile-pentium-debug b/BUILD/compile-pentium32-debug similarity index 100% rename from BUILD/compile-pentium-debug rename to BUILD/compile-pentium32-debug diff --git a/BUILD/compile-pentium-debug-max b/BUILD/compile-pentium32-debug-max similarity index 100% rename from BUILD/compile-pentium-debug-max rename to BUILD/compile-pentium32-debug-max diff --git a/BUILD/compile-pentium-debug-openssl b/BUILD/compile-pentium32-debug-openssl similarity index 100% rename from BUILD/compile-pentium-debug-openssl rename to BUILD/compile-pentium32-debug-openssl diff --git a/BUILD/compile-pentium-gcov b/BUILD/compile-pentium32-gcov similarity index 100% rename from BUILD/compile-pentium-gcov rename to BUILD/compile-pentium32-gcov diff --git a/BUILD/compile-pentium-gprof b/BUILD/compile-pentium32-gprof similarity index 100% rename from BUILD/compile-pentium-gprof rename to BUILD/compile-pentium32-gprof diff --git a/BUILD/compile-pentium-icc-valgrind-max b/BUILD/compile-pentium32-icc-valgrind-max similarity index 100% rename from BUILD/compile-pentium-icc-valgrind-max rename to BUILD/compile-pentium32-icc-valgrind-max diff --git a/BUILD/compile-pentium-max b/BUILD/compile-pentium32-max similarity index 100% rename from BUILD/compile-pentium-max rename to BUILD/compile-pentium32-max diff --git a/BUILD/compile-pentium-valgrind-max b/BUILD/compile-pentium32-valgrind-max similarity index 100% rename from BUILD/compile-pentium-valgrind-max rename to BUILD/compile-pentium32-valgrind-max diff --git a/BUILD/compile-pentium-wsrep b/BUILD/compile-pentium32-wsrep old mode 100644 new mode 100755 similarity index 100% rename from BUILD/compile-pentium-wsrep rename to BUILD/compile-pentium32-wsrep diff --git a/BUILD/compile-pentium64-wsrep b/BUILD/compile-pentium64-wsrep old mode 100644 new mode 100755 diff --git a/BUILD/compile-solaris-x86-32 b/BUILD/compile-solaris-x86-32 deleted file mode 100755 index 29965524479..00000000000 --- a/BUILD/compile-solaris-x86-32 +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -path=`dirname $0` -. "$path/SETUP.sh" -extra_flags="-D__sun -m32" -extra_configs="$max_configs --with-libevent" - -LDFLAGS="-lmtmalloc -R/usr/sfw/lib" -export LDFLAGS - -. "$path/FINISH.sh" diff --git a/BUILD/compile-solaris-x86-32-debug b/BUILD/compile-solaris-x86-32-debug deleted file mode 100755 index 9ce91495c1c..00000000000 --- a/BUILD/compile-solaris-x86-32-debug +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -path=`dirname $0` -. "$path/SETUP.sh" -extra_flags="-D__sun -m32 $debug_cflags" -extra_configs="$max_configs --with-libevent $debug_configs" - -LDFLAGS="-lmtmalloc -R/usr/sfw/lib" -export LDFLAGS - -. "$path/FINISH.sh" diff --git a/BUILD/compile-solaris-x86-32-debug-forte b/BUILD/compile-solaris-x86-32-debug-forte deleted file mode 100755 index 777360865a2..00000000000 --- a/BUILD/compile-solaris-x86-32-debug-forte +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh - -path=`dirname $0` -. "$path/SETUP.sh" - -# Take only #define options - the others are gcc specific. 
-# (real fix is for SETUP.sh not to put gcc specific options in $debug_cflags) -DEFS="" -for F in $debug_cflags ; do - expr "$F" : "^-D" && DEFS="$DEFS $F" -done -debug_cflags="-O0 -g $DEFS" - -extra_flags="-m32 -mt -D_FORTEC_ -xbuiltin=%all -xlibmil -xlibmopt -fns=no -xprefetch=auto -xprefetch_level=3 $debug_cflags" -extra_configs="$max_configs --with-libevent $debug_configs" - -warnings="" -c_warnings="" -cxx_warnings="" -base_cxxflags="-noex" - -CC=cc -CFLAGS="-xstrconst" -CXX=CC -LDFLAGS="-lmtmalloc" - -. "$path/FINISH.sh" diff --git a/BUILD/compile-solaris-x86-forte-32 b/BUILD/compile-solaris-x86-forte-32 deleted file mode 100755 index 5aac376a44c..00000000000 --- a/BUILD/compile-solaris-x86-forte-32 +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh - -path=`dirname $0` -. "$path/SETUP.sh" - -extra_flags="-m32 -mt -D_FORTEC_ -xbuiltin=%all -xlibmil -xlibmopt -fns=no -xprefetch=auto -xprefetch_level=3" -extra_configs="$max_configs --with-libevent" - -warnings="" -c_warnings="" -cxx_warnings="" -base_cxxflags="-noex" - -CC=cc -CFLAGS="-xstrconst" -CXX=CC -LDFLAGS="-lmtmalloc" - -. "$path/FINISH.sh" diff --git a/CMakeLists.txt b/CMakeLists.txt index 94362c349f2..76b0817f8c7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -129,6 +129,9 @@ IF(DEFINED ENV{CPPFLAGS}) ADD_DEFINITIONS($ENV{CPPFLAGS}) ENDIF() +# NUMA +SET(WITH_NUMA "AUTO" CACHE STRING "Build with non-uniform memory access, allowing --innodb-numa-interleave. Options are ON|OFF|AUTO. ON = enabled (requires NUMA library), OFF = disabled, AUTO = enabled if NUMA library found.") + SET(MYSQL_MAINTAINER_MODE "AUTO" CACHE STRING "MySQL maintainer-specific development environment. Options are: ON OFF AUTO.") # Packaging @@ -163,7 +166,7 @@ INCLUDE(install_macros) INCLUDE(systemd) INCLUDE(mysql_add_executable) INCLUDE(compile_flags) -INCLUDE(crc32-vpmsum) +INCLUDE(crc32) # Handle options OPTION(DISABLE_SHARED diff --git a/VERSION b/VERSION index fe6067ad986..0fbbb61fd83 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=3 -MYSQL_VERSION_PATCH=6 -SERVER_MATURITY=gamma +MYSQL_VERSION_PATCH=8 +SERVER_MATURITY=stable diff --git a/client/client_priv.h b/client/client_priv.h index 1d584efeea7..ada72187569 100644 --- a/client/client_priv.h +++ b/client/client_priv.h @@ -61,6 +61,7 @@ enum options_client OPT_USE_THREADS, OPT_IMPORT_USE_THREADS, OPT_MYSQL_NUMBER_OF_QUERY, + OPT_IGNORE_DATABASE, OPT_IGNORE_TABLE,OPT_INSERT_IGNORE,OPT_SHOW_WARNINGS,OPT_DROP_DATABASE, OPT_TZ_UTC, OPT_CREATE_SLAP_SCHEMA, OPT_MYSQLDUMP_SLAVE_APPLY, diff --git a/client/mysql.cc b/client/mysql.cc index cb5c2318698..6a720c38ef1 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -3130,7 +3130,7 @@ static int com_help(String *buffer __attribute__((unused)), char *line __attribute__((unused))) { - reg1 int i, j; + int i, j; char * help_arg= strchr(line,' '), buff[32], *end; if (help_arg) { @@ -4205,8 +4205,7 @@ com_edit(String *buffer,char *line __attribute__((unused))) const char *editor; MY_STAT stat_arg; - if ((fd=create_temp_file(filename,NullS,"sql", O_CREAT | O_WRONLY, - MYF(MY_WME))) < 0) + if ((fd= create_temp_file(filename,NullS,"sql", 0, MYF(MY_WME))) < 0) goto err; if (buffer->is_empty() && !old_buffer.is_empty()) (void) my_write(fd,(uchar*) old_buffer.ptr(),old_buffer.length(), diff --git a/client/mysql_plugin.c b/client/mysql_plugin.c index 81677ad551f..40560613a89 100644 --- a/client/mysql_plugin.c +++ b/client/mysql_plugin.c @@ -159,8 +159,7 @@ static int make_tempfile(char *filename, const char *ext) { int fd= 0; - 
if ((fd=create_temp_file(filename, NullS, ext, O_CREAT | O_WRONLY, - MYF(MY_WME))) < 0) + if ((fd= create_temp_file(filename, NullS, ext, 0, MYF(MY_WME))) < 0) { fprintf(stderr, "ERROR: Cannot generate temporary file. Error code: %d.\n", fd); @@ -365,6 +364,12 @@ static int get_default_values() } /* Now open the file and read the defaults we want. */ file= fopen(defaults_file, "r"); + if (file == NULL) + { + fprintf(stderr, "ERROR: failed to open file %s: %s.\n", defaults_file, + strerror(errno)); + goto exit; + } while (fgets(line, FN_REFLEN, file) != NULL) { char *value= 0; diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c index 0e0695c9ebe..ef1630dd0e3 100644 --- a/client/mysql_upgrade.c +++ b/client/mysql_upgrade.c @@ -558,8 +558,7 @@ static int run_query(const char *query, DYNAMIC_STRING *ds_res, DBUG_PRINT("enter", ("query: %s", query)); if ((fd= create_temp_file(query_file_path, opt_tmpdir[0] ? opt_tmpdir : NULL, - "sql", O_CREAT | O_SHARE | O_RDWR, - MYF(MY_WME))) < 0) + "sql", O_SHARE, MYF(MY_WME))) < 0) die("Failed to create temporary file for defaults"); /* @@ -1144,7 +1143,7 @@ int main(int argc, char **argv) load_defaults_or_exit("my", load_default_groups, &argc, &argv); defaults_argv= argv; /* Must be freed by 'free_defaults' */ -#if __WIN__ +#if defined(__WIN__) if (GetModuleFileName(NULL, self_name, FN_REFLEN) == 0) #endif { @@ -1176,7 +1175,7 @@ int main(int argc, char **argv) cnf_file_path= strmov(defaults_file, "--defaults-file="); { int fd= create_temp_file(cnf_file_path, opt_tmpdir[0] ? opt_tmpdir : NULL, - "mysql_upgrade-", O_CREAT | O_WRONLY, MYF(MY_FAE)); + "mysql_upgrade-", 0, MYF(MY_FAE)); if (fd < 0) die(NULL); my_write(fd, USTRING_WITH_LEN( "[client]\n"), MYF(MY_FAE)); diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index 44675ef7ed9..4c8709bfca6 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -2904,7 +2904,7 @@ static Exit_status dump_local_log_entries(PRINT_EVENT_INFO *print_event_info, stdin in binary mode. Errors on setting this mode result in halting the function and printing an error message to stderr. */ -#if defined (__WIN__) || (_WIN64) +#if defined (__WIN__) || defined(_WIN64) if (_setmode(fileno(stdin), O_BINARY) == -1) { error("Could not set binary mode on stdin."); diff --git a/client/mysqldump.c b/client/mysqldump.c index 3dbe8412f3c..dc87338aac2 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -212,7 +212,9 @@ TYPELIB compatible_mode_typelib= {array_elements(compatible_mode_names) - 1, #define MED_ENGINES "MRG_MyISAM, MRG_ISAM, CONNECT, OQGRAPH, SPIDER, VP, FEDERATED" -HASH ignore_table; +static HASH ignore_table; + +static HASH ignore_database; static struct my_option my_long_options[] = { @@ -376,6 +378,11 @@ static struct my_option my_long_options[] = &opt_hex_blob, &opt_hex_blob, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"host", 'h', "Connect to host.", ¤t_host, ¤t_host, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"ignore-database", OPT_IGNORE_DATABASE, + "Do not dump the specified database. To specify more than one database to ignore, " + "use the directive multiple times, once for each database. Only takes effect " + "when used together with --all-databases|-A", + 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ignore-table", OPT_IGNORE_TABLE, "Do not dump the specified table. To specify more than one table to ignore, " "use the directive multiple times, once for each table. 
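A usage sketch for the new mysqldump --ignore-database option added above; the database names and output file are placeholders, and per the help text the option is only honoured together with --all-databases and may be repeated once per database:

    mysqldump --all-databases \
              --ignore-database=staging \
              --ignore-database=scratch > all_but_ignored.sql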
Each table must " @@ -900,6 +907,10 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case (int) OPT_TABLES: opt_databases=0; break; + case (int) OPT_IGNORE_DATABASE: + if (my_hash_insert(&ignore_database, (uchar*) my_strdup(argument, MYF(0)))) + exit(EX_EOM); + break; case (int) OPT_IGNORE_TABLE: { if (!strchr(argument, '.')) @@ -984,6 +995,9 @@ static int get_options(int *argc, char ***argv) load_defaults_or_exit("my", load_default_groups, argc, argv); defaults_argv= *argv; + if (my_hash_init(&ignore_database, charset_info, 16, 0, 0, + (my_hash_get_key) get_table_key, my_free, 0)) + return(EX_EOM); if (my_hash_init(&ignore_table, charset_info, 16, 0, 0, (my_hash_get_key) get_table_key, my_free, 0)) return(EX_EOM); @@ -1056,6 +1070,13 @@ static int get_options(int *argc, char ***argv) my_progname_short); return(EX_USAGE); } + if (ignore_database.records && !opt_alldbs) + { + fprintf(stderr, + "%s: --ignore-database can only be used together with --all-databases.\n", + my_progname_short); + return(EX_USAGE); + } if (strcmp(default_charset, charset_info->csname) && !(charset_info= get_charset_by_csname(default_charset, MY_CS_PRIMARY, MYF(MY_WME)))) @@ -1642,6 +1663,8 @@ static void free_resources() my_free(opt_password); my_free(current_host); free_root(&glob_root, MYF(0)); + if (my_hash_inited(&ignore_database)) + my_hash_free(&ignore_database); if (my_hash_inited(&ignore_table)) my_hash_free(&ignore_table); dynstr_free(&extended_row); @@ -2491,7 +2514,7 @@ static uint dump_routines_for_db(char *db) "Create Package Body"}; char db_name_buff[NAME_LEN*2+3], name_buff[NAME_LEN*2+3]; char *routine_name; - int i; + uint i; FILE *sql_file= md_result_file; MYSQL_ROW row, routine_list_row; @@ -2527,7 +2550,7 @@ static uint dump_routines_for_db(char *db) fputs("\t\n", sql_file); /* 0, retrieve and dump functions, 1, procedures, etc. 
*/ - for (i= 0; i < 4; i++) + for (i= 0; i < array_elements(routine_type); i++) { my_snprintf(query_buff, sizeof(query_buff), "SHOW %s STATUS WHERE Db = '%s'", @@ -4214,6 +4237,7 @@ static int dump_tablespaces_for_tables(char *db, char **table_names, int tables) return r; } + static int dump_tablespaces_for_databases(char** databases) { int r; @@ -4243,6 +4267,7 @@ static int dump_tablespaces_for_databases(char** databases) return r; } + static int dump_tablespaces(char* ts_where) { MYSQL_ROW row; @@ -4429,6 +4454,14 @@ static int dump_tablespaces(char* ts_where) DBUG_RETURN(0); } + +/* Return 1 if we should copy the database */ +static my_bool include_database(const char *hash_key) +{ + return !my_hash_search(&ignore_database, (uchar*) hash_key, strlen(hash_key)); +} + + static int dump_all_databases() { MYSQL_ROW row; @@ -4447,8 +4480,9 @@ static int dump_all_databases() !my_strcasecmp(&my_charset_latin1, row[0], PERFORMANCE_SCHEMA_DB_NAME)) continue; - if (dump_all_tables_in_db(row[0])) - result=1; + if (include_database(row[0])) + if (dump_all_tables_in_db(row[0])) + result=1; } mysql_free_result(tableres); if (seen_views) @@ -4470,8 +4504,9 @@ static int dump_all_databases() !my_strcasecmp(&my_charset_latin1, row[0], PERFORMANCE_SCHEMA_DB_NAME)) continue; - if (dump_all_views_in_db(row[0])) - result=1; + if (include_database(row[0])) + if (dump_all_views_in_db(row[0])) + result=1; } mysql_free_result(tableres); } @@ -6055,7 +6090,6 @@ int main(int argc, char **argv) sf_leaking_memory=1; /* don't report memory leaks on early exits */ compatible_mode_normal_str[0]= 0; default_charset= (char *)mysql_universal_client_charset; - bzero((char*) &ignore_table, sizeof(ignore_table)); exit_code= get_options(&argc, &argv); if (exit_code) diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 12016d9b6c7..b9aac043017 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -949,6 +949,8 @@ pthread_handler_t connection_thread(void *arg) end_thread: cn->query_done= 1; + mysql_close(cn->mysql); + cn->mysql= 0; mysql_thread_end(); pthread_exit(0); return 0; @@ -1462,7 +1464,7 @@ void close_statements() for (con= connections; con < next_con; con++) { if (con->stmt) - mysql_stmt_close(con->stmt); + do_stmt_close(con); con->stmt= 0; } DBUG_VOID_RETURN; @@ -2209,8 +2211,7 @@ int dyn_string_cmp(DYNAMIC_STRING* ds, const char *fname) DBUG_ENTER("dyn_string_cmp"); DBUG_PRINT("enter", ("fname: %s", fname)); - if ((fd= create_temp_file(temp_file_path, TMPDIR, - "tmp", O_CREAT | O_SHARE | O_RDWR, + if ((fd= create_temp_file(temp_file_path, TMPDIR, "tmp", O_SHARE, MYF(MY_WME))) < 0) die("Failed to create temporary file for ds"); @@ -4684,8 +4685,7 @@ void do_perl(struct st_command *command) /* Create temporary file name */ if ((fd= create_temp_file(temp_file_path, getenv("MYSQLTEST_VARDIR"), - "tmp", O_CREAT | O_SHARE | O_RDWR, - MYF(MY_WME))) < 0) + "tmp", O_SHARE, MYF(MY_WME))) < 0) die("Failed to create temporary file for perl command"); my_close(fd, MYF(0)); diff --git a/cmake/crc32-vpmsum.cmake b/cmake/crc32-vpmsum.cmake deleted file mode 100644 index c1a2dee56e2..00000000000 --- a/cmake/crc32-vpmsum.cmake +++ /dev/null @@ -1,5 +0,0 @@ -IF(CMAKE_SYSTEM_PROCESSOR MATCHES "ppc64le") - SET(HAVE_CRC32_VPMSUM 1) - SET(CRC32_VPMSUM_LIBRARY crc32-vpmsum) - ADD_SUBDIRECTORY(extra/crc32-vpmsum) -ENDIF() diff --git a/cmake/crc32.cmake b/cmake/crc32.cmake new file mode 100644 index 00000000000..ee8afdb0e92 --- /dev/null +++ b/cmake/crc32.cmake @@ -0,0 +1,5 @@ +IF(CMAKE_SYSTEM_PROCESSOR MATCHES "ppc64") + 
SET(HAVE_CRC32_VPMSUM 1) + SET(CRC32_LIBRARY crc32-vpmsum) + ADD_SUBDIRECTORY(extra/crc32-vpmsum) +ENDIF() diff --git a/cmake/mariadb_connector_c.cmake b/cmake/mariadb_connector_c.cmake index fe2bcfa4a1f..c36087cb862 100644 --- a/cmake/mariadb_connector_c.cmake +++ b/cmake/mariadb_connector_c.cmake @@ -34,5 +34,9 @@ ENDIF() SET(PLUGIN_INSTALL_DIR ${INSTALL_PLUGINDIR}) SET(MARIADB_UNIX_ADDR ${MYSQL_UNIX_ADDR}) +SET(CLIENT_PLUGIN_PVIO_NPIPE STATIC) +SET(CLIENT_PLUGIN_PVIO_SHMEM STATIC) +SET(CLIENT_PLUGIN_PVIO_SOCKET STATIC) + MESSAGE("== Configuring MariaDB Connector/C") ADD_SUBDIRECTORY(libmariadb) diff --git a/cmake/numa.cmake b/cmake/numa.cmake index 4bace0ee7f4..cdc31229da8 100644 --- a/cmake/numa.cmake +++ b/cmake/numa.cmake @@ -1,16 +1,18 @@ MACRO (MYSQL_CHECK_NUMA) - IF(CMAKE_SYSTEM_NAME MATCHES "Linux") + STRING(TOLOWER "${WITH_NUMA}" WITH_NUMA_LOWERCASE) + + IF(NOT WITH_NUMA) + MESSAGE(STATUS "WITH_NUMA=OFF: NUMA memory allocation policy disabled") + + ELSEIF(NOT WITH_NUMA_LOWERCASE STREQUAL "auto" AND NOT WITH_NUMA_LOWERCASE STREQUAL "on") + MESSAGE(FATAL_ERROR "Wrong value for WITH_NUMA") + + ELSEIF(CMAKE_SYSTEM_NAME MATCHES "Linux") CHECK_INCLUDE_FILES(numa.h HAVE_NUMA_H) CHECK_INCLUDE_FILES(numaif.h HAVE_NUMAIF_H) IF(HAVE_NUMA_H AND HAVE_NUMAIF_H) - OPTION(WITH_NUMA "Explicitly set NUMA memory allocation policy" ON) - ELSE() - OPTION(WITH_NUMA "Explicitly set NUMA memory allocation policy" OFF) - ENDIF() - - IF(WITH_NUMA AND HAVE_NUMA_H AND HAVE_NUMAIF_H) SET(SAVE_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES}) SET(CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES} numa) CHECK_C_SOURCE_COMPILES( @@ -31,12 +33,19 @@ MACRO (MYSQL_CHECK_NUMA) ENDIF() ENDIF() - IF(WITH_NUMA AND NOT HAVE_LIBNUMA) + IF(WITH_NUMA_LOWERCASE STREQUAL "auto" AND HAVE_LIBNUMA) + MESSAGE(STATUS "WITH_NUMA=AUTO: NUMA memory allocation policy enabled") + ELSEIF(WITH_NUMA_LOWERCASE STREQUAL "auto" AND NOT HAVE_LIBNUMA) + MESSAGE(STATUS "WITH_NUMA=AUTO: NUMA memory allocation policy disabled") + ELSEIF(HAVE_LIBNUMA) + MESSAGE(STATUS "WITH_NUMA=ON: NUMA memory allocation policy enabled") + ELSE() # Forget it in cache, abort the build. 
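For reference, a sketch of how the WITH_NUMA setting introduced in CMakeLists.txt and handled here in cmake/numa.cmake might be passed at configure time; the build directory and any other -D flags are assumed:

    cmake .. -DWITH_NUMA=AUTO   # default: NUMA support enabled only if the NUMA library is found
    cmake .. -DWITH_NUMA=ON     # require NUMA; configuration aborts if headers/libraries are missing
    cmake .. -DWITH_NUMA=OFF    # build without NUMA / --innodb-numa-interleave support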
UNSET(WITH_NUMA CACHE) UNSET(NUMA_LIBRARY CACHE) - MESSAGE(FATAL_ERROR "Could not find numa headers/libraries") + MESSAGE(FATAL_ERROR "WITH_NUMA=ON: Could not find NUMA headers/libraries") ENDIF() + ENDIF() ENDMACRO() diff --git a/cmake/os/Windows.cmake b/cmake/os/Windows.cmake index 3935894781e..da7dd3180a3 100644 --- a/cmake/os/Windows.cmake +++ b/cmake/os/Windows.cmake @@ -261,8 +261,6 @@ CHECK_SYMBOL_REPLACEMENT(S_IROTH _S_IREAD sys/stat.h) CHECK_SYMBOL_REPLACEMENT(S_IFIFO _S_IFIFO sys/stat.h) CHECK_SYMBOL_REPLACEMENT(SIGQUIT SIGTERM signal.h) CHECK_SYMBOL_REPLACEMENT(SIGPIPE SIGINT signal.h) -CHECK_SYMBOL_REPLACEMENT(isnan _isnan "math.h;float.h") -CHECK_SYMBOL_REPLACEMENT(finite _finite "math;float.h") CHECK_FUNCTION_REPLACEMENT(popen _popen) CHECK_FUNCTION_REPLACEMENT(pclose _pclose) CHECK_FUNCTION_REPLACEMENT(access _access) diff --git a/cmake/os/WindowsCache.cmake b/cmake/os/WindowsCache.cmake index 1d8c568163a..2ba3aec16b4 100644 --- a/cmake/os/WindowsCache.cmake +++ b/cmake/os/WindowsCache.cmake @@ -52,8 +52,6 @@ SET(HAVE_DECL_FDATASYNC CACHE INTERNAL "") SET(HAVE_FEDISABLEEXCEPT CACHE INTERNAL "") SET(HAVE_FENV_H CACHE INTERNAL "") SET(HAVE_FESETROUND CACHE INTERNAL "") -SET(HAVE_FINITE CACHE INTERNAL "") -SET(HAVE_FINITE_IN_MATH_H CACHE INTERNAL "") SET(HAVE_FLOAT_H 1 CACHE INTERNAL "") SET(HAVE_FNMATCH_H CACHE INTERNAL "") SET(HAVE_FPU_CONTROL_H CACHE INTERNAL "") @@ -89,7 +87,6 @@ SET(HAVE_LDIV 1 CACHE INTERNAL "") SET(HAVE_LIMITS_H 1 CACHE INTERNAL "") SET(HAVE_LOCALE_H 1 CACHE INTERNAL "") SET(HAVE_LOCALTIME_R 1 CACHE INTERNAL "") -#SET(HAVE_LOG2 CACHE INTERNAL "") SET(HAVE_LRAND48 CACHE INTERNAL "") SET(HAVE_LSTAT CACHE INTERNAL "") SET(HAVE_MADVISE CACHE INTERNAL "") @@ -100,6 +97,7 @@ SET(HAVE_MEMCPY 1 CACHE INTERNAL "") SET(HAVE_MEMMOVE 1 CACHE INTERNAL "") SET(HAVE_MEMORY_H 1 CACHE INTERNAL "") SET(HAVE_MKSTEMP CACHE INTERNAL "") +SET(HAVE_MKOSTEMP CACHE INTERNAL "") SET(HAVE_MLOCK CACHE INTERNAL "") SET(HAVE_MLOCKALL CACHE INTERNAL "") SET(HAVE_MMAP CACHE INTERNAL "") @@ -140,7 +138,6 @@ SET(HAVE_READLINK CACHE INTERNAL "") SET(HAVE_READ_REAL_TIME CACHE INTERNAL "") SET(HAVE_REALPATH CACHE INTERNAL "") SET(HAVE_RENAME 1 CACHE INTERNAL "") -#SET(HAVE_RINT CACHE INTERNAL "") SET(HAVE_RWLOCK_INIT CACHE INTERNAL "") SET(HAVE_SCHED_H CACHE INTERNAL "") SET(HAVE_SCHED_YIELD CACHE INTERNAL "") @@ -239,7 +236,6 @@ SET(HAVE_SYS_VADVISE_H CACHE INTERNAL "") SET(HAVE_SYS_WAIT_H CACHE INTERNAL "") SET(HAVE_TCGETATTR CACHE INTERNAL "") SET(HAVE_TELL 1 CACHE INTERNAL "") -SET(HAVE_TEMPNAM 1 CACHE INTERNAL "") SET(HAVE_TERMCAP_H CACHE INTERNAL "") SET(HAVE_TERMIOS_H CACHE INTERNAL "") SET(HAVE_TERMIO_H CACHE INTERNAL "") diff --git a/config.h.cmake b/config.h.cmake index d3a76c77c39..cdf3fd1a726 100644 --- a/config.h.cmake +++ b/config.h.cmake @@ -140,7 +140,6 @@ #cmakedefine HAVE_DECL_FDATASYNC 1 #cmakedefine HAVE_FEDISABLEEXCEPT 1 #cmakedefine HAVE_FESETROUND 1 -#cmakedefine HAVE_FINITE 1 #cmakedefine HAVE_FP_EXCEPT 1 #cmakedefine HAVE_FSEEKO 1 #cmakedefine HAVE_FSYNC 1 @@ -162,13 +161,10 @@ #cmakedefine gmtime_r @gmtime_r@ #cmakedefine HAVE_IN_ADDR_T 1 #cmakedefine HAVE_INITGROUPS 1 -#cmakedefine HAVE_ISNAN 1 -#cmakedefine HAVE_ISINF 1 #cmakedefine HAVE_LARGE_PAGE_OPTION 1 #cmakedefine HAVE_LDIV 1 #cmakedefine HAVE_LRAND48 1 #cmakedefine HAVE_LOCALTIME_R 1 -#cmakedefine HAVE_LOG2 1 #cmakedefine HAVE_LSTAT 1 #cmakedefine HAVE_MEMALIGN 1 /* #cmakedefine HAVE_MLOCK 1 see Bug#54662 */ @@ -181,6 +177,7 @@ #cmakedefine HAVE_MEMCPY 1 #cmakedefine HAVE_MEMMOVE 1 #cmakedefine 
HAVE_MKSTEMP 1 +#cmakedefine HAVE_MKOSTEMP 1 #cmakedefine HAVE_MLOCKALL 1 #cmakedefine HAVE_MMAP 1 #cmakedefine HAVE_MMAP64 1 @@ -212,7 +209,6 @@ #cmakedefine HAVE_READLINK 1 #cmakedefine HAVE_REALPATH 1 #cmakedefine HAVE_RENAME 1 -#cmakedefine HAVE_RINT 1 #cmakedefine HAVE_RWLOCK_INIT 1 #cmakedefine HAVE_SCHED_YIELD 1 #cmakedefine HAVE_SELECT 1 @@ -235,7 +231,6 @@ #cmakedefine HAVE_STRTOUL 1 #cmakedefine HAVE_STRTOULL 1 #cmakedefine HAVE_TELL 1 -#cmakedefine HAVE_TEMPNAM 1 #cmakedefine HAVE_THR_SETCONCURRENCY 1 #cmakedefine HAVE_THR_YIELD 1 #cmakedefine HAVE_TIME 1 @@ -421,8 +416,6 @@ #cmakedefine mode_t @mode_t@ #cmakedefine SIGQUIT @SIGQUIT@ #cmakedefine SIGPIPE @SIGPIPE@ -#cmakedefine isnan @isnan@ -#cmakedefine finite @finite@ #cmakedefine popen @popen@ #cmakedefine pclose @pclose@ #cmakedefine ssize_t @ssize_t@ @@ -433,11 +426,11 @@ #cmakedefine strtoll @strtoll@ #cmakedefine strtoull @strtoull@ #cmakedefine vsnprintf @vsnprintf@ -#if (_MSC_VER > 1800) +#if defined(_MSC_VER) && (_MSC_VER > 1800) #define tzname _tzname #define P_tmpdir "C:\\TEMP" #endif -#if (_MSC_VER > 1310) +#if defined(_MSC_VER) && (_MSC_VER > 1310) # define HAVE_SETENV #define setenv(a,b,c) _putenv_s(a,b) #endif diff --git a/configure.cmake b/configure.cmake index 17f31206975..d840dd4e565 100644 --- a/configure.cmake +++ b/configure.cmake @@ -370,6 +370,7 @@ CHECK_FUNCTION_EXISTS (mallinfo HAVE_MALLINFO) CHECK_FUNCTION_EXISTS (memcpy HAVE_MEMCPY) CHECK_FUNCTION_EXISTS (memmove HAVE_MEMMOVE) CHECK_FUNCTION_EXISTS (mkstemp HAVE_MKSTEMP) +CHECK_FUNCTION_EXISTS (mkostemp HAVE_MKOSTEMP) CHECK_FUNCTION_EXISTS (mlock HAVE_MLOCK) CHECK_FUNCTION_EXISTS (mlockall HAVE_MLOCKALL) CHECK_FUNCTION_EXISTS (mmap HAVE_MMAP) @@ -413,7 +414,6 @@ CHECK_FUNCTION_EXISTS (strtoul HAVE_STRTOUL) CHECK_FUNCTION_EXISTS (strtoull HAVE_STRTOULL) CHECK_FUNCTION_EXISTS (strcasecmp HAVE_STRCASECMP) CHECK_FUNCTION_EXISTS (tell HAVE_TELL) -CHECK_FUNCTION_EXISTS (tempnam HAVE_TEMPNAM) CHECK_FUNCTION_EXISTS (thr_setconcurrency HAVE_THR_SETCONCURRENCY) CHECK_FUNCTION_EXISTS (thr_yield HAVE_THR_YIELD) CHECK_FUNCTION_EXISTS (vasprintf HAVE_VASPRINTF) @@ -477,26 +477,6 @@ CHECK_SYMBOL_EXISTS(TIOCSTAT "sys/ioctl.h" TIOCSTAT_IN_SYS_IOCTL) CHECK_SYMBOL_EXISTS(FIONREAD "sys/filio.h" FIONREAD_IN_SYS_FILIO) CHECK_SYMBOL_EXISTS(gettimeofday "sys/time.h" HAVE_GETTIMEOFDAY) -CHECK_SYMBOL_EXISTS(finite "math.h" HAVE_FINITE_IN_MATH_H) -IF(HAVE_FINITE_IN_MATH_H) - SET(HAVE_FINITE TRUE CACHE INTERNAL "") -ELSE() - CHECK_SYMBOL_EXISTS(finite "ieeefp.h" HAVE_FINITE) -ENDIF() -CHECK_SYMBOL_EXISTS(log2 math.h HAVE_LOG2) -CHECK_SYMBOL_EXISTS(isnan math.h HAVE_ISNAN) -CHECK_SYMBOL_EXISTS(rint math.h HAVE_RINT) - -# isinf() prototype not found on Solaris -CHECK_CXX_SOURCE_COMPILES( -"#include -int main() { - isinf(0.0); - return 0; -}" HAVE_ISINF) - - - # # Test for endianness # diff --git a/debian/additions/debian-start b/debian/additions/debian-start index dea22c48f76..40c248fd81f 100755 --- a/debian/additions/debian-start +++ b/debian/additions/debian-start @@ -4,6 +4,8 @@ # # Changes to this file will be preserved when updating the Debian package. # +# NOTE: This file is read only by the traditional SysV init script, not systemd. 
+# source /usr/share/mysql/debian-start.inc.sh diff --git a/debian/additions/innotop/changelog.innotop b/debian/additions/innotop/changelog.innotop index 1816595240b..67bc52698e7 100644 --- a/debian/additions/innotop/changelog.innotop +++ b/debian/additions/innotop/changelog.innotop @@ -77,7 +77,7 @@ Changelog for innotop: * remove cxn from $meta->{group_by} if there's only one connection displayed * fix for issue 19 - cxn column won't become visible when viewing two connections after having viewed one connection - * suppress errors resulting from the addition of a 'BACKGROUND THREAD' + * supress errors resulting from the addition of a 'BACKGROUND THREAD' section in the output of 'show innodb status' * possible fix for issue 22 - Useless use of a constant in void context * small change to set_to_tbl() around hiding the cxn column if there diff --git a/debian/additions/innotop/innotop b/debian/additions/innotop/innotop index b0134e0bc91..f958eba432b 100644 --- a/debian/additions/innotop/innotop +++ b/debian/additions/innotop/innotop @@ -17,7 +17,7 @@ # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 Franklin -# Street, Fifth Floor, Boston, MA 02111-1301 USA +# Street, Fifth Floor, Boston, MA 02110-1301 USA use strict; use warnings FATAL => 'all'; @@ -1438,8 +1438,8 @@ systems, you can issue `man perlgpl' or `man perlartistic' to read these licenses. You should have received a copy of the GNU General Public License along with -this program; if not, write to the Free Software Foundation, Inc., 51 Franklin -Street, Fifth Floor, Boston, MA 02111-1301 USA. +this program; if not, write to the Free Software Foundation, Inc., 59 Temple +Place, Suite 330, Boston, MA 02111-1307 USA. LICENSE # Configuration information and global setup {{{1 @@ -4657,7 +4657,7 @@ my %stmt_maker_for = ( my $sth; eval { # This can fail if the table doesn't exist, INFORMATION_SCHEMA doesn't exist, etc. my $cols = $dbh->selectall_arrayref(q{SHOW /*innotop*/ COLUMNS FROM INFORMATION_SCHEMA.PROCESSLIST LIKE 'TIME_MS'}); - if ( @$cols ) { # The TIME_MS column exists + if ( @$cols ) { # The TIME_MS colum exists $sth = $dbh->prepare(q{SELECT /*innotop*/ ID, USER, HOST, DB, COMMAND, CASE WHEN TIME_MS/1000 > 365*86400 THEN TIME ELSE TIME_MS/1000 END AS TIME, STATE, INFO FROM INFORMATION_SCHEMA.PROCESSLIST}); } }; @@ -11653,7 +11653,7 @@ show you something like this: pages_modified Dirty Pages Pages modified (dirty IB_bp_pages_m buf_pool_hit_rate Hit Rate Buffer pool hit rate IB_bp_buf_poo total_mem_alloc Memory Total memory allocate IB_bp_total_m - add_pool_alloc Add'l Pool Additional pool alloca IB_bp_add_poo + add_pool_alloc Add'l Pool Additonal pool alloca IB_bp_add_poo The first line shows which table you're editing, and reminds you again to press '?' for a list of key mappings. The rest is a tabular representation of the @@ -12233,8 +12233,8 @@ systems, you can issue `man perlgpl' or `man perlartistic' to read these licenses. You should have received a copy of the GNU General Public License along with -this program; if not, write to the Free Software Foundation, Inc., 51 Franklin -Street, Fifth Floor, Boston, MA 02111-1301 USA. +this program; if not, write to the Free Software Foundation, Inc., 59 Temple +Place, Suite 330, Boston, MA 02111-1307 USA. Execute innotop and press '!' to see this information at any time. 
diff --git a/debian/additions/innotop/innotop.1 b/debian/additions/innotop/innotop.1 index 7079ff2c4a4..7cd275a79f2 100644 --- a/debian/additions/innotop/innotop.1 +++ b/debian/additions/innotop/innotop.1 @@ -1678,7 +1678,7 @@ show you something like this: \& pages_modified Dirty Pages Pages modified (dirty IB_bp_pages_m \& buf_pool_hit_rate Hit Rate Buffer pool hit rate IB_bp_buf_poo \& total_mem_alloc Memory Total memory allocate IB_bp_total_m -\& add_pool_alloc Add\*(Aql Pool Additional pool alloca IB_bp_add_poo +\& add_pool_alloc Add\*(Aql Pool Additonal pool alloca IB_bp_add_poo .Ve .PP The first line shows which table you're editing, and reminds you again to press @@ -2183,8 +2183,8 @@ systems, you can issue `man perlgpl' or `man perlartistic' to read these licenses. .PP You should have received a copy of the \s-1GNU\s0 General Public License along with -this program; if not, write to the Free Software Foundation, Inc., 51 Franklin -Street, Fifth Floor, Boston, \s-1MA\s0 02111\-1301 \s-1USA\s0. +this program; if not, write to the Free Software Foundation, Inc., 59 Temple +Place, Suite 330, Boston, \s-1MA 02111\-1307 USA.\s0 .PP Execute innotop and press '!' to see this information at any time. .SH "AUTHOR" diff --git a/debian/additions/my.cnf b/debian/additions/my.cnf index d30ec182c8b..91a159e4669 100644 --- a/debian/additions/my.cnf +++ b/debian/additions/my.cnf @@ -186,4 +186,5 @@ key_buffer = 16M # * IMPORTANT: Additional settings that can override those from this file! # The files must end with '.cnf', otherwise they'll be ignored. # +!include /etc/mysql/mariadb.cnf !includedir /etc/mysql/conf.d/ diff --git a/debian/apparmor-profile b/debian/apparmor-profile index 4ffb7eab550..b1f229b33b4 100644 --- a/debian/apparmor-profile +++ b/debian/apparmor-profile @@ -1,4 +1,4 @@ -# This file is intensionally empty to disable apparmor by default for newer +# This file is intentionally empty to disable apparmor by default for newer # versions of MariaDB, while providing seamless upgrade from older versions # and from mysql, where apparmor is used. # @@ -11,5 +11,5 @@ # be used. # # When upgrading from previous version, users who modified the profile -# will be promptet to keep or discard it, while for default installs +# will be prompted to keep or discard it, while for default installs # we will automatically disable the profile. diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh index f72e1136d83..48721043279 100755 --- a/debian/autobake-deb.sh +++ b/debian/autobake-deb.sh @@ -48,7 +48,7 @@ fi if ! apt-cache madison libcrack2-dev | grep 'libcrack2-dev *| *2\.9' >/dev/null 2>&1 then sed '/libcrack2-dev/d' -i debian/control - sed '/Package: mariadb-plugin-cracklib/,+9d' -i debian/control + sed '/Package: mariadb-plugin-cracklib/,+11d' -i debian/control fi # If libpcre3-dev (>= 2:8.35-3.2~) is not available (before Debian Jessie or Ubuntu Wily) @@ -73,6 +73,15 @@ then sed '/mariadb-service-convert/d' -i debian/mariadb-server-10.3.install fi +# If libzstd-dev is not available (before Debian Stretch and Ubuntu Xenial) +# remove the dependency from server and rocksdb so it can build properly +if ! apt-cache madison libzstd-dev | grep 'libzstd-dev' >/dev/null 2>&1 +then + sed '/libzstd-dev/d' -i debian/control + sed '/libzstd1/d' -i debian/control +fi + + # Convert gcc version to numberical value. Format is Mmmpp where M is Major # version, mm is minor version and p is patch. 
# -dumpfullversion & -dumpversion to make it uniform across old and new (>=7) @@ -128,10 +137,11 @@ UPSTREAM="${MYSQL_VERSION_MAJOR}.${MYSQL_VERSION_MINOR}.${MYSQL_VERSION_PATCH}${ PATCHLEVEL="+maria" LOGSTRING="MariaDB build" CODENAME="$(lsb_release -sc)" +EPOCH="1:" -dch -b -D ${CODENAME} -v "1:${UPSTREAM}${PATCHLEVEL}~${CODENAME}" "Automatic build with ${LOGSTRING}." +dch -b -D ${CODENAME} -v "${EPOCH}${UPSTREAM}${PATCHLEVEL}~${CODENAME}" "Automatic build with ${LOGSTRING}." -echo "Creating package version ${UPSTREAM}${PATCHLEVEL}~${CODENAME} ... " +echo "Creating package version ${EPOCH}${UPSTREAM}${PATCHLEVEL}~${CODENAME} ... " # On Travis CI, use -b to build binary only packages as there is no need to # waste time on generating the source package. diff --git a/debian/control b/debian/control index 275d48943f9..ba783955dc3 100644 --- a/debian/control +++ b/debian/control @@ -8,7 +8,6 @@ Build-Depends: bison, debhelper (>= 9), dh-apparmor, dh-systemd, - dpatch, gdb, libaio-dev [linux-any], libboost-dev, @@ -26,6 +25,7 @@ Build-Depends: bison, libssl-dev | libssl1.0-dev, libsystemd-dev, libxml2-dev, + libzstd-dev, lsb-release, perl (>= 5.6.0), po-debconf, @@ -40,6 +40,7 @@ Vcs-Git: https://github.com/MariaDB/server.git Package: libmariadb-dev Architecture: any +Multi-Arch: same Section: libdevel Depends: libmariadb3 (= ${binary:Version}), zlib1g-dev, @@ -91,6 +92,7 @@ Description: MariaDB Connector/C, compatibility symlinks Package: libmariadb3 Architecture: any +Multi-Arch: same Section: libs Depends: mariadb-common, ${misc:Depends}, @@ -167,6 +169,7 @@ Description: Virtual package to satisfy external libmysqlclient18 depends Package: libmariadbd-dev Architecture: any +Multi-Arch: same Section: libdevel Provides: libmysqld-dev Pre-Depends: ${misc:Pre-Depends} @@ -186,10 +189,10 @@ Description: MariaDB embedded database, development files Package: libmariadbd19 Architecture: any +Multi-Arch: same Section: libs Depends: ${misc:Depends}, ${shlibs:Depends} -Multi-Arch: same Breaks: libmariadbd-dev (<< ${source:Version}) Replaces: libmariadbd-dev (<< ${source:Version}) Description: MariaDB embedded database, shared library @@ -418,10 +421,7 @@ Recommends: libhtml-template-perl Pre-Depends: adduser (>= 3.40), debconf, mariadb-common (>= ${source:Version}) -Depends: bsdutils, - coreutils, - findutils, - galera-3 (>=25.3), +Depends: galera-3 (>=25.3), gawk, iproute2, libdbi-perl, @@ -536,6 +536,7 @@ Description: Connect storage engine for MariaDB Package: mariadb-plugin-rocksdb Architecture: amd64 arm64 mips64el ppc64el Depends: mariadb-server-10.3 (= ${binary:Version}), + libzstd1, ${misc:Depends}, ${shlibs:Depends} Breaks: mariadb-rocksdb-engine-10.2, @@ -655,7 +656,10 @@ Replaces: mariadb-gssapi-server-10.1, mariadb-gssapi-server-10.2, mariadb-gssapi-server-10.3 Description: GSSAPI authentication plugin for MariaDB server - This package contains the server components. + This plugin includes support for Kerberos on Unix, but can also be used for + Windows authentication with or without domain environment. + . + This package contains the server parts. Package: mariadb-plugin-gssapi-client Architecture: any @@ -670,7 +674,10 @@ Replaces: mariadb-gssapi-client-10.1, mariadb-gssapi-client-10.2, mariadb-gssapi-client-10.3 Description: GSSAPI authentication plugin for MariaDB client - This package contains the client components. + This plugin includes support for Kerberos on Unix, but can also be used for + Windows authentication with or without domain environment. + . 
+ This package contains the client parts. Package: mariadb-backup Architecture: any @@ -683,6 +690,10 @@ Depends: mariadb-client-core-10.3, ${shlibs:Depends} Description: Backup tool for MariaDB server This backup tool is guaranteed to be compatible with MariaDB. + Based on Xtrabackup, but improved to work with MariaDB. + . + Plese refer to the MariaDB Knowledge Base on more information on + how to use this tool. Package: mariadb-plugin-cracklib-password-check Architecture: any @@ -693,6 +704,9 @@ Depends: libcrack2 (>= 2.9.0), Description: CrackLib Password Validation Plugin for MariaDB This password validation plugin uses cracklib to allow only sufficiently secure (as defined by cracklib) user passwords in MariaDB. + . + Plese refer to the MariaDB Knowledge Base on more information on + how to use this tool. Package: mariadb-test Architecture: any @@ -733,6 +747,7 @@ Description: MariaDB database regression test suite Package: mariadb-test-data Architecture: all +Multi-Arch: foreign Depends: ${misc:Depends} Breaks: mariadb-test-10.0, mariadb-test-10.1, diff --git a/debian/mariadb-server-10.3.install b/debian/mariadb-server-10.3.install index d8de5512b05..a7d4d665892 100644 --- a/debian/mariadb-server-10.3.install +++ b/debian/mariadb-server-10.3.install @@ -41,6 +41,7 @@ usr/bin/wsrep_sst_xtrabackup-v2 usr/lib/mysql/plugin/auth_ed25519.so usr/lib/mysql/plugin/auth_pam.so usr/lib/mysql/plugin/auth_socket.so +usr/lib/mysql/plugin/disks.so usr/lib/mysql/plugin/file_key_management.so usr/lib/mysql/plugin/ha_archive.so usr/lib/mysql/plugin/ha_blackhole.so @@ -98,4 +99,5 @@ usr/share/mysql/mysql_performance_tables.sql usr/share/mysql/mysql_system_tables.sql usr/share/mysql/mysql_system_tables_data.sql usr/share/mysql/mysql_test_data_timezone.sql +usr/share/mysql/mysql_test_db.sql usr/share/mysql/wsrep_notify diff --git a/debian/mariadb-server-10.3.mysql.default b/debian/mariadb-server-10.3.mysql.default index 22f08e54427..146c5a87a84 100644 --- a/debian/mariadb-server-10.3.mysql.default +++ b/debian/mariadb-server-10.3.mysql.default @@ -1,3 +1,18 @@ +# +# NOTE: This file is read only by the traditional SysV init script. +# Debian 9 and Ubuntu 17.04 onwards do not normally read this file as they use +# systemd by default. +# +# For similar behaviour, systemd users should override ExecStart by dropping +# files into /etc/systemd/system/mariadb.service.d/ +# +# See also: +# https://wiki.debian.org/Teams/pkg-systemd/Packaging#overriding_options_and_.2Fetc.2Fdefault_handling +# https://mariadb.com/kb/en/mariadb/systemd/ +# +# Note also that MariaDB systemd does _not_ utilize mysqld_safe nor debian-start. + + # The delay in seconds the init script waits for the server to be up and running after having started "mysqld_safe" to run the "/etc/mysql/debian-start" script. # If the server is still not responding after the delay, the script won't be executed and an error will be thrown on the syslog. # Default: 30 diff --git a/debian/mariadb-server-10.3.postinst b/debian/mariadb-server-10.3.postinst index 2a85099b45b..aa6ea07a888 100644 --- a/debian/mariadb-server-10.3.postinst +++ b/debian/mariadb-server-10.3.postinst @@ -91,7 +91,7 @@ case "$1" in mv "$targetdir" "$mysql_tmp" cat << EOF > "$mysql_tmp/README" -Ff you're reading this, it's most likely because you had replaced /var/lib/mysql +If you're reading this, it's most likely because you had replaced /var/lib/mysql with a symlink, then upgraded to a new version of mysql, and then dpkg removed your symlink (see #182747 and others). 
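A hedged sketch of the systemd drop-in approach mentioned in the mariadb-server-10.3.mysql.default comment above; the extra mysqld option is purely hypothetical and only illustrates overriding ExecStart from /etc/systemd/system/mariadb.service.d/:

    sudo mkdir -p /etc/systemd/system/mariadb.service.d
    sudo tee /etc/systemd/system/mariadb.service.d/override.conf <<'EOF'
    [Service]
    # the empty ExecStart= clears the packaged value before setting a new one
    ExecStart=
    ExecStart=/usr/sbin/mysqld --some-extra-option
    EOF
    sudo systemctl daemon-reload
    sudo systemctl restart mariadb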
The mysql packages noticed that this happened, and as a workaround have restored it. However, because @@ -141,7 +141,9 @@ EOF # Debian: beware of the bashisms... # Debian: can safely run on upgrades with existing databases set +e - bash /usr/bin/mysql_install_db --rpm --cross-bootstrap --user=mysql --disable-log-bin 2>&1 | $ERR_LOGGER + bash /usr/bin/mysql_install_db --rpm --cross-bootstrap --user=mysql \ + --disable-log-bin --skip-test-db 2>&1 | \ + $ERR_LOGGER set -e diff --git a/debian/mysql-common.install b/debian/mysql-common.install index 264df611822..56c1c4a03f6 100644 --- a/debian/mysql-common.install +++ b/debian/mysql-common.install @@ -1,2 +1 @@ debian/additions/my.cnf etc/mysql -usr/share/mysql-common/internal-use-only diff --git a/debian/patches/00list b/debian/patches/00list deleted file mode 100644 index 5d090c644f1..00000000000 --- a/debian/patches/00list +++ /dev/null @@ -1,5 +0,0 @@ -33_scripts__mysql_create_system_tables__no_test.dpatch -38_scripts__mysqld_safe.sh__signals.dpatch -41_scripts__mysql_install_db.sh__no_test.dpatch -50_mysql-test__db_test.dpatch -61_replace_dash_with_bash_mbug675185.dpatch diff --git a/debian/patches/33_scripts__mysql_create_system_tables__no_test.dpatch b/debian/patches/33_scripts__mysql_create_system_tables__no_test.dpatch deleted file mode 100755 index 06c984c398c..00000000000 --- a/debian/patches/33_scripts__mysql_create_system_tables__no_test.dpatch +++ /dev/null @@ -1,38 +0,0 @@ -#! /bin/sh /usr/share/dpatch/dpatch-run -## 33_scripts__mysql_create_system_tables__no_test.dpatch by -## -## All lines beginning with `## DP:' are a description of the patch. -## DP: scripts__mysql_create_system_tables__no_test -## DP: A user with no password prevents a normal user from login under certain -## DP: circumstances as it is checked first. See #301741. -## DP: http://bugs.mysql.com/bug.php?id=6901 - -@DPATCH@ ---- a/scripts/mysql_system_tables_data.sql -+++ b/scripts/mysql_system_tables_data.sql -@@ -27,15 +27,6 @@ - SELECT LOWER( REPLACE((SELECT REPLACE(@@hostname,'_','\_')),'%','\%') )INTO @current_hostname; - - ---- Fill "db" table with default grants for anyone to ---- access database 'test' and 'test_%' if "db" table didn't exist --CREATE TEMPORARY TABLE tmp_db LIKE db; --INSERT INTO tmp_db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y','Y'); --INSERT INTO tmp_db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y','Y'); --INSERT INTO db SELECT * FROM tmp_db WHERE @had_db_table=0; --DROP TABLE tmp_db; -- -- - -- Fill "user" table with default users allowing root access - -- from local machine if "user" table didn't exist before - CREATE TEMPORARY TABLE tmp_user_nopasswd LIKE user; -@@ -48,9 +39,6 @@ REPLACE INTO tmp_user_nopasswd VALUES ('127.0.0.1','root','','Y','Y','Y','Y','Y' - REPLACE INTO tmp_user_nopasswd VALUES ('::1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','','N','N', '', 0); - -- More secure root account using unix sucket auth. - INSERT INTO tmp_user_socket VALUES ('localhost',IFNULL(@auth_root_socket, 'root'),'','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'unix_socket','','N', 'N','', 0); ---- Anonymous user with no privileges. 
--INSERT INTO tmp_user_anonymous (host,user) VALUES ('localhost',''); --INSERT INTO tmp_user_anonymous (host,user) SELECT @current_hostname,'' FROM dual WHERE @current_hostname != 'localhost'; - - INSERT INTO user SELECT * FROM tmp_user_nopasswd WHERE @had_user_table=0 AND @skip_auth_root_nopasswd IS NULL; - INSERT INTO user SELECT * FROM tmp_user_socket WHERE @had_user_table=0 AND @auth_root_socket IS NOT NULL; diff --git a/debian/patches/38_scripts__mysqld_safe.sh__signals.dpatch b/debian/patches/38_scripts__mysqld_safe.sh__signals.dpatch deleted file mode 100755 index 5cbc897c272..00000000000 --- a/debian/patches/38_scripts__mysqld_safe.sh__signals.dpatch +++ /dev/null @@ -1,43 +0,0 @@ -#! /bin/sh /usr/share/dpatch/dpatch-run -## 38_scripts__mysqld_safe.sh__signals.dpatch by -## -## All lines beginning with `## DP:' are a description of the patch. -## DP: Executes /etc/init.d/mysql on signals -## DP: Reported as http://bugs.mysql.com/bug.php?id=31361 - -@DPATCH@ - ---- a/scripts/mysqld_safe.sh 2013-01-11 16:02:41 +0000 -+++ b/scripts/mysqld_safe.sh 2013-01-11 16:03:14 +0000 -@@ -36,7 +36,6 @@ skip_err_log=0 - syslog_tag_mysqld=mysqld - syslog_tag_mysqld_safe=mysqld_safe - --trap '' 1 2 3 15 # we shouldn't let anyone kill us - - # MySQL-specific environment variable. First off, it's not really a umask, - # it's the desired mode. Second, it follows umask(2), not umask(3) in that -@@ -181,7 +180,7 @@ eval_log_error () { - # sed buffers output (only GNU sed supports a -u (unbuffered) option) - # which means that messages may not get sent to syslog until the - # mysqld process quits. -- cmd="$cmd 2>&1 | logger -t '$syslog_tag_mysqld' -p daemon.error" -+ cmd="$cmd 2>&1 | logger -t '$syslog_tag_mysqld' -p daemon.error & wait" - ;; - *) - echo "Internal program error (non-fatal):" \ -@@ -895,6 +894,13 @@ then - fi - - # -+# From now on, we catch signals to do a proper shutdown of mysqld -+# when signalled to do so. -+# -+trap '/usr/bin/mysqladmin --defaults-extra-file=/etc/mysql/debian.cnf refresh & wait' 1 # HUP -+trap '/usr/bin/mysqladmin --defaults-extra-file=/etc/mysql/debian.cnf shutdown' 2 3 15 # INT QUIT and TERM -+ -+# - # Uncomment the following lines if you want all tables to be automatically - # checked and repaired during startup. You should add sensible key_buffer - # and sort_buffer values to my.cnf to improve check performance or require - diff --git a/debian/patches/41_scripts__mysql_install_db.sh__no_test.dpatch b/debian/patches/41_scripts__mysql_install_db.sh__no_test.dpatch deleted file mode 100755 index 9a063e408a5..00000000000 --- a/debian/patches/41_scripts__mysql_install_db.sh__no_test.dpatch +++ /dev/null @@ -1,20 +0,0 @@ -#! /bin/sh /usr/share/dpatch/dpatch-run -## 41_scripts__mysql_install_db.sh__no_test.dpatch by -## -## All lines beginning with `## DP:' are a description of the patch. -## DP: scripts__mysql_install_db.sh__no_test -## DP: http://bugs.mysql.com/bug.php?id=6901 - -@DPATCH@ - ---- mysql-dfsg-5.1-5.1.23rc.orig/scripts/mysql_install_db.sh 2008-01-29 22:41:20.000000000 +0100 -+++ mysql-dfsg-5.1-5.1.23rc/scripts/mysql_install_db.sh 2008-02-28 10:08:11.000000000 +0100 -@@ -407,7 +407,7 @@ then - fi - - # Create database directories --for dir in "$ldata" "$ldata/mysql" "$ldata/test" -+for dir in "$ldata" "$ldata/mysql" - do - if test ! 
-d "$dir" - then diff --git a/debian/patches/50_mysql-test__db_test.dpatch b/debian/patches/50_mysql-test__db_test.dpatch deleted file mode 100755 index ed2efb95998..00000000000 --- a/debian/patches/50_mysql-test__db_test.dpatch +++ /dev/null @@ -1,24 +0,0 @@ -#! /bin/sh /usr/share/dpatch/dpatch-run -## 50_mysql-test__db_test.dpatch by Christian Hammers -## -## All lines beginning with `## DP:' are a description of the patch. -## DP: Patch 33_scripts__mysql_create_system_tables__no_test removes the -## DP: rights for anybody to connect to the test database but the test -## DP: suite depends on them. - -@DPATCH@ - ---- old/mysql-test/mysql-test-run.pl 2009-06-16 14:24:09.000000000 +0200 -+++ new/mysql-test/mysql-test-run.pl 2009-07-04 00:03:34.000000000 +0200 -@@ -3180,6 +3180,11 @@ sub mysql_install_db { - mtr_appendfile_to_file("$sql_dir/mysql_system_tables_data.sql", - $bootstrap_sql_file); - -+ mtr_tofile($bootstrap_sql_file, "-- Debian removed the default privileges on the 'test' database\n"); -+ mtr_tofile($bootstrap_sql_file, "INSERT INTO mysql.db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y','Y');\n"); -+ mtr_tofile($bootstrap_sql_file, "INSERT INTO mysql.db VALUES ('%','test\\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y','Y');\n"); -+ -+ - # Add test data for timezone - this is just a subset, on a real - # system these tables will be populated either by mysql_tzinfo_to_sql - # or by downloading the timezone table package from our website diff --git a/debian/patches/61_replace_dash_with_bash_mbug675185.dpatch b/debian/patches/61_replace_dash_with_bash_mbug675185.dpatch deleted file mode 100755 index 2a4ee8cd648..00000000000 --- a/debian/patches/61_replace_dash_with_bash_mbug675185.dpatch +++ /dev/null @@ -1,20 +0,0 @@ -#! /bin/sh /usr/share/dpatch/dpatch-run -## 61_replace_dash_with_bash_mbug675185.dpatch by -## -## All lines beginning with `## DP:' are a description of the patch. -## DP: 61_replace_dash_with_bash_mbug675185 -## DP: A race in dash causes mysqld_safe to occasionally loop infinitely. -## DP: Fix by using bash instead. 
-## DP: https://bugs.launchpad.net/ubuntu/+source/mysql-dfsg-5.0/+bug/675185 - -@DPATCH@ -=== modified file 'scripts/mysqld_safe.sh' ---- old/scripts/mysqld_safe.sh 2010-04-09 11:47:18 +0000 -+++ new/scripts/mysqld_safe.sh 2010-11-21 09:40:50 +0000 -@@ -1,4 +1,4 @@ --#!/bin/sh -+#!/bin/bash - # Copyright Abandoned 1996 TCX DataKonsult AB & Monty Program KB & Detron HB - # This file is public domain and comes with NO WARRANTY of any kind - # - diff --git a/debian/rules b/debian/rules index b4ef2e5da6d..ceb2171a289 100755 --- a/debian/rules +++ b/debian/rules @@ -4,25 +4,13 @@ export DH_VERBOSE=1 # enable Debian Hardening # see: https://wiki.debian.org/Hardening -export DEB_BUILD_MAINT_OPTIONS = hardening=+all,-pie +export DEB_BUILD_MAINT_OPTIONS = hardening=+all DPKG_EXPORT_BUILDFLAGS = 1 -include /usr/share/dpkg/buildflags.mk +include /usr/share/dpkg/default.mk -ARCH := $(shell dpkg-architecture -qDEB_BUILD_ARCH) -ARCH_OS := $(shell dpkg-architecture -qDEB_BUILD_ARCH_OS) BUILDDIR := builddir -DEB_HOST_GNU_TYPE ?= $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE) -DEB_BUILD_GNU_SYSTEM ?= $(shell dpkg-architecture -qDEB_BUILD_GNU_SYSTEM) -DEB_BUILD_ARCH ?= $(shell dpkg-architecture -qDEB_BUILD_ARCH) -DEBVERSION := $(shell dpkg-parsechangelog | awk '/^Version: / { print $$2 }' | sed 's/^.*-//' ) -DEB_SOURCE_PACKAGE ?= $(strip $(shell egrep '^Source: ' debian/control | cut -f 2 -d ':')) -DEB_VERSION ?= $(shell dpkg-parsechangelog | egrep '^Version:' | cut -f 2 -d ' ') -DEB_NOEPOCH_VERSION ?= $(shell echo $(DEB_VERSION) | cut -d: -f2-) -DEB_UPSTREAM_VERSION ?= $(shell echo $(DEB_NOEPOCH_VERSION) | sed 's/-[^-]*$$//') -DEB_UPSTREAM_VERSION_MAJOR_MINOR := $(shell echo $(DEB_UPSTREAM_VERSION) | sed -r -n 's/^([0-9]+\.[0-9]+).*/\1/p') -DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture -qDEB_HOST_MULTIARCH) -DISTRIBUTION := $(shell lsb_release -i -s) -RELEASE := $(shell lsb_release -r -s) +DEB_VERSION_REVISION := $(shell echo $(DEB_VERSION) | sed -e 's/^.*-//') +RELEASE := $(shell lsb_release -r -s) # Use changelog based DEB_DISTRIBUTION instead? TMP:=$(CURDIR)/debian/tmp CC := $(DEB_HOST_GNU_TYPE)-gcc @@ -39,12 +27,28 @@ else endif # Ignore test suite exit code on unstable platforms -ifneq (,$(filter $(ARCH),mips mipsel mips64el alpha powerpc sh4 hurd-i386 sparc64 kfreebsd-i386 kfreebsd-amd64)) +ifneq (,$(filter $(DEB_HOST_ARCH),mips mipsel mips64el alpha powerpc sh4 hurd-i386 sparc64 kfreebsd-i386 kfreebsd-amd64)) TESTSUITE_FAIL_CMD:=true else TESTSUITE_FAIL_CMD:=exit 1 endif +ifeq (32,$(DEB_HOST_ARCH_BITS)) + CMAKEFLAGS += -DWITHOUT_ROCKSDB=true +endif + +# Skip TokuDB if arch is not amd64 (also disable for kfreebsd-amd64 as it FTBFS) +# Skipped on the x32 ABI too; untested, but unlikely to work given i386 is not +# supported. +ifneq ($(DEB_HOST_ARCH),amd64) + CMAKEFLAGS += -DWITHOUT_TOKUDB=true +endif + +# Disable jemalloc on mips* due to #843926 +ifneq (,$(filter $(DEB_HOST_ARCH), mips mipsel mips64 mips64el)) + CMAKEFLAGS += -DWITH_JEMALLOC=no +endif + # Add support for verbose builds MAKEFLAGS += VERBOSE=1 @@ -66,7 +70,7 @@ override_dh_auto_configure: # Versioned symbols are only available on Linux. # Remove symbols file on kFreeBSD builds so that # dpkg-gensymbols will not fail the build. 
-ifneq (,$(filter $(ARCH), kfreebsd-i386 kfreebsd-amd64)) +ifneq (,$(filter $(DEB_HOST_ARCH), kfreebsd-i386 kfreebsd-amd64)) rm debian/libmariadb3.symbols endif @@ -77,19 +81,14 @@ endif cmake -DCMAKE_INSTALL_PREFIX=/usr \ $(CMAKEFLAGS) \ -DCOMPILATION_COMMENT="mariadb.org binary distribution" \ - -DMYSQL_SERVER_SUFFIX="-$(DEBVERSION)" \ - -DSYSTEM_TYPE="debian-$(DEB_BUILD_GNU_SYSTEM)" \ - $${MYSQL_BUILD_CXX:+-DCMAKE_CXX_COMPILER=$${MYSQL_BUILD_CXX}} \ - $${MYSQL_BUILD_CC:+-DCMAKE_C_COMPILER=$${MYSQL_BUILD_CC}} \ - $${MYSQL_COMPILER_LAUNCHER:+-DCMAKE_CXX_COMPILER_LAUNCHER=${MYSQL_COMPILER_LAUNCHER}} \ - $${MYSQL_COMPILER_LAUNCHER:+-DCMAKE_C_COMPILER_LAUNCHER=${MYSQL_COMPILER_LAUNCHER}} \ - -DCMAKE_SYSTEM_PROCESSOR=$(DEB_BUILD_ARCH) \ + -DMYSQL_SERVER_SUFFIX="-$(DEB_VERSION_REVISION)" \ + -DSYSTEM_TYPE="debian-$(DEB_HOST_GNU_SYSTEM)" \ + -DCMAKE_SYSTEM_PROCESSOR=$(DEB_HOST_ARCH) \ -DBUILD_CONFIG=mysql_release \ -DINSTALL_LIBDIR=lib/$(DEB_HOST_MULTIARCH) \ -DINSTALL_PLUGINDIR=lib/mysql/plugin \ -DINSTALL_MYSQLTESTDIR=share/mysql/mysql-test \ - -DDEB=$(DISTRIBUTION) ..' - touch $@ + -DDEB=$(DEB_VENDOR) ..' # This is needed, otherwise 'make test' will run before binaries have been built override_dh_auto_build: @@ -97,13 +96,12 @@ override_dh_auto_build: # Print build env info to help debug builds on different platforms dpkg-architecture cd $(BUILDDIR) && $(MAKE) - touch $@ override_dh_auto_test: @echo "RULES.$@" dh_testdir # Skip unstable tests if such are defined for arch - [ ! -f debian/unstable-tests.$(ARCH) ] || cat debian/unstable-tests.$(ARCH) >> mysql-test/unstable-tests + [ ! -f debian/unstable-tests.$(DEB_HOST_ARCH) ] || cat debian/unstable-tests.$(DEB_HOST_ARCH) >> mysql-test/unstable-tests # Run testsuite ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS))) cd $(BUILDDIR)/mysql-test && ./mtr --force --mem --parallel=$(NUMJOBS) --skip-rpl --suite=main --skip-test-list=unstable-tests || $(TESTSUITE_FAIL_CMD) ; @@ -119,9 +117,11 @@ override_dh_auto_install: # otherwise skip it. 
[ -f $(BUILDDIR)/storage/cassandra/ha_cassandra.so ] || sed -i -e "/Package: mariadb-plugin-cassandra/,+20d" debian/control +ifneq (,$(filter linux,$(DEB_HOST_ARCH_OS))) # Copy systemd files to a location available for dh_installinit cp $(BUILDDIR)/support-files/mariadb.service debian/mariadb-server-10.3.mariadb.service cp $(BUILDDIR)/support-files/mariadb@.service debian/mariadb-server-10.3.mariadb@.service +endif # make install cd $(BUILDDIR) && $(MAKE) install DESTDIR=$(TMP) @@ -135,13 +135,6 @@ override_dh_auto_install: # to satisfy Debian reproducible build requirements nm --defined-only $(BUILDDIR)/sql/mysqld | LC_ALL=C sort | gzip -n -9 > $(TMP)/usr/share/doc/mariadb-server-10.3/mysqld.sym.gz - # For 5.0 -> 10.3 transition - d=$(TMP)/usr/share/mysql-common/internal-use-only/; \ - mkdir -p $$d; \ - cp debian/mariadb-server-10.3.mysql.init $$d/_etc_init.d_mysql; \ - cp debian/mariadb-server-10.3.mysql-server.logrotate $$d/_etc_logrotate.d_mysql-server; \ - cp debian/additions/debian-start $$d/_etc_mysql_debian-start; - # rename and install AppArmor profile install -D -m 644 debian/apparmor-profile $(TMP)/etc/apparmor.d/usr.sbin.mysqld # install Apport hook @@ -156,7 +149,6 @@ override_dh_auto_install: ln -s libmariadb.so.3 $(TMP)/usr/lib/$(DEB_HOST_MULTIARCH)/libmysqlclient.so.19 ln -s libmariadb.so.3 $(TMP)/usr/lib/$(DEB_HOST_MULTIARCH)/libmysqlclient.so.20 - touch $@ override_dh_installlogrotate-arch: dh_installlogrotate --name mysql-server @@ -182,6 +174,6 @@ get-orig-source: # white list file only starting from Debian Stretch and Ubuntu Xenial. # To find more, grep build logs for 'but is not installed to anywhere'. %: - dh $@ --parallel --with dpatch --with systemd --list-missing + dh $@ --parallel --with systemd --list-missing # vim: ts=8 diff --git a/extra/crc32-vpmsum/CMakeLists.txt b/extra/crc32-vpmsum/CMakeLists.txt index 0bb254bea6a..31c09a97d6a 100644 --- a/extra/crc32-vpmsum/CMakeLists.txt +++ b/extra/crc32-vpmsum/CMakeLists.txt @@ -1,2 +1,9 @@ -ENABLE_LANGUAGE(ASM) -ADD_CONVENIENCE_LIBRARY(${CRC32_VPMSUM_LIBRARY} crc32c.S crc32c_wrapper.c crc32ieee.S crc32ieee_wrapper.c) +ADD_CONVENIENCE_LIBRARY(${CRC32_LIBRARY} $ $) +ADD_LIBRARY(crc32c OBJECT vec_crc32.c) +ADD_LIBRARY(crc32ieee OBJECT vec_crc32.c) + +GET_TARGET_PROPERTY(CFLAGS_CRC32_VPMSUM ${CRC32_LIBRARY} COMPILE_FLAGS) +SET_TARGET_PROPERTIES(crc32c crc32ieee PROPERTIES COMPILE_FLAGS "${CFLAGS_CRC32_VPMSUM} -maltivec -mvsx -mpower8-vector -mcrypto -mpower8-vector") +SET_TARGET_PROPERTIES(crc32ieee PROPERTIES COMPILE_DEFINITIONS "CRC32_FUNCTION=crc32ieee_vpmsum;CRC32_CONSTANTS_HEADER=\"crc32ieee_constants.h\"") +SET_TARGET_PROPERTIES(crc32c PROPERTIES COMPILE_DEFINITIONS "CRC32_FUNCTION=crc32c_vpmsum;CRC32_CONSTANTS_HEADER=\"crc32c_constants.h\"") + diff --git a/extra/crc32-vpmsum/clang_workaround.h b/extra/crc32-vpmsum/clang_workaround.h new file mode 100644 index 00000000000..b5e7dae011c --- /dev/null +++ b/extra/crc32-vpmsum/clang_workaround.h @@ -0,0 +1,82 @@ +#ifndef CLANG_WORKAROUNDS_H +#define CLANG_WORKAROUNDS_H + +/* + * These stubs fix clang incompatibilities with GCC builtins. 
+ */ + +#ifndef __builtin_crypto_vpmsumw +#define __builtin_crypto_vpmsumw __builtin_crypto_vpmsumb +#endif +#ifndef __builtin_crypto_vpmsumd +#define __builtin_crypto_vpmsumd __builtin_crypto_vpmsumb +#endif + +static inline +__vector unsigned long long __attribute__((overloadable)) +vec_ld(int __a, const __vector unsigned long long* __b) +{ + return (__vector unsigned long long)__builtin_altivec_lvx(__a, __b); +} + +/* + * GCC __builtin_pack_vector_int128 returns a vector __int128_t but Clang + * does not recognize this type. On GCC this builtin is translated to a + * xxpermdi instruction that only moves the registers __a, __b instead generates + * a load. + * + * Clang has vec_xxpermdi intrinsics. It was implemented in 4.0.0. + */ +static inline +__vector unsigned long long __builtin_pack_vector (unsigned long __a, + unsigned long __b) +{ + #if defined(__BIG_ENDIAN__) + __vector unsigned long long __v = {__a, __b}; + #else + __vector unsigned long long __v = {__b, __a}; + #endif + return __v; +} + +#ifndef vec_xxpermdi + +static inline +unsigned long __builtin_unpack_vector (__vector unsigned long long __v, + int __o) +{ + return __v[__o]; +} + +#if defined(__BIG_ENDIAN__) +#define __builtin_unpack_vector_0(a) __builtin_unpack_vector ((a), 0) +#define __builtin_unpack_vector_1(a) __builtin_unpack_vector ((a), 1) +#else +#define __builtin_unpack_vector_0(a) __builtin_unpack_vector ((a), 1) +#define __builtin_unpack_vector_1(a) __builtin_unpack_vector ((a), 0) +#endif + +#else + +static inline +unsigned long __builtin_unpack_vector_0 (__vector unsigned long long __v) +{ + #if defined(__BIG_ENDIAN__) + return vec_xxpermdi(__v, __v, 0x0)[1]; + #else + return vec_xxpermdi(__v, __v, 0x0)[0]; + #endif +} + +static inline +unsigned long __builtin_unpack_vector_1 (__vector unsigned long long __v) +{ + #if defined(__BIG_ENDIAN__) + return vec_xxpermdi(__v, __v, 0x3)[1]; + #else + return vec_xxpermdi(__v, __v, 0x3)[0]; + #endif +} +#endif /* vec_xxpermdi */ + +#endif diff --git a/extra/crc32-vpmsum/crc32.iS b/extra/crc32-vpmsum/crc32.iS deleted file mode 100644 index 4e7c18922da..00000000000 --- a/extra/crc32-vpmsum/crc32.iS +++ /dev/null @@ -1,734 +0,0 @@ -/* - * Calculate the checksum of data that is 16 byte aligned and a multiple of - * 16 bytes. - * - * The first step is to reduce it to 1024 bits. We do this in 8 parallel - * chunks in order to mask the latency of the vpmsum instructions. If we - * have more than 32 kB of data to checksum we repeat this step multiple - * times, passing in the previous 1024 bits. - * - * The next step is to reduce the 1024 bits to 64 bits. This step adds - * 32 bits of 0s to the end - this matches what a CRC does. We just - * calculate constants that land the data in this 32 bits. - * - * We then use fixed point Barrett reduction to compute a mod n over GF(2) - * for n = CRC using POWER8 instructions. We use x = 32. - * - * http://en.wikipedia.org/wiki/Barrett_reduction - * - * Copyright (C) 2015 Anton Blanchard , IBM - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#ifdef __powerpc__ - -#include -#include "ppc-opcode.h" - -#undef toc - -#ifndef r1 -#define r1 1 -#endif - -#ifndef r2 -#define r2 2 -#endif - - .section .rodata -.balign 16 - -.byteswap_constant: - /* byte reverse permute constant */ - .octa 0x0F0E0D0C0B0A09080706050403020100 - - .text - -#define off16 r25 -#define off32 r26 -#define off48 r27 -#define off64 r28 -#define off80 r29 -#define off96 r30 -#define off112 r31 - -#define const1 v24 -#define const2 v25 - -#define byteswap v26 -#define mask_32bit v27 -#define mask_64bit v28 -#define zeroes v29 - -#ifdef BYTESWAP_DATA -#define VPERM(A, B, C, D) vperm A, B, C, D -#else -#define VPERM(A, B, C, D) -#endif - -/* unsigned int __crc32_vpmsum(unsigned int crc, void *p, unsigned long len) */ -FUNC_START(__F) - std r31,-8(r1) - std r30,-16(r1) - std r29,-24(r1) - std r28,-32(r1) - std r27,-40(r1) - std r26,-48(r1) - std r25,-56(r1) - - li off16,16 - li off32,32 - li off48,48 - li off64,64 - li off80,80 - li off96,96 - li off112,112 - li r0,0 - - /* Enough room for saving 10 non volatile VMX registers */ - subi r6,r1,56+10*16 - subi r7,r1,56+2*16 - - stvx v20,0,r6 - stvx v21,off16,r6 - stvx v22,off32,r6 - stvx v23,off48,r6 - stvx v24,off64,r6 - stvx v25,off80,r6 - stvx v26,off96,r6 - stvx v27,off112,r6 - stvx v28,0,r7 - stvx v29,off16,r7 - - mr r10,r3 - - vxor zeroes,zeroes,zeroes - vspltisw v0,-1 - - vsldoi mask_32bit,zeroes,v0,4 - vsldoi mask_64bit,zeroes,v0,8 - - /* Get the initial value into v8 */ - vxor v8,v8,v8 - MTVRD(v8, r3) - - vsldoi v8,zeroes,v8,8 /* shift into bottom 32 bits */ - - addis r3,r2,.byteswap_constant@toc@ha - addi r3,r3,.byteswap_constant@toc@l - - lvx byteswap,0,r3 - addi r3,r3,16 - - cmpdi r5,256 - blt .Lshort - - rldicr r6,r5,0,56 - - /* Checksum in blocks of MAX_SIZE */ -1: lis r7,MAX_SIZE@h - ori r7,r7,MAX_SIZE@l - mr r9,r7 - cmpd r6,r7 - bgt 2f - mr r7,r6 -2: subf r6,r7,r6 - - /* our main loop does 128 bytes at a time */ - srdi r7,r7,7 - - /* - * Work out the offset into the constants table to start at. Each - * constant is 16 bytes, and it is used against 128 bytes of input - * data - 128 / 16 = 8 - */ - sldi r8,r7,4 - srdi r9,r9,3 - subf r8,r8,r9 - - /* We reduce our final 128 bytes in a separate step */ - addi r7,r7,-1 - mtctr r7 - - addis r3,r2,CONSTANTS@toc@ha - addi r3,r3,CONSTANTS@toc@l - - /* Find the start of our constants */ - add r3,r3,r8 - - /* zero v0-v7 which will contain our checksums */ - vxor v0,v0,v0 - vxor v1,v1,v1 - vxor v2,v2,v2 - vxor v3,v3,v3 - vxor v4,v4,v4 - vxor v5,v5,v5 - vxor v6,v6,v6 - vxor v7,v7,v7 - - lvx const1,0,r3 - - /* - * If we are looping back to consume more data we use the values - * already in v16-v23. 
- */ - cmpdi r0,1 - beq 2f - - /* First warm up pass */ - lvx v16,0,r4 - lvx v17,off16,r4 - VPERM(v16,v16,v16,byteswap) - VPERM(v17,v17,v17,byteswap) - lvx v18,off32,r4 - lvx v19,off48,r4 - VPERM(v18,v18,v18,byteswap) - VPERM(v19,v19,v19,byteswap) - lvx v20,off64,r4 - lvx v21,off80,r4 - VPERM(v20,v20,v20,byteswap) - VPERM(v21,v21,v21,byteswap) - lvx v22,off96,r4 - lvx v23,off112,r4 - VPERM(v22,v22,v22,byteswap) - VPERM(v23,v23,v23,byteswap) - addi r4,r4,8*16 - - /* xor in initial value */ - vxor v16,v16,v8 - -2: bdz .Lfirst_warm_up_done - - addi r3,r3,16 - lvx const2,0,r3 - - /* Second warm up pass */ - VPMSUMD(v8,v16,const1) - lvx v16,0,r4 - VPERM(v16,v16,v16,byteswap) - ori r2,r2,0 - - VPMSUMD(v9,v17,const1) - lvx v17,off16,r4 - VPERM(v17,v17,v17,byteswap) - ori r2,r2,0 - - VPMSUMD(v10,v18,const1) - lvx v18,off32,r4 - VPERM(v18,v18,v18,byteswap) - ori r2,r2,0 - - VPMSUMD(v11,v19,const1) - lvx v19,off48,r4 - VPERM(v19,v19,v19,byteswap) - ori r2,r2,0 - - VPMSUMD(v12,v20,const1) - lvx v20,off64,r4 - VPERM(v20,v20,v20,byteswap) - ori r2,r2,0 - - VPMSUMD(v13,v21,const1) - lvx v21,off80,r4 - VPERM(v21,v21,v21,byteswap) - ori r2,r2,0 - - VPMSUMD(v14,v22,const1) - lvx v22,off96,r4 - VPERM(v22,v22,v22,byteswap) - ori r2,r2,0 - - VPMSUMD(v15,v23,const1) - lvx v23,off112,r4 - VPERM(v23,v23,v23,byteswap) - - addi r4,r4,8*16 - - bdz .Lfirst_cool_down - - /* - * main loop. We modulo schedule it such that it takes three iterations - * to complete - first iteration load, second iteration vpmsum, third - * iteration xor. - */ - .balign 16 -4: lvx const1,0,r3 - addi r3,r3,16 - ori r2,r2,0 - - vxor v0,v0,v8 - VPMSUMD(v8,v16,const2) - lvx v16,0,r4 - VPERM(v16,v16,v16,byteswap) - ori r2,r2,0 - - vxor v1,v1,v9 - VPMSUMD(v9,v17,const2) - lvx v17,off16,r4 - VPERM(v17,v17,v17,byteswap) - ori r2,r2,0 - - vxor v2,v2,v10 - VPMSUMD(v10,v18,const2) - lvx v18,off32,r4 - VPERM(v18,v18,v18,byteswap) - ori r2,r2,0 - - vxor v3,v3,v11 - VPMSUMD(v11,v19,const2) - lvx v19,off48,r4 - VPERM(v19,v19,v19,byteswap) - lvx const2,0,r3 - ori r2,r2,0 - - vxor v4,v4,v12 - VPMSUMD(v12,v20,const1) - lvx v20,off64,r4 - VPERM(v20,v20,v20,byteswap) - ori r2,r2,0 - - vxor v5,v5,v13 - VPMSUMD(v13,v21,const1) - lvx v21,off80,r4 - VPERM(v21,v21,v21,byteswap) - ori r2,r2,0 - - vxor v6,v6,v14 - VPMSUMD(v14,v22,const1) - lvx v22,off96,r4 - VPERM(v22,v22,v22,byteswap) - ori r2,r2,0 - - vxor v7,v7,v15 - VPMSUMD(v15,v23,const1) - lvx v23,off112,r4 - VPERM(v23,v23,v23,byteswap) - - addi r4,r4,8*16 - - bdnz 4b - -.Lfirst_cool_down: - /* First cool down pass */ - lvx const1,0,r3 - addi r3,r3,16 - - vxor v0,v0,v8 - VPMSUMD(v8,v16,const1) - ori r2,r2,0 - - vxor v1,v1,v9 - VPMSUMD(v9,v17,const1) - ori r2,r2,0 - - vxor v2,v2,v10 - VPMSUMD(v10,v18,const1) - ori r2,r2,0 - - vxor v3,v3,v11 - VPMSUMD(v11,v19,const1) - ori r2,r2,0 - - vxor v4,v4,v12 - VPMSUMD(v12,v20,const1) - ori r2,r2,0 - - vxor v5,v5,v13 - VPMSUMD(v13,v21,const1) - ori r2,r2,0 - - vxor v6,v6,v14 - VPMSUMD(v14,v22,const1) - ori r2,r2,0 - - vxor v7,v7,v15 - VPMSUMD(v15,v23,const1) - ori r2,r2,0 - -.Lsecond_cool_down: - /* Second cool down pass */ - vxor v0,v0,v8 - vxor v1,v1,v9 - vxor v2,v2,v10 - vxor v3,v3,v11 - vxor v4,v4,v12 - vxor v5,v5,v13 - vxor v6,v6,v14 - vxor v7,v7,v15 - - /* - * vpmsumd produces a 96 bit result in the least significant bits - * of the register. Since we are bit reflected we have to shift it - * left 32 bits so it occupies the least significant bits in the - * bit reflected domain. 
- */ - vsldoi v0,v0,zeroes,4 - vsldoi v1,v1,zeroes,4 - vsldoi v2,v2,zeroes,4 - vsldoi v3,v3,zeroes,4 - vsldoi v4,v4,zeroes,4 - vsldoi v5,v5,zeroes,4 - vsldoi v6,v6,zeroes,4 - vsldoi v7,v7,zeroes,4 - - /* xor with last 1024 bits */ - lvx v8,0,r4 - lvx v9,off16,r4 - VPERM(v8,v8,v8,byteswap) - VPERM(v9,v9,v9,byteswap) - lvx v10,off32,r4 - lvx v11,off48,r4 - VPERM(v10,v10,v10,byteswap) - VPERM(v11,v11,v11,byteswap) - lvx v12,off64,r4 - lvx v13,off80,r4 - VPERM(v12,v12,v12,byteswap) - VPERM(v13,v13,v13,byteswap) - lvx v14,off96,r4 - lvx v15,off112,r4 - VPERM(v14,v14,v14,byteswap) - VPERM(v15,v15,v15,byteswap) - - addi r4,r4,8*16 - - vxor v16,v0,v8 - vxor v17,v1,v9 - vxor v18,v2,v10 - vxor v19,v3,v11 - vxor v20,v4,v12 - vxor v21,v5,v13 - vxor v22,v6,v14 - vxor v23,v7,v15 - - li r0,1 - cmpdi r6,0 - addi r6,r6,128 - bne 1b - - /* Work out how many bytes we have left */ - andi. r5,r5,127 - - /* Calculate where in the constant table we need to start */ - subfic r6,r5,128 - add r3,r3,r6 - - /* How many 16 byte chunks are in the tail */ - srdi r7,r5,4 - mtctr r7 - - /* - * Reduce the previously calculated 1024 bits to 64 bits, shifting - * 32 bits to include the trailing 32 bits of zeros - */ - lvx v0,0,r3 - lvx v1,off16,r3 - lvx v2,off32,r3 - lvx v3,off48,r3 - lvx v4,off64,r3 - lvx v5,off80,r3 - lvx v6,off96,r3 - lvx v7,off112,r3 - addi r3,r3,8*16 - - VPMSUMW(v0,v16,v0) - VPMSUMW(v1,v17,v1) - VPMSUMW(v2,v18,v2) - VPMSUMW(v3,v19,v3) - VPMSUMW(v4,v20,v4) - VPMSUMW(v5,v21,v5) - VPMSUMW(v6,v22,v6) - VPMSUMW(v7,v23,v7) - - /* Now reduce the tail (0 - 112 bytes) */ - cmpdi r7,0 - beq 1f - - lvx v16,0,r4 - lvx v17,0,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - bdz 1f - - lvx v16,off16,r4 - lvx v17,off16,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - bdz 1f - - lvx v16,off32,r4 - lvx v17,off32,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - bdz 1f - - lvx v16,off48,r4 - lvx v17,off48,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - bdz 1f - - lvx v16,off64,r4 - lvx v17,off64,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - bdz 1f - - lvx v16,off80,r4 - lvx v17,off80,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - bdz 1f - - lvx v16,off96,r4 - lvx v17,off96,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - - /* Now xor all the parallel chunks together */ -1: vxor v0,v0,v1 - vxor v2,v2,v3 - vxor v4,v4,v5 - vxor v6,v6,v7 - - vxor v0,v0,v2 - vxor v4,v4,v6 - - vxor v0,v0,v4 - -.Lbarrett_reduction: - /* Barrett constants */ - addis r3,r2,BARRETT_CONSTANTS@toc@ha - addi r3,r3,BARRETT_CONSTANTS@toc@l - - lvx const1,0,r3 - lvx const2,off16,r3 - - vsldoi v1,v0,v0,8 - vxor v0,v0,v1 /* xor two 64 bit results together */ - - /* shift left one bit */ - vspltisb v1,1 - vsl v0,v0,v1 - - vand v0,v0,mask_64bit - - /* - * The reflected version of Barrett reduction. Instead of bit - * reflecting our data (which is expensive to do), we bit reflect our - * constants and our algorithm, which means the intermediate data in - * our vector registers goes from 0-63 instead of 63-0. We can reflect - * the algorithm because we don't carry in mod 2 arithmetic. 
- */ - vand v1,v0,mask_32bit /* bottom 32 bits of a */ - VPMSUMD(v1,v1,const1) /* ma */ - vand v1,v1,mask_32bit /* bottom 32bits of ma */ - VPMSUMD(v1,v1,const2) /* qn */ - vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */ - - /* - * Since we are bit reflected, the result (ie the low 32 bits) is in - * the high 32 bits. We just need to shift it left 4 bytes - * V0 [ 0 1 X 3 ] - * V0 [ 0 X 2 3 ] - */ - vsldoi v0,v0,zeroes,4 /* shift result into top 64 bits of */ - -.Lout: - subi r6,r1,56+10*16 - subi r7,r1,56+2*16 - - lvx v20,0,r6 - lvx v21,off16,r6 - lvx v22,off32,r6 - lvx v23,off48,r6 - lvx v24,off64,r6 - lvx v25,off80,r6 - lvx v26,off96,r6 - lvx v27,off112,r6 - lvx v28,0,r7 - lvx v29,off16,r7 - - /* Get it into r3 */ - MFVRD(r3, v0) - - ld r31,-8(r1) - ld r30,-16(r1) - ld r29,-24(r1) - ld r28,-32(r1) - ld r27,-40(r1) - ld r26,-48(r1) - ld r25,-56(r1) - - blr - -.Lfirst_warm_up_done: - lvx const1,0,r3 - addi r3,r3,16 - - VPMSUMD(v8,v16,const1) - VPMSUMD(v9,v17,const1) - VPMSUMD(v10,v18,const1) - VPMSUMD(v11,v19,const1) - VPMSUMD(v12,v20,const1) - VPMSUMD(v13,v21,const1) - VPMSUMD(v14,v22,const1) - VPMSUMD(v15,v23,const1) - - b .Lsecond_cool_down - -.Lshort: - cmpdi r5,0 - beq .Lzero - - addis r3,r2,SHORT_CONSTANTS@toc@ha - addi r3,r3,SHORT_CONSTANTS@toc@l - - /* Calculate where in the constant table we need to start */ - subfic r6,r5,256 - add r3,r3,r6 - - /* How many 16 byte chunks? */ - srdi r7,r5,4 - mtctr r7 - - vxor v19,v19,v19 - vxor v20,v20,v20 - - lvx v0,0,r4 - lvx v16,0,r3 - VPERM(v0,v0,v16,byteswap) - vxor v0,v0,v8 /* xor in initial value */ - VPMSUMW(v0,v0,v16) - bdz .Lv0 - - lvx v1,off16,r4 - lvx v17,off16,r3 - VPERM(v1,v1,v17,byteswap) - VPMSUMW(v1,v1,v17) - bdz .Lv1 - - lvx v2,off32,r4 - lvx v16,off32,r3 - VPERM(v2,v2,v16,byteswap) - VPMSUMW(v2,v2,v16) - bdz .Lv2 - - lvx v3,off48,r4 - lvx v17,off48,r3 - VPERM(v3,v3,v17,byteswap) - VPMSUMW(v3,v3,v17) - bdz .Lv3 - - lvx v4,off64,r4 - lvx v16,off64,r3 - VPERM(v4,v4,v16,byteswap) - VPMSUMW(v4,v4,v16) - bdz .Lv4 - - lvx v5,off80,r4 - lvx v17,off80,r3 - VPERM(v5,v5,v17,byteswap) - VPMSUMW(v5,v5,v17) - bdz .Lv5 - - lvx v6,off96,r4 - lvx v16,off96,r3 - VPERM(v6,v6,v16,byteswap) - VPMSUMW(v6,v6,v16) - bdz .Lv6 - - lvx v7,off112,r4 - lvx v17,off112,r3 - VPERM(v7,v7,v17,byteswap) - VPMSUMW(v7,v7,v17) - bdz .Lv7 - - addi r3,r3,128 - addi r4,r4,128 - - lvx v8,0,r4 - lvx v16,0,r3 - VPERM(v8,v8,v16,byteswap) - VPMSUMW(v8,v8,v16) - bdz .Lv8 - - lvx v9,off16,r4 - lvx v17,off16,r3 - VPERM(v9,v9,v17,byteswap) - VPMSUMW(v9,v9,v17) - bdz .Lv9 - - lvx v10,off32,r4 - lvx v16,off32,r3 - VPERM(v10,v10,v16,byteswap) - VPMSUMW(v10,v10,v16) - bdz .Lv10 - - lvx v11,off48,r4 - lvx v17,off48,r3 - VPERM(v11,v11,v17,byteswap) - VPMSUMW(v11,v11,v17) - bdz .Lv11 - - lvx v12,off64,r4 - lvx v16,off64,r3 - VPERM(v12,v12,v16,byteswap) - VPMSUMW(v12,v12,v16) - bdz .Lv12 - - lvx v13,off80,r4 - lvx v17,off80,r3 - VPERM(v13,v13,v17,byteswap) - VPMSUMW(v13,v13,v17) - bdz .Lv13 - - lvx v14,off96,r4 - lvx v16,off96,r3 - VPERM(v14,v14,v16,byteswap) - VPMSUMW(v14,v14,v16) - bdz .Lv14 - - lvx v15,off112,r4 - lvx v17,off112,r3 - VPERM(v15,v15,v17,byteswap) - VPMSUMW(v15,v15,v17) - -.Lv15: vxor v19,v19,v15 -.Lv14: vxor v20,v20,v14 -.Lv13: vxor v19,v19,v13 -.Lv12: vxor v20,v20,v12 -.Lv11: vxor v19,v19,v11 -.Lv10: vxor v20,v20,v10 -.Lv9: vxor v19,v19,v9 -.Lv8: vxor v20,v20,v8 -.Lv7: vxor v19,v19,v7 -.Lv6: vxor v20,v20,v6 -.Lv5: vxor v19,v19,v5 -.Lv4: vxor v20,v20,v4 -.Lv3: vxor v19,v19,v3 -.Lv2: vxor v20,v20,v2 -.Lv1: vxor v19,v19,v1 -.Lv0: vxor v20,v20,v0 - - vxor 
v0,v19,v20 - - b .Lbarrett_reduction - -.Lzero: - mr r3,r10 - b .Lout - -FUNC_END(__F) - -#endif /* __powerpc__ */ diff --git a/extra/crc32-vpmsum/crc32_wrapper.ic b/extra/crc32-vpmsum/crc32_wrapper.ic deleted file mode 100644 index 750e971f83e..00000000000 --- a/extra/crc32-vpmsum/crc32_wrapper.ic +++ /dev/null @@ -1,52 +0,0 @@ -#ifdef __powerpc__ - - -#define VMX_ALIGN 16 -#define VMX_ALIGN_MASK (VMX_ALIGN-1) - -static unsigned int crc32_align(unsigned int crc, unsigned char *p, - unsigned long len) -{ - while (len--) - crc = crc_table[(crc ^ *p++) & 0xff] ^ (crc >> 8); - return crc; -} - -unsigned int __F(unsigned int crc, unsigned char *p, - unsigned long len); - -unsigned int F(unsigned int crc, unsigned char *p, - unsigned long len) -{ - unsigned int prealign; - unsigned int tail; - - crc ^= 0xffffffff; - - if (len < VMX_ALIGN + VMX_ALIGN_MASK) { - crc = crc32_align(crc, p, len); - goto out; - } - - if ((unsigned long)p & VMX_ALIGN_MASK) { - prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK); - crc = crc32_align(crc, p, prealign); - len -= prealign; - p += prealign; - } - - crc = __F(crc, p, len & ~VMX_ALIGN_MASK); - - tail = len & VMX_ALIGN_MASK; - if (tail) { - p += len & ~VMX_ALIGN_MASK; - crc = crc32_align(crc, p, tail); - } - -out: - crc ^= 0xffffffff; - - return crc; -} - -#endif /* __powerpc__ */ diff --git a/extra/crc32-vpmsum/crc32c.S b/extra/crc32-vpmsum/crc32c.S deleted file mode 100644 index 390c4bf0660..00000000000 --- a/extra/crc32-vpmsum/crc32c.S +++ /dev/null @@ -1,14 +0,0 @@ -#ifdef __powerpc__ - -#define CONSTANTS .crc32c_constants -#define SHORT_CONSTANTS .crc32c_short_constants -#define BARRETT_CONSTANTS .crc32c_barrett_constants - -#include "crc32c_constants.h" - -#define __F __crc32c_vpmsum - -#include "crc32.iS" - -#endif - diff --git a/extra/crc32-vpmsum/crc32c_constants.h b/extra/crc32-vpmsum/crc32c_constants.h index 555b785ce9f..40b216b6057 100644 --- a/extra/crc32-vpmsum/crc32c_constants.h +++ b/extra/crc32-vpmsum/crc32c_constants.h @@ -1,837 +1,1206 @@ -#ifndef CRC32_CONSTANTS_H -#define CRC32_CONSTANTS_H +/* +* +* THIS FILE IS GENERATED WITH +./crc32_constants -c -x -r 0x1edc6f41 -#ifdef __powerpc__ +* This is from https://github.com/antonblanchard/crc32-vpmsum/ +* DO NOT MODIFY IT MANUALLY! 
+* +*/ #define CRC 0x1edc6f41 - -#define MAX_SIZE 32768 -CONSTANTS: - - /* Reduce 262144 kbits to 1024 bits */ - /* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */ - .octa 0x00000000b6ca9e20000000009c37c408 - - /* x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1 */ - .octa 0x00000000350249a800000001b51df26c - - /* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */ - .octa 0x00000001862dac54000000000724b9d0 - - /* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */ - .octa 0x00000001d87fb48c00000001c00532fe - - /* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */ - .octa 0x00000001f39b699e00000000f05a9362 - - /* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */ - .octa 0x0000000101da11b400000001e1007970 - - /* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */ - .octa 0x00000001cab571e000000000a57366ee - - /* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */ - .octa 0x00000000c7020cfe0000000192011284 - - /* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */ - .octa 0x00000000cdaed1ae0000000162716d9a - - /* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */ - .octa 0x00000001e804effc00000000cd97ecde - - /* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */ - .octa 0x0000000077c3ea3a0000000058812bc0 - - /* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */ - .octa 0x0000000068df31b40000000088b8c12e - - /* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */ - .octa 0x00000000b059b6c200000001230b234c - - /* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */ - .octa 0x0000000145fb8ed800000001120b416e - - /* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */ - .octa 0x00000000cbc0916800000001974aecb0 - - /* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */ - .octa 0x000000005ceeedc2000000008ee3f226 - - /* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */ - .octa 0x0000000047d74e8600000001089aba9a - - /* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */ - .octa 0x00000001407e9e220000000065113872 - - /* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */ - .octa 0x00000001da967bda000000005c07ec10 - - /* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */ - .octa 0x000000006c8983680000000187590924 - - /* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */ - .octa 0x00000000f2d14c9800000000e35da7c6 - - /* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */ - .octa 0x00000001993c6ad4000000000415855a - - /* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */ - .octa 0x000000014683d1ac0000000073617758 - - /* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */ - .octa 0x00000001a7c93e6c0000000176021d28 - - /* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */ - .octa 0x000000010211e90a00000001c358fd0a - - /* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */ - .octa 0x000000001119403e00000001ff7a2c18 - - /* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */ - .octa 0x000000001c3261aa00000000f2d9f7e4 - - /* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */ - .octa 0x000000014e37a634000000016cf1f9c8 - - /* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */ - .octa 0x0000000073786c0c000000010af9279a - - /* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */ - .octa 0x000000011dc037f80000000004f101e8 - - /* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */ - .octa 0x0000000031433dfc0000000070bcf184 - - /* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */ - .octa 0x000000009cde8348000000000a8de642 - - /* x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1 */ - .octa 0x0000000038d3c2a60000000062ea130c - - /* x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1 */ - .octa 
0x000000011b25f26000000001eb31cbb2 - - /* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */ - .octa 0x000000001629e6f00000000170783448 - - /* x^225280 mod p(x)` << 1, x^225344 mod p(x)` << 1 */ - .octa 0x0000000160838b4c00000001a684b4c6 - - /* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */ - .octa 0x000000007a44011c00000000253ca5b4 - - /* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */ - .octa 0x00000000226f417a0000000057b4b1e2 - - /* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */ - .octa 0x0000000045eb2eb400000000b6bd084c - - /* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */ - .octa 0x000000014459d70c0000000123c2d592 - - /* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */ - .octa 0x00000001d406ed8200000000159dafce - - /* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */ - .octa 0x0000000160c8e1a80000000127e1a64e - - /* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */ - .octa 0x0000000027ba80980000000056860754 - - /* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */ - .octa 0x000000006d92d01800000001e661aae8 - - /* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */ - .octa 0x000000012ed7e3f200000000f82c6166 - - /* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */ - .octa 0x000000002dc8778800000000c4f9c7ae - - /* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */ - .octa 0x0000000018240bb80000000074203d20 - - /* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */ - .octa 0x000000001ad381580000000198173052 - - /* x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1 */ - .octa 0x00000001396b78f200000001ce8aba54 - - /* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */ - .octa 0x000000011a68133400000001850d5d94 - - /* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */ - .octa 0x000000012104732e00000001d609239c - - /* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */ - .octa 0x00000000a140d90c000000001595f048 - - /* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */ - .octa 0x00000001b7215eda0000000042ccee08 - - /* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */ - .octa 0x00000001aaf1df3c000000010a389d74 - - /* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */ - .octa 0x0000000029d15b8a000000012a840da6 - - /* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */ - .octa 0x00000000f1a96922000000001d181c0c - - /* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */ - .octa 0x00000001ac80d03c0000000068b7d1f6 - - /* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */ - .octa 0x000000000f11d56a000000005b0f14fc - - /* x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */ - .octa 0x00000001f1c022a20000000179e9e730 - - /* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */ - .octa 0x0000000173d00ae200000001ce1368d6 - - /* x^199680 mod p(x)` << 1, x^199744 mod p(x)` << 1 */ - .octa 0x00000001d4ffe4ac0000000112c3a84c - - /* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */ - .octa 0x000000016edc5ae400000000de940fee - - /* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */ - .octa 0x00000001f1a0214000000000fe896b7e - - /* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */ - .octa 0x00000000ca0b28a000000001f797431c - - /* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */ - .octa 0x00000001928e30a20000000053e989ba - - /* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */ - .octa 0x0000000097b1b002000000003920cd16 - - /* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */ - .octa 0x00000000b15bf90600000001e6f579b8 - - /* x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1 */ - .octa 0x00000000411c5d52000000007493cb0a - - /* x^191488 mod p(x)` << 1, x^191552 mod p(x)` 
<< 1 */ - .octa 0x00000001c36f330000000001bdd376d8 - - /* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */ - .octa 0x00000001119227e0000000016badfee6 - - /* x^189440 mod p(x)` << 1, x^189504 mod p(x)` << 1 */ - .octa 0x00000000114d47020000000071de5c58 - - /* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */ - .octa 0x00000000458b5b9800000000453f317c - - /* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */ - .octa 0x000000012e31fb8e0000000121675cce - - /* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */ - .octa 0x000000005cf619d800000001f409ee92 - - /* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */ - .octa 0x0000000063f4d8b200000000f36b9c88 - - /* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */ - .octa 0x000000004138dc8a0000000036b398f4 - - /* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */ - .octa 0x00000001d29ee8e000000001748f9adc - - /* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */ - .octa 0x000000006a08ace800000001be94ec00 - - /* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */ - .octa 0x0000000127d4201000000000b74370d6 - - /* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */ - .octa 0x0000000019d76b6200000001174d0b98 - - /* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */ - .octa 0x00000001b1471f6e00000000befc06a4 - - /* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */ - .octa 0x00000001f64c19cc00000001ae125288 - - /* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */ - .octa 0x00000000003c0ea00000000095c19b34 - - /* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */ - .octa 0x000000014d73abf600000001a78496f2 - - /* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */ - .octa 0x00000001620eb84400000001ac5390a0 - - /* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */ - .octa 0x0000000147655048000000002a80ed6e - - /* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */ - .octa 0x0000000067b5077e00000001fa9b0128 - - /* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */ - .octa 0x0000000010ffe20600000001ea94929e - - /* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */ - .octa 0x000000000fee8f1e0000000125f4305c - - /* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */ - .octa 0x00000001da26fbae00000001471e2002 - - /* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */ - .octa 0x00000001b3a8bd880000000132d2253a - - /* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */ - .octa 0x00000000e8f3898e00000000f26b3592 - - /* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */ - .octa 0x00000000b0d0d28c00000000bc8b67b0 - - /* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */ - .octa 0x0000000030f2a798000000013a826ef2 - - /* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */ - .octa 0x000000000fba10020000000081482c84 - - /* x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1 */ - .octa 0x00000000bdb9bd7200000000e77307c2 - - /* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */ - .octa 0x0000000075d3bf5a00000000d4a07ec8 - - /* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */ - .octa 0x00000000ef1f98a00000000017102100 - - /* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */ - .octa 0x00000000689c760200000000db406486 - - /* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */ - .octa 0x000000016d5fa5fe0000000192db7f88 - - /* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */ - .octa 0x00000001d0d2b9ca000000018bf67b1e - - /* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */ - .octa 0x0000000041e7b470000000007c09163e - - /* x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */ - .octa 0x00000001cbb6495e000000000adac060 - - /* x^155648 mod p(x)` << 1, 
x^155712 mod p(x)` << 1 */ - .octa 0x000000010052a0b000000000bd8316ae - - /* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */ - .octa 0x00000001d8effb5c000000019f09ab54 - - /* x^153600 mod p(x)` << 1, x^153664 mod p(x)` << 1 */ - .octa 0x00000001d969853c0000000125155542 - - /* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */ - .octa 0x00000000523ccce2000000018fdb5882 - - /* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 */ - .octa 0x000000001e2436bc00000000e794b3f4 - - /* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */ - .octa 0x00000000ddd1c3a2000000016f9bb022 - - /* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */ - .octa 0x0000000019fcfe3800000000290c9978 - - /* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */ - .octa 0x00000001ce95db640000000083c0f350 - - /* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */ - .octa 0x00000000af5828060000000173ea6628 - - /* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */ - .octa 0x00000001006388f600000001c8b4e00a - - /* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */ - .octa 0x0000000179eca00a00000000de95d6aa - - /* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */ - .octa 0x0000000122410a6a000000010b7f7248 - - /* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */ - .octa 0x000000004288e87c00000001326e3a06 - - /* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */ - .octa 0x000000016c5490da00000000bb62c2e6 - - /* x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1 */ - .octa 0x00000000d1c71f6e0000000156a4b2c2 - - /* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */ - .octa 0x00000001b4ce08a6000000011dfe763a - - /* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */ - .octa 0x00000001466ba60c000000007bcca8e2 - - /* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */ - .octa 0x00000001f6c488a40000000186118faa - - /* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */ - .octa 0x000000013bfb06820000000111a65a88 - - /* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */ - .octa 0x00000000690e9e54000000003565e1c4 - - /* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */ - .octa 0x00000000281346b6000000012ed02a82 - - /* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */ - .octa 0x000000015646402400000000c486ecfc - - /* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */ - .octa 0x000000016063a8dc0000000001b951b2 - - /* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */ - .octa 0x0000000116a663620000000048143916 - - /* x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */ - .octa 0x000000017e8aa4d200000001dc2ae124 - - /* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */ - .octa 0x00000001728eb10c00000001416c58d6 - - /* x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1 */ - .octa 0x00000001b08fd7fa00000000a479744a - - /* x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1 */ - .octa 0x00000001092a16e80000000096ca3a26 - - /* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */ - .octa 0x00000000a505637c00000000ff223d4e - - /* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */ - .octa 0x00000000d94869b2000000010e84da42 - - /* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */ - .octa 0x00000001c8b203ae00000001b61ba3d0 - - /* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */ - .octa 0x000000005704aea000000000680f2de8 - - /* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */ - .octa 0x000000012e295fa2000000008772a9a8 - - /* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */ - .octa 0x000000011d0908bc0000000155f295bc - - /* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */ - .octa 0x0000000193ed97ea00000000595f9282 - - /* x^119808 
mod p(x)` << 1, x^119872 mod p(x)` << 1 */ - .octa 0x000000013a0f1c520000000164b1c25a - - /* x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */ - .octa 0x000000010c2c40c000000000fbd67c50 - - /* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */ - .octa 0x00000000ff6fac3e0000000096076268 - - /* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */ - .octa 0x000000017b3609c000000001d288e4cc - - /* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */ - .octa 0x0000000088c8c92200000001eaac1bdc - - /* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */ - .octa 0x00000001751baae600000001f1ea39e2 - - /* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */ - .octa 0x000000010795297200000001eb6506fc - - /* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */ - .octa 0x0000000162b00abe000000010f806ffe - - /* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */ - .octa 0x000000000d7b404c000000010408481e - - /* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */ - .octa 0x00000000763b13d40000000188260534 - - /* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */ - .octa 0x00000000f6dc22d80000000058fc73e0 - - /* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */ - .octa 0x000000007daae06000000000391c59b8 - - /* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */ - .octa 0x000000013359ab7c000000018b638400 - - /* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */ - .octa 0x000000008add438a000000011738f5c4 - - /* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */ - .octa 0x00000001edbefdea000000008cf7c6da - - /* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */ - .octa 0x000000004104e0f800000001ef97fb16 - - /* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */ - .octa 0x00000000b48a82220000000102130e20 - - /* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */ - .octa 0x00000001bcb4684400000000db968898 - - /* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */ - .octa 0x000000013293ce0a00000000b5047b5e - - /* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */ - .octa 0x00000001710d0844000000010b90fdb2 - - /* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */ - .octa 0x0000000117907f6e000000004834a32e - - /* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */ - .octa 0x0000000087ddf93e0000000059c8f2b0 - - /* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */ - .octa 0x000000005970e9b00000000122cec508 - - /* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */ - .octa 0x0000000185b2b7d0000000000a330cda - - /* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */ - .octa 0x00000001dcee0efc000000014a47148c - - /* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */ - .octa 0x0000000030da27220000000042c61cb8 - - /* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */ - .octa 0x000000012f925a180000000012fe6960 - - /* x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1 */ - .octa 0x00000000dd2e357c00000000dbda2c20 - - /* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */ - .octa 0x00000000071c80de000000011122410c - - /* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */ - .octa 0x000000011513140a00000000977b2070 - - /* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */ - .octa 0x00000001df876e8e000000014050438e - - /* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */ - .octa 0x000000015f81d6ce0000000147c840e8 - - /* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */ - .octa 0x000000019dd94dbe00000001cc7c88ce - - /* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */ - .octa 0x00000001373d206e00000001476b35a4 - - /* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */ - .octa 0x00000000668ccade000000013d52d508 - - /* x^83968 mod p(x)` << 1, 
x^84032 mod p(x)` << 1 */ - .octa 0x00000001b192d268000000008e4be32e - - /* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */ - .octa 0x00000000e30f3a7800000000024120fe - - /* x^81920 mod p(x)` << 1, x^81984 mod p(x)` << 1 */ - .octa 0x000000010ef1f7bc00000000ddecddb4 - - /* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */ - .octa 0x00000001f5ac738000000000d4d403bc - - /* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */ - .octa 0x000000011822ea7000000001734b89aa - - /* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */ - .octa 0x00000000c3a33848000000010e7a58d6 - - /* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */ - .octa 0x00000001bd151c2400000001f9f04e9c - - /* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */ - .octa 0x0000000056002d7600000000b692225e - - /* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */ - .octa 0x000000014657c4f4000000019b8d3f3e - - /* x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */ - .octa 0x0000000113742d7c00000001a874f11e - - /* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */ - .octa 0x000000019c5920ba000000010d5a4254 - - /* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */ - .octa 0x000000005216d2d600000000bbb2f5d6 - - /* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */ - .octa 0x0000000136f5ad8a0000000179cc0e36 - - /* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */ - .octa 0x000000018b07beb600000001dca1da4a - - /* x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */ - .octa 0x00000000db1e93b000000000feb1a192 - - /* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */ - .octa 0x000000000b96fa3a00000000d1eeedd6 - - /* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */ - .octa 0x00000001d9968af0000000008fad9bb4 - - /* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */ - .octa 0x000000000e4a77a200000001884938e4 - - /* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */ - .octa 0x00000000508c2ac800000001bc2e9bc0 - - /* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */ - .octa 0x0000000021572a8000000001f9658a68 - - /* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */ - .octa 0x00000001b859daf2000000001b9224fc - - /* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */ - .octa 0x000000016f7884740000000055b2fb84 - - /* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */ - .octa 0x00000001b438810e000000018b090348 - - /* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */ - .octa 0x0000000095ddc6f2000000011ccbd5ea - - /* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */ - .octa 0x00000001d977c20c0000000007ae47f8 - - /* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */ - .octa 0x00000000ebedb99a0000000172acbec0 - - /* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */ - .octa 0x00000001df9e9e9200000001c6e3ff20 - - /* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */ - .octa 0x00000001a4a3f95200000000e1b38744 - - /* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */ - .octa 0x00000000e2f5122000000000791585b2 - - /* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */ - .octa 0x000000004aa01f3e00000000ac53b894 - - /* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */ - .octa 0x00000000b3e90a5800000001ed5f2cf4 - - /* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */ - .octa 0x000000000c9ca2aa00000001df48b2e0 - - /* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */ - .octa 0x000000015168231600000000049c1c62 - - /* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */ - .octa 0x0000000036fce78c000000017c460c12 - - /* x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1 */ - .octa 0x000000009037dc10000000015be4da7e - - /* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */ - .octa 
0x00000000d3298582000000010f38f668 - - /* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */ - .octa 0x00000001b42e8ad60000000039f40a00 - - /* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */ - .octa 0x00000000142a983800000000bd4c10c4 - - /* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */ - .octa 0x0000000109c7f1900000000042db1d98 - - /* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */ - .octa 0x0000000056ff931000000001c905bae6 - - /* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */ - .octa 0x00000001594513aa00000000069d40ea - - /* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */ - .octa 0x00000001e3b5b1e8000000008e4fbad0 - - /* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */ - .octa 0x000000011dd5fc080000000047bedd46 - - /* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */ - .octa 0x00000001675f0cc20000000026396bf8 - - /* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */ - .octa 0x00000000d1c8dd4400000000379beb92 - - /* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */ - .octa 0x0000000115ebd3d8000000000abae54a - - /* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */ - .octa 0x00000001ecbd0dac0000000007e6a128 - - /* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */ - .octa 0x00000000cdf67af2000000000ade29d2 - - /* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */ - .octa 0x000000004c01ff4c00000000f974c45c - - /* x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1 */ - .octa 0x00000000f2d8657e00000000e77ac60a - - /* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */ - .octa 0x000000006bae74c40000000145895816 - - /* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */ - .octa 0x0000000152af8aa00000000038e362be - - /* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */ - .octa 0x0000000004663802000000007f991a64 - - /* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */ - .octa 0x00000001ab2f5afc00000000fa366d3a - - /* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */ - .octa 0x0000000074a4ebd400000001a2bb34f0 - - /* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */ - .octa 0x00000001d7ab3a4c0000000028a9981e - - /* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */ - .octa 0x00000001a8da60c600000001dbc672be - - /* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */ - .octa 0x000000013cf6382000000000b04d77f6 - - /* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */ - .octa 0x00000000bec12e1e0000000124400d96 - - /* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */ - .octa 0x00000001c6368010000000014ca4b414 - - /* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */ - .octa 0x00000001e6e78758000000012fe2c938 - - /* x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1 */ - .octa 0x000000008d7f2b3c00000001faed01e6 - - /* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */ - .octa 0x000000016b4a156e000000007e80ecfe - - /* x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1 */ - .octa 0x00000001c63cfeb60000000098daee94 - - /* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */ - .octa 0x000000015f902670000000010a04edea - - /* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */ - .octa 0x00000001cd5de11e00000001c00b4524 - - /* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */ - .octa 0x000000001acaec540000000170296550 - - /* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */ - .octa 0x000000002bd0ca780000000181afaa48 - - /* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */ - .octa 0x0000000032d63d5c0000000185a31ffa - - /* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */ - .octa 0x000000001c6d4e4c000000002469f608 - - /* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */ - .octa 0x0000000106a60b92000000006980102a - - /* x^11264 mod 
p(x)` << 1, x^11328 mod p(x)` << 1 */ - .octa 0x00000000d3855e120000000111ea9ca8 - - /* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */ - .octa 0x00000000e312563600000001bd1d29ce - - /* x^9216 mod p(x)` << 1, x^9280 mod p(x)` << 1 */ - .octa 0x000000009e8f7ea400000001b34b9580 - - /* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */ - .octa 0x00000001c82e562c000000003076054e - - /* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */ - .octa 0x00000000ca9f09ce000000012a608ea4 - - /* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */ - .octa 0x00000000c63764e600000000784d05fe - - /* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */ - .octa 0x0000000168d2e49e000000016ef0d82a - - /* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */ - .octa 0x00000000e986c1480000000075bda454 - - /* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */ - .octa 0x00000000cfb65894000000003dc0a1c4 - - /* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */ - .octa 0x0000000111cadee400000000e9a5d8be - - /* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */ - .octa 0x0000000171fb63ce00000001609bc4b4 - -SHORT_CONSTANTS: - - /* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the trailing 32 bits of zeros */ - /* x^1952 mod p(x)`, x^1984 mod p(x)`, x^2016 mod p(x)`, x^2048 mod p(x)` */ - .octa 0x7fec2963e5bf80485cf015c388e56f72 - - /* x^1824 mod p(x)`, x^1856 mod p(x)`, x^1888 mod p(x)`, x^1920 mod p(x)` */ - .octa 0x38e888d4844752a9963a18920246e2e6 - - /* x^1696 mod p(x)`, x^1728 mod p(x)`, x^1760 mod p(x)`, x^1792 mod p(x)` */ - .octa 0x42316c00730206ad419a441956993a31 - - /* x^1568 mod p(x)`, x^1600 mod p(x)`, x^1632 mod p(x)`, x^1664 mod p(x)` */ - .octa 0x543d5c543e65ddf9924752ba2b830011 - - /* x^1440 mod p(x)`, x^1472 mod p(x)`, x^1504 mod p(x)`, x^1536 mod p(x)` */ - .octa 0x78e87aaf56767c9255bd7f9518e4a304 - - /* x^1312 mod p(x)`, x^1344 mod p(x)`, x^1376 mod p(x)`, x^1408 mod p(x)` */ - .octa 0x8f68fcec1903da7f6d76739fe0553f1e - - /* x^1184 mod p(x)`, x^1216 mod p(x)`, x^1248 mod p(x)`, x^1280 mod p(x)` */ - .octa 0x3f4840246791d588c133722b1fe0b5c3 - - /* x^1056 mod p(x)`, x^1088 mod p(x)`, x^1120 mod p(x)`, x^1152 mod p(x)` */ - .octa 0x34c96751b04de25a64b67ee0e55ef1f3 - - /* x^928 mod p(x)`, x^960 mod p(x)`, x^992 mod p(x)`, x^1024 mod p(x)` */ - .octa 0x156c8e180b4a395b069db049b8fdb1e7 - - /* x^800 mod p(x)`, x^832 mod p(x)`, x^864 mod p(x)`, x^896 mod p(x)` */ - .octa 0xe0b99ccbe661f7bea11bfaf3c9e90b9e - - /* x^672 mod p(x)`, x^704 mod p(x)`, x^736 mod p(x)`, x^768 mod p(x)` */ - .octa 0x041d37768cd75659817cdc5119b29a35 - - /* x^544 mod p(x)`, x^576 mod p(x)`, x^608 mod p(x)`, x^640 mod p(x)` */ - .octa 0x3a0777818cfaa9651ce9d94b36c41f1c - - /* x^416 mod p(x)`, x^448 mod p(x)`, x^480 mod p(x)`, x^512 mod p(x)` */ - .octa 0x0e148e8252377a554f256efcb82be955 - - /* x^288 mod p(x)`, x^320 mod p(x)`, x^352 mod p(x)`, x^384 mod p(x)` */ - .octa 0x9c25531d19e65ddeec1631edb2dea967 - - /* x^160 mod p(x)`, x^192 mod p(x)`, x^224 mod p(x)`, x^256 mod p(x)` */ - .octa 0x790606ff9957c0a65d27e147510ac59a - - /* x^32 mod p(x)`, x^64 mod p(x)`, x^96 mod p(x)`, x^128 mod p(x)` */ - .octa 0x82f63b786ea2d55ca66805eb18b8ea18 - - -BARRETT_CONSTANTS: - /* 33 bit reflected Barrett constant m - (4^32)/n */ - .octa 0x000000000000000000000000dea713f1 /* x^64 div p(x)` */ - /* 33 bit reflected Barrett constant n */ - .octa 0x00000000000000000000000105ec76f1 - -#endif /* __powerpc__ */ - -#endif +#define CRC_XOR +#define REFLECT +#define MAX_SIZE 32768 + +#ifndef __ASSEMBLER__ +#ifdef CRC_TABLE +static const unsigned int crc_table[] = { + 
0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, + 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb, + 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, + 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, + 0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b, + 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384, + 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, + 0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b, + 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, + 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, + 0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5, + 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa, + 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, + 0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a, + 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, + 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, + 0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48, + 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957, + 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, + 0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198, + 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, + 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, + 0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8, + 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7, + 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, + 0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789, + 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, + 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, + 0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9, + 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6, + 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, + 0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829, + 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, + 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, + 0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043, + 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c, + 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, + 0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc, + 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, + 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, + 0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652, + 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d, + 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, + 0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982, + 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, + 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, + 0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2, + 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed, + 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, + 0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f, + 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, + 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, + 0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f, + 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540, + 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, + 0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f, + 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, + 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, + 0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321, + 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e, + 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, + 0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e, + 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, + 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351,}; + +#endif /* CRC_TABLE */ +#ifdef POWER8_INTRINSICS + +/* Constants */ + +/* Reduce 262144 kbits to 1024 bits */ +static const __vector unsigned long long vcrc_const[255] + __attribute__((aligned (16))) = { +#ifdef __LITTLE_ENDIAN__ + /* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */ + { 0x000000009c37c408, 0x00000000b6ca9e20 }, + /* x^260096 mod 
p(x)` << 1, x^260160 mod p(x)` << 1 */ + { 0x00000001b51df26c, 0x00000000350249a8 }, + /* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */ + { 0x000000000724b9d0, 0x00000001862dac54 }, + /* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */ + { 0x00000001c00532fe, 0x00000001d87fb48c }, + /* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */ + { 0x00000000f05a9362, 0x00000001f39b699e }, + /* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */ + { 0x00000001e1007970, 0x0000000101da11b4 }, + /* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */ + { 0x00000000a57366ee, 0x00000001cab571e0 }, + /* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */ + { 0x0000000192011284, 0x00000000c7020cfe }, + /* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */ + { 0x0000000162716d9a, 0x00000000cdaed1ae }, + /* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */ + { 0x00000000cd97ecde, 0x00000001e804effc }, + /* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */ + { 0x0000000058812bc0, 0x0000000077c3ea3a }, + /* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */ + { 0x0000000088b8c12e, 0x0000000068df31b4 }, + /* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */ + { 0x00000001230b234c, 0x00000000b059b6c2 }, + /* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */ + { 0x00000001120b416e, 0x0000000145fb8ed8 }, + /* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */ + { 0x00000001974aecb0, 0x00000000cbc09168 }, + /* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */ + { 0x000000008ee3f226, 0x000000005ceeedc2 }, + /* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */ + { 0x00000001089aba9a, 0x0000000047d74e86 }, + /* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */ + { 0x0000000065113872, 0x00000001407e9e22 }, + /* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */ + { 0x000000005c07ec10, 0x00000001da967bda }, + /* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */ + { 0x0000000187590924, 0x000000006c898368 }, + /* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */ + { 0x00000000e35da7c6, 0x00000000f2d14c98 }, + /* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */ + { 0x000000000415855a, 0x00000001993c6ad4 }, + /* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */ + { 0x0000000073617758, 0x000000014683d1ac }, + /* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */ + { 0x0000000176021d28, 0x00000001a7c93e6c }, + /* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */ + { 0x00000001c358fd0a, 0x000000010211e90a }, + /* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */ + { 0x00000001ff7a2c18, 0x000000001119403e }, + /* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */ + { 0x00000000f2d9f7e4, 0x000000001c3261aa }, + /* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */ + { 0x000000016cf1f9c8, 0x000000014e37a634 }, + /* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */ + { 0x000000010af9279a, 0x0000000073786c0c }, + /* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */ + { 0x0000000004f101e8, 0x000000011dc037f8 }, + /* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */ + { 0x0000000070bcf184, 0x0000000031433dfc }, + /* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */ + { 0x000000000a8de642, 0x000000009cde8348 }, + /* x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1 */ + { 0x0000000062ea130c, 0x0000000038d3c2a6 }, + /* x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1 */ + { 0x00000001eb31cbb2, 0x000000011b25f260 }, + /* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */ + { 0x0000000170783448, 0x000000001629e6f0 }, + /* x^225280 mod p(x)` << 1, x^225344 mod p(x)` << 1 */ + { 
0x00000001a684b4c6, 0x0000000160838b4c }, + /* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */ + { 0x00000000253ca5b4, 0x000000007a44011c }, + /* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */ + { 0x0000000057b4b1e2, 0x00000000226f417a }, + /* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */ + { 0x00000000b6bd084c, 0x0000000045eb2eb4 }, + /* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */ + { 0x0000000123c2d592, 0x000000014459d70c }, + /* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */ + { 0x00000000159dafce, 0x00000001d406ed82 }, + /* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */ + { 0x0000000127e1a64e, 0x0000000160c8e1a8 }, + /* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */ + { 0x0000000056860754, 0x0000000027ba8098 }, + /* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */ + { 0x00000001e661aae8, 0x000000006d92d018 }, + /* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */ + { 0x00000000f82c6166, 0x000000012ed7e3f2 }, + /* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */ + { 0x00000000c4f9c7ae, 0x000000002dc87788 }, + /* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */ + { 0x0000000074203d20, 0x0000000018240bb8 }, + /* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */ + { 0x0000000198173052, 0x000000001ad38158 }, + /* x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1 */ + { 0x00000001ce8aba54, 0x00000001396b78f2 }, + /* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */ + { 0x00000001850d5d94, 0x000000011a681334 }, + /* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */ + { 0x00000001d609239c, 0x000000012104732e }, + /* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */ + { 0x000000001595f048, 0x00000000a140d90c }, + /* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */ + { 0x0000000042ccee08, 0x00000001b7215eda }, + /* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */ + { 0x000000010a389d74, 0x00000001aaf1df3c }, + /* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */ + { 0x000000012a840da6, 0x0000000029d15b8a }, + /* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */ + { 0x000000001d181c0c, 0x00000000f1a96922 }, + /* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */ + { 0x0000000068b7d1f6, 0x00000001ac80d03c }, + /* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */ + { 0x000000005b0f14fc, 0x000000000f11d56a }, + /* x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */ + { 0x0000000179e9e730, 0x00000001f1c022a2 }, + /* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */ + { 0x00000001ce1368d6, 0x0000000173d00ae2 }, + /* x^199680 mod p(x)` << 1, x^199744 mod p(x)` << 1 */ + { 0x0000000112c3a84c, 0x00000001d4ffe4ac }, + /* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */ + { 0x00000000de940fee, 0x000000016edc5ae4 }, + /* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */ + { 0x00000000fe896b7e, 0x00000001f1a02140 }, + /* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */ + { 0x00000001f797431c, 0x00000000ca0b28a0 }, + /* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */ + { 0x0000000053e989ba, 0x00000001928e30a2 }, + /* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */ + { 0x000000003920cd16, 0x0000000097b1b002 }, + /* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */ + { 0x00000001e6f579b8, 0x00000000b15bf906 }, + /* x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1 */ + { 0x000000007493cb0a, 0x00000000411c5d52 }, + /* x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1 */ + { 0x00000001bdd376d8, 0x00000001c36f3300 }, + /* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */ + { 0x000000016badfee6, 0x00000001119227e0 }, + /* 
x^189440 mod p(x)` << 1, x^189504 mod p(x)` << 1 */ + { 0x0000000071de5c58, 0x00000000114d4702 }, + /* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */ + { 0x00000000453f317c, 0x00000000458b5b98 }, + /* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */ + { 0x0000000121675cce, 0x000000012e31fb8e }, + /* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */ + { 0x00000001f409ee92, 0x000000005cf619d8 }, + /* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */ + { 0x00000000f36b9c88, 0x0000000063f4d8b2 }, + /* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */ + { 0x0000000036b398f4, 0x000000004138dc8a }, + /* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */ + { 0x00000001748f9adc, 0x00000001d29ee8e0 }, + /* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */ + { 0x00000001be94ec00, 0x000000006a08ace8 }, + /* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */ + { 0x00000000b74370d6, 0x0000000127d42010 }, + /* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */ + { 0x00000001174d0b98, 0x0000000019d76b62 }, + /* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */ + { 0x00000000befc06a4, 0x00000001b1471f6e }, + /* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */ + { 0x00000001ae125288, 0x00000001f64c19cc }, + /* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */ + { 0x0000000095c19b34, 0x00000000003c0ea0 }, + /* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */ + { 0x00000001a78496f2, 0x000000014d73abf6 }, + /* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */ + { 0x00000001ac5390a0, 0x00000001620eb844 }, + /* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */ + { 0x000000002a80ed6e, 0x0000000147655048 }, + /* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */ + { 0x00000001fa9b0128, 0x0000000067b5077e }, + /* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */ + { 0x00000001ea94929e, 0x0000000010ffe206 }, + /* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */ + { 0x0000000125f4305c, 0x000000000fee8f1e }, + /* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */ + { 0x00000001471e2002, 0x00000001da26fbae }, + /* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */ + { 0x0000000132d2253a, 0x00000001b3a8bd88 }, + /* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */ + { 0x00000000f26b3592, 0x00000000e8f3898e }, + /* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */ + { 0x00000000bc8b67b0, 0x00000000b0d0d28c }, + /* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */ + { 0x000000013a826ef2, 0x0000000030f2a798 }, + /* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */ + { 0x0000000081482c84, 0x000000000fba1002 }, + /* x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1 */ + { 0x00000000e77307c2, 0x00000000bdb9bd72 }, + /* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */ + { 0x00000000d4a07ec8, 0x0000000075d3bf5a }, + /* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */ + { 0x0000000017102100, 0x00000000ef1f98a0 }, + /* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */ + { 0x00000000db406486, 0x00000000689c7602 }, + /* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */ + { 0x0000000192db7f88, 0x000000016d5fa5fe }, + /* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */ + { 0x000000018bf67b1e, 0x00000001d0d2b9ca }, + /* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */ + { 0x000000007c09163e, 0x0000000041e7b470 }, + /* x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */ + { 0x000000000adac060, 0x00000001cbb6495e }, + /* x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1 */ + { 0x00000000bd8316ae, 0x000000010052a0b0 }, + /* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */ 
+ { 0x000000019f09ab54, 0x00000001d8effb5c }, + /* x^153600 mod p(x)` << 1, x^153664 mod p(x)` << 1 */ + { 0x0000000125155542, 0x00000001d969853c }, + /* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */ + { 0x000000018fdb5882, 0x00000000523ccce2 }, + /* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 */ + { 0x00000000e794b3f4, 0x000000001e2436bc }, + /* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */ + { 0x000000016f9bb022, 0x00000000ddd1c3a2 }, + /* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */ + { 0x00000000290c9978, 0x0000000019fcfe38 }, + /* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */ + { 0x0000000083c0f350, 0x00000001ce95db64 }, + /* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */ + { 0x0000000173ea6628, 0x00000000af582806 }, + /* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */ + { 0x00000001c8b4e00a, 0x00000001006388f6 }, + /* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */ + { 0x00000000de95d6aa, 0x0000000179eca00a }, + /* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */ + { 0x000000010b7f7248, 0x0000000122410a6a }, + /* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */ + { 0x00000001326e3a06, 0x000000004288e87c }, + /* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */ + { 0x00000000bb62c2e6, 0x000000016c5490da }, + /* x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1 */ + { 0x0000000156a4b2c2, 0x00000000d1c71f6e }, + /* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */ + { 0x000000011dfe763a, 0x00000001b4ce08a6 }, + /* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */ + { 0x000000007bcca8e2, 0x00000001466ba60c }, + /* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */ + { 0x0000000186118faa, 0x00000001f6c488a4 }, + /* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */ + { 0x0000000111a65a88, 0x000000013bfb0682 }, + /* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */ + { 0x000000003565e1c4, 0x00000000690e9e54 }, + /* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */ + { 0x000000012ed02a82, 0x00000000281346b6 }, + /* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */ + { 0x00000000c486ecfc, 0x0000000156464024 }, + /* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */ + { 0x0000000001b951b2, 0x000000016063a8dc }, + /* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */ + { 0x0000000048143916, 0x0000000116a66362 }, + /* x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */ + { 0x00000001dc2ae124, 0x000000017e8aa4d2 }, + /* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */ + { 0x00000001416c58d6, 0x00000001728eb10c }, + /* x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1 */ + { 0x00000000a479744a, 0x00000001b08fd7fa }, + /* x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1 */ + { 0x0000000096ca3a26, 0x00000001092a16e8 }, + /* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */ + { 0x00000000ff223d4e, 0x00000000a505637c }, + /* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */ + { 0x000000010e84da42, 0x00000000d94869b2 }, + /* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */ + { 0x00000001b61ba3d0, 0x00000001c8b203ae }, + /* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */ + { 0x00000000680f2de8, 0x000000005704aea0 }, + /* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */ + { 0x000000008772a9a8, 0x000000012e295fa2 }, + /* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */ + { 0x0000000155f295bc, 0x000000011d0908bc }, + /* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */ + { 0x00000000595f9282, 0x0000000193ed97ea }, + /* x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1 */ + { 0x0000000164b1c25a, 0x000000013a0f1c52 }, + /* 
x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */ + { 0x00000000fbd67c50, 0x000000010c2c40c0 }, + /* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */ + { 0x0000000096076268, 0x00000000ff6fac3e }, + /* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */ + { 0x00000001d288e4cc, 0x000000017b3609c0 }, + /* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */ + { 0x00000001eaac1bdc, 0x0000000088c8c922 }, + /* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */ + { 0x00000001f1ea39e2, 0x00000001751baae6 }, + /* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */ + { 0x00000001eb6506fc, 0x0000000107952972 }, + /* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */ + { 0x000000010f806ffe, 0x0000000162b00abe }, + /* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */ + { 0x000000010408481e, 0x000000000d7b404c }, + /* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */ + { 0x0000000188260534, 0x00000000763b13d4 }, + /* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */ + { 0x0000000058fc73e0, 0x00000000f6dc22d8 }, + /* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */ + { 0x00000000391c59b8, 0x000000007daae060 }, + /* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */ + { 0x000000018b638400, 0x000000013359ab7c }, + /* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */ + { 0x000000011738f5c4, 0x000000008add438a }, + /* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */ + { 0x000000008cf7c6da, 0x00000001edbefdea }, + /* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */ + { 0x00000001ef97fb16, 0x000000004104e0f8 }, + /* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */ + { 0x0000000102130e20, 0x00000000b48a8222 }, + /* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */ + { 0x00000000db968898, 0x00000001bcb46844 }, + /* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */ + { 0x00000000b5047b5e, 0x000000013293ce0a }, + /* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */ + { 0x000000010b90fdb2, 0x00000001710d0844 }, + /* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */ + { 0x000000004834a32e, 0x0000000117907f6e }, + /* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */ + { 0x0000000059c8f2b0, 0x0000000087ddf93e }, + /* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */ + { 0x0000000122cec508, 0x000000005970e9b0 }, + /* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */ + { 0x000000000a330cda, 0x0000000185b2b7d0 }, + /* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */ + { 0x000000014a47148c, 0x00000001dcee0efc }, + /* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */ + { 0x0000000042c61cb8, 0x0000000030da2722 }, + /* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */ + { 0x0000000012fe6960, 0x000000012f925a18 }, + /* x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1 */ + { 0x00000000dbda2c20, 0x00000000dd2e357c }, + /* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */ + { 0x000000011122410c, 0x00000000071c80de }, + /* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */ + { 0x00000000977b2070, 0x000000011513140a }, + /* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */ + { 0x000000014050438e, 0x00000001df876e8e }, + /* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */ + { 0x0000000147c840e8, 0x000000015f81d6ce }, + /* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */ + { 0x00000001cc7c88ce, 0x000000019dd94dbe }, + /* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */ + { 0x00000001476b35a4, 0x00000001373d206e }, + /* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */ + { 0x000000013d52d508, 0x00000000668ccade }, + /* x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1 */ + { 0x000000008e4be32e, 
0x00000001b192d268 }, + /* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */ + { 0x00000000024120fe, 0x00000000e30f3a78 }, + /* x^81920 mod p(x)` << 1, x^81984 mod p(x)` << 1 */ + { 0x00000000ddecddb4, 0x000000010ef1f7bc }, + /* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */ + { 0x00000000d4d403bc, 0x00000001f5ac7380 }, + /* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */ + { 0x00000001734b89aa, 0x000000011822ea70 }, + /* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */ + { 0x000000010e7a58d6, 0x00000000c3a33848 }, + /* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */ + { 0x00000001f9f04e9c, 0x00000001bd151c24 }, + /* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */ + { 0x00000000b692225e, 0x0000000056002d76 }, + /* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */ + { 0x000000019b8d3f3e, 0x000000014657c4f4 }, + /* x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */ + { 0x00000001a874f11e, 0x0000000113742d7c }, + /* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */ + { 0x000000010d5a4254, 0x000000019c5920ba }, + /* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */ + { 0x00000000bbb2f5d6, 0x000000005216d2d6 }, + /* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */ + { 0x0000000179cc0e36, 0x0000000136f5ad8a }, + /* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */ + { 0x00000001dca1da4a, 0x000000018b07beb6 }, + /* x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */ + { 0x00000000feb1a192, 0x00000000db1e93b0 }, + /* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */ + { 0x00000000d1eeedd6, 0x000000000b96fa3a }, + /* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */ + { 0x000000008fad9bb4, 0x00000001d9968af0 }, + /* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */ + { 0x00000001884938e4, 0x000000000e4a77a2 }, + /* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */ + { 0x00000001bc2e9bc0, 0x00000000508c2ac8 }, + /* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */ + { 0x00000001f9658a68, 0x0000000021572a80 }, + /* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */ + { 0x000000001b9224fc, 0x00000001b859daf2 }, + /* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */ + { 0x0000000055b2fb84, 0x000000016f788474 }, + /* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */ + { 0x000000018b090348, 0x00000001b438810e }, + /* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */ + { 0x000000011ccbd5ea, 0x0000000095ddc6f2 }, + /* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */ + { 0x0000000007ae47f8, 0x00000001d977c20c }, + /* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */ + { 0x0000000172acbec0, 0x00000000ebedb99a }, + /* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */ + { 0x00000001c6e3ff20, 0x00000001df9e9e92 }, + /* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */ + { 0x00000000e1b38744, 0x00000001a4a3f952 }, + /* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */ + { 0x00000000791585b2, 0x00000000e2f51220 }, + /* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */ + { 0x00000000ac53b894, 0x000000004aa01f3e }, + /* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */ + { 0x00000001ed5f2cf4, 0x00000000b3e90a58 }, + /* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */ + { 0x00000001df48b2e0, 0x000000000c9ca2aa }, + /* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */ + { 0x00000000049c1c62, 0x0000000151682316 }, + /* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */ + { 0x000000017c460c12, 0x0000000036fce78c }, + /* x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1 */ + { 0x000000015be4da7e, 0x000000009037dc10 }, + /* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */ + { 0x000000010f38f668, 0x00000000d3298582 
}, + /* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */ + { 0x0000000039f40a00, 0x00000001b42e8ad6 }, + /* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */ + { 0x00000000bd4c10c4, 0x00000000142a9838 }, + /* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */ + { 0x0000000042db1d98, 0x0000000109c7f190 }, + /* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */ + { 0x00000001c905bae6, 0x0000000056ff9310 }, + /* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */ + { 0x00000000069d40ea, 0x00000001594513aa }, + /* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */ + { 0x000000008e4fbad0, 0x00000001e3b5b1e8 }, + /* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */ + { 0x0000000047bedd46, 0x000000011dd5fc08 }, + /* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */ + { 0x0000000026396bf8, 0x00000001675f0cc2 }, + /* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */ + { 0x00000000379beb92, 0x00000000d1c8dd44 }, + /* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */ + { 0x000000000abae54a, 0x0000000115ebd3d8 }, + /* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */ + { 0x0000000007e6a128, 0x00000001ecbd0dac }, + /* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */ + { 0x000000000ade29d2, 0x00000000cdf67af2 }, + /* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */ + { 0x00000000f974c45c, 0x000000004c01ff4c }, + /* x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1 */ + { 0x00000000e77ac60a, 0x00000000f2d8657e }, + /* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */ + { 0x0000000145895816, 0x000000006bae74c4 }, + /* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */ + { 0x0000000038e362be, 0x0000000152af8aa0 }, + /* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */ + { 0x000000007f991a64, 0x0000000004663802 }, + /* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */ + { 0x00000000fa366d3a, 0x00000001ab2f5afc }, + /* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */ + { 0x00000001a2bb34f0, 0x0000000074a4ebd4 }, + /* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */ + { 0x0000000028a9981e, 0x00000001d7ab3a4c }, + /* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */ + { 0x00000001dbc672be, 0x00000001a8da60c6 }, + /* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */ + { 0x00000000b04d77f6, 0x000000013cf63820 }, + /* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */ + { 0x0000000124400d96, 0x00000000bec12e1e }, + /* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */ + { 0x000000014ca4b414, 0x00000001c6368010 }, + /* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */ + { 0x000000012fe2c938, 0x00000001e6e78758 }, + /* x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1 */ + { 0x00000001faed01e6, 0x000000008d7f2b3c }, + /* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */ + { 0x000000007e80ecfe, 0x000000016b4a156e }, + /* x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1 */ + { 0x0000000098daee94, 0x00000001c63cfeb6 }, + /* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */ + { 0x000000010a04edea, 0x000000015f902670 }, + /* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */ + { 0x00000001c00b4524, 0x00000001cd5de11e }, + /* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */ + { 0x0000000170296550, 0x000000001acaec54 }, + /* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */ + { 0x0000000181afaa48, 0x000000002bd0ca78 }, + /* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */ + { 0x0000000185a31ffa, 0x0000000032d63d5c }, + /* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */ + { 0x000000002469f608, 0x000000001c6d4e4c }, + /* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */ + { 0x000000006980102a, 0x0000000106a60b92 }, + /* x^11264 mod 
p(x)` << 1, x^11328 mod p(x)` << 1 */ + { 0x0000000111ea9ca8, 0x00000000d3855e12 }, + /* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */ + { 0x00000001bd1d29ce, 0x00000000e3125636 }, + /* x^9216 mod p(x)` << 1, x^9280 mod p(x)` << 1 */ + { 0x00000001b34b9580, 0x000000009e8f7ea4 }, + /* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */ + { 0x000000003076054e, 0x00000001c82e562c }, + /* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */ + { 0x000000012a608ea4, 0x00000000ca9f09ce }, + /* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */ + { 0x00000000784d05fe, 0x00000000c63764e6 }, + /* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */ + { 0x000000016ef0d82a, 0x0000000168d2e49e }, + /* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */ + { 0x0000000075bda454, 0x00000000e986c148 }, + /* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */ + { 0x000000003dc0a1c4, 0x00000000cfb65894 }, + /* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */ + { 0x00000000e9a5d8be, 0x0000000111cadee4 }, + /* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */ + { 0x00000001609bc4b4, 0x0000000171fb63ce } +#else /* __LITTLE_ENDIAN__ */ + /* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */ + { 0x00000000b6ca9e20, 0x000000009c37c408 }, + /* x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1 */ + { 0x00000000350249a8, 0x00000001b51df26c }, + /* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */ + { 0x00000001862dac54, 0x000000000724b9d0 }, + /* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */ + { 0x00000001d87fb48c, 0x00000001c00532fe }, + /* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */ + { 0x00000001f39b699e, 0x00000000f05a9362 }, + /* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */ + { 0x0000000101da11b4, 0x00000001e1007970 }, + /* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */ + { 0x00000001cab571e0, 0x00000000a57366ee }, + /* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */ + { 0x00000000c7020cfe, 0x0000000192011284 }, + /* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */ + { 0x00000000cdaed1ae, 0x0000000162716d9a }, + /* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */ + { 0x00000001e804effc, 0x00000000cd97ecde }, + /* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */ + { 0x0000000077c3ea3a, 0x0000000058812bc0 }, + /* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */ + { 0x0000000068df31b4, 0x0000000088b8c12e }, + /* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */ + { 0x00000000b059b6c2, 0x00000001230b234c }, + /* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */ + { 0x0000000145fb8ed8, 0x00000001120b416e }, + /* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */ + { 0x00000000cbc09168, 0x00000001974aecb0 }, + /* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */ + { 0x000000005ceeedc2, 0x000000008ee3f226 }, + /* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */ + { 0x0000000047d74e86, 0x00000001089aba9a }, + /* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */ + { 0x00000001407e9e22, 0x0000000065113872 }, + /* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */ + { 0x00000001da967bda, 0x000000005c07ec10 }, + /* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */ + { 0x000000006c898368, 0x0000000187590924 }, + /* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */ + { 0x00000000f2d14c98, 0x00000000e35da7c6 }, + /* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */ + { 0x00000001993c6ad4, 0x000000000415855a }, + /* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */ + { 0x000000014683d1ac, 0x0000000073617758 }, + /* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */ + { 
0x00000001a7c93e6c, 0x0000000176021d28 }, + /* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */ + { 0x000000010211e90a, 0x00000001c358fd0a }, + /* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */ + { 0x000000001119403e, 0x00000001ff7a2c18 }, + /* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */ + { 0x000000001c3261aa, 0x00000000f2d9f7e4 }, + /* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */ + { 0x000000014e37a634, 0x000000016cf1f9c8 }, + /* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */ + { 0x0000000073786c0c, 0x000000010af9279a }, + /* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */ + { 0x000000011dc037f8, 0x0000000004f101e8 }, + /* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */ + { 0x0000000031433dfc, 0x0000000070bcf184 }, + /* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */ + { 0x000000009cde8348, 0x000000000a8de642 }, + /* x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1 */ + { 0x0000000038d3c2a6, 0x0000000062ea130c }, + /* x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1 */ + { 0x000000011b25f260, 0x00000001eb31cbb2 }, + /* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */ + { 0x000000001629e6f0, 0x0000000170783448 }, + /* x^225280 mod p(x)` << 1, x^225344 mod p(x)` << 1 */ + { 0x0000000160838b4c, 0x00000001a684b4c6 }, + /* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */ + { 0x000000007a44011c, 0x00000000253ca5b4 }, + /* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */ + { 0x00000000226f417a, 0x0000000057b4b1e2 }, + /* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */ + { 0x0000000045eb2eb4, 0x00000000b6bd084c }, + /* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */ + { 0x000000014459d70c, 0x0000000123c2d592 }, + /* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */ + { 0x00000001d406ed82, 0x00000000159dafce }, + /* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */ + { 0x0000000160c8e1a8, 0x0000000127e1a64e }, + /* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */ + { 0x0000000027ba8098, 0x0000000056860754 }, + /* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */ + { 0x000000006d92d018, 0x00000001e661aae8 }, + /* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */ + { 0x000000012ed7e3f2, 0x00000000f82c6166 }, + /* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */ + { 0x000000002dc87788, 0x00000000c4f9c7ae }, + /* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */ + { 0x0000000018240bb8, 0x0000000074203d20 }, + /* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */ + { 0x000000001ad38158, 0x0000000198173052 }, + /* x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1 */ + { 0x00000001396b78f2, 0x00000001ce8aba54 }, + /* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */ + { 0x000000011a681334, 0x00000001850d5d94 }, + /* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */ + { 0x000000012104732e, 0x00000001d609239c }, + /* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */ + { 0x00000000a140d90c, 0x000000001595f048 }, + /* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */ + { 0x00000001b7215eda, 0x0000000042ccee08 }, + /* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */ + { 0x00000001aaf1df3c, 0x000000010a389d74 }, + /* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */ + { 0x0000000029d15b8a, 0x000000012a840da6 }, + /* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */ + { 0x00000000f1a96922, 0x000000001d181c0c }, + /* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */ + { 0x00000001ac80d03c, 0x0000000068b7d1f6 }, + /* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */ + { 0x000000000f11d56a, 0x000000005b0f14fc }, + /* 
x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */ + { 0x00000001f1c022a2, 0x0000000179e9e730 }, + /* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */ + { 0x0000000173d00ae2, 0x00000001ce1368d6 }, + /* x^199680 mod p(x)` << 1, x^199744 mod p(x)` << 1 */ + { 0x00000001d4ffe4ac, 0x0000000112c3a84c }, + /* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */ + { 0x000000016edc5ae4, 0x00000000de940fee }, + /* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */ + { 0x00000001f1a02140, 0x00000000fe896b7e }, + /* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */ + { 0x00000000ca0b28a0, 0x00000001f797431c }, + /* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */ + { 0x00000001928e30a2, 0x0000000053e989ba }, + /* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */ + { 0x0000000097b1b002, 0x000000003920cd16 }, + /* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */ + { 0x00000000b15bf906, 0x00000001e6f579b8 }, + /* x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1 */ + { 0x00000000411c5d52, 0x000000007493cb0a }, + /* x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1 */ + { 0x00000001c36f3300, 0x00000001bdd376d8 }, + /* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */ + { 0x00000001119227e0, 0x000000016badfee6 }, + /* x^189440 mod p(x)` << 1, x^189504 mod p(x)` << 1 */ + { 0x00000000114d4702, 0x0000000071de5c58 }, + /* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */ + { 0x00000000458b5b98, 0x00000000453f317c }, + /* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */ + { 0x000000012e31fb8e, 0x0000000121675cce }, + /* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */ + { 0x000000005cf619d8, 0x00000001f409ee92 }, + /* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */ + { 0x0000000063f4d8b2, 0x00000000f36b9c88 }, + /* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */ + { 0x000000004138dc8a, 0x0000000036b398f4 }, + /* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */ + { 0x00000001d29ee8e0, 0x00000001748f9adc }, + /* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */ + { 0x000000006a08ace8, 0x00000001be94ec00 }, + /* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */ + { 0x0000000127d42010, 0x00000000b74370d6 }, + /* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */ + { 0x0000000019d76b62, 0x00000001174d0b98 }, + /* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */ + { 0x00000001b1471f6e, 0x00000000befc06a4 }, + /* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */ + { 0x00000001f64c19cc, 0x00000001ae125288 }, + /* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */ + { 0x00000000003c0ea0, 0x0000000095c19b34 }, + /* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */ + { 0x000000014d73abf6, 0x00000001a78496f2 }, + /* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */ + { 0x00000001620eb844, 0x00000001ac5390a0 }, + /* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */ + { 0x0000000147655048, 0x000000002a80ed6e }, + /* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */ + { 0x0000000067b5077e, 0x00000001fa9b0128 }, + /* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */ + { 0x0000000010ffe206, 0x00000001ea94929e }, + /* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */ + { 0x000000000fee8f1e, 0x0000000125f4305c }, + /* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */ + { 0x00000001da26fbae, 0x00000001471e2002 }, + /* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */ + { 0x00000001b3a8bd88, 0x0000000132d2253a }, + /* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */ + { 0x00000000e8f3898e, 0x00000000f26b3592 }, + /* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */ 
+ { 0x00000000b0d0d28c, 0x00000000bc8b67b0 }, + /* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */ + { 0x0000000030f2a798, 0x000000013a826ef2 }, + /* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */ + { 0x000000000fba1002, 0x0000000081482c84 }, + /* x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1 */ + { 0x00000000bdb9bd72, 0x00000000e77307c2 }, + /* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */ + { 0x0000000075d3bf5a, 0x00000000d4a07ec8 }, + /* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */ + { 0x00000000ef1f98a0, 0x0000000017102100 }, + /* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */ + { 0x00000000689c7602, 0x00000000db406486 }, + /* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */ + { 0x000000016d5fa5fe, 0x0000000192db7f88 }, + /* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */ + { 0x00000001d0d2b9ca, 0x000000018bf67b1e }, + /* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */ + { 0x0000000041e7b470, 0x000000007c09163e }, + /* x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */ + { 0x00000001cbb6495e, 0x000000000adac060 }, + /* x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1 */ + { 0x000000010052a0b0, 0x00000000bd8316ae }, + /* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */ + { 0x00000001d8effb5c, 0x000000019f09ab54 }, + /* x^153600 mod p(x)` << 1, x^153664 mod p(x)` << 1 */ + { 0x00000001d969853c, 0x0000000125155542 }, + /* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */ + { 0x00000000523ccce2, 0x000000018fdb5882 }, + /* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 */ + { 0x000000001e2436bc, 0x00000000e794b3f4 }, + /* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */ + { 0x00000000ddd1c3a2, 0x000000016f9bb022 }, + /* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */ + { 0x0000000019fcfe38, 0x00000000290c9978 }, + /* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */ + { 0x00000001ce95db64, 0x0000000083c0f350 }, + /* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */ + { 0x00000000af582806, 0x0000000173ea6628 }, + /* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */ + { 0x00000001006388f6, 0x00000001c8b4e00a }, + /* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */ + { 0x0000000179eca00a, 0x00000000de95d6aa }, + /* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */ + { 0x0000000122410a6a, 0x000000010b7f7248 }, + /* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */ + { 0x000000004288e87c, 0x00000001326e3a06 }, + /* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */ + { 0x000000016c5490da, 0x00000000bb62c2e6 }, + /* x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1 */ + { 0x00000000d1c71f6e, 0x0000000156a4b2c2 }, + /* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */ + { 0x00000001b4ce08a6, 0x000000011dfe763a }, + /* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */ + { 0x00000001466ba60c, 0x000000007bcca8e2 }, + /* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */ + { 0x00000001f6c488a4, 0x0000000186118faa }, + /* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */ + { 0x000000013bfb0682, 0x0000000111a65a88 }, + /* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */ + { 0x00000000690e9e54, 0x000000003565e1c4 }, + /* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */ + { 0x00000000281346b6, 0x000000012ed02a82 }, + /* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */ + { 0x0000000156464024, 0x00000000c486ecfc }, + /* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */ + { 0x000000016063a8dc, 0x0000000001b951b2 }, + /* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */ + { 0x0000000116a66362, 0x0000000048143916 }, + /* 
x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */ + { 0x000000017e8aa4d2, 0x00000001dc2ae124 }, + /* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */ + { 0x00000001728eb10c, 0x00000001416c58d6 }, + /* x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1 */ + { 0x00000001b08fd7fa, 0x00000000a479744a }, + /* x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1 */ + { 0x00000001092a16e8, 0x0000000096ca3a26 }, + /* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */ + { 0x00000000a505637c, 0x00000000ff223d4e }, + /* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */ + { 0x00000000d94869b2, 0x000000010e84da42 }, + /* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */ + { 0x00000001c8b203ae, 0x00000001b61ba3d0 }, + /* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */ + { 0x000000005704aea0, 0x00000000680f2de8 }, + /* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */ + { 0x000000012e295fa2, 0x000000008772a9a8 }, + /* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */ + { 0x000000011d0908bc, 0x0000000155f295bc }, + /* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */ + { 0x0000000193ed97ea, 0x00000000595f9282 }, + /* x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1 */ + { 0x000000013a0f1c52, 0x0000000164b1c25a }, + /* x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */ + { 0x000000010c2c40c0, 0x00000000fbd67c50 }, + /* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */ + { 0x00000000ff6fac3e, 0x0000000096076268 }, + /* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */ + { 0x000000017b3609c0, 0x00000001d288e4cc }, + /* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */ + { 0x0000000088c8c922, 0x00000001eaac1bdc }, + /* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */ + { 0x00000001751baae6, 0x00000001f1ea39e2 }, + /* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */ + { 0x0000000107952972, 0x00000001eb6506fc }, + /* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */ + { 0x0000000162b00abe, 0x000000010f806ffe }, + /* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */ + { 0x000000000d7b404c, 0x000000010408481e }, + /* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */ + { 0x00000000763b13d4, 0x0000000188260534 }, + /* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */ + { 0x00000000f6dc22d8, 0x0000000058fc73e0 }, + /* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */ + { 0x000000007daae060, 0x00000000391c59b8 }, + /* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */ + { 0x000000013359ab7c, 0x000000018b638400 }, + /* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */ + { 0x000000008add438a, 0x000000011738f5c4 }, + /* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */ + { 0x00000001edbefdea, 0x000000008cf7c6da }, + /* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */ + { 0x000000004104e0f8, 0x00000001ef97fb16 }, + /* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */ + { 0x00000000b48a8222, 0x0000000102130e20 }, + /* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */ + { 0x00000001bcb46844, 0x00000000db968898 }, + /* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */ + { 0x000000013293ce0a, 0x00000000b5047b5e }, + /* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */ + { 0x00000001710d0844, 0x000000010b90fdb2 }, + /* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */ + { 0x0000000117907f6e, 0x000000004834a32e }, + /* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */ + { 0x0000000087ddf93e, 0x0000000059c8f2b0 }, + /* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */ + { 0x000000005970e9b0, 0x0000000122cec508 }, + /* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */ + { 
0x0000000185b2b7d0, 0x000000000a330cda }, + /* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */ + { 0x00000001dcee0efc, 0x000000014a47148c }, + /* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */ + { 0x0000000030da2722, 0x0000000042c61cb8 }, + /* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */ + { 0x000000012f925a18, 0x0000000012fe6960 }, + /* x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1 */ + { 0x00000000dd2e357c, 0x00000000dbda2c20 }, + /* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */ + { 0x00000000071c80de, 0x000000011122410c }, + /* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */ + { 0x000000011513140a, 0x00000000977b2070 }, + /* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */ + { 0x00000001df876e8e, 0x000000014050438e }, + /* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */ + { 0x000000015f81d6ce, 0x0000000147c840e8 }, + /* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */ + { 0x000000019dd94dbe, 0x00000001cc7c88ce }, + /* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */ + { 0x00000001373d206e, 0x00000001476b35a4 }, + /* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */ + { 0x00000000668ccade, 0x000000013d52d508 }, + /* x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1 */ + { 0x00000001b192d268, 0x000000008e4be32e }, + /* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */ + { 0x00000000e30f3a78, 0x00000000024120fe }, + /* x^81920 mod p(x)` << 1, x^81984 mod p(x)` << 1 */ + { 0x000000010ef1f7bc, 0x00000000ddecddb4 }, + /* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */ + { 0x00000001f5ac7380, 0x00000000d4d403bc }, + /* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */ + { 0x000000011822ea70, 0x00000001734b89aa }, + /* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */ + { 0x00000000c3a33848, 0x000000010e7a58d6 }, + /* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */ + { 0x00000001bd151c24, 0x00000001f9f04e9c }, + /* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */ + { 0x0000000056002d76, 0x00000000b692225e }, + /* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */ + { 0x000000014657c4f4, 0x000000019b8d3f3e }, + /* x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */ + { 0x0000000113742d7c, 0x00000001a874f11e }, + /* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */ + { 0x000000019c5920ba, 0x000000010d5a4254 }, + /* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */ + { 0x000000005216d2d6, 0x00000000bbb2f5d6 }, + /* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */ + { 0x0000000136f5ad8a, 0x0000000179cc0e36 }, + /* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */ + { 0x000000018b07beb6, 0x00000001dca1da4a }, + /* x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */ + { 0x00000000db1e93b0, 0x00000000feb1a192 }, + /* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */ + { 0x000000000b96fa3a, 0x00000000d1eeedd6 }, + /* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */ + { 0x00000001d9968af0, 0x000000008fad9bb4 }, + /* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */ + { 0x000000000e4a77a2, 0x00000001884938e4 }, + /* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */ + { 0x00000000508c2ac8, 0x00000001bc2e9bc0 }, + /* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */ + { 0x0000000021572a80, 0x00000001f9658a68 }, + /* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */ + { 0x00000001b859daf2, 0x000000001b9224fc }, + /* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */ + { 0x000000016f788474, 0x0000000055b2fb84 }, + /* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */ + { 0x00000001b438810e, 0x000000018b090348 }, + /* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */ + { 0x0000000095ddc6f2, 
0x000000011ccbd5ea }, + /* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */ + { 0x00000001d977c20c, 0x0000000007ae47f8 }, + /* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */ + { 0x00000000ebedb99a, 0x0000000172acbec0 }, + /* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */ + { 0x00000001df9e9e92, 0x00000001c6e3ff20 }, + /* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */ + { 0x00000001a4a3f952, 0x00000000e1b38744 }, + /* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */ + { 0x00000000e2f51220, 0x00000000791585b2 }, + /* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */ + { 0x000000004aa01f3e, 0x00000000ac53b894 }, + /* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */ + { 0x00000000b3e90a58, 0x00000001ed5f2cf4 }, + /* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */ + { 0x000000000c9ca2aa, 0x00000001df48b2e0 }, + /* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */ + { 0x0000000151682316, 0x00000000049c1c62 }, + /* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */ + { 0x0000000036fce78c, 0x000000017c460c12 }, + /* x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1 */ + { 0x000000009037dc10, 0x000000015be4da7e }, + /* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */ + { 0x00000000d3298582, 0x000000010f38f668 }, + /* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */ + { 0x00000001b42e8ad6, 0x0000000039f40a00 }, + /* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */ + { 0x00000000142a9838, 0x00000000bd4c10c4 }, + /* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */ + { 0x0000000109c7f190, 0x0000000042db1d98 }, + /* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */ + { 0x0000000056ff9310, 0x00000001c905bae6 }, + /* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */ + { 0x00000001594513aa, 0x00000000069d40ea }, + /* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */ + { 0x00000001e3b5b1e8, 0x000000008e4fbad0 }, + /* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */ + { 0x000000011dd5fc08, 0x0000000047bedd46 }, + /* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */ + { 0x00000001675f0cc2, 0x0000000026396bf8 }, + /* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */ + { 0x00000000d1c8dd44, 0x00000000379beb92 }, + /* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */ + { 0x0000000115ebd3d8, 0x000000000abae54a }, + /* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */ + { 0x00000001ecbd0dac, 0x0000000007e6a128 }, + /* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */ + { 0x00000000cdf67af2, 0x000000000ade29d2 }, + /* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */ + { 0x000000004c01ff4c, 0x00000000f974c45c }, + /* x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1 */ + { 0x00000000f2d8657e, 0x00000000e77ac60a }, + /* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */ + { 0x000000006bae74c4, 0x0000000145895816 }, + /* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */ + { 0x0000000152af8aa0, 0x0000000038e362be }, + /* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */ + { 0x0000000004663802, 0x000000007f991a64 }, + /* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */ + { 0x00000001ab2f5afc, 0x00000000fa366d3a }, + /* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */ + { 0x0000000074a4ebd4, 0x00000001a2bb34f0 }, + /* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */ + { 0x00000001d7ab3a4c, 0x0000000028a9981e }, + /* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */ + { 0x00000001a8da60c6, 0x00000001dbc672be }, + /* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */ + { 0x000000013cf63820, 0x00000000b04d77f6 }, + /* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */ + { 0x00000000bec12e1e, 0x0000000124400d96 
}, + /* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */ + { 0x00000001c6368010, 0x000000014ca4b414 }, + /* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */ + { 0x00000001e6e78758, 0x000000012fe2c938 }, + /* x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1 */ + { 0x000000008d7f2b3c, 0x00000001faed01e6 }, + /* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */ + { 0x000000016b4a156e, 0x000000007e80ecfe }, + /* x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1 */ + { 0x00000001c63cfeb6, 0x0000000098daee94 }, + /* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */ + { 0x000000015f902670, 0x000000010a04edea }, + /* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */ + { 0x00000001cd5de11e, 0x00000001c00b4524 }, + /* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */ + { 0x000000001acaec54, 0x0000000170296550 }, + /* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */ + { 0x000000002bd0ca78, 0x0000000181afaa48 }, + /* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */ + { 0x0000000032d63d5c, 0x0000000185a31ffa }, + /* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */ + { 0x000000001c6d4e4c, 0x000000002469f608 }, + /* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */ + { 0x0000000106a60b92, 0x000000006980102a }, + /* x^11264 mod p(x)` << 1, x^11328 mod p(x)` << 1 */ + { 0x00000000d3855e12, 0x0000000111ea9ca8 }, + /* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */ + { 0x00000000e3125636, 0x00000001bd1d29ce }, + /* x^9216 mod p(x)` << 1, x^9280 mod p(x)` << 1 */ + { 0x000000009e8f7ea4, 0x00000001b34b9580 }, + /* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */ + { 0x00000001c82e562c, 0x000000003076054e }, + /* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */ + { 0x00000000ca9f09ce, 0x000000012a608ea4 }, + /* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */ + { 0x00000000c63764e6, 0x00000000784d05fe }, + /* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */ + { 0x0000000168d2e49e, 0x000000016ef0d82a }, + /* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */ + { 0x00000000e986c148, 0x0000000075bda454 }, + /* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */ + { 0x00000000cfb65894, 0x000000003dc0a1c4 }, + /* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */ + { 0x0000000111cadee4, 0x00000000e9a5d8be }, + /* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */ + { 0x0000000171fb63ce, 0x00000001609bc4b4 } +#endif /* __LITTLE_ENDIAN__ */ + }; + +/* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the trailing 32 bits of zeros */ + +static const __vector unsigned long long vcrc_short_const[16] + __attribute__((aligned (16))) = { +#ifdef __LITTLE_ENDIAN__ + /* x^1952 mod p(x) , x^1984 mod p(x) , x^2016 mod p(x) , x^2048 mod p(x) */ + { 0x5cf015c388e56f72, 0x7fec2963e5bf8048 }, + /* x^1824 mod p(x) , x^1856 mod p(x) , x^1888 mod p(x) , x^1920 mod p(x) */ + { 0x963a18920246e2e6, 0x38e888d4844752a9 }, + /* x^1696 mod p(x) , x^1728 mod p(x) , x^1760 mod p(x) , x^1792 mod p(x) */ + { 0x419a441956993a31, 0x42316c00730206ad }, + /* x^1568 mod p(x) , x^1600 mod p(x) , x^1632 mod p(x) , x^1664 mod p(x) */ + { 0x924752ba2b830011, 0x543d5c543e65ddf9 }, + /* x^1440 mod p(x) , x^1472 mod p(x) , x^1504 mod p(x) , x^1536 mod p(x) */ + { 0x55bd7f9518e4a304, 0x78e87aaf56767c92 }, + /* x^1312 mod p(x) , x^1344 mod p(x) , x^1376 mod p(x) , x^1408 mod p(x) */ + { 0x6d76739fe0553f1e, 0x8f68fcec1903da7f }, + /* x^1184 mod p(x) , x^1216 mod p(x) , x^1248 mod p(x) , x^1280 mod p(x) */ + { 0xc133722b1fe0b5c3, 0x3f4840246791d588 }, + /* x^1056 mod p(x) , x^1088 mod p(x) , x^1120 mod p(x) , x^1152 mod p(x) */ + { 0x64b67ee0e55ef1f3, 
0x34c96751b04de25a }, + /* x^928 mod p(x) , x^960 mod p(x) , x^992 mod p(x) , x^1024 mod p(x) */ + { 0x069db049b8fdb1e7, 0x156c8e180b4a395b }, + /* x^800 mod p(x) , x^832 mod p(x) , x^864 mod p(x) , x^896 mod p(x) */ + { 0xa11bfaf3c9e90b9e, 0xe0b99ccbe661f7be }, + /* x^672 mod p(x) , x^704 mod p(x) , x^736 mod p(x) , x^768 mod p(x) */ + { 0x817cdc5119b29a35, 0x041d37768cd75659 }, + /* x^544 mod p(x) , x^576 mod p(x) , x^608 mod p(x) , x^640 mod p(x) */ + { 0x1ce9d94b36c41f1c, 0x3a0777818cfaa965 }, + /* x^416 mod p(x) , x^448 mod p(x) , x^480 mod p(x) , x^512 mod p(x) */ + { 0x4f256efcb82be955, 0x0e148e8252377a55 }, + /* x^288 mod p(x) , x^320 mod p(x) , x^352 mod p(x) , x^384 mod p(x) */ + { 0xec1631edb2dea967, 0x9c25531d19e65dde }, + /* x^160 mod p(x) , x^192 mod p(x) , x^224 mod p(x) , x^256 mod p(x) */ + { 0x5d27e147510ac59a, 0x790606ff9957c0a6 }, + /* x^32 mod p(x) , x^64 mod p(x) , x^96 mod p(x) , x^128 mod p(x) */ + { 0xa66805eb18b8ea18, 0x82f63b786ea2d55c } +#else /* __LITTLE_ENDIAN__ */ + /* x^1952 mod p(x) , x^1984 mod p(x) , x^2016 mod p(x) , x^2048 mod p(x) */ + { 0x7fec2963e5bf8048, 0x5cf015c388e56f72 }, + /* x^1824 mod p(x) , x^1856 mod p(x) , x^1888 mod p(x) , x^1920 mod p(x) */ + { 0x38e888d4844752a9, 0x963a18920246e2e6 }, + /* x^1696 mod p(x) , x^1728 mod p(x) , x^1760 mod p(x) , x^1792 mod p(x) */ + { 0x42316c00730206ad, 0x419a441956993a31 }, + /* x^1568 mod p(x) , x^1600 mod p(x) , x^1632 mod p(x) , x^1664 mod p(x) */ + { 0x543d5c543e65ddf9, 0x924752ba2b830011 }, + /* x^1440 mod p(x) , x^1472 mod p(x) , x^1504 mod p(x) , x^1536 mod p(x) */ + { 0x78e87aaf56767c92, 0x55bd7f9518e4a304 }, + /* x^1312 mod p(x) , x^1344 mod p(x) , x^1376 mod p(x) , x^1408 mod p(x) */ + { 0x8f68fcec1903da7f, 0x6d76739fe0553f1e }, + /* x^1184 mod p(x) , x^1216 mod p(x) , x^1248 mod p(x) , x^1280 mod p(x) */ + { 0x3f4840246791d588, 0xc133722b1fe0b5c3 }, + /* x^1056 mod p(x) , x^1088 mod p(x) , x^1120 mod p(x) , x^1152 mod p(x) */ + { 0x34c96751b04de25a, 0x64b67ee0e55ef1f3 }, + /* x^928 mod p(x) , x^960 mod p(x) , x^992 mod p(x) , x^1024 mod p(x) */ + { 0x156c8e180b4a395b, 0x069db049b8fdb1e7 }, + /* x^800 mod p(x) , x^832 mod p(x) , x^864 mod p(x) , x^896 mod p(x) */ + { 0xe0b99ccbe661f7be, 0xa11bfaf3c9e90b9e }, + /* x^672 mod p(x) , x^704 mod p(x) , x^736 mod p(x) , x^768 mod p(x) */ + { 0x041d37768cd75659, 0x817cdc5119b29a35 }, + /* x^544 mod p(x) , x^576 mod p(x) , x^608 mod p(x) , x^640 mod p(x) */ + { 0x3a0777818cfaa965, 0x1ce9d94b36c41f1c }, + /* x^416 mod p(x) , x^448 mod p(x) , x^480 mod p(x) , x^512 mod p(x) */ + { 0x0e148e8252377a55, 0x4f256efcb82be955 }, + /* x^288 mod p(x) , x^320 mod p(x) , x^352 mod p(x) , x^384 mod p(x) */ + { 0x9c25531d19e65dde, 0xec1631edb2dea967 }, + /* x^160 mod p(x) , x^192 mod p(x) , x^224 mod p(x) , x^256 mod p(x) */ + { 0x790606ff9957c0a6, 0x5d27e147510ac59a }, + /* x^32 mod p(x) , x^64 mod p(x) , x^96 mod p(x) , x^128 mod p(x) */ + { 0x82f63b786ea2d55c, 0xa66805eb18b8ea18 } +#endif /* __LITTLE_ENDIAN__ */ + }; + +/* Barrett constants */ +/* 33 bit reflected Barrett constant m - (4^32)/n */ + +static const __vector unsigned long long v_Barrett_const[2] + __attribute__((aligned (16))) = { + /* x^64 div p(x) */ +#ifdef __LITTLE_ENDIAN__ + { 0x00000000dea713f1, 0x0000000000000000 }, + { 0x0000000105ec76f1, 0x0000000000000000 } +#else /* __LITTLE_ENDIAN__ */ + { 0x0000000000000000, 0x00000000dea713f1 }, + { 0x0000000000000000, 0x0000000105ec76f1 } +#endif /* __LITTLE_ENDIAN__ */ + }; +#endif /* POWER8_INTRINSICS */ + +#endif /* __ASSEMBLER__ */ diff --git 
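
Note: the vcrc_const, vcrc_short_const and v_Barrett_const tables added above are precomputed values of x^n mod p(x) for the CRC-32C polynomial p(x) = x^32 + 0x1EDC6F41. The POWER8 vpmsum folding code multiplies accumulated remainders by these constants to collapse 32 KiB (262144-bit) blocks down to 1024 bits, uses the short constants to finish the last 1024-2048 bits, and applies the Barrett constants for the final 64-to-32-bit reduction. As a rough, stand-alone illustration of what one such constant is (not the generator actually used for these tables, and ignoring the bit reflection and the "<< 1" applied to the stored values), x^n mod p(x) over GF(2) can be computed with square-and-multiply; the helper names below are hypothetical:

#include <stdint.h>

/* Multiply two GF(2) polynomials (bit i = coefficient of x^i) modulo
   the CRC-32C polynomial x^32 + 0x1EDC6F41. */
static uint32_t gf2_mulmod_crc32c(uint32_t a, uint32_t b)
{
  uint32_t r = 0;
  for (int i = 31; i >= 0; i--)
  {
    uint32_t carry = r & 0x80000000u;   /* coefficient of x^31 */
    r <<= 1;                            /* r *= x */
    if (carry)
      r ^= 0x1EDC6F41u;                 /* reduce the x^32 term */
    if (b & (1u << i))
      r ^= a;
  }
  return r;
}

/* x^n mod p(x), e.g. n = 1024 or n = 261120 as in the comments above. */
static uint32_t gf2_xpow_mod_crc32c(uint64_t n)
{
  uint32_t result = 1;                  /* x^0 */
  uint32_t base = 2;                    /* x^1 */
  while (n)
  {
    if (n & 1)
      result = gf2_mulmod_crc32c(result, base);
    base = gf2_mulmod_crc32c(base, base);
    n >>= 1;
  }
  return result;
}
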
a/extra/crc32-vpmsum/crc32c_wrapper.c b/extra/crc32-vpmsum/crc32c_wrapper.c deleted file mode 100644 index b121d3e8c41..00000000000 --- a/extra/crc32-vpmsum/crc32c_wrapper.c +++ /dev/null @@ -1,78 +0,0 @@ -#ifdef __powerpc__ - -#define F crc32c_vpmsum -#define __F __crc32c_vpmsum - -#define CRC 0x1edc6f41 - -static const unsigned int crc_table[] = { - 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, - 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb, - 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, - 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, - 0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b, - 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384, - 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, - 0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b, - 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, - 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, - 0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5, - 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa, - 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, - 0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a, - 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, - 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, - 0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48, - 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957, - 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, - 0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198, - 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, - 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, - 0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8, - 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7, - 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, - 0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789, - 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, - 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, - 0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9, - 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6, - 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, - 0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829, - 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, - 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, - 0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043, - 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c, - 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, - 0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc, - 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, - 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, - 0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652, - 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d, - 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, - 0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982, - 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, - 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, - 0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2, - 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed, - 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, - 0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f, - 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, - 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, - 0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f, - 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540, - 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, - 0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f, - 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, - 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, - 0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321, - 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e, - 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, - 0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e, - 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, - 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351,}; 
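
Note: the crc_table being deleted here is the standard 256-entry, bit-reflected CRC-32C lookup table (generator 0x1EDC6F41, reflected form 0x82F63B78), which the wrapper used for the byte-at-a-time fallback path. A minimal sketch of how such a table is built and consumed (hypothetical helper names, not the deleted wrapper code itself):

#include <stdint.h>
#include <stddef.h>

static uint32_t crc32c_table[256];

static void crc32c_table_init(void)
{
  for (uint32_t i = 0; i < 256; i++)
  {
    uint32_t crc = i;
    for (int bit = 0; bit < 8; bit++)
      crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
    crc32c_table[i] = crc;              /* crc32c_table[1] == 0xf26b8303, as above */
  }
}

static uint32_t crc32c_bytewise(uint32_t crc, const void *buf, size_t len)
{
  const unsigned char *p = buf;
  crc = ~crc;                           /* CRC-32C uses pre- and post-inversion */
  while (len--)
    crc = crc32c_table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
  return ~crc;
}
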
- - -#include "crc32_wrapper.ic" - -#endif - diff --git a/extra/crc32-vpmsum/crc32ieee.S b/extra/crc32-vpmsum/crc32ieee.S deleted file mode 100644 index 42c4f77630f..00000000000 --- a/extra/crc32-vpmsum/crc32ieee.S +++ /dev/null @@ -1,14 +0,0 @@ -#ifdef __powerpc__ - -#define CONSTANTS .crc32_constants -#define SHORT_CONSTANTS .crc32_short_constants -#define BARRETT_CONSTANTS .crc32_barrett_constants - -#include "crc32ieee_constants.h" - -#define __F __crc32ieee_vpmsum - -#include "crc32.iS" - -#endif - diff --git a/extra/crc32-vpmsum/crc32ieee_constants.h b/extra/crc32-vpmsum/crc32ieee_constants.h index a99b1c0d859..2e07d2576ed 100644 --- a/extra/crc32-vpmsum/crc32ieee_constants.h +++ b/extra/crc32-vpmsum/crc32ieee_constants.h @@ -1,835 +1,1206 @@ -#ifndef CRC32_CONSTANTS_H -#define CRC32_CONSTANTS_H - -#ifdef __powerpc__ - -#define MAX_SIZE 32768 -CONSTANTS: - - /* Reduce 262144 kbits to 1024 bits */ - /* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */ - .octa 0x00000001651797d20000000099ea94a8 - - /* x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1 */ - .octa 0x0000000021e0d56c00000000945a8420 - - /* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */ - .octa 0x000000000f95ecaa0000000030762706 - - /* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */ - .octa 0x00000001ebd224ac00000001a52fc582 - - /* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */ - .octa 0x000000000ccb97ca00000001a4a7167a - - /* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */ - .octa 0x00000001006ec8a8000000000c18249a - - /* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */ - .octa 0x000000014f58f19600000000a924ae7c - - /* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */ - .octa 0x00000001a7192ca600000001e12ccc12 - - /* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */ - .octa 0x000000019a64bab200000000a0b9d4ac - - /* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */ - .octa 0x0000000014f4ed2e0000000095e8ddfe - - /* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */ - .octa 0x000000011092b6a200000000233fddc4 - - /* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */ - .octa 0x00000000c8a1629c00000001b4529b62 - - /* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */ - .octa 0x000000017bf32e8e00000001a7fa0e64 - - /* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */ - .octa 0x00000001f8cc658200000001b5334592 - - /* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */ - .octa 0x000000008631ddf0000000011f8ee1b4 - - /* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */ - .octa 0x000000007e5a76d0000000006252e632 - - /* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */ - .octa 0x000000002b09b31c00000000ab973e84 - - /* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */ - .octa 0x00000001b2df1f84000000007734f5ec - - /* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */ - .octa 0x00000001d6f56afc000000007c547798 - - /* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */ - .octa 0x00000001b9b5e70c000000007ec40210 - - /* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */ - .octa 0x0000000034b626d200000001ab1695a8 - - /* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */ - .octa 0x000000014c53479a0000000090494bba - - /* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */ - .octa 0x00000001a6d179a400000001123fb816 - - /* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */ - .octa 0x000000015abd16b400000001e188c74c - - /* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */ - .octa 0x00000000018f985200000001c2d3451c - - /* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */ - .octa 
0x000000001fb3084a00000000f55cf1ca - - /* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */ - .octa 0x00000000c53dfb0400000001a0531540 - - /* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */ - .octa 0x00000000e10c9ad60000000132cd7ebc - - /* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */ - .octa 0x0000000025aa994a0000000073ab7f36 - - /* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */ - .octa 0x00000000fa3a74c40000000041aed1c2 - - /* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */ - .octa 0x0000000033eb3f400000000136c53800 - - /* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */ - .octa 0x000000017193f2960000000126835a30 - - /* x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1 */ - .octa 0x0000000043f6c86a000000006241b502 - - /* x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1 */ - .octa 0x000000016b513ec600000000d5196ad4 - - /* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */ - .octa 0x00000000c8f25b4e000000009cfa769a - - /* x^225280 mod p(x)` << 1, x^225344 mod p(x)` << 1 */ - .octa 0x00000001a45048ec00000000920e5df4 - - /* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */ - .octa 0x000000000c4410040000000169dc310e - - /* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */ - .octa 0x000000000e17cad60000000009fc331c - - /* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */ - .octa 0x00000001253ae964000000010d94a81e - - /* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */ - .octa 0x00000001d7c88ebc0000000027a20ab2 - - /* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */ - .octa 0x00000001e7ca913a0000000114f87504 - - /* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */ - .octa 0x0000000033ed078a000000004b076d96 - - /* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */ - .octa 0x00000000e1839c7800000000da4d1e74 - - /* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */ - .octa 0x00000001322b267e000000001b81f672 - - /* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */ - .octa 0x00000000638231b6000000009367c988 - - /* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */ - .octa 0x00000001ee7f16f400000001717214ca - - /* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */ - .octa 0x0000000117d9924a000000009f47d820 - - /* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */ - .octa 0x00000000e1a9e0c4000000010d9a47d2 - - /* x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1 */ - .octa 0x00000001403731dc00000000a696c58c - - /* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */ - .octa 0x00000001a5ea9682000000002aa28ec6 - - /* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */ - .octa 0x0000000101c5c57800000001fe18fd9a - - /* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */ - .octa 0x00000000dddf6494000000019d4fc1ae - - /* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */ - .octa 0x00000000f1c3db2800000001ba0e3dea - - /* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */ - .octa 0x000000013112fb9c0000000074b59a5e - - /* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */ - .octa 0x00000000b680b90600000000f2b5ea98 - - /* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */ - .octa 0x000000001a2829320000000187132676 - - /* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */ - .octa 0x0000000089406e7e000000010a8c6ad4 - - /* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */ - .octa 0x00000001def6be8c00000001e21dfe70 - - /* x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */ - .octa 0x000000007525872800000001da0050e4 - - /* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */ - .octa 0x000000019536090a00000000772172ae - - /* x^199680 mod p(x)` << 1, x^199744 mod p(x)` 
<< 1 */ - .octa 0x00000000f2455bfc00000000e47724aa - - /* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */ - .octa 0x000000018c40baf4000000003cd63ac4 - - /* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */ - .octa 0x000000004cd390d400000001bf47d352 - - /* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */ - .octa 0x00000001e4ece95a000000018dc1d708 - - /* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */ - .octa 0x000000001a3ee918000000002d4620a4 - - /* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */ - .octa 0x000000007c652fb80000000058fd1740 - - /* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */ - .octa 0x000000011c67842c00000000dadd9bfc - - /* x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1 */ - .octa 0x00000000254f759c00000001ea2140be - - /* x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1 */ - .octa 0x000000007ece94ca000000009de128ba - - /* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */ - .octa 0x0000000038f258c2000000013ac3aa8e - - /* x^189440 mod p(x)` << 1, x^189504 mod p(x)` << 1 */ - .octa 0x00000001cdf17b000000000099980562 - - /* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */ - .octa 0x000000011f882c1600000001c1579c86 - - /* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */ - .octa 0x0000000100093fc80000000068dbbf94 - - /* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */ - .octa 0x00000001cd684f16000000004509fb04 - - /* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */ - .octa 0x000000004bc6a70a00000001202f6398 - - /* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */ - .octa 0x000000004fc7e8e4000000013aea243e - - /* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */ - .octa 0x0000000130103f1c00000001b4052ae6 - - /* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */ - .octa 0x0000000111b0024c00000001cd2a0ae8 - - /* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */ - .octa 0x000000010b3079da00000001fe4aa8b4 - - /* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */ - .octa 0x000000010192bcc200000001d1559a42 - - /* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */ - .octa 0x0000000074838d5000000001f3e05ecc - - /* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */ - .octa 0x000000001b20f5200000000104ddd2cc - - /* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */ - .octa 0x0000000050c3590a000000015393153c - - /* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */ - .octa 0x00000000b41cac8e0000000057e942c6 - - /* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */ - .octa 0x000000000c72cc78000000012c633850 - - /* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */ - .octa 0x0000000030cdb03200000000ebcaae4c - - /* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */ - .octa 0x000000013e09fc32000000013ee532a6 - - /* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */ - .octa 0x000000001ed624d200000001bf0cbc7e - - /* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */ - .octa 0x00000000781aee1a00000000d50b7a5a - - /* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */ - .octa 0x00000001c4d8348c0000000002fca6e8 - - /* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */ - .octa 0x0000000057a40336000000007af40044 - - /* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */ - .octa 0x00000000855449400000000016178744 - - /* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */ - .octa 0x000000019cd21e80000000014c177458 - - /* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */ - .octa 0x000000013eb95bc0000000011b6ddf04 - - /* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */ - .octa 0x00000001dfc9fdfc00000001f3e29ccc - - /* x^163840 mod p(x)` << 1, 
x^163904 mod p(x)` << 1 */ - .octa 0x00000000cd028bc20000000135ae7562 - - /* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */ - .octa 0x0000000090db8c440000000190ef812c - - /* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */ - .octa 0x000000010010a4ce0000000067a2c786 - - /* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */ - .octa 0x00000001c8f4c72c0000000048b9496c - - /* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */ - .octa 0x000000001c26170c000000015a422de6 - - /* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */ - .octa 0x00000000e3fccf6800000001ef0e3640 - - /* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */ - .octa 0x00000000d513ed2400000001006d2d26 - - /* x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */ - .octa 0x00000000141beada00000001170d56d6 - - /* x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1 */ - .octa 0x000000011071aea000000000a5fb613c - - /* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */ - .octa 0x000000012e19080a0000000040bbf7fc - - /* x^153600 mod p(x)` << 1, x^153664 mod p(x)` << 1 */ - .octa 0x0000000100ecf826000000016ac3a5b2 - - /* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */ - .octa 0x0000000069b0941200000000abf16230 - - /* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 */ - .octa 0x0000000122297bac00000001ebe23fac - - /* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */ - .octa 0x00000000e9e4b068000000008b6a0894 - - /* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */ - .octa 0x000000004b38651a00000001288ea478 - - /* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */ - .octa 0x00000001468360e2000000016619c442 - - /* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */ - .octa 0x00000000121c24080000000086230038 - - /* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */ - .octa 0x00000000da7e7d08000000017746a756 - - /* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */ - .octa 0x00000001058d76520000000191b8f8f8 - - /* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */ - .octa 0x000000014a098a90000000008e167708 - - /* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */ - .octa 0x0000000020dbe72e0000000148b22d54 - - /* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */ - .octa 0x000000011e7323e80000000044ba2c3c - - /* x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1 */ - .octa 0x00000000d5d4bf9400000000b54d2b52 - - /* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */ - .octa 0x0000000199d8746c0000000005a4fd8a - - /* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */ - .octa 0x00000000ce9ca8a00000000139f9fc46 - - /* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */ - .octa 0x00000000136edece000000015a1fa824 - - /* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */ - .octa 0x000000019b92a068000000000a61ae4c - - /* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */ - .octa 0x0000000071d622060000000145e9113e - - /* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */ - .octa 0x00000000dfc50158000000006a348448 - - /* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */ - .octa 0x00000001517626bc000000004d80a08c - - /* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */ - .octa 0x0000000148d1e4fa000000014b6837a0 - - /* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */ - .octa 0x0000000094d8266e000000016896a7fc - - /* x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */ - .octa 0x00000000606c5e34000000014f187140 - - /* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */ - .octa 0x000000019766beaa000000019581b9da - - /* x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1 */ - .octa 0x00000001d80c506c00000001091bc984 - - /* x^128000 
mod p(x)` << 1, x^128064 mod p(x)` << 1 */ - .octa 0x000000001e73837c000000001067223c - - /* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */ - .octa 0x0000000064d587de00000001ab16ea02 - - /* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */ - .octa 0x00000000f4a507b0000000013c4598a8 - - /* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */ - .octa 0x0000000040e342fc00000000b3735430 - - /* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */ - .octa 0x00000001d5ad9c3a00000001bb3fc0c0 - - /* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */ - .octa 0x0000000094a691a400000001570ae19c - - /* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */ - .octa 0x00000001271ecdfa00000001ea910712 - - /* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */ - .octa 0x000000009e54475a0000000167127128 - - /* x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1 */ - .octa 0x00000000c9c099ee0000000019e790a2 - - /* x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */ - .octa 0x000000009a2f736c000000003788f710 - - /* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */ - .octa 0x00000000bb9f499600000001682a160e - - /* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */ - .octa 0x00000001db688050000000007f0ebd2e - - /* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */ - .octa 0x00000000e9b10af4000000002b032080 - - /* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */ - .octa 0x000000012d4545e400000000cfd1664a - - /* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */ - .octa 0x000000000361139c00000000aa1181c2 - - /* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */ - .octa 0x00000001a5a1a3a800000000ddd08002 - - /* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */ - .octa 0x000000006844e0b000000000e8dd0446 - - /* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */ - .octa 0x00000000c3762f2800000001bbd94a00 - - /* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */ - .octa 0x00000001d26287a200000000ab6cd180 - - /* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */ - .octa 0x00000001f6f0bba80000000031803ce2 - - /* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */ - .octa 0x000000002ffabd620000000024f40b0c - - /* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */ - .octa 0x00000000fb4516b800000001ba1d9834 - - /* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */ - .octa 0x000000018cfa961c0000000104de61aa - - /* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */ - .octa 0x000000019e588d520000000113e40d46 - - /* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */ - .octa 0x00000001180f0bbc00000001415598a0 - - /* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */ - .octa 0x00000000e1d9177a00000000bf6c8c90 - - /* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */ - .octa 0x0000000105abc27c00000001788b0504 - - /* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */ - .octa 0x00000000972e4a580000000038385d02 - - /* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */ - .octa 0x0000000183499a5e00000001b6c83844 - - /* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */ - .octa 0x00000001c96a8cca0000000051061a8a - - /* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */ - .octa 0x00000001a1a5b60c000000017351388a - - /* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */ - .octa 0x00000000e4b6ac9c0000000132928f92 - - /* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */ - .octa 0x00000001807e7f5a00000000e6b4f48a - - /* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */ - .octa 0x000000017a7e3bc80000000039d15e90 - - /* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */ - .octa 0x00000000d73975da00000000312d6074 - - /* x^92160 
mod p(x)` << 1, x^92224 mod p(x)` << 1 */ - .octa 0x000000017375d038000000017bbb2cc4 - - /* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */ - .octa 0x00000000193680bc000000016ded3e18 - - /* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */ - .octa 0x00000000999b06f600000000f1638b16 - - /* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */ - .octa 0x00000001f685d2b800000001d38b9ecc - - /* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */ - .octa 0x00000001f4ecbed2000000018b8d09dc - - /* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */ - .octa 0x00000000ba16f1a000000000e7bc27d2 - - /* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */ - .octa 0x0000000115aceac400000000275e1e96 - - /* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */ - .octa 0x00000001aeff629200000000e2e3031e - - /* x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1 */ - .octa 0x000000009640124c00000001041c84d8 - - /* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */ - .octa 0x0000000114f41f0200000000706ce672 - - /* x^81920 mod p(x)` << 1, x^81984 mod p(x)` << 1 */ - .octa 0x000000009c5f3586000000015d5070da - - /* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */ - .octa 0x00000001878275fa0000000038f9493a - - /* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */ - .octa 0x00000000ddc42ce800000000a3348a76 - - /* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */ - .octa 0x0000000181d2c73a00000001ad0aab92 - - /* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */ - .octa 0x0000000141c9320a000000019e85f712 - - /* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */ - .octa 0x000000015235719a000000005a871e76 - - /* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */ - .octa 0x00000000be27d804000000017249c662 - - /* x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */ - .octa 0x000000006242d45a000000003a084712 - - /* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */ - .octa 0x000000009a53638e00000000ed438478 - - /* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */ - .octa 0x00000001001ecfb600000000abac34cc - - /* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */ - .octa 0x000000016d7c2d64000000005f35ef3e - - /* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */ - .octa 0x00000001d0ce46c00000000047d6608c - - /* x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */ - .octa 0x0000000124c907b4000000002d01470e - - /* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */ - .octa 0x0000000018a555ca0000000158bbc7b0 - - /* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */ - .octa 0x000000006b0980bc00000000c0a23e8e - - /* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */ - .octa 0x000000008bbba96400000001ebd85c88 - - /* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */ - .octa 0x00000001070a5a1e000000019ee20bb2 - - /* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */ - .octa 0x000000002204322a00000001acabf2d6 - - /* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */ - .octa 0x00000000a27524d000000001b7963d56 - - /* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */ - .octa 0x0000000020b1e4ba000000017bffa1fe - - /* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */ - .octa 0x0000000032cc27fc000000001f15333e - - /* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */ - .octa 0x0000000044dd22b8000000018593129e - - /* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */ - .octa 0x00000000dffc9e0a000000019cb32602 - - /* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */ - .octa 0x00000001b7a0ed140000000142b05cc8 - - /* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */ - .octa 0x00000000c784248800000001be49e7a4 - - /* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */ - .octa 
0x00000001c02a4fee0000000108f69d6c - - /* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */ - .octa 0x000000003c273778000000006c0971f0 - - /* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */ - .octa 0x00000001d63f8894000000005b16467a - - /* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */ - .octa 0x000000006be557d600000001551a628e - - /* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */ - .octa 0x000000006a7806ea000000019e42ea92 - - /* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */ - .octa 0x000000016155aa0c000000012fa83ff2 - - /* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */ - .octa 0x00000000908650ac000000011ca9cde0 - - /* x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1 */ - .octa 0x00000000aa5a808400000000c8e5cd74 - - /* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */ - .octa 0x0000000191bb500a0000000096c27f0c - - /* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */ - .octa 0x0000000064e9bed0000000002baed926 - - /* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */ - .octa 0x000000009444f302000000017c8de8d2 - - /* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */ - .octa 0x000000019db07d3c00000000d43d6068 - - /* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */ - .octa 0x00000001359e3e6e00000000cb2c4b26 - - /* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */ - .octa 0x00000001e4f10dd20000000145b8da26 - - /* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */ - .octa 0x0000000124f5735e000000018fff4b08 - - /* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */ - .octa 0x0000000124760a4c0000000150b58ed0 - - /* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */ - .octa 0x000000000f1fc18600000001549f39bc - - /* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */ - .octa 0x00000000150e4cc400000000ef4d2f42 - - /* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */ - .octa 0x000000002a6204e800000001b1468572 - - /* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */ - .octa 0x00000000beb1d432000000013d7403b2 - - /* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */ - .octa 0x0000000135f3f1f000000001a4681842 - - /* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */ - .octa 0x0000000074fe22320000000167714492 - - /* x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1 */ - .octa 0x000000001ac6e2ba00000001e599099a - - /* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */ - .octa 0x0000000013fca91e00000000fe128194 - - /* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */ - .octa 0x0000000183f4931e0000000077e8b990 - - /* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */ - .octa 0x00000000b6d9b4e400000001a267f63a - - /* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */ - .octa 0x00000000b518865600000001945c245a - - /* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */ - .octa 0x0000000027a81a840000000149002e76 - - /* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */ - .octa 0x000000012569925800000001bb8310a4 - - /* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */ - .octa 0x00000001b23de796000000019ec60bcc - - /* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */ - .octa 0x00000000fe4365dc000000012d8590ae - - /* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */ - .octa 0x00000000c68f497a0000000065b00684 - - /* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */ - .octa 0x00000000fbf521ee000000015e5aeadc - - /* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */ - .octa 0x000000015eac337800000000b77ff2b0 - - /* x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1 */ - .octa 0x0000000134914b900000000188da2ff6 - - /* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */ - .octa 0x0000000016335cfe0000000063da929a - - /* x^19456 mod 
p(x)` << 1, x^19520 mod p(x)` << 1 */ - .octa 0x000000010372d10c00000001389caa80 - - /* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */ - .octa 0x000000015097b908000000013db599d2 - - /* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */ - .octa 0x00000001227a75720000000122505a86 - - /* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */ - .octa 0x000000009a8f75c0000000016bd72746 - - /* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */ - .octa 0x00000000682c77a200000001c3faf1d4 - - /* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */ - .octa 0x00000000231f091c00000001111c826c - - /* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */ - .octa 0x000000007d4439f200000000153e9fb2 - - /* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */ - .octa 0x000000017e221efc000000002b1f7b60 - - /* x^11264 mod p(x)` << 1, x^11328 mod p(x)` << 1 */ - .octa 0x0000000167457c3800000000b1dba570 - - /* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */ - .octa 0x00000000bdf081c400000001f6397b76 - - /* x^9216 mod p(x)` << 1, x^9280 mod p(x)` << 1 */ - .octa 0x000000016286d6b00000000156335214 - - /* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */ - .octa 0x00000000c84f001c00000001d70e3986 - - /* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */ - .octa 0x0000000064efe7c0000000003701a774 - - /* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */ - .octa 0x000000000ac2d90400000000ac81ef72 - - /* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */ - .octa 0x00000000fd226d140000000133212464 - - /* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */ - .octa 0x000000011cfd42e000000000e4e45610 - - /* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */ - .octa 0x000000016e5a5678000000000c1bd370 - - /* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */ - .octa 0x00000001d888fe2200000001a7b9e7a6 - - /* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */ - .octa 0x00000001af77fcd4000000007d657a10 - -SHORT_CONSTANTS: - - /* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the trailing 32 bits of zeros */ - /* x^1952 mod p(x)`, x^1984 mod p(x)`, x^2016 mod p(x)`, x^2048 mod p(x)` */ - .octa 0xed837b2613e8221e99168a18ec447f11 - - /* x^1824 mod p(x)`, x^1856 mod p(x)`, x^1888 mod p(x)`, x^1920 mod p(x)` */ - .octa 0xc8acdd8147b9ce5ae23e954e8fd2cd3c - - /* x^1696 mod p(x)`, x^1728 mod p(x)`, x^1760 mod p(x)`, x^1792 mod p(x)` */ - .octa 0xd9ad6d87d4277e2592f8befe6b1d2b53 - - /* x^1568 mod p(x)`, x^1600 mod p(x)`, x^1632 mod p(x)`, x^1664 mod p(x)` */ - .octa 0xc10ec5e033fbca3bf38a3556291ea462 - - /* x^1440 mod p(x)`, x^1472 mod p(x)`, x^1504 mod p(x)`, x^1536 mod p(x)` */ - .octa 0xc0b55b0e82e02e2f974ac56262b6ca4b - - /* x^1312 mod p(x)`, x^1344 mod p(x)`, x^1376 mod p(x)`, x^1408 mod p(x)` */ - .octa 0x71aa1df0e172334d855712b3784d2a56 - - /* x^1184 mod p(x)`, x^1216 mod p(x)`, x^1248 mod p(x)`, x^1280 mod p(x)` */ - .octa 0xfee3053e3969324da5abe9f80eaee722 - - /* x^1056 mod p(x)`, x^1088 mod p(x)`, x^1120 mod p(x)`, x^1152 mod p(x)` */ - .octa 0xf44779b93eb2bd081fa0943ddb54814c - - /* x^928 mod p(x)`, x^960 mod p(x)`, x^992 mod p(x)`, x^1024 mod p(x)` */ - .octa 0xf5449b3f00cc3374a53ff440d7bbfe6a - - /* x^800 mod p(x)`, x^832 mod p(x)`, x^864 mod p(x)`, x^896 mod p(x)` */ - .octa 0x6f8346e1d777606eebe7e3566325605c - - /* x^672 mod p(x)`, x^704 mod p(x)`, x^736 mod p(x)`, x^768 mod p(x)` */ - .octa 0xe3ab4f2ac0b95347c65a272ce5b592b8 - - /* x^544 mod p(x)`, x^576 mod p(x)`, x^608 mod p(x)`, x^640 mod p(x)` */ - .octa 0xaa2215ea329ecc115705a9ca4721589f - - /* x^416 mod p(x)`, x^448 mod p(x)`, x^480 mod p(x)`, x^512 mod p(x)` */ - .octa 
0x1ed8f66ed95efd26e3720acb88d14467 - - /* x^288 mod p(x)`, x^320 mod p(x)`, x^352 mod p(x)`, x^384 mod p(x)` */ - .octa 0x78ed02d5a700e96aba1aca0315141c31 - - /* x^160 mod p(x)`, x^192 mod p(x)`, x^224 mod p(x)`, x^256 mod p(x)` */ - .octa 0xba8ccbe832b39da3ad2a31b3ed627dae - - /* x^32 mod p(x)`, x^64 mod p(x)`, x^96 mod p(x)`, x^128 mod p(x)` */ - .octa 0xedb88320b1e6b0926655004fa06a2517 - - -BARRETT_CONSTANTS: - /* 33 bit reflected Barrett constant m - (4^32)/n */ - .octa 0x000000000000000000000001f7011641 /* x^64 div p(x)` */ - /* 33 bit reflected Barrett constant n */ - .octa 0x000000000000000000000001db710641 - -#endif /* __powerpc__ */ - -#endif +/* +* +* THIS FILE IS GENERATED WITH +./crc32_constants -c -x -r 0x4c11db7 + +* This is from https://github.com/antonblanchard/crc32-vpmsum/ +* DO NOT MODIFY IT MANUALLY! +* +*/ + +#define CRC 0x4c11db7 +#define CRC_XOR +#define REFLECT +#define MAX_SIZE 32768 + +#ifndef __ASSEMBLER__ +#ifdef CRC_TABLE +static const unsigned int crc_table[] = { + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, + 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, + 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, + 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, + 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, + 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, + 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, + 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, + 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, + 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, + 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, + 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, + 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, + 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, + 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, + 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, + 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, + 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, + 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, + 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, + 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, + 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, + 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, + 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, + 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, + 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, + 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, + 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, + 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, + 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, + 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, + 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, + 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, + 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, + 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, + 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, + 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, + 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, + 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, + 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, + 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, + 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, + 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, + 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, + 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, + 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, + 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 
0x0cb61b38, + 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, + 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, + 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, + 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, + 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, + 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, + 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, + 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, + 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, + 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, + 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, + 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,}; + +#endif /* CRC_TABLE */ +#ifdef POWER8_INTRINSICS + +/* Constants */ + +/* Reduce 262144 kbits to 1024 bits */ +static const __vector unsigned long long vcrc_const[255] + __attribute__((aligned (16))) = { +#ifdef __LITTLE_ENDIAN__ + /* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */ + { 0x0000000099ea94a8, 0x00000001651797d2 }, + /* x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1 */ + { 0x00000000945a8420, 0x0000000021e0d56c }, + /* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */ + { 0x0000000030762706, 0x000000000f95ecaa }, + /* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */ + { 0x00000001a52fc582, 0x00000001ebd224ac }, + /* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */ + { 0x00000001a4a7167a, 0x000000000ccb97ca }, + /* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */ + { 0x000000000c18249a, 0x00000001006ec8a8 }, + /* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */ + { 0x00000000a924ae7c, 0x000000014f58f196 }, + /* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */ + { 0x00000001e12ccc12, 0x00000001a7192ca6 }, + /* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */ + { 0x00000000a0b9d4ac, 0x000000019a64bab2 }, + /* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */ + { 0x0000000095e8ddfe, 0x0000000014f4ed2e }, + /* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */ + { 0x00000000233fddc4, 0x000000011092b6a2 }, + /* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */ + { 0x00000001b4529b62, 0x00000000c8a1629c }, + /* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */ + { 0x00000001a7fa0e64, 0x000000017bf32e8e }, + /* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */ + { 0x00000001b5334592, 0x00000001f8cc6582 }, + /* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */ + { 0x000000011f8ee1b4, 0x000000008631ddf0 }, + /* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */ + { 0x000000006252e632, 0x000000007e5a76d0 }, + /* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */ + { 0x00000000ab973e84, 0x000000002b09b31c }, + /* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */ + { 0x000000007734f5ec, 0x00000001b2df1f84 }, + /* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */ + { 0x000000007c547798, 0x00000001d6f56afc }, + /* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */ + { 0x000000007ec40210, 0x00000001b9b5e70c }, + /* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */ + { 0x00000001ab1695a8, 0x0000000034b626d2 }, + /* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */ + { 0x0000000090494bba, 0x000000014c53479a }, + /* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */ + { 0x00000001123fb816, 0x00000001a6d179a4 }, + /* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */ + { 0x00000001e188c74c, 0x000000015abd16b4 }, + /* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */ + { 0x00000001c2d3451c, 0x00000000018f9852 }, + /* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */ + { 0x00000000f55cf1ca, 
0x000000001fb3084a }, + /* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */ + { 0x00000001a0531540, 0x00000000c53dfb04 }, + /* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */ + { 0x0000000132cd7ebc, 0x00000000e10c9ad6 }, + /* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */ + { 0x0000000073ab7f36, 0x0000000025aa994a }, + /* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */ + { 0x0000000041aed1c2, 0x00000000fa3a74c4 }, + /* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */ + { 0x0000000136c53800, 0x0000000033eb3f40 }, + /* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */ + { 0x0000000126835a30, 0x000000017193f296 }, + /* x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1 */ + { 0x000000006241b502, 0x0000000043f6c86a }, + /* x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1 */ + { 0x00000000d5196ad4, 0x000000016b513ec6 }, + /* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */ + { 0x000000009cfa769a, 0x00000000c8f25b4e }, + /* x^225280 mod p(x)` << 1, x^225344 mod p(x)` << 1 */ + { 0x00000000920e5df4, 0x00000001a45048ec }, + /* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */ + { 0x0000000169dc310e, 0x000000000c441004 }, + /* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */ + { 0x0000000009fc331c, 0x000000000e17cad6 }, + /* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */ + { 0x000000010d94a81e, 0x00000001253ae964 }, + /* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */ + { 0x0000000027a20ab2, 0x00000001d7c88ebc }, + /* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */ + { 0x0000000114f87504, 0x00000001e7ca913a }, + /* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */ + { 0x000000004b076d96, 0x0000000033ed078a }, + /* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */ + { 0x00000000da4d1e74, 0x00000000e1839c78 }, + /* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */ + { 0x000000001b81f672, 0x00000001322b267e }, + /* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */ + { 0x000000009367c988, 0x00000000638231b6 }, + /* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */ + { 0x00000001717214ca, 0x00000001ee7f16f4 }, + /* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */ + { 0x000000009f47d820, 0x0000000117d9924a }, + /* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */ + { 0x000000010d9a47d2, 0x00000000e1a9e0c4 }, + /* x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1 */ + { 0x00000000a696c58c, 0x00000001403731dc }, + /* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */ + { 0x000000002aa28ec6, 0x00000001a5ea9682 }, + /* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */ + { 0x00000001fe18fd9a, 0x0000000101c5c578 }, + /* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */ + { 0x000000019d4fc1ae, 0x00000000dddf6494 }, + /* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */ + { 0x00000001ba0e3dea, 0x00000000f1c3db28 }, + /* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */ + { 0x0000000074b59a5e, 0x000000013112fb9c }, + /* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */ + { 0x00000000f2b5ea98, 0x00000000b680b906 }, + /* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */ + { 0x0000000187132676, 0x000000001a282932 }, + /* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */ + { 0x000000010a8c6ad4, 0x0000000089406e7e }, + /* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */ + { 0x00000001e21dfe70, 0x00000001def6be8c }, + /* x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */ + { 0x00000001da0050e4, 0x0000000075258728 }, + /* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */ + { 0x00000000772172ae, 0x000000019536090a }, + /* x^199680 mod p(x)` << 1, 
x^199744 mod p(x)` << 1 */ + { 0x00000000e47724aa, 0x00000000f2455bfc }, + /* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */ + { 0x000000003cd63ac4, 0x000000018c40baf4 }, + /* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */ + { 0x00000001bf47d352, 0x000000004cd390d4 }, + /* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */ + { 0x000000018dc1d708, 0x00000001e4ece95a }, + /* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */ + { 0x000000002d4620a4, 0x000000001a3ee918 }, + /* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */ + { 0x0000000058fd1740, 0x000000007c652fb8 }, + /* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */ + { 0x00000000dadd9bfc, 0x000000011c67842c }, + /* x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1 */ + { 0x00000001ea2140be, 0x00000000254f759c }, + /* x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1 */ + { 0x000000009de128ba, 0x000000007ece94ca }, + /* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */ + { 0x000000013ac3aa8e, 0x0000000038f258c2 }, + /* x^189440 mod p(x)` << 1, x^189504 mod p(x)` << 1 */ + { 0x0000000099980562, 0x00000001cdf17b00 }, + /* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */ + { 0x00000001c1579c86, 0x000000011f882c16 }, + /* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */ + { 0x0000000068dbbf94, 0x0000000100093fc8 }, + /* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */ + { 0x000000004509fb04, 0x00000001cd684f16 }, + /* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */ + { 0x00000001202f6398, 0x000000004bc6a70a }, + /* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */ + { 0x000000013aea243e, 0x000000004fc7e8e4 }, + /* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */ + { 0x00000001b4052ae6, 0x0000000130103f1c }, + /* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */ + { 0x00000001cd2a0ae8, 0x0000000111b0024c }, + /* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */ + { 0x00000001fe4aa8b4, 0x000000010b3079da }, + /* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */ + { 0x00000001d1559a42, 0x000000010192bcc2 }, + /* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */ + { 0x00000001f3e05ecc, 0x0000000074838d50 }, + /* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */ + { 0x0000000104ddd2cc, 0x000000001b20f520 }, + /* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */ + { 0x000000015393153c, 0x0000000050c3590a }, + /* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */ + { 0x0000000057e942c6, 0x00000000b41cac8e }, + /* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */ + { 0x000000012c633850, 0x000000000c72cc78 }, + /* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */ + { 0x00000000ebcaae4c, 0x0000000030cdb032 }, + /* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */ + { 0x000000013ee532a6, 0x000000013e09fc32 }, + /* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */ + { 0x00000001bf0cbc7e, 0x000000001ed624d2 }, + /* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */ + { 0x00000000d50b7a5a, 0x00000000781aee1a }, + /* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */ + { 0x0000000002fca6e8, 0x00000001c4d8348c }, + /* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */ + { 0x000000007af40044, 0x0000000057a40336 }, + /* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */ + { 0x0000000016178744, 0x0000000085544940 }, + /* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */ + { 0x000000014c177458, 0x000000019cd21e80 }, + /* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */ + { 0x000000011b6ddf04, 0x000000013eb95bc0 }, + /* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */ + { 0x00000001f3e29ccc, 
0x00000001dfc9fdfc }, + /* x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1 */ + { 0x0000000135ae7562, 0x00000000cd028bc2 }, + /* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */ + { 0x0000000190ef812c, 0x0000000090db8c44 }, + /* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */ + { 0x0000000067a2c786, 0x000000010010a4ce }, + /* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */ + { 0x0000000048b9496c, 0x00000001c8f4c72c }, + /* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */ + { 0x000000015a422de6, 0x000000001c26170c }, + /* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */ + { 0x00000001ef0e3640, 0x00000000e3fccf68 }, + /* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */ + { 0x00000001006d2d26, 0x00000000d513ed24 }, + /* x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */ + { 0x00000001170d56d6, 0x00000000141beada }, + /* x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1 */ + { 0x00000000a5fb613c, 0x000000011071aea0 }, + /* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */ + { 0x0000000040bbf7fc, 0x000000012e19080a }, + /* x^153600 mod p(x)` << 1, x^153664 mod p(x)` << 1 */ + { 0x000000016ac3a5b2, 0x0000000100ecf826 }, + /* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */ + { 0x00000000abf16230, 0x0000000069b09412 }, + /* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 */ + { 0x00000001ebe23fac, 0x0000000122297bac }, + /* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */ + { 0x000000008b6a0894, 0x00000000e9e4b068 }, + /* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */ + { 0x00000001288ea478, 0x000000004b38651a }, + /* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */ + { 0x000000016619c442, 0x00000001468360e2 }, + /* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */ + { 0x0000000086230038, 0x00000000121c2408 }, + /* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */ + { 0x000000017746a756, 0x00000000da7e7d08 }, + /* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */ + { 0x0000000191b8f8f8, 0x00000001058d7652 }, + /* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */ + { 0x000000008e167708, 0x000000014a098a90 }, + /* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */ + { 0x0000000148b22d54, 0x0000000020dbe72e }, + /* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */ + { 0x0000000044ba2c3c, 0x000000011e7323e8 }, + /* x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1 */ + { 0x00000000b54d2b52, 0x00000000d5d4bf94 }, + /* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */ + { 0x0000000005a4fd8a, 0x0000000199d8746c }, + /* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */ + { 0x0000000139f9fc46, 0x00000000ce9ca8a0 }, + /* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */ + { 0x000000015a1fa824, 0x00000000136edece }, + /* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */ + { 0x000000000a61ae4c, 0x000000019b92a068 }, + /* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */ + { 0x0000000145e9113e, 0x0000000071d62206 }, + /* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */ + { 0x000000006a348448, 0x00000000dfc50158 }, + /* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */ + { 0x000000004d80a08c, 0x00000001517626bc }, + /* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */ + { 0x000000014b6837a0, 0x0000000148d1e4fa }, + /* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */ + { 0x000000016896a7fc, 0x0000000094d8266e }, + /* x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */ + { 0x000000014f187140, 0x00000000606c5e34 }, + /* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */ + { 0x000000019581b9da, 0x000000019766beaa }, + /* x^129024 mod p(x)` << 1, 
x^129088 mod p(x)` << 1 */ + { 0x00000001091bc984, 0x00000001d80c506c }, + /* x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1 */ + { 0x000000001067223c, 0x000000001e73837c }, + /* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */ + { 0x00000001ab16ea02, 0x0000000064d587de }, + /* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */ + { 0x000000013c4598a8, 0x00000000f4a507b0 }, + /* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */ + { 0x00000000b3735430, 0x0000000040e342fc }, + /* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */ + { 0x00000001bb3fc0c0, 0x00000001d5ad9c3a }, + /* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */ + { 0x00000001570ae19c, 0x0000000094a691a4 }, + /* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */ + { 0x00000001ea910712, 0x00000001271ecdfa }, + /* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */ + { 0x0000000167127128, 0x000000009e54475a }, + /* x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1 */ + { 0x0000000019e790a2, 0x00000000c9c099ee }, + /* x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */ + { 0x000000003788f710, 0x000000009a2f736c }, + /* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */ + { 0x00000001682a160e, 0x00000000bb9f4996 }, + /* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */ + { 0x000000007f0ebd2e, 0x00000001db688050 }, + /* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */ + { 0x000000002b032080, 0x00000000e9b10af4 }, + /* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */ + { 0x00000000cfd1664a, 0x000000012d4545e4 }, + /* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */ + { 0x00000000aa1181c2, 0x000000000361139c }, + /* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */ + { 0x00000000ddd08002, 0x00000001a5a1a3a8 }, + /* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */ + { 0x00000000e8dd0446, 0x000000006844e0b0 }, + /* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */ + { 0x00000001bbd94a00, 0x00000000c3762f28 }, + /* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */ + { 0x00000000ab6cd180, 0x00000001d26287a2 }, + /* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */ + { 0x0000000031803ce2, 0x00000001f6f0bba8 }, + /* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */ + { 0x0000000024f40b0c, 0x000000002ffabd62 }, + /* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */ + { 0x00000001ba1d9834, 0x00000000fb4516b8 }, + /* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */ + { 0x0000000104de61aa, 0x000000018cfa961c }, + /* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */ + { 0x0000000113e40d46, 0x000000019e588d52 }, + /* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */ + { 0x00000001415598a0, 0x00000001180f0bbc }, + /* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */ + { 0x00000000bf6c8c90, 0x00000000e1d9177a }, + /* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */ + { 0x00000001788b0504, 0x0000000105abc27c }, + /* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */ + { 0x0000000038385d02, 0x00000000972e4a58 }, + /* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */ + { 0x00000001b6c83844, 0x0000000183499a5e }, + /* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */ + { 0x0000000051061a8a, 0x00000001c96a8cca }, + /* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */ + { 0x000000017351388a, 0x00000001a1a5b60c }, + /* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */ + { 0x0000000132928f92, 0x00000000e4b6ac9c }, + /* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */ + { 0x00000000e6b4f48a, 0x00000001807e7f5a }, + /* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */ + { 0x0000000039d15e90, 
0x000000017a7e3bc8 }, + /* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */ + { 0x00000000312d6074, 0x00000000d73975da }, + /* x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1 */ + { 0x000000017bbb2cc4, 0x000000017375d038 }, + /* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */ + { 0x000000016ded3e18, 0x00000000193680bc }, + /* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */ + { 0x00000000f1638b16, 0x00000000999b06f6 }, + /* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */ + { 0x00000001d38b9ecc, 0x00000001f685d2b8 }, + /* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */ + { 0x000000018b8d09dc, 0x00000001f4ecbed2 }, + /* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */ + { 0x00000000e7bc27d2, 0x00000000ba16f1a0 }, + /* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */ + { 0x00000000275e1e96, 0x0000000115aceac4 }, + /* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */ + { 0x00000000e2e3031e, 0x00000001aeff6292 }, + /* x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1 */ + { 0x00000001041c84d8, 0x000000009640124c }, + /* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */ + { 0x00000000706ce672, 0x0000000114f41f02 }, + /* x^81920 mod p(x)` << 1, x^81984 mod p(x)` << 1 */ + { 0x000000015d5070da, 0x000000009c5f3586 }, + /* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */ + { 0x0000000038f9493a, 0x00000001878275fa }, + /* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */ + { 0x00000000a3348a76, 0x00000000ddc42ce8 }, + /* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */ + { 0x00000001ad0aab92, 0x0000000181d2c73a }, + /* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */ + { 0x000000019e85f712, 0x0000000141c9320a }, + /* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */ + { 0x000000005a871e76, 0x000000015235719a }, + /* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */ + { 0x000000017249c662, 0x00000000be27d804 }, + /* x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */ + { 0x000000003a084712, 0x000000006242d45a }, + /* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */ + { 0x00000000ed438478, 0x000000009a53638e }, + /* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */ + { 0x00000000abac34cc, 0x00000001001ecfb6 }, + /* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */ + { 0x000000005f35ef3e, 0x000000016d7c2d64 }, + /* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */ + { 0x0000000047d6608c, 0x00000001d0ce46c0 }, + /* x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */ + { 0x000000002d01470e, 0x0000000124c907b4 }, + /* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */ + { 0x0000000158bbc7b0, 0x0000000018a555ca }, + /* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */ + { 0x00000000c0a23e8e, 0x000000006b0980bc }, + /* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */ + { 0x00000001ebd85c88, 0x000000008bbba964 }, + /* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */ + { 0x000000019ee20bb2, 0x00000001070a5a1e }, + /* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */ + { 0x00000001acabf2d6, 0x000000002204322a }, + /* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */ + { 0x00000001b7963d56, 0x00000000a27524d0 }, + /* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */ + { 0x000000017bffa1fe, 0x0000000020b1e4ba }, + /* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */ + { 0x000000001f15333e, 0x0000000032cc27fc }, + /* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */ + { 0x000000018593129e, 0x0000000044dd22b8 }, + /* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */ + { 0x000000019cb32602, 0x00000000dffc9e0a }, + /* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */ + { 0x0000000142b05cc8, 0x00000001b7a0ed14 
}, + /* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */ + { 0x00000001be49e7a4, 0x00000000c7842488 }, + /* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */ + { 0x0000000108f69d6c, 0x00000001c02a4fee }, + /* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */ + { 0x000000006c0971f0, 0x000000003c273778 }, + /* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */ + { 0x000000005b16467a, 0x00000001d63f8894 }, + /* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */ + { 0x00000001551a628e, 0x000000006be557d6 }, + /* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */ + { 0x000000019e42ea92, 0x000000006a7806ea }, + /* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */ + { 0x000000012fa83ff2, 0x000000016155aa0c }, + /* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */ + { 0x000000011ca9cde0, 0x00000000908650ac }, + /* x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1 */ + { 0x00000000c8e5cd74, 0x00000000aa5a8084 }, + /* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */ + { 0x0000000096c27f0c, 0x0000000191bb500a }, + /* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */ + { 0x000000002baed926, 0x0000000064e9bed0 }, + /* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */ + { 0x000000017c8de8d2, 0x000000009444f302 }, + /* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */ + { 0x00000000d43d6068, 0x000000019db07d3c }, + /* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */ + { 0x00000000cb2c4b26, 0x00000001359e3e6e }, + /* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */ + { 0x0000000145b8da26, 0x00000001e4f10dd2 }, + /* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */ + { 0x000000018fff4b08, 0x0000000124f5735e }, + /* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */ + { 0x0000000150b58ed0, 0x0000000124760a4c }, + /* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */ + { 0x00000001549f39bc, 0x000000000f1fc186 }, + /* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */ + { 0x00000000ef4d2f42, 0x00000000150e4cc4 }, + /* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */ + { 0x00000001b1468572, 0x000000002a6204e8 }, + /* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */ + { 0x000000013d7403b2, 0x00000000beb1d432 }, + /* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */ + { 0x00000001a4681842, 0x0000000135f3f1f0 }, + /* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */ + { 0x0000000167714492, 0x0000000074fe2232 }, + /* x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1 */ + { 0x00000001e599099a, 0x000000001ac6e2ba }, + /* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */ + { 0x00000000fe128194, 0x0000000013fca91e }, + /* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */ + { 0x0000000077e8b990, 0x0000000183f4931e }, + /* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */ + { 0x00000001a267f63a, 0x00000000b6d9b4e4 }, + /* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */ + { 0x00000001945c245a, 0x00000000b5188656 }, + /* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */ + { 0x0000000149002e76, 0x0000000027a81a84 }, + /* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */ + { 0x00000001bb8310a4, 0x0000000125699258 }, + /* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */ + { 0x000000019ec60bcc, 0x00000001b23de796 }, + /* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */ + { 0x000000012d8590ae, 0x00000000fe4365dc }, + /* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */ + { 0x0000000065b00684, 0x00000000c68f497a }, + /* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */ + { 0x000000015e5aeadc, 0x00000000fbf521ee }, + /* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */ + { 0x00000000b77ff2b0, 0x000000015eac3378 }, + /* x^21504 mod 
p(x)` << 1, x^21568 mod p(x)` << 1 */ + { 0x0000000188da2ff6, 0x0000000134914b90 }, + /* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */ + { 0x0000000063da929a, 0x0000000016335cfe }, + /* x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1 */ + { 0x00000001389caa80, 0x000000010372d10c }, + /* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */ + { 0x000000013db599d2, 0x000000015097b908 }, + /* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */ + { 0x0000000122505a86, 0x00000001227a7572 }, + /* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */ + { 0x000000016bd72746, 0x000000009a8f75c0 }, + /* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */ + { 0x00000001c3faf1d4, 0x00000000682c77a2 }, + /* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */ + { 0x00000001111c826c, 0x00000000231f091c }, + /* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */ + { 0x00000000153e9fb2, 0x000000007d4439f2 }, + /* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */ + { 0x000000002b1f7b60, 0x000000017e221efc }, + /* x^11264 mod p(x)` << 1, x^11328 mod p(x)` << 1 */ + { 0x00000000b1dba570, 0x0000000167457c38 }, + /* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */ + { 0x00000001f6397b76, 0x00000000bdf081c4 }, + /* x^9216 mod p(x)` << 1, x^9280 mod p(x)` << 1 */ + { 0x0000000156335214, 0x000000016286d6b0 }, + /* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */ + { 0x00000001d70e3986, 0x00000000c84f001c }, + /* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */ + { 0x000000003701a774, 0x0000000064efe7c0 }, + /* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */ + { 0x00000000ac81ef72, 0x000000000ac2d904 }, + /* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */ + { 0x0000000133212464, 0x00000000fd226d14 }, + /* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */ + { 0x00000000e4e45610, 0x000000011cfd42e0 }, + /* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */ + { 0x000000000c1bd370, 0x000000016e5a5678 }, + /* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */ + { 0x00000001a7b9e7a6, 0x00000001d888fe22 }, + /* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */ + { 0x000000007d657a10, 0x00000001af77fcd4 } +#else /* __LITTLE_ENDIAN__ */ + /* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */ + { 0x00000001651797d2, 0x0000000099ea94a8 }, + /* x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1 */ + { 0x0000000021e0d56c, 0x00000000945a8420 }, + /* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */ + { 0x000000000f95ecaa, 0x0000000030762706 }, + /* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */ + { 0x00000001ebd224ac, 0x00000001a52fc582 }, + /* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */ + { 0x000000000ccb97ca, 0x00000001a4a7167a }, + /* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */ + { 0x00000001006ec8a8, 0x000000000c18249a }, + /* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */ + { 0x000000014f58f196, 0x00000000a924ae7c }, + /* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */ + { 0x00000001a7192ca6, 0x00000001e12ccc12 }, + /* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */ + { 0x000000019a64bab2, 0x00000000a0b9d4ac }, + /* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */ + { 0x0000000014f4ed2e, 0x0000000095e8ddfe }, + /* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */ + { 0x000000011092b6a2, 0x00000000233fddc4 }, + /* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */ + { 0x00000000c8a1629c, 0x00000001b4529b62 }, + /* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */ + { 0x000000017bf32e8e, 0x00000001a7fa0e64 }, + /* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */ + { 0x00000001f8cc6582, 0x00000001b5334592 
}, + /* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */ + { 0x000000008631ddf0, 0x000000011f8ee1b4 }, + /* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */ + { 0x000000007e5a76d0, 0x000000006252e632 }, + /* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */ + { 0x000000002b09b31c, 0x00000000ab973e84 }, + /* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */ + { 0x00000001b2df1f84, 0x000000007734f5ec }, + /* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */ + { 0x00000001d6f56afc, 0x000000007c547798 }, + /* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */ + { 0x00000001b9b5e70c, 0x000000007ec40210 }, + /* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */ + { 0x0000000034b626d2, 0x00000001ab1695a8 }, + /* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */ + { 0x000000014c53479a, 0x0000000090494bba }, + /* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */ + { 0x00000001a6d179a4, 0x00000001123fb816 }, + /* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */ + { 0x000000015abd16b4, 0x00000001e188c74c }, + /* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */ + { 0x00000000018f9852, 0x00000001c2d3451c }, + /* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */ + { 0x000000001fb3084a, 0x00000000f55cf1ca }, + /* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */ + { 0x00000000c53dfb04, 0x00000001a0531540 }, + /* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */ + { 0x00000000e10c9ad6, 0x0000000132cd7ebc }, + /* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */ + { 0x0000000025aa994a, 0x0000000073ab7f36 }, + /* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */ + { 0x00000000fa3a74c4, 0x0000000041aed1c2 }, + /* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */ + { 0x0000000033eb3f40, 0x0000000136c53800 }, + /* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */ + { 0x000000017193f296, 0x0000000126835a30 }, + /* x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1 */ + { 0x0000000043f6c86a, 0x000000006241b502 }, + /* x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1 */ + { 0x000000016b513ec6, 0x00000000d5196ad4 }, + /* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */ + { 0x00000000c8f25b4e, 0x000000009cfa769a }, + /* x^225280 mod p(x)` << 1, x^225344 mod p(x)` << 1 */ + { 0x00000001a45048ec, 0x00000000920e5df4 }, + /* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */ + { 0x000000000c441004, 0x0000000169dc310e }, + /* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */ + { 0x000000000e17cad6, 0x0000000009fc331c }, + /* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */ + { 0x00000001253ae964, 0x000000010d94a81e }, + /* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */ + { 0x00000001d7c88ebc, 0x0000000027a20ab2 }, + /* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */ + { 0x00000001e7ca913a, 0x0000000114f87504 }, + /* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */ + { 0x0000000033ed078a, 0x000000004b076d96 }, + /* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */ + { 0x00000000e1839c78, 0x00000000da4d1e74 }, + /* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */ + { 0x00000001322b267e, 0x000000001b81f672 }, + /* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */ + { 0x00000000638231b6, 0x000000009367c988 }, + /* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */ + { 0x00000001ee7f16f4, 0x00000001717214ca }, + /* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */ + { 0x0000000117d9924a, 0x000000009f47d820 }, + /* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */ + { 0x00000000e1a9e0c4, 0x000000010d9a47d2 }, + /* x^211968 mod p(x)` << 1, x^212032 mod p(x)` 
<< 1 */ + { 0x00000001403731dc, 0x00000000a696c58c }, + /* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */ + { 0x00000001a5ea9682, 0x000000002aa28ec6 }, + /* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */ + { 0x0000000101c5c578, 0x00000001fe18fd9a }, + /* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */ + { 0x00000000dddf6494, 0x000000019d4fc1ae }, + /* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */ + { 0x00000000f1c3db28, 0x00000001ba0e3dea }, + /* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */ + { 0x000000013112fb9c, 0x0000000074b59a5e }, + /* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */ + { 0x00000000b680b906, 0x00000000f2b5ea98 }, + /* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */ + { 0x000000001a282932, 0x0000000187132676 }, + /* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */ + { 0x0000000089406e7e, 0x000000010a8c6ad4 }, + /* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */ + { 0x00000001def6be8c, 0x00000001e21dfe70 }, + /* x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */ + { 0x0000000075258728, 0x00000001da0050e4 }, + /* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */ + { 0x000000019536090a, 0x00000000772172ae }, + /* x^199680 mod p(x)` << 1, x^199744 mod p(x)` << 1 */ + { 0x00000000f2455bfc, 0x00000000e47724aa }, + /* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */ + { 0x000000018c40baf4, 0x000000003cd63ac4 }, + /* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */ + { 0x000000004cd390d4, 0x00000001bf47d352 }, + /* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */ + { 0x00000001e4ece95a, 0x000000018dc1d708 }, + /* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */ + { 0x000000001a3ee918, 0x000000002d4620a4 }, + /* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */ + { 0x000000007c652fb8, 0x0000000058fd1740 }, + /* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */ + { 0x000000011c67842c, 0x00000000dadd9bfc }, + /* x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1 */ + { 0x00000000254f759c, 0x00000001ea2140be }, + /* x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1 */ + { 0x000000007ece94ca, 0x000000009de128ba }, + /* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */ + { 0x0000000038f258c2, 0x000000013ac3aa8e }, + /* x^189440 mod p(x)` << 1, x^189504 mod p(x)` << 1 */ + { 0x00000001cdf17b00, 0x0000000099980562 }, + /* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */ + { 0x000000011f882c16, 0x00000001c1579c86 }, + /* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */ + { 0x0000000100093fc8, 0x0000000068dbbf94 }, + /* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */ + { 0x00000001cd684f16, 0x000000004509fb04 }, + /* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */ + { 0x000000004bc6a70a, 0x00000001202f6398 }, + /* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */ + { 0x000000004fc7e8e4, 0x000000013aea243e }, + /* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */ + { 0x0000000130103f1c, 0x00000001b4052ae6 }, + /* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */ + { 0x0000000111b0024c, 0x00000001cd2a0ae8 }, + /* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */ + { 0x000000010b3079da, 0x00000001fe4aa8b4 }, + /* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */ + { 0x000000010192bcc2, 0x00000001d1559a42 }, + /* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */ + { 0x0000000074838d50, 0x00000001f3e05ecc }, + /* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */ + { 0x000000001b20f520, 0x0000000104ddd2cc }, + /* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */ + { 0x0000000050c3590a, 0x000000015393153c 
}, + /* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */ + { 0x00000000b41cac8e, 0x0000000057e942c6 }, + /* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */ + { 0x000000000c72cc78, 0x000000012c633850 }, + /* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */ + { 0x0000000030cdb032, 0x00000000ebcaae4c }, + /* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */ + { 0x000000013e09fc32, 0x000000013ee532a6 }, + /* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */ + { 0x000000001ed624d2, 0x00000001bf0cbc7e }, + /* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */ + { 0x00000000781aee1a, 0x00000000d50b7a5a }, + /* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */ + { 0x00000001c4d8348c, 0x0000000002fca6e8 }, + /* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */ + { 0x0000000057a40336, 0x000000007af40044 }, + /* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */ + { 0x0000000085544940, 0x0000000016178744 }, + /* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */ + { 0x000000019cd21e80, 0x000000014c177458 }, + /* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */ + { 0x000000013eb95bc0, 0x000000011b6ddf04 }, + /* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */ + { 0x00000001dfc9fdfc, 0x00000001f3e29ccc }, + /* x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1 */ + { 0x00000000cd028bc2, 0x0000000135ae7562 }, + /* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */ + { 0x0000000090db8c44, 0x0000000190ef812c }, + /* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */ + { 0x000000010010a4ce, 0x0000000067a2c786 }, + /* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */ + { 0x00000001c8f4c72c, 0x0000000048b9496c }, + /* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */ + { 0x000000001c26170c, 0x000000015a422de6 }, + /* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */ + { 0x00000000e3fccf68, 0x00000001ef0e3640 }, + /* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */ + { 0x00000000d513ed24, 0x00000001006d2d26 }, + /* x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */ + { 0x00000000141beada, 0x00000001170d56d6 }, + /* x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1 */ + { 0x000000011071aea0, 0x00000000a5fb613c }, + /* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */ + { 0x000000012e19080a, 0x0000000040bbf7fc }, + /* x^153600 mod p(x)` << 1, x^153664 mod p(x)` << 1 */ + { 0x0000000100ecf826, 0x000000016ac3a5b2 }, + /* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */ + { 0x0000000069b09412, 0x00000000abf16230 }, + /* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 */ + { 0x0000000122297bac, 0x00000001ebe23fac }, + /* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */ + { 0x00000000e9e4b068, 0x000000008b6a0894 }, + /* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */ + { 0x000000004b38651a, 0x00000001288ea478 }, + /* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */ + { 0x00000001468360e2, 0x000000016619c442 }, + /* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */ + { 0x00000000121c2408, 0x0000000086230038 }, + /* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */ + { 0x00000000da7e7d08, 0x000000017746a756 }, + /* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */ + { 0x00000001058d7652, 0x0000000191b8f8f8 }, + /* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */ + { 0x000000014a098a90, 0x000000008e167708 }, + /* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */ + { 0x0000000020dbe72e, 0x0000000148b22d54 }, + /* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */ + { 0x000000011e7323e8, 0x0000000044ba2c3c }, + /* x^141312 mod p(x)` << 1, x^141376 mod p(x)` 
<< 1 */ + { 0x00000000d5d4bf94, 0x00000000b54d2b52 }, + /* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */ + { 0x0000000199d8746c, 0x0000000005a4fd8a }, + /* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */ + { 0x00000000ce9ca8a0, 0x0000000139f9fc46 }, + /* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */ + { 0x00000000136edece, 0x000000015a1fa824 }, + /* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */ + { 0x000000019b92a068, 0x000000000a61ae4c }, + /* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */ + { 0x0000000071d62206, 0x0000000145e9113e }, + /* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */ + { 0x00000000dfc50158, 0x000000006a348448 }, + /* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */ + { 0x00000001517626bc, 0x000000004d80a08c }, + /* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */ + { 0x0000000148d1e4fa, 0x000000014b6837a0 }, + /* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */ + { 0x0000000094d8266e, 0x000000016896a7fc }, + /* x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */ + { 0x00000000606c5e34, 0x000000014f187140 }, + /* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */ + { 0x000000019766beaa, 0x000000019581b9da }, + /* x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1 */ + { 0x00000001d80c506c, 0x00000001091bc984 }, + /* x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1 */ + { 0x000000001e73837c, 0x000000001067223c }, + /* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */ + { 0x0000000064d587de, 0x00000001ab16ea02 }, + /* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */ + { 0x00000000f4a507b0, 0x000000013c4598a8 }, + /* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */ + { 0x0000000040e342fc, 0x00000000b3735430 }, + /* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */ + { 0x00000001d5ad9c3a, 0x00000001bb3fc0c0 }, + /* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */ + { 0x0000000094a691a4, 0x00000001570ae19c }, + /* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */ + { 0x00000001271ecdfa, 0x00000001ea910712 }, + /* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */ + { 0x000000009e54475a, 0x0000000167127128 }, + /* x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1 */ + { 0x00000000c9c099ee, 0x0000000019e790a2 }, + /* x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */ + { 0x000000009a2f736c, 0x000000003788f710 }, + /* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */ + { 0x00000000bb9f4996, 0x00000001682a160e }, + /* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */ + { 0x00000001db688050, 0x000000007f0ebd2e }, + /* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */ + { 0x00000000e9b10af4, 0x000000002b032080 }, + /* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */ + { 0x000000012d4545e4, 0x00000000cfd1664a }, + /* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */ + { 0x000000000361139c, 0x00000000aa1181c2 }, + /* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */ + { 0x00000001a5a1a3a8, 0x00000000ddd08002 }, + /* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */ + { 0x000000006844e0b0, 0x00000000e8dd0446 }, + /* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */ + { 0x00000000c3762f28, 0x00000001bbd94a00 }, + /* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */ + { 0x00000001d26287a2, 0x00000000ab6cd180 }, + /* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */ + { 0x00000001f6f0bba8, 0x0000000031803ce2 }, + /* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */ + { 0x000000002ffabd62, 0x0000000024f40b0c }, + /* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */ + { 0x00000000fb4516b8, 0x00000001ba1d9834 
}, + /* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */ + { 0x000000018cfa961c, 0x0000000104de61aa }, + /* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */ + { 0x000000019e588d52, 0x0000000113e40d46 }, + /* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */ + { 0x00000001180f0bbc, 0x00000001415598a0 }, + /* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */ + { 0x00000000e1d9177a, 0x00000000bf6c8c90 }, + /* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */ + { 0x0000000105abc27c, 0x00000001788b0504 }, + /* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */ + { 0x00000000972e4a58, 0x0000000038385d02 }, + /* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */ + { 0x0000000183499a5e, 0x00000001b6c83844 }, + /* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */ + { 0x00000001c96a8cca, 0x0000000051061a8a }, + /* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */ + { 0x00000001a1a5b60c, 0x000000017351388a }, + /* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */ + { 0x00000000e4b6ac9c, 0x0000000132928f92 }, + /* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */ + { 0x00000001807e7f5a, 0x00000000e6b4f48a }, + /* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */ + { 0x000000017a7e3bc8, 0x0000000039d15e90 }, + /* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */ + { 0x00000000d73975da, 0x00000000312d6074 }, + /* x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1 */ + { 0x000000017375d038, 0x000000017bbb2cc4 }, + /* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */ + { 0x00000000193680bc, 0x000000016ded3e18 }, + /* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */ + { 0x00000000999b06f6, 0x00000000f1638b16 }, + /* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */ + { 0x00000001f685d2b8, 0x00000001d38b9ecc }, + /* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */ + { 0x00000001f4ecbed2, 0x000000018b8d09dc }, + /* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */ + { 0x00000000ba16f1a0, 0x00000000e7bc27d2 }, + /* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */ + { 0x0000000115aceac4, 0x00000000275e1e96 }, + /* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */ + { 0x00000001aeff6292, 0x00000000e2e3031e }, + /* x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1 */ + { 0x000000009640124c, 0x00000001041c84d8 }, + /* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */ + { 0x0000000114f41f02, 0x00000000706ce672 }, + /* x^81920 mod p(x)` << 1, x^81984 mod p(x)` << 1 */ + { 0x000000009c5f3586, 0x000000015d5070da }, + /* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */ + { 0x00000001878275fa, 0x0000000038f9493a }, + /* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */ + { 0x00000000ddc42ce8, 0x00000000a3348a76 }, + /* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */ + { 0x0000000181d2c73a, 0x00000001ad0aab92 }, + /* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */ + { 0x0000000141c9320a, 0x000000019e85f712 }, + /* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */ + { 0x000000015235719a, 0x000000005a871e76 }, + /* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */ + { 0x00000000be27d804, 0x000000017249c662 }, + /* x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */ + { 0x000000006242d45a, 0x000000003a084712 }, + /* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */ + { 0x000000009a53638e, 0x00000000ed438478 }, + /* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */ + { 0x00000001001ecfb6, 0x00000000abac34cc }, + /* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */ + { 0x000000016d7c2d64, 0x000000005f35ef3e }, + /* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */ + { 0x00000001d0ce46c0, 0x0000000047d6608c }, + /* 
x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */ + { 0x0000000124c907b4, 0x000000002d01470e }, + /* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */ + { 0x0000000018a555ca, 0x0000000158bbc7b0 }, + /* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */ + { 0x000000006b0980bc, 0x00000000c0a23e8e }, + /* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */ + { 0x000000008bbba964, 0x00000001ebd85c88 }, + /* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */ + { 0x00000001070a5a1e, 0x000000019ee20bb2 }, + /* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */ + { 0x000000002204322a, 0x00000001acabf2d6 }, + /* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */ + { 0x00000000a27524d0, 0x00000001b7963d56 }, + /* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */ + { 0x0000000020b1e4ba, 0x000000017bffa1fe }, + /* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */ + { 0x0000000032cc27fc, 0x000000001f15333e }, + /* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */ + { 0x0000000044dd22b8, 0x000000018593129e }, + /* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */ + { 0x00000000dffc9e0a, 0x000000019cb32602 }, + /* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */ + { 0x00000001b7a0ed14, 0x0000000142b05cc8 }, + /* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */ + { 0x00000000c7842488, 0x00000001be49e7a4 }, + /* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */ + { 0x00000001c02a4fee, 0x0000000108f69d6c }, + /* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */ + { 0x000000003c273778, 0x000000006c0971f0 }, + /* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */ + { 0x00000001d63f8894, 0x000000005b16467a }, + /* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */ + { 0x000000006be557d6, 0x00000001551a628e }, + /* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */ + { 0x000000006a7806ea, 0x000000019e42ea92 }, + /* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */ + { 0x000000016155aa0c, 0x000000012fa83ff2 }, + /* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */ + { 0x00000000908650ac, 0x000000011ca9cde0 }, + /* x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1 */ + { 0x00000000aa5a8084, 0x00000000c8e5cd74 }, + /* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */ + { 0x0000000191bb500a, 0x0000000096c27f0c }, + /* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */ + { 0x0000000064e9bed0, 0x000000002baed926 }, + /* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */ + { 0x000000009444f302, 0x000000017c8de8d2 }, + /* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */ + { 0x000000019db07d3c, 0x00000000d43d6068 }, + /* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */ + { 0x00000001359e3e6e, 0x00000000cb2c4b26 }, + /* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */ + { 0x00000001e4f10dd2, 0x0000000145b8da26 }, + /* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */ + { 0x0000000124f5735e, 0x000000018fff4b08 }, + /* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */ + { 0x0000000124760a4c, 0x0000000150b58ed0 }, + /* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */ + { 0x000000000f1fc186, 0x00000001549f39bc }, + /* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */ + { 0x00000000150e4cc4, 0x00000000ef4d2f42 }, + /* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */ + { 0x000000002a6204e8, 0x00000001b1468572 }, + /* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */ + { 0x00000000beb1d432, 0x000000013d7403b2 }, + /* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */ + { 0x0000000135f3f1f0, 0x00000001a4681842 }, + /* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */ + { 0x0000000074fe2232, 0x0000000167714492 }, + /* x^33792 mod p(x)` 
<< 1, x^33856 mod p(x)` << 1 */ + { 0x000000001ac6e2ba, 0x00000001e599099a }, + /* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */ + { 0x0000000013fca91e, 0x00000000fe128194 }, + /* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */ + { 0x0000000183f4931e, 0x0000000077e8b990 }, + /* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */ + { 0x00000000b6d9b4e4, 0x00000001a267f63a }, + /* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */ + { 0x00000000b5188656, 0x00000001945c245a }, + /* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */ + { 0x0000000027a81a84, 0x0000000149002e76 }, + /* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */ + { 0x0000000125699258, 0x00000001bb8310a4 }, + /* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */ + { 0x00000001b23de796, 0x000000019ec60bcc }, + /* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */ + { 0x00000000fe4365dc, 0x000000012d8590ae }, + /* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */ + { 0x00000000c68f497a, 0x0000000065b00684 }, + /* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */ + { 0x00000000fbf521ee, 0x000000015e5aeadc }, + /* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */ + { 0x000000015eac3378, 0x00000000b77ff2b0 }, + /* x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1 */ + { 0x0000000134914b90, 0x0000000188da2ff6 }, + /* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */ + { 0x0000000016335cfe, 0x0000000063da929a }, + /* x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1 */ + { 0x000000010372d10c, 0x00000001389caa80 }, + /* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */ + { 0x000000015097b908, 0x000000013db599d2 }, + /* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */ + { 0x00000001227a7572, 0x0000000122505a86 }, + /* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */ + { 0x000000009a8f75c0, 0x000000016bd72746 }, + /* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */ + { 0x00000000682c77a2, 0x00000001c3faf1d4 }, + /* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */ + { 0x00000000231f091c, 0x00000001111c826c }, + /* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */ + { 0x000000007d4439f2, 0x00000000153e9fb2 }, + /* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */ + { 0x000000017e221efc, 0x000000002b1f7b60 }, + /* x^11264 mod p(x)` << 1, x^11328 mod p(x)` << 1 */ + { 0x0000000167457c38, 0x00000000b1dba570 }, + /* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */ + { 0x00000000bdf081c4, 0x00000001f6397b76 }, + /* x^9216 mod p(x)` << 1, x^9280 mod p(x)` << 1 */ + { 0x000000016286d6b0, 0x0000000156335214 }, + /* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */ + { 0x00000000c84f001c, 0x00000001d70e3986 }, + /* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */ + { 0x0000000064efe7c0, 0x000000003701a774 }, + /* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */ + { 0x000000000ac2d904, 0x00000000ac81ef72 }, + /* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */ + { 0x00000000fd226d14, 0x0000000133212464 }, + /* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */ + { 0x000000011cfd42e0, 0x00000000e4e45610 }, + /* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */ + { 0x000000016e5a5678, 0x000000000c1bd370 }, + /* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */ + { 0x00000001d888fe22, 0x00000001a7b9e7a6 }, + /* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */ + { 0x00000001af77fcd4, 0x000000007d657a10 } +#endif /* __LITTLE_ENDIAN__ */ + }; + +/* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the trailing 32 bits of zeros */ + +static const __vector unsigned long long vcrc_short_const[16] + __attribute__((aligned (16))) = { +#ifdef 
__LITTLE_ENDIAN__ + /* x^1952 mod p(x) , x^1984 mod p(x) , x^2016 mod p(x) , x^2048 mod p(x) */ + { 0x99168a18ec447f11, 0xed837b2613e8221e }, + /* x^1824 mod p(x) , x^1856 mod p(x) , x^1888 mod p(x) , x^1920 mod p(x) */ + { 0xe23e954e8fd2cd3c, 0xc8acdd8147b9ce5a }, + /* x^1696 mod p(x) , x^1728 mod p(x) , x^1760 mod p(x) , x^1792 mod p(x) */ + { 0x92f8befe6b1d2b53, 0xd9ad6d87d4277e25 }, + /* x^1568 mod p(x) , x^1600 mod p(x) , x^1632 mod p(x) , x^1664 mod p(x) */ + { 0xf38a3556291ea462, 0xc10ec5e033fbca3b }, + /* x^1440 mod p(x) , x^1472 mod p(x) , x^1504 mod p(x) , x^1536 mod p(x) */ + { 0x974ac56262b6ca4b, 0xc0b55b0e82e02e2f }, + /* x^1312 mod p(x) , x^1344 mod p(x) , x^1376 mod p(x) , x^1408 mod p(x) */ + { 0x855712b3784d2a56, 0x71aa1df0e172334d }, + /* x^1184 mod p(x) , x^1216 mod p(x) , x^1248 mod p(x) , x^1280 mod p(x) */ + { 0xa5abe9f80eaee722, 0xfee3053e3969324d }, + /* x^1056 mod p(x) , x^1088 mod p(x) , x^1120 mod p(x) , x^1152 mod p(x) */ + { 0x1fa0943ddb54814c, 0xf44779b93eb2bd08 }, + /* x^928 mod p(x) , x^960 mod p(x) , x^992 mod p(x) , x^1024 mod p(x) */ + { 0xa53ff440d7bbfe6a, 0xf5449b3f00cc3374 }, + /* x^800 mod p(x) , x^832 mod p(x) , x^864 mod p(x) , x^896 mod p(x) */ + { 0xebe7e3566325605c, 0x6f8346e1d777606e }, + /* x^672 mod p(x) , x^704 mod p(x) , x^736 mod p(x) , x^768 mod p(x) */ + { 0xc65a272ce5b592b8, 0xe3ab4f2ac0b95347 }, + /* x^544 mod p(x) , x^576 mod p(x) , x^608 mod p(x) , x^640 mod p(x) */ + { 0x5705a9ca4721589f, 0xaa2215ea329ecc11 }, + /* x^416 mod p(x) , x^448 mod p(x) , x^480 mod p(x) , x^512 mod p(x) */ + { 0xe3720acb88d14467, 0x1ed8f66ed95efd26 }, + /* x^288 mod p(x) , x^320 mod p(x) , x^352 mod p(x) , x^384 mod p(x) */ + { 0xba1aca0315141c31, 0x78ed02d5a700e96a }, + /* x^160 mod p(x) , x^192 mod p(x) , x^224 mod p(x) , x^256 mod p(x) */ + { 0xad2a31b3ed627dae, 0xba8ccbe832b39da3 }, + /* x^32 mod p(x) , x^64 mod p(x) , x^96 mod p(x) , x^128 mod p(x) */ + { 0x6655004fa06a2517, 0xedb88320b1e6b092 } +#else /* __LITTLE_ENDIAN__ */ + /* x^1952 mod p(x) , x^1984 mod p(x) , x^2016 mod p(x) , x^2048 mod p(x) */ + { 0xed837b2613e8221e, 0x99168a18ec447f11 }, + /* x^1824 mod p(x) , x^1856 mod p(x) , x^1888 mod p(x) , x^1920 mod p(x) */ + { 0xc8acdd8147b9ce5a, 0xe23e954e8fd2cd3c }, + /* x^1696 mod p(x) , x^1728 mod p(x) , x^1760 mod p(x) , x^1792 mod p(x) */ + { 0xd9ad6d87d4277e25, 0x92f8befe6b1d2b53 }, + /* x^1568 mod p(x) , x^1600 mod p(x) , x^1632 mod p(x) , x^1664 mod p(x) */ + { 0xc10ec5e033fbca3b, 0xf38a3556291ea462 }, + /* x^1440 mod p(x) , x^1472 mod p(x) , x^1504 mod p(x) , x^1536 mod p(x) */ + { 0xc0b55b0e82e02e2f, 0x974ac56262b6ca4b }, + /* x^1312 mod p(x) , x^1344 mod p(x) , x^1376 mod p(x) , x^1408 mod p(x) */ + { 0x71aa1df0e172334d, 0x855712b3784d2a56 }, + /* x^1184 mod p(x) , x^1216 mod p(x) , x^1248 mod p(x) , x^1280 mod p(x) */ + { 0xfee3053e3969324d, 0xa5abe9f80eaee722 }, + /* x^1056 mod p(x) , x^1088 mod p(x) , x^1120 mod p(x) , x^1152 mod p(x) */ + { 0xf44779b93eb2bd08, 0x1fa0943ddb54814c }, + /* x^928 mod p(x) , x^960 mod p(x) , x^992 mod p(x) , x^1024 mod p(x) */ + { 0xf5449b3f00cc3374, 0xa53ff440d7bbfe6a }, + /* x^800 mod p(x) , x^832 mod p(x) , x^864 mod p(x) , x^896 mod p(x) */ + { 0x6f8346e1d777606e, 0xebe7e3566325605c }, + /* x^672 mod p(x) , x^704 mod p(x) , x^736 mod p(x) , x^768 mod p(x) */ + { 0xe3ab4f2ac0b95347, 0xc65a272ce5b592b8 }, + /* x^544 mod p(x) , x^576 mod p(x) , x^608 mod p(x) , x^640 mod p(x) */ + { 0xaa2215ea329ecc11, 0x5705a9ca4721589f }, + /* x^416 mod p(x) , x^448 mod p(x) , x^480 mod p(x) , x^512 mod p(x) */ + { 
0x1ed8f66ed95efd26, 0xe3720acb88d14467 }, + /* x^288 mod p(x) , x^320 mod p(x) , x^352 mod p(x) , x^384 mod p(x) */ + { 0x78ed02d5a700e96a, 0xba1aca0315141c31 }, + /* x^160 mod p(x) , x^192 mod p(x) , x^224 mod p(x) , x^256 mod p(x) */ + { 0xba8ccbe832b39da3, 0xad2a31b3ed627dae }, + /* x^32 mod p(x) , x^64 mod p(x) , x^96 mod p(x) , x^128 mod p(x) */ + { 0xedb88320b1e6b092, 0x6655004fa06a2517 } +#endif /* __LITTLE_ENDIAN__ */ + }; + +/* Barrett constants */ +/* 33 bit reflected Barrett constant m - (4^32)/n */ + +static const __vector unsigned long long v_Barrett_const[2] + __attribute__((aligned (16))) = { + /* x^64 div p(x) */ +#ifdef __LITTLE_ENDIAN__ + { 0x00000001f7011641, 0x0000000000000000 }, + { 0x00000001db710641, 0x0000000000000000 } +#else /* __LITTLE_ENDIAN__ */ + { 0x0000000000000000, 0x00000001f7011641 }, + { 0x0000000000000000, 0x00000001db710641 } +#endif /* __LITTLE_ENDIAN__ */ + }; +#endif /* POWER8_INTRINSICS */ + +#endif /* __ASSEMBLER__ */ diff --git a/extra/crc32-vpmsum/crc32ieee_wrapper.c b/extra/crc32-vpmsum/crc32ieee_wrapper.c deleted file mode 100644 index 41c1f980097..00000000000 --- a/extra/crc32-vpmsum/crc32ieee_wrapper.c +++ /dev/null @@ -1,75 +0,0 @@ -#ifdef __powerpc__ - -#define F crc32ieee_vpmsum -#define __F __crc32ieee_vpmsum - -static const unsigned int crc_table[] = { - 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, - 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, - 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, - 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, - 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, - 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, - 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, - 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, - 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, - 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, - 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, - 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, - 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, - 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, - 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, - 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, - 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, - 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, - 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, - 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, - 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, - 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, - 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, - 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, - 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, - 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, - 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, - 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, - 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, - 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, - 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, - 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, - 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, - 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, - 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, - 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, - 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, - 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, - 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, - 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, - 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, - 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, - 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, - 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, - 0xcb61b38c, 0xbc66831a, 
0x256fd2a0, 0x5268e236, - 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, - 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, - 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, - 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, - 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, - 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, - 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, - 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, - 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, - 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, - 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, - 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, - 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, - 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, - 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, - 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, - 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, - 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, - 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,}; - -#include "crc32_wrapper.ic" - -#endif - diff --git a/extra/crc32-vpmsum/ppc-opcode.h b/extra/crc32-vpmsum/ppc-opcode.h deleted file mode 100644 index 5942bd4923a..00000000000 --- a/extra/crc32-vpmsum/ppc-opcode.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef __OPCODES_H -#define __OPCODES_H - -#define __PPC_RA(a) (((a) & 0x1f) << 16) -#define __PPC_RB(b) (((b) & 0x1f) << 11) -#define __PPC_XA(a) ((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3)) -#define __PPC_XB(b) ((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4)) -#define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5)) -#define __PPC_XT(s) __PPC_XS(s) -#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b)) -#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b)) - -#define PPC_INST_VPMSUMW 0x10000488 -#define PPC_INST_VPMSUMD 0x100004c8 -#define PPC_INST_MFVSRD 0x7c000066 -#define PPC_INST_MTVSRD 0x7c000166 - -#define VPMSUMW(t, a, b) .long PPC_INST_VPMSUMW | VSX_XX3((t), a, b) -#define VPMSUMD(t, a, b) .long PPC_INST_VPMSUMD | VSX_XX3((t), a, b) -#define MFVRD(a, t) .long PPC_INST_MFVSRD | VSX_XX1((t)+32, a, 0) -#define MTVRD(t, a) .long PPC_INST_MTVSRD | VSX_XX1((t)+32, a, 0) - -#endif diff --git a/extra/crc32-vpmsum/vec_crc32.c b/extra/crc32-vpmsum/vec_crc32.c new file mode 100644 index 00000000000..bb2204b247c --- /dev/null +++ b/extra/crc32-vpmsum/vec_crc32.c @@ -0,0 +1,674 @@ +/* + * Calculate the checksum of data that is 16 byte aligned and a multiple of + * 16 bytes. + * + * The first step is to reduce it to 1024 bits. We do this in 8 parallel + * chunks in order to mask the latency of the vpmsum instructions. If we + * have more than 32 kB of data to checksum we repeat this step multiple + * times, passing in the previous 1024 bits. + * + * The next step is to reduce the 1024 bits to 64 bits. This step adds + * 32 bits of 0s to the end - this matches what a CRC does. We just + * calculate constants that land the data in this 32 bits. + * + * We then use fixed point Barrett reduction to compute a mod n over GF(2) + * for n = CRC using POWER8 instructions. We use x = 32. + * + * http://en.wikipedia.org/wiki/Barrett_reduction + * + * This code uses gcc vector builtins instead using assembly directly. 
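To make the Barrett step above concrete: with p the 33-bit CRC polynomial, m = floor(x^64 / p), and a 64-bit remainder a, all viewed as bit vectors over GF(2), the quotient q = floor(a*m / x^64) is exact, so a xor q*p leaves just the 32-bit remainder. Below is a minimal scalar sketch of that step; it is not part of the patch, the helper names are hypothetical, it mirrors the non-reflected branch of the code (the REFLECT branch applies the same idea with bit-reversed constants), and the patch itself performs the multiplies with vpmsumd on 128-bit vectors, vconst1 playing the role of m and vconst2 the role of p.

#include <stdint.h>

/* carry-less multiply of two 64-bit polynomials: high 64 bits of the product */
static uint64_t clmul_hi(uint64_t a, uint64_t b)
{
    uint64_t hi = 0;
    for (int i = 1; i < 64; i++)
        if ((b >> i) & 1)
            hi ^= a >> (64 - i);
    return hi;
}

/* carry-less multiply of two 64-bit polynomials: low 64 bits of the product */
static uint64_t clmul_lo(uint64_t a, uint64_t b)
{
    uint64_t lo = 0;
    for (int i = 0; i < 64; i++)
        if ((b >> i) & 1)
            lo ^= a << i;
    return lo;
}

/* a mod p via Barrett: q = floor(a*m / x^64), r = a xor q*p */
static uint32_t barrett_gf2(uint64_t a, uint64_t m, uint64_t p)
{
    uint64_t q  = clmul_hi(a, m);   /* "ma" already shifted down 64 bits */
    uint64_t qp = clmul_lo(q, p);   /* "qn" in the comments further down */
    return (uint32_t)(a ^ qp);      /* subtraction is xor in GF(2) */
}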
+ * + * Copyright (C) 2017 Rogerio Alves , IBM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of either: + * + * a) the GNU General Public License as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) + * any later version, or + * b) the Apache License, Version 2.0 + */ + +#include <altivec.h> + +#define POWER8_INTRINSICS +#define CRC_TABLE + +#ifdef CRC32_CONSTANTS_HEADER +#include CRC32_CONSTANTS_HEADER +#else +#include "crc32_constants.h" +#endif + +#define VMX_ALIGN 16 +#define VMX_ALIGN_MASK (VMX_ALIGN-1) + +#ifdef REFLECT +static unsigned int crc32_align(unsigned int crc, const unsigned char *p, + unsigned long len) +{ + while (len--) + crc = crc_table[(crc ^ *p++) & 0xff] ^ (crc >> 8); + return crc; +} +#else +static unsigned int crc32_align(unsigned int crc, const unsigned char *p, + unsigned long len) +{ + while (len--) + crc = crc_table[((crc >> 24) ^ *p++) & 0xff] ^ (crc << 8); + return crc; +} +#endif + +static unsigned int __attribute__ ((aligned (32))) +__crc32_vpmsum(unsigned int crc, const void* p, unsigned long len); + +#ifndef CRC32_FUNCTION +#define CRC32_FUNCTION crc32_vpmsum +#endif + +unsigned int CRC32_FUNCTION(unsigned int crc, const unsigned char *p, + unsigned long len) +{ + unsigned int prealign; + unsigned int tail; + +#ifdef CRC_XOR + crc ^= 0xffffffff; +#endif + + if (len < VMX_ALIGN + VMX_ALIGN_MASK) { + crc = crc32_align(crc, p, len); + goto out; + } + + if ((unsigned long)p & VMX_ALIGN_MASK) { + prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK); + crc = crc32_align(crc, p, prealign); + len -= prealign; + p += prealign; + } + + crc = __crc32_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); + + tail = len & VMX_ALIGN_MASK; + if (tail) { + p += len & ~VMX_ALIGN_MASK; + crc = crc32_align(crc, p, tail); + } + +out: +#ifdef CRC_XOR + crc ^= 0xffffffff; +#endif + + return crc; +} + +#if defined (__clang__) +#include "clang_workaround.h" +#else +#define __builtin_pack_vector(a, b) __builtin_pack_vector_int128 ((a), (b)) +#define __builtin_unpack_vector_0(a) __builtin_unpack_vector_int128 ((vector __int128_t)(a), 0) +#define __builtin_unpack_vector_1(a) __builtin_unpack_vector_int128 ((vector __int128_t)(a), 1) +#endif + +/* When we have a load-store in a single-dispatch group and address overlap + * such that forward is not allowed (load-hit-store) the group must be flushed. + * A group ending NOP prevents the flush. + */ +#define GROUP_ENDING_NOP asm("ori 2,2,0" ::: "memory") + +#if defined(__BIG_ENDIAN__) && defined (REFLECT) +#define BYTESWAP_DATA +#elif defined(__LITTLE_ENDIAN__) && !defined(REFLECT) +#define BYTESWAP_DATA +#endif + +#ifdef BYTESWAP_DATA +#define VEC_PERM(vr, va, vb, vc) vr = vec_perm(va, vb,\ + (__vector unsigned char) vc) +#if defined(__LITTLE_ENDIAN__) +/* Byte reverse permute constant LE. 
*/ +static const __vector unsigned long long vperm_const + __attribute__ ((aligned(16))) = { 0x08090A0B0C0D0E0FUL, + 0x0001020304050607UL }; +#else +static const __vector unsigned long long vperm_const + __attribute__ ((aligned(16))) = { 0x0F0E0D0C0B0A0908UL, + 0X0706050403020100UL }; +#endif +#else +#define VEC_PERM(vr, va, vb, vc) +#endif + +static unsigned int __attribute__ ((aligned (32))) +__crc32_vpmsum(unsigned int crc, const void* p, unsigned long len) { + + const __vector unsigned long long vzero = {0,0}; + const __vector unsigned long long vones = {0xffffffffffffffffUL, + 0xffffffffffffffffUL}; + +#ifdef REFLECT + const __vector unsigned long long vmask_32bit = + (__vector unsigned long long)vec_sld((__vector unsigned char)vzero, + (__vector unsigned char)vones, 4); +#endif + + const __vector unsigned long long vmask_64bit = + (__vector unsigned long long)vec_sld((__vector unsigned char)vzero, + (__vector unsigned char)vones, 8); + + __vector unsigned long long vcrc; + + __vector unsigned long long vconst1, vconst2; + + /* vdata0-vdata7 will contain our data (p). */ + __vector unsigned long long vdata0, vdata1, vdata2, vdata3, vdata4, + vdata5, vdata6, vdata7; + + /* v0-v7 will contain our checksums */ + __vector unsigned long long v0 = {0,0}; + __vector unsigned long long v1 = {0,0}; + __vector unsigned long long v2 = {0,0}; + __vector unsigned long long v3 = {0,0}; + __vector unsigned long long v4 = {0,0}; + __vector unsigned long long v5 = {0,0}; + __vector unsigned long long v6 = {0,0}; + __vector unsigned long long v7 = {0,0}; + + + /* Vector auxiliary variables. */ + __vector unsigned long long va0, va1, va2, va3, va4, va5, va6, va7; + + unsigned int result = 0; + unsigned int offset; /* Constant table offset. */ + + unsigned long i; /* Counter. */ + unsigned long chunks; + + unsigned long block_size; + int next_block = 0; + + /* Align by 128 bits. The last 128 bit block will be processed at end. */ + unsigned long length = len & 0xFFFFFFFFFFFFFF80UL; + +#ifdef REFLECT + vcrc = (__vector unsigned long long)__builtin_pack_vector(0UL, crc); +#else + vcrc = (__vector unsigned long long)__builtin_pack_vector(crc, 0UL); + + /* Shift into top 32 bits */ + vcrc = (__vector unsigned long long)vec_sld((__vector unsigned char)vcrc, + (__vector unsigned char)vzero, 4); +#endif + + /* Short version. */ + if (len < 256) { + /* Calculate where in the constant table we need to start. */ + offset = 256 - len; + + vconst1 = vec_ld(offset, vcrc_short_const); + vdata0 = vec_ld(0, (__vector unsigned long long*) p); + VEC_PERM(vdata0, vdata0, vconst1, vperm_const); + + /* xor initial value*/ + vdata0 = vec_xor(vdata0, vcrc); + + vdata0 = (__vector unsigned long long) __builtin_crypto_vpmsumw + ((__vector unsigned int)vdata0, (__vector unsigned int)vconst1); + v0 = vec_xor(v0, vdata0); + + for (i = 16; i < len; i += 16) { + vconst1 = vec_ld(offset + i, vcrc_short_const); + vdata0 = vec_ld(i, (__vector unsigned long long*) p); + VEC_PERM(vdata0, vdata0, vconst1, vperm_const); + vdata0 = (__vector unsigned long long) __builtin_crypto_vpmsumw + ((__vector unsigned int)vdata0, (__vector unsigned int)vconst1); + v0 = vec_xor(v0, vdata0); + } + } else { + + /* Load initial values. 
*/ + vdata0 = vec_ld(0, (__vector unsigned long long*) p); + vdata1 = vec_ld(16, (__vector unsigned long long*) p); + + VEC_PERM(vdata0, vdata0, vdata0, vperm_const); + VEC_PERM(vdata1, vdata1, vdata1, vperm_const); + + vdata2 = vec_ld(32, (__vector unsigned long long*) p); + vdata3 = vec_ld(48, (__vector unsigned long long*) p); + + VEC_PERM(vdata2, vdata2, vdata2, vperm_const); + VEC_PERM(vdata3, vdata3, vdata3, vperm_const); + + vdata4 = vec_ld(64, (__vector unsigned long long*) p); + vdata5 = vec_ld(80, (__vector unsigned long long*) p); + + VEC_PERM(vdata4, vdata4, vdata4, vperm_const); + VEC_PERM(vdata5, vdata5, vdata5, vperm_const); + + vdata6 = vec_ld(96, (__vector unsigned long long*) p); + vdata7 = vec_ld(112, (__vector unsigned long long*) p); + + VEC_PERM(vdata6, vdata6, vdata6, vperm_const); + VEC_PERM(vdata7, vdata7, vdata7, vperm_const); + + /* xor in initial value */ + vdata0 = vec_xor(vdata0, vcrc); + + p = (char *)p + 128; + + do { + /* Checksum in blocks of MAX_SIZE. */ + block_size = length; + if (block_size > MAX_SIZE) { + block_size = MAX_SIZE; + } + + length = length - block_size; + + /* + * Work out the offset into the constants table to start at. Each + * constant is 16 bytes, and it is used against 128 bytes of input + * data - 128 / 16 = 8 + */ + offset = (MAX_SIZE/8) - (block_size/8); + /* We reduce our final 128 bytes in a separate step */ + chunks = (block_size/128)-1; + + vconst1 = vec_ld(offset, vcrc_const); + + va0 = __builtin_crypto_vpmsumd ((__vector unsigned long long)vdata0, + (__vector unsigned long long)vconst1); + va1 = __builtin_crypto_vpmsumd ((__vector unsigned long long)vdata1, + (__vector unsigned long long)vconst1); + va2 = __builtin_crypto_vpmsumd ((__vector unsigned long long)vdata2, + (__vector unsigned long long)vconst1); + va3 = __builtin_crypto_vpmsumd ((__vector unsigned long long)vdata3, + (__vector unsigned long long)vconst1); + va4 = __builtin_crypto_vpmsumd ((__vector unsigned long long)vdata4, + (__vector unsigned long long)vconst1); + va5 = __builtin_crypto_vpmsumd ((__vector unsigned long long)vdata5, + (__vector unsigned long long)vconst1); + va6 = __builtin_crypto_vpmsumd ((__vector unsigned long long)vdata6, + (__vector unsigned long long)vconst1); + va7 = __builtin_crypto_vpmsumd ((__vector unsigned long long)vdata7, + (__vector unsigned long long)vconst1); + + if (chunks > 1) { + offset += 16; + vconst2 = vec_ld(offset, vcrc_const); + GROUP_ENDING_NOP; + + vdata0 = vec_ld(0, (__vector unsigned long long*) p); + VEC_PERM(vdata0, vdata0, vdata0, vperm_const); + + vdata1 = vec_ld(16, (__vector unsigned long long*) p); + VEC_PERM(vdata1, vdata1, vdata1, vperm_const); + + vdata2 = vec_ld(32, (__vector unsigned long long*) p); + VEC_PERM(vdata2, vdata2, vdata2, vperm_const); + + vdata3 = vec_ld(48, (__vector unsigned long long*) p); + VEC_PERM(vdata3, vdata3, vdata3, vperm_const); + + vdata4 = vec_ld(64, (__vector unsigned long long*) p); + VEC_PERM(vdata4, vdata4, vdata4, vperm_const); + + vdata5 = vec_ld(80, (__vector unsigned long long*) p); + VEC_PERM(vdata5, vdata5, vdata5, vperm_const); + + vdata6 = vec_ld(96, (__vector unsigned long long*) p); + VEC_PERM(vdata6, vdata6, vdata6, vperm_const); + + vdata7 = vec_ld(112, (__vector unsigned long long*) p); + VEC_PERM(vdata7, vdata7, vdata7, vperm_const); + + p = (char *)p + 128; + + /* + * main loop. We modulo schedule it such that it takes three + * iterations to complete - first iteration load, second + * iteration vpmsum, third iteration xor. 
+ */ + for (i = 0; i < chunks-2; i++) { + vconst1 = vec_ld(offset, vcrc_const); + offset += 16; + GROUP_ENDING_NOP; + + v0 = vec_xor(v0, va0); + va0 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata0, (__vector unsigned long long)vconst2); + vdata0 = vec_ld(0, (__vector unsigned long long*) p); + VEC_PERM(vdata0, vdata0, vdata0, vperm_const); + GROUP_ENDING_NOP; + + v1 = vec_xor(v1, va1); + va1 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata1, (__vector unsigned long long)vconst2); + vdata1 = vec_ld(16, (__vector unsigned long long*) p); + VEC_PERM(vdata1, vdata1, vdata1, vperm_const); + GROUP_ENDING_NOP; + + v2 = vec_xor(v2, va2); + va2 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata2, (__vector unsigned long long)vconst2); + vdata2 = vec_ld(32, (__vector unsigned long long*) p); + VEC_PERM(vdata2, vdata2, vdata2, vperm_const); + GROUP_ENDING_NOP; + + v3 = vec_xor(v3, va3); + va3 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata3, (__vector unsigned long long)vconst2); + vdata3 = vec_ld(48, (__vector unsigned long long*) p); + VEC_PERM(vdata3, vdata3, vdata3, vperm_const); + + vconst2 = vec_ld(offset, vcrc_const); + GROUP_ENDING_NOP; + + v4 = vec_xor(v4, va4); + va4 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata4, (__vector unsigned long long)vconst1); + vdata4 = vec_ld(64, (__vector unsigned long long*) p); + VEC_PERM(vdata4, vdata4, vdata4, vperm_const); + GROUP_ENDING_NOP; + + v5 = vec_xor(v5, va5); + va5 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata5, (__vector unsigned long long)vconst1); + vdata5 = vec_ld(80, (__vector unsigned long long*) p); + VEC_PERM(vdata5, vdata5, vdata5, vperm_const); + GROUP_ENDING_NOP; + + v6 = vec_xor(v6, va6); + va6 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata6, (__vector unsigned long long)vconst1); + vdata6 = vec_ld(96, (__vector unsigned long long*) p); + VEC_PERM(vdata6, vdata6, vdata6, vperm_const); + GROUP_ENDING_NOP; + + v7 = vec_xor(v7, va7); + va7 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata7, (__vector unsigned long long)vconst1); + vdata7 = vec_ld(112, (__vector unsigned long long*) p); + VEC_PERM(vdata7, vdata7, vdata7, vperm_const); + + p = (char *)p + 128; + } + + /* First cool down*/ + vconst1 = vec_ld(offset, vcrc_const); + offset += 16; + + v0 = vec_xor(v0, va0); + va0 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata0, (__vector unsigned long long)vconst1); + GROUP_ENDING_NOP; + + v1 = vec_xor(v1, va1); + va1 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata1, (__vector unsigned long long)vconst1); + GROUP_ENDING_NOP; + + v2 = vec_xor(v2, va2); + va2 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata2, (__vector unsigned long long)vconst1); + GROUP_ENDING_NOP; + + v3 = vec_xor(v3, va3); + va3 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata3, (__vector unsigned long long)vconst1); + GROUP_ENDING_NOP; + + v4 = vec_xor(v4, va4); + va4 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata4, (__vector unsigned long long)vconst1); + GROUP_ENDING_NOP; + + v5 = vec_xor(v5, va5); + va5 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata5, (__vector unsigned long long)vconst1); + GROUP_ENDING_NOP; + + v6 = vec_xor(v6, va6); + va6 = __builtin_crypto_vpmsumd ((__vector unsigned long + long)vdata6, (__vector unsigned long long)vconst1); + GROUP_ENDING_NOP; + + v7 = vec_xor(v7, va7); + va7 = __builtin_crypto_vpmsumd ((__vector 
unsigned long + long)vdata7, (__vector unsigned long long)vconst1); + }/* else */ + + /* Second cool down. */ + v0 = vec_xor(v0, va0); + v1 = vec_xor(v1, va1); + v2 = vec_xor(v2, va2); + v3 = vec_xor(v3, va3); + v4 = vec_xor(v4, va4); + v5 = vec_xor(v5, va5); + v6 = vec_xor(v6, va6); + v7 = vec_xor(v7, va7); + +#ifdef REFLECT + /* + * vpmsumd produces a 96 bit result in the least significant bits + * of the register. Since we are bit reflected we have to shift it + * left 32 bits so it occupies the least significant bits in the + * bit reflected domain. + */ + v0 = (__vector unsigned long long)vec_sld((__vector unsigned char)v0, + (__vector unsigned char)vzero, 4); + v1 = (__vector unsigned long long)vec_sld((__vector unsigned char)v1, + (__vector unsigned char)vzero, 4); + v2 = (__vector unsigned long long)vec_sld((__vector unsigned char)v2, + (__vector unsigned char)vzero, 4); + v3 = (__vector unsigned long long)vec_sld((__vector unsigned char)v3, + (__vector unsigned char)vzero, 4); + v4 = (__vector unsigned long long)vec_sld((__vector unsigned char)v4, + (__vector unsigned char)vzero, 4); + v5 = (__vector unsigned long long)vec_sld((__vector unsigned char)v5, + (__vector unsigned char)vzero, 4); + v6 = (__vector unsigned long long)vec_sld((__vector unsigned char)v6, + (__vector unsigned char)vzero, 4); + v7 = (__vector unsigned long long)vec_sld((__vector unsigned char)v7, + (__vector unsigned char)vzero, 4); +#endif + + /* xor with the last 1024 bits. */ + va0 = vec_ld(0, (__vector unsigned long long*) p); + VEC_PERM(va0, va0, va0, vperm_const); + + va1 = vec_ld(16, (__vector unsigned long long*) p); + VEC_PERM(va1, va1, va1, vperm_const); + + va2 = vec_ld(32, (__vector unsigned long long*) p); + VEC_PERM(va2, va2, va2, vperm_const); + + va3 = vec_ld(48, (__vector unsigned long long*) p); + VEC_PERM(va3, va3, va3, vperm_const); + + va4 = vec_ld(64, (__vector unsigned long long*) p); + VEC_PERM(va4, va4, va4, vperm_const); + + va5 = vec_ld(80, (__vector unsigned long long*) p); + VEC_PERM(va5, va5, va5, vperm_const); + + va6 = vec_ld(96, (__vector unsigned long long*) p); + VEC_PERM(va6, va6, va6, vperm_const); + + va7 = vec_ld(112, (__vector unsigned long long*) p); + VEC_PERM(va7, va7, va7, vperm_const); + + p = (char *)p + 128; + + vdata0 = vec_xor(v0, va0); + vdata1 = vec_xor(v1, va1); + vdata2 = vec_xor(v2, va2); + vdata3 = vec_xor(v3, va3); + vdata4 = vec_xor(v4, va4); + vdata5 = vec_xor(v5, va5); + vdata6 = vec_xor(v6, va6); + vdata7 = vec_xor(v7, va7); + + /* Check if we have more blocks to process */ + next_block = 0; + if (length != 0) { + next_block = 1; + + /* zero v0-v7 */ + v0 = vec_xor(v0, v0); + v1 = vec_xor(v1, v1); + v2 = vec_xor(v2, v2); + v3 = vec_xor(v3, v3); + v4 = vec_xor(v4, v4); + v5 = vec_xor(v5, v5); + v6 = vec_xor(v6, v6); + v7 = vec_xor(v7, v7); + } + length = length + 128; + + } while (next_block); + + /* Calculate how many bytes we have left. */ + length = (len & 127); + + /* Calculate where in (short) constant table we need to start. 
*/ + offset = 128 - length; + + v0 = vec_ld(offset, vcrc_short_const); + v1 = vec_ld(offset + 16, vcrc_short_const); + v2 = vec_ld(offset + 32, vcrc_short_const); + v3 = vec_ld(offset + 48, vcrc_short_const); + v4 = vec_ld(offset + 64, vcrc_short_const); + v5 = vec_ld(offset + 80, vcrc_short_const); + v6 = vec_ld(offset + 96, vcrc_short_const); + v7 = vec_ld(offset + 112, vcrc_short_const); + + offset += 128; + + v0 = (__vector unsigned long long)__builtin_crypto_vpmsumw ( + (__vector unsigned int)vdata0,(__vector unsigned int)v0); + v1 = (__vector unsigned long long)__builtin_crypto_vpmsumw ( + (__vector unsigned int)vdata1,(__vector unsigned int)v1); + v2 = (__vector unsigned long long)__builtin_crypto_vpmsumw ( + (__vector unsigned int)vdata2,(__vector unsigned int)v2); + v3 = (__vector unsigned long long)__builtin_crypto_vpmsumw ( + (__vector unsigned int)vdata3,(__vector unsigned int)v3); + v4 = (__vector unsigned long long)__builtin_crypto_vpmsumw ( + (__vector unsigned int)vdata4,(__vector unsigned int)v4); + v5 = (__vector unsigned long long)__builtin_crypto_vpmsumw ( + (__vector unsigned int)vdata5,(__vector unsigned int)v5); + v6 = (__vector unsigned long long)__builtin_crypto_vpmsumw ( + (__vector unsigned int)vdata6,(__vector unsigned int)v6); + v7 = (__vector unsigned long long)__builtin_crypto_vpmsumw ( + (__vector unsigned int)vdata7,(__vector unsigned int)v7); + + /* Now reduce the tail (0-112 bytes). */ + for (i = 0; i < length; i+=16) { + vdata0 = vec_ld(i,(__vector unsigned long long*)p); + VEC_PERM(vdata0, vdata0, vdata0, vperm_const); + va0 = vec_ld(offset + i,vcrc_short_const); + va0 = (__vector unsigned long long)__builtin_crypto_vpmsumw ( + (__vector unsigned int)vdata0,(__vector unsigned int)va0); + v0 = vec_xor(v0, va0); + } + + /* xor all parallel chunks together. */ + v0 = vec_xor(v0, v1); + v2 = vec_xor(v2, v3); + v4 = vec_xor(v4, v5); + v6 = vec_xor(v6, v7); + + v0 = vec_xor(v0, v2); + v4 = vec_xor(v4, v6); + + v0 = vec_xor(v0, v4); + } + + /* Barrett Reduction */ + vconst1 = vec_ld(0, v_Barrett_const); + vconst2 = vec_ld(16, v_Barrett_const); + + v1 = (__vector unsigned long long)vec_sld((__vector unsigned char)v0, + (__vector unsigned char)v0, 8); + v0 = vec_xor(v1,v0); + +#ifdef REFLECT + /* shift left one bit */ + __vector unsigned char vsht_splat = vec_splat_u8 (1); + v0 = (__vector unsigned long long)vec_sll ((__vector unsigned char)v0, + vsht_splat); +#endif + + v0 = vec_and(v0, vmask_64bit); + +#ifndef REFLECT + + /* + * Now for the actual algorithm. The idea is to calculate q, + * the multiple of our polynomial that we need to subtract. By + * doing the computation 2x bits higher (ie 64 bits) and shifting the + * result back down 2x bits, we round down to the nearest multiple. + */ + + /* ma */ + v1 = __builtin_crypto_vpmsumd ((__vector unsigned long long)v0, + (__vector unsigned long long)vconst1); + /* q = floor(ma/(2^64)) */ + v1 = (__vector unsigned long long)vec_sld ((__vector unsigned char)vzero, + (__vector unsigned char)v1, 8); + /* qn */ + v1 = __builtin_crypto_vpmsumd ((__vector unsigned long long)v1, + (__vector unsigned long long)vconst2); + /* a - qn, subtraction is xor in GF(2) */ + v0 = vec_xor (v0, v1); + /* + * Get the result into r3. We need to shift it left 8 bytes: + * V0 [ 0 1 2 X ] + * V0 [ 0 X 2 3 ] + */ + result = __builtin_unpack_vector_1 (v0); +#else + + /* + * The reflected version of Barrett reduction. 
Instead of bit + * reflecting our data (which is expensive to do), we bit reflect our + * constants and our algorithm, which means the intermediate data in + * our vector registers goes from 0-63 instead of 63-0. We can reflect + * the algorithm because we don't carry in mod 2 arithmetic. + */ + + /* bottom 32 bits of a */ + v1 = vec_and(v0, vmask_32bit); + + /* ma */ + v1 = __builtin_crypto_vpmsumd ((__vector unsigned long long)v1, + (__vector unsigned long long)vconst1); + + /* bottom 32bits of ma */ + v1 = vec_and(v1, vmask_32bit); + /* qn */ + v1 = __builtin_crypto_vpmsumd ((__vector unsigned long long)v1, + (__vector unsigned long long)vconst2); + /* a - qn, subtraction is xor in GF(2) */ + v0 = vec_xor (v0, v1); + + /* + * Since we are bit reflected, the result (ie the low 32 bits) is in + * the high 32 bits. We just need to shift it left 4 bytes + * V0 [ 0 1 X 3 ] + * V0 [ 0 X 2 3 ] + */ + + /* shift result into top 64 bits of */ + v0 = (__vector unsigned long long)vec_sld((__vector unsigned char)v0, + (__vector unsigned char)vzero, 4); + + result = __builtin_unpack_vector_0 (v0); +#endif + + return result; +} diff --git a/extra/innochecksum.cc b/extra/innochecksum.cc index 6fb4154c6ca..a4d6c361809 100644 --- a/extra/innochecksum.cc +++ b/extra/innochecksum.cc @@ -98,6 +98,7 @@ extern ulong srv_checksum_algorithm; static ulint physical_page_size; /* Page size in bytes on disk. */ static ulint logical_page_size; /* Page size when uncompressed. */ ulong srv_page_size; +ulong srv_page_size_shift; page_size_t univ_page_size(0, 0, false); /* Current page number (0 based). */ unsigned long long cur_page_num; @@ -308,16 +309,16 @@ const page_size_t get_page_size( byte* buf) { - const ulint flags = mach_read_from_4(buf + FIL_PAGE_DATA + const unsigned flags = mach_read_from_4(buf + FIL_PAGE_DATA + FSP_SPACE_FLAGS); - const ulint ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags); + const ulong ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags); - if (ssize == 0) { - srv_page_size = UNIV_PAGE_SIZE_ORIG; - } else { - srv_page_size = ((UNIV_ZIP_SIZE_MIN >> 1) << ssize); - } + srv_page_size_shift = ssize + ? UNIV_ZIP_SIZE_SHIFT_MIN - 1 + ssize + : UNIV_PAGE_SIZE_SHIFT_ORIG; + + srv_page_size = 1U << srv_page_size_shift; univ_page_size.copy_from( page_size_t(srv_page_size, srv_page_size, false)); @@ -1703,7 +1704,6 @@ int main( ulint zip_size = page_size.is_compressed() ? page_size.logical() : 0; logical_page_size = page_size.is_compressed() ? 
zip_size : 0; physical_page_size = page_size.physical(); - srv_page_size = (ulong)page_size.logical(); bool is_compressed = FSP_FLAGS_HAS_PAGE_COMPRESSION(flags); if (page_size.physical() > UNIV_ZIP_SIZE_MIN) { diff --git a/extra/mariabackup/CMakeLists.txt b/extra/mariabackup/CMakeLists.txt index f92da3fd3fd..7df5fa17903 100644 --- a/extra/mariabackup/CMakeLists.txt +++ b/extra/mariabackup/CMakeLists.txt @@ -55,7 +55,7 @@ ELSE() SET(NT_SERVICE_SOURCE) ENDIF() -ADD_DEFINITIONS(-DPCRE_STATIC=1) +ADD_DEFINITIONS(-DPCRE_STATIC=1 -DHAVE_OPENSSL=1) MYSQL_ADD_EXECUTABLE(mariabackup xtrabackup.cc diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc index b15887fee4c..a0c5fbcca1b 100644 --- a/extra/mariabackup/backup_copy.cc +++ b/extra/mariabackup/backup_copy.cc @@ -57,8 +57,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA #include "backup_copy.h" #include "backup_mysql.h" #include -#include "xb0xb.h" - /* list of files to sync for --rsync mode */ static std::set rsync_list; @@ -966,6 +964,9 @@ copy_file(ds_ctxt_t *datasink, ds_file_t *dstfile = NULL; datafile_cur_t cursor; xb_fil_cur_result_t res; + const char *dst_path = + (xtrabackup_copy_back || xtrabackup_move_back)? + dst_file_path : trim_dotslash(dst_file_path); if (!datafile_open(src_file_path, &cursor, thread_n)) { goto error_close; @@ -973,8 +974,7 @@ copy_file(ds_ctxt_t *datasink, strncpy(dst_name, cursor.rel_path, sizeof(dst_name)); - dstfile = ds_open(datasink, trim_dotslash(dst_file_path), - &cursor.statinfo); + dstfile = ds_open(datasink, dst_path, &cursor.statinfo); if (dstfile == NULL) { msg("[%02u] error: " "cannot open the destination stream for %s\n", diff --git a/extra/mariabackup/backup_mysql.cc b/extra/mariabackup/backup_mysql.cc index 59569fc625d..3517d9136d4 100644 --- a/extra/mariabackup/backup_mysql.cc +++ b/extra/mariabackup/backup_mysql.cc @@ -48,6 +48,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA #include #include "common.h" #include "xtrabackup.h" +#include "srv0srv.h" #include "mysql_version.h" #include "backup_copy.h" #include "backup_mysql.h" @@ -93,7 +94,7 @@ time_t history_lock_time; MYSQL *mysql_connection; -my_bool opt_ssl_verify_server_cert; +extern my_bool opt_ssl_verify_server_cert, opt_use_ssl; MYSQL * xb_mysql_connect() @@ -480,7 +481,7 @@ get_mysql_vars(MYSQL *connection) innodb_data_file_path_var, MYF(MY_FAE)); } - if (innodb_data_home_dir_var && *innodb_data_home_dir_var) { + if (innodb_data_home_dir_var) { innobase_data_home_dir = my_strdup( innodb_data_home_dir_var, MYF(MY_FAE)); } @@ -897,16 +898,23 @@ DECLARE_THREAD(kill_mdl_waiters_thread(void *)) break; MYSQL_RES *result = xb_mysql_query(mysql, - "SELECT ID, COMMAND FROM INFORMATION_SCHEMA.PROCESSLIST " + "SELECT ID, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST " " WHERE State='Waiting for table metadata lock'", true, true); while (MYSQL_ROW row = mysql_fetch_row(result)) { char query[64]; - msg_ts("Killing MDL waiting query '%s' on connection '%s'\n", - row[1], row[0]); + + if (row[1] && !strcmp(row[1], "Killed")) + continue; + + msg_ts("Killing MDL waiting %s ('%s') on connection %s\n", + row[1], row[2], row[0]); snprintf(query, sizeof(query), "KILL QUERY %s", row[0]); - xb_mysql_query(mysql, query, true); + if (mysql_query(mysql, query) && (mysql_errno(mysql) != ER_NO_SUCH_THREAD)) { + msg("Error: failed to execute query %s: %s\n", query,mysql_error(mysql)); + exit(EXIT_FAILURE); + } } } @@ -1607,6 +1615,44 @@ cleanup: extern const char *innodb_checksum_algorithm_names[]; +#ifdef _WIN32 +#include +#endif + 
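The make_local_paths() helper added just below rewrites innodb_data_file_path so that the value written to backup-my.cnf carries only file names rather than the server's original directories: the string is split on ';', each entry keeps only what follows its last path separator ('/', or '\' on Windows), and a value with no directory component is returned unchanged, so the entries resolve against the backup directory itself. A hypothetical illustration, not part of the patch:

/* expected behaviour, hypothetical values:
 *   make_local_paths("/bulk/ibdata1:1G:autoextend;/bulk/ibdata2:1G")
 *     returns "ibdata1:1G:autoextend;ibdata2:1G"
 *   make_local_paths("ibdata1:12M:autoextend")
 *     returns "ibdata1:12M:autoextend"  (no path separator, returned as-is)
 */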
+static std::string make_local_paths(const char *data_file_path) +{ + if (strchr(data_file_path, '/') == 0 +#ifdef _WIN32 + && strchr(data_file_path, '\\') == 0 +#endif + ){ + return std::string(data_file_path); + } + + std::ostringstream buf; + + char *dup = strdup(innobase_data_file_path); + ut_a(dup); + char *p; + char * token = strtok_r(dup, ";", &p); + while (token) { + if (buf.tellp()) + buf << ";"; + + char *fname = strrchr(token, '/'); +#ifdef _WIN32 + fname = std::max(fname,strrchr(token, '\\')); +#endif + if (fname) + buf << fname + 1; + else + buf << token; + token = strtok_r(NULL, ";", &p); + } + free(dup); + return buf.str(); +} + bool write_backup_config_file() { int rc= backup_file_printf("backup-my.cnf", @@ -1623,7 +1669,7 @@ bool write_backup_config_file() "%s%s\n" "%s\n", innodb_checksum_algorithm_names[srv_checksum_algorithm], - innobase_data_file_path, + make_local_paths(innobase_data_file_path).c_str(), srv_n_log_files, srv_log_file_size, srv_page_size, diff --git a/extra/mariabackup/changed_page_bitmap.cc b/extra/mariabackup/changed_page_bitmap.cc index 46bb3a7bcb5..b704c3a063d 100644 --- a/extra/mariabackup/changed_page_bitmap.cc +++ b/extra/mariabackup/changed_page_bitmap.cc @@ -26,6 +26,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA #include "common.h" #include "xtrabackup.h" +#include "srv0srv.h" /* TODO: copy-pasted shared definitions from the XtraDB bitmap write code. Remove these on the first opportunity, i.e. single-binary XtraBackup. */ diff --git a/extra/mariabackup/crc/crc-intel-pclmul.c b/extra/mariabackup/crc/crc-intel-pclmul.c index d470c2bee43..501ca71d739 100644 --- a/extra/mariabackup/crc/crc-intel-pclmul.c +++ b/extra/mariabackup/crc/crc-intel-pclmul.c @@ -57,7 +57,7 @@ typedef uint8_t byte; #if __GNUC__ >= 4 && defined(__x86_64__) && defined(HAVE_CLMUL_INSTRUCTION) -#if _GCRY_GCC_VERSION >= 40400 /* 4.4 */ +#if defined(_GCRY_GCC_VERSION) && _GCRY_GCC_VERSION >= 40400 /* 4.4 */ /* Prevent compiler from issuing SSE instructions between asm blocks. */ # pragma GCC target("no-sse") #endif diff --git a/extra/mariabackup/ds_tmpfile.c b/extra/mariabackup/ds_tmpfile.c index 443b3703b20..0e5decc5a6e 100644 --- a/extra/mariabackup/ds_tmpfile.c +++ b/extra/mariabackup/ds_tmpfile.c @@ -91,22 +91,8 @@ tmpfile_open(ds_ctxt_t *ctxt, const char *path, /* Create a temporary file in tmpdir. The file will be automatically removed on close. Code copied from mysql_tmpfile(). */ fd = create_temp_file(tmp_path,xtrabackup_tmpdir, - "xbtemp", -#ifdef __WIN__ - O_BINARY | O_TRUNC | O_SEQUENTIAL | - O_TEMPORARY | O_SHORT_LIVED | -#endif /* __WIN__ */ - O_CREAT | O_EXCL | O_RDWR, - MYF(MY_WME)); - -#ifndef __WIN__ - if (fd >= 0) { - /* On Windows, open files cannot be removed, but files can be - created with the O_TEMPORARY flag to the same effect - ("delete on close"). 
*/ - unlink(tmp_path); - } -#endif /* !__WIN__ */ + "xbtemp", O_BINARY | O_SEQUENTIAL, + MYF(MY_WME | MY_TEMPORARY)); if (fd < 0) { return NULL; diff --git a/extra/mariabackup/fil_cur.cc b/extra/mariabackup/fil_cur.cc index 3aa501c8447..799405b3d42 100644 --- a/extra/mariabackup/fil_cur.cc +++ b/extra/mariabackup/fil_cur.cc @@ -33,7 +33,6 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA #include "common.h" #include "read_filt.h" #include "xtrabackup.h" -#include "xb0xb.h" /* Size of read buffer in pages (640 pages = 10M for 16K sized pages) */ #define XB_FIL_CUR_PAGES 640 @@ -218,9 +217,9 @@ xb_fil_cur_open( /* Allocate read buffer */ cursor->buf_size = XB_FIL_CUR_PAGES * page_size.physical(); cursor->orig_buf = static_cast - (malloc(cursor->buf_size + UNIV_PAGE_SIZE)); + (malloc(cursor->buf_size + srv_page_size)); cursor->buf = static_cast - (ut_align(cursor->orig_buf, UNIV_PAGE_SIZE)); + (ut_align(cursor->orig_buf, srv_page_size)); cursor->buf_read = 0; cursor->buf_npages = 0; @@ -258,7 +257,7 @@ xb_fil_cur_read( ib_int64_t offset; ib_int64_t to_read; const ulint page_size = cursor->page_size.physical(); - xb_ad(!cursor->is_system() || page_size == UNIV_PAGE_SIZE); + xb_ad(!cursor->is_system() || page_size == srv_page_size); cursor->read_filter->get_next_batch(&cursor->read_filter_ctxt, &offset, &to_read); diff --git a/extra/mariabackup/fil_cur.h b/extra/mariabackup/fil_cur.h index e3f356a346c..6e0dda8c885 100644 --- a/extra/mariabackup/fil_cur.h +++ b/extra/mariabackup/fil_cur.h @@ -28,6 +28,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA #include #include "read_filt.h" #include "srv0start.h" +#include "srv0srv.h" struct xb_fil_cur_t { pfs_os_file_t file; /*!< source file handle */ diff --git a/extra/mariabackup/xb0xb.h b/extra/mariabackup/xb0xb.h deleted file mode 100644 index 59938a014c6..00000000000 --- a/extra/mariabackup/xb0xb.h +++ /dev/null @@ -1,27 +0,0 @@ -/****************************************************** -Copyright (c) 2012 Percona LLC and/or its affiliates. - -Declarations of XtraBackup functions called by InnoDB code. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -*******************************************************/ - -#ifndef xb0xb_h -#define xb0xb_h - -extern const char *innodb_checksum_algorithm_names[]; -extern TYPELIB innodb_checksum_algorithm_typelib; - -#endif diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 745a6adf8b9..179172d3629 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -4,7 +4,7 @@ MariaBackup: hot backup tool for InnoDB Originally Created 3/3/2009 Yasufumi Kinoshita Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko, Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz. -(c) 2017, MariaDB Corporation. +(c) 2017, 2018, MariaDB Corporation. Portions written by Marko Mäkelä. 
This program is free software; you can redistribute it and/or modify @@ -97,7 +97,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA #include "backup_mysql.h" #include "backup_copy.h" #include "backup_mysql.h" -#include "xb0xb.h" #include "encryption_plugin.h" #include #include @@ -196,6 +195,10 @@ static ulong max_buf_pool_modified_pct; /* Ignored option (--log) for MySQL option compatibility */ static char* log_ignored_opt; + +extern my_bool opt_use_ssl; +my_bool opt_ssl_verify_server_cert; + /* === metadata of backup === */ #define XTRABACKUP_METADATA_FILENAME "xtrabackup_checkpoints" char metadata_type[30] = ""; /*[full-backuped|log-applied|incremental]*/ @@ -223,7 +226,6 @@ long innobase_buffer_pool_awe_mem_mb = 0; long innobase_file_io_threads = 4; long innobase_read_io_threads = 4; long innobase_write_io_threads = 4; -long innobase_log_buffer_size = 1024*1024L; longlong innobase_page_size = (1LL << 14); /* 16KB */ char* innobase_buffer_pool_filename = NULL; @@ -236,9 +238,6 @@ are determined in innobase_init below: */ static char* innobase_ignored_opt; char* innobase_data_home_dir; char* innobase_data_file_path; -/* The following has a misleading name: starting from 4.0.5, this also -affects Windows: */ -char* innobase_unix_file_flush_method; my_bool innobase_use_doublewrite; my_bool innobase_use_large_pages; @@ -301,6 +300,11 @@ my_bool opt_remove_original; my_bool opt_lock_ddl_per_table = FALSE; +extern const char *innodb_checksum_algorithm_names[]; +extern TYPELIB innodb_checksum_algorithm_typelib; +extern const char *innodb_flush_method_names[]; +extern TYPELIB innodb_flush_method_typelib; + static const char *binlog_info_values[] = {"off", "lockless", "on", "auto", NullS}; static TYPELIB binlog_info_typelib = {array_elements(binlog_info_values)-1, "", @@ -334,9 +338,6 @@ uint opt_safe_slave_backup_timeout = 0; const char *opt_history = NULL; -#if defined(HAVE_OPENSSL) -my_bool opt_ssl_verify_server_cert = FALSE; -#endif char mariabackup_exe[FN_REFLEN]; char orig_argv1[FN_REFLEN]; @@ -589,6 +590,7 @@ typedef struct { } data_thread_ctxt_t; /* ======== for option and variables ======== */ +#include <../../client/client_priv.h> enum options_xtrabackup { @@ -621,13 +623,10 @@ enum options_xtrabackup OPT_INNODB_ADAPTIVE_HASH_INDEX, OPT_INNODB_DOUBLEWRITE, OPT_INNODB_FILE_PER_TABLE, - OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT, OPT_INNODB_FLUSH_METHOD, - OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG, OPT_INNODB_LOG_GROUP_HOME_DIR, OPT_INNODB_MAX_DIRTY_PAGES_PCT, OPT_INNODB_MAX_PURGE_LAG, - OPT_INNODB_ROLLBACK_ON_TIMEOUT, OPT_INNODB_STATUS_FILE, OPT_INNODB_AUTOEXTEND_INCREMENT, OPT_INNODB_BUFFER_POOL_SIZE, @@ -652,8 +651,6 @@ enum options_xtrabackup OPT_INNODB_LOG_CHECKSUMS, OPT_XTRA_INCREMENTAL_FORCE_SCAN, OPT_DEFAULTS_GROUP, - OPT_OPEN_FILES_LIMIT, - OPT_PLUGIN_DIR, OPT_PLUGIN_LOAD, OPT_INNODB_ENCRYPT_LOG, OPT_CLOSE_FILES, @@ -1033,9 +1030,9 @@ struct my_option xb_client_options[] = {"secure-auth", OPT_XB_SECURE_AUTH, "Refuse client connecting to server if it" " uses old (pre-4.1.1) protocol.", &opt_secure_auth, &opt_secure_auth, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, - +#define MYSQL_CLIENT #include "sslopt-longopts.h" - +#undef MYSQL_CLIENT { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -1128,14 +1125,15 @@ struct my_option xb_server_options[] = FALSE, 0, 0, 0, 0, 0}, {"innodb_flush_method", OPT_INNODB_FLUSH_METHOD, - "With which method to flush data.", (G_PTR*) &innobase_unix_file_flush_method, - (G_PTR*) &innobase_unix_file_flush_method, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, - 0, 0, 
0}, + "With which method to flush data.", + &srv_file_flush_method, &srv_file_flush_method, + &innodb_flush_method_typelib, GET_ENUM, REQUIRED_ARG, + IF_WIN(SRV_ALL_O_DIRECT_FSYNC, SRV_FSYNC), 0, 0, 0, 0, 0}, {"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE, "The size of the buffer which InnoDB uses to write log to the log files on disk.", - (G_PTR*) &innobase_log_buffer_size, (G_PTR*) &innobase_log_buffer_size, 0, - GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, LONG_MAX, 0, 1024, 0}, + (G_PTR*) &srv_log_buffer_size, (G_PTR*) &srv_log_buffer_size, 0, + GET_ULONG, REQUIRED_ARG, 1024*1024L, 256*1024L, LONG_MAX, 0, 1024, 0}, {"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE, "Ignored for mysqld option compatibility", (G_PTR*) &srv_log_file_size, (G_PTR*) &srv_log_file_size, 0, @@ -1480,8 +1478,9 @@ xb_get_one_option(int optid, break; case OPT_INNODB_FLUSH_METHOD: - - ADD_PRINT_PARAM_OPT(innobase_unix_file_flush_method); + ut_a(srv_file_flush_method + <= IF_WIN(SRV_ALL_O_DIRECT_FSYNC, SRV_O_DIRECT_NO_FSYNC)); + ADD_PRINT_PARAM_OPT(innodb_flush_method_names[srv_file_flush_method]); break; case OPT_INNODB_PAGE_SIZE: @@ -1571,7 +1570,9 @@ xb_get_one_option(int optid, } } break; +#define MYSQL_CLIENT #include "sslopt-case.h" +#undef MYSQL_CLIENT case '?': usage(); @@ -1587,15 +1588,14 @@ xb_get_one_option(int optid, return 0; } -static my_bool -innodb_init_param(void) +static bool innodb_init_param() { srv_is_being_started = TRUE; /* === some variables from mysqld === */ memset((G_PTR) &mysql_tmpdir_list, 0, sizeof(mysql_tmpdir_list)); if (init_tmpdir(&mysql_tmpdir_list, opt_mysql_tmpdir)) - exit(EXIT_FAILURE); + return true; xtrabackup_tmpdir = my_tmpdir(&mysql_tmpdir_list); /* dummy for initialize all_charsets[] */ get_charset_name(0); @@ -1607,21 +1607,21 @@ innodb_init_param(void) #endif /* BTR_CUR_HASH_ADAPT */ if (innobase_page_size != (1LL << 14)) { - int n_shift = (int)get_bit_shift((ulint) innobase_page_size); + size_t n_shift = get_bit_shift(size_t(innobase_page_size)); if (n_shift >= 12 && n_shift <= UNIV_PAGE_SIZE_SHIFT_MAX) { - srv_page_size_shift = n_shift; - srv_page_size = 1 << n_shift; + srv_page_size_shift = ulong(n_shift); + srv_page_size = 1U << n_shift; msg("InnoDB: The universal page size of the " "database is set to %lu.\n", srv_page_size); } else { msg("InnoDB: Error: invalid value of " "innobase_page_size: %lld", innobase_page_size); - exit(EXIT_FAILURE); + goto error; } } else { srv_page_size_shift = 14; - srv_page_size = (1 << srv_page_size_shift); + srv_page_size = 1U << 14; } /* Check that values don't overflow on 32-bit systems. */ @@ -1684,6 +1684,9 @@ innodb_init_param(void) goto error; } + srv_sys_space.normalize_size(); + srv_lock_table_size = 5 * (srv_buf_pool_size >> srv_page_size_shift); + /* -------------- Log files ---------------------------*/ /* The default dir for log files is the datadir of MySQL */ @@ -1707,16 +1710,13 @@ innodb_init_param(void) srv_adaptive_flushing = FALSE; - srv_file_flush_method_str = innobase_unix_file_flush_method; - - srv_log_buffer_size = (ulint) innobase_log_buffer_size; - /* We set srv_pool_size here in units of 1 kB. InnoDB internally changes the value so that it becomes the number of database pages. 
*/ srv_buf_pool_size = (ulint) xtrabackup_use_memory; srv_buf_pool_chunk_unit = (ulong)srv_buf_pool_size; srv_buf_pool_instances = 1; + srv_n_page_cleaners = 1; srv_n_file_io_threads = (ulint) innobase_file_io_threads; srv_n_read_io_threads = (ulint) innobase_read_io_threads; @@ -1732,7 +1732,7 @@ innodb_init_param(void) srv_locks_unsafe_for_binlog = (ibool) innobase_locks_unsafe_for_binlog; - srv_max_n_open_files = ULINT_UNDEFINED; + srv_max_n_open_files = ULINT_UNDEFINED - 5; srv_innodb_status = (ibool) innobase_create_status_file; srv_print_verbose_log = 1; @@ -1743,20 +1743,7 @@ innodb_init_param(void) /* We cannot treat characterset here for now!! */ data_mysql_default_charset_coll = (ulint)default_charset_info->number; - ut_a(DATA_MYSQL_BINARY_CHARSET_COLL == my_charset_bin.number); - - //innobase_commit_concurrency_init_default(); - - /* Since we in this module access directly the fields of a trx - struct, and due to different headers and flags it might happen that - mutex_t has a different size in this module and in InnoDB - modules, we check at run time that the size is the same in - these compilation modules. */ - - /* On 5.5+ srv_use_native_aio is TRUE by default. It is later reset - if it is not supported by the platform in - innobase_start_or_create_for_mysql(). As we don't call it in xtrabackup, - we have to duplicate checks from that function here. */ + ut_ad(DATA_MYSQL_BINARY_CHARSET_COLL == my_charset_bin.number); #ifdef _WIN32 srv_use_native_aio = TRUE; @@ -1789,16 +1776,26 @@ innodb_init_param(void) ? log_block_calc_checksum_crc32 : log_block_calc_checksum_none; - return(FALSE); +#ifdef _WIN32 + srv_use_native_aio = TRUE; +#endif + return false; error: msg("mariabackup: innodb_init_param(): Error occured.\n"); - return(TRUE); + return true; } static bool innodb_init() { - dberr_t err = innobase_start_or_create_for_mysql(); + bool create_new_db = false; + /* Check if the data files exist or not. */ + dberr_t err = srv_sys_space.check_file_spec(&create_new_db, 5U << 20); + + if (err == DB_SUCCESS) { + err = srv_start(create_new_db); + } + if (err != DB_SUCCESS) { msg("mariabackup: innodb_init() returned %d (%s).\n", err, ut_strerr(err)); @@ -2466,7 +2463,7 @@ lsn_t xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn) { lsn_t scanned_lsn = start_lsn; - const byte* log_block = log_sys->buf; + const byte* log_block = log_sys.buf; bool more_data = false; for (ulint scanned_checkpoint = 0; @@ -2515,7 +2512,7 @@ xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn) recv_sys_justify_left_parsing_buf(); - log_sys->log.scanned_lsn = scanned_lsn; + log_sys.log.scanned_lsn = scanned_lsn; end_lsn = copy == COPY_LAST ? 
ut_uint64_align_up(scanned_lsn, OS_FILE_LOG_BLOCK_SIZE) @@ -2523,10 +2520,10 @@ xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn) if (ulint write_size = ulint(end_lsn - start_lsn)) { if (srv_encrypt_log) { - log_crypt(log_sys->buf, start_lsn, write_size); + log_crypt(log_sys.buf, start_lsn, write_size); } - if (ds_write(dst_log_file, log_sys->buf, write_size)) { + if (ds_write(dst_log_file, log_sys.buf, write_size)) { msg("mariabackup: Error: " "write to logfile failed\n"); return(0); @@ -2565,8 +2562,7 @@ xtrabackup_copy_logfile(copy_logfile copy) lsn_t lsn= start_lsn; for(int retries= 0; retries < 100; retries++) { - if (log_group_read_log_seg(log_sys->buf, &log_sys->log, - &lsn, end_lsn)){ + if (log_sys.log.read_log_seg(&lsn, end_lsn)) { break; } msg("Retrying read of a redo log block"); @@ -2586,7 +2582,7 @@ xtrabackup_copy_logfile(copy_logfile copy) } } while (start_lsn == end_lsn); - ut_ad(start_lsn == log_sys->log.scanned_lsn); + ut_ad(start_lsn == log_sys.log.scanned_lsn); msg_ts(">> log scanned up to (" LSN_PF ")\n", start_lsn); @@ -3036,7 +3032,7 @@ static dberr_t xb_assign_undo_space_start() byte* page; bool ret; dberr_t error = DB_SUCCESS; - ulint space, page_no; + ulint space, page_no __attribute__((unused)); if (srv_undo_tablespaces == 0) { return error; @@ -3050,7 +3046,7 @@ static dberr_t xb_assign_undo_space_start() name[dirnamelen++] = OS_PATH_SEPARATOR; } - snprintf(name + dirnamelen, strlen(name) + strlen("ibdata1"), + snprintf(name + dirnamelen, (sizeof name) - dirnamelen, "%s", "ibdata1"); file = os_file_create(0, name, OS_FILE_OPEN, @@ -3061,12 +3057,13 @@ static dberr_t xb_assign_undo_space_start() return DB_ERROR; } - buf = static_cast(ut_malloc_nokey(2 * UNIV_PAGE_SIZE)); - page = static_cast(ut_align(buf, UNIV_PAGE_SIZE)); + buf = static_cast(ut_malloc_nokey(2U << srv_page_size_shift)); + page = static_cast(ut_align(buf, srv_page_size)); retry: - if (!os_file_read(IORequestRead, file, page, TRX_SYS_PAGE_NO * UNIV_PAGE_SIZE, - UNIV_PAGE_SIZE)) { + if (!os_file_read(IORequestRead, file, page, + TRX_SYS_PAGE_NO << srv_page_size_shift, + srv_page_size)) { msg("mariabackup: Reading TRX_SYS page failed.\n"); error = DB_ERROR; goto func_exit; @@ -3582,19 +3579,6 @@ open_or_create_log_file( return(DB_SUCCESS); } -/*********************************************************************//** -Normalizes init parameter values to use units we use inside InnoDB. -@return DB_SUCCESS or error code */ -static -void -xb_normalize_init_values(void) -/*==========================*/ -{ - srv_sys_space.normalize(); - srv_log_buffer_size /= UNIV_PAGE_SIZE; - srv_lock_table_size = 5 * (srv_buf_pool_size / UNIV_PAGE_SIZE); -} - /*********************************************************************** Set the open files limit. Based on set_max_open_files(). 
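The xb_normalize_init_values() function removed just above computed srv_lock_table_size as 5 * (srv_buf_pool_size / UNIV_PAGE_SIZE); the patch moves that computation into innodb_init_param() (earlier in this file) as 5 * (srv_buf_pool_size >> srv_page_size_shift). The two forms agree as long as srv_page_size equals 1 << srv_page_size_shift, which innodb_init_param() now establishes. A small self-contained illustration, assuming the default 16 KiB page size:

#include <cassert>

int main()
{
    unsigned long srv_page_size_shift = 14;              /* 16 KiB pages */
    unsigned long srv_page_size = 1UL << srv_page_size_shift;
    unsigned long srv_buf_pool_size = 128UL << 20;       /* 128 MiB in bytes */

    /* Old form: divide by the page size. New form: shift by the exponent. */
    unsigned long old_way = 5 * (srv_buf_pool_size / srv_page_size);
    unsigned long new_way = 5 * (srv_buf_pool_size >> srv_page_size_shift);
    assert(old_way == new_way);                          /* both are 40960 */
    return 0;
}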
@@ -3689,9 +3673,9 @@ xtrabackup_backup_low() log_mutex_enter(); if (recv_find_max_checkpoint(&max_cp_field) == DB_SUCCESS - && log_sys->log.format != 0) { + && log_sys.log.format != 0) { metadata_to_lsn = mach_read_from_8( - log_sys->checkpoint_buf + LOG_CHECKPOINT_LSN); + log_sys.checkpoint_buf + LOG_CHECKPOINT_LSN); msg("mariabackup: The latest check point" " (for incremental): '" LSN_PF "'\n", metadata_to_lsn); @@ -3804,42 +3788,6 @@ fail: return(false); } - xb_normalize_init_values(); - - - if (srv_file_flush_method_str == NULL) { - /* These are the default options */ - srv_file_flush_method = SRV_FSYNC; - } else if (0 == ut_strcmp(srv_file_flush_method_str, "fsync")) { - srv_file_flush_method = SRV_FSYNC; - } else if (0 == ut_strcmp(srv_file_flush_method_str, "O_DSYNC")) { - srv_file_flush_method = SRV_O_DSYNC; - - } else if (0 == ut_strcmp(srv_file_flush_method_str, "O_DIRECT")) { - srv_file_flush_method = SRV_O_DIRECT; - msg("mariabackup: using O_DIRECT\n"); - } else if (0 == ut_strcmp(srv_file_flush_method_str, "littlesync")) { - srv_file_flush_method = SRV_LITTLESYNC; - } else if (0 == ut_strcmp(srv_file_flush_method_str, "nosync")) { - srv_file_flush_method = SRV_NOSYNC; - } else if (0 == ut_strcmp(srv_file_flush_method_str, "ALL_O_DIRECT")) { - srv_file_flush_method = SRV_ALL_O_DIRECT_FSYNC; - msg("mariabackup: using ALL_O_DIRECT\n"); - } else if (0 == ut_strcmp(srv_file_flush_method_str, - "O_DIRECT_NO_FSYNC")) { - srv_file_flush_method = SRV_O_DIRECT_NO_FSYNC; - msg("mariabackup: using O_DIRECT_NO_FSYNC\n"); - } else { - msg("mariabackup: Unrecognized value %s for " - "innodb_flush_method\n", srv_file_flush_method_str); - goto fail; - } - -#ifdef _WIN32 - srv_file_flush_method = SRV_ALL_O_DIRECT_FSYNC; - srv_use_native_aio = TRUE; -#endif - if (srv_buf_pool_size >= 1000 * 1024 * 1024) { /* Here we still have srv_pool_size counted in kilobytes (in 4.0 this was in bytes) @@ -3887,8 +3835,8 @@ fail: os_aio_init(srv_n_read_io_threads, srv_n_write_io_threads, SRV_MAX_N_PENDING_SYNC_IOS); - log_sys_init(); - log_init(srv_n_log_files); + log_sys.create(); + log_sys.log.create(srv_n_log_files); fil_space_t* space = fil_space_create( "innodb_redo_log", SRV_LOG_SPACE_FIRST_ID, 0, FIL_TYPE_LOG, NULL); @@ -3963,7 +3911,7 @@ log_fail: goto fail; } - if (log_sys->log.format == 0) { + if (log_sys.log.format == 0) { old_format: msg("mariabackup: Error: cannot process redo log" " before MariaDB 10.2.2\n"); @@ -3971,14 +3919,14 @@ old_format: goto log_fail; } - ut_ad(!((log_sys->log.format ^ LOG_HEADER_FORMAT_CURRENT) + ut_ad(!((log_sys.log.format ^ LOG_HEADER_FORMAT_CURRENT) & ~LOG_HEADER_FORMAT_ENCRYPTED)); - const byte* buf = log_sys->checkpoint_buf; + const byte* buf = log_sys.checkpoint_buf; reread_log_header: - checkpoint_lsn_start = log_sys->log.lsn; - checkpoint_no_start = log_sys->next_checkpoint_no; + checkpoint_lsn_start = log_sys.log.lsn; + checkpoint_no_start = log_sys.next_checkpoint_no; err = recv_find_max_checkpoint(&max_cp_field); @@ -3986,14 +3934,14 @@ reread_log_header: goto log_fail; } - if (log_sys->log.format == 0) { + if (log_sys.log.format == 0) { goto old_format; } - ut_ad(!((log_sys->log.format ^ LOG_HEADER_FORMAT_CURRENT) + ut_ad(!((log_sys.log.format ^ LOG_HEADER_FORMAT_CURRENT) & ~LOG_HEADER_FORMAT_ENCRYPTED)); - log_group_header_read(&log_sys->log, max_cp_field); + log_header_read(max_cp_field); if (checkpoint_no_start != mach_read_from_8(buf + LOG_CHECKPOINT_NO)) { goto reread_log_header; @@ -4019,7 +3967,7 @@ reread_log_header: /* label it */ byte 
MY_ALIGNED(OS_FILE_LOG_BLOCK_SIZE) log_hdr[OS_FILE_LOG_BLOCK_SIZE]; memset(log_hdr, 0, sizeof log_hdr); - mach_write_to_4(LOG_HEADER_FORMAT + log_hdr, log_sys->log.format); + mach_write_to_4(LOG_HEADER_FORMAT + log_hdr, log_sys.log.format); mach_write_to_8(LOG_HEADER_START_LSN + log_hdr, checkpoint_lsn_start); strcpy(reinterpret_cast(LOG_HEADER_CREATOR + log_hdr), "Backup " MYSQL_SERVER_VERSION); @@ -4248,7 +4196,8 @@ xb_space_create_file( } ret = os_file_set_size(path, *file, - FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE); + FIL_IBD_FILE_INITIAL_SIZE + << srv_page_size_shift); if (!ret) { msg("mariabackup: cannot set size for file %s\n", path); os_file_close(*file); @@ -4256,11 +4205,11 @@ xb_space_create_file( return ret; } - buf = static_cast(malloc(3 * UNIV_PAGE_SIZE)); + buf = static_cast(malloc(3U << srv_page_size_shift)); /* Align the memory for file i/o if we might have O_DIRECT set */ - page = static_cast(ut_align(buf, UNIV_PAGE_SIZE)); + page = static_cast(ut_align(buf, srv_page_size)); - memset(page, '\0', UNIV_PAGE_SIZE); + memset(page, '\0', srv_page_size); fsp_header_init_fields(page, space_id, flags); mach_write_to_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, space_id); @@ -4271,12 +4220,12 @@ xb_space_create_file( buf_flush_init_for_writing(NULL, page, NULL, 0); ret = os_file_write(IORequestWrite, path, *file, page, 0, - UNIV_PAGE_SIZE); + srv_page_size); } else { page_zip_des_t page_zip; ulint zip_size = page_size.physical(); page_zip_set_size(&page_zip, zip_size); - page_zip.data = page + UNIV_PAGE_SIZE; + page_zip.data = page + srv_page_size; fprintf(stderr, "zip_size = " ULINTPF "\n", zip_size); #ifdef UNIV_DEBUG @@ -4562,8 +4511,6 @@ xtrabackup_apply_delta( posix_fadvise(src_file, 0, 0, POSIX_FADV_SEQUENTIAL); - os_file_set_nocache(src_file, src_path, "OPEN"); - dst_file = xb_delta_open_matching_space( dbname, space_name, info, dst_path, sizeof(dst_path), &success); @@ -4574,8 +4521,6 @@ xtrabackup_apply_delta( posix_fadvise(dst_file, 0, 0, POSIX_FADV_DONTNEED); - os_file_set_nocache(dst_file, dst_path, "OPEN"); - /* allocate buffer for incremental backup (4096 pages) */ incremental_buffer_base = static_cast (malloc((page_size / 4 + 1) * page_size)); @@ -4658,7 +4603,7 @@ xtrabackup_apply_delta( n_pages * page_size)) goto error; } else if (fil_space_t* space - = fil_space_acquire(0)) { + = fil_system.sys_space) { /* The system tablespace can consist of multiple files. The first one has full tablespace @@ -4669,7 +4614,6 @@ xtrabackup_apply_delta( bool fail = !strcmp(n->name, dst_path) && !fil_space_extend( space, (ulint)n_pages); - fil_space_release(space); if (fail) goto error; } } @@ -4681,6 +4625,13 @@ xtrabackup_apply_delta( } } + /* Free file system buffer cache after the batch was written. 
*/ +#ifdef __linux__ + os_file_flush_func(dst_file); +#endif + posix_fadvise(dst_file, 0, 0, POSIX_FADV_DONTNEED); + + incremental_buffers++; } @@ -4998,12 +4949,11 @@ xtrabackup_prepare_func(char** argv) goto error_cleanup; } - xb_normalize_init_values(); sync_check_init(); ut_d(sync_check_enable()); ut_crc32_init(); recv_sys_init(); - log_sys_init(); + log_sys.create(); recv_recovery_on = true; #ifdef WITH_INNODB_DISALLOW_WRITES @@ -5037,7 +4987,7 @@ xtrabackup_prepare_func(char** argv) os_event_destroy(srv_allow_writes_event); #endif innodb_free_param(); - log_shutdown(); + log_sys.close(); sync_check_close(); if (!ok) goto error_cleanup; } diff --git a/extra/replace.c b/extra/replace.c index eabf953837b..4086f22d129 100644 --- a/extra/replace.c +++ b/extra/replace.c @@ -1072,7 +1072,7 @@ static int convert_file(REPLACE *rep, char * name) if (!(in= my_fopen(org_name,O_RDONLY,MYF(MY_WME)))) DBUG_RETURN(1); dirname_part(dir_buff, org_name, &dir_buff_length); - if ((temp_file= create_temp_file(tempname, dir_buff, "PR", O_WRONLY, + if ((temp_file= create_temp_file(tempname, dir_buff, "PR", 0, MYF(MY_WME))) < 0) { my_fclose(in,MYF(0)); diff --git a/include/hash.h b/include/hash.h index 75ad0b6d9a0..068700aaec6 100644 --- a/include/hash.h +++ b/include/hash.h @@ -72,7 +72,7 @@ my_bool my_hash_init2(HASH *hash, uint growth_size, CHARSET_INFO *charset, uint flags); void my_hash_free(HASH *tree); void my_hash_reset(HASH *hash); -uchar *my_hash_element(HASH *hash, ulong idx); +uchar *my_hash_element(HASH *hash, size_t idx); uchar *my_hash_search(const HASH *info, const uchar *key, size_t length); uchar *my_hash_search_using_hash_value(const HASH *info, my_hash_value_type hash_value, diff --git a/include/heap.h b/include/heap.h index e92f649b87b..2bbbc635238 100644 --- a/include/heap.h +++ b/include/heap.h @@ -217,7 +217,7 @@ extern int heap_write(HP_INFO *info,const uchar *buff); extern int heap_update(HP_INFO *info,const uchar *old,const uchar *newdata); extern int heap_rrnd(HP_INFO *info,uchar *buf,uchar *pos); extern int heap_scan_init(HP_INFO *info); -extern int heap_scan(register HP_INFO *info, uchar *record); +extern int heap_scan(HP_INFO *info, uchar *record); extern int heap_delete(HP_INFO *info,const uchar *buff); extern int heap_info(HP_INFO *info,HEAPINFO *x,int flag); extern int heap_create(const char *name, diff --git a/include/m_ctype.h b/include/m_ctype.h index 8302c85c0e7..fc51ebd3c8b 100644 --- a/include/m_ctype.h +++ b/include/m_ctype.h @@ -1093,7 +1093,7 @@ my_well_formed_length(CHARSET_INFO *cs, const char *b, const char *e, MY_STRCOPY_STATUS status; (void) cs->cset->well_formed_char_length(cs, b, e, nchars, &status); *error= status.m_well_formed_error_pos == NULL ? 
0 : 1; - return status.m_source_end_pos - b; + return (size_t) (status.m_source_end_pos - b); } diff --git a/include/maria.h b/include/maria.h index f88ad610213..5bdd166287e 100644 --- a/include/maria.h +++ b/include/maria.h @@ -366,7 +366,7 @@ int maria_sort_index(HA_CHECK *param, MARIA_HA *info, char * name); int maria_zerofill(HA_CHECK *param, MARIA_HA *info, const char *name); int maria_repair_by_sort(HA_CHECK *param, MARIA_HA *info, const char *name, my_bool rep_quick); -int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info, +int maria_repair_parallel(HA_CHECK *param, MARIA_HA *info, const char *name, my_bool rep_quick); int maria_change_to_newfile(const char *filename, const char *old_ext, const char *new_ext, time_t backup_time, diff --git a/include/my_base.h b/include/my_base.h index 63a9710c306..c36072c0bfa 100644 --- a/include/my_base.h +++ b/include/my_base.h @@ -647,7 +647,7 @@ typedef ulong ha_rows; #define HA_POS_ERROR (~ (ha_rows) 0) #define HA_OFFSET_ERROR (~ (my_off_t) 0) -#if SYSTEM_SIZEOF_OFF_T == 4 +#if SIZEOF_OFF_T == 4 #define MAX_FILE_SIZE INT_MAX32 #else #define MAX_FILE_SIZE LONGLONG_MAX diff --git a/include/my_dir.h b/include/my_dir.h index af4e640c96a..a4024516c91 100644 --- a/include/my_dir.h +++ b/include/my_dir.h @@ -76,7 +76,7 @@ typedef struct my_stat #else -#if(_MSC_VER) +#if defined(_MSC_VER) #define MY_STAT struct _stati64 /* 64 bit file size */ #else #define MY_STAT struct stat /* Original struct has what we need */ diff --git a/include/my_global.h b/include/my_global.h index b32389d061f..7bf0ee43186 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -189,15 +189,6 @@ #define __builtin_expect(x, expected_value) (x) #endif -/** - The semantics of builtin_expect() are that - 1) its two arguments are long - 2) it's likely that they are == - Those of our likely(x) are that x can be bool/int/longlong/pointer. -*/ -#define likely(x) __builtin_expect(((x) != 0),1) -#define unlikely(x) __builtin_expect(((x) != 0),0) - /* Fix problem with S_ISLNK() on Linux */ #if defined(TARGET_OS_LINUX) || defined(__GLIBC__) #undef _GNU_SOURCE @@ -384,6 +375,36 @@ C_MODE_END #include #endif +/* Add checking if we are using likely/unlikely wrong */ +#ifdef CHECK_UNLIKELY +C_MODE_START +extern void init_my_likely(), end_my_likely(FILE *); +extern int my_likely_ok(const char *file_name, uint line); +extern int my_likely_fail(const char *file_name, uint line); +C_MODE_END + +#define likely(A) ((A) ? (my_likely_ok(__FILE__, __LINE__),1) : (my_likely_fail(__FILE__, __LINE__), 0)) +#define unlikely(A) ((A) ? (my_likely_fail(__FILE__, __LINE__),1) : (my_likely_ok(__FILE__, __LINE__), 0)) +/* + These macros should be used when the check fails often when running benchmarks but + we know for sure that the check is correct in a production environment +*/ +#define checked_likely(A) (A) +#define checked_unlikely(A) (A) +#else +/** + The semantics of builtin_expect() are that + 1) its two arguments are long + 2) it's likely that they are == + Those of our likely(x) are that x can be bool/int/longlong/pointer. 
+*/ + +#define likely(x) __builtin_expect(((x) != 0),1) +#define unlikely(x) __builtin_expect(((x) != 0),0) +#define checked_likely(x) likely(x) +#define checked_unlikely(x) unlikely(x) +#endif /* CHECK_UNLIKELY */ + /* A lot of our programs uses asserts, so better to always include it This also fixes a problem when people uses DBUG_ASSERT without including @@ -579,8 +600,8 @@ typedef SOCKET_SIZE_TYPE size_socket; #endif #endif /* O_SHARE */ -#ifndef O_TEMPORARY -#define O_TEMPORARY 0 +#ifndef O_SEQUENTIAL +#define O_SEQUENTIAL 0 #endif #ifndef O_SHORT_LIVED #define O_SHORT_LIVED 0 @@ -710,7 +731,7 @@ typedef SOCKET_SIZE_TYPE size_socket; #define closesocket(A) close(A) #endif -#if (_MSC_VER) +#if defined(_MSC_VER) #if !defined(_WIN64) inline double my_ulonglong2double(unsigned long long value) { @@ -807,37 +828,6 @@ inline unsigned long long my_double2ulonglong(double d) #define SIZE_T_MAX (~((size_t) 0)) #endif -#ifndef HAVE_FINITE -#define finite(x) (1.0 / fabs(x) > 0.0) -#endif - -#ifndef isfinite -#define isfinite(x) finite(x) -#endif - -#ifndef HAVE_ISNAN -#define isnan(x) ((x) != (x)) -#endif -#define my_isnan(x) isnan(x) - -#ifndef HAVE_ISINF -#define isinf(X) (!isfinite(X) && !isnan(X)) -#endif -#define my_isinf(X) isinf(X) - -#ifdef __cplusplus -#include -#ifndef isfinite -#define isfinite(X) std::isfinite(X) -#endif -#ifndef isnan -#define isnan(X) std::isnan(X) -#endif -#ifndef isinf -#define isinf(X) std::isinf(X) -#endif -#endif - /* Define missing math constants. */ #ifndef M_PI #define M_PI 3.14159265358979323846 @@ -849,17 +839,6 @@ inline unsigned long long my_double2ulonglong(double d) #define M_LN2 0.69314718055994530942 #endif -#ifndef HAVE_LOG2 -/* - This will be slightly slower and perhaps a tiny bit less accurate than - doing it the IEEE754 way but log2() should be available on C99 systems. -*/ -static inline double log2(double x) -{ - return (log(x) / M_LN2); -} -#endif - /* Max size that must be added to a so that we know Size to make addressable obj. @@ -1166,7 +1145,7 @@ typedef struct { const char *dli_fname, dli_fbase; } Dl_info; /* Provide __func__ macro definition for platforms that miss it. */ #if !defined (__func__) -#if __STDC_VERSION__ < 199901L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ < 199901L # if __GNUC__ >= 2 # define __func__ __FUNCTION__ # else @@ -1185,41 +1164,6 @@ typedef struct { const char *dli_fname, dli_fbase; } Dl_info; #endif #endif /* !defined(__func__) */ -#ifndef HAVE_RINT -/** - All integers up to this number can be represented exactly as double precision - values (DBL_MANT_DIG == 53 for IEEE 754 hardware). -*/ -#define MAX_EXACT_INTEGER ((1LL << DBL_MANT_DIG) - 1) - -/** - rint(3) implementation for platforms that do not have it. - Always rounds to the nearest integer with ties being rounded to the nearest - even integer to mimic glibc's rint() behavior in the "round-to-nearest" - FPU mode. Hardware-specific optimizations are possible (frndint on x86). - Unlike this implementation, hardware will also honor the FPU rounding mode. -*/ - -static inline double rint(double x) -{ - double f, i; - f = modf(x, &i); - /* - All doubles with absolute values > MAX_EXACT_INTEGER are even anyway, - no need to check it. 
- */ - if (x > 0.0) - i += (double) ((f > 0.5) || (f == 0.5 && - i <= (double) MAX_EXACT_INTEGER && - (longlong) i % 2)); - else - i -= (double) ((f < -0.5) || (f == -0.5 && - i >= (double) -MAX_EXACT_INTEGER && - (longlong) i % 2)); - return i; -} -#endif /* HAVE_RINT */ - /* MYSQL_PLUGIN_IMPORT macro is used to export mysqld data (i.e variables) for usage in storage engine loadable plugins. @@ -1258,7 +1202,7 @@ static inline double rint(double x) CMake using getconf */ #if !defined(CPU_LEVEL1_DCACHE_LINESIZE) || CPU_LEVEL1_DCACHE_LINESIZE == 0 - #if CPU_LEVEL1_DCACHE_LINESIZE == 0 + #if defined(CPU_LEVEL1_DCACHE_LINESIZE) && CPU_LEVEL1_DCACHE_LINESIZE == 0 #undef CPU_LEVEL1_DCACHE_LINESIZE #endif @@ -1279,5 +1223,4 @@ static inline double rint(double x) #else #define NOT_FIXED_DEC FLOATING_POINT_DECIMALS #endif - #endif /* my_global_h */ diff --git a/include/my_sys.h b/include/my_sys.h index 068733cd1fd..13ab7b12320 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -67,6 +67,7 @@ typedef struct my_aio_result { #define MY_WAIT_IF_FULL 32U /* Wait and try again if disk full error */ #define MY_IGNORE_BADFD 32U /* my_sync(): ignore 'bad descriptor' errors */ #define MY_ENCRYPT 64U /* Encrypt IO_CACHE temporary files */ +#define MY_TEMPORARY 64U /* create_temp_file(): delete file at once */ #define MY_NOSYMLINKS 512U /* my_open(): don't follow symlinks */ #define MY_FULL_IO 512U /* my_read(): loop until I/O is complete */ #define MY_DONT_CHECK_FILESIZE 128U /* Option to init_io_cache() */ @@ -573,21 +574,21 @@ static inline my_bool my_b_write_byte(IO_CACHE *info, uchar chr) static inline size_t my_b_fill(IO_CACHE *info) { info->read_pos= info->read_end; - return _my_b_read(info,0,0) ? 0 : info->read_end - info->read_pos; + return _my_b_read(info,0,0) ? 
0 : (size_t) (info->read_end - info->read_pos); } static inline my_off_t my_b_tell(const IO_CACHE *info) { if (info->type == WRITE_CACHE) { - return info->pos_in_file + (info->write_pos - info->request_pos); + return info->pos_in_file + (my_off_t)(info->write_pos - info->request_pos); } - return info->pos_in_file + (info->read_pos - info->request_pos); + return info->pos_in_file + (my_off_t) (info->read_pos - info->request_pos); } static inline my_off_t my_b_write_tell(const IO_CACHE *info) { - return info->pos_in_file + (info->write_pos - info->write_buffer); + return info->pos_in_file + (my_off_t) (info->write_pos - info->write_buffer); } static inline uchar* my_b_get_buffer_start(const IO_CACHE *info) @@ -597,7 +598,7 @@ static inline uchar* my_b_get_buffer_start(const IO_CACHE *info) static inline size_t my_b_get_bytes_in_buffer(const IO_CACHE *info) { - return info->read_end - info->request_pos; + return (size_t) (info->read_end - info->request_pos); } static inline my_off_t my_b_get_pos_in_file(const IO_CACHE *info) @@ -608,9 +609,9 @@ static inline my_off_t my_b_get_pos_in_file(const IO_CACHE *info) static inline size_t my_b_bytes_in_cache(const IO_CACHE *info) { if (info->type == WRITE_CACHE) { - return info->write_end - info->write_pos; + return (size_t) (info->write_end - info->write_pos); } - return info->read_end - info->read_pos; + return (size_t) (info->read_end - info->read_pos); } int my_b_copy_to_file(IO_CACHE *cache, FILE *file); @@ -733,12 +734,6 @@ void my_create_backup_name(char *to, const char *from, extern int my_copystat(const char *from, const char *to, int MyFlags); extern char * my_filename(File fd); -#ifdef EXTRA_DEBUG -void my_print_open_files(void); -#else -#define my_print_open_files() -#endif - extern my_bool init_tmpdir(MY_TMPDIR *tmpdir, const char *pathlist); extern char *my_tmpdir(MY_TMPDIR *tmpdir); extern void free_tmpdir(MY_TMPDIR *tmpdir); diff --git a/include/my_time.h b/include/my_time.h index f7d910fa07d..cec168c6fd6 100644 --- a/include/my_time.h +++ b/include/my_time.h @@ -104,7 +104,8 @@ typedef struct st_mysql_time_status static inline void my_time_status_init(MYSQL_TIME_STATUS *status) { - status->warnings= status->precision= 0; + status->warnings= 0; + status->precision= 0; } my_bool check_date(const MYSQL_TIME *ltime, my_bool not_zero_date, diff --git a/include/myisam.h b/include/myisam.h index e54b7fb5662..97f5a2d486a 100644 --- a/include/myisam.h +++ b/include/myisam.h @@ -373,7 +373,7 @@ typedef struct st_mi_sort_param int (*key_read)(struct st_mi_sort_param *,void *); int (*key_write)(struct st_mi_sort_param *, const void *); void (*lock_in_memory)(HA_CHECK *); - int (*write_keys)(struct st_mi_sort_param *, register uchar **, + int (*write_keys)(struct st_mi_sort_param *, uchar **, ulonglong , struct st_buffpek *, IO_CACHE *); my_off_t (*read_to_buffer)(IO_CACHE *,struct st_buffpek *, uint); int (*write_key)(struct st_mi_sort_param *, IO_CACHE *,uchar *, @@ -383,16 +383,15 @@ typedef struct st_mi_sort_param /* functions in mi_check */ void myisamchk_init(HA_CHECK *param); int chk_status(HA_CHECK *param, MI_INFO *info); -int chk_del(HA_CHECK *param, register MI_INFO *info, ulonglong test_flag); +int chk_del(HA_CHECK *param, MI_INFO *info, ulonglong test_flag); int chk_size(HA_CHECK *param, MI_INFO *info); int chk_key(HA_CHECK *param, MI_INFO *info); int chk_data_link(HA_CHECK *param, MI_INFO *info, my_bool extend); -int mi_repair(HA_CHECK *param, register MI_INFO *info, - char * name, int rep_quick); -int mi_sort_index(HA_CHECK *param, 
register MI_INFO *info, char * name); -int mi_repair_by_sort(HA_CHECK *param, register MI_INFO *info, +int mi_repair(HA_CHECK *param, MI_INFO *info, char * name, int rep_quick); +int mi_sort_index(HA_CHECK *param, MI_INFO *info, char * name); +int mi_repair_by_sort(HA_CHECK *param, MI_INFO *info, const char * name, int rep_quick); -int mi_repair_parallel(HA_CHECK *param, register MI_INFO *info, +int mi_repair_parallel(HA_CHECK *param, MI_INFO *info, const char * name, int rep_quick); int change_to_newfile(const char * filename, const char * old_ext, const char * new_ext, time_t backup_time, myf myflags); diff --git a/include/mysql/psi/mysql_file.h b/include/mysql/psi/mysql_file.h index 801c26086f5..f793aa6237c 100644 --- a/include/mysql/psi/mysql_file.h +++ b/include/mysql/psi/mysql_file.h @@ -529,16 +529,18 @@ inline_mysql_file_fgets( { char *result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_stream_locker) - (&state, file->m_psi, PSI_FILE_READ); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) size, src_file, src_line); - result= fgets(str, size, file->m_file); - PSI_FILE_CALL(end_file_wait)(locker, result ? strlen(result) : 0); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_stream_locker) (&state, file->m_psi, PSI_FILE_READ); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) size, src_file, src_line); + result= fgets(str, size, file->m_file); + PSI_FILE_CALL(end_file_wait)(locker, result ? strlen(result) : 0); + return result; + } } #endif @@ -555,16 +557,18 @@ inline_mysql_file_fgetc( { int result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_stream_locker) - (&state, file->m_psi, PSI_FILE_READ); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) 1, src_file, src_line); - result= fgetc(file->m_file); - PSI_FILE_CALL(end_file_wait)(locker, (size_t) 1); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_stream_locker)(&state, file->m_psi, PSI_FILE_READ); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) 1, src_file, src_line); + result= fgetc(file->m_file); + PSI_FILE_CALL(end_file_wait)(locker, (size_t) 1); + return result; + } } #endif @@ -581,18 +585,20 @@ inline_mysql_file_fputs( { int result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - size_t bytes; - locker= PSI_FILE_CALL(get_thread_file_stream_locker) - (&state, file->m_psi, PSI_FILE_WRITE); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - bytes= str ? strlen(str) : 0; - PSI_FILE_CALL(start_file_wait)(locker, bytes, src_file, src_line); - result= fputs(str, file->m_file); - PSI_FILE_CALL(end_file_wait)(locker, bytes); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + size_t bytes; + locker= PSI_FILE_CALL(get_thread_file_stream_locker) (&state, file->m_psi, PSI_FILE_WRITE); + if (likely(locker != NULL)) + { + bytes= str ? 
strlen(str) : 0; + PSI_FILE_CALL(start_file_wait)(locker, bytes, src_file, src_line); + result= fputs(str, file->m_file); + PSI_FILE_CALL(end_file_wait)(locker, bytes); + return result; + } } #endif @@ -609,16 +615,18 @@ inline_mysql_file_fputc( { int result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_stream_locker) - (&state, file->m_psi, PSI_FILE_WRITE); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) 1, src_file, src_line); - result= fputc(c, file->m_file); - PSI_FILE_CALL(end_file_wait)(locker, (size_t) 1); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_stream_locker) (&state, file->m_psi, PSI_FILE_WRITE); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) 1, src_file, src_line); + result= fputc(c, file->m_file); + PSI_FILE_CALL(end_file_wait)(locker, (size_t) 1); + return result; + } } #endif @@ -635,18 +643,20 @@ inline_mysql_file_fprintf(MYSQL_FILE *file, const char *format, ...) int result; va_list args; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_stream_locker) - (&state, file->m_psi, PSI_FILE_WRITE); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, __FILE__, __LINE__); - va_start(args, format); - result= vfprintf(file->m_file, format, args); - va_end(args); - PSI_FILE_CALL(end_file_wait)(locker, (size_t) result); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_stream_locker) (&state, file->m_psi, PSI_FILE_WRITE); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, __FILE__, __LINE__); + va_start(args, format); + result= vfprintf(file->m_file, format, args); + va_end(args); + PSI_FILE_CALL(end_file_wait)(locker, (size_t) result); + return result; + } } #endif @@ -665,16 +675,18 @@ inline_mysql_file_vfprintf( { int result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_stream_locker) - (&state, file->m_psi, PSI_FILE_WRITE); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); - result= vfprintf(file->m_file, format, args); - PSI_FILE_CALL(end_file_wait)(locker, (size_t) result); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_stream_locker) (&state, file->m_psi, PSI_FILE_WRITE); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); + result= vfprintf(file->m_file, format, args); + PSI_FILE_CALL(end_file_wait)(locker, (size_t) result); + return result; + } } #endif @@ -691,16 +703,18 @@ inline_mysql_file_fflush( { int result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_stream_locker) - (&state, file->m_psi, PSI_FILE_FLUSH); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); - result= fflush(file->m_file); - PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); - return result; + struct 
PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_stream_locker)(&state, file->m_psi, PSI_FILE_FLUSH); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); + result= fflush(file->m_file); + PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); + return result; + } } #endif @@ -723,16 +737,18 @@ inline_mysql_file_fstat( { int result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_descriptor_locker) - (&state, filenr, PSI_FILE_FSTAT); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); - result= my_fstat(filenr, stat_area, flags); - PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)(&state, filenr, PSI_FILE_FSTAT); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); + result= my_fstat(filenr, stat_area, flags); + PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); + return result; + } } #endif @@ -749,16 +765,18 @@ inline_mysql_file_stat( { MY_STAT *result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_name_locker) - (&state, key, PSI_FILE_STAT, path, &locker); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_open_wait)(locker, src_file, src_line); - result= my_stat(path, stat_area, flags); - PSI_FILE_CALL(end_file_open_wait)(locker, result); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_STAT, path, &locker); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_open_wait)(locker, src_file, src_line); + result= my_stat(path, stat_area, flags); + PSI_FILE_CALL(end_file_open_wait)(locker, result); + return result; + } } #endif @@ -775,17 +793,19 @@ inline_mysql_file_chsize( { int result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_descriptor_locker) - (&state, file, PSI_FILE_CHSIZE); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) newlength, src_file, - src_line); - result= my_chsize(file, newlength, filler, flags); - PSI_FILE_CALL(end_file_wait)(locker, (size_t) newlength); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)(&state, file, PSI_FILE_CHSIZE); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) newlength, src_file, + src_line); + result= my_chsize(file, newlength, filler, flags); + PSI_FILE_CALL(end_file_wait)(locker, (size_t) newlength); + return result; + } } #endif @@ -805,22 +825,24 @@ inline_mysql_file_fopen( if (likely(that != NULL)) { #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_name_locker) - (&state, key, PSI_FILE_STREAM_OPEN, filename, that); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_open_wait) - (locker, src_file, src_line); - that->m_file= my_fopen(filename, 
flags, myFlags); - that->m_psi= PSI_FILE_CALL(end_file_open_wait)(locker, that->m_file); - if (unlikely(that->m_file == NULL)) + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_STREAM_OPEN, + filename, that); + if (likely(locker != NULL)) { - my_free(that); - return NULL; + PSI_FILE_CALL(start_file_open_wait)(locker, src_file, src_line); + that->m_file= my_fopen(filename, flags, myFlags); + that->m_psi= PSI_FILE_CALL(end_file_open_wait)(locker, that->m_file); + if (unlikely(that->m_file == NULL)) + { + my_free(that); + return NULL; + } + return that; } - return that; } #endif @@ -846,17 +868,20 @@ inline_mysql_file_fclose( if (likely(file != NULL)) { #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_stream_locker) - (&state, file->m_psi, PSI_FILE_STREAM_CLOSE); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_close_wait)(locker, src_file, src_line); - result= my_fclose(file->m_file, flags); - PSI_FILE_CALL(end_file_close_wait)(locker, result); - my_free(file); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_stream_locker)(&state, file->m_psi, + PSI_FILE_STREAM_CLOSE); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_close_wait)(locker, src_file, src_line); + result= my_fclose(file->m_file, flags); + PSI_FILE_CALL(end_file_close_wait)(locker, result); + my_free(file); + return result; + } } #endif @@ -875,21 +900,23 @@ inline_mysql_file_fread( { size_t result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - size_t bytes_read; - locker= PSI_FILE_CALL(get_thread_file_stream_locker) - (&state, file->m_psi, PSI_FILE_READ); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line); - result= my_fread(file->m_file, buffer, count, flags); - if (flags & (MY_NABP | MY_FNABP)) - bytes_read= (result == 0) ? count : 0; - else - bytes_read= (result != MY_FILE_ERROR) ? result : 0; - PSI_FILE_CALL(end_file_wait)(locker, bytes_read); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + size_t bytes_read; + locker= PSI_FILE_CALL(get_thread_file_stream_locker)(&state, file->m_psi, PSI_FILE_READ); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line); + result= my_fread(file->m_file, buffer, count, flags); + if (flags & (MY_NABP | MY_FNABP)) + bytes_read= (result == 0) ? count : 0; + else + bytes_read= (result != MY_FILE_ERROR) ? result : 0; + PSI_FILE_CALL(end_file_wait)(locker, bytes_read); + return result; + } } #endif @@ -906,21 +933,23 @@ inline_mysql_file_fwrite( { size_t result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - size_t bytes_written; - locker= PSI_FILE_CALL(get_thread_file_stream_locker) - (&state, file->m_psi, PSI_FILE_WRITE); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line); - result= my_fwrite(file->m_file, buffer, count, flags); - if (flags & (MY_NABP | MY_FNABP)) - bytes_written= (result == 0) ? count : 0; - else - bytes_written= (result != MY_FILE_ERROR) ? 
result : 0; - PSI_FILE_CALL(end_file_wait)(locker, bytes_written); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + size_t bytes_written; + locker= PSI_FILE_CALL(get_thread_file_stream_locker)(&state, file->m_psi, PSI_FILE_WRITE); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line); + result= my_fwrite(file->m_file, buffer, count, flags); + if (flags & (MY_NABP | MY_FNABP)) + bytes_written= (result == 0) ? count : 0; + else + bytes_written= (result != MY_FILE_ERROR) ? result : 0; + PSI_FILE_CALL(end_file_wait)(locker, bytes_written); + return result; + } } #endif @@ -937,16 +966,18 @@ inline_mysql_file_fseek( { my_off_t result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_stream_locker) - (&state, file->m_psi, PSI_FILE_SEEK); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); - result= my_fseek(file->m_file, pos, whence, flags); - PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_stream_locker)(&state, file->m_psi, PSI_FILE_SEEK); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); + result= my_fseek(file->m_file, pos, whence, flags); + PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); + return result; + } } #endif @@ -963,16 +994,18 @@ inline_mysql_file_ftell( { my_off_t result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_stream_locker) - (&state, file->m_psi, PSI_FILE_TELL); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); - result= my_ftell(file->m_file, flags); - PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_stream_locker)(&state, file->m_psi, PSI_FILE_TELL); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); + result= my_ftell(file->m_file, flags); + PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); + return result; + } } #endif @@ -989,16 +1022,19 @@ inline_mysql_file_create( { File file; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_name_locker) - (&state, key, PSI_FILE_CREATE, filename, &locker); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_open_wait)(locker, src_file, src_line); - file= my_create(filename, create_flags, access_flags, myFlags); - PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(locker, file); - return file; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_CREATE, filename, + &locker); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_open_wait)(locker, src_file, src_line); + file= my_create(filename, create_flags, access_flags, myFlags); + PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(locker, file); + return file; + } } #endif @@ -1035,16 +1071,19 @@ inline_mysql_file_open( { File file; #ifdef HAVE_PSI_FILE_INTERFACE - struct 
PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_name_locker) - (&state, key, PSI_FILE_OPEN, filename, &locker); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_open_wait)(locker, src_file, src_line); - file= my_open(filename, flags, myFlags); - PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(locker, file); - return file; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_OPEN, filename, + &locker); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_open_wait)(locker, src_file, src_line); + file= my_open(filename, flags, myFlags); + PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(locker, file); + return file; + } } #endif @@ -1061,16 +1100,18 @@ inline_mysql_file_close( { int result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_descriptor_locker) - (&state, file, PSI_FILE_CLOSE); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_close_wait)(locker, src_file, src_line); - result= my_close(file, flags); - PSI_FILE_CALL(end_file_close_wait)(locker, result); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)(&state, file, PSI_FILE_CLOSE); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_close_wait)(locker, src_file, src_line); + result= my_close(file, flags); + PSI_FILE_CALL(end_file_close_wait)(locker, result); + return result; + } } #endif @@ -1087,21 +1128,23 @@ inline_mysql_file_read( { size_t result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - size_t bytes_read; - locker= PSI_FILE_CALL(get_thread_file_descriptor_locker) - (&state, file, PSI_FILE_READ); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line); - result= my_read(file, buffer, count, flags); - if (flags & (MY_NABP | MY_FNABP)) - bytes_read= (result == 0) ? count : 0; - else - bytes_read= (result != MY_FILE_ERROR) ? result : 0; - PSI_FILE_CALL(end_file_wait)(locker, bytes_read); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + size_t bytes_read; + locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)(&state, file, PSI_FILE_READ); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line); + result= my_read(file, buffer, count, flags); + if (flags & (MY_NABP | MY_FNABP)) + bytes_read= (result == 0) ? count : 0; + else + bytes_read= (result != MY_FILE_ERROR) ? result : 0; + PSI_FILE_CALL(end_file_wait)(locker, bytes_read); + return result; + } } #endif @@ -1118,21 +1161,23 @@ inline_mysql_file_write( { size_t result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - size_t bytes_written; - locker= PSI_FILE_CALL(get_thread_file_descriptor_locker) - (&state, file, PSI_FILE_WRITE); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line); - result= my_write(file, buffer, count, flags); - if (flags & (MY_NABP | MY_FNABP)) - bytes_written= (result == 0) ? count : 0; - else - bytes_written= (result != MY_FILE_ERROR) ? 
result : 0; - PSI_FILE_CALL(end_file_wait)(locker, bytes_written); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + size_t bytes_written; + locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)(&state, file, PSI_FILE_WRITE); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line); + result= my_write(file, buffer, count, flags); + if (flags & (MY_NABP | MY_FNABP)) + bytes_written= (result == 0) ? count : 0; + else + bytes_written= (result != MY_FILE_ERROR) ? result : 0; + PSI_FILE_CALL(end_file_wait)(locker, bytes_written); + return result; + } } #endif @@ -1149,21 +1194,23 @@ inline_mysql_file_pread( { size_t result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - size_t bytes_read; - locker= PSI_FILE_CALL(get_thread_file_descriptor_locker) - (&state, file, PSI_FILE_READ); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line); - result= my_pread(file, buffer, count, offset, flags); - if (flags & (MY_NABP | MY_FNABP)) - bytes_read= (result == 0) ? count : 0; - else - bytes_read= (result != MY_FILE_ERROR) ? result : 0; - PSI_FILE_CALL(end_file_wait)(locker, bytes_read); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + size_t bytes_read; + locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)(&state, file, PSI_FILE_READ); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line); + result= my_pread(file, buffer, count, offset, flags); + if (flags & (MY_NABP | MY_FNABP)) + bytes_read= (result == 0) ? count : 0; + else + bytes_read= (result != MY_FILE_ERROR) ? result : 0; + PSI_FILE_CALL(end_file_wait)(locker, bytes_read); + return result; + } } #endif @@ -1180,21 +1227,23 @@ inline_mysql_file_pwrite( { size_t result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - size_t bytes_written; - locker= PSI_FILE_CALL(get_thread_file_descriptor_locker) - (&state, file, PSI_FILE_WRITE); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line); - result= my_pwrite(file, buffer, count, offset, flags); - if (flags & (MY_NABP | MY_FNABP)) - bytes_written= (result == 0) ? count : 0; - else - bytes_written= (result != MY_FILE_ERROR) ? result : 0; - PSI_FILE_CALL(end_file_wait)(locker, bytes_written); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + size_t bytes_written; + locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)(&state, file, PSI_FILE_WRITE); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line); + result= my_pwrite(file, buffer, count, offset, flags); + if (flags & (MY_NABP | MY_FNABP)) + bytes_written= (result == 0) ? count : 0; + else + bytes_written= (result != MY_FILE_ERROR) ? 
result : 0; + PSI_FILE_CALL(end_file_wait)(locker, bytes_written); + return result; + } } #endif @@ -1211,16 +1260,18 @@ inline_mysql_file_seek( { my_off_t result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_descriptor_locker) - (&state, file, PSI_FILE_SEEK); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); - result= my_seek(file, pos, whence, flags); - PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)(&state, file, PSI_FILE_SEEK); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); + result= my_seek(file, pos, whence, flags); + PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); + return result; + } } #endif @@ -1237,16 +1288,18 @@ inline_mysql_file_tell( { my_off_t result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_descriptor_locker) - (&state, file, PSI_FILE_TELL); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); - result= my_tell(file, flags); - PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)(&state, file, PSI_FILE_TELL); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); + result= my_tell(file, flags); + PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); + return result; + } } #endif @@ -1263,16 +1316,18 @@ inline_mysql_file_delete( { int result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_name_locker) - (&state, key, PSI_FILE_DELETE, name, &locker); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_close_wait)(locker, src_file, src_line); - result= my_delete(name, flags); - PSI_FILE_CALL(end_file_close_wait)(locker, result); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_DELETE, name, &locker); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_close_wait)(locker, src_file, src_line); + result= my_delete(name, flags); + PSI_FILE_CALL(end_file_close_wait)(locker, result); + return result; + } } #endif @@ -1289,16 +1344,18 @@ inline_mysql_file_rename( { int result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_name_locker) - (&state, key, PSI_FILE_RENAME, to, &locker); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); - result= my_rename(from, to, flags); - PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_RENAME, to, &locker); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); + result= my_rename(from, to, flags); 
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); + return result; + } } #endif @@ -1317,17 +1374,20 @@ inline_mysql_file_create_with_symlink( { File file; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_name_locker) - (&state, key, PSI_FILE_CREATE, filename, &locker); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_open_wait)(locker, src_file, src_line); - file= my_create_with_symlink(linkname, filename, create_flags, access_flags, - flags); - PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(locker, file); - return file; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_CREATE, filename, + &locker); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_open_wait)(locker, src_file, src_line); + file= my_create_with_symlink(linkname, filename, create_flags, access_flags, + flags); + PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(locker, file); + return file; + } } #endif @@ -1348,16 +1408,19 @@ inline_mysql_file_delete_with_symlink( char buf[FN_REFLEN]; char *fullname= fn_format(buf, name, "", ext, MY_UNPACK_FILENAME | MY_APPEND_EXT); #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_name_locker) - (&state, key, PSI_FILE_DELETE, fullname, &locker); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_close_wait)(locker, src_file, src_line); - result= my_handler_delete_with_symlink(fullname, flags); - PSI_FILE_CALL(end_file_close_wait)(locker, result); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_DELETE, fullname, + &locker); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_close_wait)(locker, src_file, src_line); + result= my_handler_delete_with_symlink(fullname, flags); + PSI_FILE_CALL(end_file_close_wait)(locker, result); + return result; + } } #endif @@ -1375,16 +1438,18 @@ inline_mysql_file_rename_with_symlink( { int result; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_name_locker) - (&state, key, PSI_FILE_RENAME, to, &locker); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); - result= my_rename_with_symlink(from, to, flags); - PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_RENAME, to, &locker); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); + result= my_rename_with_symlink(from, to, flags); + PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); + return result; + } } #endif @@ -1401,16 +1466,18 @@ inline_mysql_file_sync( { int result= 0; #ifdef HAVE_PSI_FILE_INTERFACE - struct PSI_file_locker *locker; - PSI_file_locker_state state; - locker= PSI_FILE_CALL(get_thread_file_descriptor_locker) - (&state, fd, PSI_FILE_SYNC); - if (likely(locker != NULL)) + if (psi_likely(pfs_enabled)) { - PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); - result= my_sync(fd, flags); - 
PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); - return result; + struct PSI_file_locker *locker; + PSI_file_locker_state state; + locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)(&state, fd, PSI_FILE_SYNC); + if (likely(locker != NULL)) + { + PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line); + result= my_sync(fd, flags); + PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0); + return result; + } } #endif diff --git a/include/mysql/psi/mysql_idle.h b/include/mysql/psi/mysql_idle.h index b623edce108..16c9de7ff55 100644 --- a/include/mysql/psi/mysql_idle.h +++ b/include/mysql/psi/mysql_idle.h @@ -82,7 +82,7 @@ inline_mysql_start_idle_wait(PSI_idle_locker_state *state, static inline void inline_mysql_end_idle_wait(struct PSI_idle_locker *locker) { - if (likely(locker != NULL)) + if (psi_likely(locker != NULL)) PSI_IDLE_CALL(end_idle_wait)(locker); } #endif diff --git a/include/mysql/psi/mysql_socket.h b/include/mysql/psi/mysql_socket.h index 2bbe4c39849..3baabb6e57d 100644 --- a/include/mysql/psi/mysql_socket.h +++ b/include/mysql/psi/mysql_socket.h @@ -245,7 +245,7 @@ inline_mysql_start_socket_wait(PSI_socket_locker_state *state, const char *src_file, uint src_line) { struct PSI_socket_locker *locker; - if (mysql_socket.m_psi != NULL) + if (psi_likely(mysql_socket.m_psi != NULL)) { locker= PSI_SOCKET_CALL(start_socket_wait) (state, mysql_socket.m_psi, op, byte_count, src_file, src_line); @@ -262,7 +262,7 @@ inline_mysql_start_socket_wait(PSI_socket_locker_state *state, static inline void inline_mysql_end_socket_wait(struct PSI_socket_locker *locker, size_t byte_count) { - if (locker != NULL) + if (psi_likely(locker != NULL)) PSI_SOCKET_CALL(end_socket_wait)(locker, byte_count); } @@ -577,7 +577,7 @@ inline_mysql_socket_bind int result; #ifdef HAVE_PSI_SOCKET_INTERFACE - if (mysql_socket.m_psi != NULL) + if (psi_likely(mysql_socket.m_psi != NULL)) { /* Instrumentation start */ PSI_socket_locker_state state; @@ -617,7 +617,7 @@ inline_mysql_socket_getsockname int result; #ifdef HAVE_PSI_SOCKET_INTERFACE - if (mysql_socket.m_psi != NULL) + if (psi_likely(mysql_socket.m_psi != NULL)) { /* Instrumentation start */ PSI_socket_locker *locker; @@ -655,7 +655,7 @@ inline_mysql_socket_connect int result; #ifdef HAVE_PSI_SOCKET_INTERFACE - if (mysql_socket.m_psi != NULL) + if (psi_likely(mysql_socket.m_psi != NULL)) { /* Instrumentation start */ PSI_socket_locker *locker; @@ -693,7 +693,7 @@ inline_mysql_socket_getpeername int result; #ifdef HAVE_PSI_SOCKET_INTERFACE - if (mysql_socket.m_psi != NULL) + if (psi_likely(mysql_socket.m_psi != NULL)) { /* Instrumentation start */ PSI_socket_locker *locker; @@ -731,7 +731,7 @@ inline_mysql_socket_send ssize_t result; #ifdef HAVE_PSI_SOCKET_INTERFACE - if (mysql_socket.m_psi != NULL) + if (psi_likely(mysql_socket.m_psi != NULL)) { /* Instrumentation start */ PSI_socket_locker *locker; @@ -772,7 +772,7 @@ inline_mysql_socket_recv ssize_t result; #ifdef HAVE_PSI_SOCKET_INTERFACE - if (mysql_socket.m_psi != NULL) + if (psi_likely(mysql_socket.m_psi != NULL)) { /* Instrumentation start */ PSI_socket_locker *locker; @@ -813,7 +813,7 @@ inline_mysql_socket_sendto ssize_t result; #ifdef HAVE_PSI_SOCKET_INTERFACE - if (mysql_socket.m_psi != NULL) + if (psi_likely(mysql_socket.m_psi != NULL)) { /* Instrumentation start */ PSI_socket_locker *locker; @@ -855,7 +855,7 @@ inline_mysql_socket_recvfrom ssize_t result; #ifdef HAVE_PSI_SOCKET_INTERFACE - if (mysql_socket.m_psi != NULL) + if (psi_likely(mysql_socket.m_psi != NULL)) { /* 
Instrumentation start */ PSI_socket_locker *locker; @@ -896,7 +896,7 @@ inline_mysql_socket_getsockopt int result; #ifdef HAVE_PSI_SOCKET_INTERFACE - if (mysql_socket.m_psi != NULL) + if (psi_likely(mysql_socket.m_psi != NULL)) { /* Instrumentation start */ PSI_socket_locker *locker; @@ -935,7 +935,7 @@ inline_mysql_socket_setsockopt int result; #ifdef HAVE_PSI_SOCKET_INTERFACE - if (mysql_socket.m_psi) + if (psi_likely(mysql_socket.m_psi)) { /* Instrumentation start */ PSI_socket_locker *locker; @@ -973,7 +973,7 @@ inline_mysql_socket_listen int result; #ifdef HAVE_PSI_SOCKET_INTERFACE - if (mysql_socket.m_psi != NULL) + if (psi_likely(mysql_socket.m_psi != NULL)) { /* Instrumentation start */ PSI_socket_locker *locker; @@ -1087,7 +1087,7 @@ inline_mysql_socket_close int result; #ifdef HAVE_PSI_SOCKET_INTERFACE - if (mysql_socket.m_psi != NULL) + if (psi_likely(mysql_socket.m_psi != NULL)) { /* Instrumentation start */ PSI_socket_locker *locker; @@ -1142,7 +1142,7 @@ inline_mysql_socket_shutdown /* Instrumentation start */ #ifdef HAVE_PSI_SOCKET_INTERFACE - if (mysql_socket.m_psi != NULL) + if (psi_likely(mysql_socket.m_psi != NULL)) { PSI_socket_locker *locker; PSI_socket_locker_state state; diff --git a/include/mysql/psi/mysql_statement.h b/include/mysql/psi/mysql_statement.h index 2c59b50aa63..44d27ef1ea6 100644 --- a/include/mysql/psi/mysql_statement.h +++ b/include/mysql/psi/mysql_statement.h @@ -127,7 +127,7 @@ inline_mysql_digest_start(PSI_statement_locker *locker) { PSI_digest_locker* digest_locker= NULL; - if (likely(locker != NULL)) + if (psi_likely(locker != NULL)) digest_locker= PSI_DIGEST_CALL(digest_start)(locker); return digest_locker; } @@ -137,7 +137,7 @@ inline_mysql_digest_start(PSI_statement_locker *locker) static inline void inline_mysql_digest_end(PSI_digest_locker *locker, const sql_digest_storage *digest) { - if (likely(locker != NULL)) + if (psi_likely(locker != NULL)) PSI_DIGEST_CALL(digest_end)(locker, digest); } #endif @@ -151,7 +151,7 @@ inline_mysql_start_statement(PSI_statement_locker_state *state, { PSI_statement_locker *locker; locker= PSI_STATEMENT_CALL(get_thread_statement_locker)(state, key, charset); - if (likely(locker != NULL)) + if (psi_likely(locker != NULL)) PSI_STATEMENT_CALL(start_statement)(locker, db, (uint)db_len, src_file, src_line); return locker; } @@ -160,7 +160,7 @@ static inline struct PSI_statement_locker * inline_mysql_refine_statement(PSI_statement_locker *locker, PSI_statement_key key) { - if (likely(locker != NULL)) + if (psi_likely(locker != NULL)) { locker= PSI_STATEMENT_CALL(refine_statement)(locker, key); } @@ -171,7 +171,7 @@ static inline void inline_mysql_set_statement_text(PSI_statement_locker *locker, const char *text, uint text_len) { - if (likely(locker != NULL)) + if (psi_likely(locker != NULL)) { PSI_STATEMENT_CALL(set_statement_text)(locker, text, text_len); } @@ -181,7 +181,7 @@ static inline void inline_mysql_set_statement_lock_time(PSI_statement_locker *locker, ulonglong count) { - if (likely(locker != NULL)) + if (psi_likely(locker != NULL)) { PSI_STATEMENT_CALL(set_statement_lock_time)(locker, count); } @@ -191,7 +191,7 @@ static inline void inline_mysql_set_statement_rows_sent(PSI_statement_locker *locker, ulonglong count) { - if (likely(locker != NULL)) + if (psi_likely(locker != NULL)) { PSI_STATEMENT_CALL(set_statement_rows_sent)(locker, count); } @@ -201,7 +201,7 @@ static inline void inline_mysql_set_statement_rows_examined(PSI_statement_locker *locker, ulonglong count) { - if (likely(locker != NULL)) + if 
(psi_likely(locker != NULL)) { PSI_STATEMENT_CALL(set_statement_rows_examined)(locker, count); } @@ -212,7 +212,7 @@ inline_mysql_end_statement(struct PSI_statement_locker *locker, Diagnostics_area *stmt_da) { PSI_STAGE_CALL(end_stage)(); - if (likely(locker != NULL)) + if (psi_likely(locker != NULL)) PSI_STATEMENT_CALL(end_statement)(locker, stmt_da); } #endif diff --git a/include/mysql/psi/mysql_table.h b/include/mysql/psi/mysql_table.h index 5b4b64a8e48..e420f9a099e 100644 --- a/include/mysql/psi/mysql_table.h +++ b/include/mysql/psi/mysql_table.h @@ -87,7 +87,7 @@ #ifdef HAVE_PSI_TABLE_INTERFACE #define MYSQL_TABLE_IO_WAIT(PSI, OP, INDEX, FLAGS, PAYLOAD) \ { \ - if (PSI != NULL) \ + if (psi_likely(PSI != NULL)) \ { \ PSI_table_locker *locker; \ PSI_table_locker_state state; \ @@ -120,7 +120,7 @@ #ifdef HAVE_PSI_TABLE_INTERFACE #define MYSQL_TABLE_LOCK_WAIT(PSI, OP, FLAGS, PAYLOAD) \ { \ - if (PSI != NULL) \ + if (psi_likely(PSI != NULL)) \ { \ PSI_table_locker *locker; \ PSI_table_locker_state state; \ @@ -186,7 +186,7 @@ inline_mysql_start_table_lock_wait(PSI_table_locker_state *state, enum PSI_table_lock_operation op, ulong flags, const char *src_file, uint src_line) { - if (psi != NULL) + if (psi_likely(psi != NULL)) { struct PSI_table_locker *locker; locker= PSI_TABLE_CALL(start_table_lock_wait) @@ -203,7 +203,7 @@ inline_mysql_start_table_lock_wait(PSI_table_locker_state *state, static inline void inline_mysql_end_table_lock_wait(struct PSI_table_locker *locker) { - if (locker != NULL) + if (psi_likely(locker != NULL)) PSI_TABLE_CALL(end_table_lock_wait)(locker); } #endif diff --git a/include/mysql/psi/mysql_thread.h b/include/mysql/psi/mysql_thread.h index 08715513f8c..6350467c3bc 100644 --- a/include/mysql/psi/mysql_thread.h +++ b/include/mysql/psi/mysql_thread.h @@ -682,7 +682,7 @@ static inline int inline_mysql_mutex_lock( int result; #ifdef HAVE_PSI_MUTEX_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) { /* Instrumentation start */ PSI_mutex_locker *locker; @@ -725,7 +725,7 @@ static inline int inline_mysql_mutex_trylock( int result; #ifdef HAVE_PSI_MUTEX_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) { /* Instrumentation start */ PSI_mutex_locker *locker; @@ -768,7 +768,7 @@ static inline int inline_mysql_mutex_unlock( int result; #ifdef HAVE_PSI_MUTEX_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) PSI_MUTEX_CALL(unlock_mutex)(that->m_psi); #endif @@ -835,7 +835,7 @@ static inline int inline_mysql_rwlock_destroy( mysql_rwlock_t *that) { #ifdef HAVE_PSI_RWLOCK_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) { PSI_RWLOCK_CALL(destroy_rwlock)(that->m_psi); that->m_psi= NULL; @@ -849,7 +849,7 @@ static inline int inline_mysql_prlock_destroy( mysql_prlock_t *that) { #ifdef HAVE_PSI_RWLOCK_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) { PSI_RWLOCK_CALL(destroy_rwlock)(that->m_psi); that->m_psi= NULL; @@ -869,7 +869,7 @@ static inline int inline_mysql_rwlock_rdlock( int result; #ifdef HAVE_PSI_RWLOCK_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) { /* Instrumentation start */ PSI_rwlock_locker *locker; @@ -905,7 +905,7 @@ static inline int inline_mysql_prlock_rdlock( int result; #ifdef HAVE_PSI_RWLOCK_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) { /* Instrumentation start */ PSI_rwlock_locker *locker; @@ -941,7 +941,7 @@ static inline int inline_mysql_rwlock_wrlock( int result; 
#ifdef HAVE_PSI_RWLOCK_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) { /* Instrumentation start */ PSI_rwlock_locker *locker; @@ -977,7 +977,7 @@ static inline int inline_mysql_prlock_wrlock( int result; #ifdef HAVE_PSI_RWLOCK_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) { /* Instrumentation start */ PSI_rwlock_locker *locker; @@ -1013,7 +1013,7 @@ static inline int inline_mysql_rwlock_tryrdlock( int result; #ifdef HAVE_PSI_RWLOCK_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) { /* Instrumentation start */ PSI_rwlock_locker *locker; @@ -1048,7 +1048,7 @@ static inline int inline_mysql_rwlock_trywrlock( int result; #ifdef HAVE_PSI_RWLOCK_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) { /* Instrumentation start */ PSI_rwlock_locker *locker; @@ -1078,7 +1078,7 @@ static inline int inline_mysql_rwlock_unlock( { int result; #ifdef HAVE_PSI_RWLOCK_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) PSI_RWLOCK_CALL(unlock_rwlock)(that->m_psi); #endif result= rw_unlock(&that->m_rwlock); @@ -1091,7 +1091,7 @@ static inline int inline_mysql_prlock_unlock( { int result; #ifdef HAVE_PSI_RWLOCK_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) PSI_RWLOCK_CALL(unlock_rwlock)(that->m_psi); #endif result= rw_pr_unlock(&that->m_prlock); @@ -1135,7 +1135,7 @@ static inline int inline_mysql_cond_destroy( mysql_cond_t *that) { #ifdef HAVE_PSI_COND_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) { PSI_COND_CALL(destroy_cond)(that->m_psi); that->m_psi= NULL; @@ -1155,7 +1155,7 @@ static inline int inline_mysql_cond_wait( int result; #ifdef HAVE_PSI_COND_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) { /* Instrumentation start */ PSI_cond_locker *locker; @@ -1192,7 +1192,7 @@ static inline int inline_mysql_cond_timedwait( int result; #ifdef HAVE_PSI_COND_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) { /* Instrumentation start */ PSI_cond_locker *locker; @@ -1204,7 +1204,7 @@ static inline int inline_mysql_cond_timedwait( result= my_cond_timedwait(&that->m_cond, &mutex->m_mutex, abstime); /* Instrumentation end */ - if (locker != NULL) + if (psi_likely(locker != NULL)) PSI_COND_CALL(end_cond_wait)(locker, result); return result; @@ -1222,7 +1222,7 @@ static inline int inline_mysql_cond_signal( { int result; #ifdef HAVE_PSI_COND_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) PSI_COND_CALL(signal_cond)(that->m_psi); #endif result= pthread_cond_signal(&that->m_cond); @@ -1234,7 +1234,7 @@ static inline int inline_mysql_cond_broadcast( { int result; #ifdef HAVE_PSI_COND_INTERFACE - if (that->m_psi != NULL) + if (psi_likely(that->m_psi != NULL)) PSI_COND_CALL(broadcast_cond)(that->m_psi); #endif result= pthread_cond_broadcast(&that->m_cond); diff --git a/include/mysql/psi/psi.h b/include/mysql/psi/psi.h index 3f43445e08a..394dd1b30b4 100644 --- a/include/mysql/psi/psi.h +++ b/include/mysql/psi/psi.h @@ -40,6 +40,21 @@ #error "You must include my_global.h in the code for the build to be correct." #endif +/* + If PSI_ON_BY_DFAULT is defined, assume PSI will be enabled by default and + optimize jumps testing for PSI this case. 
If not, optimize the binary for + that PSI is not enabled +*/ + +#ifdef PSI_ON_BY_DEFAULT +#define psi_likely(A) likely(A) +#define psi_unlikely(A) unlikely(A) +#else +#define psi_likely(A) unlikely(A) +#define psi_unlikely(A) likely(A) +#endif + + C_MODE_START struct TABLE_SHARE; @@ -2346,6 +2361,7 @@ typedef struct PSI_stage_info_none PSI_stage_info; #endif /* HAVE_PSI_INTERFACE */ +extern MYSQL_PLUGIN_IMPORT my_bool pfs_enabled; extern MYSQL_PLUGIN_IMPORT PSI *PSI_server; /* diff --git a/include/mysql/psi/psi_abi_v1.h.pp b/include/mysql/psi/psi_abi_v1.h.pp index 17ac0271da2..ef18f59e4a9 100644 --- a/include/mysql/psi/psi_abi_v1.h.pp +++ b/include/mysql/psi/psi_abi_v1.h.pp @@ -616,5 +616,6 @@ typedef struct PSI_file_locker_state_v1 PSI_file_locker_state; typedef struct PSI_table_locker_state_v1 PSI_table_locker_state; typedef struct PSI_statement_locker_state_v1 PSI_statement_locker_state; typedef struct PSI_socket_locker_state_v1 PSI_socket_locker_state; +extern MYSQL_PLUGIN_IMPORT my_bool pfs_enabled; extern MYSQL_PLUGIN_IMPORT PSI *PSI_server; C_MODE_END diff --git a/include/mysql/psi/psi_abi_v2.h.pp b/include/mysql/psi/psi_abi_v2.h.pp index 4e81fd66ca4..adf1af7cfae 100644 --- a/include/mysql/psi/psi_abi_v2.h.pp +++ b/include/mysql/psi/psi_abi_v2.h.pp @@ -209,5 +209,6 @@ typedef struct PSI_file_locker_state_v2 PSI_file_locker_state; typedef struct PSI_table_locker_state_v2 PSI_table_locker_state; typedef struct PSI_statement_locker_state_v2 PSI_statement_locker_state; typedef struct PSI_socket_locker_state_v2 PSI_socket_locker_state; +extern MYSQL_PLUGIN_IMPORT my_bool pfs_enabled; extern MYSQL_PLUGIN_IMPORT PSI *PSI_server; C_MODE_END diff --git a/libmariadb b/libmariadb index 668757aaa9a..a12a0b8362f 160000 --- a/libmariadb +++ b/libmariadb @@ -1 +1 @@ -Subproject commit 668757aaa9a55d2bcd806908cb5a8e806cd6dc31 +Subproject commit a12a0b8362fe8c92ec7252c8da19c14d22e289fc diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt index 519063f5075..f1fcfac58f5 100644 --- a/libmysqld/CMakeLists.txt +++ b/libmysqld/CMakeLists.txt @@ -445,7 +445,9 @@ IF(NOT DISABLE_SHARED) # Clean direct output flags, as 2 targets have the same base name # libmysqld SET_TARGET_PROPERTIES(libmysqld PROPERTIES CLEAN_DIRECT_OUTPUT 1) + TARGET_LINK_LIBRARIES(libmysqld ${CRC32_LIBRARY}) SET_TARGET_PROPERTIES(mysqlserver PROPERTIES CLEAN_DIRECT_OUTPUT 1) + TARGET_LINK_LIBRARIES(mysqlserver ${CRC32_LIBRARY}) IF(LIBMYSQLD_SO_EXTRA_LIBS) TARGET_LINK_LIBRARIES(libmysqld ${LIBMYSQLD_SO_EXTRA_LIBS}) ENDIF() diff --git a/man/make_win_bin_dist.1 b/man/make_win_bin_dist.1 deleted file mode 100644 index f54f1144ed0..00000000000 --- a/man/make_win_bin_dist.1 +++ /dev/null @@ -1,176 +0,0 @@ -'\" t -.\" -.TH "\FBMAKE_WIN_BIN_DIST" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System" -.\" ----------------------------------------------------------------- -.\" * set default formatting -.\" ----------------------------------------------------------------- -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l -.\" ----------------------------------------------------------------- -.\" * MAIN CONTENT STARTS HERE * -.\" ----------------------------------------------------------------- -.\" make_win_bin_dist -.SH "NAME" -make_win_bin_dist \- package MySQL distribution as ZIP archive -.SH "SYNOPSIS" -.HP \w'\fBmake_win_bin_dist\ [\fR\fB\fIoptions\fR\fR\fB]\ \fR\fB\fIpackage_basename\fR\fR\fB\ [\fR\fB\fIcopy_def\fR\fR\fB\ \&.\&.\&.]\fR\ 'u -\fBmake_win_bin_dist 
[\fR\fB\fIoptions\fR\fR\fB] \fR\fB\fIpackage_basename\fR\fR\fB [\fR\fB\fIcopy_def\fR\fR\fB \&.\&.\&.]\fR -.SH "DESCRIPTION" -.PP -This script is used on Windows after building a MySQL distribution from source to create executable programs\&. It packages the binaries and support files into a ZIP archive that can be unpacked at the location where you want to install MySQL\&. -.PP -\fBmake_win_bin_dist\fR -is a shell script, so you must have Cygwin installed to use it\&. -.PP -This program\'s use is subject to change\&. Currently, you invoke it as follows from the root directory of your source distribution: -.sp -.if n \{\ -.RS 4 -.\} -.nf -shell> \fBmake_win_bin_dist [\fR\fB\fIoptions\fR\fR\fB] \fR\fB\fIpackage_basename\fR\fR\fB [\fR\fB\fIcopy_def\fR\fR\fB \&.\&.\&.]\fR -.fi -.if n \{\ -.RE -.\} -.PP -The -\fIpackage_basename\fR -argument provides the basename for the resulting ZIP archive\&. This name will be the name of the directory that results from unpacking the archive\&. -.PP -Because you might want to include files of directories from other builds, you can instruct this script do copy them in for you, via -\fIcopy_def\fR -arguments, which of which is of the form -\fIrelative_dest_name\fR=\fIsource_name\fR\&. -.PP -Example: -.sp -.if n \{\ -.RS 4 -.\} -.nf -bin/mysqld\-max\&.exe=\&.\&./my\-max\-build/sql/release/mysqld\&.exe -.fi -.if n \{\ -.RE -.\} -.PP -If you specify a directory, the entire directory will be copied\&. -.PP -\fBmake_win_bin_dist\fR -supports the following options\&. -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" make_win_bin_dist: debug option -.\" debug option: make_win_bin_dist -\fB\-\-debug\fR -.sp -Pack the debug binaries and produce an error if they were not built\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" make_win_bin_dist: embedded option -.\" embedded option: make_win_bin_dist -\fB\-\-embedded\fR -.sp -Pack the embedded server and produce an error if it was not built\&. The default is to pack it if it was built\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" make_win_bin_dist: exe-suffix option -.\" exe-suffix option: make_win_bin_dist -\fB\-\-exe\-suffix=\fR\fB\fIsuffix\fR\fR -.sp -Add a suffix to the basename of the -\fBmysql\fR -binary\&. For example, a suffix of -\-abc -produces a binary named -\fBmysqld\-abc\&.exe\fR\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" make_win_bin_dist: no-debug option -.\" no-debug option: make_win_bin_dist -\fB\-\-no\-debug\fR -.sp -Do not pack the debug binaries even if they were built\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" make_win_bin_dist: no-embedded option -.\" no-embedded option: make_win_bin_dist -\fB\-\-no\-embedded\fR -.sp -Do not pack the embedded server even if it was built\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" make_win_bin_dist: only-debug option -.\" only-debug option: make_win_bin_dist -\fB\-\-only\-debug\fR -.sp -Use this option when the target for this build was -Debug, and you just want to replace the normal binaries with debug versions (that is, do not use separate -debug -directories)\&. 
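The comment and macros added to include/mysql/psi/psi.h above introduce psi_likely()/psi_unlikely() plus a pfs_enabled flag, and the bulk of this patch rewrites the instrumentation wrappers to test if (psi_likely(pfs_enabled)) before declaring or touching any locker state. As a rough standalone illustration of that fast-path shape (not code from the MariaDB tree): only the names psi_likely, psi_unlikely, PSI_ON_BY_DEFAULT and pfs_enabled mirror the patch, while instrumented_write(), the simplified flag and the local likely()/unlikely() definitions are illustrative stand-ins.

/*
  Rough standalone sketch of the psi_likely(pfs_enabled) fast path.
  Only the macro and flag names mirror the patch; the rest is simplified.
*/
#include <stdio.h>

#if defined(__GNUC__)
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x)   (x)
#define unlikely(x) (x)
#endif

/* Build with -DPSI_ON_BY_DEFAULT to treat the instrumented branch as hot. */
#ifdef PSI_ON_BY_DEFAULT
#define psi_likely(A)   likely(A)
#define psi_unlikely(A) unlikely(A)
#else
#define psi_likely(A)   unlikely(A)
#define psi_unlikely(A) likely(A)
#endif

static int pfs_enabled= 0;      /* stand-in for the real runtime flag */

static size_t instrumented_write(const char *buf, size_t count)
{
  if (psi_likely(pfs_enabled))
  {
    /* In the real wrappers: obtain a PSI locker, call start_file_wait(),
       perform the write, then call end_file_wait().  Elided here. */
    (void) buf;
    return count;
  }
  /* Uninstrumented fast path: no locker state is even declared. */
  return count;
}

int main(void)
{
  printf("%zu\n", instrumented_write("x", 1));
  return 0;
}

When PSI_ON_BY_DEFAULT is not defined, the branch is annotated as unlikely, so an uninstrumented build pays only a predicted-not-taken test per call; that is the point of hoisting the pfs_enabled check above the locker declarations in every wrapper shown above.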
-.RE -.SH "COPYRIGHT" -.br -.PP -Copyright 2007-2008 MySQL AB, 2008-2010 Sun Microsystems, Inc., 2010-2015 MariaDB Foundation -.PP -This documentation is free software; you can redistribute it and/or modify it only under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. -.PP -This documentation is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -.PP -You should have received a copy of the GNU General Public License along with the program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or see http://www.gnu.org/licenses/. -.sp -.SH "SEE ALSO" -For more information, please refer to the MariaDB Knowledge Base, available online at https://mariadb.com/kb/ -.SH AUTHOR -MariaDB Foundation (http://www.mariadb.org/). diff --git a/mysql-test/include/check-testcase.test b/mysql-test/include/check-testcase.test index 4ca53989d06..3c164eef8b6 100644 --- a/mysql-test/include/check-testcase.test +++ b/mysql-test/include/check-testcase.test @@ -70,10 +70,13 @@ if ($tmp) --echo SQL_Delay 0 --echo SQL_Remaining_Delay NULL --echo Slave_SQL_Running_State + --echo Slave_DDL_Groups # + --echo Slave_Non_Transactional_Groups # + --echo Slave_Transactional_Groups # } if (!$tmp) { # Note: after WL#5177, fields 13-18 shall not be filtered-out. - --replace_column 4 # 5 # 6 # 7 # 8 # 9 # 10 # 13 # 14 # 15 # 16 # 17 # 18 # 22 # 23 # 24 # 25 # 26 # 40 # 41 # 42 # 44 # + --replace_column 4 # 5 # 6 # 7 # 8 # 9 # 10 # 13 # 14 # 15 # 16 # 17 # 18 # 22 # 23 # 24 # 25 # 26 # 40 # 41 # 42 # 44 # 51 # 52 # 53 # query_vertical SHOW SLAVE STATUS; } diff --git a/mysql-test/include/galera_wait_ready.inc b/mysql-test/include/galera_wait_ready.inc index e20f01fad90..a726116f000 100644 --- a/mysql-test/include/galera_wait_ready.inc +++ b/mysql-test/include/galera_wait_ready.inc @@ -1,2 +1,32 @@ -let $wait_condition = SELECT 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready' AND VARIABLE_VALUE = 'ON'; ---source include/wait_condition.inc +# include/galera_wait_ready.inc +# +# Waits for galera node to transition to READY state. 
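The rewritten include polls wsrep_ready up to 600 times, 0.1 seconds apart, and dies if the node never reports READY (see the loop that follows). As a rough standalone illustration of the same bounded poll-or-fail shape, here is a small C sketch; check_ready() is a hypothetical stand-in for the SELECT against INFORMATION_SCHEMA.GLOBAL_STATUS and is not part of the patch.

/* Bounded poll-or-fail sketch mirroring the 600 x 0.1 s loop below. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int check_ready(void)
{
  /* Placeholder: in the test this is
     SELECT VARIABLE_VALUE = 'ON' FROM INFORMATION_SCHEMA.GLOBAL_STATUS
     WHERE VARIABLE_NAME = 'wsrep_ready'. */
  return 1;
}

static void wait_until_ready(void)
{
  int attempts= 600;
  while (attempts-- > 0)
  {
    if (check_ready())
      return;
    usleep(100000);             /* 0.1 s between attempts, ~60 s in total */
  }
  fprintf(stderr, "Server did not transition to READY state\n");
  exit(EXIT_FAILURE);           /* mirrors the 'die' in the include file */
}

int main(void)
{
  wait_until_ready();
  return 0;
}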
+# + +--enable_reconnect +--disable_query_log +--disable_result_log +let $wait_counter = 600; +while ($wait_counter) +{ + --disable_abort_on_error + let $success = `SELECT VARIABLE_VALUE = 'ON' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready'`; + --enable_abort_on_error + if ($success) + { + let $wait_counter = 0; + } + if (!$success) + { + real_sleep 0.1; + dec $wait_counter; + } +} + +if (!$success) +{ + die "Server did not transition to READY state"; +} +--disable_reconnect +--enable_query_log +--enable_result_log diff --git a/mysql-test/include/have_rbr_triggers.inc b/mysql-test/include/have_rbr_triggers.inc deleted file mode 100644 index 9ccfc18bfde..00000000000 --- a/mysql-test/include/have_rbr_triggers.inc +++ /dev/null @@ -1,5 +0,0 @@ -if (`select count(*) = 0 from information_schema.session_variables where variable_name = 'slave_run_triggers_for_rbr'`) -{ - skip RBR triggers are not available; -} - diff --git a/mysql-test/lib/My/CoreDump.pm b/mysql-test/lib/My/CoreDump.pm index a421d51ec98..8c22ea83f6b 100644 --- a/mysql-test/lib/My/CoreDump.pm +++ b/mysql-test/lib/My/CoreDump.pm @@ -264,6 +264,44 @@ EOF } +sub _lldb +{ + my ($core_name)= @_; + + print "\nTrying 'lldb' to get a backtrace from coredump $core_name\n"; + + # Create tempfile containing lldb commands + my ($tmp, $tmp_name)= tempfile(); + print $tmp + "bt\n", + "thread backtrace all\n", + "quit\n"; + close $tmp or die "Error closing $tmp_name: $!"; + + my $lldb_output= `lldb -c '$core_name' -s '$tmp_name' 2>&1`; + + unlink $tmp_name or die "Error removing $tmp_name: $!"; + + if ($? == 127) + { + print "lldb not found, cannot get the stack trace\n"; + return; + } + + return if $?; + return unless $lldb_output; + + resfile_print < + + +/* + Eventually we may want to adopt kern.corefile parsing code from + https://opensource.apple.com/source/xnu/xnu-3247.1.106/bsd/kern/kern_proc.c +*/ + +void handle_core(pid_t pid) +{ + char corefile[256]; + int coredump; + size_t corefile_size= sizeof(corefile); + size_t coredump_size= sizeof(coredump); + + if (sysctlbyname("kern.coredump", &coredump, &coredump_size, 0, 0) || + sysctlbyname("kern.corefile", corefile, &corefile_size, 0, 0)) + { + message("sysctlbyname failed: %d (%s)", errno, strerror(errno)); + return; + } + + if (!coredump) + { + message("core dumps disabled, to enable run sudo sysctl kern.coredump=1"); + return; + } + + if (!strncmp(corefile, "/cores/core.%P", corefile_size)) + { + char from[256]; + char *to= from + 7; + + snprintf(from, sizeof(from), "/cores/core.%u", pid); + if (!access(from, R_OK)) + { + if (symlink(from, to)) + message("symlink failed: %d (%s)", errno, strerror(errno)); + } + } +} +#else +void handle_core(pid_t pid __attribute__((unused))) {} +#endif + + +static int kill_child(bool was_killed) { int status= 0; @@ -108,15 +155,18 @@ static void kill_child(bool was_killed) exit_code= WEXITSTATUS(status); message("Child exit: %d", exit_code); // Exit with exit status of the child - exit(exit_code); + return exit_code; } if (WIFSIGNALED(status)) + { message("Child killed by signal: %d", WTERMSIG(status)); + handle_core(child_pid); + } - exit(exit_code); + return exit_code; } - exit(5); + return 5; } @@ -136,7 +186,7 @@ extern "C" void handle_signal(int sig) terminated= 1; if (child_pid > 0) - kill_child(sig == SIGCHLD); + _exit(kill_child(sig == SIGCHLD)); // Ignore further signals signal(SIGTERM, SIG_IGN); @@ -300,8 +350,6 @@ int main(int argc, char* const argv[] ) /* Wait for parent or child to die */ sleep(1); } - 
kill_child(0); - - return 4; + return kill_child(0); } diff --git a/mysql-test/lib/mtr_report.pm b/mysql-test/lib/mtr_report.pm index d93d8adf34c..4869c8f4c4b 100644 --- a/mysql-test/lib/mtr_report.pm +++ b/mysql-test/lib/mtr_report.pm @@ -118,7 +118,7 @@ sub mtr_report_test ($) { my $logfile= $tinfo->{'logfile'}; my $warnings= $tinfo->{'warnings'}; my $result= $tinfo->{'result'}; - my $retry= $tinfo->{'retries'} ? "retry-" : ""; + my $retry= $tinfo->{'retries'} ? "retry-" : $tinfo->{'repeat'} ? "$tinfo->{'repeat'} " : ""; if ($result eq 'MTR_RES_FAILED'){ diff --git a/mysql-test/main/alter_table.result b/mysql-test/main/alter_table.result index 9b394926489..cb5553a086c 100644 --- a/mysql-test/main/alter_table.result +++ b/mysql-test/main/alter_table.result @@ -1810,9 +1810,7 @@ affected rows: 2 info: Records: 2 Duplicates: 0 Warnings: 0 ALTER TABLE ti1 ADD FULLTEXT INDEX ii3 (d); affected rows: 0 -info: Records: 0 Duplicates: 0 Warnings: 1 -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID +info: Records: 0 Duplicates: 0 Warnings: 0 ALTER TABLE tm1 ADD FULLTEXT INDEX im3 (d); affected rows: 2 info: Records: 2 Duplicates: 0 Warnings: 0 @@ -2177,6 +2175,66 @@ t1 CREATE TABLE `t1` ( ) ENGINE=InnoDB DEFAULT CHARSET=utf8 DROP TABLE t1; # +# MDEV-15308 +# Assertion `ha_alter_info->alter_info->drop_list.elements > 0' failed +# in ha_innodb::prepare_inplace_alter_table +# +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP COLUMN b; +Warnings: +Note 1091 Can't DROP FOREIGN KEY `fk`; check that it exists +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP INDEX IF EXISTS fk, DROP COLUMN b; +Warnings: +Note 1091 Can't DROP INDEX `fk`; check that it exists +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, c INT, KEY(c)) ENGINE=InnoDB; +ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP COLUMN c; +Warnings: +Note 1091 Can't DROP FOREIGN KEY `fk`; check that it exists +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, c INT, KEY c1(c)) ENGINE=InnoDB; +ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP INDEX c1; +Warnings: +Note 1091 Can't DROP FOREIGN KEY `fk`; check that it exists +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP INDEX IF EXISTS fk, DROP COLUMN IF EXISTS c; +Warnings: +Note 1091 Can't DROP INDEX `fk`; check that it exists +Note 1091 Can't DROP COLUMN `c`; check that it exists +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +# # End of 10.0 tests # # diff --git a/mysql-test/main/alter_table.test b/mysql-test/main/alter_table.test index 7f692f36f4e..aa9faf710f5 100644 --- a/mysql-test/main/alter_table.test +++ b/mysql-test/main/alter_table.test @@ -1,7 +1,3 @@ -if (`select plugin_auth_version < "5.6.26" from 
information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in XtraDB below 5.6.26 -} --source include/have_innodb.inc # # Test of alter table @@ -1807,6 +1803,37 @@ ALTER TABLE t1 CONVERT TO CHARACTER SET utf8; SHOW CREATE TABLE t1; DROP TABLE t1; +--echo # +--echo # MDEV-15308 +--echo # Assertion `ha_alter_info->alter_info->drop_list.elements > 0' failed +--echo # in ha_innodb::prepare_inplace_alter_table +--echo # + +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP COLUMN b; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP INDEX IF EXISTS fk, DROP COLUMN b; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b INT, c INT, KEY(c)) ENGINE=InnoDB; +ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP COLUMN c; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b INT, c INT, KEY c1(c)) ENGINE=InnoDB; +ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP INDEX c1; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP INDEX IF EXISTS fk, DROP COLUMN IF EXISTS c; +SHOW CREATE TABLE t1; +DROP TABLE t1; + --echo # --echo # End of 10.0 tests --echo # diff --git a/mysql-test/main/alter_table_errors.result b/mysql-test/main/alter_table_errors.result new file mode 100644 index 00000000000..020a30304d0 --- /dev/null +++ b/mysql-test/main/alter_table_errors.result @@ -0,0 +1,10 @@ +create table t (a int, v int as (a)) engine=innodb; +alter table t change column a b tinyint, algorithm=inplace; +ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Cannot change column type INPLACE. Try ALGORITHM=COPY +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `a` int(11) DEFAULT NULL, + `v` int(11) GENERATED ALWAYS AS (`a`) VIRTUAL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +drop table t; diff --git a/mysql-test/main/alter_table_errors.test b/mysql-test/main/alter_table_errors.test new file mode 100644 index 00000000000..d9982ac26f4 --- /dev/null +++ b/mysql-test/main/alter_table_errors.test @@ -0,0 +1,10 @@ +--source include/have_innodb.inc + +# +# MDEV-16110 ALTER with ALGORITHM=INPLACE breaks temporary table with virtual columns +# +create table t (a int, v int as (a)) engine=innodb; +--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON +alter table t change column a b tinyint, algorithm=inplace; +show create table t; +drop table t; diff --git a/mysql-test/main/alter_table_online.result b/mysql-test/main/alter_table_online.result index d5a2a028acc..2e3de2c0635 100644 --- a/mysql-test/main/alter_table_online.result +++ b/mysql-test/main/alter_table_online.result @@ -112,7 +112,7 @@ create table t1 (a int not null primary key, b int, c varchar(80)); create table t2 (a int not null primary key, b int, c varchar(80)); create table t3 (a int not null primary key, b int, c varchar(80)) engine=merge UNION=(t1); alter online table t3 union=(t1,t2); -ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE +ERROR 0A000: LOCK=NONE is not supported for this operation. 
Try LOCK=EXCLUSIVE drop table t1,t2,t3; create table t1 (i int) partition by hash(i) partitions 2; alter online table t1 comment 'test'; diff --git a/mysql-test/main/ansi.result b/mysql-test/main/ansi.result index 527748e00d5..810168cc3bd 100644 --- a/mysql-test/main/ansi.result +++ b/mysql-test/main/ansi.result @@ -46,3 +46,73 @@ t1 CREATE TABLE `t1` ( PRIMARY KEY (`i`) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; +# +# MDEV-16186 Concatenation operator || returns wrong results in sql_mode=ORACLE +# +SET sql_mode=ANSI; +SELECT -1<<1||1 AS a FROM DUAL; +a +18446744073709549568 +SELECT -1||0<<1 AS a FROM DUAL; +a +18446744073709551596 +EXPLAIN EXTENDED SELECT -1<<1||1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select -1 << concat(1,1) AS "a" +EXPLAIN EXTENDED SELECT -1||0<<1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select concat(-1,0) << 1 AS "a" +SELECT -1+1||1 AS a FROM DUAL; +a +10 +SELECT -1||0+1 AS a FROM DUAL; +a +-9 +EXPLAIN EXTENDED SELECT -1+1||1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select -1 + concat(1,1) AS "a" +EXPLAIN EXTENDED SELECT -1||0+1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select concat(-1,0) + 1 AS "a" +SELECT 1*1||-1 AS a FROM DUAL; +a +1 +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: '1-1' +SELECT 1||1*-1 AS a FROM DUAL; +a +-11 +EXPLAIN EXTENDED SELECT 1*1||-1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select 1 * concat(1,-1) AS "a" +EXPLAIN EXTENDED SELECT 1||1*-1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select concat(1,1) * -1 AS "a" +SELECT -1^1||1 AS a FROM DUAL; +a +18446744073709551604 +SELECT -1||0^1 AS a FROM DUAL; +a +18446744073709551607 +EXPLAIN EXTENDED SELECT -1^1||1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select -1 ^ concat(1,1) AS "a" +EXPLAIN EXTENDED SELECT -1||0^1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select concat(-1,0) ^ 1 AS "a" diff --git a/mysql-test/main/ansi.test b/mysql-test/main/ansi.test index fa7f999954e..0620465728b 100644 --- a/mysql-test/main/ansi.test +++ b/mysql-test/main/ansi.test @@ -39,3 +39,36 @@ SHOW CREATE TABLE t1; DROP TABLE t1; # End of 4.1 tests + + +--echo # +--echo # MDEV-16186 Concatenation operator || returns wrong results in sql_mode=ORACLE +--echo # + +SET sql_mode=ANSI; + +# Concatenation operator || is stronger than numeric dyadic operators ^ * + << + +SELECT -1<<1||1 AS a FROM DUAL; +SELECT -1||0<<1 AS a FROM DUAL; + +EXPLAIN EXTENDED SELECT -1<<1||1 AS a FROM DUAL; +EXPLAIN EXTENDED SELECT 
-1||0<<1 AS a FROM DUAL; + +SELECT -1+1||1 AS a FROM DUAL; +SELECT -1||0+1 AS a FROM DUAL; + +EXPLAIN EXTENDED SELECT -1+1||1 AS a FROM DUAL; +EXPLAIN EXTENDED SELECT -1||0+1 AS a FROM DUAL; + +SELECT 1*1||-1 AS a FROM DUAL; +SELECT 1||1*-1 AS a FROM DUAL; + +EXPLAIN EXTENDED SELECT 1*1||-1 AS a FROM DUAL; +EXPLAIN EXTENDED SELECT 1||1*-1 AS a FROM DUAL; + +SELECT -1^1||1 AS a FROM DUAL; +SELECT -1||0^1 AS a FROM DUAL; + +EXPLAIN EXTENDED SELECT -1^1||1 AS a FROM DUAL; +EXPLAIN EXTENDED SELECT -1||0^1 AS a FROM DUAL; diff --git a/mysql-test/main/assign_key_cache.result b/mysql-test/main/assign_key_cache.result new file mode 100644 index 00000000000..4ed6170136b --- /dev/null +++ b/mysql-test/main/assign_key_cache.result @@ -0,0 +1,13 @@ +set global my_cache.key_buffer_size = 1024*1024; +create table t1 (i int) engine=myisam partition by hash (i) partitions 2; +xa start 'xid'; +cache index t1 partition (non_existing_partition) in my_cache; +Table Op Msg_type Msg_text +test.t1 assign_to_keycache error Error in list of partitions to test.t1 +cache index t1 partition (p1) in my_cache; +Table Op Msg_type Msg_text +test.t1 assign_to_keycache status OK +xa end 'xid'; +xa rollback 'xid'; +drop table t1; +set global my_cache.key_buffer_size = 0; diff --git a/mysql-test/main/assign_key_cache.test b/mysql-test/main/assign_key_cache.test new file mode 100644 index 00000000000..401e7bf9138 --- /dev/null +++ b/mysql-test/main/assign_key_cache.test @@ -0,0 +1,13 @@ +# +# MDEV-15216 Assertion `! is_set() || m_can_overwrite_status' failed in Diagnostics_area::set_error_status upon operation inside XA +# +--source include/have_partition.inc +set global my_cache.key_buffer_size = 1024*1024; +create table t1 (i int) engine=myisam partition by hash (i) partitions 2; +xa start 'xid'; +cache index t1 partition (non_existing_partition) in my_cache; +cache index t1 partition (p1) in my_cache; +xa end 'xid'; +xa rollback 'xid'; +drop table t1; +set global my_cache.key_buffer_size = 0; diff --git a/mysql-test/main/assign_key_cache-5405.result b/mysql-test/main/assign_key_cache_debug.result similarity index 100% rename from mysql-test/main/assign_key_cache-5405.result rename to mysql-test/main/assign_key_cache_debug.result diff --git a/mysql-test/main/assign_key_cache-5405.test b/mysql-test/main/assign_key_cache_debug.test similarity index 100% rename from mysql-test/main/assign_key_cache-5405.test rename to mysql-test/main/assign_key_cache_debug.test diff --git a/mysql-test/main/check.result b/mysql-test/main/check.result index 341c4411298..e3dcda773f4 100644 --- a/mysql-test/main/check.result +++ b/mysql-test/main/check.result @@ -52,3 +52,36 @@ connection default; UNLOCK TABLES; DROP TABLE t1; disconnect con1; +# +# MDEV-15338 +# Assertion `!table || (!table->read_set || +# bitmap_is_set(table->read_set, field_index))' +# failed on dropping column with CHECK +# +CREATE TABLE t1 (a INT, b INT, CHECK (a>0)) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1,2),(3,4); +ALTER TABLE t1 DROP COLUMN a; +CREATE OR REPLACE TABLE t1 (a INT, b INT, CHECK (a>0)) ENGINE=MyISAM; +ALTER TABLE t1 DROP COLUMN b; +CREATE OR REPLACE TABLE t1 (a INT, b INT, c INT, CHECK (a+b>0)) ENGINE=MyISAM; +ALTER TABLE t1 DROP COLUMN b; +ERROR 42S22: Unknown column 'b' in 'CHECK' +ALTER TABLE t1 DROP COLUMN a, DROP COLUMN b; +CREATE OR REPLACE TABLE t1 (a INT, b INT, c INT, CHECK (a+b>0)) ENGINE=MyISAM; +ALTER TABLE t1 DROP CONSTRAINT `CONSTRAINT_1`; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` 
int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +CREATE OR REPLACE TABLE t1 (a INT, b INT, c INT, CHECK (a+b>0)) ENGINE=MyISAM; +ALTER TABLE t1 DROP COLUMN b, DROP CONSTRAINT `CONSTRAINT_1`; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/mysql-test/main/check.test b/mysql-test/main/check.test index 6a7b7253a59..cce8fd34c9c 100644 --- a/mysql-test/main/check.test +++ b/mysql-test/main/check.test @@ -79,3 +79,27 @@ disconnect con1; # Wait till we reached the initial number of concurrent sessions --source include/wait_until_count_sessions.inc + +--echo # +--echo # MDEV-15338 +--echo # Assertion `!table || (!table->read_set || +--echo # bitmap_is_set(table->read_set, field_index))' +--echo # failed on dropping column with CHECK +--echo # + +CREATE TABLE t1 (a INT, b INT, CHECK (a>0)) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1,2),(3,4); +ALTER TABLE t1 DROP COLUMN a; +CREATE OR REPLACE TABLE t1 (a INT, b INT, CHECK (a>0)) ENGINE=MyISAM; +ALTER TABLE t1 DROP COLUMN b; +CREATE OR REPLACE TABLE t1 (a INT, b INT, c INT, CHECK (a+b>0)) ENGINE=MyISAM; +--error ER_BAD_FIELD_ERROR +ALTER TABLE t1 DROP COLUMN b; +ALTER TABLE t1 DROP COLUMN a, DROP COLUMN b; +CREATE OR REPLACE TABLE t1 (a INT, b INT, c INT, CHECK (a+b>0)) ENGINE=MyISAM; +ALTER TABLE t1 DROP CONSTRAINT `CONSTRAINT_1`; +SHOW CREATE TABLE t1; +CREATE OR REPLACE TABLE t1 (a INT, b INT, c INT, CHECK (a+b>0)) ENGINE=MyISAM; +ALTER TABLE t1 DROP COLUMN b, DROP CONSTRAINT `CONSTRAINT_1`; +SHOW CREATE TABLE t1; +DROP TABLE t1; diff --git a/mysql-test/main/check_constraint.result b/mysql-test/main/check_constraint.result index 70d64cd6ff7..9a32e6f12bc 100644 --- a/mysql-test/main/check_constraint.result +++ b/mysql-test/main/check_constraint.result @@ -156,3 +156,44 @@ create table t1 (id int auto_increment primary key, datecol datetime, check (dat insert into t1 (datecol) values (now()); insert into t1 (datecol) values (now()); drop table t1; +CREATE TABLE t1 ( +EmployeeID SMALLINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, +FirstName VARCHAR(30) NOT NULL CHECK (CHAR_LENGTH(FirstName > 2)) +); +INSERT INTO t1 VALUES (NULL, 'Ken'); +ERROR 22007: Truncated incorrect DOUBLE value: 'Ken' +SHOW WARNINGS; +Level Code Message +Error 1292 Truncated incorrect DOUBLE value: 'Ken' +Error 4025 CONSTRAINT `FirstName` failed for `test`.`t1` +INSERT INTO t1 VALUES (NULL, 'Ken'),(NULL, 'Brian'); +ERROR 22007: Truncated incorrect DOUBLE value: 'Ken' +SHOW WARNINGS; +Level Code Message +Error 1292 Truncated incorrect DOUBLE value: 'Ken' +Error 4025 CONSTRAINT `FirstName` failed for `test`.`t1` +INSERT IGNORE INTO t1 VALUES (NULL, 'Ken'); +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'Ken' +INSERT IGNORE INTO t1 VALUES (NULL, 'Ken'),(NULL, 'Brian'); +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'Ken' +Warning 1292 Truncated incorrect DOUBLE value: 'Brian' +set sql_mode=""; +INSERT INTO t1 VALUES (NULL, 'Ken'); +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'Ken' +INSERT INTO t1 VALUES (NULL, 'Ken'),(NULL, 'Brian'); +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'Ken' +Warning 1292 Truncated incorrect DOUBLE value: 'Brian' +set sql_mode=default; +select * from t1; +EmployeeID FirstName +1 Ken +2 Ken +3 Brian +4 Ken +5 Ken +6 Brian +drop table t1; diff --git a/mysql-test/main/check_constraint.test b/mysql-test/main/check_constraint.test index 
9a77736acd7..02081071bd4 100644 --- a/mysql-test/main/check_constraint.test +++ b/mysql-test/main/check_constraint.test @@ -111,3 +111,27 @@ create table t1 (id int auto_increment primary key, datecol datetime, check (dat insert into t1 (datecol) values (now()); insert into t1 (datecol) values (now()); drop table t1; + +# +# MDEV-15461 Check Constraints with binary logging makes insert inconsistent +# + +CREATE TABLE t1 ( + EmployeeID SMALLINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, + FirstName VARCHAR(30) NOT NULL CHECK (CHAR_LENGTH(FirstName > 2)) +); + +--error ER_TRUNCATED_WRONG_VALUE +INSERT INTO t1 VALUES (NULL, 'Ken'); +SHOW WARNINGS; +--error ER_TRUNCATED_WRONG_VALUE +INSERT INTO t1 VALUES (NULL, 'Ken'),(NULL, 'Brian'); +SHOW WARNINGS; +INSERT IGNORE INTO t1 VALUES (NULL, 'Ken'); +INSERT IGNORE INTO t1 VALUES (NULL, 'Ken'),(NULL, 'Brian'); +set sql_mode=""; +INSERT INTO t1 VALUES (NULL, 'Ken'); +INSERT INTO t1 VALUES (NULL, 'Ken'),(NULL, 'Brian'); +set sql_mode=default; +select * from t1; +drop table t1; diff --git a/mysql-test/main/column_compression.result b/mysql-test/main/column_compression.result index b3d4caad555..ace65387181 100644 --- a/mysql-test/main/column_compression.result +++ b/mysql-test/main/column_compression.result @@ -1336,15 +1336,33 @@ a LENGTH(a) DROP TABLE t1; CREATE TABLE t1(a TINYTEXT COMPRESSED); SET column_compression_threshold=300; +INSERT INTO t1 VALUES(REPEAT('a', 254)); +INSERT INTO t1 VALUES(REPEAT(' ', 254)); INSERT INTO t1 VALUES(REPEAT('a', 255)); ERROR 22001: Data too long for column 'a' at row 1 INSERT INTO t1 VALUES(REPEAT(' ', 255)); Warnings: Note 1265 Data truncated for column 'a' at row 1 +INSERT INTO t1 VALUES(REPEAT('a', 256)); +ERROR 22001: Data too long for column 'a' at row 1 +INSERT INTO t1 VALUES(REPEAT(' ', 256)); +Warnings: +Note 1265 Data truncated for column 'a' at row 1 +Note 1265 Data truncated for column 'a' at row 1 +INSERT INTO t1 VALUES(REPEAT('a', 257)); +ERROR 22001: Data too long for column 'a' at row 1 +INSERT INTO t1 VALUES(REPEAT(' ', 257)); +Warnings: +Note 1265 Data truncated for column 'a' at row 1 +Note 1265 Data truncated for column 'a' at row 1 SET column_compression_threshold=DEFAULT; -SELECT a, LENGTH(a) FROM t1; -a LENGTH(a) - 254 +SELECT LEFT(a, 10), LENGTH(a) FROM t1 ORDER BY 1; +LEFT(a, 10) LENGTH(a) + 254 + 254 + 254 + 254 +aaaaaaaaaa 254 DROP TABLE t1; # Corner case: VARCHAR(255) COMPRESSED must have 2 bytes pack length CREATE TABLE t1(a VARCHAR(255) COMPRESSED); @@ -1360,6 +1378,32 @@ SELECT a, LENGTH(a) FROM t1; a LENGTH(a) aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 255 DROP TABLE t1; +CREATE TABLE t1(a VARCHAR(65531) COMPRESSED); +SET column_compression_threshold=65537; +INSERT INTO t1 VALUES(REPEAT('a', 65530)); +INSERT INTO t1 VALUES(REPEAT(' ', 65530)); +INSERT INTO t1 VALUES(REPEAT('a', 65531)); +INSERT INTO t1 VALUES(REPEAT(' ', 65531)); +INSERT INTO t1 VALUES(REPEAT('a', 65532)); +ERROR 22001: Data too long for column 'a' at row 1 +INSERT INTO t1 VALUES(REPEAT(' ', 65532)); +Warnings: +Note 1265 Data truncated for column 'a' at row 1 +INSERT INTO t1 VALUES(REPEAT('a', 65533)); +ERROR 22001: Data too long for column 'a' at row 1 +INSERT INTO t1 VALUES(REPEAT(' ', 65533)); +Warnings: +Note 1265 Data truncated for column 'a' at row 1 +SET column_compression_threshold=DEFAULT; +SELECT LEFT(a, 
10), LENGTH(a) FROM t1 ORDER BY 1, 2; +LEFT(a, 10) LENGTH(a) + 65530 + 65531 + 65531 + 65531 +aaaaaaaaaa 65530 +aaaaaaaaaa 65531 +DROP TABLE t1; # # MDEV-14929 - AddressSanitizer: memcpy-param-overlap in # Field_longstr::compress @@ -1373,3 +1417,58 @@ bar foo SET SESSION optimizer_switch=DEFAULT; DROP TABLE t1; +# +# MDEV-15762 - VARCHAR(0) COMPRESSED crashes the server +# +CREATE TABLE t1(a VARCHAR(0) COMPRESSED); +INSERT INTO t1 VALUES('a'); +ERROR 22001: Data too long for column 'a' at row 1 +INSERT INTO t1 VALUES(' '); +Warnings: +Note 1265 Data truncated for column 'a' at row 1 +SELECT LENGTH(a) FROM t1; +LENGTH(a) +0 +DROP TABLE t1; +# +# MDEV-15763 - VARCHAR(1) COMPRESSED crashes the server +# +CREATE TABLE t1(a VARCHAR(1) COMPRESSED); +SET column_compression_threshold=0; +INSERT INTO t1 VALUES('a'); +SET column_compression_threshold=DEFAULT; +DROP TABLE t1; +# +# MDEV-15938 - TINYTEXT CHARACTER SET utf8 COMPRESSED truncates data +# +CREATE TABLE t1(a TINYTEXT COMPRESSED, b TINYTEXT) CHARACTER SET utf8; +INSERT INTO t1 VALUES (REPEAT(_latin1'a', 254), REPEAT(_latin1'a', 254)); +SELECT CHAR_LENGTH(a), CHAR_LENGTH(b), LEFT(a, 10), LEFT(b, 10) FROM t1; +CHAR_LENGTH(a) CHAR_LENGTH(b) LEFT(a, 10) LEFT(b, 10) +254 254 aaaaaaaaaa aaaaaaaaaa +DROP TABLE t1; +# +# MDEV-16134 Wrong I_S.COLUMNS.CHARACTER_XXX_LENGTH value for compressed columns +# +CREATE TABLE t1 +( +a VARCHAR(10) CHARACTER SET latin1 COMPRESSED, +b VARCHAR(10) CHARACTER SET utf8 COMPRESSED +); +SELECT COLUMN_NAME, CHARACTER_MAXIMUM_LENGTH, CHARACTER_OCTET_LENGTH +FROM INFORMATION_SCHEMA.COLUMNS +WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1' AND COLUMN_NAME IN ('a','b') +ORDER BY COLUMN_NAME; +COLUMN_NAME CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH +a 10 10 +b 10 30 +DROP TABLE t1; +# +# MDEV-15592 - Column COMPRESSED should select a 'high order' datatype +# +CREATE TABLE t1(a TINYTEXT COMPRESSED); +INSERT INTO t1 VALUES(REPEAT('a', 255)); +SELECT LEFT(a, 10), LENGTH(a) FROM t1; +LEFT(a, 10) LENGTH(a) +aaaaaaaaaa 255 +DROP TABLE t1; diff --git a/mysql-test/main/column_compression.test b/mysql-test/main/column_compression.test index 6d6ed3d3993..f3220503694 100644 --- a/mysql-test/main/column_compression.test +++ b/mysql-test/main/column_compression.test @@ -64,11 +64,19 @@ DROP TABLE t1; CREATE TABLE t1(a TINYTEXT COMPRESSED); SET column_compression_threshold=300; +INSERT INTO t1 VALUES(REPEAT('a', 254)); +INSERT INTO t1 VALUES(REPEAT(' ', 254)); --error ER_DATA_TOO_LONG INSERT INTO t1 VALUES(REPEAT('a', 255)); INSERT INTO t1 VALUES(REPEAT(' ', 255)); +--error ER_DATA_TOO_LONG +INSERT INTO t1 VALUES(REPEAT('a', 256)); +INSERT INTO t1 VALUES(REPEAT(' ', 256)); +--error ER_DATA_TOO_LONG +INSERT INTO t1 VALUES(REPEAT('a', 257)); +INSERT INTO t1 VALUES(REPEAT(' ', 257)); SET column_compression_threshold=DEFAULT; -SELECT a, LENGTH(a) FROM t1; +SELECT LEFT(a, 10), LENGTH(a) FROM t1 ORDER BY 1; DROP TABLE t1; --echo # Corner case: VARCHAR(255) COMPRESSED must have 2 bytes pack length @@ -80,6 +88,22 @@ SET column_compression_threshold=DEFAULT; SELECT a, LENGTH(a) FROM t1; DROP TABLE t1; +CREATE TABLE t1(a VARCHAR(65531) COMPRESSED); +SET column_compression_threshold=65537; +INSERT INTO t1 VALUES(REPEAT('a', 65530)); +INSERT INTO t1 VALUES(REPEAT(' ', 65530)); +INSERT INTO t1 VALUES(REPEAT('a', 65531)); +INSERT INTO t1 VALUES(REPEAT(' ', 65531)); +--error ER_DATA_TOO_LONG +INSERT INTO t1 VALUES(REPEAT('a', 65532)); +INSERT INTO t1 VALUES(REPEAT(' ', 65532)); +--error ER_DATA_TOO_LONG +INSERT INTO t1 VALUES(REPEAT('a', 65533)); 
+INSERT INTO t1 VALUES(REPEAT(' ', 65533)); +SET column_compression_threshold=DEFAULT; +SELECT LEFT(a, 10), LENGTH(a) FROM t1 ORDER BY 1, 2; +DROP TABLE t1; + --echo # --echo # MDEV-14929 - AddressSanitizer: memcpy-param-overlap in @@ -91,3 +115,58 @@ SET SESSION optimizer_switch = 'derived_merge=off'; SELECT * FROM ( SELECT * FROM t1 ) AS sq ORDER BY b; SET SESSION optimizer_switch=DEFAULT; DROP TABLE t1; + + +--echo # +--echo # MDEV-15762 - VARCHAR(0) COMPRESSED crashes the server +--echo # +CREATE TABLE t1(a VARCHAR(0) COMPRESSED); +--error ER_DATA_TOO_LONG +INSERT INTO t1 VALUES('a'); +INSERT INTO t1 VALUES(' '); +SELECT LENGTH(a) FROM t1; +DROP TABLE t1; + + +--echo # +--echo # MDEV-15763 - VARCHAR(1) COMPRESSED crashes the server +--echo # +CREATE TABLE t1(a VARCHAR(1) COMPRESSED); +SET column_compression_threshold=0; +INSERT INTO t1 VALUES('a'); +SET column_compression_threshold=DEFAULT; +DROP TABLE t1; + + +--echo # +--echo # MDEV-15938 - TINYTEXT CHARACTER SET utf8 COMPRESSED truncates data +--echo # +CREATE TABLE t1(a TINYTEXT COMPRESSED, b TINYTEXT) CHARACTER SET utf8; +INSERT INTO t1 VALUES (REPEAT(_latin1'a', 254), REPEAT(_latin1'a', 254)); +SELECT CHAR_LENGTH(a), CHAR_LENGTH(b), LEFT(a, 10), LEFT(b, 10) FROM t1; +DROP TABLE t1; + + +--echo # +--echo # MDEV-16134 Wrong I_S.COLUMNS.CHARACTER_XXX_LENGTH value for compressed columns +--echo # + +CREATE TABLE t1 +( + a VARCHAR(10) CHARACTER SET latin1 COMPRESSED, + b VARCHAR(10) CHARACTER SET utf8 COMPRESSED +); +SELECT COLUMN_NAME, CHARACTER_MAXIMUM_LENGTH, CHARACTER_OCTET_LENGTH +FROM INFORMATION_SCHEMA.COLUMNS +WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1' AND COLUMN_NAME IN ('a','b') +ORDER BY COLUMN_NAME; +DROP TABLE t1; + + +--echo # +--echo # MDEV-15592 - Column COMPRESSED should select a 'high order' datatype +--echo # +CREATE TABLE t1(a TINYTEXT COMPRESSED); +INSERT INTO t1 VALUES(REPEAT('a', 255)); +SELECT LEFT(a, 10), LENGTH(a) FROM t1; +DROP TABLE t1; diff --git a/mysql-test/main/column_compression_utf16.result b/mysql-test/main/column_compression_utf16.result new file mode 100644 index 00000000000..142eaa9f965 --- /dev/null +++ b/mysql-test/main/column_compression_utf16.result @@ -0,0 +1,9 @@ +# +# MDEV-15938 - TINYTEXT CHARACTER SET utf8 COMPRESSED truncates data +# +CREATE TABLE t1(a TINYTEXT COMPRESSED, b TINYTEXT) CHARACTER SET utf16; +INSERT INTO t1 VALUES (REPEAT(_latin1'a', 127), REPEAT(_latin1'a', 127)); +SELECT CHAR_LENGTH(a), CHAR_LENGTH(b), LEFT(a, 10), LEFT(b, 10) FROM t1; +CHAR_LENGTH(a) CHAR_LENGTH(b) LEFT(a, 10) LEFT(b, 10) +127 127 aaaaaaaaaa aaaaaaaaaa +DROP TABLE t1; diff --git a/mysql-test/main/column_compression_utf16.test b/mysql-test/main/column_compression_utf16.test new file mode 100644 index 00000000000..56791d5cfd1 --- /dev/null +++ b/mysql-test/main/column_compression_utf16.test @@ -0,0 +1,9 @@ +--source include/have_utf16.inc + +--echo # +--echo # MDEV-15938 - TINYTEXT CHARACTER SET utf8 COMPRESSED truncates data +--echo # +CREATE TABLE t1(a TINYTEXT COMPRESSED, b TINYTEXT) CHARACTER SET utf16; +INSERT INTO t1 VALUES (REPEAT(_latin1'a', 127), REPEAT(_latin1'a', 127)); +SELECT CHAR_LENGTH(a), CHAR_LENGTH(b), LEFT(a, 10), LEFT(b, 10) FROM t1; +DROP TABLE t1; diff --git a/mysql-test/main/connect_debug.result b/mysql-test/main/connect_debug.result new file mode 100644 index 00000000000..0452b238db9 --- /dev/null +++ b/mysql-test/main/connect_debug.result @@ -0,0 +1,5 @@ +set @old_dbug=@@global.debug_dbug; +set global debug_dbug='+d,auth_disconnect'; +create user 'bad' identified by 'worse'; 
+set global debug_dbug=@old_dbug; +drop user bad; diff --git a/mysql-test/main/connect_debug.test b/mysql-test/main/connect_debug.test new file mode 100644 index 00000000000..299b605b2cd --- /dev/null +++ b/mysql-test/main/connect_debug.test @@ -0,0 +1,12 @@ +source include/have_debug.inc; +set @old_dbug=@@global.debug_dbug; + +# +# use after free if need plugin change and auth aborted +# +set global debug_dbug='+d,auth_disconnect'; +create user 'bad' identified by 'worse'; +--error 1 +--exec $MYSQL --default-auth=mysql_old_password --user=bad --password=worse +set global debug_dbug=@old_dbug; +drop user bad; diff --git a/mysql-test/main/create_or_replace.result b/mysql-test/main/create_or_replace.result index e26884f1cbf..0c1bccb861a 100644 --- a/mysql-test/main/create_or_replace.result +++ b/mysql-test/main/create_or_replace.result @@ -459,6 +459,26 @@ UNLOCK TABLES; DROP FUNCTION f1; DROP TABLE t1; # +# MDEV-11129 +# CREATE OR REPLACE TABLE t1 AS SELECT spfunc() crashes if spfunc() +# references t1 +# +CREATE OR REPLACE TABLE t1(a INT); +CREATE FUNCTION f1() RETURNS VARCHAR(16383) +BEGIN +INSERT INTO t1 VALUES(1); +RETURN 'test'; +END; +$$ +CREATE OR REPLACE TABLE t1 AS SELECT f1(); +ERROR HY000: Table 't1' is specified twice, both as a target for 'CREATE' and as a separate source for data +LOCK TABLE t1 WRITE; +CREATE OR REPLACE TABLE t1 AS SELECT f1(); +ERROR HY000: Table 't1' was not locked with LOCK TABLES +UNLOCK TABLES; +DROP FUNCTION f1; +DROP TABLE t1; +# # MDEV-11071 - Assertion `thd->transaction.stmt.is_empty()' failed in # Locked_tables_list::unlock_locked_tables # diff --git a/mysql-test/main/create_or_replace.test b/mysql-test/main/create_or_replace.test index 4ef4189694b..3ae882139bc 100644 --- a/mysql-test/main/create_or_replace.test +++ b/mysql-test/main/create_or_replace.test @@ -397,6 +397,31 @@ UNLOCK TABLES; DROP FUNCTION f1; DROP TABLE t1; +--echo # +--echo # MDEV-11129 +--echo # CREATE OR REPLACE TABLE t1 AS SELECT spfunc() crashes if spfunc() +--echo # references t1 +--echo # + +CREATE OR REPLACE TABLE t1(a INT); +DELIMITER $$; +CREATE FUNCTION f1() RETURNS VARCHAR(16383) +BEGIN + INSERT INTO t1 VALUES(1); + RETURN 'test'; +END; +$$ +DELIMITER ;$$ +--error ER_UPDATE_TABLE_USED +CREATE OR REPLACE TABLE t1 AS SELECT f1(); +LOCK TABLE t1 WRITE; +--error ER_TABLE_NOT_LOCKED +CREATE OR REPLACE TABLE t1 AS SELECT f1(); +UNLOCK TABLES; + +DROP FUNCTION f1; +DROP TABLE t1; + --echo # --echo # MDEV-11071 - Assertion `thd->transaction.stmt.is_empty()' failed in --echo # Locked_tables_list::unlock_locked_tables diff --git a/mysql-test/main/cte_recursive.result b/mysql-test/main/cte_recursive.result index 15d4fc1a01f..4d1535cacb2 100644 --- a/mysql-test/main/cte_recursive.result +++ b/mysql-test/main/cte_recursive.result @@ -3070,6 +3070,207 @@ SELECT * FROM cte; 2 3 # +# MDEV-15575: using recursive cte with big_tables enabled +# +set big_tables=1; +with recursive qn as +(select 123 as a union all select 1+a from qn where a<130) +select * from qn; +a +123 +124 +125 +126 +127 +128 +129 +130 +set big_tables=default; +# +# MDEV-15571: using recursive cte with big_tables enabled +# +create table t1 (a bigint); +insert into t1 values(1); +set big_tables=1; +with recursive qn as +( +select a from t1 +union all +select a*2000 from qn where a<10000000000000000000 +) +select * from qn; +ERROR 22003: BIGINT value is out of range in '`qn`.`a` * 2000' +set big_tables=default; +drop table t1; +# +# MDEV-15556: using recursive cte with big_tables enabled +# when recursive tables are 
accessed by key +# +SET big_tables=1; +CREATE TABLE t1 (id int, name char(10), leftpar int, rightpar int); +INSERT INTO t1 VALUES +(1, "A", 2, 3), (2, "LA", 4, 5), (4, "LLA", 6, 7), +(6, "LLLA", NULL, NULL), (7, "RLLA", NULL, NULL), (5, "RLA", 8, 9), +(8, "LRLA", NULL, NULL), (9, "RRLA", NULL, NULL), (3, "RA", 10, 11), +(10, "LRA", 12, 13), (11, "RRA", 14, 15), (15, "RRRA", NULL, NULL), +(16, "B", 17, 18), (17, "LB", NULL, NULL), (18, "RB", NULL, NULL); +CREATE TABLE t2 SELECT * FROM t1 ORDER BY rand(); +WITH RECURSIVE tree_of_a AS +(SELECT *, cast(id AS char(200)) AS path FROM t2 WHERE name="A" + UNION ALL +SELECT t2.*, concat(tree_of_a.path,",",t2.id) +FROM t2 JOIN tree_of_a ON t2.id=tree_of_a.leftpar +UNION ALL +SELECT t2.*, concat(tree_of_a.path,",",t2.id) +FROM t2 JOIN tree_of_a ON t2.id=tree_of_a.rightpar) +SELECT * FROM tree_of_a +ORDER BY path; +id name leftpar rightpar path +1 A 2 3 1 +2 LA 4 5 1,2 +4 LLA 6 7 1,2,4 +6 LLLA NULL NULL 1,2,4,6 +7 RLLA NULL NULL 1,2,4,7 +5 RLA 8 9 1,2,5 +8 LRLA NULL NULL 1,2,5,8 +9 RRLA NULL NULL 1,2,5,9 +3 RA 10 11 1,3 +10 LRA 12 13 1,3,10 +11 RRA 14 15 1,3,11 +15 RRRA NULL NULL 1,3,11,15 +EXPLAIN WITH RECURSIVE tree_of_a AS +(SELECT *, cast(id AS char(200)) AS path FROM t2 WHERE name="A" + UNION ALL +SELECT t2.*, concat(tree_of_a.path,",",t2.id) +FROM t2 JOIN tree_of_a ON t2.id=tree_of_a.leftpar +UNION ALL +SELECT t2.*, concat(tree_of_a.path,",",t2.id) +FROM t2 JOIN tree_of_a ON t2.id=tree_of_a.rightpar) +SELECT * FROM tree_of_a +ORDER BY path; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 15 Using filesort +2 DERIVED t2 ALL NULL NULL NULL NULL 15 Using where +3 RECURSIVE UNION t2 ALL NULL NULL NULL NULL 15 Using where +3 RECURSIVE UNION ref key0 key0 5 test.t2.id 2 +4 RECURSIVE UNION t2 ALL NULL NULL NULL NULL 15 Using where +4 RECURSIVE UNION ref key0 key0 5 test.t2.id 2 +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +DROP TABLE t1,t2; +SET big_tables=0; +# +# MDEV-15840: recursive tables are accessed by key +# (the same problem as for MDEV-15556) +# +CREATE TABLE t1 (p1 text,k2 int, p2 text, k1 int); +INSERT INTO t1 select seq, seq, seq, seq from seq_1_to_1000; +CREATE PROCEDURE getNums() +BEGIN +WITH RECURSIVE cte as +( +SELECT * FROM t1 +UNION +SELECT c.* FROM t1 c JOIN cte p ON c.p1 = p.p2 AND c.k2 = p.k1 +) +SELECT * FROM cte LIMIT 10; +END | +call getNums(); +p1 k2 p2 k1 +1 1 1 1 +2 2 2 2 +3 3 3 3 +4 4 4 4 +5 5 5 5 +6 6 6 6 +7 7 7 7 +8 8 8 8 +9 9 9 9 +10 10 10 10 +DROP PROCEDURE getNums; +DROP TABLE t1; +# +# MDEV-15894: aggregate/winfow functions in non-recorsive part +# +create table t1(b int); +insert into t1 values(10),(20),(10); +with recursive qn as +(select max(b) as a from t1 union +select a from qn) +select * from qn; +a +20 +with recursive qn as +(select rank() over (order by b) as a from t1 union +select a from qn) +select * from qn; +a +1 +3 +drop table t1; +# +# MDEV-16086: tmp table for CTE is created as ARIA tables +# +CREATE TABLE t1 ( +Id int(11) not null AUTO_INCREMENT, +Parent varchar(15) not null, +Child varchar(15) not null, +PRIMARY KEY (Id) +) ENGINE = MyISAM; +INSERT INTO t1 (Parent, Child) VALUES +('123', '456'),('456', '789'),('321', '654'),('654', '987'); +WITH RECURSIVE cte AS +( SELECT b.Parent, +b.Child, +CAST(CONCAT(b.Child,',') AS CHAR(513)) Path +FROM t1 b +LEFT OUTER JOIN t1 bc ON b.Child = bc.Parent +WHERE bc.Id IS NULL +UNION ALL SELECT c.Parent, +c.Child, +CONCAT(p.Path,c.Child,',') Path +FROM t1 c +INNER JOIN cte p ON c.Child = p.Parent) +SELECT * 
+FROM cte +ORDER BY Path; +Parent Child Path +456 789 789, +123 456 789,456, +654 987 987, +321 654 987,654, +DROP TABLE t1; +# +# MDEV-16212: recursive CTE with global ORDER BY +# +set statement max_recursive_iterations = 2 for +WITH RECURSIVE qn AS ( +SELECT 1 FROM dual UNION ALL +SELECT 1 FROM qn +ORDER BY (SELECT * FROM qn)) +SELECT count(*) FROM qn; +ERROR 42000: This version of MariaDB doesn't yet support 'global ORDER_BY/LIMIT in recursive CTE spec' +# +# MDEV-15581: mix of ALL and DISTINCT UNION in recursive CTE +# +create table t1(a int); +insert into t1 values(1),(2); +insert into t1 values(1),(2); +set @c=0, @d=0; +WITH RECURSIVE qn AS +( +select 1,0 as col from t1 +union distinct +select 1,0 from t1 +union all +select 3, 0*(@c:=@c+1) from qn where @c<1 +union all +select 3, 0*(@d:=@d+1) from qn where @d<1 +) +select * from qn; +ERROR 42000: This version of MariaDB doesn't yet support 'mix of ALL and DISTINCT UNION operations in recursive CTE spec' +drop table t1; +# Start of 10.3 tests +# # MDEV-14217 [db crash] Recursive CTE when SELECT includes new field # CREATE TEMPORARY TABLE a_tbl ( @@ -3338,3 +3539,22 @@ Beijing Bangkok Paris drop table flights, distances; +# +# MDEV-15159: Forced nullability of columns in recursive CTE +# +WITH RECURSIVE cte AS ( +SELECT 1 AS a UNION ALL +SELECT NULL FROM cte WHERE a IS NOT NULL) +SELECT * FROM cte; +a +1 +NULL +CREATE TABLE t1 (a int NOT NULL); +INSERT INTO t1 VALUES (0); +WITH RECURSIVE cte AS +(SELECT a FROM t1 where a=0 UNION SELECT NULL FROM cte) +SELECT * FROM cte; +a +0 +NULL +DROP TABLE t1; diff --git a/mysql-test/main/cte_recursive.test b/mysql-test/main/cte_recursive.test index 7ed55a1daaa..6f394bd673c 100644 --- a/mysql-test/main/cte_recursive.test +++ b/mysql-test/main/cte_recursive.test @@ -2098,6 +2098,192 @@ WITH RECURSIVE cte AS SELECT @c:=@c+1 FROM cte WHERE @c<3) SELECT * FROM cte; +--echo # +--echo # MDEV-15575: using recursive cte with big_tables enabled +--echo # + +set big_tables=1; + +with recursive qn as +(select 123 as a union all select 1+a from qn where a<130) +select * from qn; + +set big_tables=default; + +--echo # +--echo # MDEV-15571: using recursive cte with big_tables enabled +--echo # + +create table t1 (a bigint); +insert into t1 values(1); + +set big_tables=1; + +--error ER_DATA_OUT_OF_RANGE +with recursive qn as +( + select a from t1 + union all + select a*2000 from qn where a<10000000000000000000 +) +select * from qn; + +set big_tables=default; + +drop table t1; + +--echo # +--echo # MDEV-15556: using recursive cte with big_tables enabled +--echo # when recursive tables are accessed by key +--echo # + +SET big_tables=1; + +CREATE TABLE t1 (id int, name char(10), leftpar int, rightpar int); +INSERT INTO t1 VALUES + (1, "A", 2, 3), (2, "LA", 4, 5), (4, "LLA", 6, 7), + (6, "LLLA", NULL, NULL), (7, "RLLA", NULL, NULL), (5, "RLA", 8, 9), + (8, "LRLA", NULL, NULL), (9, "RRLA", NULL, NULL), (3, "RA", 10, 11), + (10, "LRA", 12, 13), (11, "RRA", 14, 15), (15, "RRRA", NULL, NULL), + (16, "B", 17, 18), (17, "LB", NULL, NULL), (18, "RB", NULL, NULL); + +CREATE TABLE t2 SELECT * FROM t1 ORDER BY rand(); + +let $q= +WITH RECURSIVE tree_of_a AS + (SELECT *, cast(id AS char(200)) AS path FROM t2 WHERE name="A" + UNION ALL + SELECT t2.*, concat(tree_of_a.path,",",t2.id) + FROM t2 JOIN tree_of_a ON t2.id=tree_of_a.leftpar + UNION ALL + SELECT t2.*, concat(tree_of_a.path,",",t2.id) + FROM t2 JOIN tree_of_a ON t2.id=tree_of_a.rightpar) +SELECT * FROM tree_of_a +ORDER BY path; + +eval $q; +eval EXPLAIN $q; + +DROP TABLE 
t1,t2; + +SET big_tables=0; + +--echo # +--echo # MDEV-15840: recursive tables are accessed by key +--echo # (the same problem as for MDEV-15556) +--echo # + +--source include/have_sequence.inc + +CREATE TABLE t1 (p1 text,k2 int, p2 text, k1 int); +INSERT INTO t1 select seq, seq, seq, seq from seq_1_to_1000; + +DELIMITER |; +CREATE PROCEDURE getNums() +BEGIN +WITH RECURSIVE cte as +( + SELECT * FROM t1 + UNION + SELECT c.* FROM t1 c JOIN cte p ON c.p1 = p.p2 AND c.k2 = p.k1 +) +SELECT * FROM cte LIMIT 10; +END | + +DELIMITER ;| +call getNums(); + +DROP PROCEDURE getNums; +DROP TABLE t1; + +--echo # +--echo # MDEV-15894: aggregate/winfow functions in non-recorsive part +--echo # + +create table t1(b int); +insert into t1 values(10),(20),(10); + +with recursive qn as + (select max(b) as a from t1 union + select a from qn) +select * from qn; + +with recursive qn as + (select rank() over (order by b) as a from t1 union + select a from qn) +select * from qn; + +drop table t1; + +--echo # +--echo # MDEV-16086: tmp table for CTE is created as ARIA tables +--echo # + +CREATE TABLE t1 ( + Id int(11) not null AUTO_INCREMENT, + Parent varchar(15) not null, + Child varchar(15) not null, + PRIMARY KEY (Id) +) ENGINE = MyISAM; + +INSERT INTO t1 (Parent, Child) VALUES + ('123', '456'),('456', '789'),('321', '654'),('654', '987'); + +WITH RECURSIVE cte AS + ( SELECT b.Parent, + b.Child, + CAST(CONCAT(b.Child,',') AS CHAR(513)) Path + FROM t1 b + LEFT OUTER JOIN t1 bc ON b.Child = bc.Parent + WHERE bc.Id IS NULL + UNION ALL SELECT c.Parent, + c.Child, + CONCAT(p.Path,c.Child,',') Path + FROM t1 c + INNER JOIN cte p ON c.Child = p.Parent) +SELECT * +FROM cte +ORDER BY Path; + +DROP TABLE t1; + +--echo # +--echo # MDEV-16212: recursive CTE with global ORDER BY +--echo # + +--error ER_NOT_SUPPORTED_YET +set statement max_recursive_iterations = 2 for +WITH RECURSIVE qn AS ( +SELECT 1 FROM dual UNION ALL +SELECT 1 FROM qn +ORDER BY (SELECT * FROM qn)) +SELECT count(*) FROM qn; + +--echo # +--echo # MDEV-15581: mix of ALL and DISTINCT UNION in recursive CTE +--echo # + +create table t1(a int); +insert into t1 values(1),(2); +insert into t1 values(1),(2); + +set @c=0, @d=0; +--error ER_NOT_SUPPORTED_YET +WITH RECURSIVE qn AS +( +select 1,0 as col from t1 +union distinct +select 1,0 from t1 +union all +select 3, 0*(@c:=@c+1) from qn where @c<1 +union all +select 3, 0*(@d:=@d+1) from qn where @d<1 +) +select * from qn; + +drop table t1; + +--echo # Start of 10.3 tests --echo # --echo # MDEV-14217 [db crash] Recursive CTE when SELECT includes new field @@ -2308,3 +2494,21 @@ with recursive destinations (city) as select * from destinations; drop table flights, distances; + +--echo # +--echo # MDEV-15159: Forced nullability of columns in recursive CTE +--echo # + +WITH RECURSIVE cte AS ( + SELECT 1 AS a UNION ALL + SELECT NULL FROM cte WHERE a IS NOT NULL) +SELECT * FROM cte; + +CREATE TABLE t1 (a int NOT NULL); +INSERT INTO t1 VALUES (0); + +WITH RECURSIVE cte AS + (SELECT a FROM t1 where a=0 UNION SELECT NULL FROM cte) +SELECT * FROM cte; + +DROP TABLE t1; diff --git a/mysql-test/main/ctype_create.result b/mysql-test/main/ctype_create.result index 4128be82c23..9a76802579e 100644 --- a/mysql-test/main/ctype_create.result +++ b/mysql-test/main/ctype_create.result @@ -83,6 +83,8 @@ USE test; # # MDEV-7387 Alter table xxx CHARACTER SET utf8, CONVERT TO CHARACTER SET latin1 should fail # +CREATE DATABASE tmp DEFAULT CHARACTER SET latin5; +USE tmp; CREATE TABLE t1 (a VARCHAR(10)) CHARACTER SET DEFAULT, CHARACTER SET utf8; 
ERROR HY000: Conflicting declarations: 'CHARACTER SET DEFAULT' and 'CHARACTER SET utf8' CREATE TABLE t1 (a VARCHAR(10)) CHARACTER SET utf8, CHARACTER SET DEFAULT; @@ -105,6 +107,8 @@ ERROR HY000: Conflicting declarations: 'CHARACTER SET latin5' and 'CHARACTER SET ALTER TABLE t1 CONVERT TO CHARACTER SET DEFAULT, CHARACTER SET latin1; ERROR HY000: Conflicting declarations: 'CHARACTER SET latin5' and 'CHARACTER SET latin1' DROP TABLE t1; +DROP DATABASE tmp; +USE test; # # End of 10.0 tests # diff --git a/mysql-test/main/ctype_create.test b/mysql-test/main/ctype_create.test index 61fc5292094..6ccc83521bd 100644 --- a/mysql-test/main/ctype_create.test +++ b/mysql-test/main/ctype_create.test @@ -114,6 +114,8 @@ USE test; --echo # --echo # MDEV-7387 Alter table xxx CHARACTER SET utf8, CONVERT TO CHARACTER SET latin1 should fail --echo # +CREATE DATABASE tmp DEFAULT CHARACTER SET latin5; +USE tmp; --error ER_CONFLICTING_DECLARATIONS CREATE TABLE t1 (a VARCHAR(10)) CHARACTER SET DEFAULT, CHARACTER SET utf8; --error ER_CONFLICTING_DECLARATIONS @@ -137,6 +139,8 @@ ALTER TABLE t1 CONVERT TO CHARACTER SET DEFAULT, CHARACTER SET utf8; --error ER_CONFLICTING_DECLARATIONS ALTER TABLE t1 CONVERT TO CHARACTER SET DEFAULT, CHARACTER SET latin1; DROP TABLE t1; +DROP DATABASE tmp; +USE test; --echo # --echo # End of 10.0 tests diff --git a/mysql-test/main/ctype_latin1_de-master.opt b/mysql-test/main/ctype_latin1_de-master.opt deleted file mode 100644 index 0c072424de9..00000000000 --- a/mysql-test/main/ctype_latin1_de-master.opt +++ /dev/null @@ -1 +0,0 @@ ---character-set-server=latin1 --collation-server=latin1_german2_ci diff --git a/mysql-test/main/ctype_latin1_de.result b/mysql-test/main/ctype_latin1_de.result index 2d3803fba18..4fd366b2e8e 100644 --- a/mysql-test/main/ctype_latin1_de.result +++ b/mysql-test/main/ctype_latin1_de.result @@ -1,3 +1,5 @@ +create database latin1_german2_ci default character set latin1 collate latin1_german2_ci; +use latin1_german2_ci; set names latin1; set @@collation_connection=latin1_german2_ci; select @@collation_connection; @@ -139,7 +141,7 @@ a a check table t1; Table Op Msg_type Msg_text -test.t1 check status OK +latin1_german2_ci.t1 check status OK select * from t1 where a like "ö%"; a b ö 1 @@ -835,3 +837,5 @@ hex(weight_string('x # # End of 5.6 tests # +drop database latin1_german2_ci; +use test; diff --git a/mysql-test/main/ctype_latin1_de.test b/mysql-test/main/ctype_latin1_de.test index 29a52c36db6..dc261790f6c 100644 --- a/mysql-test/main/ctype_latin1_de.test +++ b/mysql-test/main/ctype_latin1_de.test @@ -1,6 +1,8 @@ # # Test latin_de character set # +create database latin1_german2_ci default character set latin1 collate latin1_german2_ci; +use latin1_german2_ci; set names latin1; set @@collation_connection=latin1_german2_ci; @@ -180,3 +182,6 @@ select hex(weight_string('x --echo # --echo # End of 5.6 tests --echo # + +drop database latin1_german2_ci; +use test; diff --git a/mysql-test/main/ctype_ucs.result b/mysql-test/main/ctype_ucs.result index fcc8f27010c..a6e324b538f 100644 --- a/mysql-test/main/ctype_ucs.result +++ b/mysql-test/main/ctype_ucs.result @@ -205,6 +205,7 @@ DROP TABLE t1; # and reverse() function # # Problem # 1 (original report): wrong parsing of ucs2 data +SET character_set_connection=ucs2; SELECT '00' UNION SELECT '10' INTO OUTFILE 'tmpp.txt'; CREATE TABLE t1(a INT); LOAD DATA INFILE 'tmpp.txt' INTO TABLE t1 CHARACTER SET ucs2 @@ -4596,6 +4597,37 @@ c1 mediumtext YES NULL DROP TABLE t1; set sql_mode=default; # +# MDEV-15624 Changing the 
default character set to utf8mb4 changes query evaluation in a very surprising way +# +SET NAMES utf8; +CREATE TABLE t1 (id INT); +INSERT INTO t1 VALUES (1),(2),(3); +SELECT COUNT(DISTINCT c) FROM (SELECT id, REPLACE(uuid_short(), '0', CAST('o' AS CHAR CHARACTER SET ucs2)) AS c FROM t1) AS d1; +COUNT(DISTINCT c) +3 +SELECT DISTINCT REPLACE(uuid_short(), '0', CAST('o' AS CHAR CHARACTER SET ucs2)) AS c FROM t1; +c +xxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxx +SELECT COUNT(DISTINCT c) FROM (SELECT id, INSERT(uuid_short(), 1, 1, CAST('0' AS CHAR CHARACTER SET ucs2)) AS c FROM t1) AS d1; +COUNT(DISTINCT c) +3 +SELECT DISTINCT INSERT(uuid_short(), 1, 1, CAST('0' AS CHAR CHARACTER SET ucs2)) AS c FROM t1; +c +xxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxx +SELECT COUNT(DISTINCT c) FROM (SELECT id, CONCAT(uuid_short(), CAST('0' AS CHAR CHARACTER SET ucs2)) AS c FROM t1) AS d1; +COUNT(DISTINCT c) +3 +SELECT DISTINCT CONCAT(uuid_short(), CAST('0' AS CHAR CHARACTER SET ucs2)) AS c FROM t1; +c +xxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxx +DROP TABLE t1; +# # End of 5.5 tests # # diff --git a/mysql-test/main/ctype_ucs.test b/mysql-test/main/ctype_ucs.test index d7a4fd48ccd..9592e27f8e1 100644 --- a/mysql-test/main/ctype_ucs.test +++ b/mysql-test/main/ctype_ucs.test @@ -74,6 +74,7 @@ DROP TABLE t1; --echo # --echo # Problem # 1 (original report): wrong parsing of ucs2 data +SET character_set_connection=ucs2; SELECT '00' UNION SELECT '10' INTO OUTFILE 'tmpp.txt'; CREATE TABLE t1(a INT); LOAD DATA INFILE 'tmpp.txt' INTO TABLE t1 CHARACTER SET ucs2 @@ -851,6 +852,28 @@ DROP TABLE t1; set sql_mode=default; +--echo # +--echo # MDEV-15624 Changing the default character set to utf8mb4 changes query evaluation in a very surprising way +--echo # + +SET NAMES utf8; +CREATE TABLE t1 (id INT); +INSERT INTO t1 VALUES (1),(2),(3); + +SELECT COUNT(DISTINCT c) FROM (SELECT id, REPLACE(uuid_short(), '0', CAST('o' AS CHAR CHARACTER SET ucs2)) AS c FROM t1) AS d1; +--replace_column 1 xxxxxxxxxxxxxxxxx +SELECT DISTINCT REPLACE(uuid_short(), '0', CAST('o' AS CHAR CHARACTER SET ucs2)) AS c FROM t1; + +SELECT COUNT(DISTINCT c) FROM (SELECT id, INSERT(uuid_short(), 1, 1, CAST('0' AS CHAR CHARACTER SET ucs2)) AS c FROM t1) AS d1; +--replace_column 1 xxxxxxxxxxxxxxxxx +SELECT DISTINCT INSERT(uuid_short(), 1, 1, CAST('0' AS CHAR CHARACTER SET ucs2)) AS c FROM t1; + +SELECT COUNT(DISTINCT c) FROM (SELECT id, CONCAT(uuid_short(), CAST('0' AS CHAR CHARACTER SET ucs2)) AS c FROM t1) AS d1; +--replace_column 1 xxxxxxxxxxxxxxxxx +SELECT DISTINCT CONCAT(uuid_short(), CAST('0' AS CHAR CHARACTER SET ucs2)) AS c FROM t1; +DROP TABLE t1; + + --echo # --echo # End of 5.5 tests --echo # diff --git a/mysql-test/main/ctype_utf8mb4.result b/mysql-test/main/ctype_utf8mb4.result index fbe95d9f44b..7e5e99354ea 100644 --- a/mysql-test/main/ctype_utf8mb4.result +++ b/mysql-test/main/ctype_utf8mb4.result @@ -2868,6 +2868,29 @@ SELECT LENGTH(data) AS len FROM (SELECT REPEAT('☃', 65536) AS data ) AS sub; len 196608 # +# MDEV-15624 Changing the default character set to utf8mb4 changes query evaluation in a very surprising way +# +SET NAMES utf8mb4; +CREATE TABLE t1 (id INT); +INSERT INTO t1 VALUES (1),(2),(3); +SELECT COUNT(DISTINCT c) FROM (SELECT id, REPLACE(UUID(), "-", "") AS c FROM t1) AS d1; +COUNT(DISTINCT c) +3 +SELECT DISTINCT INSERT(uuid(), 9, 1, "X") AS c FROM t1; +c +xxxxxxxxxxxxx-xxxx-xxxx-xxxxxxxxxxxx +xxxxxxxxxxxxx-xxxx-xxxx-xxxxxxxxxxxx +xxxxxxxxxxxxx-xxxx-xxxx-xxxxxxxxxxxx +SELECT COUNT(DISTINCT c) FROM 
(SELECT id, INSERT(UUID(), 9, 1, "X") AS c FROM t1) AS d1; +COUNT(DISTINCT c) +3 +SELECT DISTINCT INSERT(UUID(), 9, 1, "X") AS c FROM t1; +c +xxxxxxxxxxxxx-xxxx-xxxx-xxxxxxxxxxxx +xxxxxxxxxxxxx-xxxx-xxxx-xxxxxxxxxxxx +xxxxxxxxxxxxx-xxxx-xxxx-xxxxxxxxxxxx +DROP TABLE t1; +# # End of 5.5 tests # # diff --git a/mysql-test/main/ctype_utf8mb4.test b/mysql-test/main/ctype_utf8mb4.test index 7aa644fe8a9..545347fcd26 100644 --- a/mysql-test/main/ctype_utf8mb4.test +++ b/mysql-test/main/ctype_utf8mb4.test @@ -1850,6 +1850,25 @@ SELECT LENGTH(data) AS len FROM (SELECT REPEAT('☃', 21846) AS data ) AS sub; SELECT LENGTH(data) AS len FROM (SELECT REPEAT('☃', 65535) AS data ) AS sub; SELECT LENGTH(data) AS len FROM (SELECT REPEAT('☃', 65536) AS data ) AS sub; +--echo # +--echo # MDEV-15624 Changing the default character set to utf8mb4 changes query evaluation in a very surprising way +--echo # + +SET NAMES utf8mb4; +CREATE TABLE t1 (id INT); +INSERT INTO t1 VALUES (1),(2),(3); + +SELECT COUNT(DISTINCT c) FROM (SELECT id, REPLACE(UUID(), "-", "") AS c FROM t1) AS d1; +--replace_column 1 xxxxxxxxxxxxx-xxxx-xxxx-xxxxxxxxxxxx +SELECT DISTINCT INSERT(uuid(), 9, 1, "X") AS c FROM t1; + +SELECT COUNT(DISTINCT c) FROM (SELECT id, INSERT(UUID(), 9, 1, "X") AS c FROM t1) AS d1; +--replace_column 1 xxxxxxxxxxxxx-xxxx-xxxx-xxxxxxxxxxxx +SELECT DISTINCT INSERT(UUID(), 9, 1, "X") AS c FROM t1; + +DROP TABLE t1; + + --echo # --echo # End of 5.5 tests --echo # diff --git a/mysql-test/main/custom_aggregate_functions.result b/mysql-test/main/custom_aggregate_functions.result index 67be44c43f7..4060d6665f6 100644 --- a/mysql-test/main/custom_aggregate_functions.result +++ b/mysql-test/main/custom_aggregate_functions.result @@ -947,3 +947,209 @@ select f1('2001-01-01'),cast(f1('2001-01-01') as time); f1('2001-01-01') cast(f1('2001-01-01') as time) 2001-01-01 00:00:00 drop function f1; +# +# MDEV-15957 Unexpected "Data too long" when doing CREATE..SELECT with stored functions +# +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +CREATE AGGREGATE FUNCTION f1(x INT) RETURNS INT(3) +BEGIN +DECLARE res INT DEFAULT 0; +DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN res-200; +LOOP +FETCH GROUP NEXT ROW; +SET res= res + x; +END LOOP; +RETURN res; +END; +$$ +CREATE TABLE t2 AS SELECT CONCAT(f1(a)) FROM t1; +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `CONCAT(f1(a))` varchar(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1,t2; +DROP FUNCTION f1; +CREATE AGGREGATE FUNCTION f1() RETURNS TINYTEXT CHARACTER SET latin1 +BEGIN +DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; +LOOP +FETCH GROUP NEXT ROW; +END LOOP; +RETURN ''; +END; +$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` tinytext DEFAULT NULL, + `c2` tinytext DEFAULT NULL, + `c3` varchar(255) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE AGGREGATE FUNCTION f1() RETURNS TEXT CHARACTER SET latin1 +BEGIN +DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; +LOOP +FETCH GROUP NEXT ROW; +END LOOP; +RETURN ''; +END; +$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` text DEFAULT NULL, + `c2` text DEFAULT NULL, + `c3` text DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE AGGREGATE FUNCTION f1() RETURNS MEDIUMTEXT 
CHARACTER SET latin1 +BEGIN +DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; +LOOP +FETCH GROUP NEXT ROW; +END LOOP; +RETURN ''; +END; +$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` mediumtext DEFAULT NULL, + `c2` mediumtext DEFAULT NULL, + `c3` mediumtext DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE AGGREGATE FUNCTION f1() RETURNS LONGTEXT CHARACTER SET latin1 +BEGIN +DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; +LOOP +FETCH GROUP NEXT ROW; +END LOOP; +RETURN ''; +END; +$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` longtext DEFAULT NULL, + `c2` longtext DEFAULT NULL, + `c3` longtext DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE AGGREGATE FUNCTION f1() RETURNS TINYTEXT CHARACTER SET utf8 +BEGIN +DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; +LOOP +FETCH GROUP NEXT ROW; +END LOOP; +RETURN ''; +END; +$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` tinytext CHARACTER SET utf8 DEFAULT NULL, + `c2` text CHARACTER SET utf8 DEFAULT NULL, + `c3` varchar(255) CHARACTER SET utf8 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE AGGREGATE FUNCTION f1() RETURNS TEXT CHARACTER SET utf8 +BEGIN +DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; +LOOP +FETCH GROUP NEXT ROW; +END LOOP; +RETURN ''; +END; +$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` text CHARACTER SET utf8 DEFAULT NULL, + `c2` mediumtext CHARACTER SET utf8 DEFAULT NULL, + `c3` mediumtext CHARACTER SET utf8 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE AGGREGATE FUNCTION f1() RETURNS MEDIUMTEXT CHARACTER SET utf8 +BEGIN +DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; +LOOP +FETCH GROUP NEXT ROW; +END LOOP; +RETURN ''; +END; +$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` mediumtext CHARACTER SET utf8 DEFAULT NULL, + `c2` longtext CHARACTER SET utf8 DEFAULT NULL, + `c3` longtext CHARACTER SET utf8 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE AGGREGATE FUNCTION f1() RETURNS LONGTEXT CHARACTER SET utf8 +BEGIN +DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; +LOOP +FETCH GROUP NEXT ROW; +END LOOP; +RETURN ''; +END; +$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` longtext CHARACTER SET utf8 DEFAULT NULL, + `c2` longtext CHARACTER SET utf8 DEFAULT NULL, + `c3` longtext CHARACTER SET utf8 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +# +# MDEV-14520: Custom aggregate functions work incorrectly with WITH ROLLUP clause +# +create aggregate function agg_sum(x INT) returns INT +begin +declare z int default 0; +declare continue handler for not found return z; +loop +fetch group next row; +set z= z+x; +end loop; +end| +create table t1 (i int); +insert into t1 values (1),(2),(2),(3); +select i, 
agg_sum(i) from t1 group by i with rollup; +i agg_sum(i) +1 1 +2 4 +3 3 +NULL 8 +# +# Compare with +select i, sum(i) from t1 group by i with rollup; +i sum(i) +1 1 +2 4 +3 3 +NULL 8 +drop function agg_sum; +drop table t1; diff --git a/mysql-test/main/custom_aggregate_functions.test b/mysql-test/main/custom_aggregate_functions.test index 13eb3bed2af..ab799b48bdb 100644 --- a/mysql-test/main/custom_aggregate_functions.test +++ b/mysql-test/main/custom_aggregate_functions.test @@ -783,3 +783,185 @@ end| delimiter ;| select f1('2001-01-01'),cast(f1('2001-01-01') as time); drop function f1; + + +--echo # +--echo # MDEV-15957 Unexpected "Data too long" when doing CREATE..SELECT with stored functions +--echo # + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +DELIMITER $$; +CREATE AGGREGATE FUNCTION f1(x INT) RETURNS INT(3) +BEGIN + DECLARE res INT DEFAULT 0; + DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN res-200; + LOOP + FETCH GROUP NEXT ROW; + SET res= res + x; + END LOOP; + RETURN res; +END; +$$ +DELIMITER ;$$ +CREATE TABLE t2 AS SELECT CONCAT(f1(a)) FROM t1; +SHOW CREATE TABLE t2; +DROP TABLE t1,t2; +DROP FUNCTION f1; + + +DELIMITER $$; +CREATE AGGREGATE FUNCTION f1() RETURNS TINYTEXT CHARACTER SET latin1 +BEGIN + DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; + LOOP + FETCH GROUP NEXT ROW; + END LOOP; + RETURN ''; +END; +$$ +DELIMITER ;$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +DELIMITER $$; +CREATE AGGREGATE FUNCTION f1() RETURNS TEXT CHARACTER SET latin1 +BEGIN + DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; + LOOP + FETCH GROUP NEXT ROW; + END LOOP; + RETURN ''; +END; +$$ +DELIMITER ;$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +DELIMITER $$; +CREATE AGGREGATE FUNCTION f1() RETURNS MEDIUMTEXT CHARACTER SET latin1 +BEGIN + DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; + LOOP + FETCH GROUP NEXT ROW; + END LOOP; + RETURN ''; +END; +$$ +DELIMITER ;$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +DELIMITER $$; +CREATE AGGREGATE FUNCTION f1() RETURNS LONGTEXT CHARACTER SET latin1 +BEGIN + DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; + LOOP + FETCH GROUP NEXT ROW; + END LOOP; + RETURN ''; +END; +$$ +DELIMITER ;$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + + +DELIMITER $$; +CREATE AGGREGATE FUNCTION f1() RETURNS TINYTEXT CHARACTER SET utf8 +BEGIN + DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; + LOOP + FETCH GROUP NEXT ROW; + END LOOP; + RETURN ''; +END; +$$ +DELIMITER ;$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +DELIMITER $$; +CREATE AGGREGATE FUNCTION f1() RETURNS TEXT CHARACTER SET utf8 +BEGIN + DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; + LOOP + FETCH GROUP NEXT ROW; + END LOOP; + RETURN ''; +END; +$$ +DELIMITER ;$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +DELIMITER $$; +CREATE AGGREGATE FUNCTION f1() RETURNS MEDIUMTEXT CHARACTER SET utf8 +BEGIN + DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; + LOOP + FETCH GROUP NEXT ROW; + END LOOP; + RETURN ''; +END; +$$ 
+DELIMITER ;$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +DELIMITER $$; +CREATE AGGREGATE FUNCTION f1() RETURNS LONGTEXT CHARACTER SET utf8 +BEGIN + DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ''; + LOOP + FETCH GROUP NEXT ROW; + END LOOP; + RETURN ''; +END; +$$ +DELIMITER ;$$ +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +--echo # +--echo # MDEV-14520: Custom aggregate functions work incorrectly with WITH ROLLUP clause +--echo # + +--delimiter | +create aggregate function agg_sum(x INT) returns INT +begin +declare z int default 0; +declare continue handler for not found return z; +loop +fetch group next row; +set z= z+x; +end loop; +end| +--delimiter ; + +create table t1 (i int); +insert into t1 values (1),(2),(2),(3); +select i, agg_sum(i) from t1 group by i with rollup; +--echo # +--echo # Compare with +select i, sum(i) from t1 group by i with rollup; + +# Cleanup +drop function agg_sum; +drop table t1; diff --git a/mysql-test/main/delayed.result b/mysql-test/main/delayed.result index d54fa40f2da..d10f4ae22cf 100644 --- a/mysql-test/main/delayed.result +++ b/mysql-test/main/delayed.result @@ -500,7 +500,6 @@ call mtr.add_suppression("Checking table"); insert delayed into t1 values (2,2); Warnings: Error 145 Table './test/t1' is marked as crashed and should be repaired -Error 1194 Table 't1' is marked as crashed and should be repaired Error 1034 1 client is using or hasn't closed the table properly insert delayed into t1 values (3,3); flush tables t1; diff --git a/mysql-test/main/derived_cond_pushdown.result b/mysql-test/main/derived_cond_pushdown.result index 6887ad11284..867da5bbbf3 100644 --- a/mysql-test/main/derived_cond_pushdown.result +++ b/mysql-test/main/derived_cond_pushdown.result @@ -12778,6 +12778,434 @@ where t.d between date ('2017-01-01') and date ('2019-01-01'); d 2018-01-01 # +# MDEV-16088: pushdown into derived defined in the IN subquery +# +CREATE TABLE t1 (a INT, b INT); +CREATE TABLE t2 (e INT, f INT, g INT); +INSERT INTO t1 VALUES (1,14),(2,13),(1,19),(2,32),(3,24); +INSERT INTO t2 VALUES (1,19,2),(3,24,1),(1,12,2),(3,11,3),(2,32,1); +SELECT * FROM t1 +WHERE (t1.a,t1.b) IN +( +SELECT d_tab.e,d_tab.max_f +FROM ( +SELECT t2.e, MAX(t2.f) AS max_f +FROM t2 +GROUP BY t2.e +HAVING max_f>18 +) as d_tab +WHERE d_tab.e>1 +) +; +a b +2 32 +3 24 +EXPLAIN SELECT * FROM t1 +WHERE (t1.a,t1.b) IN +( +SELECT d_tab.e,d_tab.max_f +FROM ( +SELECT t2.e, MAX(t2.f) AS max_f +FROM t2 +GROUP BY t2.e +HAVING max_f>18 +) as d_tab +WHERE d_tab.e>1 +) +; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 5 +1 PRIMARY eq_ref distinct_key distinct_key 8 func,func 1 +2 MATERIALIZED ALL NULL NULL NULL NULL 5 Using where +3 DERIVED t2 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort +EXPLAIN FORMAT=JSON SELECT * FROM t1 +WHERE (t1.a,t1.b) IN +( +SELECT d_tab.e,d_tab.max_f +FROM ( +SELECT t2.e, MAX(t2.f) AS max_f +FROM t2 +GROUP BY t2.e +HAVING max_f>18 +) as d_tab +WHERE d_tab.e>1 +) +; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 5, + "filtered": 100 + }, + "table": { + "table_name": "", + "access_type": "eq_ref", + "possible_keys": ["distinct_key"], + "key": "distinct_key", + "key_length": "8", + "used_key_parts": ["e", "max_f"], + "ref": ["func", "func"], 
+ "rows": 1, + "filtered": 100, + "materialized": { + "unique": 1, + "query_block": { + "select_id": 2, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 5, + "filtered": 100, + "attached_condition": "d_tab.e > 1", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "max_f > 18", + "filesort": { + "sort_key": "t2.e", + "temporary_table": { + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 5, + "filtered": 100, + "attached_condition": "t2.e > 1" + } + } + } + } + } + } + } + } + } + } +} +SELECT * FROM t1 +WHERE (t1.a,t1.b) IN +( +SELECT d_tab.e,d_tab.max_f +FROM ( +SELECT t2.e, MAX(t2.f) AS max_f +FROM t2 +GROUP BY t2.e +HAVING max_f>18 +) as d_tab +WHERE d_tab.max_f<25 +) +; +a b +1 19 +3 24 +EXPLAIN SELECT * FROM t1 +WHERE (t1.a,t1.b) IN +( +SELECT d_tab.e,d_tab.max_f +FROM ( +SELECT t2.e, MAX(t2.f) AS max_f +FROM t2 +GROUP BY t2.e +HAVING max_f>18 +) as d_tab +WHERE d_tab.max_f<25 +) +; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 5 +1 PRIMARY eq_ref distinct_key distinct_key 8 func,func 1 +2 MATERIALIZED ALL NULL NULL NULL NULL 5 Using where +3 DERIVED t2 ALL NULL NULL NULL NULL 5 Using temporary; Using filesort +EXPLAIN FORMAT=JSON SELECT * FROM t1 +WHERE (t1.a,t1.b) IN +( +SELECT d_tab.e,d_tab.max_f +FROM ( +SELECT t2.e, MAX(t2.f) AS max_f +FROM t2 +GROUP BY t2.e +HAVING max_f>18 +) as d_tab +WHERE d_tab.max_f<25 +) +; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 5, + "filtered": 100 + }, + "table": { + "table_name": "", + "access_type": "eq_ref", + "possible_keys": ["distinct_key"], + "key": "distinct_key", + "key_length": "8", + "used_key_parts": ["e", "max_f"], + "ref": ["func", "func"], + "rows": 1, + "filtered": 100, + "materialized": { + "unique": 1, + "query_block": { + "select_id": 2, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 5, + "filtered": 100, + "attached_condition": "d_tab.max_f < 25", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "max_f > 18 and max_f < 25", + "filesort": { + "sort_key": "t2.e", + "temporary_table": { + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 5, + "filtered": 100 + } + } + } + } + } + } + } + } + } + } +} +SELECT * FROM t1 +WHERE (t1.a,t1.b) IN +( +SELECT d_tab.e, MAX(d_tab.max_f) AS max_f +FROM ( +SELECT t2.e, MAX(t2.f) as max_f, t2.g +FROM t2 +GROUP BY t2.e +) as d_tab +WHERE d_tab.e>1 +GROUP BY d_tab.g +) +; +a b +2 32 +EXPLAIN SELECT * FROM t1 +WHERE (t1.a,t1.b) IN +( +SELECT d_tab.e, MAX(d_tab.max_f) AS max_f +FROM ( +SELECT t2.e, MAX(t2.f) as max_f, t2.g +FROM t2 +GROUP BY t2.e +) as d_tab +WHERE d_tab.e>1 +GROUP BY d_tab.g +) +; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where +1 PRIMARY eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.b 1 +2 MATERIALIZED ALL NULL NULL NULL NULL 5 Using where; Using temporary +3 DERIVED t2 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort +EXPLAIN FORMAT=JSON SELECT * FROM t1 +WHERE (t1.a,t1.b) IN +( +SELECT d_tab.e, MAX(d_tab.max_f) AS max_f +FROM ( +SELECT t2.e, MAX(t2.f) as max_f, t2.g +FROM t2 +GROUP BY t2.e +) as d_tab +WHERE d_tab.e>1 +GROUP BY d_tab.g +) +; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 5, + "filtered": 100, + "attached_condition": "t1.a is not null 
and t1.b is not null" + }, + "table": { + "table_name": "", + "access_type": "eq_ref", + "possible_keys": ["distinct_key"], + "key": "distinct_key", + "key_length": "8", + "used_key_parts": ["e", "max_f"], + "ref": ["test.t1.a", "test.t1.b"], + "rows": 1, + "filtered": 100, + "materialized": { + "unique": 1, + "query_block": { + "select_id": 2, + "temporary_table": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 5, + "filtered": 100, + "attached_condition": "d_tab.e > 1", + "materialized": { + "query_block": { + "select_id": 3, + "filesort": { + "sort_key": "t2.e", + "temporary_table": { + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 5, + "filtered": 100, + "attached_condition": "t2.e > 1" + } + } + } + } + } + } + } + } + } + } + } +} +SELECT * FROM t1 +WHERE (t1.a,t1.b) IN +( +SELECT d_tab.e, MAX(d_tab.max_f) AS max_f +FROM ( +SELECT t2.e, MAX(t2.f) as max_f, t2.g +FROM t2 +GROUP BY t2.e +) as d_tab +WHERE d_tab.max_f>20 +GROUP BY d_tab.g +) +; +a b +2 32 +EXPLAIN SELECT * FROM t1 +WHERE (t1.a,t1.b) IN +( +SELECT d_tab.e, MAX(d_tab.max_f) AS max_f +FROM ( +SELECT t2.e, MAX(t2.f) as max_f, t2.g +FROM t2 +GROUP BY t2.e +) as d_tab +WHERE d_tab.max_f>20 +GROUP BY d_tab.g +) +; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where +1 PRIMARY eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.b 1 +2 MATERIALIZED ALL NULL NULL NULL NULL 5 Using where; Using temporary +3 DERIVED t2 ALL NULL NULL NULL NULL 5 Using temporary; Using filesort +EXPLAIN FORMAT=JSON SELECT * FROM t1 +WHERE (t1.a,t1.b) IN +( +SELECT d_tab.e, MAX(d_tab.max_f) AS max_f +FROM ( +SELECT t2.e, MAX(t2.f) as max_f, t2.g +FROM t2 +GROUP BY t2.e +) as d_tab +WHERE d_tab.max_f>20 +GROUP BY d_tab.g +) +; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 5, + "filtered": 100, + "attached_condition": "t1.a is not null and t1.b is not null" + }, + "table": { + "table_name": "", + "access_type": "eq_ref", + "possible_keys": ["distinct_key"], + "key": "distinct_key", + "key_length": "8", + "used_key_parts": ["e", "max_f"], + "ref": ["test.t1.a", "test.t1.b"], + "rows": 1, + "filtered": 100, + "materialized": { + "unique": 1, + "query_block": { + "select_id": 2, + "temporary_table": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 5, + "filtered": 100, + "attached_condition": "d_tab.max_f > 20", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "max_f > 20", + "filesort": { + "sort_key": "t2.e", + "temporary_table": { + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 5, + "filtered": 100 + } + } + } + } + } + } + } + } + } + } + } +} +DROP TABLE t1,t2; +# +# MDEV-15765: pushing condition with IN subquery defined with constants +# using substitution +# +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2); +SELECT * FROM +( +SELECT DISTINCT * FROM t1 +) der_tab +WHERE (a>0 AND a<2 OR a IN (2,3)) AND +(a=2 OR 0); +a +2 +DROP TABLE t1; +# # MDEV-10855: Pushdown into derived with window functions # set @save_optimizer_switch= @@optimizer_switch; @@ -15071,7 +15499,6 @@ EXPLAIN "materialized": { "query_block": { "select_id": 2, - "outer_ref_condition": "t1.pk1 is not null", "table": { "table_name": "t2", "access_type": "eq_ref", @@ -15119,3 +15546,82 @@ Warnings: Note 1003 /* select#1 */ select NULL AS `f`,`v2`.`f` AS `f` from `test`.`t1` `a` straight_join `test`.`t1` `b` join `test`.`v2` where 
0 DROP VIEW v1,v2; DROP TABLE t1; +# +# MDEV-15899: derived with WF without any key access +# +create table t1 (f1 int, f2 int, f4 int); +insert into t1 values +(3,1,1), (3,0,9), (0,1,8), (9,0,0), (3,0,9); +with +cte as (select median(f2) over (partition by f1) as k1 from t1 order by f1), +cte1 as (select median(f4) over (partition by f1) as k2 from t1) +select k1,k2 from cte1, cte; +k1 k2 +1.0000000000 9.0000000000 +1.0000000000 9.0000000000 +1.0000000000 8.0000000000 +1.0000000000 0.0000000000 +1.0000000000 9.0000000000 +0.0000000000 9.0000000000 +0.0000000000 9.0000000000 +0.0000000000 8.0000000000 +0.0000000000 0.0000000000 +0.0000000000 9.0000000000 +0.0000000000 9.0000000000 +0.0000000000 9.0000000000 +0.0000000000 8.0000000000 +0.0000000000 0.0000000000 +0.0000000000 9.0000000000 +0.0000000000 9.0000000000 +0.0000000000 9.0000000000 +0.0000000000 8.0000000000 +0.0000000000 0.0000000000 +0.0000000000 9.0000000000 +0.0000000000 9.0000000000 +0.0000000000 9.0000000000 +0.0000000000 8.0000000000 +0.0000000000 0.0000000000 +0.0000000000 9.0000000000 +explain with +cte as (select median(f2) over (partition by f1) as k1 from t1 order by f1), +cte1 as (select median(f4) over (partition by f1) as k2 from t1) +select k1,k2 from cte1, cte; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 5 +1 PRIMARY ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join) +3 DERIVED t1 ALL NULL NULL NULL NULL 5 Using temporary +2 DERIVED t1 ALL NULL NULL NULL NULL 5 Using temporary; Using filesort +drop table t1; +# +# MDEV-16104: embedded splittable materialized derived/views +# +CREATE TABLE t1 (f int PRIMARY KEY) ENGINE=MyISAM; +INSERT INTO t1 +VALUES (3), (7), (1), (4), (8), (5), (9); +CREATE ALGORITHM=MERGE VIEW v1 AS +SELECT a2.* +FROM +( SELECT f, COUNT(*) as c FROM t1 GROUP BY f ) AS a1 +JOIN +t1 AS a2 +USING (f); +EXPLAIN EXTENDED +SELECT * FROM ( SELECT STRAIGHT_JOIN f, COUNT(*) as c FROM v1 GROUP BY f ) AS s; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY ALL NULL NULL NULL NULL 7 100.00 +2 DERIVED ALL NULL NULL NULL NULL 7 100.00 Using temporary; Using filesort +2 DERIVED a2 eq_ref PRIMARY PRIMARY 4 a1.f 1 100.00 Using index +4 DERIVED t1 index PRIMARY PRIMARY 4 NULL 7 100.00 Using index; Using temporary; Using filesort +Warnings: +Note 1003 /* select#1 */ select `s`.`f` AS `f`,`s`.`c` AS `c` from (/* select#2 */ select straight_join `a2`.`f` AS `f`,count(0) AS `c` from ((/* select#4 */ select `test`.`t1`.`f` AS `f`,count(0) AS `c` from `test`.`t1` group by `test`.`t1`.`f`)) `a1` join `test`.`t1` `a2` where `a2`.`f` = `a1`.`f` group by `a2`.`f`) `s` +SELECT * FROM ( SELECT STRAIGHT_JOIN f, COUNT(*) as c FROM v1 GROUP BY f ) AS s; +f c +1 1 +3 1 +4 1 +5 1 +7 1 +8 1 +9 1 +DROP VIEW v1; +DROP TABLE t1; diff --git a/mysql-test/main/derived_cond_pushdown.test b/mysql-test/main/derived_cond_pushdown.test index 234f88976c9..d523ea3916f 100644 --- a/mysql-test/main/derived_cond_pushdown.test +++ b/mysql-test/main/derived_cond_pushdown.test @@ -2221,6 +2221,105 @@ select * from (select date('2018-01-01') as d select * from (select date('2018-01-01') as d) as t where t.d between date ('2017-01-01') and date ('2019-01-01'); +--echo # +--echo # MDEV-16088: pushdown into derived defined in the IN subquery +--echo # + +CREATE TABLE t1 (a INT, b INT); +CREATE TABLE t2 (e INT, f INT, g INT); +INSERT INTO t1 VALUES (1,14),(2,13),(1,19),(2,32),(3,24); +INSERT INTO t2 VALUES 
(1,19,2),(3,24,1),(1,12,2),(3,11,3),(2,32,1); + +LET $query= +SELECT * FROM t1 +WHERE (t1.a,t1.b) IN + ( + SELECT d_tab.e,d_tab.max_f + FROM ( + SELECT t2.e, MAX(t2.f) AS max_f + FROM t2 + GROUP BY t2.e + HAVING max_f>18 + ) as d_tab + WHERE d_tab.e>1 + ) +; +EVAL $query; +EVAL EXPLAIN $query; +EVAL EXPLAIN FORMAT=JSON $query; + +LET $query= +SELECT * FROM t1 +WHERE (t1.a,t1.b) IN + ( + SELECT d_tab.e,d_tab.max_f + FROM ( + SELECT t2.e, MAX(t2.f) AS max_f + FROM t2 + GROUP BY t2.e + HAVING max_f>18 + ) as d_tab + WHERE d_tab.max_f<25 + ) +; +EVAL $query; +EVAL EXPLAIN $query; +EVAL EXPLAIN FORMAT=JSON $query; + +LET $query= +SELECT * FROM t1 +WHERE (t1.a,t1.b) IN + ( + SELECT d_tab.e, MAX(d_tab.max_f) AS max_f + FROM ( + SELECT t2.e, MAX(t2.f) as max_f, t2.g + FROM t2 + GROUP BY t2.e + ) as d_tab + WHERE d_tab.e>1 + GROUP BY d_tab.g + ) +; +EVAL $query; +EVAL EXPLAIN $query; +EVAL EXPLAIN FORMAT=JSON $query; + +LET $query= +SELECT * FROM t1 +WHERE (t1.a,t1.b) IN + ( + SELECT d_tab.e, MAX(d_tab.max_f) AS max_f + FROM ( + SELECT t2.e, MAX(t2.f) as max_f, t2.g + FROM t2 + GROUP BY t2.e + ) as d_tab + WHERE d_tab.max_f>20 + GROUP BY d_tab.g + ) +; +EVAL $query; +EVAL EXPLAIN $query; +EVAL EXPLAIN FORMAT=JSON $query; + +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-15765: pushing condition with IN subquery defined with constants +--echo # using substitution +--echo # + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2); +SELECT * FROM +( + SELECT DISTINCT * FROM t1 +) der_tab +WHERE (a>0 AND a<2 OR a IN (2,3)) AND + (a=2 OR 0); + +DROP TABLE t1; + # Start of 10.3 tests --echo # @@ -2671,3 +2770,45 @@ SELECT * FROM v1 JOIN v2 ON v1.f = v2.f; DROP VIEW v1,v2; DROP TABLE t1; + +--echo # +--echo # MDEV-15899: derived with WF without any key access +--echo # + +create table t1 (f1 int, f2 int, f4 int); +insert into t1 values + (3,1,1), (3,0,9), (0,1,8), (9,0,0), (3,0,9); + +let $q= +with +cte as (select median(f2) over (partition by f1) as k1 from t1 order by f1), +cte1 as (select median(f4) over (partition by f1) as k2 from t1) +select k1,k2 from cte1, cte; + +eval $q; +eval explain $q; + +drop table t1; + +--echo # +--echo # MDEV-16104: embedded splittable materialized derived/views +--echo # + +CREATE TABLE t1 (f int PRIMARY KEY) ENGINE=MyISAM; +INSERT INTO t1 + VALUES (3), (7), (1), (4), (8), (5), (9); + +CREATE ALGORITHM=MERGE VIEW v1 AS +SELECT a2.* +FROM + ( SELECT f, COUNT(*) as c FROM t1 GROUP BY f ) AS a1 + JOIN + t1 AS a2 + USING (f); + +EXPLAIN EXTENDED +SELECT * FROM ( SELECT STRAIGHT_JOIN f, COUNT(*) as c FROM v1 GROUP BY f ) AS s; +SELECT * FROM ( SELECT STRAIGHT_JOIN f, COUNT(*) as c FROM v1 GROUP BY f ) AS s; + +DROP VIEW v1; +DROP TABLE t1; diff --git a/mysql-test/main/derived_view.result b/mysql-test/main/derived_view.result index 85e56ff176e..6c4b3310e11 100644 --- a/mysql-test/main/derived_view.result +++ b/mysql-test/main/derived_view.result @@ -1841,7 +1841,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t2 system NULL NULL NULL NULL 1 1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where 1 PRIMARY ALL NULL NULL NULL NULL 3 Using where; Start temporary; End temporary -3 DERIVED t1 ALL NULL NULL NULL NULL 3 +3 DERIVED t1 ALL NULL NULL NULL NULL 3 Using where SELECT * FROM t3 WHERE t3.b IN (SELECT v1.b FROM v1, t2 WHERE t2.c = v1.c AND t2.c = v1.b AND v1.b = t3.c); @@ -1856,7 +1856,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t2 system NULL NULL NULL NULL 1 1 PRIMARY ref key1 key1 8 const,const 0 Start 
temporary 1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where; End temporary; Using join buffer (flat, BNL join) -3 DERIVED t1 ALL NULL NULL NULL NULL 3 +3 DERIVED t1 ALL NULL NULL NULL NULL 3 Using where SELECT * FROM t3 WHERE t3.b IN (SELECT v1.b FROM v1, t2 WHERE t2.c = v1.c AND t2.c = v1.b AND v1.b = t3.c); diff --git a/mysql-test/main/distinct.result b/mysql-test/main/distinct.result index 926dc17d0c4..70bce519ad2 100644 --- a/mysql-test/main/distinct.result +++ b/mysql-test/main/distinct.result @@ -1039,4 +1039,14 @@ count(distinct case when id<=63 then id end) 63 drop table tb; SET @@tmp_table_size= @tmp_table_size_save; +# +# MDEV-14695: Assertion `n < m_size' failed in Bounds_checked_array::operator +# +CREATE TABLE t1 (b1 BIT, b2 BIT, b3 BIT, b4 BIT , b5 BIT, b6 BIT); +INSERT INTO t1 VALUES (1,0,0,1,0,1),(0,1,0,0,1,0); +SELECT DISTINCT b1+'0', b2+'0', b3+'0', b4+'0', b5+'0', b6 +'0' FROM t1; +b1+'0' b2+'0' b3+'0' b4+'0' b5+'0' b6 +'0' +1 0 0 1 0 1 +0 1 0 0 1 0 +DROP TABLE t1; End of 5.5 tests diff --git a/mysql-test/main/distinct.test b/mysql-test/main/distinct.test index 7cf3d6810bb..c11f8b501bc 100644 --- a/mysql-test/main/distinct.test +++ b/mysql-test/main/distinct.test @@ -790,4 +790,12 @@ drop table tb; SET @@tmp_table_size= @tmp_table_size_save; +--echo # +--echo # MDEV-14695: Assertion `n < m_size' failed in Bounds_checked_array::operator +--echo # + +CREATE TABLE t1 (b1 BIT, b2 BIT, b3 BIT, b4 BIT , b5 BIT, b6 BIT); +INSERT INTO t1 VALUES (1,0,0,1,0,1),(0,1,0,0,1,0); +SELECT DISTINCT b1+'0', b2+'0', b3+'0', b4+'0', b5+'0', b6 +'0' FROM t1; +DROP TABLE t1; --echo End of 5.5 tests diff --git a/mysql-test/main/drop_bad_db_type.result b/mysql-test/main/drop_bad_db_type.result index de22373e0fd..d44af1e8808 100644 --- a/mysql-test/main/drop_bad_db_type.result +++ b/mysql-test/main/drop_bad_db_type.result @@ -26,8 +26,10 @@ Level Warning Code 1286 Message Unknown storage engine 'ARCHIVE' install soname 'ha_archive'; +db.opt t1.ARZ t1.frm drop table t1; +db.opt uninstall soname 'ha_archive'; set debug_dbug='-d,unstable_db_type'; diff --git a/mysql-test/main/explain_slowquerylog.result b/mysql-test/main/explain_slowquerylog.result index 2b350cf04ff..63da82b5288 100644 --- a/mysql-test/main/explain_slowquerylog.result +++ b/mysql-test/main/explain_slowquerylog.result @@ -54,3 +54,7 @@ SELECT 1; 1 SET log_slow_rate_limit=@save1; SET long_query_time=@save2; +create table t1 (a int); +execute immediate "select * from t1 join t1 t2 on (t1.a>5) where exists (select 1)"; +a a +drop table t1; diff --git a/mysql-test/main/explain_slowquerylog.test b/mysql-test/main/explain_slowquerylog.test index 6503a326eb8..ee90fbac4e6 100644 --- a/mysql-test/main/explain_slowquerylog.test +++ b/mysql-test/main/explain_slowquerylog.test @@ -61,3 +61,9 @@ SELECT 1; SET log_slow_rate_limit=@save1; SET long_query_time=@save2; +# +# MDEV-16153 Server crashes in Apc_target::disable, ASAN heap-use-after-free in Explain_query::~Explain_query upon/after EXECUTE IMMEDIATE +# +create table t1 (a int); +execute immediate "select * from t1 join t1 t2 on (t1.a>5) where exists (select 1)"; +drop table t1; diff --git a/mysql-test/main/features.result b/mysql-test/main/features.result index a12b41c943a..f31a6672549 100644 --- a/mysql-test/main/features.result +++ b/mysql-test/main/features.result @@ -10,8 +10,10 @@ Feature_dynamic_columns 0 Feature_fulltext 0 Feature_gis 0 Feature_invisible_columns 0 +Feature_json 0 Feature_locale 0 Feature_subquery 0 +Feature_system_versioning 0 Feature_timezone 0 Feature_trigger 0 
Feature_window_functions 0 diff --git a/mysql-test/main/func_math.result b/mysql-test/main/func_math.result index 7f2ed1c2ff0..832ca69d3ea 100644 --- a/mysql-test/main/func_math.result +++ b/mysql-test/main/func_math.result @@ -835,9 +835,9 @@ select 0=0, 0=-0, 0.0= -0.0, 0.0 = -(0.0), 0.0E1=-0.0E1, 0.0E1=-(0.0E1); # # CRC32 tests # -select CRC32(NULL), CRC32(''), CRC32('MySQL'), CRC32('mysql'), CRC32('01234567'), CRC32('012345678'); -CRC32(NULL) CRC32('') CRC32('MySQL') CRC32('mysql') CRC32('01234567') CRC32('012345678') -NULL 0 3259397556 2501908538 763378421 939184570 +select CRC32(NULL), CRC32(''), CRC32('MySQL'), CRC32('mysql'), CRC32('01234567'), CRC32('012345678'), CRC32(REPEAT('ABCDEfghij', 20)), CRC32(REPEAT('0123456789', 200)); +CRC32(NULL) CRC32('') CRC32('MySQL') CRC32('mysql') CRC32('01234567') CRC32('012345678') CRC32(REPEAT('ABCDEfghij', 20)) CRC32(REPEAT('0123456789', 200)) +NULL 0 3259397556 2501908538 763378421 939184570 3823776386 1428305034 explain extended select (3-2)+1, (3/2)*1, 3-(2+1), 3/(2*1); id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used @@ -900,3 +900,388 @@ a HEX(a) DROP TABLE t2; DROP TABLE t1; SET sql_mode=DEFAULT; +# +# Bug#26495791 - EXPAND TEST SUITE TO INCLUDE CRC32 TESTS +# +SELECT CRC32(NULL), CRC32(''), CRC32('MySQL'), CRC32('mysql'); +CRC32(NULL) CRC32('') CRC32('MySQL') CRC32('mysql') +NULL 0 3259397556 2501908538 +SELECT CRC32('01234567'), CRC32('012345678'); +CRC32('01234567') CRC32('012345678') +763378421 939184570 +SELECT CRC32('~!@$%^*'), CRC32('-0.0001'); +CRC32('~!@$%^*') CRC32('-0.0001') +1533935548 324352530 +SELECT CRC32(99999999999999999999999999999999); +CRC32(99999999999999999999999999999999) +3035957051 +SELECT CRC32(-99999999999999999999999999999999); +CRC32(-99999999999999999999999999999999) +1052326872 +DROP TABLE IF EXISTS t; +Warnings: +Note 1051 Unknown table 'test.t' +CREATE TABLE t(a INT, b VARCHAR(2)); +INSERT INTO t VALUES (1,'a'), (2,'qw'), (1,'t'), (3,'t'); +SELECT crc32(SUM(a)) FROM t; +crc32(SUM(a)) +1790921346 +SELECT crc32(AVG(a)) FROM t GROUP BY b; +crc32(AVG(a)) +768278432 +2875100430 +2875100430 +SELECT crc32(MAX(b)) FROM t GROUP BY a; +crc32(MAX(b)) +2238339752 +3114057431 +2238339752 +SELECT a, b, crc32(a) FROM t GROUP BY a,b HAVING crc32(MAX(a))=450215437; +a b crc32(a) +2 qw 450215437 +SELECT a,b,concat(a,b),crc32(concat(a,b)) FROM t ORDER BY crc32(concat(a,b)); +a b concat(a,b) crc32(concat(a,b)) +2 qw 2qw 552289476 +1 a 1a 3174122627 +1 t 1t 3505168488 +3 t 3t 3805987562 +DROP TABLE t; +SELECT CRC32(4+2); +CRC32(4+2) +498629140 +SELECT CRC32(4/2); +CRC32(4/2) +2875100430 +SELECT CRC32(4-2); +CRC32(4-2) +450215437 +SELECT CRC32(4*2); +CRC32(4*2) +4194326291 +SELECT CRC32(ABS(-6)); +CRC32(ABS(-6)) +498629140 +SELECT CRC32(CEILING(1.23)); +CRC32(CEILING(1.23)) +450215437 +SELECT CRC32(FLOOR(1.23)); +CRC32(FLOOR(1.23)) +2212294583 +SELECT CRC32(LOG(10,100)); +CRC32(LOG(10,100)) +450215437 +SELECT CRC32(PI()); +CRC32(PI()) +2969982827 +SELECT CRC32(POWER(2,2)); +CRC32(POWER(2,2)) +4088798008 +SELECT CRC32(ROUND(1.58)); +CRC32(ROUND(1.58)) +450215437 +SELECT CRC32(SIGN(0)); +CRC32(SIGN(0)) +4108050209 +SELECT CRC32(SQRT(4)); +CRC32(SQRT(4)) +450215437 +SELECT CRC32(2 > 4); +CRC32(2 > 4) +4108050209 +SELECT CRC32(2 < 4); +CRC32(2 < 4) +2212294583 +SELECT CRC32(2 >= 4); +CRC32(2 >= 4) +4108050209 +SELECT CRC32(2 <= 4); +CRC32(2 <= 4) +2212294583 +SELECT CRC32(2 != 4); +CRC32(2 != 4) +2212294583 +SELECT CRC32(NOT 1); +CRC32(NOT 
1) +4108050209 +SELECT CRC32(1 AND 1); +CRC32(1 AND 1) +2212294583 +SELECT CRC32(1 OR 1); +CRC32(1 OR 1) +2212294583 +SELECT CRC32(1 XOR 1); +CRC32(1 XOR 1) +4108050209 +SELECT CRC32(ASCII('2')); +CRC32(ASCII('2')) +3308380389 +SELECT CRC32(BIT_LENGTH('text')); +CRC32(BIT_LENGTH('text')) +2103780943 +SELECT CRC32(CHAR('77','121','83','81','76')); +CRC32(CHAR('77','121','83','81','76')) +3259397556 +SELECT CRC32(CONCAT('good','year')); +CRC32(CONCAT('good','year')) +3441724142 +SELECT CRC32(INSERT('foodyear', 1, 4, 'good')); +CRC32(INSERT('foodyear', 1, 4, 'good')) +3441724142 +SELECT CRC32(LEFT('goodyear', 4)); +CRC32(LEFT('goodyear', 4)) +1820610194 +SELECT CRC32(LENGTH('text')); +CRC32(LENGTH('text')) +4088798008 +SELECT CRC32(LOWER('GOODYEAR')); +CRC32(LOWER('GOODYEAR')) +3441724142 +SELECT CRC32(UPPER('goodyear')); +CRC32(UPPER('goodyear')) +186062498 +SELECT CRC32(LTRIM(' goodyear')); +CRC32(LTRIM(' goodyear')) +3441724142 +SELECT CRC32(RTRIM('goodyear ')); +CRC32(RTRIM('goodyear ')) +3441724142 +SELECT CRC32(REPLACE('godyear','o','oo')); +CRC32(REPLACE('godyear','o','oo')) +3441724142 +SELECT CRC32(REVERSE('goodyear')); +CRC32(REVERSE('goodyear')) +3423698264 +SELECT CRC32(true); +CRC32(true) +2212294583 +SELECT CRC32(false); +CRC32(false) +4108050209 +DROP TABLE IF EXISTS t1; +Warnings: +Note 1051 Unknown table 'test.t1' +CREATE TABLE t1 (c1 BIT(5), +c2 TINYINT, +c3 MEDIUMINT, +c4 INTEGER, +c5 BIGINT, +c6 DECIMAL(7,5), +c7 FLOAT(7,5), +c8 DOUBLE(7,5)); +INSERT INTO t1 VALUES (B'10101', 127, 8388607, 2147483647, +9223372036854775807, 10.5, 11.5, 12.5); +SELECT CRC32(c1) FROM t1; +CRC32(c1) +3219065702 +SELECT CRC32(c2) FROM t1; +CRC32(c2) +2401609675 +SELECT CRC32(c3) FROM t1; +CRC32(c3) +1742563487 +SELECT CRC32(c4) FROM t1; +CRC32(c4) +365954768 +SELECT CRC32(c5) FROM t1; +CRC32(c5) +1122634462 +SELECT CRC32(c6) FROM t1; +CRC32(c6) +4255015174 +SELECT CRC32(c7) FROM t1; +CRC32(c7) +1542023858 +SELECT CRC32(c8) FROM t1; +CRC32(c8) +1778479151 +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (c1 DATE, +c2 DATETIME, +c3 TIMESTAMP, +c4 TIME, +c5 YEAR); +INSERT INTO t1 VALUES ('2007-01-01', '2007-01-01 12:00:01', +'2007-01-01 00:00:01.000000', +'12:00:01.000000', '2007'); +SELECT CRC32(c1) FROM t1; +CRC32(c1) +2772295888 +SELECT CRC32(c2) FROM t1; +CRC32(c2) +1449026401 +SELECT CRC32(c3) FROM t1; +CRC32(c3) +225145558 +SELECT CRC32(c4) FROM t1; +CRC32(c4) +1077283474 +SELECT CRC32(c5) FROM t1; +CRC32(c5) +955685210 +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (c1 CHAR, +c2 VARCHAR(10), +c3 BINARY(1), +c4 VARBINARY(10), +c5 TINYBLOB, +c6 TINYTEXT, +c7 BLOB, +c8 TEXT, +c9 MEDIUMBLOB, +c10 MEDIUMTEXT, +c11 LONGBLOB, +c12 LONGTEXT); +INSERT INTO t1 VALUES ('a', 'a', 0x61, 0x61, 'a', 'a', +'a', 'a', 'a', 'a', 'a', 'a'); +SELECT CRC32(c1) FROM t1; +CRC32(c1) +3904355907 +SELECT CRC32(c2) FROM t1; +CRC32(c2) +3904355907 +SELECT CRC32(c3) FROM t1; +CRC32(c3) +3904355907 +SELECT CRC32(c4) FROM t1; +CRC32(c4) +3904355907 +SELECT CRC32(c5) FROM t1; +CRC32(c5) +3904355907 +SELECT CRC32(c6) FROM t1; +CRC32(c6) +3904355907 +SELECT CRC32(c7) FROM t1; +CRC32(c7) +3904355907 +SELECT CRC32(c8) FROM t1; +CRC32(c8) +3904355907 +SELECT CRC32(c9) FROM t1; +CRC32(c9) +3904355907 +SELECT CRC32(c10) FROM t1; +CRC32(c10) +3904355907 +SELECT CRC32(c11) FROM t1; +CRC32(c11) +3904355907 +SELECT CRC32(c12) FROM t1; +CRC32(c12) +3904355907 +SELECT CRC32(ST_GeomFromText('POINT(1 1)')); +CRC32(ST_GeomFromText('POINT(1 1)')) +1349318989 +CREATE TABLE geom_data(id INT, +pt POINT NOT NULL, +lnstr LINESTRING NOT NULL, +mlnstr 
MULTILINESTRING NOT NULL, +poly POLYGON NOT NULL, +mpoly MULTIPOLYGON NOT NULL); +INSERT INTO geom_data VALUES (10, +ST_GEOMFROMTEXT('POINT(10 20)'), +ST_GEOMFROMTEXT('LINESTRING(0 0,5 5,6 6)'), +ST_GEOMFROMTEXT('MULTILINESTRING((0 0,2 3,4 5),(6 6,8 8,9 9,10 10))'), +ST_GEOMFROMTEXT('POLYGON((0 0,6 7,8 8,3 9,0 0),(3 6,4 6,4 7,3 6))'), +ST_GEOMFROMTEXT('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)), + ((2 2,4 5,6 2,2 2)))')); +SELECT CRC32(ST_X(pt)) FROM geom_data; +CRC32(ST_X(pt)) +2707236321 +SELECT CRC32(ST_NumPoints((lnstr))) FROM geom_data; +CRC32(ST_NumPoints((lnstr))) +1842515611 +SELECT CRC32(ST_Length((mlnstr))) FROM geom_data; +CRC32(ST_Length((mlnstr))) +2090153432 +SELECT CRC32(ST_Area((poly))) FROM geom_data; +CRC32(ST_Area((poly))) +534598600 +SELECT CRC32(ST_Area((mpoly))) FROM geom_data; +CRC32(ST_Area((mpoly))) +3832313845 +DROP TABLE geom_data; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (e1 ENUM ('a','b'), s1 SET('a','b')); +INSERT INTO t1 VALUES(2,'a,b'),('a','b,a'); +SELECT e1, CRC32(e1) FROM t1; +e1 CRC32(e1) +b 1908338681 +a 3904355907 +SELECT s1, CRC32(s1) FROM t1; +s1 CRC32(s1) +a,b 752423903 +a,b 752423903 +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a JSON); +INSERT INTO t1 VALUES ('{"name" : "goodyear"}'), +('{"name" : "verygood-year"}'); +SELECT a, CRC32(a) FROM t1; +a CRC32(a) +{"name" : "goodyear"} 3258555818 +{"name" : "verygood-year"} 3616384779 +SELECT CRC32(json_query(a, '$.name')) FROM t1 WHERE json_query(a, '$.name') = 'goodyear'; +CRC32(json_query(a, '$.name')) +SELECT CRC32(REPLACE(JSON_EXTRACT(a, "$.name"),'\"','')) +FROM t1 WHERE JSON_EXTRACT(a, "$.name") = 'goodyear'; +CRC32(REPLACE(JSON_EXTRACT(a, "$.name"),'\"','')) +3441724142 +DROP TABLE t1; +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES(10); +CREATE VIEW v1 AS SELECT CRC32(a) AS my_crc FROM t1; +SELECT * FROM v1; +my_crc +2707236321 +SELECT CRC32(CRC32(my_crc)) FROM v1; +CRC32(CRC32(my_crc)) +3114155452 +DROP VIEW v1; +DROP TABLE t1; +CREATE TABLE t1 (a CHAR); +CREATE TABLE t2 (b BIGINT); +CREATE TRIGGER trg1 +BEFORE INSERT ON t1 +FOR EACH ROW +INSERT INTO t2 VALUES(CRC32(NEW.a)); +INSERT INTO t1 VALUES('a'); +SELECT * FROM t2; +b +3904355907 +DROP TRIGGER trg1; +DROP TABLE t2; +DROP TABLE t1; +CREATE PROCEDURE crc32_proc (IN a CHAR, OUT b BIGINT) +SELECT CRC32(a) INTO b; +CALL crc32_proc('a',@val); +SELECT @val; +@val +3904355907 +DROP PROCEDURE crc32_proc; +CREATE FUNCTION crc32_func(inputvar CHAR) +RETURNS BIGINT +BEGIN +DECLARE crcval BIGINT; +SELECT CRC32(inputvar) INTO crcval; +RETURN crcval; +END| +SELECT crc32_func('a'); +crc32_func('a') +3904355907 +DROP FUNCTION crc32_func; +PREPARE stmt1 FROM 'SELECT CRC32(?)'; +SET @val = 'a'; +EXECUTE stmt1 USING @val; +CRC32(?) 
+3904355907 +DEALLOCATE PREPARE stmt; +SET NAMES utf8; +CREATE TABLE t1 (a TEXT) CHARACTER SET = utf8; +LOAD DATA INFILE '../../std_data/loaddata_utf8.dat' INTO TABLE t1 CHARACTER SET utf8; +SELECT HEX(a), CRC32(a) from t1; +HEX(a) CRC32(a) +E4B880E4BA8CE4B889 1785250883 +E59B9BE4BA94E585AD 2914501801 +E4B883E585ABE4B99D 2374586519 +DROP TABLE t1; +SET NAMES default; diff --git a/mysql-test/main/func_math.test b/mysql-test/main/func_math.test index 83e345ec890..a2c54b58a67 100644 --- a/mysql-test/main/func_math.test +++ b/mysql-test/main/func_math.test @@ -607,7 +607,7 @@ select 0=0, 0=-0, 0.0= -0.0, 0.0 = -(0.0), 0.0E1=-0.0E1, 0.0E1=-(0.0E1); --echo # CRC32 tests --echo # -select CRC32(NULL), CRC32(''), CRC32('MySQL'), CRC32('mysql'), CRC32('01234567'), CRC32('012345678'); +select CRC32(NULL), CRC32(''), CRC32('MySQL'), CRC32('mysql'), CRC32('01234567'), CRC32('012345678'), CRC32(REPEAT('ABCDEfghij', 20)), CRC32(REPEAT('0123456789', 200)); # # MDEV-13673 Bad result in view @@ -659,3 +659,241 @@ SELECT a, HEX(a) FROM t2; DROP TABLE t2; DROP TABLE t1; SET sql_mode=DEFAULT; + + +--echo # +--echo # Bug#26495791 - EXPAND TEST SUITE TO INCLUDE CRC32 TESTS +--echo # + +SELECT CRC32(NULL), CRC32(''), CRC32('MySQL'), CRC32('mysql'); +SELECT CRC32('01234567'), CRC32('012345678'); +SELECT CRC32('~!@$%^*'), CRC32('-0.0001'); +SELECT CRC32(99999999999999999999999999999999); +SELECT CRC32(-99999999999999999999999999999999); + +# Test cases for using the function in aggregate functions, group-by, having +# and order-by clauses +DROP TABLE IF EXISTS t; +CREATE TABLE t(a INT, b VARCHAR(2)); +INSERT INTO t VALUES (1,'a'), (2,'qw'), (1,'t'), (3,'t'); +SELECT crc32(SUM(a)) FROM t; +SELECT crc32(AVG(a)) FROM t GROUP BY b; +SELECT crc32(MAX(b)) FROM t GROUP BY a; +SELECT a, b, crc32(a) FROM t GROUP BY a,b HAVING crc32(MAX(a))=450215437; +SELECT a,b,concat(a,b),crc32(concat(a,b)) FROM t ORDER BY crc32(concat(a,b)); +DROP TABLE t; + +# Test cases for arithmetic operators and functions +SELECT CRC32(4+2); +SELECT CRC32(4/2); +SELECT CRC32(4-2); +SELECT CRC32(4*2); +SELECT CRC32(ABS(-6)); +SELECT CRC32(CEILING(1.23)); +SELECT CRC32(FLOOR(1.23)); +SELECT CRC32(LOG(10,100)); +SELECT CRC32(PI()); +SELECT CRC32(POWER(2,2)); +SELECT CRC32(ROUND(1.58)); +SELECT CRC32(SIGN(0)); +SELECT CRC32(SQRT(4)); + +# Test cases for comparison operators +SELECT CRC32(2 > 4); +SELECT CRC32(2 < 4); +SELECT CRC32(2 >= 4); +SELECT CRC32(2 <= 4); +SELECT CRC32(2 != 4); + +# Test cases for logical operators +SELECT CRC32(NOT 1); +SELECT CRC32(1 AND 1); +SELECT CRC32(1 OR 1); +SELECT CRC32(1 XOR 1); + +# Test cases for string functions +SELECT CRC32(ASCII('2')); +SELECT CRC32(BIT_LENGTH('text')); +SELECT CRC32(CHAR('77','121','83','81','76')); +SELECT CRC32(CONCAT('good','year')); +SELECT CRC32(INSERT('foodyear', 1, 4, 'good')); +SELECT CRC32(LEFT('goodyear', 4)); +SELECT CRC32(LENGTH('text')); +SELECT CRC32(LOWER('GOODYEAR')); +SELECT CRC32(UPPER('goodyear')); +SELECT CRC32(LTRIM(' goodyear')); +SELECT CRC32(RTRIM('goodyear ')); +SELECT CRC32(REPLACE('godyear','o','oo')); +SELECT CRC32(REVERSE('goodyear')); + +# Test cases for boolean values +SELECT CRC32(true); +SELECT CRC32(false); + +# Test cases for numeric data types +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (c1 BIT(5), + c2 TINYINT, + c3 MEDIUMINT, + c4 INTEGER, + c5 BIGINT, + c6 DECIMAL(7,5), + c7 FLOAT(7,5), + c8 DOUBLE(7,5)); + +INSERT INTO t1 VALUES (B'10101', 127, 8388607, 2147483647, + 9223372036854775807, 10.5, 11.5, 12.5); + +SELECT CRC32(c1) FROM t1; +SELECT CRC32(c2) FROM 
t1; +SELECT CRC32(c3) FROM t1; +SELECT CRC32(c4) FROM t1; +SELECT CRC32(c5) FROM t1; +SELECT CRC32(c6) FROM t1; +SELECT CRC32(c7) FROM t1; +SELECT CRC32(c8) FROM t1; + +# Test cases for temporal data types +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (c1 DATE, + c2 DATETIME, + c3 TIMESTAMP, + c4 TIME, + c5 YEAR); +INSERT INTO t1 VALUES ('2007-01-01', '2007-01-01 12:00:01', + '2007-01-01 00:00:01.000000', + '12:00:01.000000', '2007'); +SELECT CRC32(c1) FROM t1; +SELECT CRC32(c2) FROM t1; +SELECT CRC32(c3) FROM t1; +SELECT CRC32(c4) FROM t1; +SELECT CRC32(c5) FROM t1; + +# Test cases for string data types +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (c1 CHAR, + c2 VARCHAR(10), + c3 BINARY(1), + c4 VARBINARY(10), + c5 TINYBLOB, + c6 TINYTEXT, + c7 BLOB, + c8 TEXT, + c9 MEDIUMBLOB, + c10 MEDIUMTEXT, + c11 LONGBLOB, + c12 LONGTEXT); + +INSERT INTO t1 VALUES ('a', 'a', 0x61, 0x61, 'a', 'a', + 'a', 'a', 'a', 'a', 'a', 'a'); +SELECT CRC32(c1) FROM t1; +SELECT CRC32(c2) FROM t1; +SELECT CRC32(c3) FROM t1; +SELECT CRC32(c4) FROM t1; +SELECT CRC32(c5) FROM t1; +SELECT CRC32(c6) FROM t1; +SELECT CRC32(c7) FROM t1; +SELECT CRC32(c8) FROM t1; +SELECT CRC32(c9) FROM t1; +SELECT CRC32(c10) FROM t1; +SELECT CRC32(c11) FROM t1; +SELECT CRC32(c12) FROM t1; + +# Test cases for geometric data types +SELECT CRC32(ST_GeomFromText('POINT(1 1)')); + +CREATE TABLE geom_data(id INT, + pt POINT NOT NULL, + lnstr LINESTRING NOT NULL, + mlnstr MULTILINESTRING NOT NULL, + poly POLYGON NOT NULL, + mpoly MULTIPOLYGON NOT NULL); + +INSERT INTO geom_data VALUES (10, + ST_GEOMFROMTEXT('POINT(10 20)'), + ST_GEOMFROMTEXT('LINESTRING(0 0,5 5,6 6)'), + ST_GEOMFROMTEXT('MULTILINESTRING((0 0,2 3,4 5),(6 6,8 8,9 9,10 10))'), + ST_GEOMFROMTEXT('POLYGON((0 0,6 7,8 8,3 9,0 0),(3 6,4 6,4 7,3 6))'), + ST_GEOMFROMTEXT('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)), + ((2 2,4 5,6 2,2 2)))')); + +SELECT CRC32(ST_X(pt)) FROM geom_data; +SELECT CRC32(ST_NumPoints((lnstr))) FROM geom_data; +SELECT CRC32(ST_Length((mlnstr))) FROM geom_data; +SELECT CRC32(ST_Area((poly))) FROM geom_data; +SELECT CRC32(ST_Area((mpoly))) FROM geom_data; +DROP TABLE geom_data; + +# Test cases for ENUM and SET data types +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (e1 ENUM ('a','b'), s1 SET('a','b')); +INSERT INTO t1 VALUES(2,'a,b'),('a','b,a'); +SELECT e1, CRC32(e1) FROM t1; +SELECT s1, CRC32(s1) FROM t1; + +# Test cases for JSON data types +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a JSON); +INSERT INTO t1 VALUES ('{"name" : "goodyear"}'), + ('{"name" : "verygood-year"}'); +SELECT a, CRC32(a) FROM t1; +SELECT CRC32(json_query(a, '$.name')) FROM t1 WHERE json_query(a, '$.name') = 'goodyear'; +SELECT CRC32(REPLACE(JSON_EXTRACT(a, "$.name"),'\"','')) + FROM t1 WHERE JSON_EXTRACT(a, "$.name") = 'goodyear'; +DROP TABLE t1; + +# Test case for views +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES(10); +CREATE VIEW v1 AS SELECT CRC32(a) AS my_crc FROM t1; +SELECT * FROM v1; +SELECT CRC32(CRC32(my_crc)) FROM v1; +DROP VIEW v1; +DROP TABLE t1; + +# Test case for triggers +CREATE TABLE t1 (a CHAR); +CREATE TABLE t2 (b BIGINT); +CREATE TRIGGER trg1 + BEFORE INSERT ON t1 + FOR EACH ROW + INSERT INTO t2 VALUES(CRC32(NEW.a)); +INSERT INTO t1 VALUES('a'); +SELECT * FROM t2; +DROP TRIGGER trg1; +DROP TABLE t2; +DROP TABLE t1; + +# Test case for a stored procedure +CREATE PROCEDURE crc32_proc (IN a CHAR, OUT b BIGINT) + SELECT CRC32(a) INTO b; +CALL crc32_proc('a',@val); +SELECT @val; +DROP PROCEDURE crc32_proc; + +# Test case for a user defined function +DELIMITER |; +CREATE FUNCTION 
crc32_func(inputvar CHAR) +RETURNS BIGINT +BEGIN + DECLARE crcval BIGINT; + SELECT CRC32(inputvar) INTO crcval; + RETURN crcval; +END| +DELIMITER ;| +SELECT crc32_func('a'); +DROP FUNCTION crc32_func; + +# Test case for a prepared statement +PREPARE stmt1 FROM 'SELECT CRC32(?)'; +SET @val = 'a'; +EXECUTE stmt1 USING @val; +DEALLOCATE PREPARE stmt; + +# Test case for checksum on contents of a file +SET NAMES utf8; +CREATE TABLE t1 (a TEXT) CHARACTER SET = utf8; +LOAD DATA INFILE '../../std_data/loaddata_utf8.dat' INTO TABLE t1 CHARACTER SET utf8; +SELECT HEX(a), CRC32(a) from t1; +DROP TABLE t1; +SET NAMES default; diff --git a/mysql-test/main/func_time.result b/mysql-test/main/func_time.result index 2772f850ce9..151dbc8d5d8 100644 --- a/mysql-test/main/func_time.result +++ b/mysql-test/main/func_time.result @@ -3484,3 +3484,56 @@ t1 CREATE TABLE `t1` ( `c5` varchar(100) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; +# +# MDEV-16152 Expressions with INTERVAL return bad results in some cases +# +SELECT TIMESTAMP'2001-01-01 10:20:30' - INTERVAL '10' YEAR AS c1, +-INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2; +c1 c2 +1991-01-01 10:20:30 1991-01-01 10:20:30 +SELECT TIMESTAMP'2001-01-01 10:20:30' + INTERVAL '10' YEAR AS c1, +INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2, ++INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c3; +c1 c2 c3 +2011-01-01 10:20:30 2011-01-01 10:20:30 2011-01-01 10:20:30 +EXPLAIN EXTENDED SELECT +TIMESTAMP'2001-01-01 10:20:30' - INTERVAL '10' YEAR AS c1, +-INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select TIMESTAMP'2001-01-01 10:20:30' - interval '10' year AS `c1`,TIMESTAMP'2001-01-01 10:20:30' - interval '10' year AS `c2` +EXPLAIN EXTENDED SELECT +TIMESTAMP'2001-01-01 10:20:30' + INTERVAL '10' YEAR AS c1, +INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2, ++INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c3; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select TIMESTAMP'2001-01-01 10:20:30' + interval '10' year AS `c1`,TIMESTAMP'2001-01-01 10:20:30' + interval '10' year AS `c2`,TIMESTAMP'2001-01-01 10:20:30' + interval '10' year AS `c3` +CREATE TABLE t_ts (a timestamp(6)); +CREATE TABLE t_trig (a timestamp(6)); +CREATE FUNCTION fn_sleep_before_now() returns int +BEGIN +INSERT INTO t_ts SET a= current_timestamp(6); +RETURN 0; +END// +CREATE TRIGGER trg_insert_t_ts after INSERT on t_ts for each row +BEGIN +INSERT into t_trig set a= current_timestamp(6); +END// +SET @sav_slow_query_log= @@session.slow_query_log; +SET @@session.slow_query_log= ON; +SELECT current_timestamp(6),fn_sleep_before_now() INTO @ts_cur, @ts_func; +SELECT a FROM t_ts LIMIT 1 into @ts_func; +SELECT a FROM t_trig LIMIT 1 into @ts_trig; +DELETE FROM t_ts; +DELETE FROM t_trig; +SET @@session.slow_query_log= OFF; +SELECT current_timestamp(6),fn_sleep_before_now() INTO @ts_cur, @func_ts; +SELECT a FROM t_ts LIMIT 1 into @ts_func; +SELECT a FROM t_trig LIMIT 1 into @ts_trig; +SET @@session.slow_query_log= @sav_slow_query_log; +DROP FUNCTION fn_sleep_before_now; +DROP TRIGGER trg_insert_t_ts; +DROP TABLE t_ts, t_trig; diff --git a/mysql-test/main/func_time.test b/mysql-test/main/func_time.test index 5417cb20a92..ecbf23f8e36 100644 --- 
a/mysql-test/main/func_time.test +++ b/mysql-test/main/func_time.test @@ -2057,3 +2057,81 @@ EXECUTE IMMEDIATE USING NULL, '10', 10, 10.0, 10e0, TIME'10:20:30'; SHOW CREATE TABLE t1; DROP TABLE t1; + +--echo # +--echo # MDEV-16152 Expressions with INTERVAL return bad results in some cases +--echo # + +SELECT TIMESTAMP'2001-01-01 10:20:30' - INTERVAL '10' YEAR AS c1, + -INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2; + +SELECT TIMESTAMP'2001-01-01 10:20:30' + INTERVAL '10' YEAR AS c1, + INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2, + +INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c3; + +EXPLAIN EXTENDED SELECT + TIMESTAMP'2001-01-01 10:20:30' - INTERVAL '10' YEAR AS c1, + -INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2; + +EXPLAIN EXTENDED SELECT + TIMESTAMP'2001-01-01 10:20:30' + INTERVAL '10' YEAR AS c1, + INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2, + +INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c3; + +############ +# MDEV-13727 +# Current timestamp functions inside stored functions must return the +# value of the top-level statement's timestamp (its start time). +# This must hold regardless of @@slow_query_log option. +# + +CREATE TABLE t_ts (a timestamp(6)); +CREATE TABLE t_trig (a timestamp(6)); +delimiter //; +CREATE FUNCTION fn_sleep_before_now() returns int +BEGIN + INSERT INTO t_ts SET a= current_timestamp(6); + RETURN 0; +END// +CREATE TRIGGER trg_insert_t_ts after INSERT on t_ts for each row +BEGIN + INSERT into t_trig set a= current_timestamp(6); +END// +delimiter ;// + +SET @sav_slow_query_log= @@session.slow_query_log; + +# @@slow_query_log ON check +SET @@session.slow_query_log= ON; +SELECT current_timestamp(6),fn_sleep_before_now() INTO @ts_cur, @ts_func; + +SELECT a FROM t_ts LIMIT 1 into @ts_func; +SELECT a FROM t_trig LIMIT 1 into @ts_trig; +if (!`SELECT @ts_cur = @ts_func and @ts_func = @ts_trig`) +{ + SELECT @ts_cur, @ts_func, @ts_trig; + --die Error: timestamps must be equal but they diverge +} +DELETE FROM t_ts; +DELETE FROM t_trig; + +# @@slow_query_log OFF check +SET @@session.slow_query_log= OFF; +SELECT current_timestamp(6),fn_sleep_before_now() INTO @ts_cur, @func_ts; +SELECT a FROM t_ts LIMIT 1 into @ts_func; +SELECT a FROM t_trig LIMIT 1 into @ts_trig; +if (!`SELECT @ts_cur = @ts_func and @ts_func = @ts_trig`) +{ + SELECT @ts_cur, @ts_func, @ts_trig; + --die Error: timestamps must be equal but they diverge +} + +# Cleanup +SET @@session.slow_query_log= @sav_slow_query_log; +DROP FUNCTION fn_sleep_before_now; +DROP TRIGGER trg_insert_t_ts; +DROP TABLE t_ts, t_trig; + +# +# End of MDEV-13727 +################### diff --git a/mysql-test/main/grant.result b/mysql-test/main/grant.result index 22add627144..1d4402185a5 100644 --- a/mysql-test/main/grant.result +++ b/mysql-test/main/grant.result @@ -1709,11 +1709,6 @@ drop user mysqluser11@localhost; drop database mysqltest1; End of 5.0 tests set names utf8; -grant select on test.* to юзер_юзер@localhost; -user() -юзер_юзер@localhost -revoke all on test.* from юзер_юзер@localhost; -drop user юзер_юзер@localhost; grant select on test.* to 
очень_длинный_юзер890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890@localhost; ERROR HY000: String 'очень_длинный_юзер890123456789012345678901234567890123' is too long for user name (should be no longer than 80) set names default; diff --git a/mysql-test/main/grant.test b/mysql-test/main/grant.test index c945e818181..72e427493da 100644 --- a/mysql-test/main/grant.test +++ b/mysql-test/main/grant.test @@ -1510,15 +1510,7 @@ drop database mysqltest1; --echo End of 5.0 tests - -# -# Bug#21432 Database/Table name limited to 64 bytes, not chars, problems with multi-byte -# set names utf8; -grant select on test.* to юзер_юзер@localhost; ---exec $MYSQL --default-character-set=utf8 --user=юзер_юзер -e "select user()" -revoke all on test.* from юзер_юзер@localhost; -drop user юзер_юзер@localhost; --error ER_WRONG_STRING_LENGTH grant select on test.* to очень_длинный_юзер890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890@localhost; set names default; diff --git a/mysql-test/main/grant2.result b/mysql-test/main/grant2.result index 2ccc01d225a..d7e42f9b7aa 100644 --- a/mysql-test/main/grant2.result +++ b/mysql-test/main/grant2.result @@ -809,10 +809,9 @@ LOAD INDEX INTO CACHE t3; Table Op Msg_type Msg_text mysqltest_db1.t3 preload_keys status OK # -# RENAME (doesn't work for temporary tables, thus should fail). +# RENAME should work for temporary tables # RENAME TABLE t3 TO t3_1; -ERROR 42000: INSERT, CREATE command denied to user 'mysqltest_u1'@'localhost' for table 't3_1' # # HANDLER OPEN/READ/CLOSE. # diff --git a/mysql-test/main/grant2.test b/mysql-test/main/grant2.test index 5e8a130feea..cee5df089df 100644 --- a/mysql-test/main/grant2.test +++ b/mysql-test/main/grant2.test @@ -873,9 +873,8 @@ CACHE INDEX t3 IN keycache1; LOAD INDEX INTO CACHE t3; --echo # ---echo # RENAME (doesn't work for temporary tables, thus should fail). 
+--echo # RENAME should work for temporary tables --echo # ---error ER_TABLEACCESS_DENIED_ERROR RENAME TABLE t3 TO t3_1; --echo # diff --git a/mysql-test/main/grant_not_windows.result b/mysql-test/main/grant_not_windows.result new file mode 100644 index 00000000000..fedfaf984b2 --- /dev/null +++ b/mysql-test/main/grant_not_windows.result @@ -0,0 +1,8 @@ +set names utf8; +create user юзер_юзер@localhost; +grant select on test.* to юзер_юзер@localhost; +user() +юзер_юзер@localhost +revoke all on test.* from юзер_юзер@localhost; +drop user юзер_юзер@localhost; +set names default; diff --git a/mysql-test/main/grant_not_windows.test b/mysql-test/main/grant_not_windows.test new file mode 100644 index 00000000000..55b09232edc --- /dev/null +++ b/mysql-test/main/grant_not_windows.test @@ -0,0 +1,14 @@ + # UTF8 parameters to mysql client do not work on Windows +--source include/not_windows.inc +--source include/not_embedded.inc + +# +# Bug#21432 Database/Table name limited to 64 bytes, not chars, problems with multi-byte +# +set names utf8; +create user юзер_юзер@localhost; +grant select on test.* to юзер_юзер@localhost; +--exec $MYSQL --default-character-set=utf8 --user=юзер_юзер -e "select user()" +revoke all on test.* from юзер_юзер@localhost; +drop user юзер_юзер@localhost; +set names default; diff --git a/mysql-test/main/group_by.result b/mysql-test/main/group_by.result index 62c659dd933..6f1887290c0 100644 --- a/mysql-test/main/group_by.result +++ b/mysql-test/main/group_by.result @@ -2832,3 +2832,14 @@ select distinct 1 from t1 group by a,b with rollup limit 1; 1 1 drop table t1; +# +# MDEV-16170 +# Server crashes in Item_null_result::type_handler on SELECT with ROLLUP +# +CREATE TABLE t1 (d DATE); +INSERT INTO t1 VALUES ('2032-10-08'); +SELECT d != '2023-03-04' AS f, COUNT(*) FROM t1 GROUP BY d WITH ROLLUP; +f COUNT(*) +1 1 +NULL 1 +DROP TABLE t1; diff --git a/mysql-test/main/group_by.test b/mysql-test/main/group_by.test index 275939df5c5..5574a6654cf 100644 --- a/mysql-test/main/group_by.test +++ b/mysql-test/main/group_by.test @@ -1948,3 +1948,12 @@ insert into t1 values(-126,7),(1,1),(0,0),(-1,1),(351,65534); select distinct 1 from t1 group by a,b with rollup limit 1; drop table t1; +--echo # +--echo # MDEV-16170 +--echo # Server crashes in Item_null_result::type_handler on SELECT with ROLLUP +--echo # + +CREATE TABLE t1 (d DATE); +INSERT INTO t1 VALUES ('2032-10-08'); +SELECT d != '2023-03-04' AS f, COUNT(*) FROM t1 GROUP BY d WITH ROLLUP; +DROP TABLE t1; diff --git a/mysql-test/main/implicit_commit.result b/mysql-test/main/implicit_commit.result index 07536ab0719..b0cd0b75e8d 100644 --- a/mysql-test/main/implicit_commit.result +++ b/mysql-test/main/implicit_commit.result @@ -562,7 +562,7 @@ INSERT INTO db1.trans (a) VALUES (1); cache index t3 in keycache; CALL db1.test_if_commit(); IMPLICIT COMMIT -YES +NO set global keycache.key_buffer_size=0; # # SQLCOM_PRELOAD_KEYS @@ -571,7 +571,7 @@ INSERT INTO db1.trans (a) VALUES (1); load index into cache t3; CALL db1.test_if_commit(); IMPLICIT COMMIT -YES +NO # # SQLCOM_FLUSH # diff --git a/mysql-test/main/in_subq_cond_pushdown.result b/mysql-test/main/in_subq_cond_pushdown.result index 56e02c72cbb..a6246ec17d1 100644 --- a/mysql-test/main/in_subq_cond_pushdown.result +++ b/mysql-test/main/in_subq_cond_pushdown.result @@ -2568,7 +2568,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where 1 PRIMARY eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1 2 
MATERIALIZED ALL NULL NULL NULL NULL 12 Using where; Using temporary -3 DERIVED t2 ALL NULL NULL NULL NULL 12 Using temporary; Using filesort +3 DERIVED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary; Using filesort EXPLAIN FORMAT=JSON SELECT * FROM t1 WHERE t1.c>3 AND (t1.a,t1.b,t1.c) IN @@ -2623,7 +2623,8 @@ EXPLAIN "table_name": "t2", "access_type": "ALL", "rows": 12, - "filtered": 100 + "filtered": 100, + "attached_condition": "t2.e < 5" } } } @@ -2677,7 +2678,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where 1 PRIMARY eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1 2 MATERIALIZED ALL NULL NULL NULL NULL 12 Using where; Using temporary -3 DERIVED t2 ALL NULL NULL NULL NULL 12 Using temporary; Using filesort +3 DERIVED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary; Using filesort EXPLAIN FORMAT=JSON SELECT * FROM t1 WHERE t1.a>1 AND (t1.a,t1.b,t1.c) IN @@ -2731,7 +2732,8 @@ EXPLAIN "table_name": "t2", "access_type": "ALL", "rows": 12, - "filtered": 100 + "filtered": 100, + "attached_condition": "t2.e < 5 and t2.e > 1" } } } @@ -2786,7 +2788,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where 1 PRIMARY eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1 2 MATERIALIZED ALL NULL NULL NULL NULL 12 Using where; Using temporary -3 DERIVED t2 ALL NULL NULL NULL NULL 12 Using temporary; Using filesort +3 DERIVED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary; Using filesort EXPLAIN FORMAT=JSON SELECT * FROM t1 WHERE t1.a>1 AND t1.c<100 AND (t1.a,t1.b,t1.c) IN @@ -2841,7 +2843,8 @@ EXPLAIN "table_name": "t2", "access_type": "ALL", "rows": 12, - "filtered": 100 + "filtered": 100, + "attached_condition": "t2.e < 5 and t2.e > 1" } } } @@ -2965,7 +2968,7 @@ EXPLAIN "materialized": { "query_block": { "select_id": 3, - "having_condition": "max_g > 25", + "having_condition": "max_g > 25 and t2.e < 5 and t2.e > 1", "filesort": { "sort_key": "t2.f", "temporary_table": { @@ -3116,7 +3119,7 @@ EXPLAIN "materialized": { "query_block": { "select_id": 3, - "having_condition": "max_g > 25", + "having_condition": "max_g > 25 and t2.e < 5 and t2.e > 1", "filesort": { "sort_key": "t2.f", "temporary_table": { @@ -3268,7 +3271,7 @@ EXPLAIN "materialized": { "query_block": { "select_id": 3, - "having_condition": "max_g > 25", + "having_condition": "max_g > 25 and t2.e < 5 and t2.e > 1", "filesort": { "sort_key": "t2.f", "temporary_table": { diff --git a/mysql-test/main/insert_select.result b/mysql-test/main/insert_select.result index 1a3a38b1f35..5094638c92b 100644 --- a/mysql-test/main/insert_select.result +++ b/mysql-test/main/insert_select.result @@ -856,3 +856,12 @@ INSERT IGNORE INTO t1 SELECT t1.a FROM t1,t1 t2,t1 t3,t1 t4,t1 t5,t1 t6,t1 t7; SET GLOBAL myisam_data_pointer_size = @old_myisam_data_pointer_size; DROP TABLE t1; End of 5.1 tests +create table t1 (i int); +create table t2 as select value(i) as a from t1; +show create table t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` binary(0) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1, t2; +End of 5.5 tests diff --git a/mysql-test/main/insert_select.test b/mysql-test/main/insert_select.test index fda89f18d99..0b5cdf95daf 100644 --- a/mysql-test/main/insert_select.test +++ b/mysql-test/main/insert_select.test @@ -425,3 +425,13 @@ SET GLOBAL myisam_data_pointer_size = @old_myisam_data_pointer_size; DROP TABLE t1; 
--echo End of 5.1 tests + +# +# MDEV-15318 CREATE .. SELECT VALUES produces invalid table structure +# +create table t1 (i int); +create table t2 as select value(i) as a from t1; +show create table t2; +drop table t1, t2; + +--echo End of 5.5 tests diff --git a/mysql-test/main/intersect.result b/mysql-test/main/intersect.result index b589e8bd17e..66c7addfd36 100644 --- a/mysql-test/main/intersect.result +++ b/mysql-test/main/intersect.result @@ -607,6 +607,22 @@ NULL INTERSECT RESULT ALL NULL NULL NULL NULL NULL NULL NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1003 (/* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`) union /* select#3 */ select `__3`.`c` AS `c`,`__3`.`d` AS `d` from ((/* select#2 */ select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2`) intersect (/* select#4 */ select `test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t3`)) `__3` union (/* select#5 */ select 4 AS `4`,4 AS `4`) +set SQL_MODE=ORACLE; +(select a,b from t1) union (select c,d from t2) intersect (select e,f from t3) union (select 4,4); +a b +3 3 +4 4 +explain extended +(select a,b from t1) union (select c,d from t2) intersect (select e,f from t3) union (select 4,4); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 +2 UNION t2 ALL NULL NULL NULL NULL 2 100.00 +3 INTERSECT t3 ALL NULL NULL NULL NULL 2 100.00 +4 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNIT RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 (/* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1") union (/* select#2 */ select "test"."t2"."c" AS "c","test"."t2"."d" AS "d" from "test"."t2") intersect (/* select#3 */ select "test"."t3"."e" AS "e","test"."t3"."f" AS "f" from "test"."t3") union (/* select#4 */ select 4 AS "4",4 AS "4") +set SQL_MODE=default; (select e,f from t3) intersect (select c,d from t2) union (select a,b from t1) union (select 4,4); e f 3 3 @@ -623,6 +639,24 @@ id select_type table type possible_keys key key_len ref rows filtered Extra NULL UNIT RESULT ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1003 (/* select#1 */ select `test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t3`) intersect (/* select#2 */ select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2`) union (/* select#3 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`) union (/* select#4 */ select 4 AS `4`,4 AS `4`) +set SQL_MODE=ORACLE; +(select e,f from t3) intersect (select c,d from t2) union (select a,b from t1) union (select 4,4); +e f +3 3 +4 4 +5 5 +6 6 +explain extended +(select e,f from t3) intersect (select c,d from t2) union (select a,b from t1) union (select 4,4); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t3 ALL NULL NULL NULL NULL 2 100.00 +2 INTERSECT t2 ALL NULL NULL NULL NULL 2 100.00 +3 UNION t1 ALL NULL NULL NULL NULL 2 100.00 +4 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNIT RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 (/* select#1 */ select "test"."t3"."e" AS "e","test"."t3"."f" AS "f" from "test"."t3") intersect (/* select#2 */ select "test"."t2"."c" AS "c","test"."t2"."d" AS "d" from "test"."t2") union (/* select#3 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1") union (/* select#4 */ select 4 AS "4",4 AS "4") +set SQL_MODE=default; (/* select#1 */ select 
`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`) union /* select#3 */ select `__3`.`c` AS `c`,`__3`.`d` AS `d` from ((/* select#2 */ select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2`) intersect (/* select#4 */ select `test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t3`)) `__3` union (/* select#5 */ select 4 AS `4`,4 AS `4`); a b 3 3 @@ -772,4 +806,30 @@ SELECT * FROM t1 LEFT OUTER JOIN t2 LEFT OUTER JOIN t3 ON b < c ON a > b count(*) 14848 drop table t1,t2,t3; +# +# MDEV-13695: INTERSECT precedence is not in line with Oracle even +# in SQL_MODE=Oracle +# +create table t12(c1 int); +insert into t12 values(1); +insert into t12 values(2); +create table t13(c1 int); +insert into t13 values(1); +insert into t13 values(3); +create table t234(c1 int); +insert into t234 values(2); +insert into t234 values(3); +insert into t234 values(4); +set SQL_MODE=oracle; +select * from t13 union select * from t234 intersect select * from t12; +c1 +1 +2 +set SQL_MODE=default; +select * from t13 union select * from t234 intersect select * from t12; +c1 +1 +2 +3 +drop table t12,t13,t234; # End of 10.3 tests diff --git a/mysql-test/main/intersect.test b/mysql-test/main/intersect.test index d9d420c786b..fb5e991a24c 100644 --- a/mysql-test/main/intersect.test +++ b/mysql-test/main/intersect.test @@ -147,12 +147,25 @@ insert into t3 values (1,1),(3,3); (select a,b from t1) union (select c,d from t2) intersect (select e,f from t3) union (select 4,4); explain extended (select a,b from t1) union (select c,d from t2) intersect (select e,f from t3) union (select 4,4); +set SQL_MODE=ORACLE; +--sorted_result +(select a,b from t1) union (select c,d from t2) intersect (select e,f from t3) union (select 4,4); +explain extended +(select a,b from t1) union (select c,d from t2) intersect (select e,f from t3) union (select 4,4); +set SQL_MODE=default; + # test result of linear mix operation --sorted_result (select e,f from t3) intersect (select c,d from t2) union (select a,b from t1) union (select 4,4); explain extended (select e,f from t3) intersect (select c,d from t2) union (select a,b from t1) union (select 4,4); +set SQL_MODE=ORACLE; +--sorted_result +(select e,f from t3) intersect (select c,d from t2) union (select a,b from t1) union (select 4,4); +explain extended +(select e,f from t3) intersect (select c,d from t2) union (select a,b from t1) union (select 4,4); +set SQL_MODE=default; --sorted_result (/* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`) union /* select#3 */ select `__3`.`c` AS `c`,`__3`.`d` AS `d` from ((/* select#2 */ select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2`) intersect (/* select#4 */ select `test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t3`)) `__3` union (/* select#5 */ select 4 AS `4`,4 AS `4`); @@ -282,4 +295,29 @@ select count(*) from ( drop table t1,t2,t3; +--echo # +--echo # MDEV-13695: INTERSECT precedence is not in line with Oracle even +--echo # in SQL_MODE=Oracle +--echo # + +create table t12(c1 int); +insert into t12 values(1); +insert into t12 values(2); +create table t13(c1 int); +insert into t13 values(1); +insert into t13 values(3); +create table t234(c1 int); +insert into t234 values(2); +insert into t234 values(3); +insert into t234 values(4); + +set SQL_MODE=oracle; +--sorted_result +select * from t13 union select * from t234 intersect select * from t12; +set SQL_MODE=default; +--sorted_result +select * from t13 union select * from t234 intersect select * from t12; + 
+drop table t12,t13,t234; + --echo # End of 10.3 tests diff --git a/mysql-test/main/invisible_field.result b/mysql-test/main/invisible_field.result index c331f3fca88..876a80814e5 100644 --- a/mysql-test/main/invisible_field.result +++ b/mysql-test/main/invisible_field.result @@ -551,3 +551,69 @@ select * from t1 natural join t2; b c a d 2 3 1 4 drop table t1, t2; +CREATE TABLE t1 (c CHAR(3), t TIMESTAMP invisible); +INSERT INTO t1 (c,t) VALUES ('foo','2000-01-01 00:00:00'); +CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @a= 1; +INSERT INTO t1 SELECT * FROM t1; +DROP TABLE t1; +create or replace table t1 (a int, b int invisible); +insert into t1 values (1),(2); +select * from t1 into outfile 'f'; +load data infile 'f' into table t1; +select a,b from t1; +a b +1 NULL +2 NULL +1 NULL +2 NULL +load data infile 'f' into table t1 (a,@v) SET b=@v; +select a,b from t1; +a b +1 NULL +2 NULL +1 NULL +2 NULL +1 NULL +2 NULL +load data infile 'f' into table t1 (a,@v) SET b=a; +select a,b from t1; +a b +1 NULL +2 NULL +1 NULL +2 NULL +1 NULL +2 NULL +1 1 +2 2 +truncate table t1; +insert into t1(a,b) values (1,1),(2,2); +select a,b from t1 into outfile 'a'; +load data infile 'a' into table t1(a,b); +select a,b from t1; +a b +1 1 +2 2 +1 1 +2 2 +load data infile 'a' into table t1 (a,@v) SET b=@v; +select a,b from t1; +a b +1 1 +2 2 +1 1 +2 2 +1 1 +2 2 +load data infile 'a' into table t1 (a,@v) SET b=@v+2; +select a,b from t1; +a b +1 1 +2 2 +1 1 +2 2 +1 1 +2 2 +1 3 +2 4 +drop table t1; diff --git a/mysql-test/main/invisible_field.test b/mysql-test/main/invisible_field.test index 884abb1238d..0e3994a78ce 100644 --- a/mysql-test/main/invisible_field.test +++ b/mysql-test/main/invisible_field.test @@ -238,3 +238,36 @@ insert t2 (a,b,d) values (1,2,4), (10, 30, 40); select * from t1 join t2 using (a); select * from t1 natural join t2; drop table t1, t2; +## Triggers MDEV-15754 +CREATE TABLE t1 (c CHAR(3), t TIMESTAMP invisible); +INSERT INTO t1 (c,t) VALUES ('foo','2000-01-01 00:00:00'); + +CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @a= 1; +INSERT INTO t1 SELECT * FROM t1; +# Cleanup +DROP TABLE t1; +##LOAD DATA MDEV-15965 Invisible columns and LOAD DATA don't work well +## together: ER_WARN_TOO_FEW_RECORDS +create or replace table t1 (a int, b int invisible); +insert into t1 values (1),(2); + +select * from t1 into outfile 'f'; +load data infile 'f' into table t1; +select a,b from t1; +load data infile 'f' into table t1 (a,@v) SET b=@v; +select a,b from t1; +load data infile 'f' into table t1 (a,@v) SET b=a; +select a,b from t1; +truncate table t1; + +insert into t1(a,b) values (1,1),(2,2); +select a,b from t1 into outfile 'a'; +load data infile 'a' into table t1(a,b); +select a,b from t1; +load data infile 'a' into table t1 (a,@v) SET b=@v; +select a,b from t1; +load data infile 'a' into table t1 (a,@v) SET b=@v+2; +select a,b from t1; + +#cleanup +drop table t1; diff --git a/mysql-test/main/invisible_field_grant_completely.result b/mysql-test/main/invisible_field_grant_completely.result new file mode 100644 index 00000000000..208a9f24ddf --- /dev/null +++ b/mysql-test/main/invisible_field_grant_completely.result @@ -0,0 +1,68 @@ +set @old_debug= @@debug_dbug; +create user user_1; +show grants for user_1; +Grants for user_1@% +GRANT USAGE ON *.* TO 'user_1'@'%' +# create user +create database d; +use d; + +#Completely Invisible +set debug_dbug= "+d,test_completely_invisible"; +create table t1(a int); +insert into t1 values(1); +select a,invisible from t1; +a invisible +1 9 +set 
debug_dbug=@old_debug; +grant insert(a) on t1 to user_1; +grant update(a) on t1 to user_1; +grant select(a) on t1 to user_1; +grant delete on t1 to user_1; +connect con1, localhost, user_1,,test; +connection con1; +select user(); +user() +user_1@localhost +use d; +select * from t1; +a +1 +insert into t1 values(2); +select * from t1; +a +1 +2 +insert into t1(a) values(3); +select * from t1; +a +1 +2 +3 +select invisible,a from t1; +ERROR 42S22: Unknown column 'invisible' in 'field list' +delete from t1 where a =1; +update t1 set a=1 where a=3; +select * from t1; +a +2 +1 +connection default; +REVOKE ALL PRIVILEGES, GRANT OPTION FROM user_1; +connection con1; +select * from t1; +ERROR 42000: SELECT command denied to user 'user_1'@'localhost' for table 't1' +select invisible from t1; +ERROR 42000: SELECT command denied to user 'user_1'@'localhost' for table 't1' +disconnect con1; + +#Final Cleanup +connection default; +set debug_dbug= "+d,test_completely_invisible"; +select a,invisible from t1; +a invisible +2 9 +1 9 +drop user user_1; +drop database d; +set @old_debug= @@debug_dbug; diff --git a/mysql-test/main/invisible_field_grant_completely.test b/mysql-test/main/invisible_field_grant_completely.test new file mode 100644 index 00000000000..aeca66b97d2 --- /dev/null +++ b/mysql-test/main/invisible_field_grant_completely.test @@ -0,0 +1,57 @@ +# Grant tests not performed with embedded server +-- source include/not_embedded.inc + +--source include/have_debug.inc +##TEST for invisible coloumn level 3 +set @old_debug= @@debug_dbug; +create user user_1; +show grants for user_1; +--echo # create user +create database d; +use d; + +--echo +--echo #Completely Invisible +set debug_dbug= "+d,test_completely_invisible"; +create table t1(a int); +insert into t1 values(1); +select a,invisible from t1; +set debug_dbug=@old_debug; +grant insert(a) on t1 to user_1; +grant update(a) on t1 to user_1; +grant select(a) on t1 to user_1; +grant delete on t1 to user_1; +connect (con1, localhost, user_1,,test); +connection con1; +select user(); +use d; +select * from t1; +insert into t1 values(2); +select * from t1; +insert into t1(a) values(3); +select * from t1; +--error ER_BAD_FIELD_ERROR +select invisible,a from t1; +delete from t1 where a =1; +update t1 set a=1 where a=3; +select * from t1; + +connection default; +REVOKE ALL PRIVILEGES, GRANT OPTION FROM user_1; + +connection con1; +--error ER_TABLEACCESS_DENIED_ERROR +select * from t1; +--error ER_TABLEACCESS_DENIED_ERROR +select invisible from t1; + +disconnect con1; +--source include/wait_until_disconnected.inc +--echo +--echo #Final Cleanup +connection default; +set debug_dbug= "+d,test_completely_invisible"; +select a,invisible from t1; +drop user user_1; +drop database d; +set @old_debug= @@debug_dbug; diff --git a/mysql-test/main/invisible_field_grant_system.result b/mysql-test/main/invisible_field_grant_system.result new file mode 100644 index 00000000000..77acfdae744 --- /dev/null +++ b/mysql-test/main/invisible_field_grant_system.result @@ -0,0 +1,68 @@ +create user user_1; +show grants for user_1; +Grants for user_1@% +GRANT USAGE ON *.* TO 'user_1'@'%' +# create user +create database d; +use d; + +#System_Invisible +create table t1(a int) with system versioning; +insert into t1 values(1); +select a from t1; +a +1 +select count(row_start) from t1; +count(row_start) +1 +grant insert(a) on t1 to user_1; +grant update(a) on t1 to user_1; +grant select(a) on t1 to user_1; +grant delete on t1 to user_1; +connect con1, localhost, user_1,,test; 
+connection con1; +select user(); +user() +user_1@localhost +use d; +select * from t1; +a +1 +insert into t1 values(2); +select * from t1; +a +1 +2 +insert into t1(a) values(3); +select * from t1; +a +1 +2 +3 +select a from t1; +a +1 +2 +3 +select count(row_start) from t1; +count(row_start) +3 +delete from t1 where a =1; +update t1 set a=1 where a=3; +select * from t1; +a +2 +1 +connection default; +REVOKE ALL PRIVILEGES, GRANT OPTION FROM user_1; +connection con1; +select * from t1; +ERROR 42000: SELECT command denied to user 'user_1'@'localhost' for table 't1' +select count(row_start) from t1; +ERROR 42000: SELECT command denied to user 'user_1'@'localhost' for table 't1' +disconnect con1; + +#Cleanup +connection default; +drop user user_1; +drop database d; diff --git a/mysql-test/main/invisible_field_grant_system.test b/mysql-test/main/invisible_field_grant_system.test new file mode 100644 index 00000000000..fd54ea72a5a --- /dev/null +++ b/mysql-test/main/invisible_field_grant_system.test @@ -0,0 +1,52 @@ +# Grant tests not performed with embedded server +-- source include/not_embedded.inc + +##TEST for invisible coloumn level 2 +create user user_1; +show grants for user_1; +--echo # create user +create database d; +use d; + +--echo +--echo #System_Invisible +create table t1(a int) with system versioning; +insert into t1 values(1); +select a from t1; +select count(row_start) from t1; +grant insert(a) on t1 to user_1; +grant update(a) on t1 to user_1; +grant select(a) on t1 to user_1; +grant delete on t1 to user_1; +connect (con1, localhost, user_1,,test); +connection con1; +select user(); +use d; +select * from t1; +insert into t1 values(2); +select * from t1; +insert into t1(a) values(3); +select * from t1; +select a from t1; +select count(row_start) from t1; +delete from t1 where a =1; +update t1 set a=1 where a=3; +select * from t1; +connection default; +REVOKE ALL PRIVILEGES, GRANT OPTION FROM user_1; + +connection con1; +--error ER_TABLEACCESS_DENIED_ERROR +select * from t1; +--error ER_TABLEACCESS_DENIED_ERROR +select count(row_start) from t1; + +disconnect con1; +--source include/wait_until_disconnected.inc + +--echo +--echo #Cleanup +--source include/wait_until_disconnected.inc +connection default; +drop user user_1; +drop database d; diff --git a/mysql-test/main/invisible_partition.result b/mysql-test/main/invisible_partition.result new file mode 100644 index 00000000000..4380083c352 --- /dev/null +++ b/mysql-test/main/invisible_partition.result @@ -0,0 +1,18 @@ +CREATE TABLE t1 (a INT NOT NULL, KEY (a)) ENGINE=MEMORY PARTITION BY KEY(a) PARTITIONS 4; +INSERT INTO t1 VALUES (1),(2); +SET debug_dbug="+d,test_pseudo_invisible"; +ALTER TABLE t1 REBUILD PARTITION p2; +ERROR HY000: Internal error: Don't to it with test_pseudo_invisible +SET debug_dbug=''; +ALTER TABLE t1 REPAIR PARTITION p1,p2,p3; +Table Op Msg_type Msg_text +test.t1 repair status OK +Drop table t1; +CREATE TABLE t1 (i INT) PARTITION BY HASH (i) PARTITIONS 3; +SET debug_dbug= "+d,test_pseudo_invisible"; +ALTER TABLE t1 COALESCE PARTITION 1; +ERROR HY000: Internal error: Don't to it with test_pseudo_invisible +SET debug_dbug= ""; +SELECT * FROM t1; +i +DROP TABLE t1; diff --git a/mysql-test/main/invisible_partition.test b/mysql-test/main/invisible_partition.test new file mode 100644 index 00000000000..dd3b3eeaaae --- /dev/null +++ b/mysql-test/main/invisible_partition.test @@ -0,0 +1,24 @@ +--source include/have_partition.inc +--source include/have_binlog_format_row.inc +--source include/have_debug.inc + +CREATE 
TABLE t1 (a INT NOT NULL, KEY (a)) ENGINE=MEMORY PARTITION BY KEY(a) PARTITIONS 4; +INSERT INTO t1 VALUES (1),(2); +SET debug_dbug="+d,test_pseudo_invisible"; +--error ER_INTERNAL_ERROR +ALTER TABLE t1 REBUILD PARTITION p2; +SET debug_dbug=''; +ALTER TABLE t1 REPAIR PARTITION p1,p2,p3; + +# Cleanup +Drop table t1; +#Mdev-14850 +CREATE TABLE t1 (i INT) PARTITION BY HASH (i) PARTITIONS 3; +SET debug_dbug= "+d,test_pseudo_invisible"; +--error ER_INTERNAL_ERROR +ALTER TABLE t1 COALESCE PARTITION 1; +SET debug_dbug= ""; +SELECT * FROM t1; + +# Cleanup +DROP TABLE t1; diff --git a/mysql-test/main/limit_rows_examined.result b/mysql-test/main/limit_rows_examined.result index 7d1ca948c8b..8458e063d97 100644 --- a/mysql-test/main/limit_rows_examined.result +++ b/mysql-test/main/limit_rows_examined.result @@ -710,8 +710,6 @@ SELECT DISTINCT a FROM t1, t2 HAVING a > ' ' LIMIT ROWS EXAMINED 16; a USA CAN -Warnings: -Warning 1931 Query execution was interrupted. The query examined at least 17 rows, which exceeds LIMIT ROWS EXAMINED (16). The query result may be incomplete drop table t1,t2,t3; set @@optimizer_switch='default'; diff --git a/mysql-test/main/multi_update.result b/mysql-test/main/multi_update.result index 45239f6e090..c40de47668a 100644 --- a/mysql-test/main/multi_update.result +++ b/mysql-test/main/multi_update.result @@ -968,3 +968,75 @@ NULL 6 7 7 8 8 drop table t1, t2; +create table t1 (i int) engine=memory; +insert t1 values (1),(2); +create table t2 (f int) engine=myisam; +insert t2 values (1),(2); +explain update t1, t2 set f = 126 order by f limit 2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 +update t1, t2 set f = 126 order by f limit 2; +select * from t2; +f +126 +2 +drop table t1, t2; +create table t0(a int); +insert t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 (a int, b int, c int, key(a)); +insert t1 select a,a,a from t0; +create table t2 as select * from t1; +create table t3 as select * from t1; +select * from t1, t2 where t1.a=t2.a and t1.b in (select b from t3 where t3.c<=t2.c) order by t2.c, t1.c limit 5; +a b c a b c +0 0 0 0 0 0 +1 1 1 1 1 1 +2 2 2 2 2 2 +3 3 3 3 3 3 +4 4 4 4 4 4 +set optimizer_switch='firstmatch=off'; +explain update t1, t2 set t2.c=1 where t1.a=t2.a and t1.b in (select b from t3 where t3.c< t2.c) order by t2.c, t1.c limit 10; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 10 Using temporary; Using filesort +1 PRIMARY t1 ALL a NULL NULL NULL 10 Using where +1 PRIMARY t3 ALL NULL NULL NULL NULL 10 Using where; Start temporary; End temporary +update t1, t2 set t2.c=1 where t1.a=t2.a and t1.b in (select b from t3 where t3.c<=t2.c) order by t2.c, t1.c limit 5; +select * from t2; +a b c +0 0 1 +1 1 1 +2 2 1 +3 3 1 +4 4 1 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +set optimizer_switch=default; +drop table t0,t1,t2,t3; +create table t0 (x int); +create table t1 (a int); +create table t2 (b int, c int default 0); +insert t0 (x) values (0),(10); +insert t1 (a) values (1), (2); +insert t2 (b) values (1), (2); +create view v1 as select t2.b,t2.c from t1, t2 +where t1.a=t2.b and t2.b < 3 with check option; +select * from t0 join v1 on (x=c); +x b c +0 1 0 +0 2 0 +explain update v1,t0 set c=1 where b=1 and x=c order by x,b limit 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where; Using 
temporary; Using filesort +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where +1 SIMPLE t0 ALL NULL NULL NULL NULL 2 Using where +update v1,t0 set c=1 where b<3 and x=c order by x,b limit 1; +select * from v1; +b c +1 1 +2 0 +drop view v1; +drop table t0, t1,t2; diff --git a/mysql-test/main/multi_update.test b/mysql-test/main/multi_update.test index 5feebe87a5a..42e34d1e4a1 100644 --- a/mysql-test/main/multi_update.test +++ b/mysql-test/main/multi_update.test @@ -914,3 +914,49 @@ update t1 set c1=NULL; update t1, t2 set t1.c1=t2.c3 where t1.c3=t2.c3 order by t1.c3 desc limit 2; select * from t1; drop table t1, t2; + +# +# MDEV-14551 Can't find record in table on multi-table update with ORDER BY +# + +# simple test with multi-update and Using temporary: +create table t1 (i int) engine=memory; +insert t1 values (1),(2); +create table t2 (f int) engine=myisam; +insert t2 values (1),(2); +explain update t1, t2 set f = 126 order by f limit 2; +update t1, t2 set f = 126 order by f limit 2; +select * from t2; +drop table t1, t2; + +# test with DuplicateElimination +# (so that keep_current_rowid is set for DuplicateElimination too) +create table t0(a int); +insert t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 (a int, b int, c int, key(a)); +insert t1 select a,a,a from t0; +create table t2 as select * from t1; +create table t3 as select * from t1; +select * from t1, t2 where t1.a=t2.a and t1.b in (select b from t3 where t3.c<=t2.c) order by t2.c, t1.c limit 5; +set optimizer_switch='firstmatch=off'; +explain update t1, t2 set t2.c=1 where t1.a=t2.a and t1.b in (select b from t3 where t3.c< t2.c) order by t2.c, t1.c limit 10; +update t1, t2 set t2.c=1 where t1.a=t2.a and t1.b in (select b from t3 where t3.c<=t2.c) order by t2.c, t1.c limit 5; +select * from t2; +set optimizer_switch=default; +drop table t0,t1,t2,t3; + +# test WITH CHECK OPTION +create table t0 (x int); +create table t1 (a int); +create table t2 (b int, c int default 0); +insert t0 (x) values (0),(10); +insert t1 (a) values (1), (2); +insert t2 (b) values (1), (2); +create view v1 as select t2.b,t2.c from t1, t2 + where t1.a=t2.b and t2.b < 3 with check option; +select * from t0 join v1 on (x=c); +explain update v1,t0 set c=1 where b=1 and x=c order by x,b limit 1; +update v1,t0 set c=1 where b<3 and x=c order by x,b limit 1; +select * from v1; +drop view v1; +drop table t0, t1,t2; diff --git a/mysql-test/main/myisam_recover.result b/mysql-test/main/myisam_recover.result index 1af3dbf9ad5..92df67b42d1 100644 --- a/mysql-test/main/myisam_recover.result +++ b/mysql-test/main/myisam_recover.result @@ -87,7 +87,6 @@ a 6 Warnings: Error 145 Table 't1' is marked as crashed and should be repaired -Error 1194 Table 't1' is marked as crashed and should be repaired Error 1034 Number of rows changed from 3 to 6 # # Cleanup @@ -140,7 +139,6 @@ a 4 Warnings: Error 145 Table 't1' is marked as crashed and should be repaired -Error 1194 Table 't1' is marked as crashed and should be repaired Error 1034 Number of rows changed from 1 to 2 connect con2, localhost, root; ALTER TABLE t2 ADD val INT; diff --git a/mysql-test/main/mysql.test b/mysql-test/main/mysql.test index 022a24ce25e..01953ba2112 100644 --- a/mysql-test/main/mysql.test +++ b/mysql-test/main/mysql.test @@ -53,14 +53,22 @@ drop table t1; # # Bug#17939 Wrong table format when using UTF8 strings -# ---exec $MYSQL --default-character-set=utf8 --table -e "SELECT 'John Doe' as '__tañgè Ñãmé'" 2>&1 ---exec $MYSQL --default-character-set=utf8 --table -e "SELECT '__tañgè Ñãmé' 
as 'John Doe'" 2>&1 +write_file $MYSQL_TMP_DIR/mysql_in; +SELECT 'John Doe' as '__tañgè Ñãmé'; +SELECT '__tañgè Ñãmé' as 'John Doe'; +EOF +--exec $MYSQL --default-character-set=utf8 --table < $MYSQL_TMP_DIR/mysql_in 2>&1 +remove_file $MYSQL_TMP_DIR/mysql_in; # # Bug#18265 -- mysql client: No longer right-justifies numeric columns # ---exec $MYSQL -t --default-character-set utf8 test -e "create table t1 (i int, j int, k char(25) charset utf8); insert into t1 (i) values (1); insert into t1 (k) values ('<----------------------->'); insert into t1 (k) values ('<-----'); insert into t1 (k) values ('Τη γλώσσα'); insert into t1 (k) values ('ᛖᚴ ᚷᛖᛏ'); select * from t1; DROP TABLE t1;" +write_file $MYSQL_TMP_DIR/mysql_in; +create table t1 (i int, j int, k char(25) charset utf8); insert into t1 (i) values (1); insert into t1 (k) values ('<----------------------->'); insert into t1 (k) values ('<-----'); insert into t1 (k) values ('Τη γλώσσα'); insert into t1 (k) values ('ᛖᚴ ᚷᛖᛏ'); select * from t1; DROP TABLE t1; +EOF +--exec $MYSQL -t --default-character-set utf8 test < $MYSQL_TMP_DIR/mysql_in +remove_file $MYSQL_TMP_DIR/mysql_in; + # # "DESCRIBE" commands may return strange NULLness flags. diff --git a/mysql-test/main/mysql_client_test.result b/mysql-test/main/mysql_client_test.result index 83ef8d442b3..6f65979517b 100644 --- a/mysql-test/main/mysql_client_test.result +++ b/mysql-test/main/mysql_client_test.result @@ -122,5 +122,127 @@ EOF mysql_stmt_next_result(): 0; field_count: 0 # ------------------------------------ + +# cat MYSQL_TMP_DIR/test_explain_meta.out.log +# ------------------------------------ +SELECT number of fields: 1 +EXPALIN number of fields: 10 + - 0: name: 'id'/''; table: ''/''; db: ''; catalog: 'def'; length: 3; max_length: 0; type: 8; decimals: 0 + - 1: name: 'select_type'/''; table: ''/''; db: ''; catalog: 'def'; length: 57; max_length: 0; type: 253; decimals: 39 + - 2: name: 'table'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 3: name: 'type'/''; table: ''/''; db: ''; catalog: 'def'; length: 30; max_length: 0; type: 253; decimals: 39 + - 4: name: 'possible_keys'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39 + - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0 + - 9: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39 +EXPALIN JSON number of fields: 1 + - 0: name: 'EXPLAIN'/''; table: ''/''; db: ''; catalog: 'def'; length: 234; max_length: 0; type: 253; decimals: 39 +ANALYZE number of fields: 13 + - 0: name: 'id'/''; table: ''/''; db: ''; catalog: 'def'; length: 3; max_length: 0; type: 8; decimals: 0 + - 1: name: 'select_type'/''; table: ''/''; db: ''; catalog: 'def'; length: 57; max_length: 0; type: 253; decimals: 39 + - 2: name: 'table'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 3: name: 'type'/''; table: ''/''; db: ''; catalog: 'def'; length: 30; max_length: 0; type: 253; decimals: 39 + - 4: name: 'possible_keys'/''; table: ''/''; db: ''; catalog: 
'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39 + - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0 + - 9: name: 'r_rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 10 + - 10: name: 'filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2 + - 11: name: 'r_filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2 + - 12: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39 +ANALYZE JSON number of fields: 1 + - 0: name: 'ANALYZE'/''; table: ''/''; db: ''; catalog: 'def'; length: 234; max_length: 0; type: 253; decimals: 39 +EXPALIN INSERT number of fields: 10 + - 0: name: 'id'/''; table: ''/''; db: ''; catalog: 'def'; length: 3; max_length: 0; type: 8; decimals: 0 + - 1: name: 'select_type'/''; table: ''/''; db: ''; catalog: 'def'; length: 57; max_length: 0; type: 253; decimals: 39 + - 2: name: 'table'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 3: name: 'type'/''; table: ''/''; db: ''; catalog: 'def'; length: 30; max_length: 0; type: 253; decimals: 39 + - 4: name: 'possible_keys'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39 + - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0 + - 9: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39 +EXPALIN JSON INSERT number of fields: 1 + - 0: name: 'EXPLAIN'/''; table: ''/''; db: ''; catalog: 'def'; length: 234; max_length: 0; type: 253; decimals: 39 +ANALYZE INSERT number of fields: 13 + - 0: name: 'id'/''; table: ''/''; db: ''; catalog: 'def'; length: 3; max_length: 0; type: 8; decimals: 0 + - 1: name: 'select_type'/''; table: ''/''; db: ''; catalog: 'def'; length: 57; max_length: 0; type: 253; decimals: 39 + - 2: name: 'table'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 3: name: 'type'/''; table: ''/''; db: ''; catalog: 'def'; length: 30; max_length: 0; type: 253; decimals: 39 + - 4: name: 'possible_keys'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39 + - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0 + - 9: 
name: 'r_rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 10 + - 10: name: 'filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2 + - 11: name: 'r_filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2 + - 12: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39 +ANALYZE JSON INSERT number of fields: 1 + - 0: name: 'ANALYZE'/''; table: ''/''; db: ''; catalog: 'def'; length: 234; max_length: 0; type: 253; decimals: 39 +EXPALIN UPDATE number of fields: 10 + - 0: name: 'id'/''; table: ''/''; db: ''; catalog: 'def'; length: 3; max_length: 0; type: 8; decimals: 0 + - 1: name: 'select_type'/''; table: ''/''; db: ''; catalog: 'def'; length: 57; max_length: 0; type: 253; decimals: 39 + - 2: name: 'table'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 3: name: 'type'/''; table: ''/''; db: ''; catalog: 'def'; length: 30; max_length: 0; type: 253; decimals: 39 + - 4: name: 'possible_keys'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39 + - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0 + - 9: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39 +EXPALIN JSON UPDATE number of fields: 1 + - 0: name: 'EXPLAIN'/''; table: ''/''; db: ''; catalog: 'def'; length: 234; max_length: 0; type: 253; decimals: 39 +ANALYZE UPDATE number of fields: 13 + - 0: name: 'id'/''; table: ''/''; db: ''; catalog: 'def'; length: 3; max_length: 0; type: 8; decimals: 0 + - 1: name: 'select_type'/''; table: ''/''; db: ''; catalog: 'def'; length: 57; max_length: 0; type: 253; decimals: 39 + - 2: name: 'table'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 3: name: 'type'/''; table: ''/''; db: ''; catalog: 'def'; length: 30; max_length: 0; type: 253; decimals: 39 + - 4: name: 'possible_keys'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39 + - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0 + - 9: name: 'r_rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 10 + - 10: name: 'filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2 + - 11: name: 'r_filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2 + - 12: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39 +ANALYZE JSON UPDATE number of fields: 1 + - 0: name: 'ANALYZE'/''; 
table: ''/''; db: ''; catalog: 'def'; length: 234; max_length: 0; type: 253; decimals: 39 +EXPALIN DELETE number of fields: 10 + - 0: name: 'id'/''; table: ''/''; db: ''; catalog: 'def'; length: 3; max_length: 0; type: 8; decimals: 0 + - 1: name: 'select_type'/''; table: ''/''; db: ''; catalog: 'def'; length: 57; max_length: 0; type: 253; decimals: 39 + - 2: name: 'table'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 3: name: 'type'/''; table: ''/''; db: ''; catalog: 'def'; length: 30; max_length: 0; type: 253; decimals: 39 + - 4: name: 'possible_keys'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39 + - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0 + - 9: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39 +EXPALIN JSON DELETE number of fields: 1 + - 0: name: 'EXPLAIN'/''; table: ''/''; db: ''; catalog: 'def'; length: 234; max_length: 0; type: 253; decimals: 39 +ANALYZE DELETE number of fields: 13 + - 0: name: 'id'/''; table: ''/''; db: ''; catalog: 'def'; length: 3; max_length: 0; type: 8; decimals: 0 + - 1: name: 'select_type'/''; table: ''/''; db: ''; catalog: 'def'; length: 57; max_length: 0; type: 253; decimals: 39 + - 2: name: 'table'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 3: name: 'type'/''; table: ''/''; db: ''; catalog: 'def'; length: 30; max_length: 0; type: 253; decimals: 39 + - 4: name: 'possible_keys'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39 + - 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39 + - 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39 + - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0 + - 9: name: 'r_rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 10 + - 10: name: 'filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2 + - 11: name: 'r_filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2 + - 12: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39 +ANALYZE JSON DELETE number of fields: 1 + - 0: name: 'ANALYZE'/''; table: ''/''; db: ''; catalog: 'def'; length: 234; max_length: 0; type: 253; decimals: 39 +# ------------------------------------ + SET @@global.general_log= @old_general_log; SET @@global.slow_query_log= @old_slow_query_log; diff --git a/mysql-test/main/mysql_client_test.test b/mysql-test/main/mysql_client_test.test index 260473aa0d0..2c6febd8ffa 100644 --- a/mysql-test/main/mysql_client_test.test +++ b/mysql-test/main/mysql_client_test.test @@ -29,5 +29,12 @@ echo ok; --echo # 
------------------------------------ --echo +--echo +--echo # cat MYSQL_TMP_DIR/test_explain_meta.out.log +--echo # ------------------------------------ +--cat_file $MYSQL_TMP_DIR/test_explain_meta.out.log +--echo # ------------------------------------ +--echo + SET @@global.general_log= @old_general_log; SET @@global.slow_query_log= @old_slow_query_log; diff --git a/mysql-test/main/mysql_cp932.test b/mysql-test/main/mysql_cp932.test index 60a129c3805..8fba5750d89 100644 --- a/mysql-test/main/mysql_cp932.test +++ b/mysql-test/main/mysql_cp932.test @@ -10,13 +10,43 @@ # BUG#16217 - MySQL client misinterprets multi-byte char as escape `\' # +let $mysql_in= $MYSQL_TMP_DIR/mysql_in; + # new command \C or charset ---exec $MYSQL --default-character-set=utf8 test -e "\C cp932 \g" ---exec $MYSQL --default-character-set=cp932 test -e "charset utf8;" +write_file $mysql_in; +\C cp932 \g +EOF +--exec $MYSQL --default-character-set=utf8 test < $mysql_in +remove_file $mysql_in; + +write_file $mysql_in; +charset utf8; +EOF +--exec $MYSQL --default-character-set=cp932 test < $mysql_in +remove_file $mysql_in; # its usage to switch internally in mysql to requested charset ---exec $MYSQL --default-character-set=utf8 test -e "charset cp932; select 'ƒ\'; create table t1 (c_cp932 TEXT CHARACTER SET cp932); insert into t1 values('ƒ\'); select * from t1; drop table t1;" ---exec $MYSQL --default-character-set=utf8 test -e "charset cp932; select 'ƒ\'" ---exec $MYSQL --default-character-set=utf8 test -e "/*charset cp932 */; set character_set_client= cp932; select 'ƒ\'" ---exec $MYSQL --default-character-set=utf8 test -e "/*!\C cp932 */; set character_set_client= cp932; select 'ƒ\'" +write_file $mysql_in; +charset cp932; select 'ƒ\'; create table t1 (c_cp932 TEXT CHARACTER SET cp932); insert into t1 values('ƒ\'); select * from t1; drop table t1; +EOF +--exec $MYSQL --default-character-set=utf8 test < $mysql_in +remove_file $mysql_in; + +write_file $mysql_in; +charset cp932; select 'ƒ\' +EOF +--exec $MYSQL --default-character-set=utf8 test < $mysql_in +remove_file $mysql_in; + +write_file $mysql_in; +/*charset cp932 */; set character_set_client= cp932; select 'ƒ\' +EOF +--exec $MYSQL --default-character-set=utf8 test < $mysql_in +remove_file $mysql_in; + +write_file $mysql_in; +/*!\C cp932 */; set character_set_client= cp932; select 'ƒ\' +EOF +--exec $MYSQL --default-character-set=utf8 test < $mysql_in +remove_file $mysql_in; diff --git a/mysql-test/main/mysqld--help.result b/mysql-test/main/mysqld--help.result index 3ab9b5a04e8..6dcf47cac96 100644 --- a/mysql-test/main/mysqld--help.result +++ b/mysql-test/main/mysqld--help.result @@ -2,14 +2,19 @@ Windows bug: happens when a new line is exactly at the right offset. The following options may be given as the first argument: --print-defaults Print the program argument list and exit. --no-defaults Don't read default options from any option file. +The following specify which files/extra groups are read (specified before remaining options): --defaults-file=# Only read default options from the given file #. --defaults-extra-file=# Read this file after the global files are read. +--defaults-group-suffix=# Additionally read default groups with # appended as a suffix. --allow-suspicious-udfs Allows use of UDFs consisting of only one symbol xxx() without corresponding xxx_init() or xxx_deinit(). That also means that one can load any function from any library, for example exit() from libc.so + --alter-algorithm[=name] + Specify the alter table algorithm. 
One of: DEFAULT, COPY, + INPLACE, NOCOPY, INSTANT -a, --ansi Use ANSI SQL syntax instead of MySQL syntax. This mode will also set transaction isolation level 'serializable'. --auto-increment-increment[=#] @@ -615,7 +620,10 @@ The following options may be given as the first argument: connection before aborting the write --old Use compatible behavior from previous MariaDB version. See also --old-mode - --old-alter-table Use old, non-optimized alter table + --old-alter-table[=name] + Alias for alter_algorithm. Deprecated. Use + --alter-algorithm instead.. One of: DEFAULT, COPY, + INPLACE, NOCOPY, INSTANT --old-mode=name Used to emulate old behavior from earlier MariaDB or MySQL versions. Any combination of: NO_DUP_KEY_WARNINGS_WITH_IGNORE, NO_PROGRESS_INFO, @@ -1031,6 +1039,14 @@ The following options may be given as the first argument: --secure-file-priv=name Limit LOAD DATA, SELECT ... OUTFILE, and LOAD_FILE() to files within specified directory + --secure-timestamp=name + Restricts direct setting of a session timestamp. Possible + levels are: YES - timestamp cannot deviate from the + system clock, REPLICATION - replication thread can adjust + timestamp to match the master's, SUPER - a user with this + privilege and a replication thread can adjust timestamp, + NO - historical behavior, anyone can modify session + timestamp --server-id=# Uniquely identifies the server instance in the community of replication partners --session-track-schema @@ -1223,7 +1239,7 @@ The following options may be given as the first argument: --system-versioning-alter-history=name Versioning ALTER TABLE mode. ERROR: Fail ALTER with error; KEEP: Keep historical system rows and subject them - to ALTER; + to ALTER --table-cache=# Deprecated; use --table-open-cache instead. --table-definition-cache=# The number of cached table definitions @@ -1333,6 +1349,7 @@ The following options may be given as the first argument: Variables (--variable-name=value) allow-suspicious-udfs FALSE +alter-algorithm DEFAULT auto-increment-increment 1 auto-increment-offset 1 autocommit TRUE @@ -1508,7 +1525,7 @@ net-read-timeout 30 net-retry-count 10 net-write-timeout 60 old FALSE -old-alter-table FALSE +old-alter-table DEFAULT old-mode old-passwords FALSE old-style-user-limits FALSE @@ -1608,6 +1625,7 @@ rpl-semi-sync-slave-trace-level 32 safe-user-create FALSE secure-auth TRUE secure-file-priv (No default value) +secure-timestamp NO server-id 1 session-track-schema TRUE session-track-state-change FALSE diff --git a/mysql-test/main/mysqldump.result b/mysql-test/main/mysqldump.result index a1e206dffd7..620afb32666 100644 --- a/mysql-test/main/mysqldump.result +++ b/mysql-test/main/mysqldump.result @@ -4478,6 +4478,14 @@ TRUNCATE mysql.event; SHOW EVENTS; Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation # +# MDEV-13336: add ignore-database option +# with --all-databases +# +DROP DATABASE test; +SHOW DATABASES LIKE 'test'; +Database (test) +CREATE DATABASE test; +# # Bug#31113 mysqldump 5.1 can't handle a dash ("-") in database names # create database `test-database`; diff --git a/mysql-test/main/mysqldump.test b/mysql-test/main/mysqldump.test index 6dde5aa7bc6..ebe54bac44a 100644 --- a/mysql-test/main/mysqldump.test +++ b/mysql-test/main/mysqldump.test @@ -1926,6 +1926,15 @@ TRUNCATE mysql.event; SHOW EVENTS; --remove_file $MYSQLTEST_VARDIR/tmp/bug29938.sql +--echo # +--echo # MDEV-13336: add ignore-database option +--echo # with 
--all-databases +--echo # +--exec $MYSQL_DUMP --ignore-database test --all-databases > $MYSQLTEST_VARDIR/tmp/mysqldump-MDEV-13336.sql +DROP DATABASE test; +--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/mysqldump-MDEV-13336.sql +SHOW DATABASES LIKE 'test'; +CREATE DATABASE test; --echo # --echo # Bug#31113 mysqldump 5.1 can't handle a dash ("-") in database names diff --git a/mysql-test/main/olap.result b/mysql-test/main/olap.result index bcc96d4951d..6fdbe008016 100644 --- a/mysql-test/main/olap.result +++ b/mysql-test/main/olap.result @@ -816,3 +816,17 @@ a int(11) YES 0 b int(20) YES 0 DROP VIEW v1; DROP TABLE t1; +# +# MDEV-15576: Server crashed in Cached_item_str::cmp / sortcmp or +# Assertion `item->null_value' failed in +# Type_handler_temporal_result::make_sort_key upon SELECT with NULLIF +# and ROLLUP +# +CREATE TABLE t1 (i INT); +INSERT INTO t1 VALUES (1),(2); +SELECT NULLIF( CAST( 'foo' AS DATE ), NULL & 'bar' ) AS f FROM t1 GROUP BY f WITH ROLLUP; +f +NULL +NULL +DROP TABLE t1; +# End of 10.3 Tests diff --git a/mysql-test/main/olap.test b/mysql-test/main/olap.test index 3da08581a87..74dbe8ba10b 100644 --- a/mysql-test/main/olap.test +++ b/mysql-test/main/olap.test @@ -447,3 +447,20 @@ DESC v1; DROP VIEW v1; DROP TABLE t1; + +--echo # +--echo # MDEV-15576: Server crashed in Cached_item_str::cmp / sortcmp or +--echo # Assertion `item->null_value' failed in +--echo # Type_handler_temporal_result::make_sort_key upon SELECT with NULLIF +--echo # and ROLLUP +--echo # + +CREATE TABLE t1 (i INT); +INSERT INTO t1 VALUES (1),(2); +--disable_warnings +SELECT NULLIF( CAST( 'foo' AS DATE ), NULL & 'bar' ) AS f FROM t1 GROUP BY f WITH ROLLUP; +--enable_warnings +DROP TABLE t1; + + +--echo # End of 10.3 Tests diff --git a/mysql-test/main/parser.result b/mysql-test/main/parser.result index a1c6e86a129..2394c958b47 100644 --- a/mysql-test/main/parser.result +++ b/mysql-test/main/parser.result @@ -1349,3 +1349,334 @@ SET GLOBAL a=10; END; $$ ERROR HY000: Unknown system variable 'a' +# +# MDEV-16202 Latest changes made erroneously some keywords reserved in sql_mode=ORACLE +# +CREATE PROCEDURE p1(name VARCHAR(64), pattern TEXT) +BEGIN +DECLARE query TEXT DEFAULT REPLACE(pattern, 'name', name); +DECLARE CONTINUE HANDLER FOR SQLEXCEPTION +BEGIN +SHOW ERRORS; +END; +SELECT query AS ''; +EXECUTE IMMEDIATE query; +END; +$$ +CREATE PROCEDURE p2(name VARCHAR(64)) +BEGIN +CALL p1(name, 'BEGIN NOT ATOMIC DECLARE name INT; SET name=10; SELECT name; END'); +EXECUTE IMMEDIATE REPLACE('CREATE TABLE t1 (name INT)', 'name', name); +CALL p1(name, 'SELECT name FROM t1'); +CALL p1(name, 'SELECT name ''alias'' FROM t1'); +CALL p1(name, 'SELECT name()'); +CALL p1(name, 'SELECT name.name()'); +CALL p1(name, 'SELECT name DATE FROM t1'); +CALL p1(name, 'SELECT name HISTORY FROM t1'); +CALL p1(name, 'SELECT name NEXT FROM t1'); +CALL p1(name, 'SELECT name PERIOD FROM t1'); +CALL p1(name, 'SELECT name PREVIOUS FROM t1'); +CALL p1(name, 'SELECT name SYSTEM FROM t1'); +CALL p1(name, 'SELECT name SYSTEM_TIME FROM t1'); +CALL p1(name, 'SELECT name TIME FROM t1'); +CALL p1(name, 'SELECT name TIMESTAMP FROM t1'); +CALL p1(name, 'SELECT name TRANSACTION FROM t1'); +CALL p1(name, 'SELECT name VALUE FROM t1'); +CALL p1(name, 'SELECT name VERSIONING FROM t1'); +CALL p1(name, 'SELECT name WITHOUT FROM t1'); +DROP TABLE t1; +END; +$$ +CALL p2('date'); +BEGIN NOT ATOMIC DECLARE date INT; SET date=10; SELECT date; END +10 +SELECT date FROM t1 +SELECT date 'alias' FROM t1 +Error 1525 Incorrect DATE value: 'alias' +SELECT date() +Error 1064 You 
have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1 +SELECT date.date() +Error 1630 FUNCTION date.date does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT date DATE FROM t1 +SELECT date HISTORY FROM t1 +SELECT date NEXT FROM t1 +SELECT date PERIOD FROM t1 +SELECT date PREVIOUS FROM t1 +SELECT date SYSTEM FROM t1 +SELECT date SYSTEM_TIME FROM t1 +SELECT date TIME FROM t1 +SELECT date TIMESTAMP FROM t1 +SELECT date TRANSACTION FROM t1 +SELECT date VALUE FROM t1 +SELECT date VERSIONING FROM t1 +SELECT date WITHOUT FROM t1 +CALL p2('history'); +BEGIN NOT ATOMIC DECLARE history INT; SET history=10; SELECT history; END +10 +SELECT history FROM t1 +SELECT history 'alias' FROM t1 +SELECT history() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT history.history() +Error 1630 FUNCTION history.history does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT history DATE FROM t1 +SELECT history HISTORY FROM t1 +SELECT history NEXT FROM t1 +SELECT history PERIOD FROM t1 +SELECT history PREVIOUS FROM t1 +SELECT history SYSTEM FROM t1 +SELECT history SYSTEM_TIME FROM t1 +SELECT history TIME FROM t1 +SELECT history TIMESTAMP FROM t1 +SELECT history TRANSACTION FROM t1 +SELECT history VALUE FROM t1 +SELECT history VERSIONING FROM t1 +SELECT history WITHOUT FROM t1 +CALL p2('next'); +BEGIN NOT ATOMIC DECLARE next INT; SET next=10; SELECT next; END +10 +SELECT next FROM t1 +SELECT next 'alias' FROM t1 +SELECT next() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT next.next() +Error 1630 FUNCTION next.next does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT next DATE FROM t1 +SELECT next HISTORY FROM t1 +SELECT next NEXT FROM t1 +SELECT next PERIOD FROM t1 +SELECT next PREVIOUS FROM t1 +SELECT next SYSTEM FROM t1 +SELECT next SYSTEM_TIME FROM t1 +SELECT next TIME FROM t1 +SELECT next TIMESTAMP FROM t1 +SELECT next TRANSACTION FROM t1 +SELECT next VALUE FROM t1 +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'FROM t1' at line 1 +SELECT next VERSIONING FROM t1 +SELECT next WITHOUT FROM t1 +CALL p2('period'); +BEGIN NOT ATOMIC DECLARE period INT; SET period=10; SELECT period; END +10 +SELECT period FROM t1 +SELECT period 'alias' FROM t1 +SELECT period() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT period.period() +Error 1630 FUNCTION period.period does not exist. 
Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT period DATE FROM t1 +SELECT period HISTORY FROM t1 +SELECT period NEXT FROM t1 +SELECT period PERIOD FROM t1 +SELECT period PREVIOUS FROM t1 +SELECT period SYSTEM FROM t1 +SELECT period SYSTEM_TIME FROM t1 +SELECT period TIME FROM t1 +SELECT period TIMESTAMP FROM t1 +SELECT period TRANSACTION FROM t1 +SELECT period VALUE FROM t1 +SELECT period VERSIONING FROM t1 +SELECT period WITHOUT FROM t1 +CALL p2('previous'); +BEGIN NOT ATOMIC DECLARE previous INT; SET previous=10; SELECT previous; END +10 +SELECT previous FROM t1 +SELECT previous 'alias' FROM t1 +SELECT previous() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT previous.previous() +Error 1630 FUNCTION previous.previous does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT previous DATE FROM t1 +SELECT previous HISTORY FROM t1 +SELECT previous NEXT FROM t1 +SELECT previous PERIOD FROM t1 +SELECT previous PREVIOUS FROM t1 +SELECT previous SYSTEM FROM t1 +SELECT previous SYSTEM_TIME FROM t1 +SELECT previous TIME FROM t1 +SELECT previous TIMESTAMP FROM t1 +SELECT previous TRANSACTION FROM t1 +SELECT previous VALUE FROM t1 +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'FROM t1' at line 1 +SELECT previous VERSIONING FROM t1 +SELECT previous WITHOUT FROM t1 +CALL p2('system'); +BEGIN NOT ATOMIC DECLARE system INT; SET system=10; SELECT system; END +10 +SELECT system FROM t1 +SELECT system 'alias' FROM t1 +SELECT system() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT system.system() +Error 1630 FUNCTION system.system does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT system DATE FROM t1 +SELECT system HISTORY FROM t1 +SELECT system NEXT FROM t1 +SELECT system PERIOD FROM t1 +SELECT system PREVIOUS FROM t1 +SELECT system SYSTEM FROM t1 +SELECT system SYSTEM_TIME FROM t1 +SELECT system TIME FROM t1 +SELECT system TIMESTAMP FROM t1 +SELECT system TRANSACTION FROM t1 +SELECT system VALUE FROM t1 +SELECT system VERSIONING FROM t1 +SELECT system WITHOUT FROM t1 +CALL p2('system_time'); +BEGIN NOT ATOMIC DECLARE system_time INT; SET system_time=10; SELECT system_time; END +10 +SELECT system_time FROM t1 +SELECT system_time 'alias' FROM t1 +SELECT system_time() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT system_time.system_time() +Error 1630 FUNCTION system_time.system_time does not exist. 
Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT system_time DATE FROM t1 +SELECT system_time HISTORY FROM t1 +SELECT system_time NEXT FROM t1 +SELECT system_time PERIOD FROM t1 +SELECT system_time PREVIOUS FROM t1 +SELECT system_time SYSTEM FROM t1 +SELECT system_time SYSTEM_TIME FROM t1 +SELECT system_time TIME FROM t1 +SELECT system_time TIMESTAMP FROM t1 +SELECT system_time TRANSACTION FROM t1 +SELECT system_time VALUE FROM t1 +SELECT system_time VERSIONING FROM t1 +SELECT system_time WITHOUT FROM t1 +CALL p2('time'); +BEGIN NOT ATOMIC DECLARE time INT; SET time=10; SELECT time; END +10 +SELECT time FROM t1 +SELECT time 'alias' FROM t1 +Error 1525 Incorrect TIME value: 'alias' +SELECT time() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1 +SELECT time.time() +Error 1630 FUNCTION time.time does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT time DATE FROM t1 +SELECT time HISTORY FROM t1 +SELECT time NEXT FROM t1 +SELECT time PERIOD FROM t1 +SELECT time PREVIOUS FROM t1 +SELECT time SYSTEM FROM t1 +SELECT time SYSTEM_TIME FROM t1 +SELECT time TIME FROM t1 +SELECT time TIMESTAMP FROM t1 +SELECT time TRANSACTION FROM t1 +SELECT time VALUE FROM t1 +SELECT time VERSIONING FROM t1 +SELECT time WITHOUT FROM t1 +CALL p2('timestamp'); +BEGIN NOT ATOMIC DECLARE timestamp INT; SET timestamp=10; SELECT timestamp; END +10 +SELECT timestamp FROM t1 +SELECT timestamp 'alias' FROM t1 +Error 1525 Incorrect DATETIME value: 'alias' +SELECT timestamp() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1 +SELECT timestamp.timestamp() +Error 1630 FUNCTION timestamp.timestamp does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT timestamp DATE FROM t1 +SELECT timestamp HISTORY FROM t1 +SELECT timestamp NEXT FROM t1 +SELECT timestamp PERIOD FROM t1 +SELECT timestamp PREVIOUS FROM t1 +SELECT timestamp SYSTEM FROM t1 +SELECT timestamp SYSTEM_TIME FROM t1 +SELECT timestamp TIME FROM t1 +SELECT timestamp TIMESTAMP FROM t1 +SELECT timestamp TRANSACTION FROM t1 +SELECT timestamp VALUE FROM t1 +SELECT timestamp VERSIONING FROM t1 +SELECT timestamp WITHOUT FROM t1 +CALL p2('transaction'); +BEGIN NOT ATOMIC DECLARE transaction INT; SET transaction=10; SELECT transaction; END +10 +SELECT transaction FROM t1 +SELECT transaction 'alias' FROM t1 +SELECT transaction() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT transaction.transaction() +Error 1630 FUNCTION transaction.transaction does not exist. 
Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT transaction DATE FROM t1 +SELECT transaction HISTORY FROM t1 +SELECT transaction NEXT FROM t1 +SELECT transaction PERIOD FROM t1 +SELECT transaction PREVIOUS FROM t1 +SELECT transaction SYSTEM FROM t1 +SELECT transaction SYSTEM_TIME FROM t1 +SELECT transaction TIME FROM t1 +SELECT transaction TIMESTAMP FROM t1 +SELECT transaction TRANSACTION FROM t1 +SELECT transaction VALUE FROM t1 +SELECT transaction VERSIONING FROM t1 +SELECT transaction WITHOUT FROM t1 +CALL p2('value'); +BEGIN NOT ATOMIC DECLARE value INT; SET value=10; SELECT value; END +10 +SELECT value FROM t1 +SELECT value 'alias' FROM t1 +SELECT value() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1 +SELECT value.value() +Error 1630 FUNCTION value.value does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT value DATE FROM t1 +SELECT value HISTORY FROM t1 +SELECT value NEXT FROM t1 +SELECT value PERIOD FROM t1 +SELECT value PREVIOUS FROM t1 +SELECT value SYSTEM FROM t1 +SELECT value SYSTEM_TIME FROM t1 +SELECT value TIME FROM t1 +SELECT value TIMESTAMP FROM t1 +SELECT value TRANSACTION FROM t1 +SELECT value VALUE FROM t1 +SELECT value VERSIONING FROM t1 +SELECT value WITHOUT FROM t1 +CALL p2('versioning'); +BEGIN NOT ATOMIC DECLARE versioning INT; SET versioning=10; SELECT versioning; END +10 +SELECT versioning FROM t1 +SELECT versioning 'alias' FROM t1 +SELECT versioning() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT versioning.versioning() +Error 1630 FUNCTION versioning.versioning does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT versioning DATE FROM t1 +SELECT versioning HISTORY FROM t1 +SELECT versioning NEXT FROM t1 +SELECT versioning PERIOD FROM t1 +SELECT versioning PREVIOUS FROM t1 +SELECT versioning SYSTEM FROM t1 +SELECT versioning SYSTEM_TIME FROM t1 +SELECT versioning TIME FROM t1 +SELECT versioning TIMESTAMP FROM t1 +SELECT versioning TRANSACTION FROM t1 +SELECT versioning VALUE FROM t1 +SELECT versioning VERSIONING FROM t1 +SELECT versioning WITHOUT FROM t1 +CALL p2('without'); +BEGIN NOT ATOMIC DECLARE without INT; SET without=10; SELECT without; END +10 +SELECT without FROM t1 +SELECT without 'alias' FROM t1 +SELECT without() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT without.without() +Error 1630 FUNCTION without.without does not exist. 
Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT without DATE FROM t1 +SELECT without HISTORY FROM t1 +SELECT without NEXT FROM t1 +SELECT without PERIOD FROM t1 +SELECT without PREVIOUS FROM t1 +SELECT without SYSTEM FROM t1 +SELECT without SYSTEM_TIME FROM t1 +SELECT without TIME FROM t1 +SELECT without TIMESTAMP FROM t1 +SELECT without TRANSACTION FROM t1 +SELECT without VALUE FROM t1 +SELECT without VERSIONING FROM t1 +SELECT without WITHOUT FROM t1 +DROP PROCEDURE p2; +DROP PROCEDURE p1; diff --git a/mysql-test/main/parser.test b/mysql-test/main/parser.test index 1f176c6afc5..8faab613a0c 100644 --- a/mysql-test/main/parser.test +++ b/mysql-test/main/parser.test @@ -1379,3 +1379,67 @@ BEGIN NOT ATOMIC END; $$ DELIMITER ;$$ + + +--echo # +--echo # MDEV-16202 Latest changes made erroneously some keywords reserved in sql_mode=ORACLE +--echo # + + +DELIMITER $$; +CREATE PROCEDURE p1(name VARCHAR(64), pattern TEXT) +BEGIN + DECLARE query TEXT DEFAULT REPLACE(pattern, 'name', name); + DECLARE CONTINUE HANDLER FOR SQLEXCEPTION + BEGIN + SHOW ERRORS; + END; + SELECT query AS ''; + EXECUTE IMMEDIATE query; +END; +$$ + +CREATE PROCEDURE p2(name VARCHAR(64)) +BEGIN + CALL p1(name, 'BEGIN NOT ATOMIC DECLARE name INT; SET name=10; SELECT name; END'); + EXECUTE IMMEDIATE REPLACE('CREATE TABLE t1 (name INT)', 'name', name); + CALL p1(name, 'SELECT name FROM t1'); + CALL p1(name, 'SELECT name ''alias'' FROM t1'); + CALL p1(name, 'SELECT name()'); + CALL p1(name, 'SELECT name.name()'); + CALL p1(name, 'SELECT name DATE FROM t1'); + CALL p1(name, 'SELECT name HISTORY FROM t1'); + CALL p1(name, 'SELECT name NEXT FROM t1'); + CALL p1(name, 'SELECT name PERIOD FROM t1'); + CALL p1(name, 'SELECT name PREVIOUS FROM t1'); + CALL p1(name, 'SELECT name SYSTEM FROM t1'); + CALL p1(name, 'SELECT name SYSTEM_TIME FROM t1'); + CALL p1(name, 'SELECT name TIME FROM t1'); + CALL p1(name, 'SELECT name TIMESTAMP FROM t1'); + CALL p1(name, 'SELECT name TRANSACTION FROM t1'); + CALL p1(name, 'SELECT name VALUE FROM t1'); + CALL p1(name, 'SELECT name VERSIONING FROM t1'); + CALL p1(name, 'SELECT name WITHOUT FROM t1'); + DROP TABLE t1; +END; +$$ +DELIMITER ;$$ + +--disable_column_names +CALL p2('date'); +CALL p2('history'); +CALL p2('next'); +CALL p2('period'); +CALL p2('previous'); +CALL p2('system'); +CALL p2('system_time'); +CALL p2('time'); +CALL p2('timestamp'); +CALL p2('transaction'); +CALL p2('value'); +CALL p2('versioning'); +CALL p2('without'); +--enable_column_names + +DROP PROCEDURE p2; +DROP PROCEDURE p1; diff --git a/mysql-test/main/partition_alter.result b/mysql-test/main/partition_alter.result index e42604ba056..448c26c7919 100644 --- a/mysql-test/main/partition_alter.result +++ b/mysql-test/main/partition_alter.result @@ -76,6 +76,7 @@ t1 CREATE TABLE `t1` ( insert t1 values (2, '2020-01-03', 20); ERROR 23000: CONSTRAINT `CONSTRAINT_1` failed for `test`.`t1` drop table t1; +db.opt create table t1(id int, d date not null, b bool not null default 0, primary key(id,d)) partition by range columns (d) ( partition p1 values less than ('2016-10-18'), @@ -100,6 +101,7 @@ t1 CREATE TABLE `t1` ( insert t1 values (2, '2020-01-03', 20); ERROR 23000: CONSTRAINT `CONSTRAINT_1` failed for `test`.`t1` drop table t1; +db.opt create table t1 (id_1 int auto_increment, id_2 int, id_3 int, d1 date, dt1 datetime default current_timestamp, dt2 datetime default current_timestamp on update current_timestamp, primary key (id_2, id_3), key(id_1)) partition by hash(id_2) partitions 3 (partition 
p01, partition p02, partition p03); insert into t1 values(0, 1, 1, NULL, now(), now()); alter online table t1 delay_key_write=1; diff --git a/mysql-test/main/partition_exchange.result b/mysql-test/main/partition_exchange.result index 3bce5aec8e2..b27da5b9b8c 100644 --- a/mysql-test/main/partition_exchange.result +++ b/mysql-test/main/partition_exchange.result @@ -1297,3 +1297,14 @@ SHOW WARNINGS; Level Code Message UNLOCK TABLES; DROP TABLE t, t2, tp; +# +# Assertion `!part_elem->tablespace_name && !table_create_info->tablespace' +# failed during EXCHANGE PARTITION with different TABLESPACE. +# +CREATE TABLE t1 (a VARCHAR(200)) PARTITION BY KEY(a) partitions 10; +ALTER TABLE t1 ADD PARTITION (PARTITION pm TABLESPACE = `innodb_file_per_table`); +CREATE TABLE t2 like t1; +ALTER TABLE t2 REMOVE PARTITIONING; +ALTER TABLE t1 EXCHANGE PARTITION pm WITH TABLE t2; +ERROR HY000: Non matching attribute 'TABLESPACE' between partition and table +DROP TABLE t1, t2; diff --git a/mysql-test/main/partition_exchange.test b/mysql-test/main/partition_exchange.test index 8dc40da0caa..cb33b8dd857 100644 --- a/mysql-test/main/partition_exchange.test +++ b/mysql-test/main/partition_exchange.test @@ -523,3 +523,16 @@ UNLOCK TABLES; dec $count; } DROP TABLE t, t2, tp; + +--echo # +--echo # Assertion `!part_elem->tablespace_name && !table_create_info->tablespace' +--echo # failed during EXCHANGE PARTITION with different TABLESPACE. +--echo # +CREATE TABLE t1 (a VARCHAR(200)) PARTITION BY KEY(a) partitions 10; +ALTER TABLE t1 ADD PARTITION (PARTITION pm TABLESPACE = `innodb_file_per_table`); +CREATE TABLE t2 like t1; +ALTER TABLE t2 REMOVE PARTITIONING; +--error ER_PARTITION_EXCHANGE_DIFFERENT_OPTION +ALTER TABLE t1 EXCHANGE PARTITION pm WITH TABLE t2; +DROP TABLE t1, t2; + diff --git a/mysql-test/main/partition_innodb.test b/mysql-test/main/partition_innodb.test index 7b5a69fe622..49430414913 100644 --- a/mysql-test/main/partition_innodb.test +++ b/mysql-test/main/partition_innodb.test @@ -1,7 +1,3 @@ -if (`select plugin_auth_version < "5.6.25" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB as of 5.6.24 or earlier -} --source include/not_embedded.inc --source include/have_partition.inc --source include/have_innodb.inc diff --git a/mysql-test/main/partition_list.result b/mysql-test/main/partition_list.result index 91c1c106cea..f4cd1c9da43 100644 --- a/mysql-test/main/partition_list.result +++ b/mysql-test/main/partition_list.result @@ -334,3 +334,213 @@ f 1 drop table t1; #end of 10.2 tests +# +# Bug MDEV-16101: More than MAX_REF_PARTS values in a list on ALTER TABLE. +# Currently MAX_REF_PARTS = 32. 
+CREATE TABLE ts1 (a INT, PRIMARY KEY (`a`)) +PARTITION BY LIST (`a`) +(PARTITION `p ts_0` VALUES IN (101,102,103,104,105,106,107,108,109,110, +111,112,113,114,115,116,117,118,119,120, +121,122,123,124,125,126,127,128,129,130, +131,132,133)); +INSERT INTO ts1 +VALUES (101), (102), (103), (104), (105), (106), (107), (108), (109), (110), +(111), (112), (113), (114), (115), (116), (117), (118), (119), (120), +(121), (122), (123), (124), (125), (126), (127), (128), (129), (130), +(131), (132), (133); +INSERT INTO ts1 VALUES (134); +ERROR HY000: Table has no partition for value 134 +SELECT * FROM ts1; +a +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +ALTER TABLE ts1 ADD PARTITION +(PARTITION `p ts_1` VALUES IN (1,2,3,4,5,6,7,8,9,10, +11,12,13,14,15,16,17,18,19,20, +21,22,23,24,25,26,27,28,29,30, +31,32,33)); +INSERT INTO ts1 +VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), +(11), (12), (13), (14), (15), (16), (17), (18), (19), (20), +(21), (22), (23), (24), (25), (26), (27), (28), (29), (30), +(31), (32), (33); +INSERT INTO ts1 VALUES(34); +ERROR HY000: Table has no partition for value 34 +SELECT * FROM ts1; +a +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +33 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +ALTER TABLE ts1 REORGANIZE PARTITION `p ts_1` INTO +(PARTITION `p ts_1` VALUES IN (1,2,3,4,5,6,7,8,9,10, +11,12,13,14,15,16,17,18,19,20, +21,22,23,24,25,26,27,28,29,30, +31,32,33,34,35)); +INSERT INTO ts1 VALUES (34), (35); +INSERT INTO ts1 VALUES (36); +ERROR HY000: Table has no partition for value 36 +SELECT * FROM ts1; +a +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +33 +34 +35 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +DROP TABLE ts1; diff --git a/mysql-test/main/partition_list.test b/mysql-test/main/partition_list.test index e2b6aff300f..5eadb72a932 100644 --- a/mysql-test/main/partition_list.test +++ b/mysql-test/main/partition_list.test @@ -209,3 +209,44 @@ select * from t1 where f = 1; drop table t1; --echo #end of 10.2 tests + +--echo # +--echo # Bug MDEV-16101: More than MAX_REF_PARTS values in a list on ALTER TABLE. +--echo # Currently MAX_REF_PARTS = 32. 
+CREATE TABLE ts1 (a INT, PRIMARY KEY (`a`)) +PARTITION BY LIST (`a`) +(PARTITION `p ts_0` VALUES IN (101,102,103,104,105,106,107,108,109,110, + 111,112,113,114,115,116,117,118,119,120, + 121,122,123,124,125,126,127,128,129,130, + 131,132,133)); +INSERT INTO ts1 +VALUES (101), (102), (103), (104), (105), (106), (107), (108), (109), (110), + (111), (112), (113), (114), (115), (116), (117), (118), (119), (120), + (121), (122), (123), (124), (125), (126), (127), (128), (129), (130), + (131), (132), (133); +--error ER_NO_PARTITION_FOR_GIVEN_VALUE +INSERT INTO ts1 VALUES (134); +SELECT * FROM ts1; +ALTER TABLE ts1 ADD PARTITION +(PARTITION `p ts_1` VALUES IN (1,2,3,4,5,6,7,8,9,10, + 11,12,13,14,15,16,17,18,19,20, + 21,22,23,24,25,26,27,28,29,30, + 31,32,33)); +INSERT INTO ts1 +VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), + (11), (12), (13), (14), (15), (16), (17), (18), (19), (20), + (21), (22), (23), (24), (25), (26), (27), (28), (29), (30), + (31), (32), (33); +--error ER_NO_PARTITION_FOR_GIVEN_VALUE +INSERT INTO ts1 VALUES(34); +SELECT * FROM ts1; +ALTER TABLE ts1 REORGANIZE PARTITION `p ts_1` INTO +(PARTITION `p ts_1` VALUES IN (1,2,3,4,5,6,7,8,9,10, + 11,12,13,14,15,16,17,18,19,20, + 21,22,23,24,25,26,27,28,29,30, + 31,32,33,34,35)); +INSERT INTO ts1 VALUES (34), (35); +--error ER_NO_PARTITION_FOR_GIVEN_VALUE +INSERT INTO ts1 VALUES (36); +SELECT * FROM ts1; +DROP TABLE ts1; diff --git a/mysql-test/main/partition_open_files_limit.test b/mysql-test/main/partition_open_files_limit.test index e62ebd0ade7..5a0bc9c7cb5 100644 --- a/mysql-test/main/partition_open_files_limit.test +++ b/mysql-test/main/partition_open_files_limit.test @@ -4,6 +4,10 @@ DROP TABLE IF EXISTS `t1`; --enable_warnings +call mtr.add_suppression("option 'table_open_cache'"); +call mtr.add_suppression("option 'max_connections'"); + + # On some platforms the lowest possible open_files_limit is too high... let $max_open_files_limit= `SELECT @@open_files_limit > 511`; if ($max_open_files_limit) diff --git a/mysql-test/main/partition_sync.result b/mysql-test/main/partition_sync.result index 18cc506e849..badcb10e5da 100644 --- a/mysql-test/main/partition_sync.result +++ b/mysql-test/main/partition_sync.result @@ -61,3 +61,28 @@ connection default; disconnect con2; disconnect con3; DROP TABLE tbl_with_partitions; +# +# MDEV-15465 Server crash or ASAN heap-use-after-free in Item_func_match::cleanup +# upon using FT search with partitioning. 
+# +connect con1,localhost,root,,test; +CREATE OR REPLACE TABLE t1 (c CHAR(8)) ENGINE=MyISAM PARTITION BY KEY(c); +connection default; +set debug_sync= 'execute_command_after_close_tables SIGNAL opened WAIT_FOR go'; +DELETE FROM t1 WHERE MATCH(c) AGAINST ('foo' IN BOOLEAN MODE); +connection con1; +set debug_sync= 'now WAIT_FOR opened'; +FLUSH TABLES; +set debug_sync= 'now SIGNAL go'; +connection default; +set debug_sync= 'execute_command_after_close_tables SIGNAL opened WAIT_FOR go'; +SELECT * FROM t1 WHERE MATCH(c) AGAINST ('foo' IN BOOLEAN MODE); +connection con1; +set debug_sync= 'now WAIT_FOR opened'; +FLUSH TABLES; +set debug_sync= 'now SIGNAL go'; +disconnect con1; +connection default; +c +DROP TABLE t1; +set debug_sync= 'RESET'; diff --git a/mysql-test/main/partition_sync.test b/mysql-test/main/partition_sync.test index fd704f35534..22ca7df7e62 100644 --- a/mysql-test/main/partition_sync.test +++ b/mysql-test/main/partition_sync.test @@ -77,6 +77,42 @@ disconnect con2; disconnect con3; DROP TABLE tbl_with_partitions; +--echo # +--echo # MDEV-15465 Server crash or ASAN heap-use-after-free in Item_func_match::cleanup +--echo # upon using FT search with partitioning. +--echo # + +--connect (con1,localhost,root,,test) +CREATE OR REPLACE TABLE t1 (c CHAR(8)) ENGINE=MyISAM PARTITION BY KEY(c); + +--connection default +set debug_sync= 'execute_command_after_close_tables SIGNAL opened WAIT_FOR go'; +--send +DELETE FROM t1 WHERE MATCH(c) AGAINST ('foo' IN BOOLEAN MODE); + +--connection con1 +set debug_sync= 'now WAIT_FOR opened'; +FLUSH TABLES; +set debug_sync= 'now SIGNAL go'; + +--connection default +--reap +set debug_sync= 'execute_command_after_close_tables SIGNAL opened WAIT_FOR go'; +--send +SELECT * FROM t1 WHERE MATCH(c) AGAINST ('foo' IN BOOLEAN MODE); + +--connection con1 +set debug_sync= 'now WAIT_FOR opened'; +FLUSH TABLES; +set debug_sync= 'now SIGNAL go'; + +# Cleanup +--disconnect con1 +--connection default +--reap +DROP TABLE t1; +set debug_sync= 'RESET'; + # Check that all connections opened by test cases in this file are really # gone so execution of other tests won't be affected by their presence. 
diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result index 89de00a38bc..f9d0b004591 100644 --- a/mysql-test/main/ps.result +++ b/mysql-test/main/ps.result @@ -5240,3 +5240,14 @@ END; $$ CALL p1('x'); DROP PROCEDURE p1; +create table t1 (b blob default ''); +prepare stmt from "alter table t1 force"; +execute stmt; +execute stmt; +execute stmt; +set names latin1; +prepare stmt from "alter table t1 modify b text character set utf8 default 'a'"; +execute stmt; +execute stmt; +execute stmt; +drop table t1; diff --git a/mysql-test/main/ps.test b/mysql-test/main/ps.test index 59b157e2393..7ab24cd541d 100644 --- a/mysql-test/main/ps.test +++ b/mysql-test/main/ps.test @@ -4672,3 +4672,18 @@ DELIMITER ;$$ --disable_result_log CALL p1('x'); DROP PROCEDURE p1; + +# +# MDEV-15746 ASAN heap-use-after-free in Item_change_list::rollback_item_tree_changes on ALTER executed as PS +# +create table t1 (b blob default ''); +prepare stmt from "alter table t1 force"; +execute stmt; +execute stmt; +execute stmt; +set names latin1; +prepare stmt from "alter table t1 modify b text character set utf8 default 'a'"; +execute stmt; +execute stmt; +execute stmt; +drop table t1; diff --git a/mysql-test/r/ps_qc_innodb.result b/mysql-test/main/ps_qc_innodb.result similarity index 100% rename from mysql-test/r/ps_qc_innodb.result rename to mysql-test/main/ps_qc_innodb.result diff --git a/mysql-test/t/ps_qc_innodb.test b/mysql-test/main/ps_qc_innodb.test similarity index 100% rename from mysql-test/t/ps_qc_innodb.test rename to mysql-test/main/ps_qc_innodb.test diff --git a/mysql-test/main/read_only_innodb.result b/mysql-test/main/read_only_innodb.result index abfc5322ed0..b6e294b633c 100644 --- a/mysql-test/main/read_only_innodb.result +++ b/mysql-test/main/read_only_innodb.result @@ -237,6 +237,14 @@ a a 5 10 DROP TABLE temp1, temp2; +# MDEV-14185 CREATE TEMPORARY TABLE AS SELECT causes error 1290 with read_only and InnoDB. + +CREATE TEMPORARY TABLE temp1 ENGINE=INNODB AS SELECT a FROM t1; +SELECT * FROM temp1; +a +1 +DROP TABLE temp1; + # Disconnect and cleanup disconnect con1; diff --git a/mysql-test/main/read_only_innodb.test b/mysql-test/main/read_only_innodb.test index 9ba3ccaca07..a9310a1a78e 100644 --- a/mysql-test/main/read_only_innodb.test +++ b/mysql-test/main/read_only_innodb.test @@ -240,6 +240,15 @@ UPDATE temp1,temp2 SET temp1.a = 5, temp2.a = 10; SELECT * FROM temp1, temp2; DROP TABLE temp1, temp2; +--echo +--echo # MDEV-14185 CREATE TEMPORARY TABLE AS SELECT causes error 1290 with read_only and InnoDB. 
+--echo + +CREATE TEMPORARY TABLE temp1 ENGINE=INNODB AS SELECT a FROM t1; +SELECT * FROM temp1; +DROP TABLE temp1; + + --echo --echo # Disconnect and cleanup --echo diff --git a/mysql-test/main/rename.result b/mysql-test/main/rename.result index ff8566abe02..3ee9dd593d4 100644 --- a/mysql-test/main/rename.result +++ b/mysql-test/main/rename.result @@ -78,3 +78,69 @@ ERROR HY000: 'test.v1' is not of type 'BASE TABLE' drop view v1; drop table t1; End of 5.0 tests +CREATE OR REPLACE TABLE t1 (a INT); +CREATE OR REPLACE TABLE t2 (a INT); +CREATE OR REPLACE TEMPORARY TABLE t1_tmp (b INT); +CREATE OR REPLACE TEMPORARY TABLE t2_tmp (b INT); +rename table t1 to t2; +ERROR 42S01: Table 't2' already exists +rename table t1 to tmp, tmp to t2; +ERROR 42S01: Table 't2' already exists +rename table t1_tmp to t2_tmp; +ERROR 42S01: Table 't2_tmp' already exists +rename table t1_tmp to tmp, tmp to t2_tmp; +ERROR 42S01: Table 't2_tmp' already exists +show create table t1_tmp; +Table Create Table +t1_tmp CREATE TEMPORARY TABLE `t1_tmp` ( + `b` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +show create table t2_tmp; +Table Create Table +t2_tmp CREATE TEMPORARY TABLE `t2_tmp` ( + `b` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +rename table t1 to t1_tmp; +rename table t2_tmp to t2; +rename table t2 to tmp, tmp to t2; +rename table t1_tmp to tmp, tmp to t1_tmp; +show tables; +Tables_in_test +t1_tmp +t2 +SHOW CREATE TABLE t1_tmp; +Table Create Table +t1_tmp CREATE TEMPORARY TABLE `t1_tmp` ( + `b` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1_tmp; +SHOW CREATE TABLE t1_tmp; +Table Create Table +t1_tmp CREATE TABLE `t1_tmp` ( + `a` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1_tmp; +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TEMPORARY TABLE `t2` ( + `b` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t2; +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t2; +CREATE TABLE t1 (a INT); +insert into t1 values (1); +CREATE TEMPORARY TABLE t1 (b INT); +insert into t1 values (2); +RENAME TABLE t1 TO tmp, t1 TO t2; +select * from tmp; +b +2 +select * from t2; +a +1 +drop table tmp,t2; diff --git a/mysql-test/main/rename.test b/mysql-test/main/rename.test index a55bc845acc..67732d5b5b9 100644 --- a/mysql-test/main/rename.test +++ b/mysql-test/main/rename.test @@ -95,3 +95,49 @@ drop table t1; --source include/wait_until_count_sessions.inc +# +# Test of rename with temporary tables +# + +CREATE OR REPLACE TABLE t1 (a INT); +CREATE OR REPLACE TABLE t2 (a INT); +CREATE OR REPLACE TEMPORARY TABLE t1_tmp (b INT); +CREATE OR REPLACE TEMPORARY TABLE t2_tmp (b INT); + +# Can't rename table over another one +--error ER_TABLE_EXISTS_ERROR +rename table t1 to t2; +--error ER_TABLE_EXISTS_ERROR +rename table t1 to tmp, tmp to t2; +--error ER_TABLE_EXISTS_ERROR +rename table t1_tmp to t2_tmp; +--error ER_TABLE_EXISTS_ERROR +rename table t1_tmp to tmp, tmp to t2_tmp; + +show create table t1_tmp; +show create table t2_tmp; + +# The following should work +rename table t1 to t1_tmp; +rename table t2_tmp to t2; +rename table t2 to tmp, tmp to t2; +rename table t1_tmp to tmp, tmp to t1_tmp; +show tables; +SHOW CREATE TABLE t1_tmp; +drop table t1_tmp; +SHOW CREATE TABLE t1_tmp; +drop table t1_tmp; +SHOW CREATE TABLE t2; +drop table t2; +SHOW CREATE TABLE t2; +drop table t2; + +CREATE TABLE t1 (a INT); +insert into t1 
values (1); +CREATE TEMPORARY TABLE t1 (b INT); +insert into t1 values (2); +RENAME TABLE t1 TO tmp, t1 TO t2; +select * from tmp; +select * from t2; +drop table tmp,t2; + diff --git a/mysql-test/main/sp-anchor-row-type-table.result b/mysql-test/main/sp-anchor-row-type-table.result index 28a5180e6c9..6abf1e18315 100644 --- a/mysql-test/main/sp-anchor-row-type-table.result +++ b/mysql-test/main/sp-anchor-row-type-table.result @@ -701,7 +701,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` text DEFAULT NULL, - `c` varchar(1) DEFAULT NULL + `c` char(1) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP PROCEDURE p2; DROP PROCEDURE p1; diff --git a/mysql-test/main/sp-anchor-type.result b/mysql-test/main/sp-anchor-type.result index b2de14bca1c..2b61854d514 100644 --- a/mysql-test/main/sp-anchor-type.result +++ b/mysql-test/main/sp-anchor-type.result @@ -462,15 +462,15 @@ t2 CREATE TABLE `t2` ( `dc100` decimal(10,0) DEFAULT NULL, `dc103` decimal(10,3) DEFAULT NULL, `dc209` decimal(20,9) DEFAULT NULL, - `cc` varchar(10) DEFAULT NULL, + `cc` char(10) DEFAULT NULL, `cv` varchar(10) DEFAULT NULL, `cvu` varchar(10) CHARACTER SET utf8 DEFAULT NULL, `t1` tinytext DEFAULT NULL, `t2` text DEFAULT NULL, `t3` mediumtext DEFAULT NULL, `t4` longtext DEFAULT NULL, - `enum1` varchar(1) DEFAULT NULL, - `set1` varchar(5) DEFAULT NULL, + `enum1` char(1) DEFAULT NULL, + `set1` char(5) DEFAULT NULL, `blob1` tinyblob DEFAULT NULL, `blob2` blob DEFAULT NULL, `blob3` mediumblob DEFAULT NULL, @@ -624,15 +624,15 @@ t2 CREATE TABLE `t2` ( `dc100` decimal(10,0) DEFAULT NULL, `dc103` decimal(10,3) DEFAULT NULL, `dc209` decimal(20,9) DEFAULT NULL, - `cc` varchar(10) DEFAULT NULL, + `cc` char(10) DEFAULT NULL, `cv` varchar(10) DEFAULT NULL, `cvu` varchar(10) CHARACTER SET utf8 DEFAULT NULL, `t1` tinytext DEFAULT NULL, `t2` text DEFAULT NULL, `t3` mediumtext DEFAULT NULL, `t4` longtext DEFAULT NULL, - `enum1` varchar(1) DEFAULT NULL, - `set1` varchar(5) DEFAULT NULL, + `enum1` char(1) DEFAULT NULL, + `set1` char(5) DEFAULT NULL, `blob1` tinyblob DEFAULT NULL, `blob2` blob DEFAULT NULL, `blob3` mediumblob DEFAULT NULL, @@ -1042,7 +1042,7 @@ t1 CREATE TABLE `t1` ( `a_flt0` float DEFAULT NULL, `a_dbl0` double DEFAULT NULL, `a_bit3` bit(3) DEFAULT NULL, - `a_enum0` varchar(1) DEFAULT NULL, + `a_enum0` char(1) DEFAULT NULL, `a_varchar10` varchar(10) DEFAULT NULL, `a_text1` text DEFAULT NULL, `a_tinytext1` tinytext DEFAULT NULL, @@ -1060,7 +1060,7 @@ t1 CREATE TABLE `t1` ( `aa_flt0` float DEFAULT NULL, `aa_dbl0` double DEFAULT NULL, `aa_bit3` bit(3) DEFAULT NULL, - `aa_enum0` varchar(1) DEFAULT NULL, + `aa_enum0` char(1) DEFAULT NULL, `aa_varchar10` varchar(10) DEFAULT NULL, `aa_text1` text DEFAULT NULL, `aa_tinytext1` tinytext DEFAULT NULL, diff --git a/mysql-test/main/sp-bugs.result b/mysql-test/main/sp-bugs.result index 3db1e68321c..a699cd11c5a 100644 --- a/mysql-test/main/sp-bugs.result +++ b/mysql-test/main/sp-bugs.result @@ -292,3 +292,53 @@ SELECT a, a+0; END; $$ ERROR 22007: Illegal set 'a,b' value found during parsing +# +# Start of 10.3 tests +# +# +# MDEV-16117 SP with a single FOR statement creates but further fails to load +# +CREATE PROCEDURE p1() +FOR i IN 1..10 DO +set @x = 5; +END FOR; +$$ +CALL p1; +SELECT body FROM mysql.proc WHERE db='test' AND specific_name='p1'; +body +FOR i IN 1..10 DO +set @x = 5; +END FOR +DROP PROCEDURE p1; +CREATE PROCEDURE p1() WITH t1 AS (SELECT 1) SELECT 1; +$$ +CALL p1; +1 +1 +SELECT body FROM mysql.proc WHERE db='test' AND specific_name='p1'; +body 
+WITH t1 AS (SELECT 1) SELECT 1 +DROP PROCEDURE p1; +CREATE PROCEDURE p1() VALUES (1); +$$ +CALL p1; +1 +1 +SELECT body FROM mysql.proc WHERE db='test' AND specific_name='p1'; +body +VALUES (1) +DROP PROCEDURE p1; +CREATE FUNCTION f1() RETURNS INT +FOR i IN 1..10 DO +RETURN 1; +END FOR; +$$ +SELECT f1(); +f1() +1 +SELECT body FROM mysql.proc WHERE db='test' AND specific_name='f1'; +body +FOR i IN 1..10 DO +RETURN 1; +END FOR +DROP FUNCTION f1; diff --git a/mysql-test/main/sp-bugs.test b/mysql-test/main/sp-bugs.test index 3239dfeaeec..2dd70d28249 100644 --- a/mysql-test/main/sp-bugs.test +++ b/mysql-test/main/sp-bugs.test @@ -321,3 +321,53 @@ BEGIN END; $$ DELIMITER ;$$ + + +--echo # +--echo # Start of 10.3 tests +--echo # + +--echo # +--echo # MDEV-16117 SP with a single FOR statement creates but further fails to load +--echo # + +DELIMITER $$; +CREATE PROCEDURE p1() + FOR i IN 1..10 DO + set @x = 5; + END FOR; +$$ +DELIMITER ;$$ +CALL p1; +SELECT body FROM mysql.proc WHERE db='test' AND specific_name='p1'; +DROP PROCEDURE p1; + + +DELIMITER $$; +CREATE PROCEDURE p1() WITH t1 AS (SELECT 1) SELECT 1; +$$ +DELIMITER ;$$ +CALL p1; +SELECT body FROM mysql.proc WHERE db='test' AND specific_name='p1'; +DROP PROCEDURE p1; + + +DELIMITER $$; +CREATE PROCEDURE p1() VALUES (1); +$$ +DELIMITER ;$$ +CALL p1; +SELECT body FROM mysql.proc WHERE db='test' AND specific_name='p1'; +DROP PROCEDURE p1; + + +DELIMITER $$; +CREATE FUNCTION f1() RETURNS INT + FOR i IN 1..10 DO + RETURN 1; + END FOR; +$$ +DELIMITER ;$$ +SELECT f1(); +SELECT body FROM mysql.proc WHERE db='test' AND specific_name='f1'; +DROP FUNCTION f1; diff --git a/mysql-test/main/sp-code.result b/mysql-test/main/sp-code.result index 3a4dc9db6f8..001b03a0e3b 100644 --- a/mysql-test/main/sp-code.result +++ b/mysql-test/main/sp-code.result @@ -1301,3 +1301,24 @@ Pos Instruction 28 jump 4 29 cpop 1 DROP PROCEDURE p1; +# +# MDEV-14623: Output of show function code does not show FETCH GROUP NEXT ROW +# for custom aggregates +# +create aggregate function f1(x INT) returns int +begin +declare continue handler for not found return 0; +loop +fetch group next row; +insert into t2 (sal) values (x); +end loop; +end| +show function code f1; +Pos Instruction +0 hpush_jump 2 1 CONTINUE +1 freturn int 0 +2 agg_cfetch +3 stmt 5 "insert into t2 (sal) values (x)" +4 jump 2 +5 hpop 1 +drop function f1; diff --git a/mysql-test/main/sp-code.test b/mysql-test/main/sp-code.test index 1f2f5191f0a..f8c1b4f0a92 100644 --- a/mysql-test/main/sp-code.test +++ b/mysql-test/main/sp-code.test @@ -927,3 +927,22 @@ $$ DELIMITER ;$$ SHOW PROCEDURE CODE p1; DROP PROCEDURE p1; + +--echo # +--echo # MDEV-14623: Output of show function code does not show FETCH GROUP NEXT ROW +--echo # for custom aggregates +--echo # + +delimiter |; +create aggregate function f1(x INT) returns int +begin + declare continue handler for not found return 0; + loop + fetch group next row; + insert into t2 (sal) values (x); + end loop; +end| + +delimiter ;| +show function code f1; +drop function f1; diff --git a/mysql-test/main/sp-destruct.result b/mysql-test/main/sp-destruct.result index a5f96882a37..10cfcafdb3b 100644 --- a/mysql-test/main/sp-destruct.result +++ b/mysql-test/main/sp-destruct.result @@ -171,6 +171,13 @@ create database mysqltest1; create procedure mysqltest1.foo() select "foo"; update mysql.proc set name='' where db='mysqltest1'; drop database mysqltest1; +create procedure p1() set @foo = 10; +alter table mysql.proc drop primary key; +drop procedure p1; +ERROR HY000: Cannot load from 
mysql.proc. The table is probably corrupted +alter table mysql.proc add primary key (db,name,type); +drop procedure p1; +# Start of 10.3 tests # # MDEV-15444 Querying I_S.PARAMETERS can crash with a corrupted mysql.proc # diff --git a/mysql-test/main/sp-destruct.test b/mysql-test/main/sp-destruct.test index 607c733b492..8870df29299 100644 --- a/mysql-test/main/sp-destruct.test +++ b/mysql-test/main/sp-destruct.test @@ -286,6 +286,17 @@ create procedure mysqltest1.foo() select "foo"; update mysql.proc set name='' where db='mysqltest1'; drop database mysqltest1; +# +# BUG#26881798: SERVER EXITS WHEN PRIMARY KEY IN MYSQL.PROC IS DROPPED +# +create procedure p1() set @foo = 10; +alter table mysql.proc drop primary key; +--error ER_CANNOT_LOAD_FROM_TABLE_V2 +drop procedure p1; +alter table mysql.proc add primary key (db,name,type); +drop procedure p1; + +--echo # Start of 10.3 tests --echo # --echo # MDEV-15444 Querying I_S.PARAMETERS can crash with a corrupted mysql.proc diff --git a/mysql-test/main/sp-expr.result b/mysql-test/main/sp-expr.result new file mode 100644 index 00000000000..ead9856f9ab --- /dev/null +++ b/mysql-test/main/sp-expr.result @@ -0,0 +1,153 @@ +# +# Start of 10.3 tests +# +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +BEGIN NOT ATOMIC +CASE ((1) IN (SELECT a FROM t1)) WHEN 1 THEN SELECT 1; +ELSE SELECT NULL; +END CASE; +END; +$$ +1 +1 +BEGIN NOT ATOMIC +CASE (EXISTS (SELECT a FROM t1)) WHEN 1 THEN SELECT 1; +ELSE SELECT NULL; +END CASE; +END; +$$ +1 +1 +BEGIN NOT ATOMIC +IF ((1) IN (SELECT a FROM t1)) THEN SELECT 1; +ELSE SELECT NULL; +END IF; +END; +$$ +1 +1 +BEGIN NOT ATOMIC +IF (EXISTS (SELECT a FROM t1)) THEN SELECT 1; +ELSE SELECT NULL; +END IF; +END; +$$ +1 +1 +BEGIN NOT ATOMIC +WHILE ((1234) IN (SELECT * FROM t1)) DO +SELECT 1; +END WHILE; +END; +$$ +BEGIN NOT ATOMIC +WHILE (EXISTS (SELECT * FROM t1 WHERE a=1234)) DO +SELECT 1; +END WHILE; +END; +$$ +BEGIN NOT ATOMIC +REPEAT +SELECT 1; +UNTIL (1 IN (SELECT * FROM t1)) +END REPEAT; +END; +$$ +1 +1 +BEGIN NOT ATOMIC +REPEAT +SELECT 1; +UNTIL EXISTS (SELECT * FROM t1 WHERE a=1) +END REPEAT; +END; +$$ +1 +1 +BEGIN NOT ATOMIC +FOR i IN 0..(1 IN (SELECT * FROM t1)) +DO +SELECT i; +END FOR; +END; +$$ +i +0 +i +1 +BEGIN NOT ATOMIC +FOR i IN 0..EXISTS (SELECT * FROM t1 WHERE a=1) +DO +SELECT i; +END FOR; +END; +$$ +i +0 +i +1 +DROP TABLE t1; +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (10); +BEGIN NOT ATOMIC +DECLARE a INT DEFAULT ((10) IN (SELECT * FROM t1)); +SELECT a; +END; +$$ +a +1 +BEGIN NOT ATOMIC +DECLARE a INT DEFAULT EXISTS (SELECT * FROM t1); +SELECT a; +END; +$$ +a +1 +DROP TABLE t1; +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +CREATE FUNCTION f1() RETURNS INT +BEGIN +RETURN ((1) IN (SELECT * FROM t1)); +END; +$$ +CREATE FUNCTION f2() RETURNS INT +BEGIN +RETURN EXISTS (SELECT * FROM t1 WHERE a=1); +END; +$$ +SELECT f1(); +f1() +1 +SELECT f2(); +f2() +1 +DROP FUNCTION f1; +DROP FUNCTION f2; +DROP TABLE t1; +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +BEGIN NOT ATOMIC +DECLARE va INT; +DECLARE cur CURSOR(amin INT) FOR SELECT a FROM t1 WHERE a>amin ORDER BY a; +OPEN cur(1 IN (SELECT * FROM t1)); +FETCH cur INTO va; +SELECT va; +CLOSE cur; +END; +$$ +va +2 +BEGIN NOT ATOMIC +DECLARE va INT; +DECLARE cur CURSOR(amin INT) FOR SELECT a FROM t1 WHERE a>amin ORDER BY a; +OPEN cur(EXISTS (SELECT * FROM t1)); +FETCH cur INTO va; +SELECT va; +CLOSE cur; +END; +$$ +va +2 +DROP TABLE t1; diff --git a/mysql-test/main/sp-expr.test b/mysql-test/main/sp-expr.test new file mode 100644 index 
00000000000..a9b1afede88 --- /dev/null +++ b/mysql-test/main/sp-expr.test @@ -0,0 +1,159 @@ +# Testing expressions of different kinds in various parts of SP syntax + +--echo # +--echo # Start of 10.3 tests +--echo # + +# +# Subselects in SP control structures +# + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +DELIMITER $$; + +BEGIN NOT ATOMIC + CASE ((1) IN (SELECT a FROM t1)) WHEN 1 THEN SELECT 1; + ELSE SELECT NULL; + END CASE; +END; +$$ +BEGIN NOT ATOMIC + CASE (EXISTS (SELECT a FROM t1)) WHEN 1 THEN SELECT 1; + ELSE SELECT NULL; + END CASE; +END; +$$ + +BEGIN NOT ATOMIC + IF ((1) IN (SELECT a FROM t1)) THEN SELECT 1; + ELSE SELECT NULL; + END IF; +END; +$$ +BEGIN NOT ATOMIC + IF (EXISTS (SELECT a FROM t1)) THEN SELECT 1; + ELSE SELECT NULL; + END IF; +END; +$$ + +BEGIN NOT ATOMIC + WHILE ((1234) IN (SELECT * FROM t1)) DO + SELECT 1; + END WHILE; +END; +$$ +BEGIN NOT ATOMIC + WHILE (EXISTS (SELECT * FROM t1 WHERE a=1234)) DO + SELECT 1; + END WHILE; +END; +$$ + +BEGIN NOT ATOMIC + REPEAT + SELECT 1; + UNTIL (1 IN (SELECT * FROM t1)) + END REPEAT; +END; +$$ +BEGIN NOT ATOMIC + REPEAT + SELECT 1; + UNTIL EXISTS (SELECT * FROM t1 WHERE a=1) + END REPEAT; +END; +$$ + +BEGIN NOT ATOMIC + FOR i IN 0..(1 IN (SELECT * FROM t1)) + DO + SELECT i; + END FOR; +END; +$$ +BEGIN NOT ATOMIC + FOR i IN 0..EXISTS (SELECT * FROM t1 WHERE a=1) + DO + SELECT i; + END FOR; +END; +$$ +DELIMITER ;$$ +DROP TABLE t1; + + +# +# Subselects as SP variable default values +# + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (10); +DELIMITER $$; +BEGIN NOT ATOMIC + DECLARE a INT DEFAULT ((10) IN (SELECT * FROM t1)); + SELECT a; +END; +$$ +BEGIN NOT ATOMIC + DECLARE a INT DEFAULT EXISTS (SELECT * FROM t1); + SELECT a; +END; +$$ +DELIMITER ;$$ +DROP TABLE t1; + + +# +# Subselects SP function return values +# + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +DELIMITER $$; +CREATE FUNCTION f1() RETURNS INT +BEGIN + RETURN ((1) IN (SELECT * FROM t1)); +END; +$$ +CREATE FUNCTION f2() RETURNS INT +BEGIN + RETURN EXISTS (SELECT * FROM t1 WHERE a=1); +END; +$$ +DELIMITER ;$$ +SELECT f1(); +SELECT f2(); +DROP FUNCTION f1; +DROP FUNCTION f2; +DROP TABLE t1; + + +# +# Subselects in CURSOR parameters +# + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +DELIMITER $$; +BEGIN NOT ATOMIC + DECLARE va INT; + DECLARE cur CURSOR(amin INT) FOR SELECT a FROM t1 WHERE a>amin ORDER BY a; + OPEN cur(1 IN (SELECT * FROM t1)); + FETCH cur INTO va; + SELECT va; + CLOSE cur; +END; +$$ +BEGIN NOT ATOMIC + DECLARE va INT; + DECLARE cur CURSOR(amin INT) FOR SELECT a FROM t1 WHERE a>amin ORDER BY a; + OPEN cur(EXISTS (SELECT * FROM t1)); + FETCH cur INTO va; + SELECT va; + CLOSE cur; +END; +$$ +DELIMITER ;$$ +DROP TABLE t1; diff --git a/mysql-test/main/sp-innodb.result b/mysql-test/main/sp-innodb.result index 2c3bc7dc4f3..6a4fe5146a4 100644 --- a/mysql-test/main/sp-innodb.result +++ b/mysql-test/main/sp-innodb.result @@ -133,3 +133,37 @@ SET @@innodb_lock_wait_timeout= @innodb_lock_wait_timeout_saved; # # BUG 16041903: End of test case # +# +# MDEV-15035: SP using query with outer join and a parameter +# in ON expression +# +CREATE TABLE t1 ( +id int NOT NULL, +PRIMARY KEY (id) +) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1), (2); +CREATE TABLE t2 ( +id int NOT NULL, +id_foo int NOT NULL, +PRIMARY KEY (id) +) ENGINE=InnoDB; +INSERT INTO t2 VALUES (1, 1); +DROP PROCEDURE IF EXISTS test_proc; +CREATE PROCEDURE test_proc(IN param int) +LANGUAGE SQL +READS SQL DATA +BEGIN +SELECT DISTINCT f.id +FROM t1 f +LEFT OUTER JOIN t2 b 
ON b.id_foo = f.id +WHERE (param <> 0 OR b.id IS NOT NULL); +END| +CALL test_proc(0); +id +1 +CALL test_proc(1); +id +1 +2 +DROP PROCEDURE IF EXISTS test_proc; +DROP TABLE t1, t2; diff --git a/mysql-test/main/sp-innodb.test b/mysql-test/main/sp-innodb.test index 23715166a02..e44a853e713 100644 --- a/mysql-test/main/sp-innodb.test +++ b/mysql-test/main/sp-innodb.test @@ -158,5 +158,47 @@ SET @@innodb_lock_wait_timeout= @innodb_lock_wait_timeout_saved; --echo # BUG 16041903: End of test case --echo # +--echo # +--echo # MDEV-15035: SP using query with outer join and a parameter +--echo # in ON expression +--echo # + +CREATE TABLE t1 ( + id int NOT NULL, + PRIMARY KEY (id) +) ENGINE=InnoDB; + +INSERT INTO t1 VALUES (1), (2); + +CREATE TABLE t2 ( + id int NOT NULL, + id_foo int NOT NULL, + PRIMARY KEY (id) +) ENGINE=InnoDB; + +INSERT INTO t2 VALUES (1, 1); + +--disable_warnings +DROP PROCEDURE IF EXISTS test_proc; +--enable_warnings + +DELIMITER |; +CREATE PROCEDURE test_proc(IN param int) +LANGUAGE SQL +READS SQL DATA +BEGIN + SELECT DISTINCT f.id + FROM t1 f + LEFT OUTER JOIN t2 b ON b.id_foo = f.id + WHERE (param <> 0 OR b.id IS NOT NULL); +END| +DELIMITER ;| + +CALL test_proc(0); +CALL test_proc(1); + +DROP PROCEDURE IF EXISTS test_proc; +DROP TABLE t1, t2; + # Wait till we reached the initial number of concurrent sessions --source include/wait_until_count_sessions.inc diff --git a/mysql-test/main/sp-row.result b/mysql-test/main/sp-row.result index d3be7c2a9b9..ac09f7572c3 100644 --- a/mysql-test/main/sp-row.result +++ b/mysql-test/main/sp-row.result @@ -1286,8 +1286,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` mediumint(9) DEFAULT NULL, - `rec.var` mediumint(9) DEFAULT NULL + `var` mediumint(8) DEFAULT NULL, + `rec.var` mediumint(8) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -1296,8 +1296,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` mediumint(9) DEFAULT NULL, - `rec.var` mediumint(9) DEFAULT NULL + `var` mediumint(8) DEFAULT NULL, + `rec.var` mediumint(8) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -1306,8 +1306,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` mediumint(9) DEFAULT NULL, - `rec.var` mediumint(9) DEFAULT NULL + `var` mediumint(8) DEFAULT NULL, + `rec.var` mediumint(8) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -1316,8 +1316,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` mediumint(9) DEFAULT NULL, - `rec.var` mediumint(9) DEFAULT NULL + `var` mediumint(8) DEFAULT NULL, + `rec.var` mediumint(8) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -1326,8 +1326,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` mediumint(9) DEFAULT NULL, - `rec.var` mediumint(9) DEFAULT NULL + `var` mediumint(8) DEFAULT NULL, + `rec.var` mediumint(8) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -1336,8 +1336,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` mediumint(9) DEFAULT NULL, - `rec.var` mediumint(9) DEFAULT NULL + `var` mediumint(8) DEFAULT NULL, + `rec.var` mediumint(8) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -1346,8 +1346,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 
CREATE TABLE `t1` ( - `var` mediumint(9) DEFAULT NULL, - `rec.var` mediumint(9) DEFAULT NULL + `var` mediumint(8) DEFAULT NULL, + `rec.var` mediumint(8) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -1356,8 +1356,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` mediumint(9) DEFAULT NULL, - `rec.var` mediumint(9) DEFAULT NULL + `var` mediumint(8) DEFAULT NULL, + `rec.var` mediumint(8) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -1795,8 +1795,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` varchar(1) DEFAULT NULL, - `rec.var` varchar(1) DEFAULT NULL + `var` char(1) DEFAULT NULL, + `rec.var` char(1) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -1805,8 +1805,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` varbinary(1) DEFAULT NULL, - `rec.var` varbinary(1) DEFAULT NULL + `var` binary(1) DEFAULT NULL, + `rec.var` binary(1) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -1815,8 +1815,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` varchar(1) DEFAULT NULL, - `rec.var` varchar(1) DEFAULT NULL + `var` char(1) DEFAULT NULL, + `rec.var` char(1) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -1825,8 +1825,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` varchar(10) DEFAULT NULL, - `rec.var` varchar(10) DEFAULT NULL + `var` char(10) DEFAULT NULL, + `rec.var` char(10) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -1835,8 +1835,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` varchar(10) CHARACTER SET utf8 DEFAULT NULL, - `rec.var` varchar(10) CHARACTER SET utf8 DEFAULT NULL + `var` char(10) CHARACTER SET utf8 DEFAULT NULL, + `rec.var` char(10) CHARACTER SET utf8 DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -1845,8 +1845,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` varbinary(10) DEFAULT NULL, - `rec.var` varbinary(10) DEFAULT NULL + `var` binary(10) DEFAULT NULL, + `rec.var` binary(10) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -2084,8 +2084,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` mediumtext CHARACTER SET utf8 DEFAULT NULL, - `rec.var` mediumtext CHARACTER SET utf8 DEFAULT NULL + `var` text CHARACTER SET utf8 DEFAULT NULL, + `rec.var` text CHARACTER SET utf8 DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -2094,8 +2094,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` text CHARACTER SET utf8 DEFAULT NULL, - `rec.var` text CHARACTER SET utf8 DEFAULT NULL + `var` tinytext CHARACTER SET utf8 DEFAULT NULL, + `rec.var` tinytext CHARACTER SET utf8 DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; @@ -2104,8 +2104,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `var` longtext CHARACTER SET utf8 DEFAULT NULL, - `rec.var` longtext CHARACTER SET utf8 DEFAULT NULL + `var` mediumtext CHARACTER SET utf8 DEFAULT NULL, + `rec.var` mediumtext CHARACTER SET utf8 DEFAULT NULL ) ENGINE=MyISAM DEFAULT 
CHARSET=latin1 DROP TABLE t1; DROP PROCEDURE p1; diff --git a/mysql-test/main/sp-vars.result b/mysql-test/main/sp-vars.result index ac1566d0c6e..0d4d18c577d 100644 --- a/mysql-test/main/sp-vars.result +++ b/mysql-test/main/sp-vars.result @@ -1241,3 +1241,72 @@ t1 CREATE TABLE "t1" ( "a" year(4) DEFAULT NULL ) DROP PROCEDURE p1; +# +# MDEV-15960 Wrong data type on CREATE..SELECT char_or_enum_or_text_spvar +# +BEGIN NOT ATOMIC +DECLARE var TINYTEXT CHARACTER SET utf8; +CREATE TABLE t1 AS SELECT var; +END; +$$ +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "var" tinytext CHARACTER SET utf8 DEFAULT NULL +) +DROP TABLE t1; +BEGIN NOT ATOMIC +DECLARE var TEXT CHARACTER SET utf8; +CREATE TABLE t1 AS SELECT var; +END; +$$ +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "var" text CHARACTER SET utf8 DEFAULT NULL +) +DROP TABLE t1; +BEGIN NOT ATOMIC +DECLARE var MEDIUMTEXT CHARACTER SET utf8; +CREATE TABLE t1 AS SELECT var; +END; +$$ +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "var" mediumtext CHARACTER SET utf8 DEFAULT NULL +) +DROP TABLE t1; +BEGIN NOT ATOMIC +DECLARE var LONGTEXT CHARACTER SET utf8; +CREATE TABLE t1 AS SELECT var; +END; +$$ +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "var" longtext CHARACTER SET utf8 DEFAULT NULL +) +DROP TABLE t1; +BEGIN NOT ATOMIC +DECLARE var CHAR(1); +CREATE TABLE t1 AS SELECT var; +END; +$$ +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "var" char(1) DEFAULT NULL +) +DROP TABLE t1; +BEGIN NOT ATOMIC +DECLARE var ENUM('a'); +CREATE TABLE t1 AS SELECT var; +END; +$$ +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "var" char(1) DEFAULT NULL +) +DROP TABLE t1; diff --git a/mysql-test/main/sp-vars.test b/mysql-test/main/sp-vars.test index 57d5563e609..ba8cd4f5ca8 100644 --- a/mysql-test/main/sp-vars.test +++ b/mysql-test/main/sp-vars.test @@ -1536,3 +1536,70 @@ $$ DELIMITER ;$$ CALL p1; DROP PROCEDURE p1; + + +--echo # +--echo # MDEV-15960 Wrong data type on CREATE..SELECT char_or_enum_or_text_spvar +--echo # + +DELIMITER $$; +BEGIN NOT ATOMIC + DECLARE var TINYTEXT CHARACTER SET utf8; + CREATE TABLE t1 AS SELECT var; +END; +$$ +DELIMITER ;$$ +SHOW CREATE TABLE t1; +DROP TABLE t1; + +DELIMITER $$; +BEGIN NOT ATOMIC + DECLARE var TEXT CHARACTER SET utf8; + CREATE TABLE t1 AS SELECT var; +END; +$$ +DELIMITER ;$$ +SHOW CREATE TABLE t1; +DROP TABLE t1; + +DELIMITER $$; +BEGIN NOT ATOMIC + DECLARE var MEDIUMTEXT CHARACTER SET utf8; + CREATE TABLE t1 AS SELECT var; +END; +$$ +DELIMITER ;$$ +SHOW CREATE TABLE t1; +DROP TABLE t1; + +DELIMITER $$; +BEGIN NOT ATOMIC + DECLARE var LONGTEXT CHARACTER SET utf8; + CREATE TABLE t1 AS SELECT var; +END; +$$ +DELIMITER ;$$ +SHOW CREATE TABLE t1; +DROP TABLE t1; + + +DELIMITER $$; +BEGIN NOT ATOMIC + DECLARE var CHAR(1); + CREATE TABLE t1 AS SELECT var; +END; +$$ +DELIMITER ;$$ +SHOW CREATE TABLE t1; +DROP TABLE t1; + + +DELIMITER $$; +BEGIN NOT ATOMIC + DECLARE var ENUM('a'); + CREATE TABLE t1 AS SELECT var; +END; +$$ +DELIMITER ;$$ +SHOW CREATE TABLE t1; +DROP TABLE t1; diff --git a/mysql-test/main/sp.result b/mysql-test/main/sp.result index 582ea33d903..64caf0fd554 100644 --- a/mysql-test/main/sp.result +++ b/mysql-test/main/sp.result @@ -8251,6 +8251,17 @@ DROP PROCEDURE proc_13; DROP PROCEDURE proc_select; DROP TABLE t1, t2; SET max_sp_recursion_depth=default; +# +# MDEV-15347: Valgrind or ASAN errors in mysql_make_view on query +# from information_schema +# +CREATE VIEW v AS SELECT 1; +CREATE 
FUNCTION f() RETURNS INT RETURN 1; +SELECT * FROM INFORMATION_SCHEMA.TABLES JOIN INFORMATION_SCHEMA.PARAMETERS +UNION +SELECT * FROM INFORMATION_SCHEMA.TABLES JOIN INFORMATION_SCHEMA.PARAMETERS; +DROP FUNCTION f; +DROP VIEW v; #End of 10.1 tests # # MDEV-11081: CURSOR for query with GROUP BY @@ -8571,3 +8582,107 @@ affected rows: 5 DROP PROCEDURE p1; DROP PROCEDURE p2; drop table t1; +# +# MDEV-15957 Unexpected "Data too long" when doing CREATE..SELECT with stored functions +# +CREATE TABLE t1 (a INT(3)); +INSERT INTO t1 VALUES (-999); +CREATE FUNCTION f1(a INT(3)) RETURNS INT(3) RETURN a; +CREATE TABLE t2 AS SELECT CONCAT(a) AS c1, CONCAT(COALESCE(a)) AS c2, CONCAT(f1(a)) AS c3 FROM t1; +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `c1` varchar(11) DEFAULT NULL, + `c2` varchar(11) DEFAULT NULL, + `c3` varchar(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1,t2; +DROP FUNCTION f1; +CREATE FUNCTION f1() RETURNS TINYTEXT CHARACTER SET latin1 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` tinytext DEFAULT NULL, + `c2` tinytext DEFAULT NULL, + `c3` varchar(255) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE FUNCTION f1() RETURNS TEXT CHARACTER SET latin1 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` text DEFAULT NULL, + `c2` text DEFAULT NULL, + `c3` text DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE FUNCTION f1() RETURNS MEDIUMTEXT CHARACTER SET latin1 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` mediumtext DEFAULT NULL, + `c2` mediumtext DEFAULT NULL, + `c3` mediumtext DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE FUNCTION f1() RETURNS LONGTEXT CHARACTER SET latin1 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` longtext DEFAULT NULL, + `c2` longtext DEFAULT NULL, + `c3` longtext DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE FUNCTION f1() RETURNS TINYTEXT CHARACTER SET utf8 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` tinytext CHARACTER SET utf8 DEFAULT NULL, + `c2` text CHARACTER SET utf8 DEFAULT NULL, + `c3` varchar(255) CHARACTER SET utf8 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE FUNCTION f1() RETURNS TEXT CHARACTER SET utf8 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` text CHARACTER SET utf8 DEFAULT NULL, + `c2` mediumtext CHARACTER SET utf8 DEFAULT NULL, + `c3` mediumtext CHARACTER SET utf8 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE FUNCTION f1() RETURNS MEDIUMTEXT CHARACTER SET utf8 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE 
`t1` ( + `c1` mediumtext CHARACTER SET utf8 DEFAULT NULL, + `c2` longtext CHARACTER SET utf8 DEFAULT NULL, + `c3` longtext CHARACTER SET utf8 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; +CREATE FUNCTION f1() RETURNS LONGTEXT CHARACTER SET utf8 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` longtext CHARACTER SET utf8 DEFAULT NULL, + `c2` longtext CHARACTER SET utf8 DEFAULT NULL, + `c3` longtext CHARACTER SET utf8 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION f1; diff --git a/mysql-test/main/sp.test b/mysql-test/main/sp.test index 7a6fc3e711d..cfd6604acce 100644 --- a/mysql-test/main/sp.test +++ b/mysql-test/main/sp.test @@ -9749,6 +9749,21 @@ DROP TABLE t1, t2; SET max_sp_recursion_depth=default; +--echo # +--echo # MDEV-15347: Valgrind or ASAN errors in mysql_make_view on query +--echo # from information_schema +--echo # + +CREATE VIEW v AS SELECT 1; +CREATE FUNCTION f() RETURNS INT RETURN 1; +--disable_result_log +SELECT * FROM INFORMATION_SCHEMA.TABLES JOIN INFORMATION_SCHEMA.PARAMETERS +UNION +SELECT * FROM INFORMATION_SCHEMA.TABLES JOIN INFORMATION_SCHEMA.PARAMETERS; +--enable_result_log +DROP FUNCTION f; +DROP VIEW v; + --echo #End of 10.1 tests --echo # @@ -10120,3 +10135,64 @@ CALL p2(); DROP PROCEDURE p1; DROP PROCEDURE p2; drop table t1; + +--echo # +--echo # MDEV-15957 Unexpected "Data too long" when doing CREATE..SELECT with stored functions +--echo # + +CREATE TABLE t1 (a INT(3)); +INSERT INTO t1 VALUES (-999); +CREATE FUNCTION f1(a INT(3)) RETURNS INT(3) RETURN a; +CREATE TABLE t2 AS SELECT CONCAT(a) AS c1, CONCAT(COALESCE(a)) AS c2, CONCAT(f1(a)) AS c3 FROM t1; +SHOW CREATE TABLE t2; +DROP TABLE t1,t2; +DROP FUNCTION f1; + + +CREATE FUNCTION f1() RETURNS TINYTEXT CHARACTER SET latin1 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +CREATE FUNCTION f1() RETURNS TEXT CHARACTER SET latin1 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +CREATE FUNCTION f1() RETURNS MEDIUMTEXT CHARACTER SET latin1 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +CREATE FUNCTION f1() RETURNS LONGTEXT CHARACTER SET latin1 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +CREATE FUNCTION f1() RETURNS TINYTEXT CHARACTER SET utf8 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +CREATE FUNCTION f1() RETURNS TEXT CHARACTER SET utf8 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +CREATE FUNCTION f1() RETURNS MEDIUMTEXT CHARACTER SET utf8 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; + +CREATE FUNCTION f1() RETURNS LONGTEXT CHARACTER SET utf8 RETURN ''; +CREATE TABLE t1 AS SELECT f1() AS c1, COALESCE(f1()) AS c2, CONCAT(f1()) AS c3; +SHOW CREATE TABLE t1; +DROP TABLE t1; +DROP FUNCTION f1; 
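A side note on the MDEV-15957 additions above: the varchar(11) columns in the expected results follow from CONCAT() over an INT-returning stored function being sized for the full signed INT range (11 characters including the sign), not for the declared INT(3) display width. A minimal sketch of the same check done interactively, assuming a scratch database; the names t1, t2 and f1 are illustrative only and this is not part of the patch:

-- mirror the test: an INT(3) column and an INT(3)-returning function
CREATE TABLE t1 (a INT(3));
INSERT INTO t1 VALUES (-999);
CREATE FUNCTION f1(a INT(3)) RETURNS INT(3) RETURN a;
-- CREATE..SELECT derives the result column type from CONCAT(f1(a))
CREATE TABLE t2 AS SELECT CONCAT(f1(a)) AS c3 FROM t1;
-- expected COLUMN_TYPE: varchar(11), i.e. sized for a full signed INT,
-- regardless of the INT(3) display width
SELECT COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS
 WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 't2' AND COLUMN_NAME = 'c3';
DROP TABLE t1, t2;
DROP FUNCTION f1;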
diff --git a/mysql-test/main/statement-expr.result b/mysql-test/main/statement-expr.result new file mode 100644 index 00000000000..c73ed2825ec --- /dev/null +++ b/mysql-test/main/statement-expr.result @@ -0,0 +1,67 @@ +# +# Start of 10.3 tests +# +CREATE TABLE t1 (id INT, id1 INT); +INSERT INTO t1 VALUES (1,7); +INSERT INTO t1 VALUES (1,8); +SELECT ROW(1,7) IN (SELECT id, id1 FROM t1 WHERE id1= 8); +ROW(1,7) IN (SELECT id, id1 FROM t1 WHERE id1= 8) +0 +EXECUTE IMMEDIATE 'SELECT ROW(1, 7) IN (SELECT id, id1 FROM t1 WHERE id1= 8)'; +ROW(1, 7) IN (SELECT id, id1 FROM t1 WHERE id1= 8) +0 +DROP TABLE t1; +EXECUTE IMMEDIATE 'SELECT ?' USING (1 IN (SELECT * FROM t1)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT * FROM t1))' at line 1 +EXECUTE IMMEDIATE 'SELECT ?' USING (SELECT * FROM t1); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT * FROM t1)' at line 1 +CREATE TABLE t1 (id INT); +INSERT INTO t1 VALUES (10); +CREATE PROCEDURE p1(a INT) BEGIN END; +CALL p1((1) IN (SELECT * FROM t1)); +CALL p1(EXISTS (SELECT * FROM t1)); +DROP PROCEDURE p1; +DROP TABLE t1; +SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO=(1 IN (SELECT * FROM t1)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '(1 IN (SELECT * FROM t1))' at line 1 +SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO=EXISTS (SELECT * FROM t1); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'EXISTS (SELECT * FROM t1)' at line 1 +BEGIN NOT ATOMIC +DECLARE CONTINUE HANDLER FOR SQLWARNING +RESIGNAL SET MYSQL_ERRNO=(1 IN (SELECT * FROM t1)); +SIGNAL SQLSTATE '01000'; +END; +$$ +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '(1 IN (SELECT * FROM t1)); +SIGNAL SQLSTATE '01000'; +END' at line 3 +BEGIN NOT ATOMIC +DECLARE CONTINUE HANDLER FOR SQLWARNING +RESIGNAL SET MYSQL_ERRNO=EXISTS (SELECT * FROM t1); +SIGNAL SQLSTATE '01000'; +END; +$$ +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'EXISTS (SELECT * FROM t1); +SIGNAL SQLSTATE '01000'; +END' at line 3 +PREPARE stmt FROM (1 IN (SELECT * FROM t1)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT * FROM t1))' at line 1 +PREPARE stmt FROM EXISTS (SELECT * FROM t1); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '(SELECT * FROM t1)' at line 1 +EXECUTE IMMEDIATE (1 IN (SELECT * FROM t1)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT * FROM t1))' at line 1 +EXECUTE IMMEDIATE EXISTS (SELECT * FROM t1); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '(SELECT * FROM t1)' at line 1 +GET DIAGNOSTICS CONDITION (1 IN (SELECT * FROM t1)) @errno=MYSQL_ERRNO; +ERROR 42000: You have an error 
in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '(1 IN (SELECT * FROM t1)) @errno=MYSQL_ERRNO' at line 1 +GET DIAGNOSTICS CONDITION EXISTS (SELECT * FROM t1) @errno=MYSQL_ERRNO; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'EXISTS (SELECT * FROM t1) @errno=MYSQL_ERRNO' at line 1 +PURGE BINARY LOGS BEFORE (1 IN (SELECT * FROM t1)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT * FROM t1))' at line 1 +PURGE BINARY LOGS BEFORE EXISTS (SELECT * FROM t1); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '(SELECT * FROM t1)' at line 1 +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +DO 1 IN (SELECT * FROM t1); +DO EXISTS (SELECT * FROM t1); +DROP TABLE t1; diff --git a/mysql-test/main/statement-expr.test b/mysql-test/main/statement-expr.test new file mode 100644 index 00000000000..7e968d4fb66 --- /dev/null +++ b/mysql-test/main/statement-expr.test @@ -0,0 +1,80 @@ +# Testing expressions of different kinds in various non-SELECT statements + +--echo # +--echo # Start of 10.3 tests +--echo # + +# +# Subselects in non-SELECT statements +# + +CREATE TABLE t1 (id INT, id1 INT); +INSERT INTO t1 VALUES (1,7); +INSERT INTO t1 VALUES (1,8); +SELECT ROW(1,7) IN (SELECT id, id1 FROM t1 WHERE id1= 8); +EXECUTE IMMEDIATE 'SELECT ROW(1, 7) IN (SELECT id, id1 FROM t1 WHERE id1= 8)'; +DROP TABLE t1; + +--error ER_PARSE_ERROR +EXECUTE IMMEDIATE 'SELECT ?' USING (1 IN (SELECT * FROM t1)); +--error ER_PARSE_ERROR +EXECUTE IMMEDIATE 'SELECT ?' 
USING (SELECT * FROM t1); + + +CREATE TABLE t1 (id INT); +INSERT INTO t1 VALUES (10); +CREATE PROCEDURE p1(a INT) BEGIN END; +CALL p1((1) IN (SELECT * FROM t1)); +CALL p1(EXISTS (SELECT * FROM t1)); +DROP PROCEDURE p1; +DROP TABLE t1; + +--error ER_PARSE_ERROR +SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO=(1 IN (SELECT * FROM t1)); +--error ER_PARSE_ERROR +SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO=EXISTS (SELECT * FROM t1); + +DELIMITER $$; +--error ER_PARSE_ERROR +BEGIN NOT ATOMIC + DECLARE CONTINUE HANDLER FOR SQLWARNING + RESIGNAL SET MYSQL_ERRNO=(1 IN (SELECT * FROM t1)); + SIGNAL SQLSTATE '01000'; +END; +$$ +--error ER_PARSE_ERROR +BEGIN NOT ATOMIC + DECLARE CONTINUE HANDLER FOR SQLWARNING + RESIGNAL SET MYSQL_ERRNO=EXISTS (SELECT * FROM t1); + SIGNAL SQLSTATE '01000'; +END; +$$ +DELIMITER ;$$ + + +--error ER_PARSE_ERROR +PREPARE stmt FROM (1 IN (SELECT * FROM t1)); +--error ER_PARSE_ERROR +PREPARE stmt FROM EXISTS (SELECT * FROM t1); + +--error ER_PARSE_ERROR +EXECUTE IMMEDIATE (1 IN (SELECT * FROM t1)); +--error ER_PARSE_ERROR +EXECUTE IMMEDIATE EXISTS (SELECT * FROM t1); + +--error ER_PARSE_ERROR +GET DIAGNOSTICS CONDITION (1 IN (SELECT * FROM t1)) @errno=MYSQL_ERRNO; +--error ER_PARSE_ERROR +GET DIAGNOSTICS CONDITION EXISTS (SELECT * FROM t1) @errno=MYSQL_ERRNO; + +--error ER_PARSE_ERROR +PURGE BINARY LOGS BEFORE (1 IN (SELECT * FROM t1)); +--error ER_PARSE_ERROR +PURGE BINARY LOGS BEFORE EXISTS (SELECT * FROM t1); + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +DO 1 IN (SELECT * FROM t1); +DO EXISTS (SELECT * FROM t1); +DROP TABLE t1; + diff --git a/mysql-test/main/statistics_close.result b/mysql-test/main/statistics_close.result new file mode 100644 index 00000000000..d7f76e304e4 --- /dev/null +++ b/mysql-test/main/statistics_close.result @@ -0,0 +1,11 @@ +CREATE TABLE t1 (i int); +connect con1,localhost,root,,test; +RENAME TABLE t1 TO t2; +connection default; +FLUSH TABLES; +connection con1; +disconnect con1; +connection default; +DROP TABLE IF EXISTS t1, t2; +Warnings: +Note 1051 Unknown table 'test.t1' diff --git a/mysql-test/main/statistics_close.test b/mysql-test/main/statistics_close.test new file mode 100644 index 00000000000..de22a5a44fe --- /dev/null +++ b/mysql-test/main/statistics_close.test @@ -0,0 +1,18 @@ +# +# MDEV-16123 ASAN heap-use-after-free handler::ha_index_or_rnd_end +# MDEV-13828 Segmentation fault on RENAME TABLE +# + +CREATE TABLE t1 (i int); +--connect (con1,localhost,root,,test) +--send +RENAME TABLE t1 TO t2; +--connection default +FLUSH TABLES; +--connection con1 +--reap + +# Cleanup +--disconnect con1 +--connection default +DROP TABLE IF EXISTS t1, t2; diff --git a/mysql-test/main/status.result b/mysql-test/main/status.result index 18cde57b295..688d8acee1a 100644 --- a/mysql-test/main/status.result +++ b/mysql-test/main/status.result @@ -426,6 +426,18 @@ Threads_running 1 SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='THREADS_RUNNING'; VARIABLE_VALUE 1 +# +# MDEV-15915 Add Feature_json status variable. 
+# +SHOW STATUS LIKE 'Feature_json'; +Variable_name Value +Feature_json 0 +select json_valid('123'); +json_valid('123') +1 +SHOW STATUS LIKE 'Feature_json'; +Variable_name Value +Feature_json 1 connection default; set @@global.concurrent_insert= @old_concurrent_insert; SET GLOBAL log_output = @old_log_output; diff --git a/mysql-test/main/status.test b/mysql-test/main/status.test index 92fba9ab0a6..221a24aedf4 100644 --- a/mysql-test/main/status.test +++ b/mysql-test/main/status.test @@ -430,6 +430,13 @@ FLUSH STATUS; SHOW STATUS LIKE 'Threads_running'; SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='THREADS_RUNNING'; +--echo # +--echo # MDEV-15915 Add Feature_json status variable. +--echo # +SHOW STATUS LIKE 'Feature_json'; +select json_valid('123'); +SHOW STATUS LIKE 'Feature_json'; + # Restore global concurrent_insert value. Keep in the end of the test file. --connection default set @@global.concurrent_insert= @old_concurrent_insert; diff --git a/mysql-test/main/subselect-crash_15755.result b/mysql-test/main/subselect-crash_15755.result new file mode 100644 index 00000000000..81b4bd16ab5 --- /dev/null +++ b/mysql-test/main/subselect-crash_15755.result @@ -0,0 +1,317 @@ +set global innodb_stats_persistent= 1; +drop table if exists t1; +Warnings: +Note 1051 Unknown table 'test.t1' +create table t1 ( +f1 bigint(20) default 0, +f2 varchar(50) default '', +f3 int(10) default 0, +f4 bigint(20) default 0, +f5 bigint(20) default 0, +f6 varchar(50) default '', +f7 varchar(64) default '', +f8 varchar(30) default '', +f9 varchar(30) default '', +f10 bigint(20) default 0, +f11 bigint(20) default 0, +f12 bigint(20) default 0, +f13 bigint(20) default 0, +f14 varchar(50) default '', +f15 varchar(100) default '', +f16 varchar(30) default '', +f17 varchar(40) default '', +f18 varchar(30) default '', +f19 varchar(10) default '', +f20 varchar(30) default '', +f21 int(10) default 0, +f22 int(10) default 0, +f23 int(10) default 0, +f24 int(10) default 0, +f25 varchar(20) default '', +f26 varchar(20) default '', +f27 varchar(100) default '', +f28 varchar(55) default '', +f29 varchar(20) default '', +f30 varchar(100) default '', +f31 varchar(30) default '', +f32 varchar(20) default '', +f33 int(10) default 0, +f34 int(10) default 0, +f35 varchar(30) default '', +f36 varchar(30) default '', +f37 varchar(30) default '', +f38 varchar(20) default '', +f39 tinyint(4) default 0, +f40 tinyint(4) default 0, +f41 bigint(20) default 0, +f42 varchar(50) default '', +f43 varchar(50) default '', +f44 varchar(50) default '', +f45 int(10) default 0, +f46 tinyint(1) default 0 +) engine=innodb row_format=dynamic; +insert into t1 () values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +select * from t1 where f2 in (select f2 from t1 group by f2 having count(distinct f3) = 1); +f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f16 f17 f18 f19 f20 f21 f22 f23 f24 f25 f26 f27 f28 f29 f30 f31 f32 f33 f34 f35 f36 f37 f38 f39 f40 f41 f42 f43 f44 f45 f46 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +drop table t1; +set global innodb_stats_persistent= 0; diff --git a/mysql-test/main/subselect-crash_15755.test b/mysql-test/main/subselect-crash_15755.test new file mode 100644 index 00000000000..79e259d6337 --- /dev/null +++ b/mysql-test/main/subselect-crash_15755.test @@ -0,0 +1,60 @@ +--source include/have_innodb.inc +set global innodb_stats_persistent= 1; +drop table if exists t1; +create table t1 ( + f1 bigint(20) default 0, + f2 varchar(50) default '', + f3 int(10) default 0, + f4 bigint(20) default 0, + f5 bigint(20) default 0, + f6 varchar(50) default '', + f7 varchar(64) default '', + f8 varchar(30) default '', + f9 varchar(30) default '', + f10 bigint(20) default 0, + f11 bigint(20) default 0, + f12 bigint(20) default 0, + f13 bigint(20) default 0, + f14 varchar(50) default '', + f15 varchar(100) default '', + f16 varchar(30) default '', + f17 varchar(40) default '', + f18 varchar(30) default '', + f19 varchar(10) default '', + f20 varchar(30) default '', + f21 int(10) default 0, + f22 int(10) default 0, + f23 int(10) default 0, + f24 int(10) default 0, + f25 
varchar(20) default '', + f26 varchar(20) default '', + f27 varchar(100) default '', + f28 varchar(55) default '', + f29 varchar(20) default '', + f30 varchar(100) default '', + f31 varchar(30) default '', + f32 varchar(20) default '', + f33 int(10) default 0, + f34 int(10) default 0, + f35 varchar(30) default '', + f36 varchar(30) default '', + f37 varchar(30) default '', + f38 varchar(20) default '', + f39 tinyint(4) default 0, + f40 tinyint(4) default 0, + f41 bigint(20) default 0, + f42 varchar(50) default '', + f43 varchar(50) default '', + f44 varchar(50) default '', + f45 int(10) default 0, + f46 tinyint(1) default 0 +) engine=innodb row_format=dynamic; + +insert into t1 () values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +insert into t1 select * from t1; +select * from t1 where f2 in (select f2 from t1 group by f2 having count(distinct f3) = 1); +drop table t1; +set global innodb_stats_persistent= 0; diff --git a/mysql-test/main/subselect4.result b/mysql-test/main/subselect4.result index 25290650e9b..bd9ecdc642b 100644 --- a/mysql-test/main/subselect4.result +++ b/mysql-test/main/subselect4.result @@ -2503,6 +2503,26 @@ SELECT 2 IN (SELECT 2 from DUAL WHERE 1 != 1); SET optimizer_switch= @@global.optimizer_switch; set @@tmp_table_size= @@global.tmp_table_size; # +# MDEV-14515: Wrong results for tableless query with subquery in WHERE +# and implicit aggregation +# +create table t1 (i1 int, i2 int); +insert into t1 values (1314, 1084),(1330, 1084),(1401, 1084),(580, 1084); +create table t2 (cd int); +insert into t2 values +(1330), (1330), (1330), (1330), (1330), (1330), (1330), (1330), +(1330), (1330), (1330), (1330), (1330), (1330), (1330), (1330); +select max(10) from dual +where exists (select 1 from t2 join t1 on t1.i1 = t2.cd and t1.i2 = 345); +max(10) +NULL +insert into t2 select * from t2; +select max(10) from dual +where exists (select 1 from t2 join t1 on t1.i1 = t2.cd and t1.i2 = 345); +max(10) +NULL +DROP TABLE t1,t2; +# # MDEV-10232 Scalar result of subquery changes after adding an outer select stmt # create table t1(c1 int, c2 int, primary key(c2)); diff --git a/mysql-test/main/subselect4.test b/mysql-test/main/subselect4.test index 77ea117b15f..d5a40419185 100644 --- a/mysql-test/main/subselect4.test +++ b/mysql-test/main/subselect4.test @@ -2043,6 +2043,29 @@ SELECT 2 IN (SELECT 2 from DUAL WHERE 1 != 1); SET optimizer_switch= @@global.optimizer_switch; set @@tmp_table_size= @@global.tmp_table_size; +--echo # +--echo # MDEV-14515: Wrong results for tableless query with subquery in WHERE +--echo # and implicit aggregation +--echo # + +create table t1 (i1 int, i2 int); +insert into t1 values (1314, 1084),(1330, 1084),(1401, 1084),(580, 1084); + +create table t2 (cd int); +insert into t2 values + (1330), (1330), (1330), (1330), (1330), (1330), (1330), (1330), + (1330), (1330), (1330), (1330), (1330), (1330), (1330), (1330); + +select max(10) from dual + where exists (select 1 from t2 join t1 on t1.i1 = t2.cd and t1.i2 = 345); + +insert into t2 select * from t2; + +select max(10) from dual + where exists (select 1 from t2 join t1 on t1.i1 = t2.cd and t1.i2 = 345); + +DROP TABLE t1,t2; + --echo # --echo # MDEV-10232 Scalar result of subquery changes after adding an outer select stmt --echo # diff --git a/mysql-test/main/subselect_extra.result b/mysql-test/main/subselect_extra.result index 73642c09324..a3a0f1f9a15 100644 --- a/mysql-test/main/subselect_extra.result 
+++ b/mysql-test/main/subselect_extra.result @@ -434,7 +434,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t2 system NULL NULL NULL NULL 1 1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where 1 PRIMARY ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t3); Using join buffer (flat, BNL join) -3 DERIVED t1 ALL NULL NULL NULL NULL 3 +3 DERIVED t1 ALL NULL NULL NULL NULL 3 Using where SELECT * FROM t3 WHERE t3.b IN (SELECT v1.b FROM v1, t2 WHERE t2.c = v1.c AND t2.c = v1.b AND v1.b = t3.c); @@ -449,7 +449,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t2 system NULL NULL NULL NULL 1 1 PRIMARY ref key1 key1 8 const,const 0 Start temporary 1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where; End temporary; Using join buffer (flat, BNL join) -3 DERIVED t1 ALL NULL NULL NULL NULL 3 +3 DERIVED t1 ALL NULL NULL NULL NULL 3 Using where SELECT * FROM t3 WHERE t3.b IN (SELECT v1.b FROM v1, t2 WHERE t2.c = v1.c AND t2.c = v1.b AND v1.b = t3.c); diff --git a/mysql-test/main/subselect_sj.result b/mysql-test/main/subselect_sj.result index 9631192da33..b50ae942899 100644 --- a/mysql-test/main/subselect_sj.result +++ b/mysql-test/main/subselect_sj.result @@ -3165,4 +3165,30 @@ id select_type table type possible_keys key key_len ref rows filtered Extra Warnings: Note 1003 /* select#1 */ select `test`.`t1`.`c1` AS `c1`,`test`.`t2`.`c2` AS `c2`,`test`.`t4`.`c4` AS `c4` from `test`.`t1` left join (`test`.`t2` join `test`.`t4`) on(`test`.`t2`.`c2` = `test`.`t1`.`c1` and (`test`.`t1`.`c1`,(/* select#3 */ select `test`.`t3`.`c3` from `test`.`t3` where (`test`.`t2`.`c2`) = `test`.`t3`.`c3`))) where 1 DROP TABLE t1,t2,t3,t4; +# +# MDEV-13699: Assertion `!new_field->field_name.str || +# strlen(new_field->field_name.str) == new_field->field_name.length' +# failed in create_tmp_table on 2nd execution of PS with semijoin +# +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (b INT); +INSERT INTO t2 VALUES (3),(4); +CREATE TABLE t3 (c INT); +CREATE ALGORITHM=MERGE VIEW v3 AS SELECT * FROM t3; +INSERT INTO t3 VALUES (5),(6); +PREPARE stmt FROM +"SELECT * FROM t1 + WHERE EXISTS ( + SELECT * FROM t2 WHERE t1.a IN ( SELECT c AS fld FROM v3 ) + )"; +EXECUTE stmt; +a +EXECUTE stmt; +a +EXECUTE stmt; +a +drop view v3; +drop table t1,t2,t3; +# End of 5.5 test set optimizer_switch=@subselect_sj_tmp; diff --git a/mysql-test/main/subselect_sj.test b/mysql-test/main/subselect_sj.test index 6fdccee339d..04770761553 100644 --- a/mysql-test/main/subselect_sj.test +++ b/mysql-test/main/subselect_sj.test @@ -2846,5 +2846,35 @@ eval EXPLAIN EXTENDED $q2; DROP TABLE t1,t2,t3,t4; +--echo # +--echo # MDEV-13699: Assertion `!new_field->field_name.str || +--echo # strlen(new_field->field_name.str) == new_field->field_name.length' +--echo # failed in create_tmp_table on 2nd execution of PS with semijoin +--echo # + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2); + +CREATE TABLE t2 (b INT); +INSERT INTO t2 VALUES (3),(4); + +CREATE TABLE t3 (c INT); +CREATE ALGORITHM=MERGE VIEW v3 AS SELECT * FROM t3; +INSERT INTO t3 VALUES (5),(6); + +PREPARE stmt FROM + "SELECT * FROM t1 + WHERE EXISTS ( + SELECT * FROM t2 WHERE t1.a IN ( SELECT c AS fld FROM v3 ) + )"; +EXECUTE stmt; +EXECUTE stmt; +EXECUTE stmt; + +drop view v3; +drop table t1,t2,t3; + +--echo # End of 5.5 test + # The following command must be the last one the file set optimizer_switch=@subselect_sj_tmp; diff --git a/mysql-test/main/subselect_sj_jcl6.result 
b/mysql-test/main/subselect_sj_jcl6.result index 77a073ea2d3..d7b77be7b54 100644 --- a/mysql-test/main/subselect_sj_jcl6.result +++ b/mysql-test/main/subselect_sj_jcl6.result @@ -3179,6 +3179,32 @@ id select_type table type possible_keys key key_len ref rows filtered Extra Warnings: Note 1003 /* select#1 */ select `test`.`t1`.`c1` AS `c1`,`test`.`t2`.`c2` AS `c2`,`test`.`t4`.`c4` AS `c4` from `test`.`t1` left join (`test`.`t2` join `test`.`t4`) on(`test`.`t2`.`c2` = `test`.`t1`.`c1` and (`test`.`t1`.`c1`,(/* select#3 */ select `test`.`t3`.`c3` from `test`.`t3` where (`test`.`t2`.`c2`) = `test`.`t3`.`c3`))) where 1 DROP TABLE t1,t2,t3,t4; +# +# MDEV-13699: Assertion `!new_field->field_name.str || +# strlen(new_field->field_name.str) == new_field->field_name.length' +# failed in create_tmp_table on 2nd execution of PS with semijoin +# +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (b INT); +INSERT INTO t2 VALUES (3),(4); +CREATE TABLE t3 (c INT); +CREATE ALGORITHM=MERGE VIEW v3 AS SELECT * FROM t3; +INSERT INTO t3 VALUES (5),(6); +PREPARE stmt FROM +"SELECT * FROM t1 + WHERE EXISTS ( + SELECT * FROM t2 WHERE t1.a IN ( SELECT c AS fld FROM v3 ) + )"; +EXECUTE stmt; +a +EXECUTE stmt; +a +EXECUTE stmt; +a +drop view v3; +drop table t1,t2,t3; +# End of 5.5 test set optimizer_switch=@subselect_sj_tmp; # # BUG#49129: Wrong result with IN-subquery with join_cache_level=6 and firstmatch=off diff --git a/mysql-test/main/table_value_constr.result b/mysql-test/main/table_value_constr.result index 39caba331ef..9e0a0968932 100644 --- a/mysql-test/main/table_value_constr.result +++ b/mysql-test/main/table_value_constr.result @@ -2071,3 +2071,29 @@ ERROR HY000: Field reference 'b' can't be used in table value constructor select * from (values (1), (t1.b), (2)) as new_tvc; ERROR HY000: Field reference 't1.b' can't be used in table value constructor drop table t1; +# +# MDEV-15940: cursor over TVC +# +BEGIN NOT ATOMIC +DECLARE v INT; +DECLARE cur CURSOR FOR VALUES(7); +OPEN cur; +FETCH cur INTO v; +SELECT v; +END; +| +v +7 +BEGIN NOT ATOMIC +DECLARE v INT DEFAULT 0; +FOR a IN (VALUES (7)) DO SET v = v + 1; END FOR; +SELECT v; +END; +| +v +1 +# +# MDEV-16038: empty row in TVC +# +with t as (values (),()) select 1 from t; +ERROR HY000: Row with no elements is not allowed in table value constructor in this context diff --git a/mysql-test/main/table_value_constr.test b/mysql-test/main/table_value_constr.test index 578f8943fbb..eb5ea59f829 100644 --- a/mysql-test/main/table_value_constr.test +++ b/mysql-test/main/table_value_constr.test @@ -1044,3 +1044,34 @@ select * from (values (1), (b), (2)) as new_tvc; select * from (values (1), (t1.b), (2)) as new_tvc; drop table t1; + +--echo # +--echo # MDEV-15940: cursor over TVC +--echo # + +DELIMITER |; + +BEGIN NOT ATOMIC + DECLARE v INT; + DECLARE cur CURSOR FOR VALUES(7); + OPEN cur; + FETCH cur INTO v; + SELECT v; +END; +| + +BEGIN NOT ATOMIC +DECLARE v INT DEFAULT 0; +FOR a IN (VALUES (7)) DO SET v = v + 1; END FOR; +SELECT v; +END; +| + +DELIMITER ;| + +--echo # +--echo # MDEV-16038: empty row in TVC +--echo # + +--error ER_EMPTY_ROW_IN_TVC +with t as (values (),()) select 1 from t; diff --git a/mysql-test/main/temporal_literal.result b/mysql-test/main/temporal_literal.result index f4f43fe9ba3..d2417d7f9eb 100644 --- a/mysql-test/main/temporal_literal.result +++ b/mysql-test/main/temporal_literal.result @@ -517,7 +517,6 @@ Warning 1292 Incorrect datetime value: '2001-00-00' Warning 1292 Incorrect datetime value: '2001-00-00' Warning 
1292 Incorrect datetime value: '2001-00-00' Warning 1292 Incorrect datetime value: '2001-00-00' -Warning 1292 Incorrect datetime value: '2001-00-00' SET sql_mode=DEFAULT; DROP TABLE t1; # diff --git a/mysql-test/main/trigger.result b/mysql-test/main/trigger.result index 8473b3bb90d..537f86e9f40 100644 --- a/mysql-test/main/trigger.result +++ b/mysql-test/main/trigger.result @@ -2345,7 +2345,18 @@ CREATE TABLE t1 (i INT); insert into t2 value (2); DROP VIEW v1; DROP TABLE t1,t2,t3; -End of 10.1 tests. +# +# MDEV-16093 +# Assertion `global_status_var.global_memory_used == 0' failed or +# bytes lost after inserting into table with non-null blob and trigger +# +CREATE TABLE t1 (b BLOB NOT NULL); +CREATE TRIGGER tr BEFORE UPDATE ON t1 FOR EACH ROW BEGIN END; +INSERT INTO t1 VALUES ('foo'); +DROP TABLE t1; +# +# End of 10.1 tests. +# create table t1 (i int); create trigger tr1 after insert on t1 for each row set @a=@a+1; create trigger tr2 after insert on t1 for each row set @a=@a+1; diff --git a/mysql-test/main/trigger.test b/mysql-test/main/trigger.test index 1557ef200e5..020117e046f 100644 --- a/mysql-test/main/trigger.test +++ b/mysql-test/main/trigger.test @@ -2665,8 +2665,20 @@ insert into t2 value (2); DROP VIEW v1; DROP TABLE t1,t2,t3; +--echo # +--echo # MDEV-16093 +--echo # Assertion `global_status_var.global_memory_used == 0' failed or +--echo # bytes lost after inserting into table with non-null blob and trigger +--echo # ---echo End of 10.1 tests. +CREATE TABLE t1 (b BLOB NOT NULL); +CREATE TRIGGER tr BEFORE UPDATE ON t1 FOR EACH ROW BEGIN END; +INSERT INTO t1 VALUES ('foo'); +DROP TABLE t1; + +--echo # +--echo # End of 10.1 tests. +--echo # # # MDEV-10915 Count number of executed triggers diff --git a/mysql-test/main/type_int.result b/mysql-test/main/type_int.result index 39e2e91ecc7..47c859d3ffb 100644 --- a/mysql-test/main/type_int.result +++ b/mysql-test/main/type_int.result @@ -93,3 +93,135 @@ DROP TABLE t1; # # End of 10.2 tests # +# +# Start of 10.3 tests +# +# +# MDEV-15926 MEDIUMINT returns wrong I_S attributes +# +CREATE TABLE t1 (a MEDIUMINT, b MEDIUMINT UNSIGNED); +SELECT COLUMN_NAME, NUMERIC_PRECISION FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1' ORDER BY COLUMN_NAME; +COLUMN_NAME NUMERIC_PRECISION +a 7 +b 8 +DROP TABLE t1; +# +# MDEV-15946 MEDIUMINT(N<8) creates a wrong data type on conversion to string +# +CREATE TABLE t1 ( +uint8 TINYINT(2) UNSIGNED, sint8 TINYINT(2), +uint16 SMALLINT(2) UNSIGNED, sint16 SMALLINT(2), +uint24 MEDIUMINT(2) UNSIGNED, sint24 MEDIUMINT(2), +uint32 INT(2) UNSIGNED, sint32 INT(2), +uint64 BIGINT(2) UNSIGNED, sint64 BIGINT(2) +); +CREATE TABLE t2 AS SELECT +CONCAT(uint8),CONCAT(sint8), +CONCAT(uint16),CONCAT(sint16), +CONCAT(uint24),CONCAT(sint24), +CONCAT(uint32),CONCAT(sint32), +CONCAT(uint64),CONCAT(sint64) +FROM t1; +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `CONCAT(uint8)` varchar(3) DEFAULT NULL, + `CONCAT(sint8)` varchar(4) DEFAULT NULL, + `CONCAT(uint16)` varchar(5) DEFAULT NULL, + `CONCAT(sint16)` varchar(6) DEFAULT NULL, + `CONCAT(uint24)` varchar(8) DEFAULT NULL, + `CONCAT(sint24)` varchar(8) DEFAULT NULL, + `CONCAT(uint32)` varchar(10) DEFAULT NULL, + `CONCAT(sint32)` varchar(11) DEFAULT NULL, + `CONCAT(uint64)` varchar(20) DEFAULT NULL, + `CONCAT(sint64)` varchar(20) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t2; +CREATE TABLE t2 AS SELECT +CONCAT(COALESCE(uint8)),CONCAT(COALESCE(sint8)), +CONCAT(COALESCE(uint16)),CONCAT(COALESCE(sint16)), 
+CONCAT(COALESCE(uint24)),CONCAT(COALESCE(sint24)), +CONCAT(COALESCE(uint32)),CONCAT(COALESCE(sint32)), +CONCAT(COALESCE(uint64)),CONCAT(COALESCE(sint64)) +FROM t1; +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `CONCAT(COALESCE(uint8))` varchar(3) DEFAULT NULL, + `CONCAT(COALESCE(sint8))` varchar(4) DEFAULT NULL, + `CONCAT(COALESCE(uint16))` varchar(5) DEFAULT NULL, + `CONCAT(COALESCE(sint16))` varchar(6) DEFAULT NULL, + `CONCAT(COALESCE(uint24))` varchar(8) DEFAULT NULL, + `CONCAT(COALESCE(sint24))` varchar(8) DEFAULT NULL, + `CONCAT(COALESCE(uint32))` varchar(10) DEFAULT NULL, + `CONCAT(COALESCE(sint32))` varchar(11) DEFAULT NULL, + `CONCAT(COALESCE(uint64))` varchar(20) DEFAULT NULL, + `CONCAT(COALESCE(sint64))` varchar(20) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t2; +DROP TABLE t1; +CREATE FUNCTION uint8() RETURNS TINYINT(2) UNSIGNED RETURN 1; +CREATE FUNCTION sint8() RETURNS TINYINT(2) RETURN 1; +CREATE FUNCTION uint16() RETURNS SMALLINT(2) UNSIGNED RETURN 1; +CREATE FUNCTION sint16() RETURNS SMALLINT(2) RETURN 1; +CREATE FUNCTION uint24() RETURNS MEDIUMINT(2) UNSIGNED RETURN 1; +CREATE FUNCTION sint24() RETURNS MEDIUMINT(2) RETURN 1; +CREATE FUNCTION uint32() RETURNS INT(2) UNSIGNED RETURN 1; +CREATE FUNCTION sint32() RETURNS INT(2) RETURN 1; +CREATE FUNCTION uint64() RETURNS BIGINT(2) UNSIGNED RETURN 1; +CREATE FUNCTION sint64() RETURNS BIGINT(2) RETURN 1; +CREATE TABLE t1 AS SELECT +CONCAT(uint8()), CONCAT(sint8()), +CONCAT(uint16()),CONCAT(sint16()), +CONCAT(uint24()),CONCAT(sint24()), +CONCAT(uint32()),CONCAT(sint32()), +CONCAT(uint64()),CONCAT(sint64()); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `CONCAT(uint8())` varchar(3) DEFAULT NULL, + `CONCAT(sint8())` varchar(4) DEFAULT NULL, + `CONCAT(uint16())` varchar(5) DEFAULT NULL, + `CONCAT(sint16())` varchar(6) DEFAULT NULL, + `CONCAT(uint24())` varchar(8) DEFAULT NULL, + `CONCAT(sint24())` varchar(8) DEFAULT NULL, + `CONCAT(uint32())` varchar(10) DEFAULT NULL, + `CONCAT(sint32())` varchar(11) DEFAULT NULL, + `CONCAT(uint64())` varchar(20) DEFAULT NULL, + `CONCAT(sint64())` varchar(20) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 AS SELECT +CONCAT(COALESCE(uint8())),CONCAT(COALESCE(sint8())), +CONCAT(COALESCE(uint16())),CONCAT(COALESCE(sint16())), +CONCAT(COALESCE(uint24())),CONCAT(COALESCE(sint24())), +CONCAT(COALESCE(uint32())),CONCAT(COALESCE(sint32())), +CONCAT(COALESCE(uint64())),CONCAT(COALESCE(sint64())); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `CONCAT(COALESCE(uint8()))` varchar(3) DEFAULT NULL, + `CONCAT(COALESCE(sint8()))` varchar(4) DEFAULT NULL, + `CONCAT(COALESCE(uint16()))` varchar(5) DEFAULT NULL, + `CONCAT(COALESCE(sint16()))` varchar(6) DEFAULT NULL, + `CONCAT(COALESCE(uint24()))` varchar(8) DEFAULT NULL, + `CONCAT(COALESCE(sint24()))` varchar(8) DEFAULT NULL, + `CONCAT(COALESCE(uint32()))` varchar(10) DEFAULT NULL, + `CONCAT(COALESCE(sint32()))` varchar(11) DEFAULT NULL, + `CONCAT(COALESCE(uint64()))` varchar(20) DEFAULT NULL, + `CONCAT(COALESCE(sint64()))` varchar(20) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +DROP FUNCTION uint8; +DROP FUNCTION sint8; +DROP FUNCTION uint16; +DROP FUNCTION sint16; +DROP FUNCTION uint24; +DROP FUNCTION sint24; +DROP FUNCTION uint32; +DROP FUNCTION sint32; +DROP FUNCTION uint64; +DROP FUNCTION sint64; +# +# End of 10.3 tests +# diff --git a/mysql-test/main/type_int.test b/mysql-test/main/type_int.test index 
271b4d5862a..bcab2b20dc4 100644 --- a/mysql-test/main/type_int.test +++ b/mysql-test/main/type_int.test @@ -76,3 +76,94 @@ DROP TABLE t1; --echo # --echo # End of 10.2 tests --echo # + +--echo # +--echo # Start of 10.3 tests +--echo # + +--echo # +--echo # MDEV-15926 MEDIUMINT returns wrong I_S attributes +--echo # + +CREATE TABLE t1 (a MEDIUMINT, b MEDIUMINT UNSIGNED); +SELECT COLUMN_NAME, NUMERIC_PRECISION FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1' ORDER BY COLUMN_NAME; +DROP TABLE t1; + + +--echo # +--echo # MDEV-15946 MEDIUMINT(N<8) creates a wrong data type on conversion to string +--echo # + +CREATE TABLE t1 ( + uint8 TINYINT(2) UNSIGNED, sint8 TINYINT(2), + uint16 SMALLINT(2) UNSIGNED, sint16 SMALLINT(2), + uint24 MEDIUMINT(2) UNSIGNED, sint24 MEDIUMINT(2), + uint32 INT(2) UNSIGNED, sint32 INT(2), + uint64 BIGINT(2) UNSIGNED, sint64 BIGINT(2) +); + +CREATE TABLE t2 AS SELECT + CONCAT(uint8),CONCAT(sint8), + CONCAT(uint16),CONCAT(sint16), + CONCAT(uint24),CONCAT(sint24), + CONCAT(uint32),CONCAT(sint32), + CONCAT(uint64),CONCAT(sint64) +FROM t1; +SHOW CREATE TABLE t2; +DROP TABLE t2; + +CREATE TABLE t2 AS SELECT + CONCAT(COALESCE(uint8)),CONCAT(COALESCE(sint8)), + CONCAT(COALESCE(uint16)),CONCAT(COALESCE(sint16)), + CONCAT(COALESCE(uint24)),CONCAT(COALESCE(sint24)), + CONCAT(COALESCE(uint32)),CONCAT(COALESCE(sint32)), + CONCAT(COALESCE(uint64)),CONCAT(COALESCE(sint64)) +FROM t1; +SHOW CREATE TABLE t2; +DROP TABLE t2; + +DROP TABLE t1; + +CREATE FUNCTION uint8() RETURNS TINYINT(2) UNSIGNED RETURN 1; +CREATE FUNCTION sint8() RETURNS TINYINT(2) RETURN 1; +CREATE FUNCTION uint16() RETURNS SMALLINT(2) UNSIGNED RETURN 1; +CREATE FUNCTION sint16() RETURNS SMALLINT(2) RETURN 1; +CREATE FUNCTION uint24() RETURNS MEDIUMINT(2) UNSIGNED RETURN 1; +CREATE FUNCTION sint24() RETURNS MEDIUMINT(2) RETURN 1; +CREATE FUNCTION uint32() RETURNS INT(2) UNSIGNED RETURN 1; +CREATE FUNCTION sint32() RETURNS INT(2) RETURN 1; +CREATE FUNCTION uint64() RETURNS BIGINT(2) UNSIGNED RETURN 1; +CREATE FUNCTION sint64() RETURNS BIGINT(2) RETURN 1; + +CREATE TABLE t1 AS SELECT + CONCAT(uint8()), CONCAT(sint8()), + CONCAT(uint16()),CONCAT(sint16()), + CONCAT(uint24()),CONCAT(sint24()), + CONCAT(uint32()),CONCAT(sint32()), + CONCAT(uint64()),CONCAT(sint64()); +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 AS SELECT + CONCAT(COALESCE(uint8())),CONCAT(COALESCE(sint8())), + CONCAT(COALESCE(uint16())),CONCAT(COALESCE(sint16())), + CONCAT(COALESCE(uint24())),CONCAT(COALESCE(sint24())), + CONCAT(COALESCE(uint32())),CONCAT(COALESCE(sint32())), + CONCAT(COALESCE(uint64())),CONCAT(COALESCE(sint64())); +SHOW CREATE TABLE t1; +DROP TABLE t1; + +DROP FUNCTION uint8; +DROP FUNCTION sint8; +DROP FUNCTION uint16; +DROP FUNCTION sint16; +DROP FUNCTION uint24; +DROP FUNCTION sint24; +DROP FUNCTION uint32; +DROP FUNCTION sint32; +DROP FUNCTION uint64; +DROP FUNCTION sint64; + +--echo # +--echo # End of 10.3 tests +--echo # diff --git a/mysql-test/main/union.result b/mysql-test/main/union.result index 4e5f9312e03..39cd68ae851 100644 --- a/mysql-test/main/union.result +++ b/mysql-test/main/union.result @@ -2500,5 +2500,19 @@ t2 CREATE TABLE `t2` ( DROP TABLE t2; DROP TABLE t1; # +# MDEV-13232: Assertion `(&(&share->intern_lock)->m_mutex)->count > 0 && +# pthread_equal(pthread_self(), (&(&share->intern_lock)->m_mutex)-> +# thread)' failed in _ma_state_info_write +# +CREATE TABLE t1 (c1 CHAR(8)); +INSERT INTO t1 VALUES ('10'),('-10'); +CREATE TABLE t2 (c2 CHAR); +SET @a= CAST('10' AS CHAR); 
+SELECT c1 FROM t1 UNION SELECT - @a FROM t2; +c1 +10 +-10 +drop table t1,t2; +# # End of 10.3 tests # diff --git a/mysql-test/main/union.test b/mysql-test/main/union.test index f86cae87524..b9b38271f83 100644 --- a/mysql-test/main/union.test +++ b/mysql-test/main/union.test @@ -1745,6 +1745,21 @@ SHOW CREATE TABLE t2; DROP TABLE t2; DROP TABLE t1; +--echo # +--echo # MDEV-13232: Assertion `(&(&share->intern_lock)->m_mutex)->count > 0 && +--echo # pthread_equal(pthread_self(), (&(&share->intern_lock)->m_mutex)-> +--echo # thread)' failed in _ma_state_info_write +--echo # + +CREATE TABLE t1 (c1 CHAR(8)); +INSERT INTO t1 VALUES ('10'),('-10'); + +CREATE TABLE t2 (c2 CHAR); +SET @a= CAST('10' AS CHAR); + +SELECT c1 FROM t1 UNION SELECT - @a FROM t2; + +drop table t1,t2; --echo # --echo # End of 10.3 tests diff --git a/mysql-test/main/win.result b/mysql-test/main/win.result index dd4d09bb1eb..3d56cd8e435 100644 --- a/mysql-test/main/win.result +++ b/mysql-test/main/win.result @@ -3300,5 +3300,20 @@ SELECT ROW_NUMBER() OVER(), i FROM t1 WHERE 0; ROW_NUMBER() OVER() i DROP TABLE t1; # +# MDEV-15853: Assertion `tab->filesort_result == 0' failed +# +CREATE TABLE t1 ( a1 int); +insert into t1 values (1),(2),(3); +CREATE TABLE t2 (b1 int, a1 int, a2 int); +insert into t2 values (1,2,3),(2,3,4),(3,4,5); +SELECT COUNT(DISTINCT t2.a2), +rank() OVER (ORDER BY t2.b1) +FROM t2 ,t1 GROUP BY t2.b1 ORDER BY t1.a1; +COUNT(DISTINCT t2.a2) rank() OVER (ORDER BY t2.b1) +1 1 +1 2 +1 3 +DROP TABLE t1,t2; +# # Start of 10.3 tests # diff --git a/mysql-test/main/win.test b/mysql-test/main/win.test index c46aaecfbbf..d483cdbaa83 100644 --- a/mysql-test/main/win.test +++ b/mysql-test/main/win.test @@ -2067,6 +2067,22 @@ SELECT DISTINCT ROW_NUMBER() OVER(), i FROM t1 WHERE 0; SELECT ROW_NUMBER() OVER(), i FROM t1 WHERE 0; DROP TABLE t1; +--echo # +--echo # MDEV-15853: Assertion `tab->filesort_result == 0' failed +--echo # + +CREATE TABLE t1 ( a1 int); +insert into t1 values (1),(2),(3); + +CREATE TABLE t2 (b1 int, a1 int, a2 int); +insert into t2 values (1,2,3),(2,3,4),(3,4,5); + +--sorted_result +SELECT COUNT(DISTINCT t2.a2), + rank() OVER (ORDER BY t2.b1) +FROM t2 ,t1 GROUP BY t2.b1 ORDER BY t1.a1; +DROP TABLE t1,t2; + --echo # --echo # Start of 10.3 tests --echo # diff --git a/mysql-test/main/win_percentile.result b/mysql-test/main/win_percentile.result index c51e2e6bd51..f46fad80d16 100644 --- a/mysql-test/main/win_percentile.result +++ b/mysql-test/main/win_percentile.result @@ -324,3 +324,27 @@ median(score) over (partition by name) c 4.0000000000 4.0000000000 4.0000000000 4.0000000000 drop table t1; +# +# MDEV-13352: MEDIAN window function over a table with virtual column +# in select with CTE and ORDER BY +# +CREATE TABLE t1 (f1 int ,f2 int ,f3 int, f4 int, v1 int AS (-f4) virtual); +INSERT INTO t1(f1,f2,f3,f4) VALUES +(1,10,100,10), (7,11,112,15), (3,14,121,12); +WITH CTE AS (SELECT MIN(f3) OVER () FROM t1) +SELECT MEDIAN(f3) OVER () FROM t1 +ORDER BY f1, f2, f3, f4, v1; +MEDIAN(f3) OVER () +112.0000000000 +112.0000000000 +112.0000000000 +DROP TABLE t1; +# +# MDEV-15846: Server crashed with MEDIAN() window function +# +CREATE TABLE t1 ( pk int PRIMARY KEY, a1 int, a2 int); +SELECT MEDIAN(`a1`) OVER (), +MEDIAN(`a2`) OVER (PARTITION BY `pk`) +FROM t1; +MEDIAN(`a1`) OVER () MEDIAN(`a2`) OVER (PARTITION BY `pk`) +DROP TABLE t1; diff --git a/mysql-test/main/win_percentile.test b/mysql-test/main/win_percentile.test index 468d8cff56b..e39af8cfd39 100644 --- a/mysql-test/main/win_percentile.test +++ 
b/mysql-test/main/win_percentile.test @@ -102,3 +102,28 @@ select median(score) over (partition by name), percentile_cont(0.8) within grou select median(score) over (partition by name), percentile_cont(0.9) within group(order by score) over (partition by name) as c from t1; select median(score) over (partition by name), percentile_cont(1) within group(order by score) over (partition by name) as c from t1; drop table t1; + +--echo # +--echo # MDEV-13352: MEDIAN window function over a table with virtual column +--echo # in select with CTE and ORDER BY +--echo # + +CREATE TABLE t1 (f1 int ,f2 int ,f3 int, f4 int, v1 int AS (-f4) virtual); +INSERT INTO t1(f1,f2,f3,f4) VALUES + (1,10,100,10), (7,11,112,15), (3,14,121,12); + +WITH CTE AS (SELECT MIN(f3) OVER () FROM t1) +SELECT MEDIAN(f3) OVER () FROM t1 +ORDER BY f1, f2, f3, f4, v1; +DROP TABLE t1; + +--echo # +--echo # MDEV-15846: Server crashed with MEDIAN() window function +--echo # + +CREATE TABLE t1 ( pk int PRIMARY KEY, a1 int, a2 int); + +SELECT MEDIAN(`a1`) OVER (), + MEDIAN(`a2`) OVER (PARTITION BY `pk`) +FROM t1; +DROP TABLE t1; diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 1a00a7e694d..ddb79b925b6 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -2,7 +2,7 @@ # -*- cperl -*- # Copyright (c) 2004, 2014, Oracle and/or its affiliates. -# Copyright (c) 2009, 2017, MariaDB Corporation +# Copyright (c) 2009, 2018, MariaDB Corporation # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -653,50 +653,59 @@ sub run_test_server ($$$) { my $worker_savename= basename($worker_savedir); my $savedir= "$opt_vardir/log/$worker_savename"; + # Move any core files from e.g. 
mysqltest - foreach my $coref (glob("core*"), glob("*.dmp")) - { - mtr_report(" - found '$coref', moving it to '$savedir'"); - move($coref, $savedir); - } - if ($opt_max_save_core > 0) { - # Limit number of core files saved - find({ no_chdir => 1, - wanted => sub { - my $core_file= $File::Find::name; - my $core_name= basename($core_file); - - # Name beginning with core, not ending in .gz - if (($core_name =~ /^core/ and $core_name !~ /\.gz$/) - or (IS_WINDOWS and $core_name =~ /\.dmp$/)){ - # Ending with .dmp - mtr_report(" - found '$core_name'", - "($num_saved_cores/$opt_max_save_core)"); - - My::CoreDump->show($core_file, $exe_mysqld, $opt_parallel); - - if ($num_saved_cores >= $opt_max_save_core) { - mtr_report(" - deleting it, already saved", - "$opt_max_save_core"); - unlink("$core_file"); - } else { - mtr_compress_file($core_file) unless @opt_cases; - } - ++$num_saved_cores; - } - } - }, - $savedir); - } } resfile_print_test(); $num_saved_datadir++; @@ -1133,7 +1142,7 @@ sub command_line_setup { 'debug' => \$opt_debug, 'debug-common' => \$opt_debug_common, 'debug-server' => \$opt_debug_server, - 'gdb:s' => sub { $opt_gdb = $_[1] || '#' }, + 'gdb=s' => \$opt_gdb, 'client-gdb' => \$opt_client_gdb, 'manual-gdb' => \$opt_manual_gdb, 'manual-lldb' => \$opt_manual_lldb, @@ -1228,6 +1237,9 @@ sub command_line_setup { 'skip-test-list=s' => \@opt_skip_test_list ); + # fix options (that take an optional argument and *only* after = sign + my %fixopt = ( '--gdb' => '--gdb=#' ); + @ARGV = map { $fixopt{$_} or $_ } @ARGV; GetOptions(%options) or usage("Can't read options"); usage("") if $opt_usage; list_options(\%options) if $opt_list_options; @@ -2573,7 +2585,7 @@ sub setup_vardir() { { for (<$bindir/storage/*$opt_vs_config/*.dll>, <$bindir/plugin/*$opt_vs_config/*.dll>, - <$bindir/libmariadb/plugins/*$opt_vs_config/*.dll>, + <$bindir/libmariadb$opt_vs_config/*.dll>, <$bindir/sql$opt_vs_config/*.dll>) { my $pname=basename($_); @@ -2594,6 +2606,7 @@ sub setup_vardir() { for (<$bindir/storage/*/*.so>, <$bindir/plugin/*/*.so>, <$bindir/libmariadb/plugins/*/*.so>, + <$bindir/libmariadb/*.so>, <$bindir/sql/*.so>) { my $pname=basename($_); @@ -3191,9 +3204,6 @@ sub mysql_install_db { mtr_appendfile_to_file("$sql_dir/mysql_performance_tables.sql", $bootstrap_sql_file); - # Don't install anonymous users - mtr_tofile($bootstrap_sql_file, "set \@skip_auth_anonymous=1;\n"); - # Add the mysql system tables initial data # for a production system mtr_appendfile_to_file("$sql_dir/mysql_system_tables_data.sql", @@ -3211,6 +3221,10 @@ sub mysql_install_db { mtr_appendfile_to_file("$sql_dir/fill_help_tables.sql", $bootstrap_sql_file); + # Create test database + mtr_appendfile_to_file("$sql_dir/mysql_test_db.sql", + $bootstrap_sql_file); + # mysql.gtid_slave_pos was created in InnoDB, but many tests # run without InnoDB. 
Alter it to MyISAM now mtr_tofile($bootstrap_sql_file, "ALTER TABLE gtid_slave_pos ENGINE=MyISAM;\n"); @@ -3228,6 +3242,10 @@ sub mysql_install_db { sql_to_bootstrap($text)); } + # Remove anonymous users + mtr_tofile($bootstrap_sql_file, + "DELETE FROM mysql.user where user= '';\n"); + # Create mtr database mtr_tofile($bootstrap_sql_file, "CREATE DATABASE mtr CHARSET=latin1;\n"); @@ -3248,7 +3266,6 @@ sub mysql_install_db { # Create directories mysql and test mkpath("$install_datadir/mysql"); - mkpath("$install_datadir/test"); if ( My::SafeProcess->run ( diff --git a/mysql-test/suite.pm b/mysql-test/suite.pm index 651b99f31a5..d0a4b618aaa 100644 --- a/mysql-test/suite.pm +++ b/mysql-test/suite.pm @@ -13,6 +13,9 @@ sub skip_combinations { my %skip = ( 'include/have_innodb.combinations' => [ @combinations ]); + $skip{'include/innodb_encrypt_log.combinations'} = [ 'crypt' ] + unless $ENV{DEBUG_KEY_MANAGEMENT_SO}; + # don't run tests for the wrong platform $skip{'include/platform.combinations'} = [ (IS_WINDOWS) ? 'unix' : 'win' ]; diff --git a/mysql-test/suite/archive/discover.result b/mysql-test/suite/archive/discover.result index 0619ca2051a..99bb955ea24 100644 --- a/mysql-test/suite/archive/discover.result +++ b/mysql-test/suite/archive/discover.result @@ -17,6 +17,7 @@ select * from t1; a 1 2 +db.opt t1.ARZ t1.frm # @@ -30,6 +31,7 @@ show tables; Tables_in_test t1 t2 +db.opt t1.ARZ t2.ARZ t2.frm @@ -41,6 +43,7 @@ show full tables; Tables_in_test Table_type t1 BASE TABLE t2 BASE TABLE +db.opt t1.ARZ t2.ARZ t2.frm @@ -50,6 +53,7 @@ t2.frm flush tables; truncate table t1; ERROR HY000: Storage engine ARCHIVE of the table `test`.`t1` doesn't have this option +db.opt t1.ARZ t1.frm t2.ARZ @@ -59,6 +63,7 @@ t2.frm # flush tables; rename table t2 to t0; +db.opt t0.ARZ t1.ARZ t1.frm @@ -76,6 +81,7 @@ t1 CREATE TABLE `t1` ( # flush tables; drop table t1; +db.opt t0.ARZ # # discover of table non-existance on drop @@ -84,6 +90,7 @@ select * from t0; a flush tables; drop table t0; +db.opt show status like 'Handler_discover'; Variable_name Value Handler_discover 6 @@ -130,6 +137,7 @@ a flush tables; select * from t1; ERROR 42S02: Table 'test.t1' doesn't exist +db.opt create table t1 (a int) engine=archive; select * from t1; a diff --git a/mysql-test/suite/binlog/r/binlog_stm_sp.result b/mysql-test/suite/binlog/r/binlog_stm_sp.result index 6c470517a93..2fbb1cbce2e 100644 --- a/mysql-test/suite/binlog/r/binlog_stm_sp.result +++ b/mysql-test/suite/binlog/r/binlog_stm_sp.result @@ -84,3 +84,91 @@ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP PROCEDURE p1 +# +# MDEV-16020 SP variables inside GROUP BY..WITH ROLLUP break replication +# +FLUSH LOGS; +CREATE TABLE t1 (d DATE); +INSERT INTO t1 VALUES ('1985-05-13'),('1989-12-24'); +CREATE TABLE t2 (d DATE, c BIGINT); +BEGIN NOT ATOMIC +BEGIN +DECLARE var INT DEFAULT 10; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, var; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, var WITH ROLLUP; +END; +BEGIN +DECLARE atomic INT DEFAULT 20; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, atomic; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, atomic WITH ROLLUP; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, `atomic`; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, `atomic` WITH ROLLUP; +END; +BEGIN +DECLARE atomic ROW (atomic INT, xxx INT) DEFAULT (31,32); +INSERT INTO 
t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, atomic.atomic; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, atomic.atomic WITH ROLLUP; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, `atomic`.`atomic`; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, `atomic`.`atomic` WITH ROLLUP; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, atomic.xxx; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, atomic.xxx WITH ROLLUP; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, `atomic`.`xxx`; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, `atomic`.`xxx` WITH ROLLUP; +END; +END; +$$ +DROP TABLE t1,t2; +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000002 # Binlog_checkpoint # # master-bin.000002 +master-bin.000002 # Gtid # # GTID #-#-# +master-bin.000002 # Query # # use `test`; CREATE TABLE t1 (d DATE) +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t1 VALUES ('1985-05-13'),('1989-12-24') +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # GTID #-#-# +master-bin.000002 # Query # # use `test`; CREATE TABLE t2 (d DATE, c BIGINT) +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('var',10) +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('var',10) WITH ROLLUP +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('atomic',20) +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('atomic',20) WITH ROLLUP +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('atomic',20) +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('atomic',20) WITH ROLLUP +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('atomic.atomic',31) +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('atomic.atomic',31) WITH ROLLUP +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('atomic.atomic',31) +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('atomic.atomic',31) WITH ROLLUP +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('atomic.xxx',32) +master-bin.000002 # Query # # COMMIT 
+master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('atomic.xxx',32) WITH ROLLUP +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('atomic.xxx',32) +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('atomic.xxx',32) WITH ROLLUP +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # GTID #-#-# +master-bin.000002 # Query # # use `test`; DROP TABLE `t1`,`t2` /* generated by server */ diff --git a/mysql-test/suite/binlog/t/binlog_stm_sp.test b/mysql-test/suite/binlog/t/binlog_stm_sp.test index 095b4c7f699..047cab741ce 100644 --- a/mysql-test/suite/binlog/t/binlog_stm_sp.test +++ b/mysql-test/suite/binlog/t/binlog_stm_sp.test @@ -39,3 +39,44 @@ DROP PROCEDURE p1; --let $binlog_file = LAST source include/show_binlog_events.inc; + +--echo # +--echo # MDEV-16020 SP variables inside GROUP BY..WITH ROLLUP break replication +--echo # + +FLUSH LOGS; +CREATE TABLE t1 (d DATE); +INSERT INTO t1 VALUES ('1985-05-13'),('1989-12-24'); +CREATE TABLE t2 (d DATE, c BIGINT); +DELIMITER $$; +BEGIN NOT ATOMIC + BEGIN + DECLARE var INT DEFAULT 10; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, var; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, var WITH ROLLUP; + END; + BEGIN + DECLARE atomic INT DEFAULT 20; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, atomic; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, atomic WITH ROLLUP; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, `atomic`; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, `atomic` WITH ROLLUP; + END; + BEGIN + DECLARE atomic ROW (atomic INT, xxx INT) DEFAULT (31,32); + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, atomic.atomic; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, atomic.atomic WITH ROLLUP; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, `atomic`.`atomic`; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, `atomic`.`atomic` WITH ROLLUP; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, atomic.xxx; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, atomic.xxx WITH ROLLUP; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, `atomic`.`xxx`; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, `atomic`.`xxx` WITH ROLLUP; + END; +END; +$$ +DELIMITER ;$$ +DROP TABLE t1,t2; + +--let $binlog_file = LAST +source include/show_binlog_events.inc; diff --git a/mysql-test/suite/binlog_encryption/encrypted_master.test b/mysql-test/suite/binlog_encryption/encrypted_master.test index 503a40443d2..f67e93ce815 100644 --- a/mysql-test/suite/binlog_encryption/encrypted_master.test +++ b/mysql-test/suite/binlog_encryption/encrypted_master.test @@ -18,6 +18,7 @@ # - with annotated events, default checksums and minimal binlog row image # +--source include/have_partition.inc --source encryption_algorithms.inc --source include/have_innodb.inc --enable_connect_log diff --git a/mysql-test/suite/binlog_encryption/encrypted_slave.test b/mysql-test/suite/binlog_encryption/encrypted_slave.test index f5697d91779..94777957b92 100644 --- a/mysql-test/suite/binlog_encryption/encrypted_slave.test +++ b/mysql-test/suite/binlog_encryption/encrypted_slave.test @@ -9,6 +9,7 @@ # relay logs 
and binary logs are encrypted on slave. # +--source include/have_partition.inc --source encryption_algorithms.inc --source include/have_innodb.inc diff --git a/mysql-test/suite/binlog_encryption/testdata.opt b/mysql-test/suite/binlog_encryption/testdata.opt deleted file mode 100644 index b0c5b9c8188..00000000000 --- a/mysql-test/suite/binlog_encryption/testdata.opt +++ /dev/null @@ -1 +0,0 @@ ---partition diff --git a/mysql-test/suite/compat/oracle/r/binlog_stm_ps.result b/mysql-test/suite/compat/oracle/r/binlog_stm_ps.result index c60e3493b3f..01fe3be3a04 100644 --- a/mysql-test/suite/compat/oracle/r/binlog_stm_ps.result +++ b/mysql-test/suite/compat/oracle/r/binlog_stm_ps.result @@ -65,3 +65,34 @@ master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (10,20) master-bin.000001 # Query # # COMMIT DROP TABLE t1; +# +# MDEV-16095 Oracle-style placeholder inside GROUP BY..WITH ROLLUP breaks replication +# +FLUSH LOGS; +CREATE TABLE t1 (d DATE); +INSERT INTO t1 VALUES ('1985-05-13'),('1989-12-24'); +CREATE TABLE t2 (d DATE, c BIGINT); +BEGIN +EXECUTE IMMEDIATE 'INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, :param' USING 1; +EXECUTE IMMEDIATE 'INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, :param WITH ROLLUP' USING 1; +END; +$$ +DROP TABLE t1,t2; +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000002 # Binlog_checkpoint # # master-bin.000002 +master-bin.000002 # Gtid # # GTID #-#-# +master-bin.000002 # Query # # use `test`; CREATE TABLE t1 (d DATE) +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t1 VALUES ('1985-05-13'),('1989-12-24') +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # GTID #-#-# +master-bin.000002 # Query # # use `test`; CREATE TABLE t2 (d DATE, c BIGINT) +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, 1 +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # BEGIN GTID #-#-# +master-bin.000002 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, 1 WITH ROLLUP +master-bin.000002 # Query # # COMMIT +master-bin.000002 # Gtid # # GTID #-#-# +master-bin.000002 # Query # # use `test`; DROP TABLE "t1","t2" /* generated by server */ diff --git a/mysql-test/suite/compat/oracle/r/binlog_stm_sp.result b/mysql-test/suite/compat/oracle/r/binlog_stm_sp.result index 9823b155c8f..468309a0e56 100644 --- a/mysql-test/suite/compat/oracle/r/binlog_stm_sp.result +++ b/mysql-test/suite/compat/oracle/r/binlog_stm_sp.result @@ -475,3 +475,36 @@ master-bin.000005 # Gtid # # GTID #-#-# master-bin.000005 # Query # # use `test`; DROP TABLE "t2" /* generated by server */ master-bin.000005 # Gtid # # GTID #-#-# master-bin.000005 # Query # # use `test`; DROP PROCEDURE p1 +# +# MDEV-16020 SP variables inside GROUP BY..WITH ROLLUP break replication +# +FLUSH LOGS; +CREATE TABLE t1 (d DATE); +INSERT INTO t1 VALUES ('1985-05-13'),('1989-12-24'); +CREATE TABLE t2 (d DATE, c BIGINT); +DECLARE +var INT; +BEGIN +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, var; +INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, var WITH ROLLUP; +END; +$$ +DROP TABLE t1,t2; +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000006 # Binlog_checkpoint # # master-bin.000006 +master-bin.000006 # Gtid # # GTID #-#-# +master-bin.000006 # Query # # use `test`; 
CREATE TABLE t1 (d DATE) +master-bin.000006 # Gtid # # BEGIN GTID #-#-# +master-bin.000006 # Query # # use `test`; INSERT INTO t1 VALUES ('1985-05-13'),('1989-12-24') +master-bin.000006 # Query # # COMMIT +master-bin.000006 # Gtid # # GTID #-#-# +master-bin.000006 # Query # # use `test`; CREATE TABLE t2 (d DATE, c BIGINT) +master-bin.000006 # Gtid # # BEGIN GTID #-#-# +master-bin.000006 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('var',NULL) +master-bin.000006 # Query # # COMMIT +master-bin.000006 # Gtid # # BEGIN GTID #-#-# +master-bin.000006 # Query # # use `test`; INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, NAME_CONST('var',NULL) WITH ROLLUP +master-bin.000006 # Query # # COMMIT +master-bin.000006 # Gtid # # GTID #-#-# +master-bin.000006 # Query # # use `test`; DROP TABLE "t1","t2" /* generated by server */ diff --git a/mysql-test/suite/compat/oracle/r/column_compression.result b/mysql-test/suite/compat/oracle/r/column_compression.result new file mode 100644 index 00000000000..eaa4de002a4 --- /dev/null +++ b/mysql-test/suite/compat/oracle/r/column_compression.result @@ -0,0 +1,9 @@ +SET sql_mode=ORACLE; +SET column_compression_zlib_wrap=true; +CREATE TABLE t1 (a BLOB COMPRESSED); +INSERT INTO t1 VALUES (REPEAT('a',10000)); +SELECT DATA_LENGTH<100 AS c FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test'; +c +1 +DROP TABLE t1; diff --git a/mysql-test/suite/compat/oracle/r/func_concat.result b/mysql-test/suite/compat/oracle/r/func_concat.result index 230b36b94a5..b598f97006e 100644 --- a/mysql-test/suite/compat/oracle/r/func_concat.result +++ b/mysql-test/suite/compat/oracle/r/func_concat.result @@ -255,3 +255,70 @@ SELECT * FROM v1; test 1 DROP VIEW v1; +# +# MDEV-16186 Concatenation operator || returns wrong results in sql_mode=ORACLE +# +SELECT -1<<1||1 AS a FROM DUAL; +a +18446744073709549568 +SELECT -1||0<<1 AS a FROM DUAL; +a +18446744073709551596 +EXPLAIN EXTENDED SELECT -1<<1||1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select -1 << concat_operator_oracle(1,1) AS "a" +EXPLAIN EXTENDED SELECT -1||0<<1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select concat_operator_oracle(-1,0) << 1 AS "a" +SELECT -1+1||1 AS a FROM DUAL; +a +01 +SELECT -1||0+1 AS a FROM DUAL; +a +-9 +EXPLAIN EXTENDED SELECT -1+1||1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select concat_operator_oracle(-1 + 1,1) AS "a" +EXPLAIN EXTENDED SELECT -1||0+1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select concat_operator_oracle(-1,0) + 1 AS "a" +SELECT 1*1||-1 AS a FROM DUAL; +a +1-1 +SELECT 1||1*-1 AS a FROM DUAL; +a +1-1 +EXPLAIN EXTENDED SELECT 1*1||-1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select concat_operator_oracle(1 * 1,-1) AS "a" +EXPLAIN EXTENDED SELECT 1||1*-1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra 
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select concat_operator_oracle(1,1 * -1) AS "a" +SELECT -1^1||1 AS a FROM DUAL; +a +184467440737095516141 +SELECT -1||0^1 AS a FROM DUAL; +a +-11 +EXPLAIN EXTENDED SELECT -1^1||1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select concat_operator_oracle(-1 ^ 1,1) AS "a" +EXPLAIN EXTENDED SELECT -1||0^1 AS a FROM DUAL; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select concat_operator_oracle(-1,0 ^ 1) AS "a" diff --git a/mysql-test/suite/compat/oracle/r/func_time.result b/mysql-test/suite/compat/oracle/r/func_time.result new file mode 100644 index 00000000000..063163400ca --- /dev/null +++ b/mysql-test/suite/compat/oracle/r/func_time.result @@ -0,0 +1,31 @@ +SET sql_mode=ORACLE; +# +# Start of 10.3 tests +# +# +# MDEV-16152 Expressions with INTERVAL return bad results in some cases +# +SELECT TIMESTAMP'2001-01-01 10:20:30' - INTERVAL '10' YEAR AS c1, +-INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2; +c1 c2 +1991-01-01 10:20:30 1991-01-01 10:20:30 +SELECT TIMESTAMP'2001-01-01 10:20:30' + INTERVAL '10' YEAR AS c1, +INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2, ++INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c3; +c1 c2 c3 +2011-01-01 10:20:30 2011-01-01 10:20:30 2011-01-01 10:20:30 +EXPLAIN EXTENDED SELECT +TIMESTAMP'2001-01-01 10:20:30' - INTERVAL '10' YEAR AS c1, +-INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select TIMESTAMP'2001-01-01 10:20:30' - interval '10' year AS "c1",TIMESTAMP'2001-01-01 10:20:30' - interval '10' year AS "c2" +EXPLAIN EXTENDED SELECT +TIMESTAMP'2001-01-01 10:20:30' + INTERVAL '10' YEAR AS c1, +INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2, ++INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c3; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select TIMESTAMP'2001-01-01 10:20:30' + interval '10' year AS "c1",TIMESTAMP'2001-01-01 10:20:30' + interval '10' year AS "c2",TIMESTAMP'2001-01-01 10:20:30' + interval '10' year AS "c3" diff --git a/mysql-test/suite/compat/oracle/r/gis.result b/mysql-test/suite/compat/oracle/r/gis.result new file mode 100644 index 00000000000..ebd56a089ad --- /dev/null +++ b/mysql-test/suite/compat/oracle/r/gis.result @@ -0,0 +1,6 @@ +SELECT WITHIN(POINT(1,1), POINT(1,1)); +WITHIN(POINT(1,1), POINT(1,1)) +1 +SELECT WITHIN(POINT(1,1), POINT(0,0)); +WITHIN(POINT(1,1), POINT(0,0)) +0 diff --git a/mysql-test/suite/compat/oracle/r/parser.result b/mysql-test/suite/compat/oracle/r/parser.result index 29588a68045..28ed8eb8185 100644 --- a/mysql-test/suite/compat/oracle/r/parser.result +++ b/mysql-test/suite/compat/oracle/r/parser.result @@ -16,3 +16,435 @@ SET GLOBAL a=10; END; $$ ERROR HY000: Unknown system variable 'a' +# +# MDEV-16202 Latest changes made erroneously some keywords reserved in sql_mode=ORACLE +# +CREATE PROCEDURE p1(name VARCHAR(64), pattern TEXT) AS +query TEXT DEFAULT REPLACE(pattern, 'name', name); +BEGIN +SELECT query AS ''; +EXECUTE IMMEDIATE query; 
+EXCEPTION +WHEN OTHERS THEN +BEGIN +SHOW ERRORS; +END; +END; +$$ +CREATE PROCEDURE p2(name VARCHAR(64)) AS +BEGIN +CALL p1(name, 'DECLARE name INT; BEGIN name:=10; SELECT name; END'); +EXECUTE IMMEDIATE REPLACE('CREATE TABLE t1 (name INT)', 'name', name); +CALL p1(name, 'SELECT name FROM t1'); +CALL p1(name, 'SELECT name ''alias'' FROM t1'); +CALL p1(name, 'SELECT name()'); +CALL p1(name, 'SELECT name.name()'); +CALL p1(name, 'SELECT name DATE FROM t1'); +CALL p1(name, 'SELECT name HISTORY FROM t1'); +CALL p1(name, 'SELECT name NEXT FROM t1'); +CALL p1(name, 'SELECT name PERIOD FROM t1'); +CALL p1(name, 'SELECT name PREVIOUS FROM t1'); +CALL p1(name, 'SELECT name SYSTEM FROM t1'); +CALL p1(name, 'SELECT name SYSTEM_TIME FROM t1'); +CALL p1(name, 'SELECT name TIME FROM t1'); +CALL p1(name, 'SELECT name TIMESTAMP FROM t1'); +CALL p1(name, 'SELECT name TRANSACTION FROM t1'); +CALL p1(name, 'SELECT name VALUE FROM t1'); +CALL p1(name, 'SELECT name VERSIONING FROM t1'); +CALL p1(name, 'SELECT name WITHOUT FROM t1'); +DROP TABLE t1; +END; +$$ +CALL p2('date'); +DECLARE date INT; BEGIN date:=10; SELECT date; END +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'INT; BEGIN date:=10; SELECT date; END' at line 1 +SELECT date FROM t1 +SELECT date 'alias' FROM t1 +Error 1525 Incorrect DATE value: 'alias' +SELECT date() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1 +SELECT date.date() +Error 1630 FUNCTION date.date does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT date DATE FROM t1 +SELECT date HISTORY FROM t1 +SELECT date NEXT FROM t1 +SELECT date PERIOD FROM t1 +SELECT date PREVIOUS FROM t1 +SELECT date SYSTEM FROM t1 +SELECT date SYSTEM_TIME FROM t1 +SELECT date TIME FROM t1 +SELECT date TIMESTAMP FROM t1 +SELECT date TRANSACTION FROM t1 +SELECT date VALUE FROM t1 +SELECT date VERSIONING FROM t1 +SELECT date WITHOUT FROM t1 +CALL p2('history'); +DECLARE history INT; BEGIN history:=10; SELECT history; END +10 +SELECT history FROM t1 +SELECT history 'alias' FROM t1 +SELECT history() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT history.history() +Error 1630 FUNCTION history.history does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT history DATE FROM t1 +SELECT history HISTORY FROM t1 +SELECT history NEXT FROM t1 +SELECT history PERIOD FROM t1 +SELECT history PREVIOUS FROM t1 +SELECT history SYSTEM FROM t1 +SELECT history SYSTEM_TIME FROM t1 +SELECT history TIME FROM t1 +SELECT history TIMESTAMP FROM t1 +SELECT history TRANSACTION FROM t1 +SELECT history VALUE FROM t1 +SELECT history VERSIONING FROM t1 +SELECT history WITHOUT FROM t1 +CALL p2('next'); +DECLARE next INT; BEGIN next:=10; SELECT next; END +10 +SELECT next FROM t1 +SELECT next 'alias' FROM t1 +SELECT next() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT next.next() +Error 1630 FUNCTION next.next does not exist. 
Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT next DATE FROM t1 +SELECT next HISTORY FROM t1 +SELECT next NEXT FROM t1 +SELECT next PERIOD FROM t1 +SELECT next PREVIOUS FROM t1 +SELECT next SYSTEM FROM t1 +SELECT next SYSTEM_TIME FROM t1 +SELECT next TIME FROM t1 +SELECT next TIMESTAMP FROM t1 +SELECT next TRANSACTION FROM t1 +SELECT next VALUE FROM t1 +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'FROM t1' at line 1 +SELECT next VERSIONING FROM t1 +SELECT next WITHOUT FROM t1 +CALL p2('period'); +DECLARE period INT; BEGIN period:=10; SELECT period; END +10 +SELECT period FROM t1 +SELECT period 'alias' FROM t1 +SELECT period() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT period.period() +Error 1630 FUNCTION period.period does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT period DATE FROM t1 +SELECT period HISTORY FROM t1 +SELECT period NEXT FROM t1 +SELECT period PERIOD FROM t1 +SELECT period PREVIOUS FROM t1 +SELECT period SYSTEM FROM t1 +SELECT period SYSTEM_TIME FROM t1 +SELECT period TIME FROM t1 +SELECT period TIMESTAMP FROM t1 +SELECT period TRANSACTION FROM t1 +SELECT period VALUE FROM t1 +SELECT period VERSIONING FROM t1 +SELECT period WITHOUT FROM t1 +CALL p2('previous'); +DECLARE previous INT; BEGIN previous:=10; SELECT previous; END +10 +SELECT previous FROM t1 +SELECT previous 'alias' FROM t1 +SELECT previous() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT previous.previous() +Error 1630 FUNCTION previous.previous does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT previous DATE FROM t1 +SELECT previous HISTORY FROM t1 +SELECT previous NEXT FROM t1 +SELECT previous PERIOD FROM t1 +SELECT previous PREVIOUS FROM t1 +SELECT previous SYSTEM FROM t1 +SELECT previous SYSTEM_TIME FROM t1 +SELECT previous TIME FROM t1 +SELECT previous TIMESTAMP FROM t1 +SELECT previous TRANSACTION FROM t1 +SELECT previous VALUE FROM t1 +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'FROM t1' at line 1 +SELECT previous VERSIONING FROM t1 +SELECT previous WITHOUT FROM t1 +CALL p2('system'); +DECLARE system INT; BEGIN system:=10; SELECT system; END +10 +SELECT system FROM t1 +SELECT system 'alias' FROM t1 +SELECT system() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT system.system() +Error 1630 FUNCTION system.system does not exist. 
Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT system DATE FROM t1 +SELECT system HISTORY FROM t1 +SELECT system NEXT FROM t1 +SELECT system PERIOD FROM t1 +SELECT system PREVIOUS FROM t1 +SELECT system SYSTEM FROM t1 +SELECT system SYSTEM_TIME FROM t1 +SELECT system TIME FROM t1 +SELECT system TIMESTAMP FROM t1 +SELECT system TRANSACTION FROM t1 +SELECT system VALUE FROM t1 +SELECT system VERSIONING FROM t1 +SELECT system WITHOUT FROM t1 +CALL p2('system_time'); +DECLARE system_time INT; BEGIN system_time:=10; SELECT system_time; END +10 +SELECT system_time FROM t1 +SELECT system_time 'alias' FROM t1 +SELECT system_time() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT system_time.system_time() +Error 1630 FUNCTION system_time.system_time does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT system_time DATE FROM t1 +SELECT system_time HISTORY FROM t1 +SELECT system_time NEXT FROM t1 +SELECT system_time PERIOD FROM t1 +SELECT system_time PREVIOUS FROM t1 +SELECT system_time SYSTEM FROM t1 +SELECT system_time SYSTEM_TIME FROM t1 +SELECT system_time TIME FROM t1 +SELECT system_time TIMESTAMP FROM t1 +SELECT system_time TRANSACTION FROM t1 +SELECT system_time VALUE FROM t1 +SELECT system_time VERSIONING FROM t1 +SELECT system_time WITHOUT FROM t1 +CALL p2('time'); +DECLARE time INT; BEGIN time:=10; SELECT time; END +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'INT; BEGIN time:=10; SELECT time; END' at line 1 +SELECT time FROM t1 +SELECT time 'alias' FROM t1 +Error 1525 Incorrect TIME value: 'alias' +SELECT time() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1 +SELECT time.time() +Error 1630 FUNCTION time.time does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT time DATE FROM t1 +SELECT time HISTORY FROM t1 +SELECT time NEXT FROM t1 +SELECT time PERIOD FROM t1 +SELECT time PREVIOUS FROM t1 +SELECT time SYSTEM FROM t1 +SELECT time SYSTEM_TIME FROM t1 +SELECT time TIME FROM t1 +SELECT time TIMESTAMP FROM t1 +SELECT time TRANSACTION FROM t1 +SELECT time VALUE FROM t1 +SELECT time VERSIONING FROM t1 +SELECT time WITHOUT FROM t1 +CALL p2('timestamp'); +DECLARE timestamp INT; BEGIN timestamp:=10; SELECT timestamp; END +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'INT; BEGIN timestamp:=10; SELECT timestamp; END' at line 1 +SELECT timestamp FROM t1 +SELECT timestamp 'alias' FROM t1 +Error 1525 Incorrect DATETIME value: 'alias' +SELECT timestamp() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1 +SELECT timestamp.timestamp() +Error 1630 FUNCTION timestamp.timestamp does not exist. 
Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT timestamp DATE FROM t1 +SELECT timestamp HISTORY FROM t1 +SELECT timestamp NEXT FROM t1 +SELECT timestamp PERIOD FROM t1 +SELECT timestamp PREVIOUS FROM t1 +SELECT timestamp SYSTEM FROM t1 +SELECT timestamp SYSTEM_TIME FROM t1 +SELECT timestamp TIME FROM t1 +SELECT timestamp TIMESTAMP FROM t1 +SELECT timestamp TRANSACTION FROM t1 +SELECT timestamp VALUE FROM t1 +SELECT timestamp VERSIONING FROM t1 +SELECT timestamp WITHOUT FROM t1 +CALL p2('transaction'); +DECLARE transaction INT; BEGIN transaction:=10; SELECT transaction; END +10 +SELECT transaction FROM t1 +SELECT transaction 'alias' FROM t1 +SELECT transaction() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT transaction.transaction() +Error 1630 FUNCTION transaction.transaction does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT transaction DATE FROM t1 +SELECT transaction HISTORY FROM t1 +SELECT transaction NEXT FROM t1 +SELECT transaction PERIOD FROM t1 +SELECT transaction PREVIOUS FROM t1 +SELECT transaction SYSTEM FROM t1 +SELECT transaction SYSTEM_TIME FROM t1 +SELECT transaction TIME FROM t1 +SELECT transaction TIMESTAMP FROM t1 +SELECT transaction TRANSACTION FROM t1 +SELECT transaction VALUE FROM t1 +SELECT transaction VERSIONING FROM t1 +SELECT transaction WITHOUT FROM t1 +CALL p2('value'); +DECLARE value INT; BEGIN value:=10; SELECT value; END +10 +SELECT value FROM t1 +SELECT value 'alias' FROM t1 +SELECT value() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1 +SELECT value.value() +Error 1630 FUNCTION value.value does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT value DATE FROM t1 +SELECT value HISTORY FROM t1 +SELECT value NEXT FROM t1 +SELECT value PERIOD FROM t1 +SELECT value PREVIOUS FROM t1 +SELECT value SYSTEM FROM t1 +SELECT value SYSTEM_TIME FROM t1 +SELECT value TIME FROM t1 +SELECT value TIMESTAMP FROM t1 +SELECT value TRANSACTION FROM t1 +SELECT value VALUE FROM t1 +SELECT value VERSIONING FROM t1 +SELECT value WITHOUT FROM t1 +CALL p2('versioning'); +DECLARE versioning INT; BEGIN versioning:=10; SELECT versioning; END +10 +SELECT versioning FROM t1 +SELECT versioning 'alias' FROM t1 +SELECT versioning() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT versioning.versioning() +Error 1630 FUNCTION versioning.versioning does not exist. 
Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT versioning DATE FROM t1 +SELECT versioning HISTORY FROM t1 +SELECT versioning NEXT FROM t1 +SELECT versioning PERIOD FROM t1 +SELECT versioning PREVIOUS FROM t1 +SELECT versioning SYSTEM FROM t1 +SELECT versioning SYSTEM_TIME FROM t1 +SELECT versioning TIME FROM t1 +SELECT versioning TIMESTAMP FROM t1 +SELECT versioning TRANSACTION FROM t1 +SELECT versioning VALUE FROM t1 +SELECT versioning VERSIONING FROM t1 +SELECT versioning WITHOUT FROM t1 +CALL p2('without'); +DECLARE without INT; BEGIN without:=10; SELECT without; END +10 +SELECT without FROM t1 +SELECT without 'alias' FROM t1 +SELECT without() +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '()' at line 1 +SELECT without.without() +Error 1630 FUNCTION without.without does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual +SELECT without DATE FROM t1 +SELECT without HISTORY FROM t1 +SELECT without NEXT FROM t1 +SELECT without PERIOD FROM t1 +SELECT without PREVIOUS FROM t1 +SELECT without SYSTEM FROM t1 +SELECT without SYSTEM_TIME FROM t1 +SELECT without TIME FROM t1 +SELECT without TIMESTAMP FROM t1 +SELECT without TRANSACTION FROM t1 +SELECT without VALUE FROM t1 +SELECT without VERSIONING FROM t1 +SELECT without WITHOUT FROM t1 +DROP PROCEDURE p2; +DROP PROCEDURE p1; +# +# MDEV-16244 sql_mode=ORACLE: Some keywords do not work in variable declarations +# +SET sql_mode=ORACLE; +DECLARE +do INT; +BEGIN +SELECT do INTO do FROM DUAL; +END; +/ +DECLARE +handler INT; +BEGIN +SELECT handler INTO handler FROM DUAL; +END; +/ +DECLARE +repair INT; +BEGIN +SELECT repair INTO repair FROM DUAL; +END; +/ +DECLARE +shutdown INT; +BEGIN +SELECT shutdown INTO shutdown FROM DUAL; +END; +/ +DECLARE +truncate INT; +BEGIN +SELECT truncate INTO truncate FROM DUAL; +END; +/ +DECLARE +close INT; +BEGIN +SELECT close INTO close FROM DUAL; +END; +/ +DECLARE +commit INT; +BEGIN +SELECT commit INTO commit FROM DUAL; +END; +/ +DECLARE +open INT; +BEGIN +SELECT open INTO open FROM DUAL; +END; +/ +DECLARE +rollback INT; +BEGIN +SELECT rollback INTO rollback FROM DUAL; +END; +/ +DECLARE +savepoint INT; +BEGIN +SELECT savepoint INTO savepoint FROM DUAL; +END; +/ +DECLARE +contains INT; +BEGIN +SELECT contains INTO contains FROM DUAL; +END; +/ +DECLARE +language INT; +BEGIN +SELECT language INTO language FROM DUAL; +END; +/ +DECLARE +no INT; +BEGIN +SELECT no INTO no FROM DUAL; +END; +/ +DECLARE +charset INT; +BEGIN +SELECT charset INTO charset FROM DUAL; +END; +/ +DECLARE +follows INT; +BEGIN +SELECT follows INTO follows FROM DUAL; +END; +/ +DECLARE +precedes INT; +BEGIN +SELECT precedes INTO precedes FROM DUAL; +END; +/ diff --git a/mysql-test/suite/compat/oracle/r/sp-anchor-row-type-table.result b/mysql-test/suite/compat/oracle/r/sp-anchor-row-type-table.result index b878042174b..9edec669908 100644 --- a/mysql-test/suite/compat/oracle/r/sp-anchor-row-type-table.result +++ b/mysql-test/suite/compat/oracle/r/sp-anchor-row-type-table.result @@ -21,7 +21,7 @@ Table Create Table t2 CREATE TABLE "t2" ( "a" int(11) DEFAULT NULL, "b" text DEFAULT NULL, - "c" varchar(1) DEFAULT NULL + "c" char(1) DEFAULT NULL ) DROP PROCEDURE p2; DROP PROCEDURE p1; diff --git a/mysql-test/suite/compat/oracle/r/sp-expr.result b/mysql-test/suite/compat/oracle/r/sp-expr.result new file mode 100644 index 00000000000..bb0c1a5c5c8 --- /dev/null +++ 
b/mysql-test/suite/compat/oracle/r/sp-expr.result @@ -0,0 +1,158 @@ +SET sql_mode=ORACLE; +# +# Start of 10.3 tests +# +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +BEGIN +CASE ((1) IN (SELECT a FROM t1)) WHEN 1 THEN SELECT 1; +ELSE SELECT NULL; +END CASE; +END; +$$ +1 +1 +BEGIN +CASE (EXISTS (SELECT a FROM t1)) WHEN 1 THEN SELECT 1; +ELSE SELECT NULL; +END CASE; +END; +$$ +1 +1 +BEGIN +IF ((1) IN (SELECT a FROM t1)) THEN SELECT 1; +ELSE SELECT NULL; +END IF; +END; +$$ +1 +1 +BEGIN +IF (EXISTS (SELECT a FROM t1)) THEN SELECT 1; +ELSE SELECT NULL; +END IF; +END; +$$ +1 +1 +BEGIN +WHILE ((1234) IN (SELECT * FROM t1)) LOOP +SELECT 1; +END LOOP; +END; +$$ +BEGIN +WHILE (EXISTS (SELECT * FROM t1 WHERE a=1234)) LOOP +SELECT 1; +END LOOP; +END; +$$ +BEGIN +REPEAT +SELECT 1; +UNTIL (1 IN (SELECT * FROM t1)) +END REPEAT; +END; +$$ +1 +1 +BEGIN +REPEAT +SELECT 1; +UNTIL EXISTS (SELECT * FROM t1 WHERE a=1) +END REPEAT; +END; +$$ +1 +1 +BEGIN +FOR i IN 0..(1 IN (SELECT * FROM t1)) +LOOP +SELECT i; +END LOOP; +END; +$$ +i +0 +i +1 +BEGIN +FOR i IN 0..EXISTS (SELECT * FROM t1 WHERE a=1) +LOOP +SELECT i; +END LOOP; +END; +$$ +i +0 +i +1 +DROP TABLE t1; +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (10); +DECLARE +a INT DEFAULT ((10) IN (SELECT * FROM t1)); +BEGIN +SELECT a; +END; +$$ +a +1 +DECLARE +a INT DEFAULT EXISTS (SELECT * FROM t1); +BEGIN +SELECT a; +END; +$$ +a +1 +DROP TABLE t1; +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +CREATE FUNCTION f1() RETURN INT AS +BEGIN +RETURN ((1) IN (SELECT * FROM t1)); +END; +$$ +CREATE FUNCTION f2() RETURN INT AS +BEGIN +RETURN EXISTS (SELECT * FROM t1 WHERE a=1); +END; +$$ +SELECT f1(); +f1() +1 +SELECT f2(); +f2() +1 +DROP FUNCTION f1; +DROP FUNCTION f2; +DROP TABLE t1; +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +DECLARE +va INT; +CURSOR cur(amin INT) IS SELECT a FROM t1 WHERE a>amin ORDER BY a; +BEGIN +OPEN cur(1 IN (SELECT * FROM t1)); +FETCH cur INTO va; +SELECT va; +CLOSE cur; +END; +$$ +va +2 +DECLARE +va INT; +CURSOR cur(amin INT) IS SELECT a FROM t1 WHERE a>amin ORDER BY a; +BEGIN +OPEN cur(EXISTS (SELECT * FROM t1)); +FETCH cur INTO va; +SELECT va; +CLOSE cur; +END; +$$ +va +2 +DROP TABLE t1; diff --git a/mysql-test/suite/compat/oracle/r/sp-param.result b/mysql-test/suite/compat/oracle/r/sp-param.result index 68ecefa5077..aab1811ef67 100644 --- a/mysql-test/suite/compat/oracle/r/sp-param.result +++ b/mysql-test/suite/compat/oracle/r/sp-param.result @@ -14,7 +14,7 @@ CREATE TABLE t1 AS SELECT f1(REPEAT('a',2000)) AS a;; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "a" text DEFAULT NULL + "a" varchar(2000) DEFAULT NULL ) DROP TABLE t1; DROP FUNCTION f1; @@ -30,7 +30,7 @@ CREATE TABLE t1 AS SELECT f1(REPEAT('a',2000)) AS a;; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "a" text CHARACTER SET utf8 DEFAULT NULL + "a" varchar(2000) CHARACTER SET utf8 DEFAULT NULL ) DROP TABLE t1; DROP FUNCTION f1; @@ -46,7 +46,7 @@ CREATE TABLE t1 AS SELECT f1(REPEAT('a',2000)) AS a;; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "a" blob DEFAULT NULL + "a" varbinary(2000) DEFAULT NULL ) DROP TABLE t1; DROP FUNCTION f1; @@ -62,7 +62,7 @@ CREATE TABLE t1 AS SELECT f1(REPEAT('a',4000)) AS a;; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "a" text DEFAULT NULL + "a" varchar(4000) DEFAULT NULL ) DROP TABLE t1; DROP FUNCTION f1; @@ -78,7 +78,7 @@ CREATE TABLE t1 AS SELECT f1(REPEAT('a',4000)) AS a;; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE 
"t1" ( - "a" text DEFAULT NULL + "a" varchar(4000) DEFAULT NULL ) DROP TABLE t1; DROP FUNCTION f1; @@ -94,7 +94,7 @@ CREATE TABLE t1 AS SELECT f1(REPEAT('a',4000)) AS a;; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "a" text CHARACTER SET utf8 DEFAULT NULL + "a" varchar(4000) CHARACTER SET utf8 DEFAULT NULL ) DROP TABLE t1; DROP FUNCTION f1; @@ -110,7 +110,7 @@ CREATE TABLE t1 AS SELECT f1(REPEAT('a',4000)) AS a;; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "a" blob DEFAULT NULL + "a" varbinary(4000) DEFAULT NULL ) DROP TABLE t1; DROP FUNCTION f1; @@ -126,7 +126,7 @@ CREATE TABLE t1 AS SELECT f1(REPEAT('a',4000)) AS a;; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "a" blob DEFAULT NULL + "a" varbinary(4000) DEFAULT NULL ) DROP TABLE t1; DROP FUNCTION f1; diff --git a/mysql-test/suite/compat/oracle/r/sp-row.result b/mysql-test/suite/compat/oracle/r/sp-row.result index 9557a24a1da..72b33768864 100644 --- a/mysql-test/suite/compat/oracle/r/sp-row.result +++ b/mysql-test/suite/compat/oracle/r/sp-row.result @@ -1363,8 +1363,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" mediumint(9) DEFAULT NULL, - "rec.var" mediumint(9) DEFAULT NULL + "var" mediumint(8) DEFAULT NULL, + "rec.var" mediumint(8) DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -1373,8 +1373,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" mediumint(9) DEFAULT NULL, - "rec.var" mediumint(9) DEFAULT NULL + "var" mediumint(8) DEFAULT NULL, + "rec.var" mediumint(8) DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -1383,8 +1383,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" mediumint(9) DEFAULT NULL, - "rec.var" mediumint(9) DEFAULT NULL + "var" mediumint(8) DEFAULT NULL, + "rec.var" mediumint(8) DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -1393,8 +1393,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" mediumint(9) DEFAULT NULL, - "rec.var" mediumint(9) DEFAULT NULL + "var" mediumint(8) DEFAULT NULL, + "rec.var" mediumint(8) DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -1403,8 +1403,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" mediumint(9) DEFAULT NULL, - "rec.var" mediumint(9) DEFAULT NULL + "var" mediumint(8) DEFAULT NULL, + "rec.var" mediumint(8) DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -1413,8 +1413,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" mediumint(9) DEFAULT NULL, - "rec.var" mediumint(9) DEFAULT NULL + "var" mediumint(8) DEFAULT NULL, + "rec.var" mediumint(8) DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -1423,8 +1423,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" mediumint(9) DEFAULT NULL, - "rec.var" mediumint(9) DEFAULT NULL + "var" mediumint(8) DEFAULT NULL, + "rec.var" mediumint(8) DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -1433,8 +1433,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" mediumint(9) DEFAULT NULL, - "rec.var" mediumint(9) DEFAULT NULL + "var" mediumint(8) DEFAULT NULL, + "rec.var" mediumint(8) DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -1872,8 +1872,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" varchar(1) DEFAULT NULL, - "rec.var" varchar(1) DEFAULT NULL + "var" char(1) DEFAULT NULL, + "rec.var" char(1) DEFAULT NULL ) DROP 
TABLE t1; DROP PROCEDURE p1; @@ -1882,8 +1882,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" varbinary(1) DEFAULT NULL, - "rec.var" varbinary(1) DEFAULT NULL + "var" binary(1) DEFAULT NULL, + "rec.var" binary(1) DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -1892,8 +1892,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" varchar(1) DEFAULT NULL, - "rec.var" varchar(1) DEFAULT NULL + "var" char(1) DEFAULT NULL, + "rec.var" char(1) DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -1902,8 +1902,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" varchar(10) DEFAULT NULL, - "rec.var" varchar(10) DEFAULT NULL + "var" char(10) DEFAULT NULL, + "rec.var" char(10) DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -1912,8 +1912,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" varchar(10) CHARACTER SET utf8 DEFAULT NULL, - "rec.var" varchar(10) CHARACTER SET utf8 DEFAULT NULL + "var" char(10) CHARACTER SET utf8 DEFAULT NULL, + "rec.var" char(10) CHARACTER SET utf8 DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -1922,8 +1922,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" varbinary(10) DEFAULT NULL, - "rec.var" varbinary(10) DEFAULT NULL + "var" binary(10) DEFAULT NULL, + "rec.var" binary(10) DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -2161,8 +2161,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" mediumtext CHARACTER SET utf8 DEFAULT NULL, - "rec.var" mediumtext CHARACTER SET utf8 DEFAULT NULL + "var" text CHARACTER SET utf8 DEFAULT NULL, + "rec.var" text CHARACTER SET utf8 DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -2171,8 +2171,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" text CHARACTER SET utf8 DEFAULT NULL, - "rec.var" text CHARACTER SET utf8 DEFAULT NULL + "var" tinytext CHARACTER SET utf8 DEFAULT NULL, + "rec.var" tinytext CHARACTER SET utf8 DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; @@ -2181,8 +2181,8 @@ CALL p1(); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "var" longtext CHARACTER SET utf8 DEFAULT NULL, - "rec.var" longtext CHARACTER SET utf8 DEFAULT NULL + "var" mediumtext CHARACTER SET utf8 DEFAULT NULL, + "rec.var" mediumtext CHARACTER SET utf8 DEFAULT NULL ) DROP TABLE t1; DROP PROCEDURE p1; diff --git a/mysql-test/suite/compat/oracle/r/sp.result b/mysql-test/suite/compat/oracle/r/sp.result index 7f042825385..8e126b3f724 100644 --- a/mysql-test/suite/compat/oracle/r/sp.result +++ b/mysql-test/suite/compat/oracle/r/sp.result @@ -1704,15 +1704,15 @@ t2 CREATE TABLE "t2" ( "dc100" decimal(10,0) DEFAULT NULL, "dc103" decimal(10,3) DEFAULT NULL, "dc209" decimal(20,9) DEFAULT NULL, - "cc" varchar(10) DEFAULT NULL, + "cc" char(10) DEFAULT NULL, "cv" varchar(10) DEFAULT NULL, "cvu" varchar(10) CHARACTER SET utf8 DEFAULT NULL, "t1" tinytext DEFAULT NULL, "t2" text DEFAULT NULL, "t3" mediumtext DEFAULT NULL, "t4" longtext DEFAULT NULL, - "enum1" varchar(1) DEFAULT NULL, - "set1" varchar(5) DEFAULT NULL, + "enum1" char(1) DEFAULT NULL, + "set1" char(5) DEFAULT NULL, "blob1" tinyblob DEFAULT NULL, "blob2" longblob DEFAULT NULL, "blob3" mediumblob DEFAULT NULL, @@ -1866,15 +1866,15 @@ t2 CREATE TABLE "t2" ( "dc100" decimal(10,0) DEFAULT NULL, "dc103" decimal(10,3) DEFAULT NULL, "dc209" decimal(20,9) DEFAULT NULL, - "cc" varchar(10) DEFAULT NULL, + "cc" char(10) DEFAULT 
NULL, "cv" varchar(10) DEFAULT NULL, "cvu" varchar(10) CHARACTER SET utf8 DEFAULT NULL, "t1" tinytext DEFAULT NULL, "t2" text DEFAULT NULL, "t3" mediumtext DEFAULT NULL, "t4" longtext DEFAULT NULL, - "enum1" varchar(1) DEFAULT NULL, - "set1" varchar(5) DEFAULT NULL, + "enum1" char(1) DEFAULT NULL, + "set1" char(5) DEFAULT NULL, "blob1" tinyblob DEFAULT NULL, "blob2" longblob DEFAULT NULL, "blob3" mediumblob DEFAULT NULL, @@ -2382,7 +2382,7 @@ t1 CREATE TABLE "t1" ( "a_flt0" float DEFAULT NULL, "a_dbl0" double DEFAULT NULL, "a_bit3" bit(3) DEFAULT NULL, - "a_enum0" varchar(1) DEFAULT NULL, + "a_enum0" char(1) DEFAULT NULL, "a_varchar10" varchar(10) DEFAULT NULL, "a_text1" text DEFAULT NULL, "a_tinytext1" tinytext DEFAULT NULL, @@ -2400,7 +2400,7 @@ t1 CREATE TABLE "t1" ( "aa_flt0" float DEFAULT NULL, "aa_dbl0" double DEFAULT NULL, "aa_bit3" bit(3) DEFAULT NULL, - "aa_enum0" varchar(1) DEFAULT NULL, + "aa_enum0" char(1) DEFAULT NULL, "aa_varchar10" varchar(10) DEFAULT NULL, "aa_text1" text DEFAULT NULL, "aa_tinytext1" tinytext DEFAULT NULL, diff --git a/mysql-test/suite/compat/oracle/r/statement-expr.result b/mysql-test/suite/compat/oracle/r/statement-expr.result new file mode 100644 index 00000000000..ea3bd5232d3 --- /dev/null +++ b/mysql-test/suite/compat/oracle/r/statement-expr.result @@ -0,0 +1,69 @@ +SET sql_mode=ORACLE; +# +# Start of 10.3 tests +# +CREATE TABLE t1 (id INT, id1 INT); +INSERT INTO t1 VALUES (1,7); +INSERT INTO t1 VALUES (1,8); +SELECT ROW(1,7) IN (SELECT id, id1 FROM t1 WHERE id1= 8); +ROW(1,7) IN (SELECT id, id1 FROM t1 WHERE id1= 8) +0 +EXECUTE IMMEDIATE 'SELECT ROW(1, 7) IN (SELECT id, id1 FROM t1 WHERE id1= 8)'; +ROW(1, 7) IN (SELECT id, id1 FROM t1 WHERE id1= 8) +0 +DROP TABLE t1; +EXECUTE IMMEDIATE 'SELECT ?' USING (1 IN (SELECT * FROM t1)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT * FROM t1))' at line 1 +EXECUTE IMMEDIATE 'SELECT ?' 
USING (SELECT * FROM t1); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT * FROM t1)' at line 1 +CREATE TABLE t1 (id INT); +INSERT INTO t1 VALUES (10); +CREATE PROCEDURE p1(a INT) AS BEGIN NULL; END; +$$ +CALL p1((1) IN (SELECT * FROM t1)); +CALL p1(EXISTS (SELECT * FROM t1)); +DROP PROCEDURE p1; +DROP TABLE t1; +SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO=(1 IN (SELECT * FROM t1)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '(1 IN (SELECT * FROM t1))' at line 1 +SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO=EXISTS (SELECT * FROM t1); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'EXISTS (SELECT * FROM t1)' at line 1 +BEGIN NOT ATOMIC +DECLARE CONTINUE HANDLER FOR SQLWARNING +RESIGNAL SET MYSQL_ERRNO=(1 IN (SELECT * FROM t1)); +SIGNAL SQLSTATE '01000'; +END; +$$ +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '(1 IN (SELECT * FROM t1)); +SIGNAL SQLSTATE '01000'; +END' at line 3 +BEGIN NOT ATOMIC +DECLARE CONTINUE HANDLER FOR SQLWARNING +RESIGNAL SET MYSQL_ERRNO=EXISTS (SELECT * FROM t1); +SIGNAL SQLSTATE '01000'; +END; +$$ +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'EXISTS (SELECT * FROM t1); +SIGNAL SQLSTATE '01000'; +END' at line 3 +PREPARE stmt FROM (1 IN (SELECT * FROM t1)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT * FROM t1))' at line 1 +PREPARE stmt FROM EXISTS (SELECT * FROM t1); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '(SELECT * FROM t1)' at line 1 +EXECUTE IMMEDIATE (1 IN (SELECT * FROM t1)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT * FROM t1))' at line 1 +EXECUTE IMMEDIATE EXISTS (SELECT * FROM t1); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '(SELECT * FROM t1)' at line 1 +GET DIAGNOSTICS CONDITION (1 IN (SELECT * FROM t1)) @errno=MYSQL_ERRNO; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '(1 IN (SELECT * FROM t1)) @errno=MYSQL_ERRNO' at line 1 +GET DIAGNOSTICS CONDITION EXISTS (SELECT * FROM t1) @errno=MYSQL_ERRNO; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'EXISTS (SELECT * FROM t1) @errno=MYSQL_ERRNO' at line 1 +PURGE BINARY LOGS BEFORE (1 IN (SELECT * FROM t1)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT * FROM t1))' at line 1 +PURGE BINARY LOGS BEFORE EXISTS (SELECT * FROM t1); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the 
right syntax to use near '(SELECT * FROM t1)' at line 1 +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +DO 1 IN (SELECT * FROM t1); +DO EXISTS (SELECT * FROM t1); +DROP TABLE t1; diff --git a/mysql-test/suite/compat/oracle/r/table_value_constr.result b/mysql-test/suite/compat/oracle/r/table_value_constr.result new file mode 100644 index 00000000000..31dcecfdb18 --- /dev/null +++ b/mysql-test/suite/compat/oracle/r/table_value_constr.result @@ -0,0 +1,2101 @@ +SET sql_mode=ORACLE; +create table t1 (a int, b int); +insert into t1 values (1,2),(4,6),(9,7), +(1,1),(2,5),(7,8); +# just VALUES +values (1,2); +1 2 +1 2 +values (1,2), (3,4), (5.6,0); +1 2 +1.0 2 +3.0 4 +5.6 0 +values ('abc', 'def'); +abc def +abc def +# UNION that uses VALUES structure(s) +select 1,2 +union +values (1,2); +1 2 +1 2 +values (1,2) +union +select 1,2; +1 2 +1 2 +select 1,2 +union +values (1,2),(3,4),(5,6),(7,8); +1 2 +1 2 +3 4 +5 6 +7 8 +select 3,7 +union +values (1,2),(3,4),(5,6); +3 7 +3 7 +1 2 +3 4 +5 6 +select 3,7,4 +union +values (1,2,5),(4,5,6); +3 7 4 +3 7 4 +1 2 5 +4 5 6 +select 1,2 +union +values (1,7),(3,6.5); +1 2 +1 2.0 +1 7.0 +3 6.5 +select 1,2 +union +values (1,2.0),(3,6); +1 2 +1 2.0 +3 6.0 +select 1.8,2 +union +values (1,2),(3,6); +1.8 2 +1.8 2 +1.0 2 +3.0 6 +values (1,2.4),(3,6) +union +select 2.8,9; +1 2.4 +1.0 2.4 +3.0 6.0 +2.8 9.0 +values (1,2),(3,4),(5,6),(7,8) +union +select 5,6; +1 2 +1 2 +3 4 +5 6 +7 8 +select 'ab','cdf' +union +values ('al','zl'),('we','q'); +ab cdf +ab cdf +al zl +we q +values ('ab', 'cdf') +union +select 'ab','cdf'; +ab cdf +ab cdf +values (1,2) +union +values (1,2),(5,6); +1 2 +1 2 +5 6 +values (1,2) +union +values (3,4),(5,6); +1 2 +1 2 +3 4 +5 6 +values (1,2) +union +values (1,2) +union values (4,5); +1 2 +1 2 +4 5 +# UNION ALL that uses VALUES structure +values (1,2),(3,4) +union all +select 5,6; +1 2 +1 2 +3 4 +5 6 +values (1,2),(3,4) +union all +select 1,2; +1 2 +1 2 +3 4 +1 2 +select 5,6 +union all +values (1,2),(3,4); +5 6 +5 6 +1 2 +3 4 +select 1,2 +union all +values (1,2),(3,4); +1 2 +1 2 +1 2 +3 4 +values (1,2) +union all +values (1,2),(5,6); +1 2 +1 2 +1 2 +5 6 +values (1,2) +union all +values (3,4),(5,6); +1 2 +1 2 +3 4 +5 6 +values (1,2) +union all +values (1,2) +union all +values (4,5); +1 2 +1 2 +1 2 +4 5 +values (1,2) +union all +values (1,2) +union values (1,2); +1 2 +1 2 +values (1,2) +union +values (1,2) +union all +values (1,2); +1 2 +1 2 +1 2 +# EXCEPT that uses VALUES structure(s) +select 1,2 +except +values (3,4),(5,6); +1 2 +1 2 +select 1,2 +except +values (1,2),(3,4); +1 2 +values (1,2),(3,4) +except +select 5,6; +1 2 +1 2 +3 4 +values (1,2),(3,4) +except +select 1,2; +1 2 +3 4 +values (1,2),(3,4) +except +values (5,6); +1 2 +1 2 +3 4 +values (1,2),(3,4) +except +values (1,2); +1 2 +3 4 +# INTERSECT that uses VALUES structure(s) +select 1,2 +intersect +values (3,4),(5,6); +1 2 +select 1,2 +intersect +values (1,2),(3,4); +1 2 +1 2 +values (1,2),(3,4) +intersect +select 5,6; +1 2 +values (1,2),(3,4) +intersect +select 1,2; +1 2 +1 2 +values (1,2),(3,4) +intersect +values (5,6); +1 2 +values (1,2),(3,4) +intersect +values (1,2); +1 2 +1 2 +# combination of different structures that uses VALUES structures : UNION + EXCEPT +values (1,2),(3,4) +except +select 1,2 +union values (1,2); +1 2 +1 2 +3 4 +values (1,2),(3,4) +except +values (1,2) +union +values (1,2); +1 2 +1 2 +3 4 +values (1,2),(3,4) +except +values (1,2) +union +values (3,4); +1 2 +3 4 +values (1,2),(3,4) +union +values (1,2) +except +values (1,2); +1 2 +3 4 +# combination of 
different structures that uses VALUES structures : UNION ALL + EXCEPT +values (1,2),(3,4) +except +select 1,2 +union all +values (1,2); +1 2 +1 2 +3 4 +values (1,2),(3,4) +except +values (1,2) +union all +values (1,2); +1 2 +1 2 +3 4 +values (1,2),(3,4) +except +values (1,2) +union all +values (3,4); +1 2 +3 4 +3 4 +values (1,2),(3,4) +union all +values (1,2) +except +values (1,2); +1 2 +3 4 +# combination of different structures that uses VALUES structures : UNION + INTERSECT +values (1,2),(3,4) +intersect +select 1,2 +union +values (1,2); +1 2 +1 2 +values (1,2),(3,4) +intersect +values (1,2) +union +values (1,2); +1 2 +1 2 +values (1,2),(3,4) +intersect +values (1,2) +union +values (3,4); +1 2 +1 2 +3 4 +values (1,2),(3,4) +union +values (1,2) +intersect +values (1,2); +1 2 +1 2 +# combination of different structures that uses VALUES structures : UNION ALL + INTERSECT +values (1,2),(3,4) +intersect +select 1,2 +union all +values (1,2); +1 2 +1 2 +1 2 +values (1,2),(3,4) +intersect +values (1,2) +union all +values (1,2); +1 2 +1 2 +1 2 +values (1,2),(3,4) +intersect +values (1,2) +union all +values (3,4); +1 2 +1 2 +3 4 +values (1,2),(3,4) +union all +values (1,2) +intersect +values (1,2); +1 2 +1 2 +# combination of different structures that uses VALUES structures : UNION + UNION ALL +values (1,2),(3,4) +union all +select 1,2 +union +values (1,2); +1 2 +1 2 +3 4 +values (1,2),(3,4) +union all +values (1,2) +union +values (1,2); +1 2 +1 2 +3 4 +values (1,2),(3,4) +union all +values (1,2) +union +values (3,4); +1 2 +1 2 +3 4 +values (1,2),(3,4) +union +values (1,2) +union all +values (1,2); +1 2 +1 2 +3 4 +1 2 +values (1,2) +union +values (1,2) +union all +values (1,2); +1 2 +1 2 +1 2 +# CTE that uses VALUES structure(s) : non-recursive CTE +with t2 as +( +values (1,2),(3,4) +) +select * from t2; +1 2 +1 2 +3 4 +with t2 as +( +select 1,2 +union +values (1,2) +) +select * from t2; +1 2 +1 2 +with t2 as +( +select 1,2 +union +values (1,2),(3,4) +) +select * from t2; +1 2 +1 2 +3 4 +with t2 as +( +values (1,2) +union +select 1,2 +) +select * from t2; +1 2 +1 2 +with t2 as +( +values (1,2),(3,4) +union +select 1,2 +) +select * from t2; +1 2 +1 2 +3 4 +with t2 as +( +values (5,6) +union +values (1,2),(3,4) +) +select * from t2; +5 6 +5 6 +1 2 +3 4 +with t2 as +( +values (1,2) +union +values (1,2),(3,4) +) +select * from t2; +1 2 +1 2 +3 4 +with t2 as +( +select 1,2 +union all +values (1,2),(3,4) +) +select * from t2; +1 2 +1 2 +1 2 +3 4 +with t2 as +( +values (1,2),(3,4) +union all +select 1,2 +) +select * from t2; +1 2 +1 2 +3 4 +1 2 +with t2 as +( +values (1,2) +union all +values (1,2),(3,4) +) +select * from t2; +1 2 +1 2 +1 2 +3 4 +# recursive CTE that uses VALUES structure(s) : singe VALUES structure as anchor +with recursive t2(a,b) as +( +values(1,1) +union +select t1.a, t1.b +from t1,t2 +where t1.a=t2.a +) +select * from t2; +a b +1 1 +1 2 +with recursive t2(a,b) as +( +values(1,1) +union +select t1.a+1, t1.b +from t1,t2 +where t1.a=t2.a +) +select * from t2; +a b +1 1 +2 2 +2 1 +3 5 +# recursive CTE that uses VALUES structure(s) : several VALUES structures as anchors +with recursive t2(a,b) as +( +values(1,1) +union +values (3,4) +union +select t2.a+1, t1.b +from t1,t2 +where t1.a=t2.a +) +select * from t2; +a b +1 1 +3 4 +2 2 +2 1 +3 5 +# recursive CTE that uses VALUES structure(s) : that uses UNION ALL +with recursive t2(a,b,st) as +( +values(1,1,1) +union all +select t2.a, t1.b, t2.st+1 +from t1,t2 +where t1.a=t2.a and st<3 +) +select * from t2; +a b st +1 1 1 +1 2 2 +1 1 2 +1 2 3 
+1 2 3 +1 1 3 +1 1 3 +# recursive CTE that uses VALUES structure(s) : computation of factorial (first 10 elements) +with recursive fact(n,f) as +( +values(1,1) +union +select n+1,f*n from fact where n < 10 +) +select * from fact; +n f +1 1 +2 1 +3 2 +4 6 +5 24 +6 120 +7 720 +8 5040 +9 40320 +10 362880 +# Derived table that uses VALUES structure(s) : singe VALUES structure +select * from (values (1,2),(3,4)) as t2; +1 2 +1 2 +3 4 +# Derived table that uses VALUES structure(s) : UNION with VALUES structure(s) +select * from (select 1,2 union values (1,2)) as t2; +1 2 +1 2 +select * from (select 1,2 union values (1,2),(3,4)) as t2; +1 2 +1 2 +3 4 +select * from (values (1,2) union select 1,2) as t2; +1 2 +1 2 +select * from (values (1,2),(3,4) union select 1,2) as t2; +1 2 +1 2 +3 4 +select * from (values (5,6) union values (1,2),(3,4)) as t2; +5 6 +5 6 +1 2 +3 4 +select * from (values (1,2) union values (1,2),(3,4)) as t2; +1 2 +1 2 +3 4 +# Derived table that uses VALUES structure(s) : UNION ALL with VALUES structure(s) +select * from (select 1,2 union all values (1,2),(3,4)) as t2; +1 2 +1 2 +1 2 +3 4 +select * from (values (1,2),(3,4) union all select 1,2) as t2; +1 2 +1 2 +3 4 +1 2 +select * from (values (1,2) union all values (1,2),(3,4)) as t2; +1 2 +1 2 +1 2 +3 4 +# CREATE VIEW that uses VALUES structure(s) : singe VALUES structure +create view v1 as values (1,2),(3,4); +select * from v1; +1 2 +1 2 +3 4 +drop view v1; +# CREATE VIEW that uses VALUES structure(s) : UNION with VALUES structure(s) +create view v1 as +select 1,2 +union +values (1,2); +select * from v1; +1 2 +1 2 +drop view v1; +create view v1 as +select 1,2 +union +values (1,2),(3,4); +select * from v1; +1 2 +1 2 +3 4 +drop view v1; +create view v1 as +values (1,2) +union +select 1,2; +select * from v1; +1 2 +1 2 +drop view v1; +create view v1 as +values (1,2),(3,4) +union +select 1,2; +select * from v1; +1 2 +1 2 +3 4 +drop view v1; +create view v1 as +values (5,6) +union +values (1,2),(3,4); +select * from v1; +5 6 +5 6 +1 2 +3 4 +drop view v1; +# CREATE VIEW that uses VALUES structure(s) : UNION ALL with VALUES structure(s) +create view v1 as +values (1,2) +union +values (1,2),(3,4); +select * from v1; +1 2 +1 2 +3 4 +drop view v1; +create view v1 as +select 1,2 +union all +values (1,2),(3,4); +select * from v1; +1 2 +1 2 +1 2 +3 4 +drop view v1; +create view v1 as +values (1,2),(3,4) +union all +select 1,2; +select * from v1; +1 2 +1 2 +3 4 +1 2 +drop view v1; +create view v1 as +values (1,2) +union all +values (1,2),(3,4); +select * from v1; +1 2 +1 2 +1 2 +3 4 +drop view v1; +# IN-subquery with VALUES structure(s) : simple case +select * from t1 +where a in (values (1)); +a b +1 2 +1 1 +select * from t1 +where a in (select * from (values (1)) as tvc_0); +a b +1 2 +1 1 +explain extended select * from t1 +where a in (values (1)); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY ALL distinct_key NULL NULL NULL 2 100.00 +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where; Using join buffer (flat, BNL join) +3 MATERIALIZED ALL NULL NULL NULL NULL 2 100.00 +2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" semi join ((values (1)) "tvc_0") where "test"."t1"."a" = "tvc_0"."1" +explain extended select * from t1 +where a in (select * from (values (1)) as tvc_0); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY ALL distinct_key NULL NULL 
NULL 2 100.00 +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where; Using join buffer (flat, BNL join) +2 MATERIALIZED ALL NULL NULL NULL NULL 2 100.00 +3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" semi join ((values (1)) "tvc_0") where "test"."t1"."a" = "tvc_0"."1" +# IN-subquery with VALUES structure(s) : UNION with VALUES on the first place +select * from t1 +where a in (values (1) union select 2); +a b +1 2 +1 1 +2 5 +select * from t1 +where a in (select * from (values (1)) as tvc_0 union +select 2); +a b +1 2 +1 1 +2 5 +explain extended select * from t1 +where a in (values (1) union select 2); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +4 DEPENDENT SUBQUERY ref key0 key0 4 func 2 100.00 +2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +3 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where <"test"."t1"."a">(("test"."t1"."a",(/* select#4 */ select "tvc_0"."1" from (values (1)) "tvc_0" where ("test"."t1"."a") = "tvc_0"."1" union /* select#3 */ select 2 having ("test"."t1"."a") = (2)))) +explain extended select * from t1 +where a in (select * from (values (1)) as tvc_0 union +select 2); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY ref key0 key0 4 func 2 100.00 +3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +4 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where <"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select "tvc_0"."1" from (values (1)) "tvc_0" where ("test"."t1"."a") = "tvc_0"."1" union /* select#4 */ select 2 having ("test"."t1"."a") = (2)))) +# IN-subquery with VALUES structure(s) : UNION with VALUES on the second place +select * from t1 +where a in (select 2 union values (1)); +a b +1 2 +1 1 +2 5 +select * from t1 +where a in (select 2 union +select * from (values (1)) tvc_0); +a b +1 2 +1 1 +2 5 +explain extended select * from t1 +where a in (select 2 union values (1)); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used +4 DEPENDENT UNION ref key0 key0 4 func 2 100.00 +3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where <"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select 2 having ("test"."t1"."a") = (2) union /* select#4 */ select "tvc_0"."1" from (values (1)) "tvc_0" where ("test"."t1"."a") = "tvc_0"."1"))) +explain extended select * from t1 +where a in (select 2 union +select * from (values (1)) tvc_0); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No 
tables used +3 DEPENDENT UNION ref key0 key0 4 func 2 100.00 +4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where <"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select 2 having ("test"."t1"."a") = (2) union /* select#3 */ select "tvc_0"."1" from (values (1)) "tvc_0" where ("test"."t1"."a") = "tvc_0"."1"))) +# IN-subquery with VALUES structure(s) : UNION ALL +select * from t1 +where a in (values (1) union all select b from t1); +a b +1 2 +1 1 +2 5 +7 8 +select * from t1 +where a in (select * from (values (1)) as tvc_0 union all +select b from t1); +a b +1 2 +1 1 +2 5 +7 8 +explain extended select * from t1 +where a in (values (1) union all select b from t1); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +4 DEPENDENT SUBQUERY ref key0 key0 4 func 2 100.00 +2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +3 DEPENDENT UNION t1 ALL NULL NULL NULL NULL 6 100.00 Using where +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where <"test"."t1"."a">(("test"."t1"."a",(/* select#4 */ select "tvc_0"."1" from (values (1)) "tvc_0" where ("test"."t1"."a") = "tvc_0"."1" union all /* select#3 */ select "test"."t1"."b" from "test"."t1" where ("test"."t1"."a") = "test"."t1"."b"))) +explain extended select * from t1 +where a in (select * from (values (1)) as tvc_0 union all +select b from t1); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY ref key0 key0 4 func 2 100.00 +3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +4 DEPENDENT UNION t1 ALL NULL NULL NULL NULL 6 100.00 Using where +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where <"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select "tvc_0"."1" from (values (1)) "tvc_0" where ("test"."t1"."a") = "tvc_0"."1" union all /* select#4 */ select "test"."t1"."b" from "test"."t1" where ("test"."t1"."a") = "test"."t1"."b"))) +# NOT IN subquery with VALUES structure(s) : simple case +select * from t1 +where a not in (values (1),(2)); +a b +4 6 +9 7 +7 8 +select * from t1 +where a not in (select * from (values (1),(2)) as tvc_0); +a b +4 6 +9 7 +7 8 +explain extended select * from t1 +where a not in (values (1),(2)); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +3 MATERIALIZED ALL NULL NULL NULL NULL 2 100.00 +2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where !<"test"."t1"."a">(("test"."t1"."a","test"."t1"."a" in ( (/* select#3 */ select "tvc_0"."1" from (values (1),(2)) "tvc_0" ), ("test"."t1"."a" in on distinct_key where "test"."t1"."a" = ""."1")))) +explain extended select * from t1 +where a not in (select * from (values (1),(2)) as tvc_0); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 MATERIALIZED ALL NULL NULL NULL NULL 2 100.00 +3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 /* select#1 
*/ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where !<"test"."t1"."a">(("test"."t1"."a","test"."t1"."a" in ( (/* select#2 */ select "tvc_0"."1" from (values (1),(2)) "tvc_0" ), ("test"."t1"."a" in on distinct_key where "test"."t1"."a" = ""."1")))) +# NOT IN subquery with VALUES structure(s) : UNION with VALUES on the first place +select * from t1 +where a not in (values (1) union select 2); +a b +4 6 +9 7 +7 8 +select * from t1 +where a not in (select * from (values (1)) as tvc_0 union +select 2); +a b +4 6 +9 7 +7 8 +explain extended select * from t1 +where a not in (values (1) union select 2); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +4 DEPENDENT SUBQUERY ALL NULL NULL NULL NULL 2 100.00 Using where +2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +3 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where !<"test"."t1"."a">(("test"."t1"."a",(/* select#4 */ select "tvc_0"."1" from (values (1)) "tvc_0" where trigcond(("test"."t1"."a") = "tvc_0"."1") union /* select#3 */ select 2 having trigcond(("test"."t1"."a") = (2))))) +explain extended select * from t1 +where a not in (select * from (values (1)) as tvc_0 union +select 2); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY ALL NULL NULL NULL NULL 2 100.00 Using where +3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +4 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where !<"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select "tvc_0"."1" from (values (1)) "tvc_0" where trigcond(("test"."t1"."a") = "tvc_0"."1") union /* select#4 */ select 2 having trigcond(("test"."t1"."a") = (2))))) +# NOT IN subquery with VALUES structure(s) : UNION with VALUES on the second place +select * from t1 +where a not in (select 2 union values (1)); +a b +4 6 +9 7 +7 8 +select * from t1 +where a not in (select 2 union +select * from (values (1)) as tvc_0); +a b +4 6 +9 7 +7 8 +explain extended select * from t1 +where a not in (select 2 union values (1)); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used +4 DEPENDENT UNION ALL NULL NULL NULL NULL 2 100.00 Using where +3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where !<"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select 2 having trigcond(("test"."t1"."a") = (2)) union /* select#4 */ select "tvc_0"."1" from (values (1)) "tvc_0" where trigcond(("test"."t1"."a") = "tvc_0"."1")))) +explain extended select * from t1 +where a not in (select 2 union +select * from (values (1)) as tvc_0); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY NULL NULL NULL 
NULL NULL NULL NULL NULL No tables used +3 DEPENDENT UNION ALL NULL NULL NULL NULL 2 100.00 Using where +4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where !<"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select 2 having trigcond(("test"."t1"."a") = (2)) union /* select#3 */ select "tvc_0"."1" from (values (1)) "tvc_0" where trigcond(("test"."t1"."a") = "tvc_0"."1")))) +# ANY-subquery with VALUES structure(s) : simple case +select * from t1 +where a = any (values (1),(2)); +a b +1 2 +1 1 +2 5 +select * from t1 +where a = any (select * from (values (1),(2)) as tvc_0); +a b +1 2 +1 1 +2 5 +explain extended select * from t1 +where a = any (values (1),(2)); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY ALL distinct_key NULL NULL NULL 2 100.00 +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where; Using join buffer (flat, BNL join) +3 MATERIALIZED ALL NULL NULL NULL NULL 2 100.00 +2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" semi join ((values (1),(2)) "tvc_0") where "test"."t1"."a" = "tvc_0"."1" +explain extended select * from t1 +where a = any (select * from (values (1),(2)) as tvc_0); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY ALL distinct_key NULL NULL NULL 2 100.00 +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where; Using join buffer (flat, BNL join) +2 MATERIALIZED ALL NULL NULL NULL NULL 2 100.00 +3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" semi join ((values (1),(2)) "tvc_0") where "test"."t1"."a" = "tvc_0"."1" +# ANY-subquery with VALUES structure(s) : UNION with VALUES on the first place +select * from t1 +where a = any (values (1) union select 2); +a b +1 2 +1 1 +2 5 +select * from t1 +where a = any (select * from (values (1)) as tvc_0 union +select 2); +a b +1 2 +1 1 +2 5 +explain extended select * from t1 +where a = any (values (1) union select 2); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +4 DEPENDENT SUBQUERY ref key0 key0 4 func 2 100.00 +2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +3 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where <"test"."t1"."a">(("test"."t1"."a",(/* select#4 */ select "tvc_0"."1" from (values (1)) "tvc_0" where ("test"."t1"."a") = "tvc_0"."1" union /* select#3 */ select 2 having ("test"."t1"."a") = (2)))) +explain extended select * from t1 +where a = any (select * from (values (1)) as tvc_0 union +select 2); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY ref key0 key0 4 func 2 100.00 +3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +4 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS 
"a","test"."t1"."b" AS "b" from "test"."t1" where <"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select "tvc_0"."1" from (values (1)) "tvc_0" where ("test"."t1"."a") = "tvc_0"."1" union /* select#4 */ select 2 having ("test"."t1"."a") = (2)))) +# ANY-subquery with VALUES structure(s) : UNION with VALUES on the second place +select * from t1 +where a = any (select 2 union values (1)); +a b +1 2 +1 1 +2 5 +select * from t1 +where a = any (select 2 union +select * from (values (1)) as tvc_0); +a b +1 2 +1 1 +2 5 +explain extended select * from t1 +where a = any (select 2 union values (1)); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used +4 DEPENDENT UNION ref key0 key0 4 func 2 100.00 +3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where <"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select 2 having ("test"."t1"."a") = (2) union /* select#4 */ select "tvc_0"."1" from (values (1)) "tvc_0" where ("test"."t1"."a") = "tvc_0"."1"))) +explain extended select * from t1 +where a = any (select 2 union +select * from (values (1)) as tvc_0); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used +3 DEPENDENT UNION ref key0 key0 4 func 2 100.00 +4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where <"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select 2 having ("test"."t1"."a") = (2) union /* select#3 */ select "tvc_0"."1" from (values (1)) "tvc_0" where ("test"."t1"."a") = "tvc_0"."1"))) +# ALL-subquery with VALUES structure(s) : simple case +select * from t1 +where a = all (values (1)); +a b +1 2 +1 1 +select * from t1 +where a = all (select * from (values (1)) as tvc_0); +a b +1 2 +1 1 +explain extended select * from t1 +where a = all (values (1)); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +3 DEPENDENT SUBQUERY ALL NULL NULL NULL NULL 2 100.00 Using where +2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where (<"test"."t1"."a">(("test"."t1"."a",(/* select#3 */ select "tvc_0"."1" from (values (1)) "tvc_0" where trigcond(("test"."t1"."a") <> "tvc_0"."1"))))) +explain extended select * from t1 +where a = all (select * from (values (1)) as tvc_0); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY ALL NULL NULL NULL NULL 2 100.00 Using where +3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where (<"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select "tvc_0"."1" from (values (1)) "tvc_0" where trigcond(("test"."t1"."a") <> "tvc_0"."1"))))) +# ALL-subquery with VALUES structure(s) : 
UNION with VALUES on the first place +select * from t1 +where a = all (values (1) union select 1); +a b +1 2 +1 1 +select * from t1 +where a = all (select * from (values (1)) as tvc_0 union +select 1); +a b +1 2 +1 1 +explain extended select * from t1 +where a = all (values (1) union select 1); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +4 DEPENDENT SUBQUERY ALL NULL NULL NULL NULL 2 100.00 Using where +2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +3 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where (<"test"."t1"."a">(("test"."t1"."a",(/* select#4 */ select "tvc_0"."1" from (values (1)) "tvc_0" where trigcond(("test"."t1"."a") <> "tvc_0"."1") union /* select#3 */ select 1 having trigcond(("test"."t1"."a") <> (1)))))) +explain extended select * from t1 +where a = all (select * from (values (1)) as tvc_0 union +select 1); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY ALL NULL NULL NULL NULL 2 100.00 Using where +3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +4 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where (<"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select "tvc_0"."1" from (values (1)) "tvc_0" where trigcond(("test"."t1"."a") <> "tvc_0"."1") union /* select#4 */ select 1 having trigcond(("test"."t1"."a") <> (1)))))) +# ALL-subquery with VALUES structure(s) : UNION with VALUES on the second place +select * from t1 +where a = any (select 1 union values (1)); +a b +1 2 +1 1 +select * from t1 +where a = any (select 1 union +select * from (values (1)) as tvc_0); +a b +1 2 +1 1 +explain extended select * from t1 +where a = any (select 1 union values (1)); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used +4 DEPENDENT UNION ref key0 key0 4 func 2 100.00 +3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where <"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select 1 having ("test"."t1"."a") = (1) union /* select#4 */ select "tvc_0"."1" from (values (1)) "tvc_0" where ("test"."t1"."a") = "tvc_0"."1"))) +explain extended select * from t1 +where a = any (select 1 union +select * from (values (1)) as tvc_0); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where +2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used +3 DEPENDENT UNION ref key0 key0 4 func 2 100.00 +4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where <"test"."t1"."a">(("test"."t1"."a",(/* select#2 */ select 1 
having ("test"."t1"."a") = (1) union /* select#3 */ select "tvc_0"."1" from (values (1)) "tvc_0" where ("test"."t1"."a") = "tvc_0"."1"))) +# prepare statement that uses VALUES structure(s): single VALUES structure +prepare stmt1 from ' +values (1,2); +'; +execute stmt1; +1 2 +1 2 +execute stmt1; +1 2 +1 2 +deallocate prepare stmt1; +# prepare statement that uses VALUES structure(s): UNION with VALUES structure(s) +prepare stmt1 from ' + select 1,2 + union + values (1,2),(3,4); +'; +execute stmt1; +1 2 +1 2 +3 4 +execute stmt1; +1 2 +1 2 +3 4 +deallocate prepare stmt1; +prepare stmt1 from ' + values (1,2),(3,4) + union + select 1,2; +'; +execute stmt1; +1 2 +1 2 +3 4 +execute stmt1; +1 2 +1 2 +3 4 +deallocate prepare stmt1; +prepare stmt1 from ' + select 1,2 + union + values (3,4) + union + values (1,2); +'; +execute stmt1; +1 2 +1 2 +3 4 +execute stmt1; +1 2 +1 2 +3 4 +deallocate prepare stmt1; +prepare stmt1 from ' + values (5,6) + union + values (1,2),(3,4); +'; +execute stmt1; +5 6 +5 6 +1 2 +3 4 +execute stmt1; +5 6 +5 6 +1 2 +3 4 +deallocate prepare stmt1; +# prepare statement that uses VALUES structure(s): UNION ALL with VALUES structure(s) +prepare stmt1 from ' + select 1,2 + union + values (1,2),(3,4); +'; +execute stmt1; +1 2 +1 2 +3 4 +execute stmt1; +1 2 +1 2 +3 4 +deallocate prepare stmt1; +prepare stmt1 from ' + values (1,2),(3,4) + union all + select 1,2; +'; +execute stmt1; +1 2 +1 2 +3 4 +1 2 +execute stmt1; +1 2 +1 2 +3 4 +1 2 +deallocate prepare stmt1; +prepare stmt1 from ' + select 1,2 + union all + values (3,4) + union all + values (1,2); +'; +execute stmt1; +1 2 +1 2 +3 4 +1 2 +execute stmt1; +1 2 +1 2 +3 4 +1 2 +deallocate prepare stmt1; +prepare stmt1 from ' + values (1,2) + union all + values (1,2),(3,4); +'; +execute stmt1; +1 2 +1 2 +1 2 +3 4 +execute stmt1; +1 2 +1 2 +1 2 +3 4 +deallocate prepare stmt1; +# explain query that uses VALUES structure(s): single VALUES structure +explain +values (1,2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used +explain format=json +values (1,2); +EXPLAIN +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +# explain query that uses VALUES structure(s): UNION with VALUES structure(s) +explain +select 1,2 +union +values (1,2),(3,4); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain +values (1,2),(3,4) +union +select 1,2; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain +values (5,6) +union +values (1,2),(3,4); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain format=json +select 1,2 +union +values (1,2),(3,4); +EXPLAIN +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables 
used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +explain format=json +values (1,2),(3,4) +union +select 1,2; +EXPLAIN +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +explain format=json +values (5,6) +union +values (1,2),(3,4); +EXPLAIN +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +explain +select 1,2 +union +values (3,4) +union +values (1,2); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL No tables used +3 UNION NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain format=json +select 1,2 +union +values (3,4) +union +values (1,2); +EXPLAIN +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 3, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +# explain query that uses VALUES structure(s): UNION ALL with VALUES structure(s) +explain +select 1,2 +union +values (1,2),(3,4); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain +values (1,2),(3,4) +union all +select 1,2; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL No tables used +explain +values (1,2) +union all +values (1,2),(3,4); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL No tables used +explain format=json +values (1,2),(3,4) +union all +select 1,2; +EXPLAIN +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +explain format=json +select 1,2 +union +values (1,2),(3,4); +EXPLAIN +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables 
used" + } + } + } + ] + } + } +} +explain format=json +values (1,2) +union all +values (1,2),(3,4); +EXPLAIN +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +explain +select 1,2 +union all +values (3,4) +union all +values (1,2); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL No tables used +3 UNION NULL NULL NULL NULL NULL NULL NULL No tables used +explain format=json +select 1,2 +union all +values (3,4) +union all +values (1,2); +EXPLAIN +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 3, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +# analyze query that uses VALUES structure(s): single VALUES structure +analyze +values (1,2); +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +analyze format=json +values (1,2); +ANALYZE +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "r_loops": 0, + "r_rows": null, + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +# analyze query that uses VALUES structure(s): UNION with VALUES structure(s) +analyze +select 1,2 +union +values (1,2),(3,4); +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL 2.00 NULL NULL +analyze +values (1,2),(3,4) +union +select 1,2; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL 2.00 NULL NULL +analyze +values (5,6) +union +values (1,2),(3,4); +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL 3.00 NULL NULL +analyze format=json +select 1,2 +union +values (1,2),(3,4); +ANALYZE +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "r_loops": 1, + "r_rows": 2, + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +analyze format=json +values (1,2),(3,4) +union 
+select 1,2; +ANALYZE +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "r_loops": 1, + "r_rows": 2, + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +analyze format=json +values (5,6) +union +values (1,2),(3,4); +ANALYZE +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "r_loops": 1, + "r_rows": 3, + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +analyze +select 1,2 +union +values (3,4) +union +values (1,2); +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +3 UNION NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL 2.00 NULL NULL +analyze format=json +select 1,2 +union +values (3,4) +union +values (1,2); +ANALYZE +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "r_loops": 1, + "r_rows": 2, + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 3, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +# analyze query that uses VALUES structure(s): UNION ALL with VALUES structure(s) +analyze +select 1,2 +union +values (1,2),(3,4); +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +NULL UNION RESULT ALL NULL NULL NULL NULL NULL 2.00 NULL NULL +analyze +values (1,2),(3,4) +union all +select 1,2; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +analyze +values (1,2) +union all +values (1,2),(3,4); +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +analyze format=json +values (1,2),(3,4) +union all +select 1,2; +ANALYZE +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "r_loops": 0, + "r_rows": null, + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +analyze format=json +select 1,2 +union +values (1,2),(3,4); +ANALYZE +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "r_loops": 1, + "r_rows": 
2, + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +analyze format=json +values (1,2) +union all +values (1,2),(3,4); +ANALYZE +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "r_loops": 0, + "r_rows": null, + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +analyze +select 1,2 +union all +values (3,4) +union all +values (1,2); +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +2 UNION NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +3 UNION NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL No tables used +analyze format=json +select 1,2 +union all +values (3,4) +union all +values (1,2); +ANALYZE +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "r_loops": 0, + "r_rows": null, + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 2, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + }, + { + "query_block": { + "select_id": 3, + "operation": "UNION", + "table": { + "message": "No tables used" + } + } + } + ] + } + } +} +# different number of values in TVC +values (1,2),(3,4,5); +ERROR HY000: The used table value constructor has a different number of values +# illegal parameter data types in TVC +values (1,point(1,1)),(1,1); +ERROR HY000: Illegal parameter data types geometry and int for operation 'TABLE VALUE CONSTRUCTOR' +values (1,point(1,1)+1); +ERROR HY000: Illegal parameter data types geometry and int for operation '+' +# field reference in TVC +select * from (values (1), (b), (2)) as new_tvc; +ERROR HY000: Field reference 'b' can't be used in table value constructor +select * from (values (1), (t1.b), (2)) as new_tvc; +ERROR HY000: Field reference 't1.b' can't be used in table value constructor +drop table t1; +# +# MDEV-15940: cursor over TVC +# +DECLARE +v INT; +CURSOR cur IS VALUES(7); +BEGIN +OPEN cur; +FETCH cur INTO v; +SELECT v; +END; +| +v +7 +DECLARE +v INT DEFAULT 0; +BEGIN +FOR a IN (VALUES (7)) LOOP +SET v = v + 1; +END LOOP; +SELECT v; +END; +| +v +1 +# +# MDEV-16038: empty row in TVC +# +with t as (values (),()) select 1 from t; +ERROR HY000: Row with no elements is not allowed in table value constructor in this context diff --git a/mysql-test/suite/compat/oracle/r/versioning.result b/mysql-test/suite/compat/oracle/r/versioning.result new file mode 100644 index 00000000000..ebedcf0f462 --- /dev/null +++ b/mysql-test/suite/compat/oracle/r/versioning.result @@ -0,0 +1,16 @@ +SET sql_mode=ORACLE; +# +# MDEV-15975 PL/SQL parser does not understand historical queries +# +CREATE TABLE t1 (a INT) WITH SYSTEM VERSIONING; +INSERT INTO t1 VALUES (10); +DELETE FROM t1; +INSERT INTO t1 VALUES (20); +SELECT * FROM t1 FOR SYSTEM_TIME ALL; +a +10 +20 +SELECT * FROM t1 FOR SYSTEM_TIME AS OF (NOW()+INTERVAL 10 YEAR); +a +20 +DROP TABLE t1; diff --git a/mysql-test/suite/compat/oracle/r/win.result 
b/mysql-test/suite/compat/oracle/r/win.result new file mode 100644 index 00000000000..b11eba0d1da --- /dev/null +++ b/mysql-test/suite/compat/oracle/r/win.result @@ -0,0 +1,17 @@ +SET sql_mode=ORACLE; +# +# MDEV-13384: "window" seems like a reserved column name but it's not listed as one +# +# Currently we allow window as an identifier, except for table aliases. +# +CREATE TABLE door (id INT, window VARCHAR(10)); +SELECT id +FROM door as window; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'window' at line 2 +SELECT id, window +FROM door; +id window +SELECT id, window +FROM door as window; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'window' at line 2 +DROP TABLE door; diff --git a/mysql-test/suite/compat/oracle/t/binlog_stm_ps.test b/mysql-test/suite/compat/oracle/t/binlog_stm_ps.test index 996ef574413..f305f611bd2 100644 --- a/mysql-test/suite/compat/oracle/t/binlog_stm_ps.test +++ b/mysql-test/suite/compat/oracle/t/binlog_stm_ps.test @@ -35,3 +35,23 @@ SELECT * FROM t1; --let $binlog_file = LAST source include/show_binlog_events.inc; DROP TABLE t1; + + +--echo # +--echo # MDEV-16095 Oracle-style placeholder inside GROUP BY..WITH ROLLUP breaks replication +--echo # + +FLUSH LOGS; +CREATE TABLE t1 (d DATE); +INSERT INTO t1 VALUES ('1985-05-13'),('1989-12-24'); +CREATE TABLE t2 (d DATE, c BIGINT); +DELIMITER $$; +BEGIN + EXECUTE IMMEDIATE 'INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, :param' USING 1; + EXECUTE IMMEDIATE 'INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, :param WITH ROLLUP' USING 1; +END; +$$ +DELIMITER ;$$ +DROP TABLE t1,t2; +--let $binlog_file = LAST +source include/show_binlog_events.inc; diff --git a/mysql-test/suite/compat/oracle/t/binlog_stm_sp.test b/mysql-test/suite/compat/oracle/t/binlog_stm_sp.test index 065c43eb274..e6f33cb1118 100644 --- a/mysql-test/suite/compat/oracle/t/binlog_stm_sp.test +++ b/mysql-test/suite/compat/oracle/t/binlog_stm_sp.test @@ -194,3 +194,26 @@ DROP TABLE t2; DROP PROCEDURE p1; --let $binlog_file = LAST source include/show_binlog_events.inc; + + +--echo # +--echo # MDEV-16020 SP variables inside GROUP BY..WITH ROLLUP break replication +--echo # + +FLUSH LOGS; +CREATE TABLE t1 (d DATE); +INSERT INTO t1 VALUES ('1985-05-13'),('1989-12-24'); +CREATE TABLE t2 (d DATE, c BIGINT); +DELIMITER $$; +DECLARE + var INT; +BEGIN + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, var; + INSERT INTO t2 SELECT d, COUNT(*) FROM t1 GROUP BY d, var WITH ROLLUP; +END; +$$ +DELIMITER ;$$ +DROP TABLE t1,t2; + +--let $binlog_file = LAST +source include/show_binlog_events.inc; diff --git a/mysql-test/suite/compat/oracle/t/column_compression.test b/mysql-test/suite/compat/oracle/t/column_compression.test new file mode 100644 index 00000000000..5544ff6c0b7 --- /dev/null +++ b/mysql-test/suite/compat/oracle/t/column_compression.test @@ -0,0 +1,11 @@ +--source include/have_innodb.inc +--source include/have_csv.inc + +SET sql_mode=ORACLE; + +SET column_compression_zlib_wrap=true; +CREATE TABLE t1 (a BLOB COMPRESSED); +INSERT INTO t1 VALUES (REPEAT('a',10000)); +SELECT DATA_LENGTH<100 AS c FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test'; +DROP TABLE t1; diff --git a/mysql-test/suite/compat/oracle/t/func_concat.test b/mysql-test/suite/compat/oracle/t/func_concat.test index e1d8a5c477f..7f9fec4f2fe 100644 --- 
a/mysql-test/suite/compat/oracle/t/func_concat.test +++ b/mysql-test/suite/compat/oracle/t/func_concat.test @@ -114,3 +114,35 @@ SET sql_mode=ORACLE; SHOW CREATE VIEW v1; SELECT * FROM v1; DROP VIEW v1; + + +--echo # +--echo # MDEV-16186 Concatenation operator || returns wrong results in sql_mode=ORACLE +--echo # + +# Concatenation operator || has the same precedence with + +# (stronger than << and weaker than * ^) + +SELECT -1<<1||1 AS a FROM DUAL; +SELECT -1||0<<1 AS a FROM DUAL; + +EXPLAIN EXTENDED SELECT -1<<1||1 AS a FROM DUAL; +EXPLAIN EXTENDED SELECT -1||0<<1 AS a FROM DUAL; + +SELECT -1+1||1 AS a FROM DUAL; +SELECT -1||0+1 AS a FROM DUAL; + +EXPLAIN EXTENDED SELECT -1+1||1 AS a FROM DUAL; +EXPLAIN EXTENDED SELECT -1||0+1 AS a FROM DUAL; + +SELECT 1*1||-1 AS a FROM DUAL; +SELECT 1||1*-1 AS a FROM DUAL; + +EXPLAIN EXTENDED SELECT 1*1||-1 AS a FROM DUAL; +EXPLAIN EXTENDED SELECT 1||1*-1 AS a FROM DUAL; + +SELECT -1^1||1 AS a FROM DUAL; +SELECT -1||0^1 AS a FROM DUAL; + +EXPLAIN EXTENDED SELECT -1^1||1 AS a FROM DUAL; +EXPLAIN EXTENDED SELECT -1||0^1 AS a FROM DUAL; diff --git a/mysql-test/suite/compat/oracle/t/func_time.test b/mysql-test/suite/compat/oracle/t/func_time.test new file mode 100644 index 00000000000..c1174f7f395 --- /dev/null +++ b/mysql-test/suite/compat/oracle/t/func_time.test @@ -0,0 +1,25 @@ +SET sql_mode=ORACLE; + +--echo # +--echo # Start of 10.3 tests +--echo # + +--echo # +--echo # MDEV-16152 Expressions with INTERVAL return bad results in some cases +--echo # + +SELECT TIMESTAMP'2001-01-01 10:20:30' - INTERVAL '10' YEAR AS c1, + -INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2; + +SELECT TIMESTAMP'2001-01-01 10:20:30' + INTERVAL '10' YEAR AS c1, + INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2, + +INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c3; + +EXPLAIN EXTENDED SELECT + TIMESTAMP'2001-01-01 10:20:30' - INTERVAL '10' YEAR AS c1, + -INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2; + +EXPLAIN EXTENDED SELECT + TIMESTAMP'2001-01-01 10:20:30' + INTERVAL '10' YEAR AS c1, + INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c2, + +INTERVAL '10' YEAR + TIMESTAMP'2001-01-01 10:20:30' AS c3; diff --git a/mysql-test/suite/compat/oracle/t/gis.test b/mysql-test/suite/compat/oracle/t/gis.test new file mode 100644 index 00000000000..a684563390b --- /dev/null +++ b/mysql-test/suite/compat/oracle/t/gis.test @@ -0,0 +1,4 @@ +-- source include/have_geometry.inc + +SELECT WITHIN(POINT(1,1), POINT(1,1)); +SELECT WITHIN(POINT(1,1), POINT(0,0)); diff --git a/mysql-test/suite/compat/oracle/t/parser.test b/mysql-test/suite/compat/oracle/t/parser.test index 5aa37c1cd33..a8f5eda7016 100644 --- a/mysql-test/suite/compat/oracle/t/parser.test +++ b/mysql-test/suite/compat/oracle/t/parser.test @@ -22,3 +22,189 @@ BEGIN END; $$ DELIMITER ;$$ + + +--echo # +--echo # MDEV-16202 Latest changes made erroneously some keywords reserved in sql_mode=ORACLE +--echo # + + +DELIMITER $$; +CREATE PROCEDURE p1(name VARCHAR(64), pattern TEXT) AS + query TEXT DEFAULT REPLACE(pattern, 'name', name); +BEGIN + SELECT query AS ''; + EXECUTE IMMEDIATE query; +EXCEPTION + WHEN OTHERS THEN + BEGIN + SHOW ERRORS; + END; +END; +$$ + +CREATE PROCEDURE p2(name VARCHAR(64)) AS +BEGIN + CALL p1(name, 'DECLARE name INT; BEGIN name:=10; SELECT name; END'); + EXECUTE IMMEDIATE REPLACE('CREATE TABLE t1 (name INT)', 'name', name); + CALL p1(name, 'SELECT name FROM t1'); + CALL p1(name, 'SELECT name ''alias'' FROM t1'); + CALL p1(name, 'SELECT name()'); + CALL p1(name, 'SELECT 
name.name()'); + CALL p1(name, 'SELECT name DATE FROM t1'); + CALL p1(name, 'SELECT name HISTORY FROM t1'); + CALL p1(name, 'SELECT name NEXT FROM t1'); + CALL p1(name, 'SELECT name PERIOD FROM t1'); + CALL p1(name, 'SELECT name PREVIOUS FROM t1'); + CALL p1(name, 'SELECT name SYSTEM FROM t1'); + CALL p1(name, 'SELECT name SYSTEM_TIME FROM t1'); + CALL p1(name, 'SELECT name TIME FROM t1'); + CALL p1(name, 'SELECT name TIMESTAMP FROM t1'); + CALL p1(name, 'SELECT name TRANSACTION FROM t1'); + CALL p1(name, 'SELECT name VALUE FROM t1'); + CALL p1(name, 'SELECT name VERSIONING FROM t1'); + CALL p1(name, 'SELECT name WITHOUT FROM t1'); + DROP TABLE t1; +END; +$$ +DELIMITER ;$$ + +--disable_column_names +CALL p2('date'); +CALL p2('history'); +CALL p2('next'); +CALL p2('period'); +CALL p2('previous'); +CALL p2('system'); +CALL p2('system_time'); +CALL p2('time'); +CALL p2('timestamp'); +CALL p2('transaction'); +CALL p2('value'); +CALL p2('versioning'); +CALL p2('without'); +--enable_column_names + +DROP PROCEDURE p2; +DROP PROCEDURE p1; + + +--echo # +--echo # MDEV-16244 sql_mode=ORACLE: Some keywords do not work in variable declarations +--echo # + +SET sql_mode=ORACLE; +DELIMITER /; + +DECLARE + do INT; +BEGIN + SELECT do INTO do FROM DUAL; +END; +/ + +DECLARE + handler INT; +BEGIN + SELECT handler INTO handler FROM DUAL; +END; +/ + +DECLARE + repair INT; +BEGIN + SELECT repair INTO repair FROM DUAL; +END; +/ + +DECLARE + shutdown INT; +BEGIN + SELECT shutdown INTO shutdown FROM DUAL; +END; +/ + +DECLARE + truncate INT; +BEGIN + SELECT truncate INTO truncate FROM DUAL; +END; +/ + +DECLARE + close INT; +BEGIN + SELECT close INTO close FROM DUAL; +END; +/ + +DECLARE + commit INT; +BEGIN + SELECT commit INTO commit FROM DUAL; +END; +/ + +DECLARE + open INT; +BEGIN + SELECT open INTO open FROM DUAL; +END; +/ + +DECLARE + rollback INT; +BEGIN + SELECT rollback INTO rollback FROM DUAL; +END; +/ + +DECLARE + savepoint INT; +BEGIN + SELECT savepoint INTO savepoint FROM DUAL; +END; +/ + +DECLARE + contains INT; +BEGIN + SELECT contains INTO contains FROM DUAL; +END; +/ + +DECLARE + language INT; +BEGIN + SELECT language INTO language FROM DUAL; +END; +/ + +DECLARE + no INT; +BEGIN + SELECT no INTO no FROM DUAL; +END; +/ + +DECLARE + charset INT; +BEGIN + SELECT charset INTO charset FROM DUAL; +END; +/ +DECLARE + follows INT; +BEGIN + SELECT follows INTO follows FROM DUAL; +END; +/ + +DECLARE + precedes INT; +BEGIN + SELECT precedes INTO precedes FROM DUAL; +END; +/ + +DELIMITER ;/ diff --git a/mysql-test/suite/compat/oracle/t/sp-expr.test b/mysql-test/suite/compat/oracle/t/sp-expr.test new file mode 100644 index 00000000000..06a5c59b80c --- /dev/null +++ b/mysql-test/suite/compat/oracle/t/sp-expr.test @@ -0,0 +1,165 @@ +# Testing expressions of different kinds in various parts of SP syntax + +SET sql_mode=ORACLE; + +--echo # +--echo # Start of 10.3 tests +--echo # + +# +# Subselects in SP control structures +# + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +DELIMITER $$; + +BEGIN + CASE ((1) IN (SELECT a FROM t1)) WHEN 1 THEN SELECT 1; + ELSE SELECT NULL; + END CASE; +END; +$$ +BEGIN + CASE (EXISTS (SELECT a FROM t1)) WHEN 1 THEN SELECT 1; + ELSE SELECT NULL; + END CASE; +END; +$$ + +BEGIN + IF ((1) IN (SELECT a FROM t1)) THEN SELECT 1; + ELSE SELECT NULL; + END IF; +END; +$$ +BEGIN + IF (EXISTS (SELECT a FROM t1)) THEN SELECT 1; + ELSE SELECT NULL; + END IF; +END; +$$ + +BEGIN + WHILE ((1234) IN (SELECT * FROM t1)) LOOP + SELECT 1; + END LOOP; +END; +$$ +BEGIN + WHILE (EXISTS (SELECT * FROM 
t1 WHERE a=1234)) LOOP + SELECT 1; + END LOOP; +END; +$$ + +BEGIN + REPEAT + SELECT 1; + UNTIL (1 IN (SELECT * FROM t1)) + END REPEAT; +END; +$$ +BEGIN + REPEAT + SELECT 1; + UNTIL EXISTS (SELECT * FROM t1 WHERE a=1) + END REPEAT; +END; +$$ + +BEGIN + FOR i IN 0..(1 IN (SELECT * FROM t1)) + LOOP + SELECT i; + END LOOP; +END; +$$ +BEGIN + FOR i IN 0..EXISTS (SELECT * FROM t1 WHERE a=1) + LOOP + SELECT i; + END LOOP; +END; +$$ +DELIMITER ;$$ +DROP TABLE t1; + + +# +# Subselects as SP variable default values +# + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (10); +DELIMITER $$; +DECLARE + a INT DEFAULT ((10) IN (SELECT * FROM t1)); +BEGIN + SELECT a; +END; +$$ +DECLARE + a INT DEFAULT EXISTS (SELECT * FROM t1); +BEGIN + SELECT a; +END; +$$ +DELIMITER ;$$ +DROP TABLE t1; + + +# +# Subselects SP function return values +# + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +DELIMITER $$; +CREATE FUNCTION f1() RETURN INT AS +BEGIN + RETURN ((1) IN (SELECT * FROM t1)); +END; +$$ +CREATE FUNCTION f2() RETURN INT AS +BEGIN + RETURN EXISTS (SELECT * FROM t1 WHERE a=1); +END; +$$ +DELIMITER ;$$ +SELECT f1(); +SELECT f2(); +DROP FUNCTION f1; +DROP FUNCTION f2; +DROP TABLE t1; + + +# +# Subselects in CURSOR parameters +# + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +DELIMITER $$; +DECLARE + va INT; + CURSOR cur(amin INT) IS SELECT a FROM t1 WHERE a>amin ORDER BY a; +BEGIN + OPEN cur(1 IN (SELECT * FROM t1)); + FETCH cur INTO va; + SELECT va; + CLOSE cur; +END; +$$ +DECLARE + va INT; + CURSOR cur(amin INT) IS SELECT a FROM t1 WHERE a>amin ORDER BY a; +BEGIN + OPEN cur(EXISTS (SELECT * FROM t1)); + FETCH cur INTO va; + SELECT va; + CLOSE cur; +END; +$$ +DELIMITER ;$$ +DROP TABLE t1; diff --git a/mysql-test/suite/compat/oracle/t/statement-expr.test b/mysql-test/suite/compat/oracle/t/statement-expr.test new file mode 100644 index 00000000000..f4a6a25eff3 --- /dev/null +++ b/mysql-test/suite/compat/oracle/t/statement-expr.test @@ -0,0 +1,86 @@ +# Testing expressions of different kinds in various non-SELECT statements + +SET sql_mode=ORACLE; + +--echo # +--echo # Start of 10.3 tests +--echo # + +# +# Subselects in non-SELECT statements +# + +CREATE TABLE t1 (id INT, id1 INT); +INSERT INTO t1 VALUES (1,7); +INSERT INTO t1 VALUES (1,8); +SELECT ROW(1,7) IN (SELECT id, id1 FROM t1 WHERE id1= 8); +EXECUTE IMMEDIATE 'SELECT ROW(1, 7) IN (SELECT id, id1 FROM t1 WHERE id1= 8)'; +DROP TABLE t1; + +--error ER_PARSE_ERROR +EXECUTE IMMEDIATE 'SELECT ?' USING (1 IN (SELECT * FROM t1)); +--error ER_PARSE_ERROR +EXECUTE IMMEDIATE 'SELECT ?' 
USING (SELECT * FROM t1); + + +CREATE TABLE t1 (id INT); +INSERT INTO t1 VALUES (10); +DELIMITER $$; +CREATE PROCEDURE p1(a INT) AS BEGIN NULL; END; +$$ +DELIMITER ;$$ +CALL p1((1) IN (SELECT * FROM t1)); +CALL p1(EXISTS (SELECT * FROM t1)); +DROP PROCEDURE p1; +DROP TABLE t1; + +--error ER_PARSE_ERROR +SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO=(1 IN (SELECT * FROM t1)); +--error ER_PARSE_ERROR +SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO=EXISTS (SELECT * FROM t1); + +DELIMITER $$; +--error ER_PARSE_ERROR +BEGIN NOT ATOMIC + DECLARE CONTINUE HANDLER FOR SQLWARNING + RESIGNAL SET MYSQL_ERRNO=(1 IN (SELECT * FROM t1)); + SIGNAL SQLSTATE '01000'; +END; +$$ +--error ER_PARSE_ERROR +BEGIN NOT ATOMIC + DECLARE CONTINUE HANDLER FOR SQLWARNING + RESIGNAL SET MYSQL_ERRNO=EXISTS (SELECT * FROM t1); + SIGNAL SQLSTATE '01000'; +END; +$$ +DELIMITER ;$$ + + +--error ER_PARSE_ERROR +PREPARE stmt FROM (1 IN (SELECT * FROM t1)); +--error ER_PARSE_ERROR +PREPARE stmt FROM EXISTS (SELECT * FROM t1); + +--error ER_PARSE_ERROR +EXECUTE IMMEDIATE (1 IN (SELECT * FROM t1)); +--error ER_PARSE_ERROR +EXECUTE IMMEDIATE EXISTS (SELECT * FROM t1); + +--error ER_PARSE_ERROR +GET DIAGNOSTICS CONDITION (1 IN (SELECT * FROM t1)) @errno=MYSQL_ERRNO; +--error ER_PARSE_ERROR +GET DIAGNOSTICS CONDITION EXISTS (SELECT * FROM t1) @errno=MYSQL_ERRNO; + +--error ER_PARSE_ERROR +PURGE BINARY LOGS BEFORE (1 IN (SELECT * FROM t1)); +--error ER_PARSE_ERROR +PURGE BINARY LOGS BEFORE EXISTS (SELECT * FROM t1); + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +DO 1 IN (SELECT * FROM t1); +DO EXISTS (SELECT * FROM t1); +DROP TABLE t1; + + diff --git a/mysql-test/suite/compat/oracle/t/table_value_constr.test b/mysql-test/suite/compat/oracle/t/table_value_constr.test new file mode 100644 index 00000000000..66519e93a36 --- /dev/null +++ b/mysql-test/suite/compat/oracle/t/table_value_constr.test @@ -0,0 +1,1083 @@ +SET sql_mode=ORACLE; + +create table t1 (a int, b int); + +insert into t1 values (1,2),(4,6),(9,7), + (1,1),(2,5),(7,8); + +--echo # just VALUES + +values (1,2); + +values (1,2), (3,4), (5.6,0); + +values ('abc', 'def'); + +--echo # UNION that uses VALUES structure(s) + +select 1,2 +union +values (1,2); + +values (1,2) +union +select 1,2; + +select 1,2 +union +values (1,2),(3,4),(5,6),(7,8); + +select 3,7 +union +values (1,2),(3,4),(5,6); + +select 3,7,4 +union +values (1,2,5),(4,5,6); + +select 1,2 +union +values (1,7),(3,6.5); + +select 1,2 +union +values (1,2.0),(3,6); + +select 1.8,2 +union +values (1,2),(3,6); + +values (1,2.4),(3,6) +union +select 2.8,9; + +values (1,2),(3,4),(5,6),(7,8) +union +select 5,6; + +select 'ab','cdf' +union +values ('al','zl'),('we','q'); + +values ('ab', 'cdf') +union +select 'ab','cdf'; + +values (1,2) +union +values (1,2),(5,6); + +values (1,2) +union +values (3,4),(5,6); + +values (1,2) +union +values (1,2) +union values (4,5); + +--echo # UNION ALL that uses VALUES structure + +values (1,2),(3,4) +union all +select 5,6; + +values (1,2),(3,4) +union all +select 1,2; + +select 5,6 +union all +values (1,2),(3,4); + +select 1,2 +union all +values (1,2),(3,4); + +values (1,2) +union all +values (1,2),(5,6); + +values (1,2) +union all +values (3,4),(5,6); + +values (1,2) +union all +values (1,2) +union all +values (4,5); + +values (1,2) +union all +values (1,2) +union values (1,2); + +values (1,2) +union +values (1,2) +union all +values (1,2); + +--echo # EXCEPT that uses VALUES structure(s) + +select 1,2 +except +values (3,4),(5,6); + +select 1,2 +except +values (1,2),(3,4); + +values 
(1,2),(3,4) +except +select 5,6; + +values (1,2),(3,4) +except +select 1,2; + +values (1,2),(3,4) +except +values (5,6); + +values (1,2),(3,4) +except +values (1,2); + +--echo # INTERSECT that uses VALUES structure(s) + +select 1,2 +intersect +values (3,4),(5,6); + +select 1,2 +intersect +values (1,2),(3,4); + +values (1,2),(3,4) +intersect +select 5,6; + +values (1,2),(3,4) +intersect +select 1,2; + +values (1,2),(3,4) +intersect +values (5,6); + +values (1,2),(3,4) +intersect +values (1,2); + +--echo # combination of different structures that uses VALUES structures : UNION + EXCEPT + +values (1,2),(3,4) +except +select 1,2 +union values (1,2); + +values (1,2),(3,4) +except +values (1,2) +union +values (1,2); + +values (1,2),(3,4) +except +values (1,2) +union +values (3,4); + +values (1,2),(3,4) +union +values (1,2) +except +values (1,2); + +--echo # combination of different structures that uses VALUES structures : UNION ALL + EXCEPT + +values (1,2),(3,4) +except +select 1,2 +union all +values (1,2); + +values (1,2),(3,4) +except +values (1,2) +union all +values (1,2); + +values (1,2),(3,4) +except +values (1,2) +union all +values (3,4); + +values (1,2),(3,4) +union all +values (1,2) +except +values (1,2); + +--echo # combination of different structures that uses VALUES structures : UNION + INTERSECT + +values (1,2),(3,4) +intersect +select 1,2 +union +values (1,2); + +values (1,2),(3,4) +intersect +values (1,2) +union +values (1,2); + +values (1,2),(3,4) +intersect +values (1,2) +union +values (3,4); + +values (1,2),(3,4) +union +values (1,2) +intersect +values (1,2); + +--echo # combination of different structures that uses VALUES structures : UNION ALL + INTERSECT + +values (1,2),(3,4) +intersect +select 1,2 +union all +values (1,2); + +values (1,2),(3,4) +intersect +values (1,2) +union all +values (1,2); + +values (1,2),(3,4) +intersect +values (1,2) +union all +values (3,4); + +values (1,2),(3,4) +union all +values (1,2) +intersect +values (1,2); + +--echo # combination of different structures that uses VALUES structures : UNION + UNION ALL + +values (1,2),(3,4) +union all +select 1,2 +union +values (1,2); + +values (1,2),(3,4) +union all +values (1,2) +union +values (1,2); + +values (1,2),(3,4) +union all +values (1,2) +union +values (3,4); + +values (1,2),(3,4) +union +values (1,2) +union all +values (1,2); + +values (1,2) +union +values (1,2) +union all +values (1,2); + +--echo # CTE that uses VALUES structure(s) : non-recursive CTE + +with t2 as +( + values (1,2),(3,4) +) +select * from t2; + +with t2 as +( + select 1,2 + union + values (1,2) +) +select * from t2; + +with t2 as +( + select 1,2 + union + values (1,2),(3,4) +) +select * from t2; + +with t2 as +( + values (1,2) + union + select 1,2 +) +select * from t2; + +with t2 as +( + values (1,2),(3,4) + union + select 1,2 +) +select * from t2; + +with t2 as +( + values (5,6) + union + values (1,2),(3,4) +) +select * from t2; + +with t2 as +( + values (1,2) + union + values (1,2),(3,4) +) +select * from t2; + +with t2 as +( + select 1,2 + union all + values (1,2),(3,4) +) +select * from t2; + +with t2 as +( + values (1,2),(3,4) + union all + select 1,2 +) +select * from t2; + +with t2 as +( + values (1,2) + union all + values (1,2),(3,4) +) +select * from t2; + +--echo # recursive CTE that uses VALUES structure(s) : singe VALUES structure as anchor + +with recursive t2(a,b) as +( + values(1,1) + union + select t1.a, t1.b + from t1,t2 + where t1.a=t2.a +) +select * from t2; + +with recursive t2(a,b) as +( + values(1,1) + union + 
select t1.a+1, t1.b + from t1,t2 + where t1.a=t2.a +) +select * from t2; + +--echo # recursive CTE that uses VALUES structure(s) : several VALUES structures as anchors + +with recursive t2(a,b) as +( + values(1,1) + union + values (3,4) + union + select t2.a+1, t1.b + from t1,t2 + where t1.a=t2.a +) +select * from t2; + +--echo # recursive CTE that uses VALUES structure(s) : that uses UNION ALL + +with recursive t2(a,b,st) as +( + values(1,1,1) + union all + select t2.a, t1.b, t2.st+1 + from t1,t2 + where t1.a=t2.a and st<3 +) +select * from t2; + +--echo # recursive CTE that uses VALUES structure(s) : computation of factorial (first 10 elements) + +with recursive fact(n,f) as +( + values(1,1) + union + select n+1,f*n from fact where n < 10 +) +select * from fact; + +--echo # Derived table that uses VALUES structure(s) : singe VALUES structure + +select * from (values (1,2),(3,4)) as t2; + +--echo # Derived table that uses VALUES structure(s) : UNION with VALUES structure(s) + +select * from (select 1,2 union values (1,2)) as t2; + +select * from (select 1,2 union values (1,2),(3,4)) as t2; + +select * from (values (1,2) union select 1,2) as t2; + +select * from (values (1,2),(3,4) union select 1,2) as t2; + +select * from (values (5,6) union values (1,2),(3,4)) as t2; + +select * from (values (1,2) union values (1,2),(3,4)) as t2; + +--echo # Derived table that uses VALUES structure(s) : UNION ALL with VALUES structure(s) + +select * from (select 1,2 union all values (1,2),(3,4)) as t2; + +select * from (values (1,2),(3,4) union all select 1,2) as t2; + +select * from (values (1,2) union all values (1,2),(3,4)) as t2; + +--echo # CREATE VIEW that uses VALUES structure(s) : singe VALUES structure + +let $drop_view= drop view v1; +let $select_view= select * from v1; + +create view v1 as values (1,2),(3,4); + +eval $select_view; +eval $drop_view; + +--echo # CREATE VIEW that uses VALUES structure(s) : UNION with VALUES structure(s) + +create view v1 as + select 1,2 + union + values (1,2); + +eval $select_view; +eval $drop_view; + +create view v1 as + select 1,2 + union + values (1,2),(3,4); + +eval $select_view; +eval $drop_view; + +create view v1 as + values (1,2) + union + select 1,2; + +eval $select_view; +eval $drop_view; + +create view v1 as + values (1,2),(3,4) + union + select 1,2; + +eval $select_view; +eval $drop_view; + +create view v1 as + values (5,6) + union + values (1,2),(3,4); + +eval $select_view; +eval $drop_view; + +--echo # CREATE VIEW that uses VALUES structure(s) : UNION ALL with VALUES structure(s) + +create view v1 as + values (1,2) + union + values (1,2),(3,4); + +eval $select_view; +eval $drop_view; + +create view v1 as + select 1,2 + union all + values (1,2),(3,4); + +eval $select_view; +eval $drop_view; + +create view v1 as + values (1,2),(3,4) + union all + select 1,2; + +eval $select_view; +eval $drop_view; + +create view v1 as + values (1,2) + union all + values (1,2),(3,4); + +eval $select_view; +eval $drop_view; + +--echo # IN-subquery with VALUES structure(s) : simple case +let $query= +select * from t1 +where a in (values (1)); +let $subst_query= +select * from t1 +where a in (select * from (values (1)) as tvc_0); +eval $query; +eval $subst_query; +eval explain extended $query; +eval explain extended $subst_query; + +--echo # IN-subquery with VALUES structure(s) : UNION with VALUES on the first place +let $query= +select * from t1 +where a in (values (1) union select 2); +let $subst_query= +select * from t1 +where a in (select * from (values (1)) as tvc_0 
union + select 2); +eval $query; +eval $subst_query; +eval explain extended $query; +eval explain extended $subst_query; + +--echo # IN-subquery with VALUES structure(s) : UNION with VALUES on the second place +let $query= +select * from t1 +where a in (select 2 union values (1)); +let $subst_query= +select * from t1 +where a in (select 2 union + select * from (values (1)) tvc_0); +eval $query; +eval $subst_query; +eval explain extended $query; +eval explain extended $subst_query; + +--echo # IN-subquery with VALUES structure(s) : UNION ALL +let $query= +select * from t1 +where a in (values (1) union all select b from t1); +let $subst_query= +select * from t1 +where a in (select * from (values (1)) as tvc_0 union all + select b from t1); + +eval $query; +eval $subst_query; +eval explain extended $query; +eval explain extended $subst_query; + +--echo # NOT IN subquery with VALUES structure(s) : simple case +let $query= +select * from t1 +where a not in (values (1),(2)); +let $subst_query= +select * from t1 +where a not in (select * from (values (1),(2)) as tvc_0); + +eval $query; +eval $subst_query; +eval explain extended $query; +eval explain extended $subst_query; + +--echo # NOT IN subquery with VALUES structure(s) : UNION with VALUES on the first place +let $query= +select * from t1 +where a not in (values (1) union select 2); +let $subst_query= +select * from t1 +where a not in (select * from (values (1)) as tvc_0 union + select 2); + +eval $query; +eval $subst_query; +eval explain extended $query; +eval explain extended $subst_query; + +--echo # NOT IN subquery with VALUES structure(s) : UNION with VALUES on the second place +let $query= +select * from t1 +where a not in (select 2 union values (1)); +let $subst_query= +select * from t1 +where a not in (select 2 union + select * from (values (1)) as tvc_0); + +eval $query; +eval $subst_query; +eval explain extended $query; +eval explain extended $subst_query; + +--echo # ANY-subquery with VALUES structure(s) : simple case +let $query= +select * from t1 +where a = any (values (1),(2)); +let $subst_query= +select * from t1 +where a = any (select * from (values (1),(2)) as tvc_0); + +eval $query; +eval $subst_query; +eval explain extended $query; +eval explain extended $subst_query; + +--echo # ANY-subquery with VALUES structure(s) : UNION with VALUES on the first place +let $query= +select * from t1 +where a = any (values (1) union select 2); +let $subst_query= +select * from t1 +where a = any (select * from (values (1)) as tvc_0 union + select 2); + +eval $query; +eval $subst_query; +eval explain extended $query; +eval explain extended $subst_query; + +--echo # ANY-subquery with VALUES structure(s) : UNION with VALUES on the second place +let $query= +select * from t1 +where a = any (select 2 union values (1)); +let $subst_query= +select * from t1 +where a = any (select 2 union + select * from (values (1)) as tvc_0); + +eval $query; +eval $subst_query; +eval explain extended $query; +eval explain extended $subst_query; + +--echo # ALL-subquery with VALUES structure(s) : simple case +let $query= +select * from t1 +where a = all (values (1)); +let $subst_query= +select * from t1 +where a = all (select * from (values (1)) as tvc_0); + +eval $query; +eval $subst_query; +eval explain extended $query; +eval explain extended $subst_query; + +--echo # ALL-subquery with VALUES structure(s) : UNION with VALUES on the first place +let $query= +select * from t1 +where a = all (values (1) union select 1); +let $subst_query= +select * from t1 +where 
a = all (select * from (values (1)) as tvc_0 union + select 1); + +eval $query; +eval $subst_query; +eval explain extended $query; +eval explain extended $subst_query; + +--echo # ALL-subquery with VALUES structure(s) : UNION with VALUES on the second place +let $query= +select * from t1 +where a = any (select 1 union values (1)); +let $subst_query= +select * from t1 +where a = any (select 1 union + select * from (values (1)) as tvc_0); + +eval $query; +eval $subst_query; +eval explain extended $query; +eval explain extended $subst_query; + +--echo # prepare statement that uses VALUES structure(s): single VALUES structure + +prepare stmt1 from ' +values (1,2); +'; + +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +--echo # prepare statement that uses VALUES structure(s): UNION with VALUES structure(s) + +prepare stmt1 from ' + select 1,2 + union + values (1,2),(3,4); +'; + +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +prepare stmt1 from ' + values (1,2),(3,4) + union + select 1,2; +'; + +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +prepare stmt1 from ' + select 1,2 + union + values (3,4) + union + values (1,2); +'; + +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +prepare stmt1 from ' + values (5,6) + union + values (1,2),(3,4); +'; + +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +--echo # prepare statement that uses VALUES structure(s): UNION ALL with VALUES structure(s) + +prepare stmt1 from ' + select 1,2 + union + values (1,2),(3,4); +'; + +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +prepare stmt1 from ' + values (1,2),(3,4) + union all + select 1,2; +'; + +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +prepare stmt1 from ' + select 1,2 + union all + values (3,4) + union all + values (1,2); +'; + +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +prepare stmt1 from ' + values (1,2) + union all + values (1,2),(3,4); +'; + +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +--echo # explain query that uses VALUES structure(s): single VALUES structure + +explain +values (1,2); + +explain format=json +values (1,2); + +--echo # explain query that uses VALUES structure(s): UNION with VALUES structure(s) + +explain +select 1,2 +union +values (1,2),(3,4); + +explain +values (1,2),(3,4) +union +select 1,2; + +explain +values (5,6) +union +values (1,2),(3,4); + +explain format=json +select 1,2 +union +values (1,2),(3,4); + +explain format=json +values (1,2),(3,4) +union +select 1,2; + +explain format=json +values (5,6) +union +values (1,2),(3,4); + +explain +select 1,2 +union +values (3,4) +union +values (1,2); + +explain format=json +select 1,2 +union +values (3,4) +union +values (1,2); + +--echo # explain query that uses VALUES structure(s): UNION ALL with VALUES structure(s) + +explain +select 1,2 +union +values (1,2),(3,4); + +explain +values (1,2),(3,4) +union all +select 1,2; + +explain +values (1,2) +union all +values (1,2),(3,4); + +explain format=json +values (1,2),(3,4) +union all +select 1,2; + +explain format=json +select 1,2 +union +values (1,2),(3,4); + +explain format=json +values (1,2) +union all +values (1,2),(3,4); + +explain +select 1,2 +union all +values (3,4) +union all +values (1,2); + +explain format=json +select 1,2 +union all +values (3,4) +union all +values (1,2); + +--echo # analyze query that uses VALUES structure(s): single VALUES structure + +analyze +values (1,2); + +analyze format=json +values (1,2); + +--echo # analyze query that uses 
VALUES structure(s): UNION with VALUES structure(s) + +analyze +select 1,2 +union +values (1,2),(3,4); + +analyze +values (1,2),(3,4) +union +select 1,2; + +analyze +values (5,6) +union +values (1,2),(3,4); + +analyze format=json +select 1,2 +union +values (1,2),(3,4); + +analyze format=json +values (1,2),(3,4) +union +select 1,2; + +analyze format=json +values (5,6) +union +values (1,2),(3,4); + +analyze +select 1,2 +union +values (3,4) +union +values (1,2); + +analyze format=json +select 1,2 +union +values (3,4) +union +values (1,2); + +--echo # analyze query that uses VALUES structure(s): UNION ALL with VALUES structure(s) + +analyze +select 1,2 +union +values (1,2),(3,4); + +analyze +values (1,2),(3,4) +union all +select 1,2; + +analyze +values (1,2) +union all +values (1,2),(3,4); + +analyze format=json +values (1,2),(3,4) +union all +select 1,2; + +analyze format=json +select 1,2 +union +values (1,2),(3,4); + +analyze format=json +values (1,2) +union all +values (1,2),(3,4); + +analyze +select 1,2 +union all +values (3,4) +union all +values (1,2); + +analyze format=json +select 1,2 +union all +values (3,4) +union all +values (1,2); + +--echo # different number of values in TVC +--error ER_WRONG_NUMBER_OF_VALUES_IN_TVC +values (1,2),(3,4,5); + +--echo # illegal parameter data types in TVC +--error ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION +values (1,point(1,1)),(1,1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION +values (1,point(1,1)+1); + +--echo # field reference in TVC +--error ER_FIELD_REFERENCE_IN_TVC +select * from (values (1), (b), (2)) as new_tvc; +--error ER_FIELD_REFERENCE_IN_TVC +select * from (values (1), (t1.b), (2)) as new_tvc; + +drop table t1; + +--echo # +--echo # MDEV-15940: cursor over TVC +--echo # + +DELIMITER |; + +DECLARE + v INT; + CURSOR cur IS VALUES(7); +BEGIN + OPEN cur; + FETCH cur INTO v; + SELECT v; +END; +| + +DECLARE + v INT DEFAULT 0; +BEGIN + FOR a IN (VALUES (7)) LOOP + SET v = v + 1; + END LOOP; + SELECT v; +END; +| + +DELIMITER ;| + +--echo # +--echo # MDEV-16038: empty row in TVC +--echo # + +--error ER_EMPTY_ROW_IN_TVC +with t as (values (),()) select 1 from t; diff --git a/mysql-test/suite/compat/oracle/t/versioning.test b/mysql-test/suite/compat/oracle/t/versioning.test new file mode 100644 index 00000000000..d70058c56e4 --- /dev/null +++ b/mysql-test/suite/compat/oracle/t/versioning.test @@ -0,0 +1,13 @@ +SET sql_mode=ORACLE; + +--echo # +--echo # MDEV-15975 PL/SQL parser does not understand historical queries +--echo # + +CREATE TABLE t1 (a INT) WITH SYSTEM VERSIONING; +INSERT INTO t1 VALUES (10); +DELETE FROM t1; +INSERT INTO t1 VALUES (20); +SELECT * FROM t1 FOR SYSTEM_TIME ALL; +SELECT * FROM t1 FOR SYSTEM_TIME AS OF (NOW()+INTERVAL 10 YEAR); +DROP TABLE t1; diff --git a/mysql-test/suite/compat/oracle/t/win.test b/mysql-test/suite/compat/oracle/t/win.test new file mode 100644 index 00000000000..c6f0b6474cf --- /dev/null +++ b/mysql-test/suite/compat/oracle/t/win.test @@ -0,0 +1,22 @@ +SET sql_mode=ORACLE; + +--echo # +--echo # MDEV-13384: "window" seems like a reserved column name but it's not listed as one +--echo # +--echo # Currently we allow window as an identifier, except for table aliases. 
+--echo # + +CREATE TABLE door (id INT, window VARCHAR(10)); + +--error ER_PARSE_ERROR +SELECT id +FROM door as window; + +SELECT id, window +FROM door; + +--error ER_PARSE_ERROR +SELECT id, window +FROM door as window; + +DROP TABLE door; diff --git a/mysql-test/suite/encryption/r/innodb-bad-key-change3.result b/mysql-test/suite/encryption/r/innodb-bad-key-change3.result index 79155bf43b9..2d414d75c21 100644 --- a/mysql-test/suite/encryption/r/innodb-bad-key-change3.result +++ b/mysql-test/suite/encryption/r/innodb-bad-key-change3.result @@ -14,6 +14,7 @@ t1 CREATE TABLE `t1` ( INSERT INTO t1 VALUES (1,'foobar'),(2,'barfoo'); FLUSH TABLE t1 FOR EXPORT; # List before copying files +db.opt t1.cfg t1.frm t1.ibd diff --git a/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result b/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result index a12b47ef31d..cca3b9ad686 100644 --- a/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result +++ b/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result @@ -38,6 +38,7 @@ backup: te_crc32 backup: t_crc32 backup: tpe_crc32 backup: tp_crc32 +db.opt t_crc32.cfg t_crc32.frm t_crc32.ibd @@ -247,6 +248,7 @@ backup: te_innodb backup: t_innodb backup: tpe_innodb backup: tp_innodb +db.opt t_innodb.cfg t_innodb.frm t_innodb.ibd @@ -456,6 +458,7 @@ backup: te_none backup: t_none backup: tpe_none backup: tp_none +db.opt t_none.cfg t_none.frm t_none.ibd diff --git a/mysql-test/suite/encryption/r/innodb-discard-import-change.result b/mysql-test/suite/encryption/r/innodb-discard-import-change.result index 51670d89e52..cafdbef4b69 100644 --- a/mysql-test/suite/encryption/r/innodb-discard-import-change.result +++ b/mysql-test/suite/encryption/r/innodb-discard-import-change.result @@ -24,6 +24,7 @@ backup: t2 backup: t3 backup: t4 backup: t5 +db.opt t1.cfg t1.frm t1.ibd diff --git a/mysql-test/suite/encryption/r/innodb-discard-import.result b/mysql-test/suite/encryption/r/innodb-discard-import.result index 91314a77177..9f8521b3245 100644 --- a/mysql-test/suite/encryption/r/innodb-discard-import.result +++ b/mysql-test/suite/encryption/r/innodb-discard-import.result @@ -44,6 +44,7 @@ count(*) 2000 FLUSH TABLE t1,t2,t3,t4 FOR EXPORT; # List before copying files +db.opt t1.cfg t1.frm t1.ibd @@ -60,6 +61,7 @@ backup: t1 backup: t2 backup: t3 backup: t4 +db.opt t1.cfg t1.frm t1.ibd diff --git a/mysql-test/suite/encryption/r/innodb_encryption_discard_import.result b/mysql-test/suite/encryption/r/innodb_encryption_discard_import.result index b503d6c9d30..6b38be38b6d 100644 --- a/mysql-test/suite/encryption/r/innodb_encryption_discard_import.result +++ b/mysql-test/suite/encryption/r/innodb_encryption_discard_import.result @@ -25,6 +25,7 @@ NOT FOUND /foobar/ in t1.ibd NOT FOUND /temp/ in t2.ibd # t3 ... 
on expecting NOT FOUND NOT FOUND /barfoo/ in t3.ibd +db.opt t1.frm t1.ibd t2.frm @@ -35,6 +36,7 @@ FLUSH TABLES t1, t2, t3 FOR EXPORT; backup: t1 backup: t2 backup: t3 +db.opt t1.cfg t1.frm t1.ibd diff --git a/mysql-test/suite/encryption/t/innodb-scrub-log.test b/mysql-test/suite/encryption/t/innodb-scrub-log.test index 36ecc88b99a..e8149b6b3ff 100644 --- a/mysql-test/suite/encryption/t/innodb-scrub-log.test +++ b/mysql-test/suite/encryption/t/innodb-scrub-log.test @@ -1,7 +1,7 @@ --source include/have_innodb.inc # -# MDEV-11705: InnoDB: Failing assertion: (&log_sys->mutex)->is_owned() if server started with innodb-scrub-log +# MDEV-11705: InnoDB: Failing assertion: (&log_sys.mutex)->is_owned() if server started with innodb-scrub-log # create table t1(a int not null primary key auto_increment, diff --git a/mysql-test/suite/encryption/t/innodb_encryption_discard_import.test b/mysql-test/suite/encryption/t/innodb_encryption_discard_import.test index 099d16f6927..22755571618 100644 --- a/mysql-test/suite/encryption/t/innodb_encryption_discard_import.test +++ b/mysql-test/suite/encryption/t/innodb_encryption_discard_import.test @@ -58,6 +58,7 @@ set autocommit=1; -- source include/search_pattern_in_file.inc --source include/start_mysqld.inc +let MYSQLD_DATADIR =`SELECT @@datadir`; --list_files $MYSQLD_DATADIR/test FLUSH TABLES t1, t2, t3 FOR EXPORT; diff --git a/mysql-test/suite/federated/assisted_discovery.result b/mysql-test/suite/federated/assisted_discovery.result index d44f69effcd..4818ff7bb02 100644 --- a/mysql-test/suite/federated/assisted_discovery.result +++ b/mysql-test/suite/federated/assisted_discovery.result @@ -72,14 +72,6 @@ t1 CREATE TABLE `t1` ( drop table t1; connection slave; drop table t1; -create or replace table t1 (x int) with system versioning; -connection master; -create table t1 engine=federated connection='mysql://root@127.0.0.1:SLAVE_MYPORT/test/t1'; -ERROR HY000: Engine FEDERATED failed to discover table `test`.`t1` with 'CREATE TABLE `t1` ( - `x` int(11) DEFAULT NULL -) WITH SYSTEM VERSIONING CONNECTION='mysql://root@127.0.0.1:SLAVE_MYPORT/test/t1'' -connection slave; -drop table t1; connection master; DROP TABLE IF EXISTS federated.t1; DROP DATABASE IF EXISTS federated; diff --git a/mysql-test/suite/federated/assisted_discovery.test b/mysql-test/suite/federated/assisted_discovery.test index 7099cfedb23..fa83a2a8e19 100644 --- a/mysql-test/suite/federated/assisted_discovery.test +++ b/mysql-test/suite/federated/assisted_discovery.test @@ -54,14 +54,5 @@ drop table t1; connection slave; drop table t1; -create or replace table t1 (x int) with system versioning; -connection master; ---replace_result $SLAVE_MYPORT SLAVE_MYPORT ---error ER_SQL_DISCOVER_ERROR -eval create table t1 engine=federated connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/t1'; - -connection slave; -drop table t1; - source include/federated_cleanup.inc; diff --git a/mysql-test/suite/federated/federatedx_versioning.result b/mysql-test/suite/federated/federatedx_versioning.result new file mode 100644 index 00000000000..7af5a5f3f0c --- /dev/null +++ b/mysql-test/suite/federated/federatedx_versioning.result @@ -0,0 +1,100 @@ +create or replace table t1 ( +x int, +row_start SYS_TYPE as row start invisible, +row_end SYS_TYPE as row end invisible, +period for system_time (row_start, row_end)) +with system versioning; +create or replace table tf engine=FEDERATED connection='mysql://root@127.0.0.1:MASTER_MYPORT/test/t1'; +show create table tf; +Table Create Table +tf CREATE TABLE `tf` ( + `x` int(11) 
DEFAULT NULL, + `row_start` SYS_TYPE NOT NULL INVISIBLE DEFAULT 0, + `row_end` SYS_TYPE NOT NULL INVISIBLE DEFAULT 0 +) ENGINE=FEDERATED DEFAULT CHARSET=latin1 CONNECTION='mysql://root@127.0.0.1:MASTER_MYPORT/test/t1' +# INSERT +insert into t1 values (1); +select * from tf; +x +1 +insert into tf (x) values (2); +select * from t1; +x +1 +2 +select * from tf; +x +1 +2 +# UPDATE +update tf set x= x + 2; +select *, check_row(row_start, row_end) from t1 for system_time all +order by x; +x check_row(row_start, row_end) +1 HISTORICAL ROW +2 HISTORICAL ROW +3 CURRENT ROW +4 CURRENT ROW +# DELETE +delete from tf; +select *, check_row(row_start, row_end) from t1 for system_time all +order by x; +x check_row(row_start, row_end) +1 HISTORICAL ROW +2 HISTORICAL ROW +3 HISTORICAL ROW +4 HISTORICAL ROW +select * from tf; +x +# TRUNCATE +truncate tf; +select * from t1 for system_time all; +x +# REPLACE +create or replace table t2 ( +id int primary key, y int, +row_start SYS_TYPE as row start invisible, +row_end SYS_TYPE as row end invisible, +period for system_time (row_start, row_end)) +with system versioning; +create or replace table t2f engine=FEDERATED connection='mysql://root@127.0.0.1:MASTER_MYPORT/test/t2'; +insert t2f (id, y) values (1, 2); +replace t2f (id, y) values (1, 3); +select *, check_row(row_start, row_end) from t2 for system_time all +order by y; +id y check_row(row_start, row_end) +1 2 HISTORICAL ROW +1 3 CURRENT ROW +# VIEW +create or replace view vt1 as select * from tf; +insert into vt1 values (3); +update vt1 set x= x + 1; +select *, check_row(row_start, row_end) from t1 for system_time all +order by x; +x check_row(row_start, row_end) +3 HISTORICAL ROW +4 CURRENT ROW +delete from vt1; +select *, check_row(row_start, row_end) from t1 for system_time all +order by x; +x check_row(row_start, row_end) +3 HISTORICAL ROW +4 HISTORICAL ROW +# multi-UPDATE +truncate t1; +truncate t2; +insert into t1 values (1); +insert into t2 values (2, 2); +update tf, t2f set tf.x= 11, t2f.y= 22; +select *, check_row(row_start, row_end) from t1 for system_time all +order by x; +x check_row(row_start, row_end) +1 HISTORICAL ROW +11 CURRENT ROW +select *, check_row(row_start, row_end) from t2 for system_time all +order by y; +id y check_row(row_start, row_end) +2 2 HISTORICAL ROW +2 22 CURRENT ROW +drop database test; +create database test; diff --git a/mysql-test/suite/federated/federatedx_versioning.test b/mysql-test/suite/federated/federatedx_versioning.test new file mode 100644 index 00000000000..692edb5b00a --- /dev/null +++ b/mysql-test/suite/federated/federatedx_versioning.test @@ -0,0 +1,77 @@ +--source include/not_embedded.inc +--source have_federatedx.inc +--source suite/versioning/engines.inc +--source suite/versioning/common.inc + +--replace_result $sys_datatype_expl SYS_TYPE +eval create or replace table t1 ( + x int, + row_start $sys_datatype_expl as row start invisible, + row_end $sys_datatype_expl as row end invisible, + period for system_time (row_start, row_end)) +with system versioning; +--replace_result $MASTER_MYPORT MASTER_MYPORT +eval create or replace table tf engine=FEDERATED connection='mysql://root@127.0.0.1:$MASTER_MYPORT/test/t1'; +--replace_result $MASTER_MYPORT MASTER_MYPORT $sys_datatype_expl SYS_TYPE "'0000-00-00 00:00:00.000000'" 0 +show create table tf; +--echo # INSERT +insert into t1 values (1); +select * from tf; +insert into tf (x) values (2); +select * from t1; +select * from tf; + +--echo # UPDATE +update tf set x= x + 2; +select *, check_row(row_start, row_end) 
from t1 for system_time all +order by x; + +--echo # DELETE +delete from tf; +select *, check_row(row_start, row_end) from t1 for system_time all +order by x; +select * from tf; + +--echo # TRUNCATE +truncate tf; +select * from t1 for system_time all; + +--echo # REPLACE +--replace_result $sys_datatype_expl SYS_TYPE +eval create or replace table t2 ( + id int primary key, y int, + row_start $sys_datatype_expl as row start invisible, + row_end $sys_datatype_expl as row end invisible, + period for system_time (row_start, row_end)) +with system versioning; +--replace_result $MASTER_MYPORT MASTER_MYPORT +eval create or replace table t2f engine=FEDERATED connection='mysql://root@127.0.0.1:$MASTER_MYPORT/test/t2'; +insert t2f (id, y) values (1, 2); +replace t2f (id, y) values (1, 3); +select *, check_row(row_start, row_end) from t2 for system_time all +order by y; + +--echo # VIEW +create or replace view vt1 as select * from tf; +insert into vt1 values (3); +update vt1 set x= x + 1; +select *, check_row(row_start, row_end) from t1 for system_time all +order by x; +delete from vt1; +select *, check_row(row_start, row_end) from t1 for system_time all +order by x; + +--echo # multi-UPDATE +truncate t1; +truncate t2; +insert into t1 values (1); +insert into t2 values (2, 2); +update tf, t2f set tf.x= 11, t2f.y= 22; +select *, check_row(row_start, row_end) from t1 for system_time all +order by x; +select *, check_row(row_start, row_end) from t2 for system_time all +order by y; + +--source suite/versioning/common_finish.inc +drop database test; +create database test; diff --git a/mysql-test/suite/federated/timestamps.result b/mysql-test/suite/federated/timestamps.result new file mode 100644 index 00000000000..9f3be82a4ec --- /dev/null +++ b/mysql-test/suite/federated/timestamps.result @@ -0,0 +1,64 @@ +connect master,127.0.0.1,root,,test,$MASTER_MYPORT,; +connect slave,127.0.0.1,root,,test,$SLAVE_MYPORT,; +connection master; +CREATE DATABASE federated; +connection slave; +CREATE DATABASE federated; +connection slave; +set global time_zone='Europe/Moscow'; +set time_zone='Europe/Moscow'; +create table federated.t1 (dt datetime, ts timestamp, unique(ts)); +connection master; +set time_zone='+01:00'; +create table t1 engine=federated connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; +set @@timestamp=1; +insert t1 values (now(), now()); +set @@timestamp=2147483647; +insert t1 values (now(), now()); +set @@timestamp=1067121000; +insert t1 values (now(), now()); +set @@timestamp=1067124600; +insert t1 values (now(), now()); +set @@timestamp=0; +select * from t1; +dt ts +1970-01-01 01:00:01 1970-01-01 01:00:01 +2038-01-19 04:14:07 2038-01-19 04:14:07 +2003-10-25 23:30:00 2003-10-25 23:30:00 +2003-10-26 00:30:00 2003-10-26 00:30:00 +delete from t1 where ts='1970-01-01 01:00:01'; +select * from t1; +dt ts +2038-01-19 04:14:07 2038-01-19 04:14:07 +2003-10-25 23:30:00 2003-10-25 23:30:00 +2003-10-26 00:30:00 2003-10-26 00:30:00 +insert t1 values ('1970-01-01 01:00:01', now()); +update t1 set ts=dt; +select * from t1; +dt ts +1970-01-01 01:00:01 1970-01-01 01:00:01 +2038-01-19 04:14:07 2038-01-19 04:14:07 +2003-10-25 23:30:00 2003-10-25 23:30:00 +2003-10-26 00:30:00 2003-10-26 00:30:00 +select * from t1 where ts='2003-10-25 23:30:00'; +dt ts +2003-10-25 23:30:00 2003-10-25 23:30:00 +select * from t1 where ts='2003-10-26 00:30:00'; +dt ts +2003-10-26 00:30:00 2003-10-26 00:30:00 +connection slave; +select * from federated.t1; +dt ts +1970-01-01 01:00:01 1970-01-01 03:00:01 +2038-01-19 04:14:07 
2038-01-19 06:14:07 +2003-10-25 23:30:00 2003-10-26 02:30:00 +2003-10-26 00:30:00 2003-10-26 02:30:00 +set global time_zone=default; +connection master; +drop table t1; +connection master; +DROP TABLE IF EXISTS federated.t1; +DROP DATABASE IF EXISTS federated; +connection slave; +DROP TABLE IF EXISTS federated.t1; +DROP DATABASE IF EXISTS federated; diff --git a/mysql-test/suite/federated/timestamps.test b/mysql-test/suite/federated/timestamps.test new file mode 100644 index 00000000000..7b46b797f2f --- /dev/null +++ b/mysql-test/suite/federated/timestamps.test @@ -0,0 +1,45 @@ +source have_federatedx.inc; +source include/federated.inc; + +connection slave; +set global time_zone='Europe/Moscow'; +set time_zone='Europe/Moscow'; +create table federated.t1 (dt datetime, ts timestamp, unique(ts)); + +connection master; +set time_zone='+01:00'; +replace_result $SLAVE_MYPORT SLAVE_PORT; +eval create table t1 engine=federated connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; + +set @@timestamp=1; # min value +insert t1 values (now(), now()); +set @@timestamp=2147483647; # max value +insert t1 values (now(), now()); +set @@timestamp=1067121000; # DST ambiguous (in Europe/Moscow) +insert t1 values (now(), now()); +set @@timestamp=1067124600; # DST ambiguous (in Europe/Moscow) +insert t1 values (now(), now()); +set @@timestamp=0; + +# reads +select * from t1; + +# deletes +delete from t1 where ts='1970-01-01 01:00:01'; +select * from t1; + +# updates +insert t1 values ('1970-01-01 01:00:01', now()); +update t1 set ts=dt; +select * from t1; + +# index lookups +select * from t1 where ts='2003-10-25 23:30:00'; +select * from t1 where ts='2003-10-26 00:30:00'; + +connection slave; +select * from federated.t1; +set global time_zone=default; +connection master; +drop table t1; +source include/federated_cleanup.inc; diff --git a/mysql-test/suite/funcs_1/r/is_columns_innodb.result b/mysql-test/suite/funcs_1/r/is_columns_innodb.result index 297dc2d33f8..8f2ba33b591 100644 --- a/mysql-test/suite/funcs_1/r/is_columns_innodb.result +++ b/mysql-test/suite/funcs_1/r/is_columns_innodb.result @@ -427,9 +427,9 @@ def test tb1 f19 19 NULL YES smallint NULL NULL 5 0 NULL NULL NULL smallint(5) u def test tb1 f2 2 NULL YES char 0 0 NULL NULL NULL latin1 latin1_bin char(0) select,insert,update,references NEVER NULL def test tb1 f20 20 NULL YES smallint NULL NULL 5 0 NULL NULL NULL smallint(5) unsigned zerofill select,insert,update,references NEVER NULL def test tb1 f21 21 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(9) select,insert,update,references NEVER NULL -def test tb1 f22 22 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned select,insert,update,references NEVER NULL -def test tb1 f23 23 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL -def test tb1 f24 24 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL +def test tb1 f22 22 NULL YES mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned select,insert,update,references NEVER NULL +def test tb1 f23 23 NULL YES mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL +def test tb1 f24 24 NULL YES mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL def test tb1 f25 25 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER 
NULL def test tb1 f26 26 NULL YES int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references NEVER NULL def test tb1 f27 27 NULL YES int NULL NULL 10 0 NULL NULL NULL int(10) unsigned zerofill select,insert,update,references NEVER NULL @@ -543,9 +543,9 @@ def test tb3 f135 18 999 NO smallint NULL NULL 5 0 NULL NULL NULL smallint(5) un def test tb3 f136 19 00999 NO smallint NULL NULL 5 0 NULL NULL NULL smallint(5) unsigned zerofill select,insert,update,references NEVER NULL def test tb3 f137 20 00999 NO smallint NULL NULL 5 0 NULL NULL NULL smallint(5) unsigned zerofill select,insert,update,references NEVER NULL def test tb3 f138 21 9999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(9) select,insert,update,references NEVER NULL -def test tb3 f139 22 9999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned select,insert,update,references NEVER NULL -def test tb3 f140 23 00009999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL -def test tb3 f141 24 00009999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL +def test tb3 f139 22 9999 NO mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned select,insert,update,references NEVER NULL +def test tb3 f140 23 00009999 NO mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL +def test tb3 f141 24 00009999 NO mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL def test tb3 f142 25 99999 NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER NULL def test tb3 f143 26 99999 NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references NEVER NULL def test tb3 f144 27 0000099999 NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned zerofill select,insert,update,references NEVER NULL diff --git a/mysql-test/suite/funcs_1/r/is_columns_memory.result b/mysql-test/suite/funcs_1/r/is_columns_memory.result index e94d4c9123a..5eaa5b08cc2 100644 --- a/mysql-test/suite/funcs_1/r/is_columns_memory.result +++ b/mysql-test/suite/funcs_1/r/is_columns_memory.result @@ -434,9 +434,9 @@ def test tb1 f19 11 NULL YES smallint NULL NULL 5 0 NULL NULL NULL smallint(5) u def test tb1 f2 2 NULL YES char 1 1 NULL NULL NULL latin1 latin1_bin char(1) select,insert,update,references NEVER NULL def test tb1 f20 12 NULL YES smallint NULL NULL 5 0 NULL NULL NULL smallint(5) unsigned zerofill select,insert,update,references NEVER NULL def test tb1 f21 13 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(9) select,insert,update,references NEVER NULL -def test tb1 f22 14 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned select,insert,update,references NEVER NULL -def test tb1 f23 15 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL -def test tb1 f24 16 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL +def test tb1 f22 14 NULL YES mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned select,insert,update,references NEVER NULL +def test tb1 f23 15 NULL YES mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL +def test tb1 f24 16 NULL YES mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill 
select,insert,update,references NEVER NULL def test tb1 f25 17 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER NULL def test tb1 f26 18 NULL YES int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references NEVER NULL def test tb1 f27 19 NULL YES int NULL NULL 10 0 NULL NULL NULL int(10) unsigned zerofill select,insert,update,references NEVER NULL @@ -538,9 +538,9 @@ def test tb3 f135 12 999 NO smallint NULL NULL 5 0 NULL NULL NULL smallint(5) un def test tb3 f136 13 00999 NO smallint NULL NULL 5 0 NULL NULL NULL smallint(5) unsigned zerofill select,insert,update,references NEVER NULL def test tb3 f137 14 00999 NO smallint NULL NULL 5 0 NULL NULL NULL smallint(5) unsigned zerofill select,insert,update,references NEVER NULL def test tb3 f138 15 9999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(9) select,insert,update,references NEVER NULL -def test tb3 f139 16 9999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned select,insert,update,references NEVER NULL -def test tb3 f140 17 00009999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL -def test tb3 f141 18 00009999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL +def test tb3 f139 16 9999 NO mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned select,insert,update,references NEVER NULL +def test tb3 f140 17 00009999 NO mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL +def test tb3 f141 18 00009999 NO mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL def test tb3 f142 19 99999 NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER NULL def test tb3 f143 20 99999 NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references NEVER NULL def test tb3 f144 21 0000099999 NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned zerofill select,insert,update,references NEVER NULL diff --git a/mysql-test/suite/funcs_1/r/is_columns_myisam.result b/mysql-test/suite/funcs_1/r/is_columns_myisam.result index c4e11e3f48b..4426583ac4c 100644 --- a/mysql-test/suite/funcs_1/r/is_columns_myisam.result +++ b/mysql-test/suite/funcs_1/r/is_columns_myisam.result @@ -476,9 +476,9 @@ def test tb1 f19 19 NULL YES smallint NULL NULL 5 0 NULL NULL NULL smallint(5) u def test tb1 f2 2 NULL YES char 1 1 NULL NULL NULL latin1 latin1_bin char(1) select,insert,update,references NEVER NULL def test tb1 f20 20 NULL YES smallint NULL NULL 5 0 NULL NULL NULL smallint(5) unsigned zerofill select,insert,update,references NEVER NULL def test tb1 f21 21 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(9) select,insert,update,references NEVER NULL -def test tb1 f22 22 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned select,insert,update,references NEVER NULL -def test tb1 f23 23 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL -def test tb1 f24 24 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL +def test tb1 f22 22 NULL YES mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned select,insert,update,references NEVER NULL +def test tb1 f23 23 NULL YES mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned 
zerofill select,insert,update,references NEVER NULL +def test tb1 f24 24 NULL YES mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL def test tb1 f25 25 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER NULL def test tb1 f26 26 NULL YES int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references NEVER NULL def test tb1 f27 27 NULL YES int NULL NULL 10 0 NULL NULL NULL int(10) unsigned zerofill select,insert,update,references NEVER NULL @@ -600,9 +600,9 @@ def test tb3 f135 18 999 NO smallint NULL NULL 5 0 NULL NULL NULL smallint(5) un def test tb3 f136 19 00999 NO smallint NULL NULL 5 0 NULL NULL NULL smallint(5) unsigned zerofill select,insert,update,references NEVER NULL def test tb3 f137 20 00999 NO smallint NULL NULL 5 0 NULL NULL NULL smallint(5) unsigned zerofill select,insert,update,references NEVER NULL def test tb3 f138 21 9999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(9) select,insert,update,references NEVER NULL -def test tb3 f139 22 9999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned select,insert,update,references NEVER NULL -def test tb3 f140 23 00009999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL -def test tb3 f141 24 00009999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL +def test tb3 f139 22 9999 NO mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned select,insert,update,references NEVER NULL +def test tb3 f140 23 00009999 NO mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL +def test tb3 f141 24 00009999 NO mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill select,insert,update,references NEVER NULL def test tb3 f142 25 99999 NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER NULL def test tb3 f143 26 99999 NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references NEVER NULL def test tb3 f144 27 0000099999 NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned zerofill select,insert,update,references NEVER NULL diff --git a/mysql-test/suite/funcs_1/r/is_columns_myisam_embedded.result b/mysql-test/suite/funcs_1/r/is_columns_myisam_embedded.result index ae2b05c058e..6faa8d7b51a 100644 --- a/mysql-test/suite/funcs_1/r/is_columns_myisam_embedded.result +++ b/mysql-test/suite/funcs_1/r/is_columns_myisam_embedded.result @@ -476,9 +476,9 @@ def test tb1 f19 19 NULL YES smallint NULL NULL 5 0 NULL NULL NULL smallint(5) u def test tb1 f2 2 NULL YES char 1 1 NULL NULL NULL latin1 latin1_bin char(1) NEVER NULL def test tb1 f20 20 NULL YES smallint NULL NULL 5 0 NULL NULL NULL smallint(5) unsigned zerofill NEVER NULL def test tb1 f21 21 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(9) NEVER NULL -def test tb1 f22 22 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned NEVER NULL -def test tb1 f23 23 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill NEVER NULL -def test tb1 f24 24 NULL YES mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill NEVER NULL +def test tb1 f22 22 NULL YES mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned NEVER NULL +def test tb1 f23 23 NULL YES mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill NEVER NULL +def test tb1 
f24 24 NULL YES mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill NEVER NULL def test tb1 f25 25 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) NEVER NULL def test tb1 f26 26 NULL YES int NULL NULL 10 0 NULL NULL NULL int(10) unsigned NEVER NULL def test tb1 f27 27 NULL YES int NULL NULL 10 0 NULL NULL NULL int(10) unsigned zerofill NEVER NULL @@ -600,9 +600,9 @@ def test tb3 f135 18 999 NO smallint NULL NULL 5 0 NULL NULL NULL smallint(5) un def test tb3 f136 19 00999 NO smallint NULL NULL 5 0 NULL NULL NULL smallint(5) unsigned zerofill NEVER NULL def test tb3 f137 20 00999 NO smallint NULL NULL 5 0 NULL NULL NULL smallint(5) unsigned zerofill NEVER NULL def test tb3 f138 21 9999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(9) NEVER NULL -def test tb3 f139 22 9999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned NEVER NULL -def test tb3 f140 23 00009999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill NEVER NULL -def test tb3 f141 24 00009999 NO mediumint NULL NULL 7 0 NULL NULL NULL mediumint(8) unsigned zerofill NEVER NULL +def test tb3 f139 22 9999 NO mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned NEVER NULL +def test tb3 f140 23 00009999 NO mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill NEVER NULL +def test tb3 f141 24 00009999 NO mediumint NULL NULL 8 0 NULL NULL NULL mediumint(8) unsigned zerofill NEVER NULL def test tb3 f142 25 99999 NO int NULL NULL 10 0 NULL NULL NULL int(11) NEVER NULL def test tb3 f143 26 99999 NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned NEVER NULL def test tb3 f144 27 0000099999 NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned zerofill NEVER NULL diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index ba892fcaa70..58d7b6cc8be 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -23,19 +23,13 @@ galera_as_slave_replication_bundle : MDEV-13549 Galera test failures galera_gcache_recover : MDEV-13549 Galera test failures galera_gcache_recover_full_gcache : MDEV-13549 Galera test failures galera_gcache_recover_manytrx : MDEV-13549 Galera test failures -galera_ist_mysqldump : MDEV-13549 Galera test failures galera_ssl_upgrade : MDEV-13549 Galera test failures galera.MW-329 : wsrep_local_replays not stable galera.MW-328A : have_deadlocks test not stable -galera_var_retry_autocommit: MDEV-15794 Test failure on galera.galera_var_retry_autocommit -galera_var_auto_inc_control_on: MDEV-15803 Test failure on galera.galera_var_auto_inc_control_on -pxc-421 : MDEV-15804 Test failure on galera.pxc-421 query_cache : MDEV-15805 Test failure on galera.query_cache -galera.galera_gra_log : MDEV-15808 Test failure on galera.galera_gra_log galera.MW-44 : MDEV-15809 Test failure on galera.MW-44 galera.galera_pc_ignore_sb : MDEV-15811 Test failure on galera_pc_ignore_sb galera_kill_applier : race condition at the start of the test galera_ist_progress: MDEV-15236 galera_ist_progress fails when trying to read transfer status -GAL-480 : "Lost connection to MySQL" galera_concurrent_ctas : MDEV-15845 Test failure on galera.galera_concurrent_ctas -galera_sst_mysqldump : MDEV-14069 +pxc-421: Lock timeout exceeded diff --git a/mysql-test/suite/galera/include/galera_load_provider.inc b/mysql-test/suite/galera/include/galera_load_provider.inc index 761a1a89fd3..aeab7e6ea19 100644 --- a/mysql-test/suite/galera/include/galera_load_provider.inc +++ 
b/mysql-test/suite/galera/include/galera_load_provider.inc @@ -5,6 +5,4 @@ --eval SET GLOBAL wsrep_cluster_address = '$wsrep_cluster_address_orig'; --enable_query_log ---enable_reconnect ---source include/wait_until_connected_again.inc ---source include/wait_until_ready.inc +--source include/galera_wait_ready.inc diff --git a/mysql-test/suite/galera/include/galera_st_clean_slave.inc b/mysql-test/suite/galera/include/galera_st_clean_slave.inc index 81ba54aa6f5..3a49f4f6ad2 100644 --- a/mysql-test/suite/galera/include/galera_st_clean_slave.inc +++ b/mysql-test/suite/galera/include/galera_st_clean_slave.inc @@ -64,7 +64,9 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); --connection node_2 --echo Starting server ... --source include/start_mysqld.inc ---source include/wait_until_ready.inc + +--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size' +--source include/wait_condition.inc SET AUTOCOMMIT=OFF; START TRANSACTION; diff --git a/mysql-test/suite/galera/include/galera_st_kill_slave.inc b/mysql-test/suite/galera/include/galera_st_kill_slave.inc index bae37755c65..0b96de55a32 100644 --- a/mysql-test/suite/galera/include/galera_st_kill_slave.inc +++ b/mysql-test/suite/galera/include/galera_st_kill_slave.inc @@ -24,7 +24,6 @@ COMMIT; --source include/kill_galera.inc --connection node_1 ---source include/wait_until_connected_again.inc --let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size' --source include/wait_condition.inc @@ -59,7 +58,9 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); --echo Starting server ... --source include/start_mysqld.inc ---source include/wait_until_ready.inc + +--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size' +--source include/wait_condition.inc SET AUTOCOMMIT=OFF; START TRANSACTION; diff --git a/mysql-test/suite/galera/include/galera_st_kill_slave_ddl.inc b/mysql-test/suite/galera/include/galera_st_kill_slave_ddl.inc index 72e80505870..44a1513fa6e 100644 --- a/mysql-test/suite/galera/include/galera_st_kill_slave_ddl.inc +++ b/mysql-test/suite/galera/include/galera_st_kill_slave_ddl.inc @@ -72,7 +72,9 @@ INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); --connection node_2 --echo Starting server ... --source include/start_mysqld.inc ---source include/wait_until_ready.inc + +--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size' +--source include/wait_condition.inc SET AUTOCOMMIT=OFF; START TRANSACTION; diff --git a/mysql-test/suite/galera/include/galera_st_shutdown_slave.inc b/mysql-test/suite/galera/include/galera_st_shutdown_slave.inc index 1a65ef1bd94..6c09b0ceb0c 100644 --- a/mysql-test/suite/galera/include/galera_st_shutdown_slave.inc +++ b/mysql-test/suite/galera/include/galera_st_shutdown_slave.inc @@ -56,7 +56,9 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); --connection node_2 --echo Starting server ... 
--source include/start_mysqld.inc ---source include/wait_until_ready.inc + +--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size' +--source include/wait_condition.inc SET AUTOCOMMIT=OFF; START TRANSACTION; diff --git a/mysql-test/suite/galera/include/start_mysqld.inc b/mysql-test/suite/galera/include/start_mysqld.inc index 4ee3d17810c..57af9203d0f 100644 --- a/mysql-test/suite/galera/include/start_mysqld.inc +++ b/mysql-test/suite/galera/include/start_mysqld.inc @@ -12,11 +12,4 @@ if ($galera_wsrep_start_position == '') { --exec echo "restart:$start_mysqld_params" > $_expect_file_name } -# Turn on reconnect ---enable_reconnect - -# Call script that will poll the server waiting for it to be back online again ---source include/wait_until_connected_again.inc - -# Turn off reconnect again ---disable_reconnect +--source include/galera_wait_ready.inc diff --git a/mysql-test/suite/galera/r/MW-44.result b/mysql-test/suite/galera/r/MW-44.result index 7ae34440f7a..459a61030a4 100644 --- a/mysql-test/suite/galera/r/MW-44.result +++ b/mysql-test/suite/galera/r/MW-44.result @@ -1,15 +1,18 @@ -SET @@global.wsrep_replicate_myisam=OFF; -connection node_1; +SET GLOBAL general_log='OFF'; TRUNCATE TABLE mysql.general_log; SELECT COUNT(*) from mysql.general_log; COUNT(*) -1 -connection node_2; +0 +SELECT * FROM mysql.general_log; +event_time user_host thread_id server_id command_type argument +SET GLOBAL general_log='OFF'; TRUNCATE TABLE mysql.general_log; SELECT COUNT(*) from mysql.general_log; COUNT(*) -1 -connection node_1; +0 +SELECT * FROM mysql.general_log; +event_time user_host thread_id server_id command_type argument +SET GLOBAL general_log='ON'; SELECT COUNT(*) from mysql.general_log; COUNT(*) 1 @@ -21,9 +24,8 @@ SET SESSION wsrep_osu_method=TOI; SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%'; COUNT(*) = 2 1 -connection node_2; +SET GLOBAL general_log='ON'; SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument NOT LIKE 'SELECT%'; COUNT(*) = 0 1 DROP TABLE t1; -SET @@global.wsrep_replicate_myisam=Default; diff --git a/mysql-test/suite/galera/r/galera_encrypt_tmp_files.result b/mysql-test/suite/galera/r/galera_encrypt_tmp_files.result new file mode 100644 index 00000000000..38480d186ba --- /dev/null +++ b/mysql-test/suite/galera/r/galera_encrypt_tmp_files.result @@ -0,0 +1,37 @@ +SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; +VARIABLE_VALUE = 'Synced' +1 +SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +VARIABLE_VALUE = 2 +1 +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB; +INSERT INTO t1 VALUES (1); +connection node_2; +SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; +VARIABLE_VALUE = 'Synced' +1 +SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +VARIABLE_VALUE = 2 +1 +SELECT COUNT(*) = 1 FROM t1; +COUNT(*) = 1 +1 +DROP TABLE t1; +connection node_1; +CREATE TABLE `t1` ( +`col1` int(11) NOT NULL, +`col2` varchar(64) NOT NULL DEFAULT '', +`col3` varchar(32) NOT NULL DEFAULT '0', +`col4` varchar(64) NOT NULL DEFAULT '', +`col5` tinyint(4) NOT NULL DEFAULT '0', +`col6` int(11) NOT NULL DEFAULT '0', +`col7` varchar(64) NOT NULL DEFAULT '', +`col8` tinyint(4) NOT NULL DEFAULT '0', +`col9` tinyint(4) NOT 
NULL DEFAULT '0', +`col10` text NOT NULL, +`col11` varchar(255) NOT NULL DEFAULT '', +`col12` tinyint(4) NOT NULL DEFAULT '1' +) ; +create table t2 (test int); +insert into t2 values (1); +drop table t1,t2; diff --git a/mysql-test/suite/galera/r/galera_gra_log.result b/mysql-test/suite/galera/r/galera_gra_log.result index e45882db523..777eda42046 100644 --- a/mysql-test/suite/galera/r/galera_gra_log.result +++ b/mysql-test/suite/galera/r/galera_gra_log.result @@ -4,6 +4,7 @@ CREATE TABLE t1 (f1 INTEGER); connection node_1; CREATE TABLE t1 (f1 INTEGER); connection node_2; +SET SESSION wsrep_on=ON; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 @@ -30,6 +31,5 @@ DELIMITER ; ROLLBACK /* added by mysqlbinlog */; /*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/; /*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/; -SET SESSION wsrep_on=ON; CALL mtr.add_suppression("Slave SQL: Error 'Table 't1' already exists' on query"); DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_ist_mysqldump.result b/mysql-test/suite/galera/r/galera_ist_mysqldump.result index 8d0010afc44..58a3ca297f8 100644 --- a/mysql-test/suite/galera/r/galera_ist_mysqldump.result +++ b/mysql-test/suite/galera/r/galera_ist_mysqldump.result @@ -1,9 +1,16 @@ Setting SST method to mysqldump ... +call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to '127.0.0.1'"); +call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos"); +connection node_1; CREATE USER 'sst'; GRANT ALL PRIVILEGES ON *.* TO 'sst'; SET GLOBAL wsrep_sst_auth = 'sst:'; +connection node_2; SET GLOBAL wsrep_sst_method = 'mysqldump'; +connection node_1; +connection node_2; Performing State Transfer on a server that has been shut down cleanly and restarted +connection node_1; CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; SET AUTOCOMMIT=OFF; START TRANSACTION; @@ -13,6 +20,7 @@ INSERT INTO t1 VALUES ('node1_committed_before'); INSERT INTO t1 VALUES ('node1_committed_before'); INSERT INTO t1 VALUES ('node1_committed_before'); COMMIT; +connection node_2; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 VALUES ('node2_committed_before'); @@ -22,6 +30,7 @@ INSERT INTO t1 VALUES ('node2_committed_before'); INSERT INTO t1 VALUES ('node2_committed_before'); COMMIT; Shutting down server ... +connection node_1; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 VALUES ('node1_committed_during'); @@ -36,6 +45,7 @@ INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +connect node_1a_galera_st_shutdown_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); @@ -43,6 +53,7 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +connection node_2; Starting server ... 
SET AUTOCOMMIT=OFF; START TRANSACTION; @@ -52,6 +63,7 @@ INSERT INTO t1 VALUES ('node2_committed_after'); INSERT INTO t1 VALUES ('node2_committed_after'); INSERT INTO t1 VALUES ('node2_committed_after'); COMMIT; +connection node_1; INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); @@ -66,6 +78,7 @@ INSERT INTO t1 VALUES ('node1_committed_after'); INSERT INTO t1 VALUES ('node1_committed_after'); INSERT INTO t1 VALUES ('node1_committed_after'); COMMIT; +connection node_1a_galera_st_shutdown_slave; INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); @@ -80,6 +93,7 @@ COUNT(*) = 0 1 COMMIT; SET AUTOCOMMIT=ON; +connection node_1; SELECT COUNT(*) = 35 FROM t1; COUNT(*) = 35 1 @@ -90,6 +104,7 @@ DROP TABLE t1; COMMIT; SET AUTOCOMMIT=ON; Performing State Transfer on a server that has been killed and restarted +connection node_1; CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; SET AUTOCOMMIT=OFF; START TRANSACTION; @@ -99,6 +114,7 @@ INSERT INTO t1 VALUES ('node1_committed_before'); INSERT INTO t1 VALUES ('node1_committed_before'); INSERT INTO t1 VALUES ('node1_committed_before'); COMMIT; +connection node_2; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 VALUES ('node2_committed_before'); @@ -108,6 +124,7 @@ INSERT INTO t1 VALUES ('node2_committed_before'); INSERT INTO t1 VALUES ('node2_committed_before'); COMMIT; Killing server ... +connection node_1; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 VALUES ('node1_committed_during'); @@ -122,6 +139,7 @@ INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); @@ -129,6 +147,7 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +connection node_2; Performing --wsrep-recover ... Starting server ... Using --wsrep-start-position when starting mysqld ... 
@@ -140,6 +159,7 @@ INSERT INTO t1 VALUES ('node2_committed_after'); INSERT INTO t1 VALUES ('node2_committed_after'); INSERT INTO t1 VALUES ('node2_committed_after'); COMMIT; +connection node_1; INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); @@ -154,6 +174,7 @@ INSERT INTO t1 VALUES ('node1_committed_after'); INSERT INTO t1 VALUES ('node1_committed_after'); INSERT INTO t1 VALUES ('node1_committed_after'); COMMIT; +connection node_1a_galera_st_kill_slave; INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); @@ -168,6 +189,7 @@ COUNT(*) = 0 1 COMMIT; SET AUTOCOMMIT=ON; +connection node_1; SELECT COUNT(*) = 35 FROM t1; COUNT(*) = 35 1 @@ -179,6 +201,7 @@ COMMIT; SET AUTOCOMMIT=ON; Performing State Transfer on a server that has been killed and restarted while a DDL was in progress on it +connection node_1; CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; SET AUTOCOMMIT=OFF; START TRANSACTION; @@ -187,6 +210,7 @@ INSERT INTO t1 VALUES ('node1_committed_before'); INSERT INTO t1 VALUES ('node1_committed_before'); INSERT INTO t1 VALUES ('node1_committed_before'); INSERT INTO t1 VALUES ('node1_committed_before'); +connection node_2; START TRANSACTION; INSERT INTO t1 VALUES ('node2_committed_before'); INSERT INTO t1 VALUES ('node2_committed_before'); @@ -194,10 +218,13 @@ INSERT INTO t1 VALUES ('node2_committed_before'); INSERT INTO t1 VALUES ('node2_committed_before'); INSERT INTO t1 VALUES ('node2_committed_before'); COMMIT; -SET GLOBAL debug = 'd,sync.alter_opened_table'; +SET GLOBAL debug_dbug = 'd,sync.alter_opened_table'; +connection node_1; ALTER TABLE t1 ADD COLUMN f2 INTEGER; +connection node_2; SET wsrep_sync_wait = 0; Killing server ... +connection node_1; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 (f1) VALUES ('node1_committed_during'); @@ -212,6 +239,7 @@ INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); +connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); @@ -219,7 +247,9 @@ INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); +connection node_2; Performing --wsrep-recover ... +connection node_2; Starting server ... Using --wsrep-start-position when starting mysqld ... 
SET AUTOCOMMIT=OFF; @@ -230,6 +260,7 @@ INSERT INTO t1 (f1) VALUES ('node2_committed_after'); INSERT INTO t1 (f1) VALUES ('node2_committed_after'); INSERT INTO t1 (f1) VALUES ('node2_committed_after'); COMMIT; +connection node_1; INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); @@ -244,6 +275,7 @@ INSERT INTO t1 (f1) VALUES ('node1_committed_after'); INSERT INTO t1 (f1) VALUES ('node1_committed_after'); INSERT INTO t1 (f1) VALUES ('node1_committed_after'); COMMIT; +connection node_1a_galera_st_kill_slave_ddl; INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); @@ -261,6 +293,7 @@ COUNT(*) = 0 1 COMMIT; SET AUTOCOMMIT=ON; +connection node_1; SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; COUNT(*) = 2 1 @@ -273,8 +306,11 @@ COUNT(*) = 0 DROP TABLE t1; COMMIT; SET AUTOCOMMIT=ON; +SET GLOBAL debug_dbug = $debug_orig; +connection node_1; CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query"); DROP USER sst; +connection node_2; CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query"); CALL mtr.add_suppression("InnoDB: Error: Table \"mysql\"\\.\"innodb_index_stats\" not found"); CALL mtr.add_suppression("Can't open and lock time zone table"); diff --git a/mysql-test/suite/galera/r/galera_ist_rsync.result b/mysql-test/suite/galera/r/galera_ist_rsync.result index e4eb9d821fa..9c0d78d96e9 100644 --- a/mysql-test/suite/galera/r/galera_ist_rsync.result +++ b/mysql-test/suite/galera/r/galera_ist_rsync.result @@ -1,3 +1,5 @@ +connection node_1; +connection node_2; Performing State Transfer on a server that has been temporarily disconnected connection node_1; CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; diff --git a/mysql-test/suite/galera/r/galera_kill_nochanges.result b/mysql-test/suite/galera/r/galera_kill_nochanges.result index a307dde0527..56caf1bd9ea 100644 --- a/mysql-test/suite/galera/r/galera_kill_nochanges.result +++ b/mysql-test/suite/galera/r/galera_kill_nochanges.result @@ -3,6 +3,10 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); connection node_2; Killing server ... 
+connection node_1; +SET SESSION wsrep_sync_wait = 0; +SET SESSION wsrep_sync_wait = DEFAULT; +connection node_2; connection node_2a; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 diff --git a/mysql-test/suite/galera/r/galera_mdev_15611.result b/mysql-test/suite/galera/r/galera_mdev_15611.result new file mode 100644 index 00000000000..9ea1684494a --- /dev/null +++ b/mysql-test/suite/galera/r/galera_mdev_15611.result @@ -0,0 +1,16 @@ +connection node_1; +CREATE TABLE t1 ( +id int primary key +); +CREATE TABLE t2 ( +id int primary key , +f_id int DEFAULT NULL, FOREIGN KEY(f_id) REFERENCES t1 (id) +); +insert into t1 select 1; +#Running 200 insert in t2 table +select count(*) from t2; +count(*) +200 +delete from t2; +delete from t1; +drop table t2,t1; diff --git a/mysql-test/suite/galera/r/galera_toi_ddl_fk_insert.result b/mysql-test/suite/galera/r/galera_toi_ddl_fk_insert.result index 1726bb6445f..0dbc89978d4 100644 --- a/mysql-test/suite/galera/r/galera_toi_ddl_fk_insert.result +++ b/mysql-test/suite/galera/r/galera_toi_ddl_fk_insert.result @@ -10,16 +10,26 @@ id INT PRIMARY KEY AUTO_INCREMENT, parent_id INT ) ENGINE=InnoDB; INSERT INTO parent VALUES (1, 0); +connection node_2; INSERT INTO child (parent_id) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;; +connection node_1a; INSERT INTO parent (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;; +connection node_2a; INSERT INTO parent (f2) SELECT 2 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;; +connection node_1b; ALTER TABLE child ADD FOREIGN KEY (parent_id) REFERENCES parent(id);; +connection node_1a; +connection node_1b; +connection node_2; +connection node_2a; +connection node_1; SELECT COUNT(*) = 20001 FROM parent; COUNT(*) = 20001 1 SELECT COUNT(*) = 10000 FROM child; COUNT(*) = 10000 1 +connection node_2; SELECT COUNT(*) = 20001 FROM parent; COUNT(*) = 20001 1 diff --git a/mysql-test/suite/galera/r/galera_var_auto_inc_control_on.result b/mysql-test/suite/galera/r/galera_var_auto_inc_control_on.result index 0cbba2fa124..b71cf4c831d 100644 --- a/mysql-test/suite/galera/r/galera_var_auto_inc_control_on.result +++ b/mysql-test/suite/galera/r/galera_var_auto_inc_control_on.result @@ -1,38 +1,37 @@ +connection node_1; CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, node VARCHAR(10)) ENGINE=InnoDB; -SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; -VARIABLE_VALUE -2 -SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_index'; -VARIABLE_VALUE -0 SELECT @@auto_increment_increment = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'); @@auto_increment_increment = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size') 1 -SELECT @@global.auto_increment_offset = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_index') + 1; -@@global.auto_increment_offset = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_index') + 1 +connection node_2; +auto_increment_offset_differ 1 +connection node_1; INSERT INTO t1 VALUES (DEFAULT, 'node1');; +connection node_2; INSERT INTO t1 VALUES (DEFAULT, 'node2');; -SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; -VARIABLE_VALUE -2 -SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_index'; 
-VARIABLE_VALUE -1 +connection node_1; +connection node_2; SELECT @@auto_increment_increment = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'); @@auto_increment_increment = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size') 1 -SELECT @@global.auto_increment_offset = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_index') + 1; -@@global.auto_increment_offset = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_index') + 1 +connection node_1; +auto_increment_offset_differ 1 +connection node_2; INSERT INTO t1 VALUES (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2');; +connection node_1; INSERT INTO t1 VALUES (DEFAULT, 'node1'), (DEFAULT, 'node1'), (DEFAULT, 'node1'), (DEFAULT, 'node1'), (DEFAULT, 'node1'), (DEFAULT, 'node1'), (DEFAULT, 'node1'), (DEFAULT, 'node1'), (DEFAULT, 'node1'), (DEFAULT, 'node1');; +connection node_2; +connection node_1; +connection node_2; SELECT COUNT(*) = 22 FROM t1; COUNT(*) = 22 1 SELECT COUNT(DISTINCT f1) = 22 FROM t1; COUNT(DISTINCT f1) = 22 1 +connection node_1; SELECT COUNT(*) = 22 FROM t1; COUNT(*) = 22 1 diff --git a/mysql-test/suite/galera/r/galera_var_retry_autocommit.result b/mysql-test/suite/galera/r/galera_var_retry_autocommit.result index 392728baf7d..c0bf6035184 100644 --- a/mysql-test/suite/galera/r/galera_var_retry_autocommit.result +++ b/mysql-test/suite/galera/r/galera_var_retry_autocommit.result @@ -1,44 +1,72 @@ +connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; connection node_1; -CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB; -CREATE TRIGGER tr1 BEFORE INSERT ON t1 FOR EACH ROW SET NEW.f2 = SLEEP(5); -connection node_1; +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; SET SESSION wsrep_retry_autocommit = 0; -INSERT INTO t1 (f1) VALUES (1),(2);; +SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue'; +INSERT INTO t1 (f1) VALUES (2); +connection node_1a; +SET DEBUG_SYNC = 'now WAIT_FOR before_rep'; connection node_2; TRUNCATE TABLE t1; connection node_1; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction -connection node_1; -SET SESSION wsrep_retry_autocommit = 1; -INSERT INTO t1 (f1) VALUES (3),(4);; -connection node_2; -TRUNCATE TABLE t1; -connection node_1; -SELECT * FROM test.t1; -f1 f2 -3 0 -4 0 -connection node_2; -CREATE PROCEDURE repeated_truncate () -BEGIN -DECLARE i INT; -DECLARE CONTINUE HANDLER FOR SQLEXCEPTION BEGIN END; -SET i = 0; -WHILE i <= 1000 DO -TRUNCATE TABLE t1; -SET i = i + 1; -END WHILE; -END| -CALL repeated_truncate(); -connection node_1; -SET SESSION wsrep_retry_autocommit = 1; -INSERT INTO t1 (f1) VALUES (5),(6); -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction -connection node_1; -SET SESSION wsrep_retry_autocommit = 1024; -INSERT INTO t1 (f1) VALUES (7),(8);; -connection node_2; -connection node_1; -include/diff_servers.inc [servers=1 2] +ERROR 40001: Deadlock: wsrep aborted transaction +SELECT COUNT(*) = 0 FROM t1; +COUNT(*) = 0 +1 +SET DEBUG_SYNC = 'RESET'; +DROP TABLE t1; +connection node_1; +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; +SET SESSION wsrep_retry_autocommit = 1; +SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL 
before_rep WAIT_FOR continue'; +INSERT INTO t1 (f1) VALUES (2); +connection node_1a; +SET DEBUG_SYNC = 'now WAIT_FOR before_rep'; +connection node_2; +TRUNCATE TABLE t1; +connection node_1; +SELECT COUNT(*) = 1 FROM t1; +COUNT(*) = 1 +1 +SET DEBUG_SYNC = 'RESET'; +DROP TABLE t1; +connection node_1; +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; +SET SESSION wsrep_retry_autocommit = 1; +SET GLOBAL debug_dbug = '+d,sync.wsrep_retry_autocommit'; +SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue EXECUTE 2'; +INSERT INTO t1 VALUES (2);; +connection node_1a; +SET DEBUG_SYNC = 'now WAIT_FOR before_rep'; +connection node_2; +TRUNCATE TABLE t1; +connection node_1a; +SET DEBUG_SYNC = 'now WAIT_FOR wsrep_retry_autocommit_reached'; +SELECT COUNT(*) = 0 FROM t1; +COUNT(*) = 0 +1 +SET DEBUG_SYNC = 'now SIGNAL wsrep_retry_autocommit_continue WAIT_FOR before_rep'; +connection node_2; +TRUNCATE TABLE t1; +connection node_1a; +SELECT COUNT(*) = 0 FROM t1; +COUNT(*) = 0 +1 +connection node_1; +ERROR 40001: Deadlock: wsrep aborted transaction +SET DEBUG_SYNC = 'RESET'; +SET GLOBAL debug_dbug = NULL; +DROP TABLE t1; +connection node_1; +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; +SET SESSION wsrep_retry_autocommit = 64; +SET GLOBAL debug_dbug = '+d,sync.wsrep_retry_autocommit'; +SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue EXECUTE 64'; +INSERT INTO t1 VALUES (2); +connection node_1; +SELECT COUNT(*) = 1 FROM t1; +COUNT(*) = 1 +1 +SET DEBUG_SYNC = 'RESET'; +SET GLOBAL debug_dbug = NULL; DROP TABLE t1; -DROP PROCEDURE repeated_truncate; diff --git a/mysql-test/suite/galera/r/mysql-wsrep#33.result b/mysql-test/suite/galera/r/mysql-wsrep#33.result index fc3db249c25..6a5251204b9 100644 --- a/mysql-test/suite/galera/r/mysql-wsrep#33.result +++ b/mysql-test/suite/galera/r/mysql-wsrep#33.result @@ -1,11 +1,16 @@ +connection node_1; +connection node_2; Setting SST method to mysqldump ... call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to '127.0.0.1'"); call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos"); +connection node_1; CREATE USER 'sst'; GRANT ALL PRIVILEGES ON *.* TO 'sst'; SET GLOBAL wsrep_sst_auth = 'sst:'; +connection node_2; SET GLOBAL wsrep_sst_method = 'mysqldump'; Performing State Transfer on a server that has been temporarily disconnected +connection node_1; CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; SET AUTOCOMMIT=OFF; START TRANSACTION; @@ -15,6 +20,7 @@ INSERT INTO t1 VALUES ('node1_committed_before'); INSERT INTO t1 VALUES ('node1_committed_before'); INSERT INTO t1 VALUES ('node1_committed_before'); COMMIT; +connection node_2; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 VALUES ('node2_committed_before'); @@ -25,6 +31,7 @@ INSERT INTO t1 VALUES ('node2_committed_before'); COMMIT; Unloading wsrep provider ... 
SET GLOBAL wsrep_provider = 'none'; +connection node_1; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 VALUES ('node1_committed_during'); @@ -39,6 +46,7 @@ INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +connect node_1a_galera_st_disconnect_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); @@ -46,6 +54,7 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +connection node_2; Loading wsrep provider ... SET AUTOCOMMIT=OFF; START TRANSACTION; @@ -55,6 +64,7 @@ INSERT INTO t1 VALUES ('node2_committed_after'); INSERT INTO t1 VALUES ('node2_committed_after'); INSERT INTO t1 VALUES ('node2_committed_after'); COMMIT; +connection node_1; INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); @@ -69,6 +79,7 @@ INSERT INTO t1 VALUES ('node1_committed_after'); INSERT INTO t1 VALUES ('node1_committed_after'); INSERT INTO t1 VALUES ('node1_committed_after'); COMMIT; +connection node_1a_galera_st_disconnect_slave; INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); @@ -83,6 +94,7 @@ COUNT(*) = 0 1 COMMIT; SET AUTOCOMMIT=ON; +connection node_1; SELECT COUNT(*) = 35 FROM t1; COUNT(*) = 35 1 @@ -92,12 +104,15 @@ COUNT(*) = 0 DROP TABLE t1; COMMIT; SET AUTOCOMMIT=ON; +connection node_1; CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query"); DROP USER sst; +connection node_2; CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query"); CALL mtr.add_suppression("InnoDB: Error: Table \"mysql\"\\.\"innodb_index_stats\" not found"); CALL mtr.add_suppression("Can't open and lock time zone table"); CALL mtr.add_suppression("Can't open and lock privilege tables"); CALL mtr.add_suppression("Info table is not ready to be used"); CALL mtr.add_suppression("Native table .* has the wrong structure"); +connection node_2; Restarting server ... 
diff --git a/mysql-test/suite/galera/r/pxc-421.result b/mysql-test/suite/galera/r/pxc-421.result index d60c7c4540e..a317b3e40e1 100644 --- a/mysql-test/suite/galera/r/pxc-421.result +++ b/mysql-test/suite/galera/r/pxc-421.result @@ -12,7 +12,6 @@ INSERT INTO t1 VALUES (2); connection node_1; INSERT INTO t1 VALUES (3); connection node_2; -set SESSION wsrep_sync_wait=0; INSERT INTO t1 VALUES (4); set GLOBAL wsrep_slave_threads=5; SELECT COUNT(*) = 5 FROM t1; diff --git a/mysql-test/suite/galera/t/MW-44.test b/mysql-test/suite/galera/t/MW-44.test index 2b27e69893c..7c988a29548 100644 --- a/mysql-test/suite/galera/t/MW-44.test +++ b/mysql-test/suite/galera/t/MW-44.test @@ -5,18 +5,22 @@ --source include/galera_cluster.inc --source include/have_innodb.inc -SET @@global.wsrep_replicate_myisam=OFF; --connection node_1 +SET GLOBAL general_log='OFF'; TRUNCATE TABLE mysql.general_log; SELECT COUNT(*) from mysql.general_log; +SELECT * FROM mysql.general_log; --sleep 1 --connection node_2 +SET GLOBAL general_log='OFF'; TRUNCATE TABLE mysql.general_log; SELECT COUNT(*) from mysql.general_log; +SELECT * FROM mysql.general_log; --sleep 1 --connection node_1 +SET GLOBAL general_log='ON'; SELECT COUNT(*) from mysql.general_log; SET SESSION wsrep_osu_method=TOI; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; @@ -27,6 +31,7 @@ SET SESSION wsrep_osu_method=TOI; SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%'; --connection node_2 +SET GLOBAL general_log='ON'; SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument NOT LIKE 'SELECT%'; DROP TABLE t1; -SET @@global.wsrep_replicate_myisam=Default; + diff --git a/mysql-test/suite/galera/t/galera_encrypt_tmp_files.cnf b/mysql-test/suite/galera/t/galera_encrypt_tmp_files.cnf new file mode 100644 index 00000000000..0f7f80b7d0b --- /dev/null +++ b/mysql-test/suite/galera/t/galera_encrypt_tmp_files.cnf @@ -0,0 +1,8 @@ +!include ../galera_2nodes.cnf +[mysqld] + +encrypt-tmp-files = 1 +plugin-load-add= @ENV.FILE_KEY_MANAGEMENT_SO +file-key-management +loose-file-key-management-filename= @ENV.MYSQL_TEST_DIR/std_data/keys.txt +log-bin diff --git a/mysql-test/suite/galera/t/galera_encrypt_tmp_files.test b/mysql-test/suite/galera/t/galera_encrypt_tmp_files.test new file mode 100644 index 00000000000..c42c3dbd98a --- /dev/null +++ b/mysql-test/suite/galera/t/galera_encrypt_tmp_files.test @@ -0,0 +1,57 @@ +# This file tests that mariadb cluster should not crash when encrypt_tmp_file +# is enabled + +--source include/galera_cluster.inc +--source include/have_innodb.inc + +SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; +SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; + +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB; +INSERT INTO t1 VALUES (1); + +--connection node_2 +SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; +SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; + +SELECT COUNT(*) = 1 FROM t1; + +DROP TABLE t1; + +--connection node_1 + +CREATE TABLE `t1` ( + `col1` int(11) NOT NULL, + `col2` varchar(64) NOT NULL DEFAULT '', + `col3` varchar(32) NOT NULL DEFAULT '0', + `col4` varchar(64) NOT NULL DEFAULT '', + `col5` tinyint(4) NOT NULL DEFAULT '0', + `col6` int(11) NOT NULL DEFAULT '0', + `col7` varchar(64) NOT NULL DEFAULT '', + `col8` tinyint(4) NOT NULL 
DEFAULT '0', + `col9` tinyint(4) NOT NULL DEFAULT '0', + `col10` text NOT NULL, + `col11` varchar(255) NOT NULL DEFAULT '', + `col12` tinyint(4) NOT NULL DEFAULT '1' +) ; + +#Although we just need $counter >= 907 for IO_CACHE to use +#encrypted temp file. Just on safe side I am using $counter +# = 1100 +--disable_query_log +--let $counter=1100 +--let $query= (1,'test','test','test',0,0,'-1',0,0,'','',-1) +while($counter) +{ + --let $query= $query ,(1,'test','test','test',0,0,'-1',0,0,'','',-1) + --dec $counter +} +--let $query= INSERT INTO t1 values $query ; +--eval $query +--enable_query_log +#INSERT INTO `t1` VALUE + +create table t2 (test int); +insert into t2 values (1); + +drop table t1,t2; diff --git a/mysql-test/suite/galera/t/galera_gcs_fragment.test b/mysql-test/suite/galera/t/galera_gcs_fragment.test index 0436e312f6e..80d3a5cb659 100644 --- a/mysql-test/suite/galera/t/galera_gcs_fragment.test +++ b/mysql-test/suite/galera/t/galera_gcs_fragment.test @@ -3,6 +3,11 @@ --source include/have_innodb.inc --source suite/galera/include/galera_have_debug_sync.inc +# Save original auto_increment_offset values. +--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc + # Prepare table CREATE TABLE t1 (f1 INT PRIMARY KEY, f2 TEXT); @@ -65,3 +70,5 @@ SELECT * FROM t1; --connection node_1 DROP TABLE t1; + +--source include/auto_increment_offset_restore.inc diff --git a/mysql-test/suite/galera/t/galera_gra_log.test b/mysql-test/suite/galera/t/galera_gra_log.test index adb37de9188..8b5aaaae5bd 100644 --- a/mysql-test/suite/galera/t/galera_gra_log.test +++ b/mysql-test/suite/galera/t/galera_gra_log.test @@ -17,6 +17,7 @@ CREATE TABLE t1 (f1 INTEGER); CREATE TABLE t1 (f1 INTEGER); --connection node_2 +SET SESSION wsrep_on=ON; SELECT COUNT(*) = 0 FROM t1; # Make sure the GRA file produced is readable and contains the failure @@ -24,8 +25,6 @@ SELECT COUNT(*) = 0 FROM t1; --replace_regex /SET TIMESTAMP=[0-9]+/SET TIMESTAMP=/ /pseudo_thread_id=[0-9]+/pseudo_thread_id=/ --exec $MYSQL_BINLOG --short-form $MYSQLTEST_VARDIR/mysqld.2/data/GRA_*.log -SET SESSION wsrep_on=ON; - CALL mtr.add_suppression("Slave SQL: Error 'Table 't1' already exists' on query"); DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_ist_mysqldump.test b/mysql-test/suite/galera/t/galera_ist_mysqldump.test index a9ff8c41f06..f60d5549eda 100644 --- a/mysql-test/suite/galera/t/galera_ist_mysqldump.test +++ b/mysql-test/suite/galera/t/galera_ist_mysqldump.test @@ -4,6 +4,10 @@ --source suite/galera/include/galera_sst_set_mysqldump.inc +--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc + # mysql-wsrep#33 - nnoDB: Failing assertion: xid_seqno > trx_sys_cur_xid_seqno in trx_sys_update_wsrep_checkpoint with mysqldump IST # --source suite/galera/include/galera_st_disconnect_slave.inc @@ -14,4 +18,5 @@ --source suite/galera/include/galera_st_kill_slave.inc --source suite/galera/include/galera_st_kill_slave_ddl.inc +--source include/auto_increment_offset_restore.inc --source suite/galera/include/galera_sst_restore.inc diff --git a/mysql-test/suite/galera/t/galera_kill_nochanges.test b/mysql-test/suite/galera/t/galera_kill_nochanges.test index 1903df449e4..4106378885f 100644 --- a/mysql-test/suite/galera/t/galera_kill_nochanges.test +++ b/mysql-test/suite/galera/t/galera_kill_nochanges.test @@ -11,6 +11,14 @@ INSERT INTO t1 VALUES (1); --connection node_2 --source include/kill_galera.inc + +--connection node_1 +SET SESSION wsrep_sync_wait = 0; +--let 
$wait_condition = SELECT VARIABLE_VALUE = 'non-Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status' +--source include/wait_condition.inc +SET SESSION wsrep_sync_wait = DEFAULT; + +--connection node_2 --source include/start_mysqld.inc --let $galera_connection_name = node_2a diff --git a/mysql-test/suite/galera/t/galera_mdev_15611.cnf b/mysql-test/suite/galera/t/galera_mdev_15611.cnf new file mode 100644 index 00000000000..b6f601c56b1 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_mdev_15611.cnf @@ -0,0 +1,5 @@ +!include ../galera_2nodes.cnf +[mysqld.1] + +[mysqld.2] +wsrep_slave_threads=6 diff --git a/mysql-test/suite/galera/t/galera_mdev_15611.test b/mysql-test/suite/galera/t/galera_mdev_15611.test new file mode 100644 index 00000000000..d32d7e75262 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_mdev_15611.test @@ -0,0 +1,30 @@ +--source include/galera_cluster.inc +--source include/have_innodb.inc + +--connection node_1 +CREATE TABLE t1 ( + id int primary key +); + +CREATE TABLE t2 ( + id int primary key , + f_id int DEFAULT NULL, FOREIGN KEY(f_id) REFERENCES t1 (id) +); + +insert into t1 select 1; + +--disable_query_log +--let $count=200 +--echo #Running 200 insert in t2 table +while($count) +{ + #Repeatedly execute the following SQL until you generate thousands of data + --eval insert into t2 values ($count, 1); + --dec $count +} +--enable_query_log + +select count(*) from t2; +delete from t2; +delete from t1; +drop table t2,t1; diff --git a/mysql-test/suite/galera/t/galera_pc_ignore_sb.test b/mysql-test/suite/galera/t/galera_pc_ignore_sb.test index 89ef2f0f96b..f24ca5cd25b 100644 --- a/mysql-test/suite/galera/t/galera_pc_ignore_sb.test +++ b/mysql-test/suite/galera/t/galera_pc_ignore_sb.test @@ -20,7 +20,9 @@ SET GLOBAL wsrep_provider_options = 'pc.ignore_sb=true'; --source include/kill_galera.inc --connection node_1 ---sleep 2 +--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size' +--source include/wait_condition.inc + CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); DROP TABLE t1; @@ -38,10 +40,8 @@ SET GLOBAL wsrep_cluster_address = ''; --connection node_2 --source include/start_mysqld.inc ---source include/wait_until_connected_again.inc # Restore original auto_increment_offset values. --source include/auto_increment_offset_restore.inc --source include/galera_end.inc - diff --git a/mysql-test/suite/galera/t/galera_var_auto_inc_control_on.test b/mysql-test/suite/galera/t/galera_var_auto_inc_control_on.test index 6abaecb144b..ee31be94edd 100644 --- a/mysql-test/suite/galera/t/galera_var_auto_inc_control_on.test +++ b/mysql-test/suite/galera/t/galera_var_auto_inc_control_on.test @@ -5,24 +5,21 @@ --source include/galera_cluster.inc --source include/have_innodb.inc -# Save original auto_increment_offset values. 
---let $node_1=node_1 ---let $node_2=node_2 ---source include/auto_increment_offset_save.inc - --connection node_1 CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, node VARCHAR(10)) ENGINE=InnoDB; +--let $auto_increment_offset_node_1 = `SELECT @@global.auto_increment_offset` # auto_increment_increment is equal to the number of nodes -# auto_increment_offset is equal to the ID of the node - -SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; -SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_index'; SELECT @@auto_increment_increment = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'); -SELECT @@global.auto_increment_offset = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_index') + 1; + +--connection node_2 +--disable_query_log +--eval SELECT @@global.auto_increment_offset != $auto_increment_offset_node_1 AS auto_increment_offset_differ; +--enable_query_log # Expect no conflicts +--connection node_1 --send INSERT INTO t1 VALUES (DEFAULT, 'node1'); --connection node_2 @@ -34,12 +31,18 @@ SELECT @@global.auto_increment_offset = (SELECT VARIABLE_VALUE FROM INFORMATION_ --connection node_2 --reap -SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; -SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_index'; +--let $auto_increment_offset_node_2 = `SELECT @@global.auto_increment_offset` +# auto_increment_increment is equal to the number of nodes SELECT @@auto_increment_increment = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'); -SELECT @@global.auto_increment_offset = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_index') + 1; + +--connection node_1 +--disable_query_log +--eval SELECT @@global.auto_increment_offset != $auto_increment_offset_node_2 AS auto_increment_offset_differ; +--enable_query_log + # Expect no conflicts +--connection node_2 --send INSERT INTO t1 VALUES (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'), (DEFAULT, 'node2'); --connection node_1 diff --git a/mysql-test/suite/galera/t/galera_var_dirty_reads.test b/mysql-test/suite/galera/t/galera_var_dirty_reads.test index 138b7c1c703..3e2108868af 100644 --- a/mysql-test/suite/galera/t/galera_var_dirty_reads.test +++ b/mysql-test/suite/galera/t/galera_var_dirty_reads.test @@ -4,6 +4,7 @@ --source include/galera_cluster.inc --source include/have_innodb.inc +--source include/have_perfschema.inc # Save original auto_increment_offset values. --let $node_1=node_1 diff --git a/mysql-test/suite/galera/t/galera_var_retry_autocommit.test b/mysql-test/suite/galera/t/galera_var_retry_autocommit.test index bf4da3234c5..142f02546b4 100644 --- a/mysql-test/suite/galera/t/galera_var_retry_autocommit.test +++ b/mysql-test/suite/galera/t/galera_var_retry_autocommit.test @@ -1,98 +1,141 @@ # -# Test that the wsrep_retry_autocommit variable is respected. We use an INSERT that -# proceeds very slowly due to extra SLEEP() in a trigger +# Test that the wsrep_retry_autocommit variable is respected. 
# --source include/galera_cluster.inc --source include/have_innodb.inc +--source include/have_debug_sync.inc ---connection node_1 -CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB; -CREATE TRIGGER tr1 BEFORE INSERT ON t1 FOR EACH ROW SET NEW.f2 = SLEEP(5); +--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 # # With wsrep_retry_autocommit = 0, error is certain # --connection node_1 +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; + SET SESSION wsrep_retry_autocommit = 0; ---send INSERT INTO t1 (f1) VALUES (1),(2); +SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue'; +--send INSERT INTO t1 (f1) VALUES (2) + +--connection node_1a +SET DEBUG_SYNC = 'now WAIT_FOR before_rep'; --connection node_2 ---sleep 1 TRUNCATE TABLE t1; --connection node_1 --error ER_LOCK_DEADLOCK --reap +SELECT COUNT(*) = 0 FROM t1; + +SET DEBUG_SYNC = 'RESET'; +DROP TABLE t1; + # # With wsrep_retry_autocommit = 1, success against one TRUNCATE # --connection node_1 +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; + SET SESSION wsrep_retry_autocommit = 1; ---send INSERT INTO t1 (f1) VALUES (3),(4); +SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue'; +--send INSERT INTO t1 (f1) VALUES (2) + +--connection node_1a +SET DEBUG_SYNC = 'now WAIT_FOR before_rep'; --connection node_2 ---sleep 1 TRUNCATE TABLE t1; --connection node_1 ---error 0 --reap -SELECT * FROM test.t1; - -# -# With wsrep_retry_autocommit = 1, failure against multiple TRUNCATEs -# - ---connection node_2 -DELIMITER |; -CREATE PROCEDURE repeated_truncate () -BEGIN - DECLARE i INT; - DECLARE CONTINUE HANDLER FOR SQLEXCEPTION BEGIN END; - - SET i = 0; - WHILE i <= 1000 DO - TRUNCATE TABLE t1; - SET i = i + 1; - END WHILE; -END| -DELIMITER ;| - -# Begin streaming TRUNCATEs ---let $truncate_connection_id = `SELECT CONNECTION_ID()` ---send CALL repeated_truncate() - ---connection node_1 -SET SESSION wsrep_retry_autocommit = 1; ---sleep 1 ---error ER_LOCK_DEADLOCK -INSERT INTO t1 (f1) VALUES (5),(6); - -# -# With wsrep_retry_autocommit = 1024, success against multiple TRUNCATEs -# - ---connection node_1 -SET SESSION wsrep_retry_autocommit = 1024; ---send INSERT INTO t1 (f1) VALUES (7),(8); - ---sleep 6 - -# Once he stream of TRUNCATEs is complete ---connection node_2 ---reap - -# the INSERT will eventually be sucessfull ---connection node_1 ---error 0 ---reap - ---let $diff_servers = 1 2 ---source include/diff_servers.inc +SELECT COUNT(*) = 1 FROM t1; +SET DEBUG_SYNC = 'RESET'; +DROP TABLE t1; + + +# +# With wsrep_retry_autcommit = 1, failure against multiple TRUNCATEs +# + +--connection node_1 +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; + +SET SESSION wsrep_retry_autocommit = 1; +SET GLOBAL debug_dbug = '+d,sync.wsrep_retry_autocommit'; +SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue EXECUTE 2'; + +--send INSERT INTO t1 VALUES (2); + +--connection node_1a +SET DEBUG_SYNC = 'now WAIT_FOR before_rep'; + +--connection node_2 +TRUNCATE TABLE t1; + +--connection node_1a +SET DEBUG_SYNC = 'now WAIT_FOR wsrep_retry_autocommit_reached'; +SELECT COUNT(*) = 0 FROM t1; +SET DEBUG_SYNC = 'now SIGNAL wsrep_retry_autocommit_continue WAIT_FOR before_rep'; + +--connection node_2 +TRUNCATE TABLE t1; + +--connection node_1a +SELECT COUNT(*) = 0 FROM t1; + +--connection node_1 +--error ER_LOCK_DEADLOCK +--reap + +SET DEBUG_SYNC = 'RESET'; +SET GLOBAL debug_dbug = NULL; +DROP TABLE t1; + + +# +# With wsrep_retry_autocommit = 64, success 
against 64 TRUNCATEs +# + +--connection node_1 +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; + +SET SESSION wsrep_retry_autocommit = 64; +SET GLOBAL debug_dbug = '+d,sync.wsrep_retry_autocommit'; +SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue EXECUTE 64'; + +--send INSERT INTO t1 VALUES (2) + +--disable_query_log +--disable_result_log +--let $count = 64 +while ($count) +{ + --connection node_1a + SET DEBUG_SYNC = 'now WAIT_FOR before_rep'; + + --connection node_2 + TRUNCATE TABLE t1; + + --connection node_1a + SET DEBUG_SYNC = 'now WAIT_FOR wsrep_retry_autocommit_reached'; + SELECT COUNT(*) = 1 FROM t1; + SET DEBUG_SYNC = 'now SIGNAL wsrep_retry_autocommit_continue'; + + --dec $count +} +--enable_result_log +--enable_query_log + +--connection node_1 +--reap +SELECT COUNT(*) = 1 FROM t1; + +SET DEBUG_SYNC = 'RESET'; +SET GLOBAL debug_dbug = NULL; DROP TABLE t1; -DROP PROCEDURE repeated_truncate; diff --git a/mysql-test/suite/galera/t/pxc-421.test b/mysql-test/suite/galera/t/pxc-421.test index 7bb2354853b..33a2b157f18 100644 --- a/mysql-test/suite/galera/t/pxc-421.test +++ b/mysql-test/suite/galera/t/pxc-421.test @@ -36,9 +36,7 @@ INSERT INTO t1 VALUES (3); --eval SET GLOBAL wsrep_cluster_address = '$wsrep_cluster_address_orig'; --enable_query_log -set SESSION wsrep_sync_wait=0; --source include/wait_until_connected_again.inc ---source include/galera_wait_ready.inc INSERT INTO t1 VALUES (4); set GLOBAL wsrep_slave_threads=5; diff --git a/mysql-test/suite/gcol/r/innodb_virtual_basic.result b/mysql-test/suite/gcol/r/innodb_virtual_basic.result index 0c8880a4799..e886559a06c 100644 --- a/mysql-test/suite/gcol/r/innodb_virtual_basic.result +++ b/mysql-test/suite/gcol/r/innodb_virtual_basic.result @@ -895,8 +895,6 @@ DROP TABLE t; CREATE TABLE t(a TEXT CHARSET UTF8)ENGINE=INNODB; ALTER TABLE t ADD COLUMN b BLOB GENERATED ALWAYS AS (a) VIRTUAL ; ALTER TABLE t ADD FULLTEXT INDEX (a) ; -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID ALTER TABLE t ADD INDEX (b(1)) ; DROP TABLE t; CREATE TABLE t(a TEXT CHARSET UTF8, FULLTEXT INDEX(a))ENGINE=INNODB; diff --git a/mysql-test/suite/gcol/r/innodb_virtual_index.result b/mysql-test/suite/gcol/r/innodb_virtual_index.result index b1f7976c6c0..48efd4edeb1 100644 --- a/mysql-test/suite/gcol/r/innodb_virtual_index.result +++ b/mysql-test/suite/gcol/r/innodb_virtual_index.result @@ -119,8 +119,6 @@ FULLTEXT KEY `ftsic` (`c`,`b`) Warnings: Note 1831 Duplicate index `vbidxcol_2`. This is deprecated and will be disallowed in a future release ALTER TABLE ibstd_08 ADD COLUMN nc07006 BIGINT AUTO_INCREMENT NOT NULL , ADD KEY auto_nc07006(nc07006); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID DROP TABLE ibstd_08; # # Bug 22899305 - GCOLS: FAILING ASSERTION: !(COL->PRTYPE & 256) @@ -183,8 +181,6 @@ ALTER TABLE t1 ADD COLUMN col7a INT GENERATED ALWAYS AS (col5x % col6x) VIRTUAL, ADD FULLTEXT KEY ftidx ( col9 ), algorithm=inplace; ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: INPLACE ADD or DROP of virtual columns cannot be combined with other ALTER TABLE actions. Try ALGORITHM=COPY CREATE FULLTEXT INDEX idx ON t1(col9); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID ALTER TABLE t1 ADD COLUMN col7a INT GENERATED ALWAYS AS (col5x % col6x) VIRTUAL, ADD FULLTEXT KEY ftidx ( col9 ), algorithm=inplace; DROP TABLE t1; @@ -198,3 +194,16 @@ VIRTUAL, ADD UNIQUE index idx (col1), algorithm=inplace; ERROR 0A000: ALGORITHM=INPLACE is not supported. 
Reason: INPLACE ADD or DROP of virtual columns cannot be combined with other ALTER TABLE actions. Try ALGORITHM=COPY DROP TABLE t1; SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency; +# +# Bug 27122803 - BACKPORT FIX FOR BUG 25899959 TO MYSQL-5.7 +# +CREATE TABLE t1 (col1 int(10)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; +ALTER TABLE t1 ADD col2 char(21) AS (col1 * col1), ADD INDEX n (col2); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `col1` int(10) DEFAULT NULL, + `col2` char(21) GENERATED ALWAYS AS (`col1` * `col1`) VIRTUAL, + KEY `n` (`col2`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 +DROP TABLE t1; diff --git a/mysql-test/suite/gcol/t/innodb_virtual_index.test b/mysql-test/suite/gcol/t/innodb_virtual_index.test index 432faeb65ae..6604a6d94f4 100644 --- a/mysql-test/suite/gcol/t/innodb_virtual_index.test +++ b/mysql-test/suite/gcol/t/innodb_virtual_index.test @@ -224,3 +224,11 @@ VIRTUAL, ADD UNIQUE index idx (col1), algorithm=inplace; DROP TABLE t1; SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency; + +--echo # +--echo # Bug 27122803 - BACKPORT FIX FOR BUG 25899959 TO MYSQL-5.7 +--echo # +CREATE TABLE t1 (col1 int(10)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; +ALTER TABLE t1 ADD col2 char(21) AS (col1 * col1), ADD INDEX n (col2); +SHOW CREATE TABLE t1; +DROP TABLE t1; diff --git a/mysql-test/suite/handler/heap.result b/mysql-test/suite/handler/heap.result index fc42e43f710..9679b48a367 100644 --- a/mysql-test/suite/handler/heap.result +++ b/mysql-test/suite/handler/heap.result @@ -1738,7 +1738,7 @@ connection default; CREATE TABLE t1(a INT, b INT, KEY(a), KEY b using btree (b), KEY ab using btree(a, b)) engine=memory; INSERT INTO t1 VALUES (2, 20), (2,20), (1, 10), (4, 40), (3, 30), (5,50), (6,50); HANDLER t1 OPEN; -HANDLER t1 READ a>=(2) limit 3; +HANDLER t1 READ a=(2) limit 3; a b 2 20 2 20 @@ -1841,3 +1841,22 @@ ERROR HY000: Storage engine MEMORY of the table `test`.`t1` doesn't have this op HANDLER t1 CLOSE; DROP TABLE t1; End of 5.3 tests +# +# MDEV-15813 ASAN use-after-poison in hp_hashnr upon +# HANDLER READ on a versioned HEAP table +# +CREATE TABLE t1 (a INT NOT NULL, b INT NOT NULL, CONSTRAINT PRIMARY KEY (a, b), UNIQUE ba(b, a) USING HASH) ENGINE=HEAP; +INSERT INTO t1 VALUES (1, 10), (2, 20), (3,30), (4,40); +HANDLER t1 OPEN AS m; +HANDLER m READ `PRIMARY`= (3,30); +a b +3 30 +HANDLER m READ `PRIMARY`> (3,30); +ERROR HY000: HASH index `PRIMARY` does not support this operation +HANDLER m READ `ba`= (30,3); +a b +3 30 +HANDLER m READ `ba`= (30); +ERROR HY000: HASH index `ba` does not support this operation +HANDLER m CLOSE; +DROP TABLE t1; diff --git a/mysql-test/suite/handler/heap.test b/mysql-test/suite/handler/heap.test index bc070cf743f..d60f92daa76 100644 --- a/mysql-test/suite/handler/heap.test +++ b/mysql-test/suite/handler/heap.test @@ -18,7 +18,7 @@ CREATE TABLE t1(a INT, b INT, KEY(a), KEY b using btree (b), KEY ab using btree( INSERT INTO t1 VALUES (2, 20), (2,20), (1, 10), (4, 40), (3, 30), (5,50), (6,50); HANDLER t1 OPEN; -HANDLER t1 READ a>=(2) limit 3; +HANDLER t1 READ a=(2) limit 3; HANDLER t1 READ a PREV; HANDLER t1 READ a PREV; HANDLER t1 READ a PREV; @@ -85,3 +85,21 @@ HANDLER t1 CLOSE; DROP TABLE t1; --echo End of 5.3 tests + +--echo # +--echo # MDEV-15813 ASAN use-after-poison in hp_hashnr upon +--echo # HANDLER READ on a versioned HEAP table +--echo # + +CREATE TABLE t1 (a INT NOT NULL, b INT NOT NULL, CONSTRAINT PRIMARY KEY (a, b), UNIQUE ba(b, a) USING HASH) ENGINE=HEAP; +INSERT INTO t1 VALUES (1, 
10), (2, 20), (3,30), (4,40); +HANDLER t1 OPEN AS m; +HANDLER m READ `PRIMARY`= (3,30); +--error ER_KEY_DOESNT_SUPPORT +HANDLER m READ `PRIMARY`> (3,30); +HANDLER m READ `ba`= (30,3); +--error ER_KEY_DOESNT_SUPPORT +HANDLER m READ `ba`= (30); +HANDLER m CLOSE; +DROP TABLE t1; + diff --git a/mysql-test/suite/handler/innodb.test b/mysql-test/suite/handler/innodb.test index d752da7dc31..6527c4bb8bb 100644 --- a/mysql-test/suite/handler/innodb.test +++ b/mysql-test/suite/handler/innodb.test @@ -9,11 +9,6 @@ # rename t/innodb_handler.test to t/handler_innodb.test # -if (`select plugin_auth_version < "5.6.15" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB before 5.6.15 -} - --source include/have_innodb.inc let $engine_type= InnoDB; diff --git a/mysql-test/suite/handler/interface.result b/mysql-test/suite/handler/interface.result index a4ac32c16b4..c4a169be185 100644 --- a/mysql-test/suite/handler/interface.result +++ b/mysql-test/suite/handler/interface.result @@ -312,3 +312,25 @@ Note 1050 Table 'v' already exists handler v read next; ERROR 42S02: Unknown table 'v' in HANDLER drop view v; +# +# MDEV-15813 ASAN use-after-poison in hp_hashnr upon +# HANDLER READ on a versioned HEAP table +# +CREATE TABLE t1 (g GEOMETRY NOT NULL, SPATIAL gi(g)); +INSERT INTO t1 VALUES (POINT(0,0)); +HANDLER t1 OPEN AS h; +HANDLER h READ `gi`= (10); +ERROR HY000: SPATIAL index `gi` does not support this operation +HANDLER h READ `gi`> (10); +ERROR HY000: SPATIAL index `gi` does not support this operation +HANDLER h CLOSE; +DROP TABLE t1; +CREATE TABLE t1 (w VARCHAR(100), FULLTEXT fk(w)); +INSERT INTO t1 VALUES ('one two three'); +HANDLER t1 OPEN AS h; +HANDLER h READ `fk`= (10); +ERROR HY000: FULLTEXT index `fk` does not support this operation +HANDLER h READ `fk`> (10); +ERROR HY000: FULLTEXT index `fk` does not support this operation +HANDLER h CLOSE; +DROP TABLE t1; diff --git a/mysql-test/suite/handler/interface.test b/mysql-test/suite/handler/interface.test index 2f576c9b291..15853dfdbf5 100644 --- a/mysql-test/suite/handler/interface.test +++ b/mysql-test/suite/handler/interface.test @@ -354,3 +354,28 @@ execute stmt; --error ER_UNKNOWN_TABLE handler v read next; drop view v; + +--echo # +--echo # MDEV-15813 ASAN use-after-poison in hp_hashnr upon +--echo # HANDLER READ on a versioned HEAP table +--echo # + +CREATE TABLE t1 (g GEOMETRY NOT NULL, SPATIAL gi(g)); +INSERT INTO t1 VALUES (POINT(0,0)); +HANDLER t1 OPEN AS h; +--error ER_KEY_DOESNT_SUPPORT +HANDLER h READ `gi`= (10); +--error ER_KEY_DOESNT_SUPPORT +HANDLER h READ `gi`> (10); +HANDLER h CLOSE; +DROP TABLE t1; + +CREATE TABLE t1 (w VARCHAR(100), FULLTEXT fk(w)); +INSERT INTO t1 VALUES ('one two three'); +HANDLER t1 OPEN AS h; +--error ER_KEY_DOESNT_SUPPORT +HANDLER h READ `fk`= (10); +--error ER_KEY_DOESNT_SUPPORT +HANDLER h READ `fk`> (10); +HANDLER h CLOSE; +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/include/alter_instant.inc b/mysql-test/suite/innodb/include/alter_instant.inc new file mode 100644 index 00000000000..cf0c082416b --- /dev/null +++ b/mysql-test/suite/innodb/include/alter_instant.inc @@ -0,0 +1,33 @@ +CREATE TABLE t1(f1 INT NOT NULL, + f2 INT NOT NULL, + f3 INT AS (f2 * f2) VIRTUAL)engine=innodb; + +INSERT INTO t1(f1, f2) VALUES(1, 1); + +--echo # +--echo # ALGORITHM=$algorithm_type +--echo # + +--enable_info +--echo # Add column at the end of the table +--error $error_code +--eval ALTER TABLE t1 ADD COLUMN f4 char(100) default "BIG WALL", ALGORITHM=$algorithm_type + +--echo # 
Change virtual column expression +--error $error_code +--eval ALTER TABLE t1 CHANGE f3 f3 INT AS (f2 * f2) VIRTUAL, ALGORITHM=$algorithm_type + +--echo # Add virtual column +--error $error_code +--eval ALTER TABLE t1 ADD COLUMN f5 INT AS (f2) VIRTUAL, ALGORITHM=$algorithm_type + +--echo # Rename Column +--error $error_code +--eval ALTER TABLE t1 CHANGE f3 vcol INT AS (f2) VIRTUAL, ALGORITHM=$algorithm_type + +--echo # Rename table +--error $error_code +--eval ALTER TABLE t1 RENAME t2, algorithm=$algorithm_type + +DROP TABLE t2; +--disable_info diff --git a/mysql-test/suite/innodb/include/alter_nocopy.inc b/mysql-test/suite/innodb/include/alter_nocopy.inc new file mode 100644 index 00000000000..6b19d244bd9 --- /dev/null +++ b/mysql-test/suite/innodb/include/alter_nocopy.inc @@ -0,0 +1,33 @@ +CREATE TABLE t1(f1 INT PRIMARY KEY, f2 INT NOT NULL, + f3 INT AS (f2 * f2) VIRTUAL, + f4 INT NOT NULL UNIQUE, + f5 INT NOT NULL, + INDEX`idx`(f2))ENGINE=INNODB; + +CREATE TABLE t2(f1 INT NOT NULL, f2 INT NOT NULL, + INDEX(f1), + FOREIGN KEY `fidx` (f1) REFERENCES t1(f1))ENGINE=INNODB; + +INSERT INTO t1(f1, f2, f4, f5) VALUES(1, 2, 3, 4); + +SELECT @@alter_algorithm; + +--enable_info +--error $error_code +--eval ALTER TABLE t1 ADD INDEX idx1(f4) + +--error $error_code +--eval ALTER TABLE t1 DROP INDEX idx + +--error $error_code +--eval ALTER TABLE t1 ADD UNIQUE INDEX u1(f2) + +--error $error_code +--eval ALTER TABLE t1 DROP INDEX f4 + +SET foreign_key_checks = 0; +--error $error_code +--eval ALTER TABLE t1 ADD FOREIGN KEY(f5) REFERENCES t2(f1) + +DROP TABLE t2, t1; +--disable_info diff --git a/mysql-test/suite/innodb/include/alter_nocopy_fail.inc b/mysql-test/suite/innodb/include/alter_nocopy_fail.inc new file mode 100644 index 00000000000..a075cf96e3c --- /dev/null +++ b/mysql-test/suite/innodb/include/alter_nocopy_fail.inc @@ -0,0 +1,51 @@ +CREATE TABLE t1(f1 INT NOT NULL, + f2 INT NOT NULL, + f3 INT NULL, + f4 INT as (f2) STORED, + f5 INT as (f3) STORED, + PRIMARY KEY(f1))ROW_FORMAT=COMPRESSED, ENGINE=INNODB; +INSERT INTO t1(f1, f2, f3) VALUES(1, 1, 1); + +SELECT @@alter_algorithm; + +--enable_info +--echo # All the following cases needs table rebuild + +--echo # Add and Drop primary key +--error $error_code +--eval ALTER TABLE t1 ADD COLUMN col1 INT NOT NULL,DROP PRIMARY KEY,ADD PRIMARY KEY(col1) + +--echo # Make existing column NULLABLE +--error $error_code +--eval ALTER TABLE t1 MODIFY f2 INT + +--echo # Make existing column NON-NULLABLE +--error $error_code +--eval ALTER TABLE t1 MODIFY f3 INT NOT NULL + +--echo # Drop Stored Column +--error $error_code +--eval ALTER TABLE t1 DROP COLUMN f5 + +--echo # Add base non-generated column as a last column in the compressed table +--error $error_code +--eval ALTER TABLE t1 ADD COLUMN f6 INT NOT NULL + +--echo # Add base non-generated column but not in the last position +--error $error_code +--eval ALTER TABLE t1 ADD COLUMN f7 INT NOT NULL after f3 + +--echo # Force the table to rebuild +--error $error_code +--eval ALTER TABLE t1 FORCE + +--echo # Row format changes +--error $error_code +--eval ALTER TABLE t1 ROW_FORMAT=COMPRESSED + +--echo # Engine table +--error $error_code +--eval ALTER TABLE t1 ENGINE=INNODB + +DROP TABLE t1; +--disable_info diff --git a/mysql-test/suite/innodb/r/alter_algorithm,COPY.rdiff b/mysql-test/suite/innodb/r/alter_algorithm,COPY.rdiff new file mode 100644 index 00000000000..be71e125e22 --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_algorithm,COPY.rdiff @@ -0,0 +1,92 @@ +--- alter_algorithm.result 2018-05-06 
23:42:08.022302601 +0530 ++++ alter_algorithm.reject 2018-05-06 23:42:16.382634082 +0530 +@@ -7,35 +7,44 @@ + INSERT INTO t1(f1, f2, f3) VALUES(1, 1, 1); + SELECT @@alter_algorithm; + @@alter_algorithm +-NOCOPY ++COPY + # All the following cases needs table rebuild + # Add and Drop primary key + ALTER TABLE t1 ADD COLUMN col1 INT NOT NULL,DROP PRIMARY KEY,ADD PRIMARY KEY(col1); +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + # Make existing column NULLABLE + ALTER TABLE t1 MODIFY f2 INT; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + # Make existing column NON-NULLABLE + ALTER TABLE t1 MODIFY f3 INT NOT NULL; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + # Drop Stored Column + ALTER TABLE t1 DROP COLUMN f5; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + # Add base non-generated column as a last column in the compressed table + ALTER TABLE t1 ADD COLUMN f6 INT NOT NULL; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + # Add base non-generated column but not in the last position + ALTER TABLE t1 ADD COLUMN f7 INT NOT NULL after f3; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + # Force the table to rebuild + ALTER TABLE t1 FORCE; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + # Row format changes + ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + # Engine table + ALTER TABLE t1 ENGINE=INNODB; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. 
Try ALGORITHM=INPLACE ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + DROP TABLE t1; + affected rows: 0 + CREATE TABLE t1(f1 INT PRIMARY KEY, f2 INT NOT NULL, +@@ -49,23 +58,23 @@ + INSERT INTO t1(f1, f2, f4, f5) VALUES(1, 2, 3, 4); + SELECT @@alter_algorithm; + @@alter_algorithm +-NOCOPY ++COPY + ALTER TABLE t1 ADD INDEX idx1(f4); +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + ALTER TABLE t1 DROP INDEX idx; +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + ALTER TABLE t1 ADD UNIQUE INDEX u1(f2); +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + ALTER TABLE t1 DROP INDEX f4; +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + SET foreign_key_checks = 0; + affected rows: 0 + ALTER TABLE t1 ADD FOREIGN KEY(f5) REFERENCES t2(f1); +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + DROP TABLE t2, t1; + affected rows: 0 diff --git a/mysql-test/suite/innodb/r/alter_algorithm,INPLACE.rdiff b/mysql-test/suite/innodb/r/alter_algorithm,INPLACE.rdiff new file mode 100644 index 00000000000..71891bbf473 --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_algorithm,INPLACE.rdiff @@ -0,0 +1,66 @@ +--- alter_algorithm.result 2018-05-06 23:42:08.022302601 +0530 ++++ alter_algorithm.reject 2018-05-06 23:45:23.813346814 +0530 +@@ -7,35 +7,44 @@ + INSERT INTO t1(f1, f2, f3) VALUES(1, 1, 1); + SELECT @@alter_algorithm; + @@alter_algorithm +-NOCOPY ++INPLACE + # All the following cases needs table rebuild + # Add and Drop primary key + ALTER TABLE t1 ADD COLUMN col1 INT NOT NULL,DROP PRIMARY KEY,ADD PRIMARY KEY(col1); +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 0 ++info: Records: 0 Duplicates: 0 Warnings: 0 + # Make existing column NULLABLE + ALTER TABLE t1 MODIFY f2 INT; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 0 ++info: Records: 0 Duplicates: 0 Warnings: 0 + # Make existing column NON-NULLABLE + ALTER TABLE t1 MODIFY f3 INT NOT NULL; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 0 ++info: Records: 0 Duplicates: 0 Warnings: 0 + # Drop Stored Column + ALTER TABLE t1 DROP COLUMN f5; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 0 ++info: Records: 0 Duplicates: 0 Warnings: 0 + # Add base non-generated column as a last column in the compressed table + ALTER TABLE t1 ADD COLUMN f6 INT NOT NULL; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 0 ++info: Records: 0 Duplicates: 0 Warnings: 0 + # Add base non-generated column but not in the last position + ALTER TABLE t1 ADD COLUMN f7 INT NOT NULL after f3; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 0 ++info: Records: 0 Duplicates: 0 Warnings: 0 + # Force the table to rebuild + ALTER TABLE t1 FORCE; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. 
Try ALGORITHM=INPLACE ++affected rows: 0 ++info: Records: 0 Duplicates: 0 Warnings: 0 + # Row format changes + ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 0 ++info: Records: 0 Duplicates: 0 Warnings: 0 + # Engine table + ALTER TABLE t1 ENGINE=INNODB; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++affected rows: 0 ++info: Records: 0 Duplicates: 0 Warnings: 0 + DROP TABLE t1; + affected rows: 0 + CREATE TABLE t1(f1 INT PRIMARY KEY, f2 INT NOT NULL, +@@ -49,7 +58,7 @@ + INSERT INTO t1(f1, f2, f4, f5) VALUES(1, 2, 3, 4); + SELECT @@alter_algorithm; + @@alter_algorithm +-NOCOPY ++INPLACE + ALTER TABLE t1 ADD INDEX idx1(f4); + affected rows: 0 + info: Records: 0 Duplicates: 0 Warnings: 0 diff --git a/mysql-test/suite/innodb/r/alter_algorithm,INSTANT.rdiff b/mysql-test/suite/innodb/r/alter_algorithm,INSTANT.rdiff new file mode 100644 index 00000000000..6e12b78fb9d --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_algorithm,INSTANT.rdiff @@ -0,0 +1,78 @@ +--- alter_algorithm.result 2018-05-06 23:42:08.022302601 +0530 ++++ alter_algorithm.reject 2018-05-06 23:46:08.482772800 +0530 +@@ -7,35 +7,35 @@ + INSERT INTO t1(f1, f2, f3) VALUES(1, 1, 1); + SELECT @@alter_algorithm; + @@alter_algorithm +-NOCOPY ++INSTANT + # All the following cases needs table rebuild + # Add and Drop primary key + ALTER TABLE t1 ADD COLUMN col1 INT NOT NULL,DROP PRIMARY KEY,ADD PRIMARY KEY(col1); +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++Got one of the listed errors + # Make existing column NULLABLE + ALTER TABLE t1 MODIFY f2 INT; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++Got one of the listed errors + # Make existing column NON-NULLABLE + ALTER TABLE t1 MODIFY f3 INT NOT NULL; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++Got one of the listed errors + # Drop Stored Column + ALTER TABLE t1 DROP COLUMN f5; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++Got one of the listed errors + # Add base non-generated column as a last column in the compressed table + ALTER TABLE t1 ADD COLUMN f6 INT NOT NULL; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++Got one of the listed errors + # Add base non-generated column but not in the last position + ALTER TABLE t1 ADD COLUMN f7 INT NOT NULL after f3; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++Got one of the listed errors + # Force the table to rebuild + ALTER TABLE t1 FORCE; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++Got one of the listed errors + # Row format changes + ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE ++Got one of the listed errors + # Engine table + ALTER TABLE t1 ENGINE=INNODB; +-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. 
Try ALGORITHM=INPLACE ++Got one of the listed errors + DROP TABLE t1; + affected rows: 0 + CREATE TABLE t1(f1 INT PRIMARY KEY, f2 INT NOT NULL, +@@ -49,23 +49,18 @@ + INSERT INTO t1(f1, f2, f4, f5) VALUES(1, 2, 3, 4); + SELECT @@alter_algorithm; + @@alter_algorithm +-NOCOPY ++INSTANT + ALTER TABLE t1 ADD INDEX idx1(f4); +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: ADD INDEX. Try ALGORITHM=NOCOPY + ALTER TABLE t1 DROP INDEX idx; +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: DROP INDEX. Try ALGORITHM=NOCOPY + ALTER TABLE t1 ADD UNIQUE INDEX u1(f2); +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: ADD INDEX. Try ALGORITHM=NOCOPY + ALTER TABLE t1 DROP INDEX f4; +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: DROP INDEX. Try ALGORITHM=NOCOPY + SET foreign_key_checks = 0; + affected rows: 0 + ALTER TABLE t1 ADD FOREIGN KEY(f5) REFERENCES t2(f1); +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: ADD INDEX. Try ALGORITHM=NOCOPY + DROP TABLE t2, t1; + affected rows: 0 diff --git a/mysql-test/suite/innodb/r/alter_algorithm.result b/mysql-test/suite/innodb/r/alter_algorithm.result new file mode 100644 index 00000000000..ee91159bf7a --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_algorithm.result @@ -0,0 +1,71 @@ +CREATE TABLE t1(f1 INT NOT NULL, +f2 INT NOT NULL, +f3 INT NULL, +f4 INT as (f2) STORED, +f5 INT as (f3) STORED, +PRIMARY KEY(f1))ROW_FORMAT=COMPRESSED, ENGINE=INNODB; +INSERT INTO t1(f1, f2, f3) VALUES(1, 1, 1); +SELECT @@alter_algorithm; +@@alter_algorithm +NOCOPY +# All the following cases needs table rebuild +# Add and Drop primary key +ALTER TABLE t1 ADD COLUMN col1 INT NOT NULL,DROP PRIMARY KEY,ADD PRIMARY KEY(col1); +ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE +# Make existing column NULLABLE +ALTER TABLE t1 MODIFY f2 INT; +ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE +# Make existing column NON-NULLABLE +ALTER TABLE t1 MODIFY f3 INT NOT NULL; +ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE +# Drop Stored Column +ALTER TABLE t1 DROP COLUMN f5; +ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE +# Add base non-generated column as a last column in the compressed table +ALTER TABLE t1 ADD COLUMN f6 INT NOT NULL; +ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE +# Add base non-generated column but not in the last position +ALTER TABLE t1 ADD COLUMN f7 INT NOT NULL after f3; +ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE +# Force the table to rebuild +ALTER TABLE t1 FORCE; +ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE +# Row format changes +ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE +# Engine table +ALTER TABLE t1 ENGINE=INNODB; +ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. 
Try ALGORITHM=INPLACE +DROP TABLE t1; +affected rows: 0 +CREATE TABLE t1(f1 INT PRIMARY KEY, f2 INT NOT NULL, +f3 INT AS (f2 * f2) VIRTUAL, +f4 INT NOT NULL UNIQUE, +f5 INT NOT NULL, +INDEX`idx`(f2))ENGINE=INNODB; +CREATE TABLE t2(f1 INT NOT NULL, f2 INT NOT NULL, +INDEX(f1), +FOREIGN KEY `fidx` (f1) REFERENCES t1(f1))ENGINE=INNODB; +INSERT INTO t1(f1, f2, f4, f5) VALUES(1, 2, 3, 4); +SELECT @@alter_algorithm; +@@alter_algorithm +NOCOPY +ALTER TABLE t1 ADD INDEX idx1(f4); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE t1 DROP INDEX idx; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE t1 ADD UNIQUE INDEX u1(f2); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE t1 DROP INDEX f4; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +SET foreign_key_checks = 0; +affected rows: 0 +ALTER TABLE t1 ADD FOREIGN KEY(f5) REFERENCES t2(f1); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +DROP TABLE t2, t1; +affected rows: 0 diff --git a/mysql-test/suite/innodb/r/alter_copy.result b/mysql-test/suite/innodb/r/alter_copy.result index 286c5152ded..ac25c6c6c18 100644 --- a/mysql-test/suite/innodb/r/alter_copy.result +++ b/mysql-test/suite/innodb/r/alter_copy.result @@ -73,6 +73,7 @@ FTSBEING_DELETED_CACHE.ibd FTSCONFIG.ibd FTSDELETED.ibd FTSDELETED_CACHE.ibd +db.opt t.frm t.ibd t1.frm @@ -140,6 +141,7 @@ FTSBEING_DELETED_CACHE.ibd FTSCONFIG.ibd FTSDELETED.ibd FTSDELETED_CACHE.ibd +db.opt t.frm t.ibd t1.frm @@ -206,6 +208,7 @@ FTSBEING_DELETED_CACHE.ibd FTSCONFIG.ibd FTSDELETED.ibd FTSDELETED_CACHE.ibd +db.opt t.frm t.ibd t1.frm diff --git a/mysql-test/suite/innodb/r/alter_crash.result b/mysql-test/suite/innodb/r/alter_crash.result index 5bf25cf8592..3c3aaa68b6a 100644 --- a/mysql-test/suite/innodb/r/alter_crash.result +++ b/mysql-test/suite/innodb/r/alter_crash.result @@ -48,6 +48,7 @@ SELECT * FROM information_schema.innodb_sys_tables WHERE table_id = ID; TABLE_ID NAME FLAG N_COLS SPACE ROW_FORMAT ZIP_PAGE_SIZE SPACE_TYPE # Files in datadir after manual recovery. +db.opt t1.frm t1.ibd SHOW TABLES; @@ -105,6 +106,7 @@ DROP TABLE t2; CREATE TABLE t2 (f1 INT NOT NULL, f2 INT NOT NULL) ENGINE=InnoDB; ALTER TABLE t2 ADD PRIMARY KEY (f2, f1); DROP TABLE t2; +db.opt # ------------------------- # End of Testing Scenario 2 # ------------------------- @@ -123,6 +125,7 @@ SELECT * FROM information_schema.innodb_sys_tables WHERE table_id = ID; TABLE_ID NAME FLAG N_COLS SPACE ROW_FORMAT ZIP_PAGE_SIZE SPACE_TYPE # Files in datadir after manual recovery. 
+db.opt t1.frm t1.ibd SHOW TABLES; diff --git a/mysql-test/suite/innodb/r/alter_foreign_crash.result b/mysql-test/suite/innodb/r/alter_foreign_crash.result new file mode 100644 index 00000000000..66ffb5f5411 --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_foreign_crash.result @@ -0,0 +1,26 @@ +# +# Bug #20476395 DICT_LOAD_FOREIGNS() FAILED IN +# COMMIT_INPLACE_ALTER_TABLE +# +call mtr.add_suppression("InnoDB: Failed to load table"); +create database bug; +use bug; +create table parent(a serial) engine=innodb; +create table child(a serial, foreign key fk (a) references parent(a))engine=innodb; +insert into parent values(1); +insert into child values(1); +connect con1,localhost,root,,bug; +SET DEBUG_SYNC='innodb_rename_table_ready SIGNAL s1 WAIT_FOR s2 EXECUTE 2'; +ALTER TABLE child ROW_FORMAT=DYNAMIC, ALGORITHM=COPY; +connection default; +SET DEBUG_SYNC='now WAIT_FOR s1'; +SET DEBUG_SYNC='now SIGNAL s2 WAIT_FOR s1'; +disconnect con1; +show tables; +Tables_in_bug +parent +alter table parent row_format=dynamic; +Warnings: +Warning 1088 InnoDB: Could not add foreign key constraints. +drop table parent; +drop database bug; diff --git a/mysql-test/suite/innodb/r/alter_instant,COPY.rdiff b/mysql-test/suite/innodb/r/alter_instant,COPY.rdiff new file mode 100644 index 00000000000..cb4a72614b9 --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_instant,COPY.rdiff @@ -0,0 +1,61 @@ +--- alter_instant.result 2018-04-10 11:19:46.299868465 +0530 ++++ alter_instant.reject 2018-04-10 11:21:19.648918489 +0530 +@@ -8,30 +8,30 @@ + INSERT INTO t1(f1, f2) VALUES(1, 1); + select @@alter_algorithm; + @@alter_algorithm +-NOCOPY ++COPY + # Add column at the end of the table + ALTER TABLE t1 ADD COLUMN f4 char(100) default "BIG WALL"; +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + # Change virtual column expression + ALTER TABLE t1 CHANGE f3 f3 INT AS (f2 * f2) VIRTUAL; +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + # Add virtual column + ALTER TABLE t1 ADD COLUMN f5 INT AS (f2) VIRTUAL; +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + # Rename Column + ALTER TABLE t1 CHANGE f3 vcol INT AS (f2) VIRTUAL; +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + # Rename table + ALTER TABLE t1 RENAME t3; + affected rows: 0 + # Drop Virtual Column + ALTER TABLE t3 DROP COLUMN vcol; +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + # Column length varies + ALTER TABLE t2 CHANGE f3 f3 VARCHAR(20); + affected rows: 0 +@@ -39,12 +39,12 @@ + SET foreign_key_checks = 0; + affected rows: 0 + ALTER TABLE t3 ADD FOREIGN KEY `fidx`(f2) REFERENCES t2(f1); +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + SET foreign_key_checks = 1; + affected rows: 0 + ALTER TABLE t3 DROP FOREIGN KEY `fidx`; +-affected rows: 0 +-info: Records: 0 Duplicates: 0 Warnings: 0 ++affected rows: 1 ++info: Records: 1 Duplicates: 0 Warnings: 0 + DROP TABLE t3, t2; + affected rows: 0 diff --git a/mysql-test/suite/innodb/r/alter_instant,INPLACE.rdiff b/mysql-test/suite/innodb/r/alter_instant,INPLACE.rdiff new file mode 100644 index 00000000000..ec80e1d8ef0 --- /dev/null +++ 
b/mysql-test/suite/innodb/r/alter_instant,INPLACE.rdiff @@ -0,0 +1,11 @@ +--- alter_instant.result 2018-04-10 11:19:46.299868465 +0530 ++++ alter_instant.reject 2018-04-10 11:22:19.433617807 +0530 +@@ -8,7 +8,7 @@ + INSERT INTO t1(f1, f2) VALUES(1, 1); + select @@alter_algorithm; + @@alter_algorithm +-NOCOPY ++INPLACE + # Add column at the end of the table + ALTER TABLE t1 ADD COLUMN f4 char(100) default "BIG WALL"; + affected rows: 0 diff --git a/mysql-test/suite/innodb/r/alter_instant,INSTANT.rdiff b/mysql-test/suite/innodb/r/alter_instant,INSTANT.rdiff new file mode 100644 index 00000000000..cf2f8a2d719 --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_instant,INSTANT.rdiff @@ -0,0 +1,11 @@ +--- alter_instant.result 2018-04-10 11:19:46.299868465 +0530 ++++ alter_instant.reject 2018-04-10 11:22:47.281949905 +0530 +@@ -8,7 +8,7 @@ + INSERT INTO t1(f1, f2) VALUES(1, 1); + select @@alter_algorithm; + @@alter_algorithm +-NOCOPY ++INSTANT + # Add column at the end of the table + ALTER TABLE t1 ADD COLUMN f4 char(100) default "BIG WALL"; + affected rows: 0 diff --git a/mysql-test/suite/innodb/r/alter_instant.result b/mysql-test/suite/innodb/r/alter_instant.result new file mode 100644 index 00000000000..ec64e41cd01 --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_instant.result @@ -0,0 +1,50 @@ +CREATE TABLE t1(f1 INT NOT NULL, +f2 INT NOT NULL, +f3 INT AS (f2 * f2) VIRTUAL, +INDEX idx (f2))engine=innodb; +CREATE TABLE t2(f1 INT NOT NULL, f2 INT NOT NULL, +f3 VARCHAR(10), +INDEX(f1))ENGINE=INNODB; +INSERT INTO t1(f1, f2) VALUES(1, 1); +select @@alter_algorithm; +@@alter_algorithm +NOCOPY +# Add column at the end of the table +ALTER TABLE t1 ADD COLUMN f4 char(100) default "BIG WALL"; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +# Change virtual column expression +ALTER TABLE t1 CHANGE f3 f3 INT AS (f2 * f2) VIRTUAL; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +# Add virtual column +ALTER TABLE t1 ADD COLUMN f5 INT AS (f2) VIRTUAL; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +# Rename Column +ALTER TABLE t1 CHANGE f3 vcol INT AS (f2) VIRTUAL; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +# Rename table +ALTER TABLE t1 RENAME t3; +affected rows: 0 +# Drop Virtual Column +ALTER TABLE t3 DROP COLUMN vcol; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +# Column length varies +ALTER TABLE t2 CHANGE f3 f3 VARCHAR(20); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +SET foreign_key_checks = 0; +affected rows: 0 +ALTER TABLE t3 ADD FOREIGN KEY `fidx`(f2) REFERENCES t2(f1); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +SET foreign_key_checks = 1; +affected rows: 0 +ALTER TABLE t3 DROP FOREIGN KEY `fidx`; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +DROP TABLE t3, t2; +affected rows: 0 diff --git a/mysql-test/suite/innodb/r/alter_kill.result b/mysql-test/suite/innodb/r/alter_kill.result new file mode 100644 index 00000000000..9b24fddf9ef --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_kill.result @@ -0,0 +1,78 @@ +# +# Bug#16720368 INNODB CRASHES ON BROKEN #SQL*.IBD FILE AT STARTUP +# +SET GLOBAL innodb_file_per_table=1; +CREATE TABLE bug16720368_1 (a INT PRIMARY KEY) ENGINE=InnoDB; +connect con1,localhost,root; +CREATE TABLE bug16720368 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO bug16720368 (a) VALUES (1),(2),(3),(4),(5),(6),(7),(8); +connection default; +# Cleanly shutdown mysqld +disconnect con1; +# Corrupt FIL_PAGE_OFFSET in bug16720368.ibd, +# 
and update the checksum to the "don't care" value. +# Restart mysqld +# This will succeed after a clean shutdown, due to +# fil_open_single_table_tablespace(check_space_id=FALSE). +SELECT COUNT(*) FROM bug16720368; +COUNT(*) +8 +INSERT INTO bug16720368_1 VALUES(1); +# The table is unaccessible, because after a crash we will +# validate the tablespace header. +SELECT COUNT(*) FROM bug16720368; +ERROR 42S02: Table 'test.bug16720368' doesn't exist in engine +INSERT INTO bug16720368 VALUES(0,1); +ERROR 42S02: Table 'test.bug16720368' doesn't exist in engine +# The table is readable thanks to innodb-force-recovery. +SELECT COUNT(*) FROM bug16720368; +COUNT(*) +8 +INSERT INTO bug16720368 VALUES(0,1); +# Shut down the server cleanly to hide the corruption. +# The table is accessible, because after a clean shutdown we will +# NOT validate the tablespace header. +# We can modify the existing pages, but we cannot allocate or free +# any pages, because that would hit the corruption on page 0. +SELECT COUNT(*) FROM bug16720368; +COUNT(*) +9 +# Shut down the server to uncorrupt the data. +# Restart the server after uncorrupting the file. +INSERT INTO bug16720368 VALUES(9,1); +SELECT COUNT(*) FROM bug16720368; +COUNT(*) +10 +DROP TABLE bug16720368, bug16720368_1; +# +# Bug#16735660 ASSERT TABLE2 == NULL, ROLLBACK OF RESURRECTED TXNS, +# DICT_TABLE_ADD_TO_CACHE +# +SET GLOBAL innodb_file_per_table=1; +CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; +BEGIN; +INSERT INTO t1 VALUES(42); +connect con1,localhost,root; +CREATE TABLE bug16735660 (a INT PRIMARY KEY) ENGINE=InnoDB; +XA START 'x'; +INSERT INTO bug16735660 VALUES(1),(2),(3); +XA END 'x'; +XA PREPARE 'x'; +connection default; +# Kill the server +disconnect con1; +# Attempt to start without an *.ibd file. 
+FOUND 1 /\[ERROR\] InnoDB: Tablespace [0-9]+ was not found at .*test.bug16735660.ibd/ in mysqld.1.err +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +SELECT * FROM bug16735660; +a +1 +2 +3 +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x'; +SELECT * FROM bug16735660; +a +DROP TABLE bug16735660; diff --git a/mysql-test/suite/innodb/r/alter_missing_tablespace.result b/mysql-test/suite/innodb/r/alter_missing_tablespace.result index 1517afd1a39..237d0df26ff 100644 --- a/mysql-test/suite/innodb/r/alter_missing_tablespace.result +++ b/mysql-test/suite/innodb/r/alter_missing_tablespace.result @@ -3,8 +3,10 @@ # OR DISCARDED TABLESPACES # SET GLOBAL innodb_file_per_table=1; -CREATE TABLE t(a INT)ENGINE=InnoDB; +CREATE TABLE t(a SERIAL)ENGINE=InnoDB; CREATE TABLE `x..d` (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +CREATE TABLE t1(a SERIAL)ENGINE=InnoDB; +INSERT INTO t1 VALUES(1),(2),(3); SELECT * FROM t; ERROR 42S02: Table 'test.t' doesn't exist in engine ALTER TABLE t ADD INDEX (a), ALGORITHM=INPLACE; @@ -13,11 +15,16 @@ SHOW WARNINGS; Level Code Message Warning 1812 Tablespace is missing for table 'test/t' Error 1932 Table 'test.t' doesn't exist in engine -ALTER TABLE t1 ADD INDEX (a), ALGORITHM=COPY; -ERROR 42S02: Table 'test.t1' doesn't exist +ALTER TABLE t ADD INDEX (a), ALGORITHM=COPY; +ERROR 42S02: Table 'test.t' doesn't exist in engine SHOW WARNINGS; Level Code Message -Error 1146 Table 'test.t1' doesn't exist +Warning 1812 Tablespace is missing for table 'test/t' +Error 1932 Table 'test.t' doesn't exist in engine +ALTER TABLE t AUTO_INCREMENT=1, ALGORITHM=INPLACE; +ERROR 42S02: Table 'test.t' doesn't exist in engine +ALTER TABLE t AUTO_INCREMENT=1, ALGORITHM=COPY; +ERROR 42S02: Table 'test.t' doesn't exist in engine ALTER TABLE t ALGORITHM=INPLACE, DISCARD TABLESPACE; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'DISCARD TABLESPACE' at line 1 ALTER TABLE t ALGORITHM=COPY, DISCARD TABLESPACE; @@ -32,3 +39,11 @@ DROP TABLE t; SELECT * FROM `x..d`; ERROR 42S02: Table 'test.x..d' doesn't exist in engine DROP TABLE `x..d`; +ALTER TABLE t1 DISCARD TABLESPACE; +ALTER TABLE t1 AUTO_INCREMENT=1, ALGORITHM=INPLACE; +ERROR HY000: Tablespace has been discarded for table `t1` +ALTER TABLE t1 AUTO_INCREMENT=1, FORCE, ALGORITHM=INPLACE; +ERROR HY000: Tablespace has been discarded for table `t1` +ALTER TABLE t1 AUTO_INCREMENT=1, ALGORITHM=COPY; +ERROR HY000: Tablespace has been discarded for table `t1` +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/alter_not_null.result b/mysql-test/suite/innodb/r/alter_not_null.result new file mode 100644 index 00000000000..8380378593d --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_not_null.result @@ -0,0 +1,88 @@ +set @@sql_mode = 'STRICT_TRANS_TABLES'; +CREATE TABLE t1(f1 INT)ENGINE=INNODB; +INSERT INTO t1 VALUES(NULL); +SELECT * FROM t1; +f1 +NULL +ALTER TABLE t1 CHANGE f1 f1 INT NOT NULL; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +SELECT * FROM t1; +f1 +0 +DROP TABLE t1; +CREATE TABLE t1(f1 CHAR(10))ENGINE=INNODB; +INSERT INTO t1 VALUES(NULL); +SELECT * FROM t1; +f1 +NULL +ALTER TABLE t1 CHANGE f1 f1 CHAR(10) NOT NULL; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +SELECT * FROM t1; +f1 + +DROP TABLE t1; +CREATE TABLE t1(f1 VARCHAR(10))ENGINE=INNODB; +INSERT INTO t1 VALUES(NULL); +SELECT * FROM t1; +f1 +NULL +ALTER TABLE t1 CHANGE f1 f1 VARCHAR(20) NOT NULL; +affected rows: 0 +info: 
Records: 0 Duplicates: 0 Warnings: 0 +SELECT * FROM t1; +f1 + +DROP TABLE t1; +CREATE TABLE t1(f1 TEXT)ENGINE=INNODB; +INSERT INTO t1 VALUES(NULL); +SELECT * FROM t1; +f1 +NULL +ALTER TABLE t1 CHANGE f1 f1 TEXT NOT NULL DEFAULT 'abc'; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +SELECT * FROM t1; +f1 +abc +DROP TABLE t1; +CREATE TABLE t1(f1 INT NOT NULL, f2 INT NOT NULL, f3 INT)ENGINE=INNODB; +INSERT INTO t1 VALUES(2, 2, NULL); +SELECT * FROM t1; +f1 f2 f3 +2 2 NULL +ALTER TABLE t1 CHANGE f3 f3 INT NOT NULL DEFAULT (f1 + f2), ALGORITHM=INPLACE; +ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: cannot convert NULL to non-constant DEFAULT. Try ALGORITHM=COPY +UPDATE t1 SET f3 = 0; +SELECT * FROM t1; +f1 f2 f3 +2 2 0 +ALTER TABLE t1 CHANGE f3 f3 INT NOT NULL DEFAULT (f1 + f2); +affected rows: 1 +info: Records: 1 Duplicates: 0 Warnings: 0 +SELECT * FROM t1; +f1 f2 f3 +2 2 0 +DROP TABLE t1; +CREATE TABLE t1(f1 INT NOT NULL DEFAULT 0, b TINYINT)ENGINE=InnoDB; +INSERT INTO t1 VALUES(10, NULL); +SELECT * FROM t1; +f1 b +10 NULL +ALTER TABLE t1 CHANGE b b TINYINT NOT NULL DEFAULT if(unix_timestamp()>1,1000,0), algorithm=INPLACE; +ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: cannot convert NULL to non-constant DEFAULT. Try ALGORITHM=COPY +DROP TABLE t1; +CREATE TABLE t1(a INT, v INT AS (a), c INT, d INT NOT NULL, e INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP COLUMN c, CHANGE COLUMN e e INT NOT NULL, ALGORITHM=INPLACE; +DROP TABLE t1; +CREATE TABLE t1 (a INT, v INT AS (a), d INT NOT NULL, e INT) ENGINE=InnoDB; +ALTER TABLE t1 FORCE, ALGORITHM=INPLACE; +DROP TABLE t1; +# +# MDEV-16126 Crash or ASAN heap-buffer-overflow in +# mach_read_from_n_little_endian upon ALTER TABLE with blob +# +CREATE TABLE t1(a INT, v INT AS (a), b INT, c BLOB) ENGINE=InnoDB; +ALTER TABLE t1 ADD PRIMARY KEY(b); +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/alter_not_null_debug.result b/mysql-test/suite/innodb/r/alter_not_null_debug.result new file mode 100644 index 00000000000..788eef6420b --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_not_null_debug.result @@ -0,0 +1,68 @@ +CREATE TABLE t1(c1 INT NOT NULL, c2 INT, PRIMARY KEY(c1))ENGINE=INNODB; +INSERT INTO t1 VALUES(1, NULL); +SET DEBUG_SYNC= 'row_merge_after_scan +SIGNAL opened WAIT_FOR flushed'; +ALTER TABLE t1 CHANGE c2 c2 INT NOT NULL DEFAULT 2, ALGORITHM=INPLACE; +connect con1,localhost,root; +SET DEBUG_SYNC= 'now WAIT_FOR opened'; +INSERT INTO t1 VALUES(2, NULL); +SET DEBUG_SYNC= 'now SIGNAL flushed'; +connection default; +ERROR 22004: Invalid use of NULL value +SELECT * FROM t1; +c1 c2 +1 NULL +2 NULL +UPDATE t1 SET c2 = 0 WHERE c1 = 2; +SET DEBUG_SYNC= 'row_merge_after_scan +SIGNAL opened WAIT_FOR flushed'; +# Alter ignore can convert the NULL values from +# CONCURRENT DML to constants +ALTER IGNORE TABLE t1 CHANGE c2 c2 INT NOT NULL DEFAULT 2, ALGORITHM=INPLACE; +connection con1; +SET DEBUG_SYNC= 'now WAIT_FOR opened'; +UPDATE t1 SET c2 = NULL WHERE c1 = 2; +INSERT INTO t1 VALUES (3, NULL); +SET DEBUG_SYNC= 'now SIGNAL flushed'; +connection default; +SELECT * FROM t1; +c1 c2 +1 2 +2 2 +3 2 +DROP TABLE t1; +CREATE TABLE t1(c1 INT NOT NULL, c2 INT, c3 INT, PRIMARY KEY(c1))ENGINE=INNODB; +INSERT INTO t1 VALUES(1, NULL, NULL); +SET DEBUG_SYNC= 'row_merge_after_scan +SIGNAL opened WAIT_FOR flushed'; +# Alter Successfully converts from null to not null +ALTER TABLE t1 CHANGE c2 c2 INT NOT NULL DEFAULT 2, ALGORITHM=INPLACE; +connection con1; +SET DEBUG_SYNC= 'now WAIT_FOR opened'; +UPDATE t1 SET c2= 2 WHERE c1 = 1; +INSERT 
INTO t1 VALUES (2, 3, 4); +SET DEBUG_SYNC= 'now SIGNAL flushed'; +connection default; +SELECT * FROM t1; +c1 c2 c3 +1 2 NULL +2 3 4 +SET DEBUG_SYNC= 'row_merge_after_scan +SIGNAL opened WAIT_FOR flushed'; +# Alter fails because concurrent dml inserts null value +ALTER TABLE t1 CHANGE c3 c3 INT NOT NULL DEFAULT 2, ALGORITHM=INPLACE; +connection con1; +SET DEBUG_SYNC= 'now WAIT_FOR opened'; +UPDATE t1 SET c3= 2 WHERE c1 = 2; +INSERT INTO t1 VALUES (4, 3, NULL); +SET DEBUG_SYNC= 'now SIGNAL flushed'; +connection default; +ERROR 22004: Invalid use of NULL value +SELECT * FROM t1; +c1 c2 c3 +1 2 NULL +2 3 2 +4 3 NULL +DROP TABLE t1; +disconnect con1; +SET DEBUG_SYNC='RESET'; diff --git a/mysql-test/suite/innodb/r/alter_partitioned.result b/mysql-test/suite/innodb/r/alter_partitioned.result new file mode 100644 index 00000000000..ee8233be5c1 --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_partitioned.result @@ -0,0 +1,10 @@ +CREATE TABLE t1(a INT, b VARCHAR(10), INDEX(a))ENGINE=InnoDB +PARTITION BY RANGE(a) +(PARTITION pa VALUES LESS THAN (3), +PARTITION pb VALUES LESS THAN (5)); +CREATE TABLE t2(a INT, FOREIGN KEY(a) REFERENCES t1(a))ENGINE=INNODB +PARTITION BY RANGE(a) +(PARTITION pa VALUES LESS THAN (2), +PARTITION pb VALUES LESS THAN (4)); +ERROR HY000: Foreign key clause is not yet supported in conjunction with partitioning +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/alter_partitioned_debug.result b/mysql-test/suite/innodb/r/alter_partitioned_debug.result new file mode 100644 index 00000000000..d2ec602c6d7 --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_partitioned_debug.result @@ -0,0 +1,27 @@ +CREATE TABLE t1 (a INT, b VARCHAR(10)) ENGINE=InnoDB +PARTITION BY RANGE(a) +(PARTITION pa VALUES LESS THAN (3), +PARTITION pb VALUES LESS THAN (5)); +INSERT INTO t1 VALUES(2,'two'),(2,'two'),(4,'four'); +connect ddl,localhost,root,,test; +SET DEBUG_SYNC = 'inplace_after_index_build SIGNAL go WAIT_FOR done'; +ALTER TABLE t1 ADD UNIQUE KEY (a,b(3)); +connection default; +SET DEBUG_SYNC = 'now WAIT_FOR go'; +BEGIN; +SELECT * FROM t1 FOR UPDATE; +a b +2 two +2 two +4 four +SET DEBUG_SYNC = 'now SIGNAL done'; +connection ddl; +ERROR 23000: Duplicate entry '2-two' for key 'a' +connection default; +DELETE FROM t1; +disconnect ddl; +SET DEBUG_SYNC = 'RESET'; +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/alter_partitioned_xa.result b/mysql-test/suite/innodb/r/alter_partitioned_xa.result new file mode 100644 index 00000000000..2d3add065b7 --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_partitioned_xa.result @@ -0,0 +1,18 @@ +# +# MDEV-14693 XA: Assertion `!clust_index->online_log' failed +# in rollback_inplace_alter_table +# +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB PARTITION BY HASH(a) PARTITIONS 2; +XA START 'xid'; +INSERT INTO t1 VALUES (1,10); +CREATE DATABASE IF NOT EXISTS db; +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the ACTIVE state +connect con1,localhost,root,,test; +SET innodb_lock_wait_timeout= 1, lock_wait_timeout= 2; +ALTER TABLE t1 FORCE; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +disconnect con1; +connection default; +XA END 'xid'; +XA ROLLBACK 'xid'; +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/alter_rename_files.result b/mysql-test/suite/innodb/r/alter_rename_files.result new file mode 100644 index 00000000000..7df63a051da --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_rename_files.result @@ -0,0 
+1,20 @@ +CREATE TABLE t1 (x INT NOT NULL UNIQUE KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES(5); +SET GLOBAL innodb_log_checkpoint_now=TRUE; +SET DEBUG_SYNC='commit_cache_rebuild SIGNAL ready WAIT_FOR finish'; +ALTER TABLE t1 ADD PRIMARY KEY(x); +connect con1,localhost,root,,; +SET DEBUG_SYNC='now WAIT_FOR ready'; +SET GLOBAL innodb_log_checkpoint_now=TRUE; +SET DEBUG_SYNC='now SIGNAL finish'; +disconnect con1; +connection default; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `x` int(11) NOT NULL, + PRIMARY KEY (`x`), + UNIQUE KEY `x` (`x`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +SET DEBUG_SYNC='RESET'; diff --git a/mysql-test/suite/innodb/r/analyze_table.result b/mysql-test/suite/innodb/r/analyze_table.result new file mode 100644 index 00000000000..a5c25289ad1 --- /dev/null +++ b/mysql-test/suite/innodb/r/analyze_table.result @@ -0,0 +1,25 @@ +CREATE PROCEDURE populate_t1() +BEGIN +DECLARE i int DEFAULT 1; +START TRANSACTION; +WHILE (i <= 1000000) DO +INSERT INTO t1 VALUES (i, i, CONCAT('a', i)); +SET i = i + 1; +END WHILE; +COMMIT; +END| +CREATE TABLE t1( +class INT, +id INT, +title VARCHAR(100) +) ENGINE=InnoDB; +SELECT COUNT(*) FROM t1; +COUNT(*) +1000000 +SET GLOBAL innodb_stats_persistent_sample_pages=2000; +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +DROP TABLE t1; +DROP PROCEDURE populate_t1; +SET GLOBAL innodb_stats_persistent_sample_pages=default; diff --git a/mysql-test/suite/innodb/r/create_isl_with_direct.result b/mysql-test/suite/innodb/r/create_isl_with_direct.result index 87544e6dcdf..1b830a7d6ce 100644 --- a/mysql-test/suite/innodb/r/create_isl_with_direct.result +++ b/mysql-test/suite/innodb/r/create_isl_with_direct.result @@ -5,6 +5,7 @@ CREATE TABLE t1 (x INT) ENGINE=INNODB, DATA DIRECTORY='MYSQL_TMP_DIR'; # Contents of tmp/test directory containing .ibd file t1.ibd # Contents of the 'test' database directory containing .isl and .frm files +db.opt t1.frm t1.isl DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/dml_purge.result b/mysql-test/suite/innodb/r/dml_purge.result index c6ea5e80f28..95330b80d33 100644 --- a/mysql-test/suite/innodb/r/dml_purge.result +++ b/mysql-test/suite/innodb/r/dml_purge.result @@ -7,13 +7,28 @@ SET GLOBAL innodb_purge_rseg_truncate_frequency = 1; SET GLOBAL innodb_purge_rseg_truncate_frequency = 1; CREATE TABLE t1(a INT PRIMARY KEY, b INT NOT NULL) ROW_FORMAT=REDUNDANT ENGINE=InnoDB; +connect prevent_purge,localhost,root; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connection default; INSERT INTO t1 VALUES(1,2),(3,4); +ALTER TABLE t1 ADD COLUMN c INT; UPDATE t1 SET b=-3 WHERE a=3; +connect con1,localhost,root; +BEGIN; +UPDATE t1 SET b=4 WHERE a=3; +disconnect prevent_purge; +connection default; InnoDB 0 transactions not purged +disconnect con1; FLUSH TABLE t1 FOR EXPORT; Clustered index root page contents: -N_RECS=2; LEVEL=0 -header=0x010000030087 (a=0x696e66696d756d00) +N_RECS=3; LEVEL=0 +header=0x0100000300c6 (a=0x696e66696d756d00) +header=0x1000200b0087 (a=0x80000000, + DB_TRX_ID=0x000000000000, + DB_ROLL_PTR=0x80000000000000, + b=0x80000000, + c=NULL(4 bytes)) header=0x0000100900a6 (a=0x80000001, DB_TRX_ID=0x000000000000, DB_ROLL_PTR=0x80000000000000, @@ -22,11 +37,11 @@ header=0x000018090074 (a=0x80000003, DB_TRX_ID=0x000000000000, DB_ROLL_PTR=0x80000000000000, b=0x7ffffffd) -header=0x030008030000 (a=0x73757072656d756d00) +header=0x040008030000 (a=0x73757072656d756d00) UNLOCK TABLES; SELECT * FROM t1; -a b -1 2 -3 -3 +a b c +1 2 NULL +3 -3 NULL DROP TABLE t1; SET 
GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency; diff --git a/mysql-test/suite/innodb/r/foreign_key.result b/mysql-test/suite/innodb/r/foreign_key.result index e569fc7dba7..5838c3a1fd5 100644 --- a/mysql-test/suite/innodb/r/foreign_key.result +++ b/mysql-test/suite/innodb/r/foreign_key.result @@ -306,3 +306,14 @@ id member_id SELECT * FROM payment_method; id member_id cardholder_address_id DROP TABLE payment_method,address,member; +# +# Bug #26958695 INNODB NESTED STORED FIELD WITH CONSTRAINT KEY +# PRODUCE BROKEN TABLE (no bug in MariaDB) +# +create table t1(f1 int,f2 int, primary key(f1), key(f2, f1))engine=innodb; +create table t2(f1 int, f2 int as (2) stored, f3 int as (f2) stored, +foreign key(f1) references t1(f2) on update set NULL) +engine=innodb; +insert into t1 values(1, 1); +insert into t2(f1) values(1); +drop table t2, t1; diff --git a/mysql-test/suite/innodb/r/innodb-alter-nullable.result b/mysql-test/suite/innodb/r/innodb-alter-nullable.result index f2c0643f0f1..632f7885b8e 100644 --- a/mysql-test/suite/innodb/r/innodb-alter-nullable.result +++ b/mysql-test/suite/innodb/r/innodb-alter-nullable.result @@ -57,3 +57,7 @@ WHERE NAME='test/t'; TABLE_ID NAME FLAG N_COLS SPACE ROW_FORMAT ZIP_PAGE_SIZE SPACE_TYPE # test/t 33 6 # Dynamic 0 Single DROP TABLE t; +CREATE TABLE t1(c1 INT) ENGINE=InnoDB; +ALTER TABLE t1 ADD CONSTRAINT UNIQUE KEY i1(c1); +ALTER TABLE t1 CHANGE c1 c1 INT NOT NULL,ADD KEY(c1); +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/innodb-alter-timestamp.result b/mysql-test/suite/innodb/r/innodb-alter-timestamp.result index 9659b03d6b2..b8686d6812e 100644 --- a/mysql-test/suite/innodb/r/innodb-alter-timestamp.result +++ b/mysql-test/suite/innodb/r/innodb-alter-timestamp.result @@ -2,7 +2,7 @@ CREATE TABLE t1 (i1 INT UNSIGNED NULL DEFAULT 42) ENGINE=innodb; INSERT INTO t1 VALUES(NULL); ALTER TABLE t1 CHANGE i1 i1 INT UNSIGNED NOT NULL DEFAULT rand(), ALGORITHM=INPLACE; -ERROR 22004: Invalid use of NULL value +ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: cannot convert NULL to non-constant DEFAULT. Try ALGORITHM=COPY ALTER TABLE t1 CHANGE i1 i1 INT UNSIGNED NOT NULL DEFAULT rand(), ALGORITHM=COPY; ERROR 01000: Data truncated for column 'i1' at row 1 @@ -10,20 +10,20 @@ ALTER TABLE t1 CHANGE i1 id INT UNSIGNED NOT NULL AUTO_INCREMENT, ADD PRIMARY KEY(id), ALGORITHM=INPLACE; ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Cannot change column type INPLACE. Try ALGORITHM=COPY ALTER TABLE t1 ADD PRIMARY KEY(i1), ALGORITHM=INPLACE; -ERROR 22004: Invalid use of NULL value -ALTER TABLE t1 CHANGE i1 id INT UNSIGNED NOT NULL AUTO_INCREMENT, -ADD PRIMARY KEY(id); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE t1 CHANGE i1 id INT UNSIGNED NOT NULL AUTO_INCREMENT; affected rows: 1 info: Records: 1 Duplicates: 0 Warnings: 0 SELECT * FROM t1; id -1 +42 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`) -) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1 +) ENGINE=InnoDB AUTO_INCREMENT=43 DEFAULT CHARSET=latin1 DROP TABLE t1; CREATE TABLE t1 (i1 INT UNSIGNED NOT NULL, d1 TIMESTAMP NULL) ENGINE=InnoDB; SHOW CREATE TABLE t1; diff --git a/mysql-test/suite/innodb/r/innodb-alter.result b/mysql-test/suite/innodb/r/innodb-alter.result index de69845dddb..afdeac1c22a 100644 --- a/mysql-test/suite/innodb/r/innodb-alter.result +++ b/mysql-test/suite/innodb/r/innodb-alter.result @@ -419,8 +419,6 @@ ERROR 0A000: LOCK=NONE is not supported. 
Reason: Fulltext index creation require ALTER TABLE t1 ADD FULLTEXT INDEX (ct), CHANGE c1 pk INT, ALTER c2 SET DEFAULT 42, RENAME TO tt, ALGORITHM=INPLACE, LOCK=SHARED; -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SELECT i.NAME,i.POS,i.MTYPE,i.PRTYPE,i.LEN FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS i INNER JOIN sys_tables st ON i.TABLE_ID=st.TABLE_ID; @@ -485,8 +483,6 @@ ALTER TABLE t1o DROP COLUMN FTS_DOC_ID, ALGORITHM=INPLACE; ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Cannot drop or rename FTS_DOC_ID. Try ALGORITHM=COPY ALTER TABLE t1o DROP COLUMN FTS_DOC_ID, DROP INDEX ct, ALGORITHM=INPLACE; ALTER TABLE t1o ADD FULLTEXT INDEX(ct), ADD COLUMN cu TEXT; -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID ALTER TABLE t1o ADD FULLTEXT INDEX(cu), ADD COLUMN FTS_DOC_ID BIGINT, ALGORITHM=INPLACE; ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: InnoDB presently supports one FULLTEXT index creation at a time. Try ALGORITHM=COPY @@ -702,8 +698,6 @@ FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE name LIKE '%FTS_%' ORDER BY 1, 2; prefix name ALTER TABLE t ADD FULLTEXT INDEX(t); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SELECT sc.pos, sc.NAME FROM information_schema.innodb_sys_columns sc INNER JOIN information_schema.innodb_sys_tables st ON sc.TABLE_ID=st.TABLE_ID @@ -877,6 +871,25 @@ DROP TABLE dest_db.t1; DROP TABLE source_db.t1; DROP DATABASE source_db; DROP DATABASE dest_db; +# +# BUG #26334149 MYSQL CRASHES WHEN FULL TEXT INDEXES IBD FILES ARE +# ORPHANED DUE TO RENAME TABLE +# +CREATE DATABASE db1; +USE db1; +CREATE TABLE notes ( +id int(11) NOT NULL AUTO_INCREMENT, +body text COLLATE utf8_unicode_ci, +PRIMARY KEY (id) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 +COLLATE=utf8_unicode_ci +ROW_FORMAT=COMPRESSED; +ALTER TABLE notes ADD FULLTEXT INDEX index_ft_body (body(255)); +DROP INDEX index_ft_body ON notes; +CREATE DATABASE db2; +RENAME TABLE db1.notes TO db2.notes; +DROP DATABASE db1; +DROP DATABASE db2; USE test; # # MDEV-14038 ALTER TABLE does not exit on error with InnoDB + bad default function diff --git a/mysql-test/suite/innodb/r/innodb-isolation.result b/mysql-test/suite/innodb/r/innodb-isolation.result index 2248d25b39a..ce9c530ff44 100644 --- a/mysql-test/suite/innodb/r/innodb-isolation.result +++ b/mysql-test/suite/innodb/r/innodb-isolation.result @@ -1335,11 +1335,17 @@ UPDATE t6 SET b = "updated by client 2"; SELECT * FROM t6; a b aa bb 1 inserted by client 1 1 inserted by client 1 -2 updated by client 2 2 inserted by client 1 +2 inserted by client 1 2 inserted by client 1 3 inserted by client 1 3 inserted by client 1 4 updated by client 2 4 inserted by client 1 5 updated by client 2 NULL NULL 10 updated by client 2 1 inserted by client 1 +SELECT * FROM t6 LOCK IN SHARE MODE; +a b aa bb +2 updated by client 2 2 inserted by client 1 +4 updated by client 2 4 inserted by client 1 +5 updated by client 2 NULL NULL +10 updated by client 2 1 inserted by client 1 SELECT COUNT(*) FROM t6; COUNT(*) 6 diff --git a/mysql-test/suite/innodb/r/innodb-online-alter-gis.result b/mysql-test/suite/innodb/r/innodb-online-alter-gis.result index 79c0f2386aa..34cc62f0a55 100644 --- a/mysql-test/suite/innodb/r/innodb-online-alter-gis.result +++ b/mysql-test/suite/innodb/r/innodb-online-alter-gis.result @@ -1,13 +1,13 @@ create table t1(a int not null primary key, b geometry not null) engine=innodb; -ALTER ONLINE TABLE t1 ADD SPATIAL INDEX new(b); -ERROR 0A000: LOCK=NONE is not supported. 
Reason: Do not support online operation on table with GIS index. Try LOCK=SHARED +ALTER ONLINE TABLE t1 ADD SPATIAL INDEX new(b), ALGORITHM=INSTANT; +ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Do not support online operation on table with GIS index. Try ALGORITHM=NOCOPY show warnings; Level Code Message -Error 1846 LOCK=NONE is not supported. Reason: Do not support online operation on table with GIS index. Try LOCK=SHARED +Error 1846 ALGORITHM=INSTANT is not supported. Reason: Do not support online operation on table with GIS index. Try ALGORITHM=NOCOPY show errors; Level Code Message -Error 1846 LOCK=NONE is not supported. Reason: Do not support online operation on table with GIS index. Try LOCK=SHARED -ALTER ONLINE TABLE t1 ADD SPATIAL INDEX new(b), LOCK=SHARED; +Error 1846 ALGORITHM=INSTANT is not supported. Reason: Do not support online operation on table with GIS index. Try ALGORITHM=NOCOPY +ALTER ONLINE TABLE t1 ADD SPATIAL INDEX new(b), LOCK=SHARED, ALGORITHM=NOCOPY; show warnings; Level Code Message show errors; @@ -47,3 +47,21 @@ DESCRIBE t1; Field Type Null Key Default Extra a int(11) YES NULL DROP TABLE t1; +# +# Bug #19077964 ASSERT PAGE_SIZE.EQUALS_TO SPACE_PAGE_SIZE +# BTR_COPY_BLOB_PREFIX +# +CREATE TABLE t1(f1 INT PRIMARY KEY, f3 LINESTRING NOT NULL, +SPATIAL KEY(f3))ENGINE=InnoDB ROW_FORMAT=COMPRESSED +KEY_BLOCK_SIZE=1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) NOT NULL, + `f3` linestring NOT NULL, + PRIMARY KEY (`f1`), + SPATIAL KEY `f3` (`f3`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 +INSERT INTO t1 VALUES (1, ST_linefromtext(concat('linestring', '( 0 0, 1 1, 2 2, 3 3, 4 4, 5 5, 6 6, 7 7, 8 8, 9 9, 10 10, 11 11, 12 12, 13 13, 14 14, 15 15, 16 16, 17 17, 18 18, 19 19, 20 20, 21 21, 22 22, 23 23, 24 24, 25 25, 26 26, 27 27, 28 28, 29 29, 30 30, 31 31, 32 32, 33 33, 34 34, 35 35, 36 36, 37 37, 38 38, 39 39, 40 40, 41 41, 42 42, 43 43, 44 44, 45 45, 46 46, 47 47, 48 48, 49 49, 50 50, 51 51, 52 52, 53 53, 54 54, 55 55, 56 56, 57 57, 58 58, 59 59, 60 60, 61 61, 62 62, 63 63, 64 64, 65 65, 66 66, 67 67, 68 68, 69 69, 70 70, 71 71, 72 72, 73 73, 74 74, 75 75, 76 76, 77 77, 78 78, 79 79, 9999 9999)')));; +ALTER TABLE t1 ROW_FORMAT = DYNAMIC, KEY_BLOCK_SIZE=0, ALGORITHM=INPLACE; +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/innodb-table-online.result b/mysql-test/suite/innodb/r/innodb-table-online.result index 3ac00436e07..d8482ebb23a 100644 --- a/mysql-test/suite/innodb/r/innodb-table-online.result +++ b/mysql-test/suite/innodb/r/innodb-table-online.result @@ -411,9 +411,7 @@ SET @old_sql_mode = @@sql_mode; SET @@sql_mode = 'STRICT_TRANS_TABLES'; ALTER TABLE t1 DROP COLUMN c22f, DROP PRIMARY KEY, ADD PRIMARY KEY c3p5(c3(5)), ALGORITHM = INPLACE; -ERROR 22004: Invalid use of NULL value -ALTER TABLE t1 MODIFY c3 CHAR(255) NOT NULL; -ERROR 22004: Invalid use of NULL value +ERROR 23000: Duplicate entry '' for key 'PRIMARY' SET @@sql_mode = @old_sql_mode; UPDATE t1 SET c3=LEFT(CONCAT(c1,REPEAT('foo',c1)),255) WHERE c3 IS NULL; SET DEBUG_SYNC = 'row_log_table_apply1_before SIGNAL c3p5_created0 WAIT_FOR ins_done0'; diff --git a/mysql-test/suite/innodb/r/innodb-wl5522.result b/mysql-test/suite/innodb/r/innodb-wl5522.result index 4ef92cdfed9..1ed857af473 100644 --- a/mysql-test/suite/innodb/r/innodb-wl5522.result +++ b/mysql-test/suite/innodb/r/innodb-wl5522.result @@ -31,12 +31,14 @@ a b c 822 Devotion asdfuihknaskdf 821 Cavalry ..asdasdfaeraf CREATE TABLE t2(a INT PRIMARY KEY) 
ENGINE=InnoDB ROW_FORMAT=COMPACT; +db.opt t1.frm t1.ibd t2.frm t2.ibd FLUSH TABLE t1, t2 FOR EXPORT; # List before copying files +db.opt t1.cfg t1.frm t1.ibd @@ -61,12 +63,14 @@ a b c # Restarting server # Done restarting server # List before t1 DISCARD +db.opt t1.frm t1.ibd t2.frm t2.ibd ALTER TABLE t1 DISCARD TABLESPACE; # List after t1 DISCARD +db.opt t1.frm t2.frm t2.ibd @@ -85,6 +89,7 @@ a b c 823 Evolution lsjndofiabsoibeg 822 Devotion asdfuihknaskdf 821 Cavalry ..asdasdfaeraf +db.opt t1.cfg t1.frm t1.ibd @@ -107,6 +112,7 @@ DROP TABLE t1; ALTER TABLE t2 ROW_FORMAT=DYNAMIC; ALTER TABLE t2 DISCARD TABLESPACE; # List after t2 DISCARD +db.opt t2.frm ALTER TABLE t2 IMPORT TABLESPACE; ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x21 and the meta-data file has 0x1) @@ -135,6 +141,7 @@ INSERT INTO t1(c2) SELECT c2 FROM t1; INSERT INTO t1(c2) SELECT c2 FROM t1; INSERT INTO t1(c2) SELECT c2 FROM t1; INSERT INTO t1(c2) SELECT c2 FROM t1; +db.opt t1.frm t1.ibd FLUSH TABLES t1 FOR EXPORT; @@ -142,6 +149,7 @@ SELECT COUNT(*) FROM t1; COUNT(*) 16 backup: t1 +db.opt t1.cfg t1.frm t1.ibd @@ -154,6 +162,7 @@ ALTER TABLE t1 DISCARD TABLESPACE; SELECT * FROM t1; ERROR HY000: Tablespace has been discarded for table `t1` restore: t1 .ibd and .cfg files +db.opt t1.cfg t1.frm t1.ibd @@ -173,6 +182,7 @@ INSERT INTO t1(c2) SELECT c2 FROM t1; INSERT INTO t1(c2) SELECT c2 FROM t1; INSERT INTO t1(c2) SELECT c2 FROM t1; INSERT INTO t1(c2) SELECT c2 FROM t1; +db.opt t1.frm t1.ibd FLUSH TABLES t1 FOR EXPORT; @@ -180,10 +190,12 @@ SELECT COUNT(*) FROM t1; COUNT(*) 16 backup: t1 +db.opt t1.cfg t1.frm t1.ibd UNLOCK TABLES; +db.opt t1.frm t1.ibd INSERT INTO t1(c2) SELECT c2 FROM t1; @@ -196,6 +208,7 @@ ALTER TABLE t1 DISCARD TABLESPACE; SELECT * FROM t1; ERROR HY000: Tablespace has been discarded for table `t1` restore: t1 .ibd and .cfg files +db.opt t1.cfg t1.frm t1.ibd @@ -220,6 +233,7 @@ SELECT COUNT(*) FROM t1 WHERE c2 = 1; COUNT(*) 16 backup: t1 +db.opt t1.cfg t1.frm t1.ibd diff --git a/mysql-test/suite/innodb/r/innodb-wl5980-alter.result b/mysql-test/suite/innodb/r/innodb-wl5980-alter.result index 27866b68303..daa3ffc0a9f 100644 --- a/mysql-test/suite/innodb/r/innodb-wl5980-alter.result +++ b/mysql-test/suite/innodb/r/innodb-wl5980-alter.result @@ -64,6 +64,7 @@ t1 CREATE TABLE `t1` ( KEY `c2` (`c2`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 DATA DIRECTORY='MYSQL_TMP_DIR/alt_dir/' ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -99,6 +100,7 @@ test/t1c3 c3 c2 0 ALTER TABLE t1 CHANGE c2 c2 INT AFTER c1; ALTER TABLE t1 CHANGE c1 c1 INT FIRST; ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -133,6 +135,7 @@ test/t1c2 c2 c2 0 test/t1c3 c3 c2 0 ALTER TABLE t1 CHANGE C2 c3 INT; ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -167,6 +170,7 @@ test/t1c2 c2 c3 0 test/t1c3 c3 c2 0 ALTER TABLE t1 CHANGE c3 C INT; ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -201,6 +205,7 @@ test/t1c2 c2 C 0 test/t1c3 c3 c2 0 ALTER TABLE t1 CHANGE C Cöŀumň_TWO INT; ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -242,6 +247,7 @@ ALTER TABLE t1 CHANGE cöĿǖmň_two c3 INT; ERROR 42S22: Unknown column 'cöĿǖmň_two' in 't1' ALTER TABLE t1 CHANGE cÖĿUMŇ_two c3 INT, RENAME TO t3; ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -289,6 +295,7 @@ ERROR 42000: 
Identifier name '12345678901234567890123456789012345678901234567890 ALTER TABLE t3 CHANGE c3 `1234567890123456789012345678901234567890123456789012345678901234` INT; ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -325,6 +332,7 @@ ALTER TABLE t3 CHANGE `1234567890123456789012345678901234567890123456789012345678901234` `倀å€å€‚倃倄倅倆倇倈倉倊個倌å€å€Žå€å€å€‘倒倓倔倕倖倗倘候倚倛倜å€å€žå€Ÿå€ å€¡å€¢å€£å€¤å€¥å€¦å€§å€¨å€©å€ªå€«å€¬å€­å€®å€¯å€°å€±å€²å€³å€´å€µå€¶å€·å€¸å€¹å€ºå€»å€¼å€½å€¾Ã¤` INT; ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -344,6 +352,7 @@ ALTER TABLE t3 CHANGE `倀å€å€‚倃倄倅倆倇倈倉倊個倌å€å€Žå€å€å€‘倒倓倔倕倖倗倘候倚倛倜å€å€žå€Ÿå€ å€¡å€¢å€£å€¤å€¥å€¦å€§å€¨å€©å€ªå€«å€¬å€­å€®å€¯å€°å€±å€²å€³å€´å€µå€¶å€·å€¸å€¹å€ºå€»å€¼å€½å€¾Ã„` c3 INT; ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -365,6 +374,7 @@ ALTER TABLE t3 CHANGE c3 😲 INT; ERROR HY000: Invalid utf8mb4 character string: '\xF0\x9F\x98\xB2' ALTER TABLE t3 RENAME TO t2; ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -401,6 +411,7 @@ ON i.TABLE_ID=st.TABLE_ID; NAME NAME test/t1 test/t1 ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -475,6 +486,7 @@ t1c CREATE TABLE `t1c` ( CONSTRAINT `t1c3` FOREIGN KEY (`c3`) REFERENCES `t1p` (`c2`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 DATA DIRECTORY='MYSQL_TMP_DIR/alt_dir/' ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -514,6 +526,7 @@ ERROR HY000: Cannot drop index 'c3': needed in a foreign key constraint SET foreign_key_checks=0; ALTER TABLE t1c DROP INDEX C3; ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -561,6 +574,7 @@ test/t1c2 c2 c3 0 test/t1c3 c3 c2 0 ALTER TABLE t1c DROP FOREIGN KEY t1C3; ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -605,6 +619,7 @@ ID FOR_COL_NAME REF_COL_NAME POS test/t1c2 c2 c3 0 ALTER TABLE t1c DROP INDEX c2, DROP FOREIGN KEY t1C2; ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -646,6 +661,7 @@ INNER JOIN sys_foreign sf ON i.ID = sf.ID; ID FOR_COL_NAME REF_COL_NAME POS ALTER TABLE t1 DROP INDEX c2, CHANGE c3 c2 INT; ### files in MYSQL_DATA_DIR/test +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -684,8 +700,6 @@ ERROR 0A000: LOCK=NONE is not supported. Reason: Fulltext index creation require ALTER TABLE t1 ADD FULLTEXT INDEX (ct), CHANGE c1 pk INT, ALTER c2 SET DEFAULT 42, RENAME TO tt, ALGORITHM=INPLACE, LOCK=SHARED; -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID ### files in MYSQL_DATA_DIR/test FTS_AUX_INDEX_1.isl FTS_AUX_INDEX_2.isl @@ -698,6 +712,7 @@ FTS_AUX_BEING_DELETED_CACHE.isl FTS_AUX_CONFIG.isl FTS_AUX_DELETED.isl FTS_AUX_DELETED_CACHE.isl +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -769,8 +784,6 @@ ALGORITHM=INPLACE; ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Cannot change column type INPLACE. 
Try ALGORITHM=COPY CREATE TABLE t1n LIKE t1o; ALTER TABLE t1n ADD FULLTEXT INDEX(ct); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID ### files in MYSQL_DATA_DIR/test FTS_AUX_INDEX_1.isl FTS_AUX_INDEX_2.isl @@ -794,6 +807,7 @@ FTS_AUX_BEING_DELETED_CACHE.ibd FTS_AUX_CONFIG.ibd FTS_AUX_DELETED.ibd FTS_AUX_DELETED_CACHE.ibd +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -855,6 +869,7 @@ FTS_AUX_BEING_DELETED_CACHE.ibd FTS_AUX_CONFIG.ibd FTS_AUX_DELETED.ibd FTS_AUX_DELETED_CACHE.ibd +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -910,6 +925,7 @@ FTS_AUX_BEING_DELETED_CACHE.ibd FTS_AUX_CONFIG.ibd FTS_AUX_DELETED.ibd FTS_AUX_DELETED_CACHE.ibd +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -966,6 +982,7 @@ FTS_AUX_BEING_DELETED_CACHE.ibd FTS_AUX_CONFIG.ibd FTS_AUX_DELETED.ibd FTS_AUX_DELETED_CACHE.ibd +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -1031,6 +1048,7 @@ FTS_AUX_BEING_DELETED_CACHE.ibd FTS_AUX_CONFIG.ibd FTS_AUX_DELETED.ibd FTS_AUX_DELETED_CACHE.ibd +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -1099,6 +1117,7 @@ FTS_AUX_BEING_DELETED_CACHE.ibd FTS_AUX_CONFIG.ibd FTS_AUX_DELETED.ibd FTS_AUX_DELETED_CACHE.ibd +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -1164,6 +1183,7 @@ FTS_AUX_BEING_DELETED_CACHE.ibd FTS_AUX_CONFIG.ibd FTS_AUX_DELETED.ibd FTS_AUX_DELETED_CACHE.ibd +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -1221,6 +1241,7 @@ FTS_AUX_BEING_DELETED_CACHE.ibd FTS_AUX_CONFIG.ibd FTS_AUX_DELETED.ibd FTS_AUX_DELETED_CACHE.ibd +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -1276,6 +1297,7 @@ FTS_AUX_BEING_DELETED_CACHE.isl FTS_AUX_CONFIG.isl FTS_AUX_DELETED.isl FTS_AUX_DELETED_CACHE.isl +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -1331,6 +1353,7 @@ FTS_AUX_BEING_DELETED_CACHE.ibd FTS_AUX_CONFIG.ibd FTS_AUX_DELETED.ibd FTS_AUX_DELETED_CACHE.ibd +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -1392,6 +1415,7 @@ FTS_AUX_BEING_DELETED_CACHE.isl FTS_AUX_CONFIG.isl FTS_AUX_DELETED.isl FTS_AUX_DELETED_CACHE.isl +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -1463,6 +1487,7 @@ FTS_AUX_BEING_DELETED_CACHE.isl FTS_AUX_CONFIG.isl FTS_AUX_DELETED.isl FTS_AUX_DELETED_CACHE.isl +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -1511,6 +1536,7 @@ FTS_AUX_BEING_DELETED_CACHE.ibd FTS_AUX_CONFIG.ibd FTS_AUX_DELETED.ibd FTS_AUX_DELETED_CACHE.ibd +db.opt sys_foreign.frm sys_foreign.ibd sys_indexes.frm @@ -1560,4 +1586,5 @@ ID FOR_COL_NAME REF_COL_NAME POS # DROP TABLE tt, t1o, sys_tables, sys_indexes, sys_foreign; ### files in MYSQL_DATA_DIR/test +db.opt ### files in MYSQL_TMP_DIR/alt_dir/test diff --git a/mysql-test/suite/innodb/r/innodb.result b/mysql-test/suite/innodb/r/innodb.result index aafd31ae6f1..86f90c2bd79 100644 --- a/mysql-test/suite/innodb/r/innodb.result +++ b/mysql-test/suite/innodb/r/innodb.result @@ -1701,7 +1701,7 @@ variable_value - @innodb_rows_inserted_orig 964 SELECT variable_value - @innodb_rows_updated_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_updated'; variable_value - @innodb_rows_updated_orig -866 +865 SELECT variable_value - @innodb_row_lock_waits_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_row_lock_waits'; variable_value - @innodb_row_lock_waits_orig 0 diff --git a/mysql-test/suite/innodb/r/innodb_bug27216817.result b/mysql-test/suite/innodb/r/innodb_bug27216817.result new file mode 100644 index 00000000000..0210ced9f91 --- 
/dev/null +++ b/mysql-test/suite/innodb/r/innodb_bug27216817.result @@ -0,0 +1,24 @@ +create table t1 (a int not null, b int not null) engine=innodb; +insert t1 values (1,2),(3,4); +lock table t1 write, t1 tr read; +flush status; +alter table t1 add primary key (b); +show status like 'Handler_read_rnd_next'; +Variable_name Value +Handler_read_rnd_next 0 +unlock tables; +alter table t1 drop primary key; +lock table t1 write; +flush status; +alter table t1 add primary key (b); +show status like 'Handler_read_rnd_next'; +Variable_name Value +Handler_read_rnd_next 0 +unlock tables; +alter table t1 drop primary key; +flush status; +alter table t1 add primary key (b); +show status like 'Handler_read_rnd_next'; +Variable_name Value +Handler_read_rnd_next 0 +drop table t1; diff --git a/mysql-test/suite/innodb/r/innodb_bug54044.result b/mysql-test/suite/innodb/r/innodb_bug54044.result index 01e8c58f896..29b0127f20b 100644 --- a/mysql-test/suite/innodb/r/innodb_bug54044.result +++ b/mysql-test/suite/innodb/r/innodb_bug54044.result @@ -16,9 +16,3 @@ tmp CREATE TABLE `tmp` ( `NULL` binary(0) DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 DROP TABLE tmp; -CREATE TABLE t1 (a VARCHAR(3)) ENGINE=InnoDB; -INSERT INTO t1 VALUES ('foo'),('bar'); -FLUSH TABLES; -CREATE TEMPORARY TABLE tmp ENGINE=InnoDB AS SELECT VALUE(a) FROM t1; -ERROR HY000: Can't create table `test`.`tmp` (errno: 168 "Unknown (generic) error from engine") -DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/instant_alter.result b/mysql-test/suite/innodb/r/instant_alter.result index 1580ba29717..a70a3d077e0 100644 --- a/mysql-test/suite/innodb/r/instant_alter.result +++ b/mysql-test/suite/innodb/r/instant_alter.result @@ -440,6 +440,12 @@ SELECT * FROM t1; a b a 1 DROP TABLE t1; +CREATE TABLE t1 (a INT, b VARCHAR(8), PRIMARY KEY(b,a)) ENGINE=InnoDB ROW_FORMAT=REDUNDANT; +INSERT INTO t1 VALUES (1,'foo'); +ALTER TABLE t1 ADD COLUMN c INT; +UPDATE t1 SET c = 1; +UPDATE t1 SET c = 2; +DROP TABLE t1; CREATE TABLE t1 (id INT PRIMARY KEY, c2 INT UNIQUE, c3 POINT NOT NULL DEFAULT ST_GeomFromText('POINT(3 4)'), @@ -826,6 +832,12 @@ SELECT * FROM t1; a b a 1 DROP TABLE t1; +CREATE TABLE t1 (a INT, b VARCHAR(8), PRIMARY KEY(b,a)) ENGINE=InnoDB ROW_FORMAT=COMPACT; +INSERT INTO t1 VALUES (1,'foo'); +ALTER TABLE t1 ADD COLUMN c INT; +UPDATE t1 SET c = 1; +UPDATE t1 SET c = 2; +DROP TABLE t1; CREATE TABLE t1 (id INT PRIMARY KEY, c2 INT UNIQUE, c3 POINT NOT NULL DEFAULT ST_GeomFromText('POINT(3 4)'), @@ -1212,10 +1224,16 @@ SELECT * FROM t1; a b a 1 DROP TABLE t1; +CREATE TABLE t1 (a INT, b VARCHAR(8), PRIMARY KEY(b,a)) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +INSERT INTO t1 VALUES (1,'foo'); +ALTER TABLE t1 ADD COLUMN c INT; +UPDATE t1 SET c = 1; +UPDATE t1 SET c = 2; +DROP TABLE t1; disconnect analyze; SELECT variable_value-@old_instant instants FROM information_schema.global_status WHERE variable_name = 'innodb_instant_alter_column'; instants -36 +39 SET GLOBAL innodb_purge_rseg_truncate_frequency= @saved_frequency; diff --git a/mysql-test/suite/innodb/r/instant_alter_crash.result b/mysql-test/suite/innodb/r/instant_alter_crash.result index 2daaf10fa67..d9e57b397f3 100644 --- a/mysql-test/suite/innodb/r/instant_alter_crash.result +++ b/mysql-test/suite/innodb/r/instant_alter_crash.result @@ -101,4 +101,5 @@ t2 CREATE TABLE `t2` ( UNIQUE KEY `c2` (`c2`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT DROP TABLE t1,t2; +db.opt SET GLOBAL innodb_purge_rseg_truncate_frequency=@saved_frequency; diff --git 
a/mysql-test/suite/innodb/r/instant_alter_debug.result b/mysql-test/suite/innodb/r/instant_alter_debug.result index 3aec7553ff0..389a04007e2 100644 --- a/mysql-test/suite/innodb/r/instant_alter_debug.result +++ b/mysql-test/suite/innodb/r/instant_alter_debug.result @@ -32,18 +32,11 @@ pk f CREATE TABLE t4 (pk INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t4 VALUES (0); ALTER TABLE t4 ADD COLUMN b INT; -SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS +SELECT COUNT(*)>0 FROM INFORMATION_SCHEMA.COLUMNS LEFT JOIN t4 ON (NUMERIC_SCALE = pk); -COUNT(*) -1748 -SET DEBUG_SYNC='innodb_inplace_alter_table_enter SIGNAL enter WAIT_FOR delete'; +COUNT(*)>0 +1 ALTER TABLE t4 ADD COLUMN c INT; -connect dml,localhost,root,,; -SET DEBUG_SYNC='now WAIT_FOR enter'; -DELETE FROM t4; -InnoDB 0 transactions not purged -SET DEBUG_SYNC='now SIGNAL delete'; -connection default; CREATE TABLE t5 (i INT, KEY(i)) ENGINE=InnoDB; INSERT INTO t5 VALUES (-42); ALTER TABLE t5 ADD UNIQUE ui(i); @@ -61,7 +54,7 @@ INSERT INTO t8 VALUES (NULL); ALTER TABLE t8 ADD c CHAR(3); SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL rebuilt WAIT_FOR dml'; ALTER TABLE t8 FORCE; -connection dml; +connect dml,localhost,root,,; SET DEBUG_SYNC='now WAIT_FOR rebuilt'; BEGIN; INSERT INTO t8 SET i=1; @@ -164,4 +157,28 @@ INSERT INTO t11 () VALUES (); UPDATE t11 SET c22 = 1; InnoDB 0 transactions not purged DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11; +# +# MDEV-15060 Assertion in row_log_table_apply_op after instant ADD +# when the table is emptied during subsequent ALTER TABLE +# +CREATE TABLE t1 (a INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES (NULL); +ALTER TABLE t1 ADD COLUMN b INT NOT NULL; +connect stop_purge,localhost,root; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connect ddl,localhost,root,,test; +DELETE FROM t1; +SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL copied WAIT_FOR logged'; +ALTER TABLE t1 FORCE; +connection default; +SET DEBUG_SYNC='now WAIT_FOR copied'; +BEGIN; +INSERT INTO t1 SET b=1; +ROLLBACK; +disconnect stop_purge; +InnoDB 2 transactions not purged +SET DEBUG_SYNC='now SIGNAL logged'; +disconnect ddl; +DROP TABLE t1; +SET DEBUG_SYNC='RESET'; SET GLOBAL innodb_purge_rseg_truncate_frequency = @save_frequency; diff --git a/mysql-test/suite/innodb/r/log_file_name.result b/mysql-test/suite/innodb/r/log_file_name.result index 0d764f65617..99eba0e33e7 100644 --- a/mysql-test/suite/innodb/r/log_file_name.result +++ b/mysql-test/suite/innodb/r/log_file_name.result @@ -98,5 +98,6 @@ FOUND 1 /InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace/ i FOUND 1 /\[Warning\] InnoDB: Tablespace \d+ was not found at .*u[1-5].ibd, and innodb_force_recovery was set. 
All redo log for this tablespace will be ignored!/ in mysqld.1.err DROP TABLE u1,u2,u3,u6; # List of files: +db.opt SHOW TABLES; Tables_in_test diff --git a/mysql-test/suite/innodb/r/rename_table_debug.result b/mysql-test/suite/innodb/r/rename_table_debug.result index 976b609bdd5..646bd4faf5d 100644 --- a/mysql-test/suite/innodb/r/rename_table_debug.result +++ b/mysql-test/suite/innodb/r/rename_table_debug.result @@ -1,3 +1,4 @@ +FLUSH TABLES; CREATE TABLE t1 (a SERIAL, b INT, c INT, d INT) ENGINE=InnoDB; INSERT INTO t1 () VALUES (); connect con1,localhost,root,,test; diff --git a/mysql-test/suite/innodb/r/row_format_redundant.result b/mysql-test/suite/innodb/r/row_format_redundant.result index 6deecd10ada..63172897927 100644 --- a/mysql-test/suite/innodb/r/row_format_redundant.result +++ b/mysql-test/suite/innodb/r/row_format_redundant.result @@ -77,3 +77,4 @@ ib_buffer_pool ib_logfile0 ib_logfile1 ibdata1 +db.opt diff --git a/mysql-test/suite/innodb/r/stored_fk.result b/mysql-test/suite/innodb/r/stored_fk.result new file mode 100644 index 00000000000..35524d5a88f --- /dev/null +++ b/mysql-test/suite/innodb/r/stored_fk.result @@ -0,0 +1,74 @@ +# Create statement with FK on base column of stored column +create table t1(f1 int, f2 int as(f1) stored, +foreign key(f1) references t2(f1) on delete cascade)engine=innodb; +ERROR HY000: Can't create table `test`.`t1` (errno: 150 "Foreign key constraint is incorrectly formed") +# adding new stored column during alter table copy operation. +create table t1(f1 int primary key) engine=innodb; +create table t2(f1 int not null, f2 int as (f1) virtual, +foreign key(f1) references t1(f1) on update cascade)engine=innodb; +alter table t2 add column f3 int as (f1) stored, add column f4 int as (f1) virtual; +show create table t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `f1` int(11) NOT NULL, + `f2` int(11) GENERATED ALWAYS AS (`f1`) VIRTUAL, + `f3` int(11) GENERATED ALWAYS AS (`f1`) STORED, + `f4` int(11) GENERATED ALWAYS AS (`f1`) VIRTUAL, + KEY `f1` (`f1`), + CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`f1`) REFERENCES `t1` (`f1`) ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +drop table t2; +# adding foreign key constraint for base columns during alter copy. +create table t2(f1 int not null, f2 int as (f1) stored) engine=innodb; +alter table t2 add foreign key(f1) references t1(f1) on update cascade, algorithm=copy; +show create table t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `f1` int(11) NOT NULL, + `f2` int(11) GENERATED ALWAYS AS (`f1`) STORED, + KEY `f1` (`f1`), + CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`f1`) REFERENCES `t1` (`f1`) ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +drop table t2; +# adding foreign key constraint for base columns during online alter. +create table t2(f1 int not null, f2 int as (f1) stored) engine=innodb; +set foreign_key_checks = 0; +alter table t2 add foreign key(f1) references t1(f1) on update cascade, algorithm=inplace; +ERROR 0A000: Cannot add foreign key on the base column of stored column +drop table t2; +# adding stored column via online alter. +create table t2(f1 int not null, +foreign key(f1) references t1(f1) on update cascade)engine=innodb; +alter table t2 add column f2 int as (f1) stored, algorithm=inplace; +ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. 
Try ALGORITHM=COPY +drop table t2, t1; +# +# BUG#26731689 FK ON TABLE WITH GENERATED COLS: ASSERTION POS < N_DEF +# +CREATE TABLE s (a INT, b INT GENERATED ALWAYS AS (0) STORED, c INT, +d INT GENERATED ALWAYS AS (0) VIRTUAL, e INT) ENGINE=innodb; +CREATE TABLE t (a INT) ENGINE=innodb; +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (e) REFERENCES t(a) ON UPDATE SET null; +ERROR HY000: Failed to add the foreign key constaint. Missing index for constraint 'c' in the referenced table 't' +ALTER TABLE t ADD PRIMARY KEY(a); +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (e) REFERENCES t(a) ON UPDATE SET null; +DROP TABLE s,t; +CREATE TABLE s (a INT GENERATED ALWAYS AS (0) VIRTUAL, +b INT GENERATED ALWAYS AS (0) STORED, c INT) ENGINE=innodb; +CREATE TABLE t (a INT) ENGINE=innodb; +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (c) REFERENCES t(a) ON UPDATE SET null; +ERROR HY000: Failed to add the foreign key constaint. Missing index for constraint 'c' in the referenced table 't' +ALTER TABLE t ADD PRIMARY KEY(a); +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (c) REFERENCES t(a) ON UPDATE SET null; +DROP TABLE s,t; +CREATE TABLE s (a INT, b INT GENERATED ALWAYS AS (0) STORED) ENGINE=innodb; +CREATE TABLE t (a INT PRIMARY KEY) ENGINE=innodb; +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (a) REFERENCES t(a) ON UPDATE SET null; +DROP TABLE s,t; +CREATE TABLE s (a INT, b INT) ENGINE=innodb; +CREATE TABLE t (a INT) ENGINE=innodb; +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (a) REFERENCES t(a) ON UPDATE SET null; +ERROR HY000: Failed to add the foreign key constaint. Missing index for constraint 'c' in the referenced table 't' +ALTER TABLE t ADD PRIMARY KEY(a); +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (a) REFERENCES t(a) ON UPDATE SET null; +DROP TABLE s,t; diff --git a/mysql-test/suite/innodb/r/temporary_table.result b/mysql-test/suite/innodb/r/temporary_table.result index 310741b5798..533d32089c3 100644 --- a/mysql-test/suite/innodb/r/temporary_table.result +++ b/mysql-test/suite/innodb/r/temporary_table.result @@ -638,3 +638,23 @@ t1 CREATE TEMPORARY TABLE `t1` ( `j` int(11) DEFAULT NULL, PRIMARY KEY (`i`) KEY_BLOCK_SIZE=8 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +DROP TABLE t1; +CREATE TEMPORARY TABLE t1(f1 INT, KEY(f1)) ENGINE=InnoDB; +INSERT INTO t1 VALUES(NULL); +UPDATE t1 SET f1 = 0; +START TRANSACTION; +UPDATE t1 SET f1 = 4; +UPDATE t1 SET f1 = 0; +ROLLBACK; +SELECT * FROM t1; +f1 +0 +DROP TABLE t1; +# +# MDEV-15874 CREATE TABLE creates extra transaction +# +call mtr.add_suppression("Warning 150 Create table `mysqld.1`.`t1` with foreign key constraint failed. 
Temporary tables can't have foreign key constraints.*"); +SET FOREIGN_KEY_CHECKS = 0; +CREATE TEMPORARY TABLE t1(f1 INT NOT NULL, +FOREIGN KEY(f1) REFERENCES t0(f1))ENGINE=InnoDB; +ERROR HY000: Can't create table `test`.`t1` (errno: 150 "Foreign key constraint is incorrectly formed") diff --git a/mysql-test/suite/innodb/r/undo_log.result b/mysql-test/suite/innodb/r/undo_log.result index a40c6b5b3bf..6fe0da3da47 100644 --- a/mysql-test/suite/innodb/r/undo_log.result +++ b/mysql-test/suite/innodb/r/undo_log.result @@ -140,3 +140,16 @@ CHECK TABLE test_tab; Table Op Msg_type Msg_text test.test_tab check status OK DROP TABLE test_tab; +SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency; +SET GLOBAL innodb_purge_rseg_truncate_frequency = 1; +CREATE TEMPORARY TABLE t2(i INT)ENGINE=InnoDB; +CREATE TABLE t1(i TEXT NOT NULL) ENGINE=INNODB; +BEGIN; +INSERT t1 SET i=REPEAT('1234567890',840); +UPDATE t1 SET i=''; +INSERT INTO t2 VALUES(2); +ROLLBACK; +InnoDB 0 transactions not purged +DROP TABLE t1; +DROP TABLE t2; +SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency; diff --git a/mysql-test/suite/innodb/t/alter_algorithm.combinations b/mysql-test/suite/innodb/t/alter_algorithm.combinations new file mode 100644 index 00000000000..197748d168e --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_algorithm.combinations @@ -0,0 +1,11 @@ +[COPY] +--alter_algorithm=copy + +[INPLACE] +--alter_algorithm=inplace + +[NOCOPY] +--alter_algorithm=nocopy + +[INSTANT] +--alter_algorithm=instant diff --git a/mysql-test/suite/innodb/t/alter_algorithm.inc b/mysql-test/suite/innodb/t/alter_algorithm.inc new file mode 100644 index 00000000000..1aa3caf7d66 --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_algorithm.inc @@ -0,0 +1,2 @@ +# See also alter_algorithm.combinations +--source include/have_innodb.inc diff --git a/mysql-test/suite/innodb/t/alter_algorithm.test b/mysql-test/suite/innodb/t/alter_algorithm.test new file mode 100644 index 00000000000..5a720489281 --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_algorithm.test @@ -0,0 +1,22 @@ +--source include/have_innodb.inc +let $algorithm = `SELECT @@ALTER_ALGORITHM`; +let $error_code = 0; + +if ($algorithm == "NOCOPY") { + let $error_code = ER_ALTER_OPERATION_NOT_SUPPORTED; +} + +if ($algorithm == "INSTANT") { + let $error_code = ER_ALTER_OPERATION_NOT_SUPPORTED, ER_ALTER_OPERATION_NOT_SUPPORTED_REASON; +} + +--source include/alter_nocopy_fail.inc + +if ($algorithm == "NOCOPY") { + let $error_code = 0; +} + +if ($algorithm == "INSTANT") { + let $error_code = ER_ALTER_OPERATION_NOT_SUPPORTED_REASON; +} +--source include/alter_nocopy.inc diff --git a/mysql-test/suite/innodb/t/alter_foreign_crash.test b/mysql-test/suite/innodb/t/alter_foreign_crash.test new file mode 100644 index 00000000000..1952a1b30d4 --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_foreign_crash.test @@ -0,0 +1,37 @@ +--source include/have_innodb.inc +--source include/have_debug_sync.inc +# The embedded server does not support restarting. 
+--source include/not_embedded.inc + +--echo # +--echo # Bug #20476395 DICT_LOAD_FOREIGNS() FAILED IN +--echo # COMMIT_INPLACE_ALTER_TABLE +--echo # + +call mtr.add_suppression("InnoDB: Failed to load table"); + +create database bug; +use bug; + +create table parent(a serial) engine=innodb; +create table child(a serial, foreign key fk (a) references parent(a))engine=innodb; + +insert into parent values(1); +insert into child values(1); + +connect (con1,localhost,root,,bug); +SET DEBUG_SYNC='innodb_rename_table_ready SIGNAL s1 WAIT_FOR s2 EXECUTE 2'; +--send ALTER TABLE child ROW_FORMAT=DYNAMIC, ALGORITHM=COPY +connection default; +SET DEBUG_SYNC='now WAIT_FOR s1'; +SET DEBUG_SYNC='now SIGNAL s2 WAIT_FOR s1'; + +--let $shutdown_timeout= 0 +--source include/restart_mysqld.inc +disconnect con1; + +show tables; +alter table parent row_format=dynamic; + +drop table parent; +drop database bug; diff --git a/mysql-test/suite/innodb/t/alter_instant.test b/mysql-test/suite/innodb/t/alter_instant.test new file mode 100644 index 00000000000..dddb7b8ce27 --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_instant.test @@ -0,0 +1,45 @@ +--source alter_algorithm.inc + +CREATE TABLE t1(f1 INT NOT NULL, + f2 INT NOT NULL, + f3 INT AS (f2 * f2) VIRTUAL, + INDEX idx (f2))engine=innodb; + +CREATE TABLE t2(f1 INT NOT NULL, f2 INT NOT NULL, + f3 VARCHAR(10), + INDEX(f1))ENGINE=INNODB; + +INSERT INTO t1(f1, f2) VALUES(1, 1); + +select @@alter_algorithm; + +--enable_info +--echo # Add column at the end of the table +--eval ALTER TABLE t1 ADD COLUMN f4 char(100) default "BIG WALL" + +--echo # Change virtual column expression +--eval ALTER TABLE t1 CHANGE f3 f3 INT AS (f2 * f2) VIRTUAL + +--echo # Add virtual column +--eval ALTER TABLE t1 ADD COLUMN f5 INT AS (f2) VIRTUAL + +--echo # Rename Column +--eval ALTER TABLE t1 CHANGE f3 vcol INT AS (f2) VIRTUAL + +--echo # Rename table +--eval ALTER TABLE t1 RENAME t3 + +--echo # Drop Virtual Column +--eval ALTER TABLE t3 DROP COLUMN vcol + +--echo # Column length varies +--eval ALTER TABLE t2 CHANGE f3 f3 VARCHAR(20) + +SET foreign_key_checks = 0; +--eval ALTER TABLE t3 ADD FOREIGN KEY `fidx`(f2) REFERENCES t2(f1) + +SET foreign_key_checks = 1; +--eval ALTER TABLE t3 DROP FOREIGN KEY `fidx` + +DROP TABLE t3, t2; +--disable_info diff --git a/mysql-test/suite/innodb/t/alter_kill-master.opt b/mysql-test/suite/innodb/t/alter_kill-master.opt new file mode 100644 index 00000000000..e472160c2b7 --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_kill-master.opt @@ -0,0 +1 @@ +--innodb-doublewrite=false diff --git a/mysql-test/suite/innodb/t/alter_kill.test b/mysql-test/suite/innodb/t/alter_kill.test new file mode 100644 index 00000000000..922378d2919 --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_kill.test @@ -0,0 +1,158 @@ +--source include/have_innodb.inc +# The embedded server does not support restarting in mysql-test-run. 
+-- source include/not_embedded.inc +-- source include/no_valgrind_without_big.inc + +let MYSQLD_DATADIR=`select @@datadir`; +let PAGE_SIZE=`select @@innodb_page_size`; + +-- disable_query_log +call mtr.add_suppression("InnoDB: innodb_force_recovery is on."); +call mtr.add_suppression("InnoDB: Header page contains inconsistent data in .*bug16720368.ibd"); +call mtr.add_suppression("InnoDB: Checksum mismatch in datafile:.*bug16720368"); +call mtr.add_suppression("InnoDB: Ignoring tablespace for.*bug16720368"); +call mtr.add_suppression("Found 1 prepared XA transactions"); +call mtr.add_suppression("InnoDB: Operating system error.*in a file operation"); +call mtr.add_suppression("InnoDB: \(The error means\|If you are\)"); +call mtr.add_suppression("InnoDB: Ignoring tablespace `test/bug16720368` because it could not be opened"); +call mtr.add_suppression("InnoDB: Tablespace .* was not found at.*bug16735660"); +call mtr.add_suppression("InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace."); +call mtr.add_suppression("InnoDB: Plugin initialization aborted*"); +call mtr.add_suppression("Plugin 'InnoDB' init function returned error."); +call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed."); +-- enable_query_log + +-- echo # +-- echo # Bug#16720368 INNODB CRASHES ON BROKEN #SQL*.IBD FILE AT STARTUP +-- echo # + +SET GLOBAL innodb_file_per_table=1; + +CREATE TABLE bug16720368_1 (a INT PRIMARY KEY) ENGINE=InnoDB; + +connect (con1,localhost,root); +CREATE TABLE bug16720368 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO bug16720368 (a) VALUES (1),(2),(3),(4),(5),(6),(7),(8); + +connection default; + +-- echo # Cleanly shutdown mysqld +-- source include/shutdown_mysqld.inc + +disconnect con1; + +-- echo # Corrupt FIL_PAGE_OFFSET in bug16720368.ibd, +-- echo # and update the checksum to the "don't care" value. +perl; +my $file = "$ENV{MYSQLD_DATADIR}/test/bug16720368.ibd"; +open(FILE, "+<$file") || die "Unable to open $file"; +print FILE pack("H*","deadbeefc001cafe") || die "Unable to write $file"; +seek(FILE, $ENV{PAGE_SIZE}-8, 0) || die "Unable to seek $file"; +print FILE pack("H*","deadbeef") || die "Unable to write $file"; +close(FILE) || die "Unable to close $file"; +EOF + +-- echo # Restart mysqld +-- source include/start_mysqld.inc + +-- echo # This will succeed after a clean shutdown, due to +-- echo # fil_open_single_table_tablespace(check_space_id=FALSE). +SELECT COUNT(*) FROM bug16720368; + +INSERT INTO bug16720368_1 VALUES(1); + +--let $shutdown_timeout= 0 +--source include/restart_mysqld.inc + +-- echo # The table is unaccessible, because after a crash we will +-- echo # validate the tablespace header. +--error ER_NO_SUCH_TABLE_IN_ENGINE +SELECT COUNT(*) FROM bug16720368; +--error ER_NO_SUCH_TABLE_IN_ENGINE +INSERT INTO bug16720368 VALUES(0,1); + +let $restart_parameters = --innodb-force-recovery=3; +--let $shutdown_timeout= 0 +--source include/restart_mysqld.inc + +-- echo # The table is readable thanks to innodb-force-recovery. +SELECT COUNT(*) FROM bug16720368; +INSERT INTO bug16720368 VALUES(0,1); + +-- echo # Shut down the server cleanly to hide the corruption. +let $shutdown_timeout=; +let $restart_parameters =; +-- source include/restart_mysqld.inc + +-- echo # The table is accessible, because after a clean shutdown we will +-- echo # NOT validate the tablespace header. 
+-- echo # We can modify the existing pages, but we cannot allocate or free +-- echo # any pages, because that would hit the corruption on page 0. +SELECT COUNT(*) FROM bug16720368; + +-- echo # Shut down the server to uncorrupt the data. +-- source include/shutdown_mysqld.inc + +# Uncorrupt the FIL_PAGE_OFFSET. +perl; +my $file = "$ENV{MYSQLD_DATADIR}/test/bug16720368.ibd"; +open(FILE, "+<$file") || die "Unable to open $file"; +# Uncorrupt FIL_PAGE_OFFSET. +print FILE pack("H*","deadbeef00000000") || die "Unable to write $file"; +close(FILE) || die "Unable to close $file"; +EOF + +-- echo # Restart the server after uncorrupting the file. +-- source include/start_mysqld.inc + +INSERT INTO bug16720368 VALUES(9,1); +SELECT COUNT(*) FROM bug16720368; +# A debug assertion would fail in buf_block_align_instance() +# if we did not uncorrupt the page number first. +DROP TABLE bug16720368, bug16720368_1; + +-- echo # +-- echo # Bug#16735660 ASSERT TABLE2 == NULL, ROLLBACK OF RESURRECTED TXNS, +-- echo # DICT_TABLE_ADD_TO_CACHE +-- echo # + +SET GLOBAL innodb_file_per_table=1; + +CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; +BEGIN; +INSERT INTO t1 VALUES(42); + +-- connect (con1,localhost,root) + +CREATE TABLE bug16735660 (a INT PRIMARY KEY) ENGINE=InnoDB; + +XA START 'x'; +INSERT INTO bug16735660 VALUES(1),(2),(3); +XA END 'x'; +XA PREPARE 'x'; + +-- connection default + +-- source include/kill_mysqld.inc +-- disconnect con1 +-- move_file $MYSQLD_DATADIR/test/bug16735660.ibd $MYSQLD_DATADIR/bug16735660.omg + +-- echo # Attempt to start without an *.ibd file. +let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err; +--source include/start_mysqld.inc + +let SEARCH_PATTERN= \[ERROR\] InnoDB: Tablespace [0-9]+ was not found at .*test.bug16735660.ibd; +-- source include/search_pattern_in_file.inc + +-- move_file $MYSQLD_DATADIR/bug16735660.omg $MYSQLD_DATADIR/test/bug16735660.ibd + +-- source include/restart_mysqld.inc + +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +SELECT * FROM bug16735660; + +XA RECOVER; +XA ROLLBACK 'x'; + +SELECT * FROM bug16735660; +DROP TABLE bug16735660; diff --git a/mysql-test/suite/innodb/t/alter_missing_tablespace.test b/mysql-test/suite/innodb/t/alter_missing_tablespace.test index 922013cfe43..bdcbdfb4408 100644 --- a/mysql-test/suite/innodb/t/alter_missing_tablespace.test +++ b/mysql-test/suite/innodb/t/alter_missing_tablespace.test @@ -22,8 +22,10 @@ call mtr.add_suppression("InnoDB: ALTER TABLE `test`.`t` DISCARD TABLESPACE fail let $MYSQLD_DATADIR=`select @@datadir`; SET GLOBAL innodb_file_per_table=1; -CREATE TABLE t(a INT)ENGINE=InnoDB; +CREATE TABLE t(a SERIAL)ENGINE=InnoDB; CREATE TABLE `x..d` (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +CREATE TABLE t1(a SERIAL)ENGINE=InnoDB; +INSERT INTO t1 VALUES(1),(2),(3); --source include/shutdown_mysqld.inc @@ -42,10 +44,15 @@ SELECT * FROM t; ALTER TABLE t ADD INDEX (a), ALGORITHM=INPLACE; SHOW WARNINGS; ---error ER_NO_SUCH_TABLE -ALTER TABLE t1 ADD INDEX (a), ALGORITHM=COPY; +--error ER_NO_SUCH_TABLE_IN_ENGINE +ALTER TABLE t ADD INDEX (a), ALGORITHM=COPY; SHOW WARNINGS; +--error ER_NO_SUCH_TABLE_IN_ENGINE +ALTER TABLE t AUTO_INCREMENT=1, ALGORITHM=INPLACE; +--error ER_NO_SUCH_TABLE_IN_ENGINE +ALTER TABLE t AUTO_INCREMENT=1, ALGORITHM=COPY; + --error ER_PARSE_ERROR ALTER TABLE t ALGORITHM=INPLACE, DISCARD TABLESPACE; --error ER_PARSE_ERROR @@ -57,3 +64,12 @@ DROP TABLE t; --error ER_NO_SUCH_TABLE_IN_ENGINE SELECT * FROM `x..d`; DROP TABLE `x..d`; + +ALTER TABLE t1 DISCARD TABLESPACE; +--error 
ER_TABLESPACE_DISCARDED +ALTER TABLE t1 AUTO_INCREMENT=1, ALGORITHM=INPLACE; +--error ER_TABLESPACE_DISCARDED +ALTER TABLE t1 AUTO_INCREMENT=1, FORCE, ALGORITHM=INPLACE; +--error ER_TABLESPACE_DISCARDED +ALTER TABLE t1 AUTO_INCREMENT=1, ALGORITHM=COPY; +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/alter_not_null.test b/mysql-test/suite/innodb/t/alter_not_null.test new file mode 100644 index 00000000000..f4606dfa6c5 --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_not_null.test @@ -0,0 +1,75 @@ +--source include/have_innodb.inc +set @@sql_mode = 'STRICT_TRANS_TABLES'; + +CREATE TABLE t1(f1 INT)ENGINE=INNODB; +INSERT INTO t1 VALUES(NULL); +SELECT * FROM t1; +--enable_info +ALTER TABLE t1 CHANGE f1 f1 INT NOT NULL; +--disable_info +SELECT * FROM t1; +DROP TABLE t1; + +CREATE TABLE t1(f1 CHAR(10))ENGINE=INNODB; +INSERT INTO t1 VALUES(NULL); +SELECT * FROM t1; +--enable_info +ALTER TABLE t1 CHANGE f1 f1 CHAR(10) NOT NULL; +--disable_info +SELECT * FROM t1; +DROP TABLE t1; + +CREATE TABLE t1(f1 VARCHAR(10))ENGINE=INNODB; +INSERT INTO t1 VALUES(NULL); +SELECT * FROM t1; +--enable_info +ALTER TABLE t1 CHANGE f1 f1 VARCHAR(20) NOT NULL; +--disable_info +SELECT * FROM t1; +DROP TABLE t1; + +CREATE TABLE t1(f1 TEXT)ENGINE=INNODB; +INSERT INTO t1 VALUES(NULL); +SELECT * FROM t1; +--enable_info +ALTER TABLE t1 CHANGE f1 f1 TEXT NOT NULL DEFAULT 'abc'; +--disable_info +SELECT * FROM t1; +DROP TABLE t1; + +CREATE TABLE t1(f1 INT NOT NULL, f2 INT NOT NULL, f3 INT)ENGINE=INNODB; +INSERT INTO t1 VALUES(2, 2, NULL); +SELECT * FROM t1; +--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON +ALTER TABLE t1 CHANGE f3 f3 INT NOT NULL DEFAULT (f1 + f2), ALGORITHM=INPLACE; +UPDATE t1 SET f3 = 0; +SELECT * FROM t1; +--enable_info +ALTER TABLE t1 CHANGE f3 f3 INT NOT NULL DEFAULT (f1 + f2); +--disable_info +SELECT * FROM t1; +DROP TABLE t1; + +CREATE TABLE t1(f1 INT NOT NULL DEFAULT 0, b TINYINT)ENGINE=InnoDB; +INSERT INTO t1 VALUES(10, NULL); +SELECT * FROM t1; +--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON +ALTER TABLE t1 CHANGE b b TINYINT NOT NULL DEFAULT if(unix_timestamp()>1,1000,0), algorithm=INPLACE; +DROP TABLE t1; + +CREATE TABLE t1(a INT, v INT AS (a), c INT, d INT NOT NULL, e INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP COLUMN c, CHANGE COLUMN e e INT NOT NULL, ALGORITHM=INPLACE; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, v INT AS (a), d INT NOT NULL, e INT) ENGINE=InnoDB; +ALTER TABLE t1 FORCE, ALGORITHM=INPLACE; +DROP TABLE t1; + +--echo # +--echo # MDEV-16126 Crash or ASAN heap-buffer-overflow in +--echo # mach_read_from_n_little_endian upon ALTER TABLE with blob +--echo # + +CREATE TABLE t1(a INT, v INT AS (a), b INT, c BLOB) ENGINE=InnoDB; +ALTER TABLE t1 ADD PRIMARY KEY(b); +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/alter_not_null_debug.test b/mysql-test/suite/innodb/t/alter_not_null_debug.test new file mode 100644 index 00000000000..9c1500dc829 --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_not_null_debug.test @@ -0,0 +1,68 @@ +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc + +CREATE TABLE t1(c1 INT NOT NULL, c2 INT, PRIMARY KEY(c1))ENGINE=INNODB; +INSERT INTO t1 VALUES(1, NULL); +SET DEBUG_SYNC= 'row_merge_after_scan +SIGNAL opened WAIT_FOR flushed'; +send ALTER TABLE t1 CHANGE c2 c2 INT NOT NULL DEFAULT 2, ALGORITHM=INPLACE; +connect (con1,localhost,root); +SET DEBUG_SYNC= 'now WAIT_FOR opened'; +INSERT INTO t1 VALUES(2, NULL); +SET DEBUG_SYNC= 'now SIGNAL flushed'; +connection default; +--error ER_INVALID_USE_OF_NULL 
+reap; +SELECT * FROM t1; +UPDATE t1 SET c2 = 0 WHERE c1 = 2; +SET DEBUG_SYNC= 'row_merge_after_scan +SIGNAL opened WAIT_FOR flushed'; +--echo # Alter ignore can convert the NULL values from +--echo # CONCURRENT DML to constants +send ALTER IGNORE TABLE t1 CHANGE c2 c2 INT NOT NULL DEFAULT 2, ALGORITHM=INPLACE; +connection con1; +SET DEBUG_SYNC= 'now WAIT_FOR opened'; +UPDATE t1 SET c2 = NULL WHERE c1 = 2; +INSERT INTO t1 VALUES (3, NULL); +SET DEBUG_SYNC= 'now SIGNAL flushed'; +connection default; +reap; +SELECT * FROM t1; +DROP TABLE t1; + + +CREATE TABLE t1(c1 INT NOT NULL, c2 INT, c3 INT, PRIMARY KEY(c1))ENGINE=INNODB; +INSERT INTO t1 VALUES(1, NULL, NULL); +SET DEBUG_SYNC= 'row_merge_after_scan +SIGNAL opened WAIT_FOR flushed'; +--echo # Alter Successfully converts from null to not null + +send ALTER TABLE t1 CHANGE c2 c2 INT NOT NULL DEFAULT 2, ALGORITHM=INPLACE; + +connection con1; +SET DEBUG_SYNC= 'now WAIT_FOR opened'; +UPDATE t1 SET c2= 2 WHERE c1 = 1; +INSERT INTO t1 VALUES (2, 3, 4); +SET DEBUG_SYNC= 'now SIGNAL flushed'; +connection default; +reap; +SELECT * FROM t1; + +SET DEBUG_SYNC= 'row_merge_after_scan +SIGNAL opened WAIT_FOR flushed'; +--echo # Alter fails because concurrent dml inserts null value + +send ALTER TABLE t1 CHANGE c3 c3 INT NOT NULL DEFAULT 2, ALGORITHM=INPLACE; +connection con1; +SET DEBUG_SYNC= 'now WAIT_FOR opened'; +UPDATE t1 SET c3= 2 WHERE c1 = 2; +INSERT INTO t1 VALUES (4, 3, NULL); +SET DEBUG_SYNC= 'now SIGNAL flushed'; +connection default; +--error ER_INVALID_USE_OF_NULL +reap; +SELECT * FROM t1; +DROP TABLE t1; +disconnect con1; +SET DEBUG_SYNC='RESET'; diff --git a/mysql-test/suite/innodb/t/alter_partitioned.test b/mysql-test/suite/innodb/t/alter_partitioned.test new file mode 100644 index 00000000000..9bf6c8f047a --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_partitioned.test @@ -0,0 +1,15 @@ +--source include/have_innodb.inc +--source include/have_partition.inc + +CREATE TABLE t1(a INT, b VARCHAR(10), INDEX(a))ENGINE=InnoDB +PARTITION BY RANGE(a) +(PARTITION pa VALUES LESS THAN (3), +PARTITION pb VALUES LESS THAN (5)); + +--error ER_FOREIGN_KEY_ON_PARTITIONED +CREATE TABLE t2(a INT, FOREIGN KEY(a) REFERENCES t1(a))ENGINE=INNODB +PARTITION BY RANGE(a) +(PARTITION pa VALUES LESS THAN (2), +PARTITION pb VALUES LESS THAN (4)); + +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/alter_partitioned_debug.test b/mysql-test/suite/innodb/t/alter_partitioned_debug.test new file mode 100644 index 00000000000..34565e12036 --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_partitioned_debug.test @@ -0,0 +1,34 @@ +--source include/have_innodb.inc +--source include/have_partition.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc + +CREATE TABLE t1 (a INT, b VARCHAR(10)) ENGINE=InnoDB +PARTITION BY RANGE(a) +(PARTITION pa VALUES LESS THAN (3), +PARTITION pb VALUES LESS THAN (5)); + +INSERT INTO t1 VALUES(2,'two'),(2,'two'),(4,'four'); + +connect ddl,localhost,root,,test; +SET DEBUG_SYNC = 'inplace_after_index_build SIGNAL go WAIT_FOR done'; +send ALTER TABLE t1 ADD UNIQUE KEY (a,b(3)); + +connection default; +SET DEBUG_SYNC = 'now WAIT_FOR go'; +BEGIN; +SELECT * FROM t1 FOR UPDATE; +SET DEBUG_SYNC = 'now SIGNAL done'; + +connection ddl; +--error ER_DUP_ENTRY +reap; + +connection default; +DELETE FROM t1; +disconnect ddl; + +SET DEBUG_SYNC = 'RESET'; + +CHECK TABLE t1; +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/alter_partitioned_xa.test b/mysql-test/suite/innodb/t/alter_partitioned_xa.test new file mode 100644 index 
00000000000..f0883802cd6 --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_partitioned_xa.test @@ -0,0 +1,31 @@ +--source include/have_innodb.inc +--source include/have_partition.inc + +--echo # +--echo # MDEV-14693 XA: Assertion `!clust_index->online_log' failed +--echo # in rollback_inplace_alter_table +--echo # + +# A bug in meta-data locking (MDL) for XA transactions causes +# a bug in InnoDB error handling for ALTER TABLE to be triggered. +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB PARTITION BY HASH(a) PARTITIONS 2; +XA START 'xid'; +INSERT INTO t1 VALUES (1,10); +# XA bug: The following releases the MDL on t1! +--error ER_XAER_RMFAIL +CREATE DATABASE IF NOT EXISTS db; + +--connect (con1,localhost,root,,test) +SET innodb_lock_wait_timeout= 1, lock_wait_timeout= 2; +# Here, innodb_lock_wait_timeout would be exceeded, causing the operation +# to roll back when InnoDB is attempting to commit. +# (Instead, lock_wait_timeout should be exceeded!) +--error ER_LOCK_WAIT_TIMEOUT +ALTER TABLE t1 FORCE; + +# Cleanup +--disconnect con1 +--connection default +XA END 'xid'; +XA ROLLBACK 'xid'; +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/alter_rename_files.test b/mysql-test/suite/innodb/t/alter_rename_files.test new file mode 100644 index 00000000000..3ed1cb5d9fa --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_rename_files.test @@ -0,0 +1,31 @@ +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/have_innodb.inc +--source include/count_sessions.inc + +CREATE TABLE t1 (x INT NOT NULL UNIQUE KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES(5); + +SET GLOBAL innodb_log_checkpoint_now=TRUE; + +# Start an ALTER TABLE and stop it before renaming the files +SET DEBUG_SYNC='commit_cache_rebuild SIGNAL ready WAIT_FOR finish'; + +--send ALTER TABLE t1 ADD PRIMARY KEY(x) + +connect (con1,localhost,root,,); + +SET DEBUG_SYNC='now WAIT_FOR ready'; + +SET GLOBAL innodb_log_checkpoint_now=TRUE; + +SET DEBUG_SYNC='now SIGNAL finish'; + +disconnect con1; +connection default; +reap; +SHOW CREATE TABLE t1; +DROP TABLE t1; +SET DEBUG_SYNC='RESET'; + +--source include/wait_until_count_sessions.inc diff --git a/mysql-test/suite/innodb/t/analyze_table.test b/mysql-test/suite/innodb/t/analyze_table.test new file mode 100644 index 00000000000..e9db3668f02 --- /dev/null +++ b/mysql-test/suite/innodb/t/analyze_table.test @@ -0,0 +1,42 @@ +# +# BUG#22385442 - INNODB: DIFFICULT TO FIND FREE BLOCKS IN THE BUFFER POOL +# + +--source include/have_innodb.inc +--source include/big_test.inc + +DELIMITER |; +CREATE PROCEDURE populate_t1() +BEGIN + DECLARE i int DEFAULT 1; + + START TRANSACTION; + WHILE (i <= 1000000) DO + INSERT INTO t1 VALUES (i, i, CONCAT('a', i)); + SET i = i + 1; + END WHILE; + COMMIT; +END| +DELIMITER ;| + +CREATE TABLE t1( + class INT, + id INT, + title VARCHAR(100) +) ENGINE=InnoDB; + +-- disable_query_log +CALL populate_t1(); +-- enable_query_log + +SELECT COUNT(*) FROM t1; + +SET GLOBAL innodb_stats_persistent_sample_pages=2000; + +ANALYZE TABLE t1; + +DROP TABLE t1; + +DROP PROCEDURE populate_t1; + +SET GLOBAL innodb_stats_persistent_sample_pages=default; diff --git a/mysql-test/suite/innodb/t/dml_purge.test b/mysql-test/suite/innodb/t/dml_purge.test index 93b7f56111c..37178982c8d 100644 --- a/mysql-test/suite/innodb/t/dml_purge.test +++ b/mysql-test/suite/innodb/t/dml_purge.test @@ -15,11 +15,25 @@ SET GLOBAL innodb_purge_rseg_truncate_frequency = 1; CREATE TABLE t1(a INT PRIMARY KEY, b INT NOT NULL) ROW_FORMAT=REDUNDANT ENGINE=InnoDB; +--connect 
(prevent_purge,localhost,root) +START TRANSACTION WITH CONSISTENT SNAPSHOT; + +--connection default INSERT INTO t1 VALUES(1,2),(3,4); +ALTER TABLE t1 ADD COLUMN c INT; UPDATE t1 SET b=-3 WHERE a=3; -# Initiate a full purge, which should reset all DB_TRX_ID. +--connect (con1,localhost,root) +BEGIN; +# For purgeable records, we must record DB_TRX_ID=0 in the undo log! +UPDATE t1 SET b=4 WHERE a=3; +--disconnect prevent_purge + +--connection default +# Initiate a full purge, which should reset the DB_TRX_ID except for a=3. --source include/wait_all_purged.inc +# Initiate a ROLLBACK of the update, which should reset the DB_TRX_ID for a=3. +--disconnect con1 FLUSH TABLE t1 FOR EXPORT; # The following is based on innodb.table_flags: @@ -34,7 +48,7 @@ sysseek(FILE, 3*$ps, 0) || die "Unable to seek $file"; die "Unable to read $file" unless sysread(FILE, $page, $ps) == $ps; print "N_RECS=", unpack("n", substr($page,38+16,2)); print "; LEVEL=", unpack("n", substr($page,38+26,2)), "\n"; -my @fields=("a","DB_TRX_ID","DB_ROLL_PTR", "b"); +my @fields=qw(a DB_TRX_ID DB_ROLL_PTR b c); for (my $offset= 0x65; $offset; $offset= unpack("n", substr($page,$offset-2,2))) { diff --git a/mysql-test/suite/innodb/t/foreign_key.test b/mysql-test/suite/innodb/t/foreign_key.test index 862717647b5..b586f3e9406 100644 --- a/mysql-test/suite/innodb/t/foreign_key.test +++ b/mysql-test/suite/innodb/t/foreign_key.test @@ -276,4 +276,16 @@ SELECT * FROM payment_method; DROP TABLE payment_method,address,member; +--echo # +--echo # Bug #26958695 INNODB NESTED STORED FIELD WITH CONSTRAINT KEY +--echo # PRODUCE BROKEN TABLE (no bug in MariaDB) +--echo # +create table t1(f1 int,f2 int, primary key(f1), key(f2, f1))engine=innodb; +create table t2(f1 int, f2 int as (2) stored, f3 int as (f2) stored, + foreign key(f1) references t1(f2) on update set NULL) +engine=innodb; +insert into t1 values(1, 1); +insert into t2(f1) values(1); +drop table t2, t1; + --source include/wait_until_count_sessions.inc diff --git a/mysql-test/suite/innodb/t/innodb-alter-nullable.test b/mysql-test/suite/innodb/t/innodb-alter-nullable.test index bb5cdee000a..d039459f91f 100644 --- a/mysql-test/suite/innodb/t/innodb-alter-nullable.test +++ b/mysql-test/suite/innodb/t/innodb-alter-nullable.test @@ -71,6 +71,11 @@ WHERE NAME='test/t'; DROP TABLE t; +CREATE TABLE t1(c1 INT) ENGINE=InnoDB; +ALTER TABLE t1 ADD CONSTRAINT UNIQUE KEY i1(c1); +ALTER TABLE t1 CHANGE c1 c1 INT NOT NULL,ADD KEY(c1); +DROP TABLE t1; + # Check that all connections opened by test cases in this file are really # gone so execution of other tests won't be affected by their presence. 
--source include/wait_until_count_sessions.inc diff --git a/mysql-test/suite/innodb/t/innodb-alter-timestamp.test b/mysql-test/suite/innodb/t/innodb-alter-timestamp.test index d8acc02cbdb..32a54354016 100644 --- a/mysql-test/suite/innodb/t/innodb-alter-timestamp.test +++ b/mysql-test/suite/innodb/t/innodb-alter-timestamp.test @@ -3,7 +3,7 @@ CREATE TABLE t1 (i1 INT UNSIGNED NULL DEFAULT 42) ENGINE=innodb; INSERT INTO t1 VALUES(NULL); --enable_info ---error ER_INVALID_USE_OF_NULL +--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON ALTER TABLE t1 CHANGE i1 i1 INT UNSIGNED NOT NULL DEFAULT rand(), ALGORITHM=INPLACE; --error WARN_DATA_TRUNCATED @@ -12,10 +12,8 @@ ALGORITHM=COPY; --error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON ALTER TABLE t1 CHANGE i1 id INT UNSIGNED NOT NULL AUTO_INCREMENT, ADD PRIMARY KEY(id), ALGORITHM=INPLACE; ---error ER_INVALID_USE_OF_NULL ALTER TABLE t1 ADD PRIMARY KEY(i1), ALGORITHM=INPLACE; -ALTER TABLE t1 CHANGE i1 id INT UNSIGNED NOT NULL AUTO_INCREMENT, -ADD PRIMARY KEY(id); +ALTER TABLE t1 CHANGE i1 id INT UNSIGNED NOT NULL AUTO_INCREMENT; --disable_info SELECT * FROM t1; SHOW CREATE TABLE t1; diff --git a/mysql-test/suite/innodb/t/innodb-alter.test b/mysql-test/suite/innodb/t/innodb-alter.test index 7a7b4f3a845..e8266c44a90 100644 --- a/mysql-test/suite/innodb/t/innodb-alter.test +++ b/mysql-test/suite/innodb/t/innodb-alter.test @@ -504,6 +504,28 @@ eval ALTER TABLE $source_db.t1 DROP INDEX index2, algorithm=inplace; eval DROP TABLE $source_db.t1; eval DROP DATABASE $source_db; eval DROP DATABASE $dest_db; + +--echo # +--echo # BUG #26334149 MYSQL CRASHES WHEN FULL TEXT INDEXES IBD FILES ARE +--echo # ORPHANED DUE TO RENAME TABLE +--echo # +CREATE DATABASE db1; USE db1; +CREATE TABLE notes ( + id int(11) NOT NULL AUTO_INCREMENT, + body text COLLATE utf8_unicode_ci, + PRIMARY KEY (id) + ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 +COLLATE=utf8_unicode_ci +ROW_FORMAT=COMPRESSED; + +ALTER TABLE notes ADD FULLTEXT INDEX index_ft_body (body(255)); +DROP INDEX index_ft_body ON notes; + +CREATE DATABASE db2; +RENAME TABLE db1.notes TO db2.notes; +DROP DATABASE db1; +DROP DATABASE db2; + USE test; # diff --git a/mysql-test/suite/innodb/t/innodb-isolation.test b/mysql-test/suite/innodb/t/innodb-isolation.test index 56cd668b305..d00fefb9fc6 100644 --- a/mysql-test/suite/innodb/t/innodb-isolation.test +++ b/mysql-test/suite/innodb/t/innodb-isolation.test @@ -301,6 +301,7 @@ SELECT COUNT(*) FROM t5; UPDATE t6 SET b = "updated by client 2"; SELECT * FROM t6; +SELECT * FROM t6 LOCK IN SHARE MODE; SELECT COUNT(*) FROM t6; DELETE FROM t7; diff --git a/mysql-test/suite/innodb/t/innodb-mdev7046.test b/mysql-test/suite/innodb/t/innodb-mdev7046.test index 208dcd52f35..4804e253427 100644 --- a/mysql-test/suite/innodb/t/innodb-mdev7046.test +++ b/mysql-test/suite/innodb/t/innodb-mdev7046.test @@ -40,9 +40,6 @@ ALTER TABLE t1 ENGINE=InnoDB; drop table t1; -let $datadir=`select @@datadir`; ---remove_file $datadir/test/db.opt - --enable_query_log --enable_result_log --enable_warnings diff --git a/mysql-test/suite/innodb/t/innodb-online-alter-gis.test b/mysql-test/suite/innodb/t/innodb-online-alter-gis.test index 2cb88d398bb..1c99c6eeb9d 100644 --- a/mysql-test/suite/innodb/t/innodb-online-alter-gis.test +++ b/mysql-test/suite/innodb/t/innodb-online-alter-gis.test @@ -2,10 +2,10 @@ create table t1(a int not null primary key, b geometry not null) engine=innodb; --error 1846 -ALTER ONLINE TABLE t1 ADD SPATIAL INDEX new(b); +ALTER ONLINE TABLE t1 ADD SPATIAL INDEX new(b), 
ALGORITHM=INSTANT; show warnings; show errors; -ALTER ONLINE TABLE t1 ADD SPATIAL INDEX new(b), LOCK=SHARED; +ALTER ONLINE TABLE t1 ADD SPATIAL INDEX new(b), LOCK=SHARED, ALGORITHM=NOCOPY; show warnings; show errors; drop table t1; @@ -29,3 +29,34 @@ CREATE TABLE t1 (a INT) ENGINE=InnoDB; ALTER TABLE t1 ADD COLUMN b LINESTRING DEFAULT POINT(1,1); DESCRIBE t1; DROP TABLE t1; + +--echo # +--echo # Bug #19077964 ASSERT PAGE_SIZE.EQUALS_TO SPACE_PAGE_SIZE +--echo # BTR_COPY_BLOB_PREFIX +--echo # + +CREATE TABLE t1(f1 INT PRIMARY KEY, f3 LINESTRING NOT NULL, + SPATIAL KEY(f3))ENGINE=InnoDB ROW_FORMAT=COMPRESSED + KEY_BLOCK_SIZE=1; +SHOW CREATE TABLE t1; + +let $points = 80; +let $x = 0; +let $y = 0; +let $linestr = (; + +while ($points) +{ + let $linestr = $linestr $x $y,; + dec $points; + inc $x; + inc $y; +} + +let $linestr = $linestr 9999 9999); + +--eval INSERT INTO t1 VALUES (1, ST_linefromtext(concat('linestring', '$linestr'))); + +ALTER TABLE t1 ROW_FORMAT = DYNAMIC, KEY_BLOCK_SIZE=0, ALGORITHM=INPLACE; + +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/innodb-table-online.test b/mysql-test/suite/innodb/t/innodb-table-online.test index 1bb4b686b56..538e4b68762 100644 --- a/mysql-test/suite/innodb/t/innodb-table-online.test +++ b/mysql-test/suite/innodb/t/innodb-table-online.test @@ -358,12 +358,10 @@ SET @old_sql_mode = @@sql_mode; # NULL -> NOT NULL only allowed INPLACE if strict sql_mode is on. # And adding a PRIMARY KEY will also add NOT NULL implicitly! SET @@sql_mode = 'STRICT_TRANS_TABLES'; ---error ER_INVALID_USE_OF_NULL +--error ER_DUP_ENTRY ALTER TABLE t1 DROP COLUMN c22f, DROP PRIMARY KEY, ADD PRIMARY KEY c3p5(c3(5)), ALGORITHM = INPLACE; ---error ER_INVALID_USE_OF_NULL -ALTER TABLE t1 MODIFY c3 CHAR(255) NOT NULL; SET @@sql_mode = @old_sql_mode; UPDATE t1 SET c3=LEFT(CONCAT(c1,REPEAT('foo',c1)),255) WHERE c3 IS NULL; @@ -397,6 +395,7 @@ ROLLBACK; --echo # session con1 connection con1; ALTER TABLE t1 MODIFY c3 CHAR(255) NOT NULL; + SET DEBUG_SYNC = 'row_log_table_apply1_before SIGNAL c3p5_created WAIT_FOR ins_done'; --send ALTER TABLE t1 DROP PRIMARY KEY, DROP COLUMN c22f, diff --git a/mysql-test/suite/innodb/t/innodb_bug13510739.test b/mysql-test/suite/innodb/t/innodb_bug13510739.test index d2193996d68..f10bcd8e272 100644 --- a/mysql-test/suite/innodb/t/innodb_bug13510739.test +++ b/mysql-test/suite/innodb/t/innodb_bug13510739.test @@ -2,10 +2,6 @@ # Bug#13510739 63775: SERVER CRASH ON HANDLER READ NEXT AFTER DELETE RECORD. 
# -if (`select plugin_auth_version < "5.6.15" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB before 5.6.15 -} -- source include/have_innodb.inc CREATE TABLE bug13510739 (c INTEGER NOT NULL, PRIMARY KEY (c)) ENGINE=INNODB; diff --git a/mysql-test/suite/innodb/t/innodb_bug27216817.test b/mysql-test/suite/innodb/t/innodb_bug27216817.test new file mode 100644 index 00000000000..a93932b4a04 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_bug27216817.test @@ -0,0 +1,28 @@ +# +# BUG#27216817: INNODB: FAILING ASSERTION: +# PREBUILT->TABLE->N_MYSQL_HANDLES_OPENED == 1 +# + +source include/have_innodb.inc; +create table t1 (a int not null, b int not null) engine=innodb; +insert t1 values (1,2),(3,4); + +lock table t1 write, t1 tr read; +flush status; +alter table t1 add primary key (b); +show status like 'Handler_read_rnd_next'; +unlock tables; +alter table t1 drop primary key; + +lock table t1 write; +flush status; +alter table t1 add primary key (b); +show status like 'Handler_read_rnd_next'; +unlock tables; +alter table t1 drop primary key; + +flush status; +alter table t1 add primary key (b); +show status like 'Handler_read_rnd_next'; + +drop table t1; diff --git a/mysql-test/suite/innodb/t/innodb_bug54044.test b/mysql-test/suite/innodb/t/innodb_bug54044.test index 655e629b61b..cfc6f3c3f0a 100644 --- a/mysql-test/suite/innodb/t/innodb_bug54044.test +++ b/mysql-test/suite/innodb/t/innodb_bug54044.test @@ -16,13 +16,3 @@ CREATE TABLE tmp ENGINE = INNODB AS SELECT COALESCE(NULL, NULL, NULL), GREATEST(NULL, NULL), NULL; SHOW CREATE TABLE tmp; DROP TABLE tmp; - -# These 'create table' operations should fail because of -# using NULL datatype - -CREATE TABLE t1 (a VARCHAR(3)) ENGINE=InnoDB; -INSERT INTO t1 VALUES ('foo'),('bar'); -FLUSH TABLES; ---error 1005 -CREATE TEMPORARY TABLE tmp ENGINE=InnoDB AS SELECT VALUE(a) FROM t1; -DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/instant_alter.test b/mysql-test/suite/innodb/t/instant_alter.test index d95f412fb37..c414b92f713 100644 --- a/mysql-test/suite/innodb/t/instant_alter.test +++ b/mysql-test/suite/innodb/t/instant_alter.test @@ -311,6 +311,14 @@ INSERT INTO t1 SET a='a'; SELECT * FROM t1; DROP TABLE t1; +# MDEV-16065 Assertion failed in btr_pcur_restore_position_func on UPDATE +eval CREATE TABLE t1 (a INT, b VARCHAR(8), PRIMARY KEY(b,a)) $engine; +INSERT INTO t1 VALUES (1,'foo'); +ALTER TABLE t1 ADD COLUMN c INT; +UPDATE t1 SET c = 1; +UPDATE t1 SET c = 2; +DROP TABLE t1; + dec $format; } disconnect analyze; diff --git a/mysql-test/suite/innodb/t/instant_alter_debug.test b/mysql-test/suite/innodb/t/instant_alter_debug.test index 69aab6e2fc1..9d85d281361 100644 --- a/mysql-test/suite/innodb/t/instant_alter_debug.test +++ b/mysql-test/suite/innodb/t/instant_alter_debug.test @@ -1,7 +1,7 @@ --source include/have_innodb.inc --source include/have_debug.inc --source include/have_debug_sync.inc ---source include/have_innodb.inc + SET @save_frequency= @@GLOBAL.innodb_purge_rseg_truncate_frequency; SET GLOBAL innodb_purge_rseg_truncate_frequency=1; @@ -38,18 +38,9 @@ SELECT * FROM t3; CREATE TABLE t4 (pk INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t4 VALUES (0); ALTER TABLE t4 ADD COLUMN b INT; -SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS +SELECT COUNT(*)>0 FROM INFORMATION_SCHEMA.COLUMNS LEFT JOIN t4 ON (NUMERIC_SCALE = pk); -SET DEBUG_SYNC='innodb_inplace_alter_table_enter SIGNAL enter WAIT_FOR delete'; ---send ALTER TABLE t4 ADD COLUMN c INT; -connect (dml,localhost,root,,); -SET DEBUG_SYNC='now 
WAIT_FOR enter'; -DELETE FROM t4; ---source include/wait_all_purged.inc -SET DEBUG_SYNC='now SIGNAL delete'; -connection default; -reap; CREATE TABLE t5 (i INT, KEY(i)) ENGINE=InnoDB; INSERT INTO t5 VALUES (-42); @@ -72,7 +63,7 @@ ALTER TABLE t8 ADD c CHAR(3); SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL rebuilt WAIT_FOR dml'; --send ALTER TABLE t8 FORCE; -connection dml; +connect (dml,localhost,root,,); SET DEBUG_SYNC='now WAIT_FOR rebuilt'; BEGIN; INSERT INTO t8 SET i=1; @@ -178,4 +169,52 @@ UPDATE t11 SET c22 = 1; --source include/wait_all_purged.inc DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11; +--echo # +--echo # MDEV-15060 Assertion in row_log_table_apply_op after instant ADD +--echo # when the table is emptied during subsequent ALTER TABLE +--echo # + +CREATE TABLE t1 (a INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES (NULL); +ALTER TABLE t1 ADD COLUMN b INT NOT NULL; +connect stop_purge,localhost,root; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connect ddl,localhost,root,,test; +DELETE FROM t1; +SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL copied WAIT_FOR logged'; +send ALTER TABLE t1 FORCE; +connection default; +SET DEBUG_SYNC='now WAIT_FOR copied'; + +BEGIN; +INSERT INTO t1 SET b=1; +ROLLBACK; +disconnect stop_purge; + +# Wait for purge to empty the table. +# (This is based on wait_all_purged.inc, but there are 2 transactions +# from the pending ALTER TABLE t1 FORCE.) + +let $wait_counter= 300; +while ($wait_counter) +{ + --replace_regex /.*History list length ([0-9]+).*/\1/ + let $remaining= `SHOW ENGINE INNODB STATUS`; + if ($remaining == 'InnoDB 2') + { + let $wait_counter= 0; + } + if ($wait_counter) + { + real_sleep 0.1; + dec $wait_counter; + } +} +echo $remaining transactions not purged; + +SET DEBUG_SYNC='now SIGNAL logged'; +disconnect ddl; +DROP TABLE t1; +SET DEBUG_SYNC='RESET'; + SET GLOBAL innodb_purge_rseg_truncate_frequency = @save_frequency; diff --git a/mysql-test/suite/innodb/t/rename_table_debug.test b/mysql-test/suite/innodb/t/rename_table_debug.test index df4331cf8bb..c75f2fbca10 100644 --- a/mysql-test/suite/innodb/t/rename_table_debug.test +++ b/mysql-test/suite/innodb/t/rename_table_debug.test @@ -3,6 +3,7 @@ --source include/have_debug_sync.inc --source include/not_embedded.inc +FLUSH TABLES; LET $datadir= `SELECT @@datadir`; CREATE TABLE t1 (a SERIAL, b INT, c INT, d INT) ENGINE=InnoDB; diff --git a/mysql-test/suite/innodb/t/stored_fk.test b/mysql-test/suite/innodb/t/stored_fk.test new file mode 100644 index 00000000000..b9c7c934555 --- /dev/null +++ b/mysql-test/suite/innodb/t/stored_fk.test @@ -0,0 +1,94 @@ +--source include/have_innodb.inc + +--echo # Create statement with FK on base column of stored column +--error ER_CANT_CREATE_TABLE +create table t1(f1 int, f2 int as(f1) stored, + foreign key(f1) references t2(f1) on delete cascade)engine=innodb; + +--echo # adding new stored column during alter table copy operation. +create table t1(f1 int primary key) engine=innodb; +create table t2(f1 int not null, f2 int as (f1) virtual, + foreign key(f1) references t1(f1) on update cascade)engine=innodb; + +# MySQL 5.7 would refuse this +#--error ER_ERROR_ON_RENAME +alter table t2 add column f3 int as (f1) stored, add column f4 int as (f1) virtual; +show create table t2; +drop table t2; + +--echo # adding foreign key constraint for base columns during alter copy. 
+create table t2(f1 int not null, f2 int as (f1) stored) engine=innodb; +# MySQL 5.7 would refuse this +alter table t2 add foreign key(f1) references t1(f1) on update cascade, algorithm=copy; +show create table t2; +drop table t2; + +--echo # adding foreign key constraint for base columns during online alter. +create table t2(f1 int not null, f2 int as (f1) stored) engine=innodb; +set foreign_key_checks = 0; +--error 138 +alter table t2 add foreign key(f1) references t1(f1) on update cascade, algorithm=inplace; +drop table t2; + +--echo # adding stored column via online alter. +create table t2(f1 int not null, + foreign key(f1) references t1(f1) on update cascade)engine=innodb; +--error ER_ALTER_OPERATION_NOT_SUPPORTED +alter table t2 add column f2 int as (f1) stored, algorithm=inplace; +drop table t2, t1; + +--echo # +--echo # BUG#26731689 FK ON TABLE WITH GENERATED COLS: ASSERTION POS < N_DEF +--echo # + +CREATE TABLE s (a INT, b INT GENERATED ALWAYS AS (0) STORED, c INT, + d INT GENERATED ALWAYS AS (0) VIRTUAL, e INT) ENGINE=innodb; + +CREATE TABLE t (a INT) ENGINE=innodb; + +# This would fail. No corresponding index +--error ER_FK_NO_INDEX_PARENT +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (e) REFERENCES t(a) ON UPDATE SET null; + +ALTER TABLE t ADD PRIMARY KEY(a); + +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (e) REFERENCES t(a) ON UPDATE SET null; + +DROP TABLE s,t; + +CREATE TABLE s (a INT GENERATED ALWAYS AS (0) VIRTUAL, + b INT GENERATED ALWAYS AS (0) STORED, c INT) ENGINE=innodb; + +CREATE TABLE t (a INT) ENGINE=innodb; + +# This would fail. No corresponding index +--error ER_FK_NO_INDEX_PARENT +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (c) REFERENCES t(a) ON UPDATE SET null; + +ALTER TABLE t ADD PRIMARY KEY(a); + +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (c) REFERENCES t(a) ON UPDATE SET null; + +DROP TABLE s,t; + +CREATE TABLE s (a INT, b INT GENERATED ALWAYS AS (0) STORED) ENGINE=innodb; + +CREATE TABLE t (a INT PRIMARY KEY) ENGINE=innodb; + +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (a) REFERENCES t(a) ON UPDATE SET null; + +DROP TABLE s,t; + +CREATE TABLE s (a INT, b INT) ENGINE=innodb; + +CREATE TABLE t (a INT) ENGINE=innodb; + +# This would fail. 
No corresponding index +--error ER_FK_NO_INDEX_PARENT +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (a) REFERENCES t(a) ON UPDATE SET null; + +ALTER TABLE t ADD PRIMARY KEY(a); + +ALTER TABLE s ADD CONSTRAINT c FOREIGN KEY (a) REFERENCES t(a) ON UPDATE SET null; + +DROP TABLE s,t; diff --git a/mysql-test/suite/innodb/t/temporary_table.test b/mysql-test/suite/innodb/t/temporary_table.test index 4184daaf064..52334b05999 100644 --- a/mysql-test/suite/innodb/t/temporary_table.test +++ b/mysql-test/suite/innodb/t/temporary_table.test @@ -21,6 +21,7 @@ call mtr.add_suppression("InnoDB: Plugin initialization aborted"); call mtr.add_suppression("innodb_temporary and innodb_system file names seem to be the same"); call mtr.add_suppression("Could not create the shared innodb_temporary"); call mtr.add_suppression("InnoDB: syntax error in file path"); +call mtr.add_suppression("InnoDB: Unable to parse innodb_temp_data_file_path="); --enable_query_log let $MYSQL_TMP_DIR = `select @@tmpdir`; @@ -464,3 +465,23 @@ ALTER TABLE t1 ROW_FORMAT = DYNAMIC; set innodb_strict_mode = ON; ALTER TABLE t1 ADD COLUMN j INT; SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TEMPORARY TABLE t1(f1 INT, KEY(f1)) ENGINE=InnoDB; +INSERT INTO t1 VALUES(NULL); +UPDATE t1 SET f1 = 0; +START TRANSACTION; +UPDATE t1 SET f1 = 4; +UPDATE t1 SET f1 = 0; +ROLLBACK; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # MDEV-15874 CREATE TABLE creates extra transaction +--echo # +call mtr.add_suppression("Warning 150 Create table `mysqld.1`.`t1` with foreign key constraint failed. Temporary tables can't have foreign key constraints.*"); +SET FOREIGN_KEY_CHECKS = 0; +--error ER_CANT_CREATE_TABLE +CREATE TEMPORARY TABLE t1(f1 INT NOT NULL, + FOREIGN KEY(f1) REFERENCES t0(f1))ENGINE=InnoDB; diff --git a/mysql-test/suite/innodb/t/tmpdir.test b/mysql-test/suite/innodb/t/tmpdir.test index 98517a97c7a..b43a02363dc 100644 --- a/mysql-test/suite/innodb/t/tmpdir.test +++ b/mysql-test/suite/innodb/t/tmpdir.test @@ -1,11 +1,6 @@ --source include/have_innodb.inc --source include/count_sessions.inc -if (`select plugin_auth_version <= "5.6.28-MariaDB-76.1" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in XtraDB as of 5.6.28-MariaDB-76.1 or earlier -} - --echo # --echo # Bug #19183565 CREATE DYNAMIC INNODB_TMPDIR VARIABLE TO CONTROL --echo # WHERE INNODB WRITES TEMP FILES diff --git a/mysql-test/suite/innodb/t/undo_log.test b/mysql-test/suite/innodb/t/undo_log.test index c1a98793d91..1f4cf9702d9 100644 --- a/mysql-test/suite/innodb/t/undo_log.test +++ b/mysql-test/suite/innodb/t/undo_log.test @@ -137,3 +137,17 @@ ROLLBACK; SELECT COUNT(*) FROM test_tab; CHECK TABLE test_tab; DROP TABLE test_tab; + +SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency; +SET GLOBAL innodb_purge_rseg_truncate_frequency = 1; +CREATE TEMPORARY TABLE t2(i INT)ENGINE=InnoDB; +CREATE TABLE t1(i TEXT NOT NULL) ENGINE=INNODB; +BEGIN; +INSERT t1 SET i=REPEAT('1234567890',840); +UPDATE t1 SET i=''; +INSERT INTO t2 VALUES(2); +ROLLBACK; +--source include/wait_all_purged.inc +DROP TABLE t1; +DROP TABLE t2; +SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency; diff --git a/mysql-test/suite/innodb_fts/r/innodb-fts-basic.result b/mysql-test/suite/innodb_fts/r/basic.result similarity index 87% rename from mysql-test/suite/innodb_fts/r/innodb-fts-basic.result rename to mysql-test/suite/innodb_fts/r/basic.result index fe767476fe6..d96127fbc34 100644 --- a/mysql-test/suite/innodb_fts/r/innodb-fts-basic.result +++ 
b/mysql-test/suite/innodb_fts/r/basic.result @@ -1,3 +1,10 @@ +CREATE TEMPORARY TABLE articles ( +id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, +title VARCHAR(200), +body TEXT, +FULLTEXT (title,body) +) ENGINE=InnoDB; +ERROR HY000: Cannot create FULLTEXT index on temporary InnoDB table CREATE TABLE articles ( id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, title VARCHAR(200), @@ -257,3 +264,37 @@ WHERE MATCH (title,body) AGAINST ('"more test proximity"' IN BOOLEAN MODE); id title body drop table articles; +# +# Bug #22679185 INVALID INNODB FTS DOC ID DURING INSERT +# +create table t1 (f1 int not null primary key, f2 varchar(100), +FTS_DOC_ID bigint(20) unsigned not null, +unique key `FTS_DOC_ID_INDEX` (`FTS_DOC_ID`), +fulltext key (f2))engine=innodb; +insert into t1 values(1, "This is the first record", 20000); +insert into t1 values(2, "This is the second record", 40000); +select FTS_DOC_ID from t1; +FTS_DOC_ID +20000 +40000 +drop table t1; +create table t1 (f1 int not null primary key, f2 varchar(100), +FTS_DOC_ID bigint(20) unsigned not null auto_increment, +unique key `FTS_DOC_ID_INDEX` (`FTS_DOC_ID`), +fulltext key (f2))engine=innodb; +set auto_increment_increment = 65535; +insert into t1(f1, f2) values(1, "This is the first record"); +insert into t1(f1, f2) values(2, "This is the second record"); +insert into t1(f1, f2) values(3, "This is the third record"); +select FTS_DOC_ID from t1; +FTS_DOC_ID +1 +65536 +131071 +drop table t1; +call mtr.add_suppression("\\[ERROR\\] InnoDB: Doc ID 20030101000000 is too big. Its difference with largest used Doc ID 0 cannot exceed or equal to 65535"); +CREATE TABLE t1 (FTS_DOC_ID BIGINT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, +title VARCHAR(200), FULLTEXT(title)) ENGINE=InnoDB; +INSERT INTO t1 VALUES (NULL, NULL), (20030101000000, 20030102000000); +ERROR HY000: Invalid InnoDB FTS Doc ID +DROP TABLE t1; diff --git a/mysql-test/suite/innodb_fts/r/create.result b/mysql-test/suite/innodb_fts/r/create.result index 615f0368599..f3650fb3dc9 100644 --- a/mysql-test/suite/innodb_fts/r/create.result +++ b/mysql-test/suite/innodb_fts/r/create.result @@ -9,8 +9,6 @@ INSERT INTO t SET t=REPEAT(_utf8 0xefbc90,84); INSERT INTO t SET t=REPEAT('befor',17); INSERT INTO t SET t='BeforeTheIndexCreation'; CREATE FULLTEXT INDEX ft ON t(t); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t SET t='this was inserted after creating the index'; INSERT INTO t SET t=REPEAT(_utf8 0xefbc91,84); INSERT INTO t SET t=REPEAT('after',17); @@ -90,8 +88,6 @@ INSERT INTO t SET t=REPEAT(_utf8 0xefbc90,84); INSERT INTO t SET t=REPEAT('befor',17); INSERT INTO t SET t='BeforeTheIndexCreation'; CREATE FULLTEXT INDEX ft ON t(t); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t SET t='this was inserted after creating the index'; INSERT INTO t SET t=REPEAT(_utf8 0xefbc91,84); INSERT INTO t SET t=REPEAT('after',17); diff --git a/mysql-test/suite/innodb_fts/r/fulltext.result b/mysql-test/suite/innodb_fts/r/fulltext.result index fcf196a0631..0e30dd0be05 100644 --- a/mysql-test/suite/innodb_fts/r/fulltext.result +++ b/mysql-test/suite/innodb_fts/r/fulltext.result @@ -277,8 +277,6 @@ PRIMARY KEY (id), KEY ind5 (title) ) ENGINE = InnoDB; CREATE FULLTEXT INDEX ft1 ON t1(title); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID insert into t1 (title) values ('this is a test'); select * from t1 where match title against ('test' in boolean mode); id title diff --git 
a/mysql-test/suite/innodb_fts/r/fulltext2.result b/mysql-test/suite/innodb_fts/r/fulltext2.result index 2aa7d2a6754..9376d088244 100644 --- a/mysql-test/suite/innodb_fts/r/fulltext2.result +++ b/mysql-test/suite/innodb_fts/r/fulltext2.result @@ -1,4 +1,3 @@ -DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( i int(10) unsigned not null auto_increment primary key, a varchar(255) not null, diff --git a/mysql-test/suite/innodb_fts/r/fulltext_table_evict.result b/mysql-test/suite/innodb_fts/r/fulltext_table_evict.result new file mode 100644 index 00000000000..d9d329aa6c0 --- /dev/null +++ b/mysql-test/suite/innodb_fts/r/fulltext_table_evict.result @@ -0,0 +1,19 @@ +# +# Bug Bug #27304661 MYSQL CRASH DOING SYNC INDEX ] +# [FATAL] INNODB: SEMAPHORE WAIT HAS LASTED > 600 +# +CREATE TABLE t1 ( +id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, +f1 TEXT(500), +FULLTEXT idx (f1) +) ENGINE=InnoDB; +insert into t1 (f1) values ('fjdhfsjhf'),('dhjfhjshfj'),('dhjafjhfj'); +set @save_table_definition_cache=@@global.table_definition_cache; +set @save_table_open_cache=@@global.table_open_cache; +set global table_definition_cache=400; +set global table_open_cache= 1024; +SET @save_dbug = @@GLOBAL.debug_dbug; +SET GLOBAL DEBUG_DBUG="+d,crash_if_fts_table_is_evicted"; +set @@global.table_definition_cache=@save_table_definition_cache; +set @@global.table_open_cache=@save_table_open_cache; +drop table t1; diff --git a/mysql-test/suite/innodb_fts/r/fulltext_var.result b/mysql-test/suite/innodb_fts/r/fulltext_var.result index 9fe586210c8..ee4ba98d6eb 100644 --- a/mysql-test/suite/innodb_fts/r/fulltext_var.result +++ b/mysql-test/suite/innodb_fts/r/fulltext_var.result @@ -1,4 +1,3 @@ -drop table if exists t1; show variables like "ft\_%"; Variable_name Value ft_boolean_syntax + -><()~*:""&| diff --git a/mysql-test/suite/innodb_fts/r/innodb-fts-ddl.result b/mysql-test/suite/innodb_fts/r/innodb-fts-ddl.result index cd7d8f03923..6ee5f9f4322 100644 --- a/mysql-test/suite/innodb_fts/r/innodb-fts-ddl.result +++ b/mysql-test/suite/innodb_fts/r/innodb-fts-ddl.result @@ -10,9 +10,9 @@ INSERT INTO fts_test (title,body) VALUES ('1001 MySQL Tricks','1. Never run mysqld as root. 2. ...'), ('MySQL vs. YourSQL','In the following database comparison ...'), ('MySQL Security','When configured properly, MySQL ...'); -CREATE FULLTEXT INDEX idx on fts_test (title, body); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID +ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY; +ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE +ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=INPLACE; SELECT * FROM fts_test WHERE MATCH (title, body) AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE); id title body @@ -26,7 +26,7 @@ INSERT INTO fts_test (title,body) VALUES ('1001 MySQL Tricks','1. Never run mysqld as root. 2. ...'), ('MySQL vs. 
YourSQL','In the following database comparison ...'), ('MySQL Security','When configured properly, MySQL ...'); -CREATE FULLTEXT INDEX idx on fts_test (title, body); +ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY; SELECT * FROM fts_test WHERE MATCH (title, body) AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE); id title body @@ -68,7 +68,6 @@ FTS_DOC_ID BIGINT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, title VARCHAR(200), body TEXT ) ENGINE=InnoDB; -create unique index FTS_DOC_ID_INDEX on fts_test(FTS_DOC_ID); INSERT INTO fts_test (title,body) VALUES ('MySQL Tutorial','DBMS stands for DataBase ...') , ('How To Use MySQL Well','After you went through a ...'), @@ -78,7 +77,7 @@ INSERT INTO fts_test (title,body) VALUES ('MySQL Security','When configured properly, MySQL ...'); CREATE FULLTEXT INDEX idx on fts_test (title, body) LOCK=NONE; ERROR 0A000: LOCK=NONE is not supported. Reason: Fulltext index creation requires a lock. Try LOCK=SHARED -CREATE FULLTEXT INDEX idx on fts_test (title, body); +ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY; ALTER TABLE fts_test ROW_FORMAT=REDUNDANT, LOCK=NONE; ERROR 0A000: LOCK=NONE is not supported. Reason: InnoDB presently supports one FULLTEXT index creation at a time. Try LOCK=SHARED ALTER TABLE fts_test ROW_FORMAT=REDUNDANT; @@ -95,7 +94,6 @@ FTS_DOC_ID title body 1 MySQL Tutorial DBMS stands for DataBase ... 3 Optimizing MySQL In this tutorial we will show ... drop index idx on fts_test; -drop index FTS_DOC_ID_INDEX on fts_test; CREATE FULLTEXT INDEX idx on fts_test (title, body); SELECT * FROM fts_test WHERE MATCH (title, body) AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE); @@ -202,7 +200,7 @@ FTS_DOC_ID title body DROP TABLE articles; create table articles(`FTS_DOC_ID` serial, `col32` timestamp not null,`col115` text) engine=innodb; -create fulltext index `idx5` on articles(`col115`) ; +create fulltext index `idx5` on articles(`col115`) ; alter ignore table articles add primary key (`col32`) ; drop table articles; CREATE TABLE articles ( @@ -218,16 +216,15 @@ INSERT INTO articles VALUES (5, 'MySQL vs. YourSQL','In the following database comparison ...'), (6, 'MySQL Security','When configured properly, MySQL ...'); CREATE FULLTEXT INDEX idx on articles (title, body); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID DROP INDEX idx ON articles; CREATE UNIQUE INDEX idx2 ON articles(id); CREATE FULLTEXT INDEX idx on articles (title, body); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SELECT * FROM articles WHERE MATCH (title, body) AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE); id title body 1 MySQL Tutorial DBMS stands for DataBase ... 3 Optimizing MySQL In this tutorial we will show ... DROP TABLE articles; +CREATE TABLE t1 (a VARCHAR(3)) ENGINE=InnoDB; +ALTER TABLE t1 ADD FULLTEXT KEY(a), ADD COLUMN b VARCHAR(3), ADD FULLTEXT KEY(b); +DROP TABLE t1; diff --git a/mysql-test/suite/innodb_fts/r/innodb-fts-fic.result b/mysql-test/suite/innodb_fts/r/innodb-fts-fic.result index 731abad9198..36d3826be59 100644 --- a/mysql-test/suite/innodb_fts/r/innodb-fts-fic.result +++ b/mysql-test/suite/innodb_fts/r/innodb-fts-fic.result @@ -13,8 +13,6 @@ INSERT INTO articles (title,body) VALUES ('MySQL vs. 
YourSQL','In the following database comparison ...'), ('MySQL Security','When configured properly, MySQL ...'); CREATE FULLTEXT INDEX idx on articles (title, body); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SELECT * FROM articles WHERE MATCH (title, body) AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE); id title body @@ -105,8 +103,6 @@ INSERT INTO articles (title,body) VALUES ('MySQL vs. YourSQL','In the following database comparison ...'), ('MySQL Security','When configured properly, MySQL ...'); CREATE FULLTEXT INDEX idx on articles (title); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID CREATE FULLTEXT INDEX idx2 on articles (body); SELECT * FROM articles WHERE MATCH (title, body) AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE); diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_misc.result b/mysql-test/suite/innodb_fts/r/innodb_fts_misc.result index 74ade61c940..1322867551f 100644 --- a/mysql-test/suite/innodb_fts/r/innodb_fts_misc.result +++ b/mysql-test/suite/innodb_fts/r/innodb_fts_misc.result @@ -9,8 +9,6 @@ INSERT INTO t1 (a,b) VALUES ('How To Use MySQL Well','After you went through a ...'), ('Optimizing MySQL','In this tutorial we will show ...'); ALTER TABLE t1 ADD FULLTEXT INDEX idx (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -161,8 +159,6 @@ a VARCHAR(200), b TEXT ) ENGINE = InnoDB; CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 (a,b) VALUES ('MySQL Tutorial','DBMS stands for DataBase ...') , ('How To Use MySQL Well','After you went through a ...'), @@ -340,8 +336,6 @@ a VARCHAR(200), b TEXT ) ENGINE = InnoDB; CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 (a,b) VALUES ('MySQL from Tutorial','DBMS stands for DataBase ...'); INSERT INTO t1 (a,b) VALUES @@ -420,8 +414,6 @@ set names utf8; "----------Test1---------" create table t50 (s1 varchar(60) character set utf8 collate utf8_bin) engine = innodb; create fulltext index i on t50 (s1); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID insert into t50 values ('ABCDE'),('FGHIJ'),('KLMNO'),('VÃÆ·Wİ'); select * from t50 where match(s1) against ('VÃÆ·Wİ'); s1 @@ -431,8 +423,6 @@ drop table t50; create table t50 (s1 int unsigned primary key auto_increment, s2 varchar(60) character set utf8) engine = innodb; create fulltext index i on t50 (s2); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID insert into t50 (s2) values ('FGHIJ'),('KLMNO'),('VÃÆ·Wİ'),('ABCDE'); select * from t50 order by s2; s1 s2 @@ -445,8 +435,6 @@ drop table t50; create table t50 (id int unsigned primary key auto_increment, s2 varchar(60) character set utf8) engine = innodb; create fulltext index i on t50 (s2); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID insert into t50 (s2) values ('FGHIJ'),('KLMNO'),('VÃÆ·Wİ'),('ABCDE'); set @@autocommit=0; update t50 set s2 = lower(s2); @@ -470,8 +458,6 @@ set @@autocommit=1; create table t50 (id int unsigned primary key auto_increment, s2 varchar(60) character set utf8) engine = innodb; create fulltext index i on t50 (s2); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID insert into t50 (s2) values ('FGHIJ'),('KLMNO'),('VÃÆ·Wİ'),('ABCD*'); select * from t50 where match(s2) against ('abcd*' in natural language mode); 
@@ -507,8 +493,6 @@ INSERT INTO t1 (a,b) VALUES ('aab MySQL vs. YourSQL','In the following database comparison ...'), ('aaa MySQL Security','When configured properly, MySQL ...'); ALTER TABLE t1 ADD FULLTEXT INDEX idx (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SELECT * FROM t1 ORDER BY MATCH(a,b) AGAINST ('aac') DESC; id a b 3 aac Optimizing MySQL In this tutorial we will show ... @@ -556,8 +540,6 @@ INSERT INTO t1 (a,b) VALUES ('How To Use MySQL Well','After you went through a q ...abdd'), ('Optimizing MySQL','In this tutorial we will show ...abed'); ALTER TABLE t1 ADD FULLTEXT INDEX idx (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -637,8 +619,6 @@ DROP TABLE t2,t1; set names utf8; CREATE TABLE t1 (s1 INT, s2 VARCHAR(200) CHARACTER SET UTF8 COLLATE UTF8_SPANISH_CI) ENGINE = InnoDB; CREATE FULLTEXT INDEX i ON t1 (s2); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 VALUES (1,'aaCen'),(2,'aaCha'),(3,'aaCio'),(4,'aaçen'),(5,'aaçha'),(6,'aaçio'); SELECT * FROM t1 WHERE MATCH(s2) AGAINST ('aach*' IN BOOLEAN MODE); s1 s2 @@ -656,8 +636,6 @@ DROP TABLE t1; "----------Test14---------" CREATE TABLE t1(s1 INT , s2 VARCHAR(100) CHARACTER SET sjis) ENGINE = InnoDB; CREATE FULLTEXT INDEX i ON t1 (s2); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 VALUES (1,'ペペペ'),(2,'テテテ'),(3,'ルルル'),(4,'ã‚°ã‚°ã‚°'); DROP TABLE t1; "----------Test15---------" @@ -675,8 +653,6 @@ Warnings: Note 1051 Unknown table 'test.t2' CREATE TABLE t2 (s1 VARCHAR(60) CHARACTER SET UTF8 COLLATE UTF8_POLISH_CI) ENGINE = InnoDB; CREATE FULLTEXT INDEX i ON t2 ( s1); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t2 VALUES ('a'),('b'),('c'),('d'),('ÅÅÅÅ'),('LLLL'),(NULL),('ÅÅÅÅ ÅÅÅÅ'),('LLLLLLLL'); SELECT * FROM t2 WHERE MATCH(s1) AGAINST ('LLLL' COLLATE UTF8_UNICODE_520_CI); @@ -686,8 +662,6 @@ DROP TABLE t1,t2; "----------Test16---------" CREATE TABLE t1 (s1 INT, s2 VARCHAR(50) CHARACTER SET UTF8) ENGINE = InnoDB; CREATE FULLTEXT INDEX i ON t1(s2); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 VALUES (2, 'ÄŸÄ— DaÅ›i p '); SELECT * FROM t1 WHERE MATCH(s2) AGAINST ('+p +"ÄŸÄ— DaÅ›i*"' IN BOOLEAN MODE); s1 s2 @@ -696,8 +670,6 @@ DROP TABLE t1; CREATE TABLE t1 ( id INT , char_column VARCHAR(60) CHARACTER SET UTF8) ENGINE = InnoDB; INSERT INTO t1 VALUES (1,'İóëɠ'); CREATE FULLTEXT INDEX i ON t1 (char_column); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SELECT * FROM t1 WHERE MATCH(char_column) AGAINST ('"İóëɠ"' IN BOOLEAN MODE); id char_column 1 İóëɠ @@ -707,8 +679,6 @@ CREATE TABLE t1 ( id INT , char_column VARCHAR(60) CHARACTER SET UTF32, char_col INSERT INTO t1 (char_column) VALUES ('abcde'),('fghij'),('klmno'),('qrstu'); UPDATE t1 SET char_column2 = char_column; CREATE FULLTEXT INDEX i ON t1 (char_column2); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SELECT * FROM t1 WHERE MATCH(char_column) AGAINST ('abc*' IN BOOLEAN MODE); ERROR HY000: Can't find FULLTEXT index matching the column list DROP TABLE t1; @@ -716,18 +686,14 @@ DROP TABLE t1; CREATE TABLE t1 ( id INT , char_column VARCHAR(60) CHARACTER SET UTF8) ENGINE = InnoDB; INSERT INTO t1 VALUES (1,'aaa'),(2,'bbb'),(3,'ccc'); CREATE FULLTEXT INDEX i ON t1 (char_column); -Warnings: -Warning 124 InnoDB rebuilding table to add column 
FTS_DOC_ID HANDLER t1 OPEN; HANDLER t1 READ i = ('aaa'); -id char_column +ERROR HY000: FULLTEXT index `i` does not support this operation DROP TABLE t1; "----------Test25---------" CREATE TABLE t1 ( id INT , char_column VARCHAR(60) CHARACTER SET UTF8 COLLATE UTF8_CROATIAN_CI) ENGINE=InnoDB; INSERT INTO t1 VALUES (1,'LJin'),(2,'ljin'),(3,'lmin'),(4,'LJLJLJLJLJ'); CREATE FULLTEXT INDEX i ON t1 (char_column); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SELECT count(*) FROM t1 WHERE MATCH (char_column) AGAINST ('lj*' IN BOOLEAN MODE); count(*) 3 @@ -962,8 +928,6 @@ INSERT INTO t1 (a,b) VALUES ('How To Use MySQL Well','After you went through a ...'), ('Optimizing MySQL','In this tutorial we will show ...'); ALTER TABLE t1 ADD FULLTEXT INDEX idx (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -1037,8 +1001,6 @@ INSERT INTO t1 VALUES (2, 'How To Use MySQL Well','After you went through a ...'), (3, 'Optimizing MySQL','In this tutorial we will show ...'); ALTER TABLE t1 ADD FULLTEXT INDEX idx (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID ALTER TABLE t1 ADD UNIQUE INDEX (`id`); SELECT id FROM t1 WHERE MATCH (a,b) AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE); @@ -1069,8 +1031,6 @@ INSERT INTO t1 VALUES (2, 'How To Use MySQL Well','After you went through a ...'), (3, 'Optimizing MySQL','In this tutorial we will show ...'); ALTER TABLE t1 ADD UNIQUE INDEX (`id`), ADD FULLTEXT INDEX idx (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SELECT id FROM t1 WHERE MATCH (a,b) AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE); id @@ -1164,8 +1124,6 @@ INSERT INTO t1 (a,b) VALUES ('How To Use MySQL Well','After you went through a ...'), ('Optimizing MySQL','In this tutorial we will show ...'); ALTER TABLE t1 ADD FULLTEXT INDEX idx (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID COMMIT WORK AND CHAIN; INSERT INTO t1 (a,b) VALUES ('1001 MySQL Tricks','1. Never run mysqld as root. 2. 
...'), diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result index b76784d4ffd..aec3d7f777d 100644 --- a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result +++ b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result @@ -473,8 +473,6 @@ INSERT INTO t1 (a,b) VALUES ('How To Use MySQL Well','After you went through a ...'), ('Optimizing MySQL','In this tutorial we will show ...'); ALTER TABLE t1 ADD FULLTEXT INDEX idx (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -666,8 +664,6 @@ INSERT INTO t1 (a,b) VALUES ('Trial version','query performace @1255 minute on 2.1Hz Memory 2GB...') , ('when To Use MySQL Well','for free faq mail@xyz.com ...'); CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SELECT * FROM t1 WHERE MATCH(a,b) AGAINST ("вредит χωÏá½¶Ï‚") ORDER BY id; id a b 1 Я могу еÑть Ñтекло оно мне не вредит diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_multiple_index.result b/mysql-test/suite/innodb_fts/r/innodb_fts_multiple_index.result index b0f7d7727d6..a6dfc2d4b4a 100644 --- a/mysql-test/suite/innodb_fts/r/innodb_fts_multiple_index.result +++ b/mysql-test/suite/innodb_fts/r/innodb_fts_multiple_index.result @@ -9,8 +9,6 @@ INSERT INTO t1 (a,b) VALUES ('How To Use MySQL Well','After you went through a ...'), ('Optimizing MySQL','In this tutorial we will show ...'); ALTER TABLE t1 ADD FULLTEXT INDEX idx_1 (a); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID ALTER TABLE t1 ADD FULLTEXT INDEX idx_2 (b); SHOW CREATE TABLE t1; Table Create Table diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_plugin.result b/mysql-test/suite/innodb_fts/r/innodb_fts_plugin.result index b7688e9ef0f..dc71156b7a1 100644 --- a/mysql-test/suite/innodb_fts/r/innodb_fts_plugin.result +++ b/mysql-test/suite/innodb_fts/r/innodb_fts_plugin.result @@ -67,8 +67,6 @@ INSERT INTO articles (title, body) VALUES ('1001 MySQL Tricks','How to use full-text search engine'), ('Go MySQL Tricks','How to use full text search engine'); ALTER TABLE articles ADD FULLTEXT INDEX (title, body) WITH PARSER simple_parser; -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SELECT * FROM articles WHERE MATCH(title, body) AGAINST('mysql'); id title body diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_proximity.result b/mysql-test/suite/innodb_fts/r/innodb_fts_proximity.result index ca9a57dc3e7..d67981e0851 100644 --- a/mysql-test/suite/innodb_fts/r/innodb_fts_proximity.result +++ b/mysql-test/suite/innodb_fts/r/innodb_fts_proximity.result @@ -5,8 +5,6 @@ a VARCHAR(200), b TEXT ) ENGINE= InnoDB; CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 (a,b) VALUES ('MySQL from Tutorial','DBMS stands for DataBase ...') , ('when To Use MySQL Well','After that you went through a ...'), @@ -111,8 +109,6 @@ INSERT INTO t1 (a,b) VALUES ('when To Use MySQL Well','After that you went through a ...'), ('where will Optimizing MySQL','what In this tutorial we will show ...'); CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 (a,b) VALUES ('MySQL Tutorial','request docteam@oraclehelp.com ...') , ('Trial version','query performace @1255 minute on 2.1Hz Memory 2GB...'), @@ -148,8 +144,6 @@ INSERT INTO t1 (a,b,c) VALUES 
(repeat("b", 19000), 'XYZ, long text', 'very lon INSERT INTO t1 (a,b,c) VALUES (repeat("b", 19000), 'XYZ, very little long blob very much blob', 'very long blob'); INSERT INTO t1 (a,b,c) VALUES (repeat("b", 19000),"very 租车 ä¾› blob","new 供需分æžinformation"); CREATE FULLTEXT INDEX idx on t1 (a,b,c); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 (a,b,c) VALUES (repeat("x", 19000), 'new, long text', 'very new blob'); INSERT INTO t1 (a,b,c) VALUES ('interesting, long text', repeat("x", 19000), 'very very good new blob'); SELECT count(*) FROM t1 @@ -191,8 +185,6 @@ b TEXT INSERT INTO t1 (a,b) VALUES ('MySQL from Tutorial','DBMS stands for + DataBase ...'); CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SELECT * FROM t1 WHERE MATCH (a,b) AGAINST ('"stands database"@3' IN BOOLEAN MODE); @@ -208,8 +200,6 @@ c TEXT INSERT INTO t1 (a,b,c) VALUES (repeat("b", 19000), 'XYZ, long text', 'very long blob'); INSERT INTO t1 (a,b,c) VALUES ('XYZ, 租车 very little long blob very much blob', repeat("b", 19000), 'very long but smaller blob'); CREATE FULLTEXT INDEX idx on t1 (a,b,c); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID DELETE FROM t1; INSERT INTO t1 (a,b,c) VALUES (repeat("b", 19000), 'XYZ, long text', 'very long blob'); INSERT INTO t1 (a,b,c) VALUES ('XYZ, 租车 very little long blob is a very much longer blob', repeat("b", 19000), 'this is very long but smaller blob'); diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_result_cache_limit.result b/mysql-test/suite/innodb_fts/r/innodb_fts_result_cache_limit.result index 40a38e62a1e..61eb5294463 100644 --- a/mysql-test/suite/innodb_fts/r/innodb_fts_result_cache_limit.result +++ b/mysql-test/suite/innodb_fts/r/innodb_fts_result_cache_limit.result @@ -4,8 +4,6 @@ a VARCHAR(200), b TEXT ) ENGINE= InnoDB; CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 (a,b) VALUES ('MySQL from Tutorial','DBMS stands for DataBase ...') , ('when To Use MySQL Well','After that you went through a ...'), diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_transaction.result b/mysql-test/suite/innodb_fts/r/innodb_fts_transaction.result index 6f5409f63cd..fc6a1d1afe7 100644 --- a/mysql-test/suite/innodb_fts/r/innodb_fts_transaction.result +++ b/mysql-test/suite/innodb_fts/r/innodb_fts_transaction.result @@ -5,8 +5,6 @@ a VARCHAR(200), b TEXT ) ENGINE = InnoDB STATS_PERSISTENT=0; CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 (a,b) VALUES ('MySQL from Tutorial','DBMS stands for DataBase ...') , ('when To Use MySQL Well','After that you went through a ...'), @@ -128,8 +126,6 @@ a VARCHAR(200), b TEXT ) ENGINE = InnoDB; CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 (a,b) VALUES ('MySQL from Tutorial','DBMS stands for DataBase ...') , ('when To Use MySQL Well','After that you went through a ...'), @@ -241,8 +237,6 @@ a VARCHAR(200), b TEXT ) ENGINE = InnoDB; CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 (a,b) VALUES ('MySQL from Tutorial','DBMS stands for DataBase ...') , ('when To Use MySQL Well','After that you went through a ...'), @@ -328,8 +322,6 @@ INSERT INTO t1 (a,b) VALUES connect con2,localhost,root,,; SET NAMES UTF8; CREATE 
FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID connection con1; SELECT * FROM t1 WHERE MATCH (a,b) @@ -546,8 +538,6 @@ select @@session.tx_isolation; @@session.tx_isolation REPEATABLE-READ CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 (a,b) VALUES ('adding record using session 1','for verifying multiple concurrent transactions'), ('Мога да Ñм Ñтъкло', 'то Mне ми вреди'); @@ -663,8 +653,6 @@ select @@session.tx_isolation; @@session.tx_isolation REPEATABLE-READ CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 (a,b) VALUES ('adding record using session 1','for verifying multiple concurrent transactions'), ('Мога да Ñм Ñтъкло', 'то Mне ми вреди'); @@ -810,8 +798,6 @@ select @@session.tx_isolation; @@session.tx_isolation REPEATABLE-READ CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 (a,b) VALUES ('adding record using session 1','for verifying multiple concurrent transactions'), ('Мога да Ñм Ñтъкло', 'то Mне ми вреди'); @@ -932,8 +918,6 @@ select @@session.tx_isolation; @@session.tx_isolation REPEATABLE-READ CREATE FULLTEXT INDEX idx on t1 (a,b); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID INSERT INTO t1 (a,b) VALUES ('adding record using session 1','for verifying multiple concurrent transactions'), ('Мога да Ñм Ñтъкло', 'то Mне ми вреди'); diff --git a/mysql-test/suite/innodb_fts/t/innodb-fts-basic.test b/mysql-test/suite/innodb_fts/t/basic.test similarity index 83% rename from mysql-test/suite/innodb_fts/t/innodb-fts-basic.test rename to mysql-test/suite/innodb_fts/t/basic.test index 095713130f1..0c0920c5f16 100644 --- a/mysql-test/suite/innodb_fts/t/innodb-fts-basic.test +++ b/mysql-test/suite/innodb_fts/t/basic.test @@ -2,12 +2,15 @@ -- source include/have_innodb.inc -if (`select plugin_auth_version <= "5.6.10" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB 5.6.10 or earlier -} - # Create FTS table +--error ER_INNODB_NO_FT_TEMP_TABLE +CREATE TEMPORARY TABLE articles ( + id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, + title VARCHAR(200), + body TEXT, + FULLTEXT (title,body) + ) ENGINE=InnoDB; + CREATE TABLE articles ( id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, title VARCHAR(200), @@ -226,3 +229,37 @@ SELECT * FROM articles AGAINST ('"more test proximity"' IN BOOLEAN MODE); drop table articles; + +--echo # +--echo # Bug #22679185 INVALID INNODB FTS DOC ID DURING INSERT +--echo # + +create table t1 (f1 int not null primary key, f2 varchar(100), + FTS_DOC_ID bigint(20) unsigned not null, + unique key `FTS_DOC_ID_INDEX` (`FTS_DOC_ID`), + fulltext key (f2))engine=innodb; + +insert into t1 values(1, "This is the first record", 20000); +insert into t1 values(2, "This is the second record", 40000); +select FTS_DOC_ID from t1; +drop table t1; + + +create table t1 (f1 int not null primary key, f2 varchar(100), + FTS_DOC_ID bigint(20) unsigned not null auto_increment, + unique key `FTS_DOC_ID_INDEX` (`FTS_DOC_ID`), + fulltext key (f2))engine=innodb; + +set auto_increment_increment = 65535; +insert into t1(f1, f2) values(1, "This is the first record"); +insert into t1(f1, f2) values(2, "This is the second record"); +insert into t1(f1, f2) values(3, "This is the third record"); +select FTS_DOC_ID from t1; +drop table t1; + 
+call mtr.add_suppression("\\[ERROR\\] InnoDB: Doc ID 20030101000000 is too big. Its difference with largest used Doc ID 0 cannot exceed or equal to 65535"); +CREATE TABLE t1 (FTS_DOC_ID BIGINT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, + title VARCHAR(200), FULLTEXT(title)) ENGINE=InnoDB; +--error 182 +INSERT INTO t1 VALUES (NULL, NULL), (20030101000000, 20030102000000); +DROP TABLE t1; diff --git a/mysql-test/suite/innodb_fts/t/fulltext2.test b/mysql-test/suite/innodb_fts/t/fulltext2.test index 33b6a7ac88e..1c31bcd0319 100644 --- a/mysql-test/suite/innodb_fts/t/fulltext2.test +++ b/mysql-test/suite/innodb_fts/t/fulltext2.test @@ -7,14 +7,6 @@ # --source include/have_innodb.inc ---disable_warnings -DROP TABLE IF EXISTS t1; ---enable_warnings - -if (`select plugin_auth_version <= "5.6.10" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB 5.6.10 or earlier -} CREATE TABLE t1 ( i int(10) unsigned not null auto_increment primary key, diff --git a/mysql-test/suite/innodb_fts/t/fulltext_table_evict.test b/mysql-test/suite/innodb_fts/t/fulltext_table_evict.test new file mode 100644 index 00000000000..2e7aa655aa1 --- /dev/null +++ b/mysql-test/suite/innodb_fts/t/fulltext_table_evict.test @@ -0,0 +1,48 @@ +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/big_test.inc + +--echo # +--echo # Bug Bug #27304661 MYSQL CRASH DOING SYNC INDEX ] +--echo # [FATAL] INNODB: SEMAPHORE WAIT HAS LASTED > 600 +--echo # + +CREATE TABLE t1 ( + id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, + f1 TEXT(500), + FULLTEXT idx (f1) + ) ENGINE=InnoDB; +insert into t1 (f1) values ('fjdhfsjhf'),('dhjfhjshfj'),('dhjafjhfj'); + +--source include/restart_mysqld.inc + +set @save_table_definition_cache=@@global.table_definition_cache; +set @save_table_open_cache=@@global.table_open_cache; + +set global table_definition_cache=400; +set global table_open_cache= 1024; + +SET @save_dbug = @@GLOBAL.debug_dbug; +SET GLOBAL DEBUG_DBUG="+d,crash_if_fts_table_is_evicted"; +#Create 1000 tables, try the best to evict t1 . + +--disable_query_log +let $loop=1000; +while($loop) +{ + eval create table t_$loop(id int, name text(100), fulltext idxt_$loop(name) )engine=innodb; + dec $loop; +} + +let $loop=1000; +while($loop) +{ + eval drop table t_$loop; + dec $loop; +} + +SET GLOBAL DEBUG_DBUG = @save_dbug; +--enable_query_log +set @@global.table_definition_cache=@save_table_definition_cache; +set @@global.table_open_cache=@save_table_open_cache; +drop table t1; diff --git a/mysql-test/suite/innodb_fts/t/fulltext_var.test b/mysql-test/suite/innodb_fts/t/fulltext_var.test index 03eab7e8557..2b94aa58424 100644 --- a/mysql-test/suite/innodb_fts/t/fulltext_var.test +++ b/mysql-test/suite/innodb_fts/t/fulltext_var.test @@ -3,15 +3,6 @@ # --source include/have_innodb.inc ---disable_warnings -drop table if exists t1; ---enable_warnings - -if (`select plugin_auth_version <= "5.6.10" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB 5.6.10 or earlier -} - # Save ft_boolean_syntax variable let $saved_ft_boolean_syntax=`select @@global.ft_boolean_syntax`; diff --git a/mysql-test/suite/innodb_fts/t/innodb-fts-ddl.test b/mysql-test/suite/innodb_fts/t/innodb-fts-ddl.test index 23065a97002..ddd92556772 100644 --- a/mysql-test/suite/innodb_fts/t/innodb-fts-ddl.test +++ b/mysql-test/suite/innodb_fts/t/innodb-fts-ddl.test @@ -18,14 +18,17 @@ INSERT INTO fts_test (title,body) VALUES ('MySQL vs. 
YourSQL','In the following database comparison ...'), ('MySQL Security','When configured properly, MySQL ...'); +# Table does rebuild when fts index builds for the first time +--error ER_ALTER_OPERATION_NOT_SUPPORTED +ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY; + # Create the FTS index -CREATE FULLTEXT INDEX idx on fts_test (title, body); +ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=INPLACE; # Select word "tutorial" in the table SELECT * FROM fts_test WHERE MATCH (title, body) AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE); - # Drop the FTS idx DROP INDEX idx ON fts_test; @@ -38,9 +41,8 @@ INSERT INTO fts_test (title,body) VALUES ('MySQL vs. YourSQL','In the following database comparison ...'), ('MySQL Security','When configured properly, MySQL ...'); - -# Recreate the FTS index -CREATE FULLTEXT INDEX idx on fts_test (title, body); +# FTS_DOC_ID hidden column and FTS_DOC_ID index exist +ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY; # Select word "tutorial" in the table SELECT * FROM fts_test WHERE MATCH (title, body) @@ -81,8 +83,6 @@ CREATE TABLE fts_test ( body TEXT ) ENGINE=InnoDB; -create unique index FTS_DOC_ID_INDEX on fts_test(FTS_DOC_ID); - # Insert six rows INSERT INTO fts_test (title,body) VALUES ('MySQL Tutorial','DBMS stands for DataBase ...') , @@ -97,7 +97,7 @@ INSERT INTO fts_test (title,body) VALUES # column already exists. This has not been implemented yet. --error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON CREATE FULLTEXT INDEX idx on fts_test (title, body) LOCK=NONE; -CREATE FULLTEXT INDEX idx on fts_test (title, body); +ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY; --error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON ALTER TABLE fts_test ROW_FORMAT=REDUNDANT, LOCK=NONE; @@ -117,8 +117,6 @@ SELECT * FROM fts_test WHERE MATCH (title, body) # Drop the FTS_DOC_ID_INDEX and try again drop index idx on fts_test; -drop index FTS_DOC_ID_INDEX on fts_test; - CREATE FULLTEXT INDEX idx on fts_test (title, body); SELECT * FROM fts_test WHERE MATCH (title, body) @@ -232,7 +230,7 @@ DROP TABLE articles; create table articles(`FTS_DOC_ID` serial, `col32` timestamp not null,`col115` text) engine=innodb; -create fulltext index `idx5` on articles(`col115`) ; +create fulltext index `idx5` on articles(`col115`) ; alter ignore table articles add primary key (`col32`) ; @@ -271,3 +269,10 @@ SELECT * FROM articles WHERE MATCH (title, body) AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE); DROP TABLE articles; + +# Add more than one FTS index +CREATE TABLE t1 (a VARCHAR(3)) ENGINE=InnoDB; +ALTER TABLE t1 ADD FULLTEXT KEY(a), ADD COLUMN b VARCHAR(3), ADD FULLTEXT KEY(b); + +# Cleanup +DROP TABLE t1; diff --git a/mysql-test/suite/innodb_fts/t/innodb_fts_misc.test b/mysql-test/suite/innodb_fts/t/innodb_fts_misc.test index 68ca8974512..150d632b26f 100644 --- a/mysql-test/suite/innodb_fts/t/innodb_fts_misc.test +++ b/mysql-test/suite/innodb_fts/t/innodb_fts_misc.test @@ -648,6 +648,7 @@ CREATE TABLE t1 ( id INT , char_column VARCHAR(60) CHARACTER SET UTF8) ENGINE = INSERT INTO t1 VALUES (1,'aaa'),(2,'bbb'),(3,'ccc'); CREATE FULLTEXT INDEX i ON t1 (char_column); HANDLER t1 OPEN; +--error ER_KEY_DOESNT_SUPPORT HANDLER t1 READ i = ('aaa'); DROP TABLE t1; #23. 
Duplicate key error when there are no unique indexes (procedure test) diff --git a/mysql-test/suite/innodb_gis/r/types.result b/mysql-test/suite/innodb_gis/r/types.result index 12e1eb44cd7..dc46710c365 100644 --- a/mysql-test/suite/innodb_gis/r/types.result +++ b/mysql-test/suite/innodb_gis/r/types.result @@ -47,6 +47,7 @@ SELECT ST_AsText(g) FROM t_wl6455 WHERE g = POINT(10,10); ST_AsText(g) INSERT INTO t_wl6455 VALUES(10, POINT(10,10)); COMMIT; +FLUSH TABLES; INSERT INTO t_wl6455 VALUES(11, POINT(11,11)); BEGIN; INSERT INTO t_wl6455 VALUES(1, POINT(1,1)); diff --git a/mysql-test/suite/innodb_gis/t/types.test b/mysql-test/suite/innodb_gis/t/types.test index fab0db5f755..0db836b94ba 100644 --- a/mysql-test/suite/innodb_gis/t/types.test +++ b/mysql-test/suite/innodb_gis/t/types.test @@ -62,6 +62,9 @@ SELECT ST_AsText(g) FROM t_wl6455 WHERE g = POINT(10,10); INSERT INTO t_wl6455 VALUES(10, POINT(10,10)); COMMIT; +# Avoid corrupting non-crash-safe system tables on the kill below. +FLUSH TABLES; + INSERT INTO t_wl6455 VALUES(11, POINT(11,11)); BEGIN; INSERT INTO t_wl6455 VALUES(1, POINT(1,1)); diff --git a/mysql-test/suite/innodb_zip/r/cmp_per_index.result b/mysql-test/suite/innodb_zip/r/cmp_per_index.result index 5b001279b58..7b27fa722b9 100644 --- a/mysql-test/suite/innodb_zip/r/cmp_per_index.result +++ b/mysql-test/suite/innodb_zip/r/cmp_per_index.result @@ -72,8 +72,17 @@ index_name PRIMARY compress_ops 65 compress_ops_ok 65 uncompress_ops 0 +SHOW CREATE TABLE t; +Table t +Create Table CREATE TABLE `t` ( + `a` int(11) NOT NULL, + `b` varchar(512) DEFAULT NULL, + `c` varchar(16) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `b` (`b`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=2 SET GLOBAL innodb_cmp_per_index_enabled=ON; -SELECT COUNT(*) FROM t; +SELECT COUNT(*) FROM t IGNORE INDEX(b); COUNT(*) 128 SELECT database_name, @@ -87,15 +96,9 @@ FROM information_schema.innodb_cmp_per_index ORDER BY 1, 2, 3; database_name test table_name t -index_name b -compress_ops 0 -compress_ops_ok 0 -uncompress_ops 6 -database_name test -table_name t index_name PRIMARY compress_ops 0 compress_ops_ok 0 -uncompress_ops 5 +uncompress_ops 4 DROP TABLE t; SET GLOBAL innodb_cmp_per_index_enabled=default; diff --git a/mysql-test/suite/innodb_zip/r/restart.result b/mysql-test/suite/innodb_zip/r/restart.result index c4e6daf73fe..1075cf990b2 100644 --- a/mysql-test/suite/innodb_zip/r/restart.result +++ b/mysql-test/suite/innodb_zip/r/restart.result @@ -227,6 +227,7 @@ test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/ # Shutdown the server and list the tablespace OS files # ---- MYSQL_DATA_DIR/test +db.opt t1_restart.frm t1_restart.ibd t2_restart.frm @@ -485,6 +486,7 @@ SUBPARTITION BY HASH (`c1`) # Shutdown the server and make a backup of a tablespace # ---- MYSQL_DATA_DIR/test +db.opt t4_restart.frm t4_restart.ibd t5_restart.frm @@ -588,6 +590,7 @@ ERROR 42S01: Table 't55_restart' already exists RENAME TABLE t5_restart TO t55_restart; ERROR HY000: Error on rename of './test/t5_restart' to './test/t55_restart' (errno: 184 "Tablespace already exists") ---- MYSQL_DATA_DIR/test +db.opt t4_restart.frm t4_restart.ibd t5_restart.frm @@ -684,6 +687,7 @@ SUBPARTITION BY HASH (`c1`) (SUBPARTITION `s2` DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, SUBPARTITION `s3` DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB)) ---- MYSQL_DATA_DIR/test +db.opt t4_restart.frm t4_restart.ibd t55_restart.frm @@ -786,6 +790,7 @@ SUBPARTITION BY HASH (`c1`) # Move the remote tablespaces to 
a new location and change the ISL files # ---- MYSQL_DATA_DIR/test +db.opt t4_restart.frm t4_restart.ibd t55_restart.frm @@ -816,6 +821,7 @@ t77_restart#p#p1#sp#s3.ibd # Moving tablespace 't66_restart' from MYSQL_TMP_DIR/alt_dir to MYSQL_TMP_DIR/new_dir # Moving tablespace 't77_restart' from MYSQL_TMP_DIR/alt_dir to MYSQL_TMP_DIR/new_dir ---- MYSQL_DATA_DIR/test +db.opt t4_restart.frm t4_restart.isl t55_restart.frm @@ -931,6 +937,7 @@ SUBPARTITION BY HASH (`c1`) # Move the remote tablespaces back to the default datadir and delete the ISL file. # ---- MYSQL_DATA_DIR/test +db.opt t4_restart.frm t4_restart.isl t55_restart.frm @@ -961,6 +968,7 @@ t77_restart#p#p1#sp#s3.ibd # Moving 't66_restart' from MYSQL_TMP_DIR/new_dir to MYSQL_DATA_DIR # Moving 't77_restart' from MYSQL_TMP_DIR/new_dir to MYSQL_DATA_DIR ---- MYSQL_DATA_DIR/test +db.opt t4_restart.frm t4_restart.ibd t55_restart.frm diff --git a/mysql-test/suite/innodb_zip/r/wl5522_zip.result b/mysql-test/suite/innodb_zip/r/wl5522_zip.result index b4c0597dcf9..f57e2191d9f 100644 --- a/mysql-test/suite/innodb_zip/r/wl5522_zip.result +++ b/mysql-test/suite/innodb_zip/r/wl5522_zip.result @@ -29,12 +29,14 @@ a b c 823 Evolution lsjndofiabsoibeg 822 Devotion asdfuihknaskdf 821 Cavalry ..asdasdfaeraf +db.opt t1.frm t1.ibd # Restarting server # Done restarting server FLUSH TABLE t1 FOR EXPORT; # List before copying files +db.opt t1.cfg t1.frm t1.ibd @@ -56,10 +58,12 @@ a b c # Restarting server # Done restarting server # List before t1 DISCARD +db.opt t1.frm t1.ibd ALTER TABLE t1 DISCARD TABLESPACE; # List after t1 DISCARD +db.opt t1.frm ALTER TABLE t1 IMPORT TABLESPACE; ALTER TABLE t1 ENGINE InnoDB; @@ -76,6 +80,7 @@ a b c 823 Evolution lsjndofiabsoibeg 822 Devotion asdfuihknaskdf 821 Cavalry ..asdasdfaeraf +db.opt t1.cfg t1.frm t1.ibd @@ -112,6 +117,7 @@ INSERT INTO t1(c2) SELECT c2 FROM t1; INSERT INTO t1(c2) SELECT c2 FROM t1; INSERT INTO t1(c2) SELECT c2 FROM t1; INSERT INTO t1(c2) SELECT c2 FROM t1; +db.opt t1.frm t1.ibd FLUSH TABLES t1 FOR EXPORT; @@ -119,6 +125,7 @@ SELECT COUNT(*) FROM t1; COUNT(*) 16 backup: t1 +db.opt t1.cfg t1.frm t1.ibd @@ -131,6 +138,7 @@ ALTER TABLE t1 DISCARD TABLESPACE; SELECT * FROM t1; ERROR HY000: Tablespace has been discarded for table `t1` restore: t1 .ibd and .cfg files +db.opt t1.cfg t1.frm t1.ibd @@ -150,6 +158,7 @@ INSERT INTO t1(c2) SELECT c2 FROM t1; INSERT INTO t1(c2) SELECT c2 FROM t1; INSERT INTO t1(c2) SELECT c2 FROM t1; INSERT INTO t1(c2) SELECT c2 FROM t1; +db.opt t1.frm t1.ibd FLUSH TABLES t1 FOR EXPORT; @@ -157,10 +166,12 @@ SELECT COUNT(*) FROM t1; COUNT(*) 16 backup: t1 +db.opt t1.cfg t1.frm t1.ibd UNLOCK TABLES; +db.opt t1.frm t1.ibd INSERT INTO t1(c2) SELECT c2 FROM t1; @@ -173,6 +184,7 @@ ALTER TABLE t1 DISCARD TABLESPACE; SELECT * FROM t1; ERROR HY000: Tablespace has been discarded for table `t1` restore: t1 .ibd and .cfg files +db.opt t1.cfg t1.frm t1.ibd @@ -198,6 +210,7 @@ SELECT COUNT(*) FROM t1 WHERE c2 = 1; COUNT(*) 16 backup: t1 +db.opt t1.cfg t1.frm t1.ibd diff --git a/mysql-test/suite/innodb_zip/t/cmp_per_index.test b/mysql-test/suite/innodb_zip/t/cmp_per_index.test index b26d5a4f243..15f5b2de6e4 100644 --- a/mysql-test/suite/innodb_zip/t/cmp_per_index.test +++ b/mysql-test/suite/innodb_zip/t/cmp_per_index.test @@ -102,9 +102,11 @@ ORDER BY 1, 2, 3; -- source include/restart_mysqld.inc +SHOW CREATE TABLE t; + SET GLOBAL innodb_cmp_per_index_enabled=ON; -SELECT COUNT(*) FROM t; +SELECT COUNT(*) FROM t IGNORE INDEX(b); SELECT database_name, diff --git 
a/mysql-test/suite/maria/alter.result b/mysql-test/suite/maria/alter.result index 1a7daf5a1ee..c63688dddd6 100644 --- a/mysql-test/suite/maria/alter.result +++ b/mysql-test/suite/maria/alter.result @@ -31,3 +31,19 @@ pk i 8 88 9 99 DROP TABLE t1; +CREATE TABLE t1 (f INT) ENGINE=Aria transactional=1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f` int(11) DEFAULT NULL +) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=1 +INSERT INTO t1 VALUES (1),(2); +ALTER TABLE t1 ORDER BY unknown_column; +ERROR 42S22: Unknown column 'unknown_column' in 'order clause' +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f` int(11) DEFAULT NULL +) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=1 +CREATE TABLE t2 SELECT * FROM t1; +DROP TABLE t1, t2; diff --git a/mysql-test/suite/maria/alter.test b/mysql-test/suite/maria/alter.test index abca4865688..09672cdfa3b 100644 --- a/mysql-test/suite/maria/alter.test +++ b/mysql-test/suite/maria/alter.test @@ -25,3 +25,20 @@ INSERT INTO t1 VALUES (2,0),(3,33),(4,0),(5,55),(6,66),(7,0),(8,88),(9,99); ALTER TABLE t1 ENABLE KEYS; SELECT * FROM t1 WHERE i = 0 OR pk BETWEEN 6 AND 10; DROP TABLE t1; + +# +# MDEV-14943 +# Assertion `block->type == PAGECACHE_EMPTY_PAGE || block->type == type || +# type == PAGECACHE_LSN_PAGE || type == PAGECACHE_READ_UNKNOWN_PAGE || +# block->type == PAGECACHE_READ_UNKNOWN_PAGE' failed in pagecache_read upon +# CREATE ... SELECT from Aria table +# + +CREATE TABLE t1 (f INT) ENGINE=Aria transactional=1; +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES (1),(2); +--error ER_BAD_FIELD_ERROR +ALTER TABLE t1 ORDER BY unknown_column; +SHOW CREATE TABLE t1; +CREATE TABLE t2 SELECT * FROM t1; +DROP TABLE t1, t2; diff --git a/mysql-test/suite/maria/lock.result b/mysql-test/suite/maria/lock.result index 90250568ef5..660f64070ca 100644 --- a/mysql-test/suite/maria/lock.result +++ b/mysql-test/suite/maria/lock.result @@ -99,3 +99,38 @@ f2 3 unlock tables; DROP TABLE t1,t2,tmp; +# +# MDEV-10378 Assertion `trn' failed in virtual int ha_maria::start_stmt +# +CREATE TABLE t1 (f1 VARCHAR(3), f2 INT, pk INT, PRIMARY KEY (pk)) ENGINE=Aria; +INSERT INTO t1 VALUES ('foo',10,1), ('foo',1,2); +LOCK TABLE t1 WRITE; +ALTER TABLE t1 ADD UNIQUE KEY (f1); +ERROR 23000: Duplicate entry 'foo' for key 'f1' +ALTER TABLE t1 ADD KEY (f2); +DROP TABLE t1; +# End of 10.2 tests +# +# MDEV-14669 Assertion `file->trn == trn' failed in ha_maria::start_stmt +# +CREATE TABLE t1 (i INT) ENGINE=Aria; +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (c CHAR(1)) ENGINE=Aria; +INSERT INTO t2 VALUES ('a'),('b'); +connect con1,localhost,root,,test; +LOCK TABLE t1 WRITE; +OPTIMIZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +connection default; +DELETE FROM t2 WHERE c < 1; +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'a' +Warning 1292 Truncated incorrect DOUBLE value: 'b' +connection con1; +OPTIMIZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 optimize status Table is already up to date +disconnect con1; +connection default; +DROP TABLE t1, t2; diff --git a/mysql-test/suite/maria/lock.test b/mysql-test/suite/maria/lock.test index 57447a18c55..37356a359d8 100644 --- a/mysql-test/suite/maria/lock.test +++ b/mysql-test/suite/maria/lock.test @@ -105,3 +105,37 @@ INSERT INTO t2 (f2) SELECT f3 FROM tmp AS tmp_alias; select * from t2; unlock tables; DROP TABLE t1,t2,tmp; + +--echo # +--echo # MDEV-10378 Assertion `trn' failed in virtual int ha_maria::start_stmt +--echo # + +CREATE TABLE 
t1 (f1 VARCHAR(3), f2 INT, pk INT, PRIMARY KEY (pk)) ENGINE=Aria; +INSERT INTO t1 VALUES ('foo',10,1), ('foo',1,2); +LOCK TABLE t1 WRITE; +--error ER_DUP_ENTRY +ALTER TABLE t1 ADD UNIQUE KEY (f1); +ALTER TABLE t1 ADD KEY (f2); +DROP TABLE t1; + +--echo # End of 10.2 tests + +--echo # +--echo # MDEV-14669 Assertion `file->trn == trn' failed in ha_maria::start_stmt +--echo # + +CREATE TABLE t1 (i INT) ENGINE=Aria; +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (c CHAR(1)) ENGINE=Aria; +INSERT INTO t2 VALUES ('a'),('b'); +--connect (con1,localhost,root,,test) +LOCK TABLE t1 WRITE; +OPTIMIZE TABLE t1; +--connection default +DELETE FROM t2 WHERE c < 1; +--connection con1 +OPTIMIZE TABLE t1; +# Cleanup +--disconnect con1 +--connection default +DROP TABLE t1, t2; diff --git a/mysql-test/suite/maria/maria-recover.result b/mysql-test/suite/maria/maria-recover.result index b96cf8811db..4d0b4317afe 100644 --- a/mysql-test/suite/maria/maria-recover.result +++ b/mysql-test/suite/maria/maria-recover.result @@ -27,7 +27,6 @@ ThursdayMorningsMarket ThursdayMorningsMarketb Warnings: Error 145 t_corrupted2' is marked as crashed and should be repaired -Error 1194 t_corrupted2' is marked as crashed and should be repaired Error 1034 1 client is using or hasn't closed the table properly Error 1034 Wrong base information on indexpage at page: 1 select * from t_corrupted2; diff --git a/mysql-test/suite/mariabackup/absolute_ibdata_paths.opt b/mysql-test/suite/mariabackup/absolute_ibdata_paths.opt new file mode 100644 index 00000000000..52b6b743ac8 --- /dev/null +++ b/mysql-test/suite/mariabackup/absolute_ibdata_paths.opt @@ -0,0 +1 @@ +--innodb --innodb-data-home-dir= --innodb-data-file-path=$MYSQLTEST_VARDIR/tmp/absolute_path_ibdata1:3M;ibdata_second:1M:autoextend \ No newline at end of file diff --git a/mysql-test/suite/mariabackup/absolute_ibdata_paths.result b/mysql-test/suite/mariabackup/absolute_ibdata_paths.result new file mode 100644 index 00000000000..fe211e71f2f --- /dev/null +++ b/mysql-test/suite/mariabackup/absolute_ibdata_paths.result @@ -0,0 +1,10 @@ +CREATE TABLE t(i INT) ENGINE INNODB; +INSERT INTO t VALUES(1); +# xtrabackup backup +# remove datadir +# xtrabackup copy back +# restart server +SELECT * from t; +i +1 +DROP TABLE t; diff --git a/mysql-test/suite/mariabackup/absolute_ibdata_paths.test b/mysql-test/suite/mariabackup/absolute_ibdata_paths.test new file mode 100644 index 00000000000..6717f16d199 --- /dev/null +++ b/mysql-test/suite/mariabackup/absolute_ibdata_paths.test @@ -0,0 +1,31 @@ +# This test just backs up and restores empty database +# Innodb system tablespace is specified with absolute path in the .opt file +CREATE TABLE t(i INT) ENGINE INNODB; +INSERT INTO t VALUES(1); +echo # xtrabackup backup; + +let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; +let $_innodb_data_file_path=`select @@innodb_data_file_path`; +let $_innodb_data_home_dir=`select @@innodb_data_home_dir`; +let $_datadir= `SELECT @@datadir`; + +--disable_result_log +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir; +--enable_result_log +exec $XTRABACKUP --prepare --target-dir=$targetdir; + +--source include/shutdown_mysqld.inc +echo # remove datadir; +rmdir $_datadir; +#remove out-of-datadir ibdata1 +remove_file $MYSQLTEST_VARDIR/tmp/absolute_path_ibdata1; +echo # xtrabackup copy back; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$_datadir --target-dir=$targetdir "--innodb_data_file_path=$_innodb_data_file_path" 
--innodb_data_home_dir=$_innodb_data_home_dir; +echo # restart server; +--source include/start_mysqld.inc +--enable_result_log + +SELECT * from t; +DROP TABLE t; +rmdir $targetdir; + diff --git a/mysql-test/suite/mariabackup/backup_ssl.result b/mysql-test/suite/mariabackup/backup_ssl.result new file mode 100644 index 00000000000..6e59da6d43a --- /dev/null +++ b/mysql-test/suite/mariabackup/backup_ssl.result @@ -0,0 +1,9 @@ +GRANT ALL PRIVILEGES on *.* TO backup_user IDENTIFIED by 'x' REQUIRE SSL; +FLUSH PRIVILEGES; +# xtrabackup backup +# xtrabackup prepare +# shutdown server +# remove datadir +# xtrabackup move back +# restart server +DROP USER backup_user; diff --git a/mysql-test/suite/mariabackup/backup_ssl.test b/mysql-test/suite/mariabackup/backup_ssl.test new file mode 100644 index 00000000000..e858c834d29 --- /dev/null +++ b/mysql-test/suite/mariabackup/backup_ssl.test @@ -0,0 +1,16 @@ +GRANT ALL PRIVILEGES on *.* TO backup_user IDENTIFIED by 'x' REQUIRE SSL; +FLUSH PRIVILEGES; +echo # xtrabackup backup; +let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; +--disable_result_log +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --user=backup_user --password=x --ssl --backup --target-dir=$targetdir; +--enable_result_log + +echo # xtrabackup prepare; +--disable_result_log +exec $XTRABACKUP --prepare --target-dir=$targetdir; +-- source include/restart_and_restore.inc +--enable_result_log +DROP USER backup_user; +rmdir $targetdir; + diff --git a/mysql-test/suite/mariabackup/lock_ddl_per_table.opt b/mysql-test/suite/mariabackup/lock_ddl_per_table.opt new file mode 100644 index 00000000000..bbb6d7f9ff4 --- /dev/null +++ b/mysql-test/suite/mariabackup/lock_ddl_per_table.opt @@ -0,0 +1 @@ +--loose-partition diff --git a/mysql-test/suite/mariabackup/lock_ddl_per_table.test b/mysql-test/suite/mariabackup/lock_ddl_per_table.test index 04edb9e05e9..2689508e554 100644 --- a/mysql-test/suite/mariabackup/lock_ddl_per_table.test +++ b/mysql-test/suite/mariabackup/lock_ddl_per_table.test @@ -1,4 +1,5 @@ --source include/have_debug.inc +--source include/have_partition.inc CREATE TABLE t(i INT) ENGINE INNODB; INSERT INTO t VALUES(1); diff --git a/mysql-test/suite/mariabackup/partition_datadir.opt b/mysql-test/suite/mariabackup/partition_datadir.opt deleted file mode 100644 index 8a3240370eb..00000000000 --- a/mysql-test/suite/mariabackup/partition_datadir.opt +++ /dev/null @@ -1 +0,0 @@ ---partition \ No newline at end of file diff --git a/mysql-test/suite/mariabackup/partition_datadir.test b/mysql-test/suite/mariabackup/partition_datadir.test index 882b0111267..c525d34a02c 100644 --- a/mysql-test/suite/mariabackup/partition_datadir.test +++ b/mysql-test/suite/mariabackup/partition_datadir.test @@ -1,3 +1,4 @@ +--source include/have_partition.inc let $targetdir=$MYSQLTEST_VARDIR/backup; mkdir $targetdir; mkdir $MYSQLTEST_VARDIR/partitdata; diff --git a/mysql-test/suite/mariabackup/suite.opt b/mysql-test/suite/mariabackup/suite.opt index 752a2e41983..3b5cc4f4c45 100644 --- a/mysql-test/suite/mariabackup/suite.opt +++ b/mysql-test/suite/mariabackup/suite.opt @@ -1 +1 @@ ---innodb --loose-changed_page_bitmaps --innodb-sys-tables --partition +--innodb --loose-changed_page_bitmaps --innodb-sys-tables diff --git a/mysql-test/suite/multi_source/info_logs.result b/mysql-test/suite/multi_source/info_logs.result index e177c9826a9..531a6178cdb 100644 --- a/mysql-test/suite/multi_source/info_logs.result +++ b/mysql-test/suite/multi_source/info_logs.result @@ -89,17 +89,17 @@ MASTER 2.2 # EOF # show all slaves 
status; -Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos - Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 relay.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 1 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 1073741824 7 0 60.000 -MASTER 2.2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 relay-master@00202@002e2.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 1073741824 7 0 60.000 +Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos + Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 relay.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 1 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 +MASTER 2.2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 relay-master@00202@002e2.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 
0 0 0 1073741824 7 0 60.000 include/wait_for_slave_to_start.inc set default_master_connection = 'MASTER 2.2'; include/wait_for_slave_to_start.inc set default_master_connection = ''; show all slaves status; -Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos - Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 relay.000004 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 1 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 1073741824 6 0 60.000 -MASTER 2.2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 relay-master@00202@002e2.000004 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 1073741824 6 0 60.000 +Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos + Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 relay.000004 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 1 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 6 0 60.000 +MASTER 2.2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 
master-bin.000001 relay-master@00202@002e2.000004 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 6 0 60.000 # # List of files matching '*info*' pattern # after slave server restart diff --git a/mysql-test/suite/multi_source/multi_parallel.cnf b/mysql-test/suite/multi_source/multi_parallel.cnf new file mode 100644 index 00000000000..0f0dc5c9a82 --- /dev/null +++ b/mysql-test/suite/multi_source/multi_parallel.cnf @@ -0,0 +1,6 @@ +!include my.cnf +[mysqld.1] +gtid_domain_id=1 + +[mysqld.2] +gtid_domain_id=2 diff --git a/mysql-test/suite/multi_source/multi_parallel.result b/mysql-test/suite/multi_source/multi_parallel.result new file mode 100644 index 00000000000..bca01d57932 --- /dev/null +++ b/mysql-test/suite/multi_source/multi_parallel.result @@ -0,0 +1,64 @@ +connect master1,127.0.0.1,root,,,$SERVER_MYPORT_1; +connect master2,127.0.0.1,root,,,$SERVER_MYPORT_2; +connect slave,127.0.0.1,root,,,$SERVER_MYPORT_3; +set global slave_parallel_threads=10; +change master 'master1' to +master_port=MYPORT_1, +master_host='127.0.0.1', +master_user='root'; +change master 'master2' to +master_port=MYPORT_2, +master_host='127.0.0.1', +master_user='root'; +start all slaves; +set default_master_connection = 'master1'; +include/wait_for_slave_to_start.inc +set default_master_connection = 'master2'; +include/wait_for_slave_to_start.inc +## Slave status variable +set default_master_connection = 'master1'; +show status like 'slave_running'; +Variable_name Value +Slave_running ON +set default_master_connection = 'master2'; +show status like 'slave_running'; +Variable_name Value +Slave_running ON +#master 1 +connection master1; +##Running CURD operation +connection slave; +Slave_DDL_Groups= 20; +Slave_Non_Transactional_Groups= 20; +Slave_Transactional_Groups= 0; +#master 2 +connection master2; +##Running CURD operation +connection slave; +Slave_DDL_Groups= 20; +Slave_Non_Transactional_Groups= 20; +Slave_Transactional_Groups= 0; +#master 1 +connection master1; +##Running CURD operation +connection slave; +Slave_DDL_Groups= 40; +Slave_Non_Transactional_Groups= 20; +Slave_Transactional_Groups= 20; +stop all slaves; +Warnings: +Note 1938 SLAVE 'master2' stopped +Note 1938 SLAVE 'master1' stopped +set default_master_connection = 'master1'; +include/wait_for_slave_to_stop.inc +set default_master_connection = 'master2'; +include/wait_for_slave_to_stop.inc +set global slave_parallel_threads=0; +include/reset_master_slave.inc +disconnect slave; +connection master1; +include/reset_master_slave.inc +disconnect master1; +connection master2; +include/reset_master_slave.inc +disconnect master2; diff --git a/mysql-test/suite/multi_source/multi_parallel.test b/mysql-test/suite/multi_source/multi_parallel.test new file mode 100644 index 00000000000..a1385198b61 --- /dev/null +++ b/mysql-test/suite/multi_source/multi_parallel.test @@ -0,0 +1,125 @@ +# This test file tests events counter like Slave_ddl_groups, +# Slave_non_transactional_groups, Slave_transactional_groups +--source include/not_embedded.inc +--source include/have_innodb.inc +--let $rpl_server_count= 0 + +--connect (master1,127.0.0.1,root,,,$SERVER_MYPORT_1) +--connect (master2,127.0.0.1,root,,,$SERVER_MYPORT_2) +--connect (slave,127.0.0.1,root,,,$SERVER_MYPORT_3) + +#save state +--let $par_thd= `select @@slave_parallel_threads;` + +set global slave_parallel_threads=10; + +--replace_result $SERVER_MYPORT_1 MYPORT_1 +eval change master 'master1' 
to +master_port=$SERVER_MYPORT_1, +master_host='127.0.0.1', +master_user='root'; + +--replace_result $SERVER_MYPORT_2 MYPORT_2 +eval change master 'master2' to +master_port=$SERVER_MYPORT_2, +master_host='127.0.0.1', +master_user='root'; + + +#start all slaves +--disable_warnings +start all slaves; +--enable_warnings +set default_master_connection = 'master1'; +--source include/wait_for_slave_to_start.inc + +set default_master_connection = 'master2'; +--source include/wait_for_slave_to_start.inc + +--echo ## Slave status variable + +set default_master_connection = 'master1'; +show status like 'slave_running'; + +set default_master_connection = 'master2'; +show status like 'slave_running'; + + +--echo #master 1 +--connection master1 + +--let $loop_counter=10 +--let $table_engine=myisam +--source multi_parallel_loop.inc +--save_master_pos + +--connection slave + +--sync_with_master 0,'master1' +--let $status= query_get_value(show slave 'master1' status, Slave_DDL_Groups, 1) +--echo Slave_DDL_Groups= $status; + +--let $status= query_get_value(show slave 'master1' status, Slave_Non_Transactional_Groups, 1) +--echo Slave_Non_Transactional_Groups= $status; + +--let $status= query_get_value(show slave 'master1' status, Slave_Transactional_Groups, 1) +--echo Slave_Transactional_Groups= $status; + +--echo #master 2 +--connection master2 + +--let $loop_counter=10 +--let $table_engine=myisam +--source multi_parallel_loop.inc +--save_master_pos + +--connection slave +--sync_with_master 0,'master2' +--let $status= query_get_value(show slave 'master2' status, Slave_DDL_Groups, 1) +--echo Slave_DDL_Groups= $status; + +--let $status= query_get_value(show slave 'master2' status, Slave_Non_Transactional_Groups, 1) +--echo Slave_Non_Transactional_Groups= $status; + +--let $status= query_get_value(show slave 'master2' status, Slave_Transactional_Groups, 1) +--echo Slave_Transactional_Groups= $status; + +--echo #master 1 +--connection master1 + +--let $loop_counter=10 +--let $table_engine=innodb +--source multi_parallel_loop.inc +--save_master_pos + +--connection slave + +--sync_with_master 0,'master1' +--let $status= query_get_value(show slave 'master1' status, Slave_DDL_Groups, 1) +--echo Slave_DDL_Groups= $status; + +--let $status= query_get_value(show slave 'master1' status, Slave_Non_Transactional_Groups, 1) +--echo Slave_Non_Transactional_Groups= $status; + +--let $status= query_get_value(show slave 'master1' status, Slave_Transactional_Groups, 1) +--echo Slave_Transactional_Groups= $status; + + +# Cleanup +stop all slaves; +set default_master_connection = 'master1'; +--source include/wait_for_slave_to_stop.inc + +set default_master_connection = 'master2'; +--source include/wait_for_slave_to_stop.inc + +--eval set global slave_parallel_threads=$par_thd + +--source include/reset_master_slave.inc +--disconnect slave +--connection master1 +--source include/reset_master_slave.inc +--disconnect master1 +--connection master2 +--source include/reset_master_slave.inc +--disconnect master2 diff --git a/mysql-test/suite/multi_source/multi_parallel_loop.inc b/mysql-test/suite/multi_source/multi_parallel_loop.inc new file mode 100644 index 00000000000..bf692b28ede --- /dev/null +++ b/mysql-test/suite/multi_source/multi_parallel_loop.inc @@ -0,0 +1,19 @@ +#create a table,insert data and drop table + +#parameters +# loop_counter +# table_engine +--echo ##Running CURD operation +--disable_query_log +while ($loop_counter) +{ + #DDL statement + --eval create table t1(a int primary key) engine=$table_engine; + + #non 
trans update statement + insert into t1 values(1); + insert into t1 values(2); + drop table t1; + --dec $loop_counter +} +--enable_query_log diff --git a/mysql-test/suite/multi_source/reset_slave.result b/mysql-test/suite/multi_source/reset_slave.result index 353970ac8ff..c1d74ab9f3f 100644 --- a/mysql-test/suite/multi_source/reset_slave.result +++ b/mysql-test/suite/multi_source/reset_slave.result @@ -13,15 +13,15 @@ insert into t1 values (1),(2); connection slave; stop slave 'master1'; show slave 'master1' status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State - 127.0.0.1 root MYPORT_1 60 master-bin.000001 mysqld-relay-bin-master1.000002 master-bin.000001 No No 0 0 None 0 No NULL No 0 0 1 No conservative 0 NULL +Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups + 127.0.0.1 root MYPORT_1 60 master-bin.000001 mysqld-relay-bin-master1.000002 master-bin.000001 No No 0 0 None 0 No NULL No 0 0 1 No conservative 0 NULL 2 1 0 mysqld-relay-bin-master1.000001 mysqld-relay-bin-master1.000002 mysqld-relay-bin-master1.index reset slave 'master1'; show slave 'master1' status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos 
Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State - 127.0.0.1 root MYPORT_1 60 4 No No 0 0 0 None 0 No NULL No 0 0 1 No conservative 0 NULL +Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups + 127.0.0.1 root MYPORT_1 60 4 No No 0 0 0 None 0 No NULL No 0 0 1 No conservative 0 NULL 2 1 0 reset slave 'master1' all; show slave 'master1' status; ERROR HY000: There is no master connection 'master1' diff --git a/mysql-test/suite/multi_source/simple.result b/mysql-test/suite/multi_source/simple.result index 419b9951905..93ea1c023bc 100644 --- a/mysql-test/suite/multi_source/simple.result +++ b/mysql-test/suite/multi_source/simple.result @@ -18,9 +18,9 @@ connection slave; connection master2; connection slave; show all slaves status; -Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos -slave1 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 mysqld-relay-bin-slave1.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 1 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 1073741824 7 0 60.000 -slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 mysqld-relay-bin-slave2.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 1073741824 7 0 60.000 +Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port 
Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos +slave1 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 mysqld-relay-bin-slave1.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 1 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 +slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 mysqld-relay-bin-slave2.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 start all slaves; stop slave 'slave1'; show slave 'slave1' status; @@ -74,21 +74,24 @@ Parallel_Mode conservative SQL_Delay 0 SQL_Remaining_Delay NULL Slave_SQL_Running_State +Slave_DDL_Groups 0 +Slave_Non_Transactional_Groups 0 +Slave_Transactional_Groups 0 reset slave 'slave1'; show all slaves status; -Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos -slave1 127.0.0.1 root MYPORT_1 60 4 No No 0 0 0 None 0 No NULL No 0 0 1 No conservative 0 NULL 0 1073741824 7 0 60.000 -slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 mysqld-relay-bin-slave2.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to 
update it 0 1073741824 7 0 60.000 +Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos +slave1 127.0.0.1 root MYPORT_1 60 4 No No 0 0 0 None 0 No NULL No 0 0 1 No conservative 0 NULL 0 0 0 0 1073741824 7 0 60.000 +slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 mysqld-relay-bin-slave2.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 reset slave 'slave1' all; show all slaves status; -Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos -slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 mysqld-relay-bin-slave2.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 1073741824 7 0 60.000 +Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos 
Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos +slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 mysqld-relay-bin-slave2.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 stop all slaves; Warnings: Note 1938 SLAVE 'slave2' stopped show all slaves status; -Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos -slave2 127.0.0.1 root MYPORT_2 60 master-bin.000001 mysqld-relay-bin-slave2.000002 master-bin.000001 No No 0 0 None 0 No NULL No 0 0 2 No conservative 0 NULL 0 1073741824 7 0 60.000 +Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos +slave2 127.0.0.1 root MYPORT_2 60 master-bin.000001 mysqld-relay-bin-slave2.000002 master-bin.000001 No No 0 0 None 0 No NULL No 0 0 2 No 
conservative 0 NULL 0 0 0 0 1073741824 7 0 60.000 stop all slaves; include/reset_master_slave.inc disconnect slave; diff --git a/mysql-test/suite/multi_source/syntax.result b/mysql-test/suite/multi_source/syntax.result index a17a61d3e7c..35f4b3048ad 100644 --- a/mysql-test/suite/multi_source/syntax.result +++ b/mysql-test/suite/multi_source/syntax.result @@ -1,11 +1,11 @@ include/master-slave.inc [connection master] show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State +Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups show slave '' status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State +Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space 
Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups show all slaves status; -Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos +Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos # # Check error handling # diff --git a/mysql-test/suite/parts/inc/part_alter_values.inc b/mysql-test/suite/parts/inc/part_alter_values.inc index 0d4929d9820..ac69169a9ca 100644 --- a/mysql-test/suite/parts/inc/part_alter_values.inc +++ b/mysql-test/suite/parts/inc/part_alter_values.inc @@ -35,3 +35,13 @@ ALTER TABLE t1 REORGANIZE PARTITION p1 INTO PARTITION p3 VALUES IN (4,5,6) ); DROP TABLE t1; + +# +# MDEV-15456 Server crashes upon adding or dropping a partition in ALTER under LOCK TABLE after ER_SAME_NAME_PARTITION +# +--eval create table t1 (i int) engine=$engine partition by range(i) (partition p0 values less than (10)) +lock table t1 write; +--error ER_SAME_NAME_PARTITION +alter table t1 add partition (partition p0 values less than (20)); +alter table t1 add partition (partition p1 values less than (20)) /* comment */; 
+drop table t1; diff --git a/mysql-test/suite/parts/r/partition_alter_innodb.result b/mysql-test/suite/parts/r/partition_alter_innodb.result index 99697086170..f3921a1db26 100644 --- a/mysql-test/suite/parts/r/partition_alter_innodb.result +++ b/mysql-test/suite/parts/r/partition_alter_innodb.result @@ -42,3 +42,9 @@ PARTITION p3 VALUES IN (4,5,6) ); ERROR HY000: Syntax error: LIST PARTITIONING requires definition of VALUES IN for each partition DROP TABLE t1; +create table t1 (i int) engine=InnoDB partition by range(i) (partition p0 values less than (10)); +lock table t1 write; +alter table t1 add partition (partition p0 values less than (20)); +ERROR HY000: Duplicate partition name p0 +alter table t1 add partition (partition p1 values less than (20)) /* comment */; +drop table t1; diff --git a/mysql-test/suite/parts/r/partition_alter_maria.result b/mysql-test/suite/parts/r/partition_alter_maria.result index c7e9028a29c..77f511d9b3b 100644 --- a/mysql-test/suite/parts/r/partition_alter_maria.result +++ b/mysql-test/suite/parts/r/partition_alter_maria.result @@ -69,3 +69,9 @@ PARTITION p3 VALUES IN (4,5,6) ); ERROR HY000: Syntax error: LIST PARTITIONING requires definition of VALUES IN for each partition DROP TABLE t1; +create table t1 (i int) engine=Aria partition by range(i) (partition p0 values less than (10)); +lock table t1 write; +alter table t1 add partition (partition p0 values less than (20)); +ERROR HY000: Duplicate partition name p0 +alter table t1 add partition (partition p1 values less than (20)) /* comment */; +drop table t1; diff --git a/mysql-test/suite/parts/r/partition_alter_myisam.result b/mysql-test/suite/parts/r/partition_alter_myisam.result index 50b8b802ad4..ce3e04d6c97 100644 --- a/mysql-test/suite/parts/r/partition_alter_myisam.result +++ b/mysql-test/suite/parts/r/partition_alter_myisam.result @@ -42,6 +42,12 @@ PARTITION p3 VALUES IN (4,5,6) ); ERROR HY000: Syntax error: LIST PARTITIONING requires definition of VALUES IN for each partition DROP TABLE t1; +create table t1 (i int) engine=MyISAM partition by range(i) (partition p0 values less than (10)); +lock table t1 write; +alter table t1 add partition (partition p0 values less than (20)); +ERROR HY000: Duplicate partition name p0 +alter table t1 add partition (partition p1 values less than (20)) /* comment */; +drop table t1; create table t1 ( c1 int, c2 int, c3 varchar(100)) delay_key_write=1 partition by key(c1) ( partition p01 data directory = 'MYSQL_TMP_DIR' diff --git a/mysql-test/suite/parts/r/partition_basic_symlink_innodb.result b/mysql-test/suite/parts/r/partition_basic_symlink_innodb.result index c86b057433a..65a5edba254 100644 --- a/mysql-test/suite/parts/r/partition_basic_symlink_innodb.result +++ b/mysql-test/suite/parts/r/partition_basic_symlink_innodb.result @@ -53,6 +53,7 @@ Warning 1618 option ignored Warning 1618 option ignored # Verifying .frm, .par, .isl & .ibd files ---- MYSQLD_DATADIR/test +db.opt t1#P#p0.isl t1#P#p1.isl t1.frm @@ -88,6 +89,7 @@ t1 CREATE TABLE `t1` ( PARTITION `p1` DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' ENGINE = MyISAM) # Verifying .frm, .par and MyISAM files (.MYD, MYI) ---- MYSQLD_DATADIR/test +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -115,6 +117,7 @@ t1 CREATE TABLE `t1` ( PARTITION `p1` DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' ENGINE = InnoDB) # Verifying .frm, .par, .isl and InnoDB .ibd files ---- MYSQLD_DATADIR/test +db.opt t1#P#p0.isl t1#P#p1.isl t1.frm diff --git a/mysql-test/suite/parts/r/partition_debug.result 
b/mysql-test/suite/parts/r/partition_debug.result index 0c43cc70c99..7c6a0f33a9f 100644 --- a/mysql-test/suite/parts/r/partition_debug.result +++ b/mysql-test/suite/parts/r/partition_debug.result @@ -40,6 +40,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -75,6 +76,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -85,6 +87,7 @@ t2.MYD t2.MYI t2.frm # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -152,6 +155,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -187,6 +191,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -197,6 +202,7 @@ t2.MYD t2.MYI t2.frm # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -264,6 +270,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -299,6 +306,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -309,6 +317,7 @@ t2.MYD t2.MYI t2.frm # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -376,6 +385,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition 
p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -413,6 +423,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sqlx-nnnn_nnnn.MYD #sqlx-nnnn_nnnn.MYI +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -421,6 +432,7 @@ t1.frm t1.par t2.frm # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -488,6 +500,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -525,6 +538,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sqlx-nnnn_nnnn.MYD #sqlx-nnnn_nnnn.MYI +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -533,6 +547,7 @@ t1.frm t1.par t2.frm # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -600,6 +615,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -637,6 +653,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sqlx-nnnn_nnnn.MYD #sqlx-nnnn_nnnn.MYI +db.opt t1#P#p1.MYD t1#P#p1.MYI t1.frm @@ -645,6 +662,7 @@ t2.MYD t2.MYI t2.frm # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -712,6 +730,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -749,6 +768,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sqlx-nnnn_nnnn.MYD #sqlx-nnnn_nnnn.MYI +db.opt t1#P#p1.MYD t1#P#p1.MYI t1.frm @@ -757,6 +777,7 @@ t2.MYD t2.MYI t2.frm # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -824,6 +845,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), 
(3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -859,6 +881,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -869,6 +892,7 @@ t2.MYD t2.MYI t2.frm # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -936,6 +960,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -971,6 +996,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -981,6 +1007,7 @@ t2.MYD t2.MYI t2.frm # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1048,6 +1075,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1083,6 +1111,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error in DDL log # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1150,6 +1179,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1185,6 +1215,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error in DDL log # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1252,6 +1283,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from 
partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1287,6 +1319,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error on rename of './test/t2' to './test/#sqlx-nnnn_nnnn' (errno: 0 "Internal error/check (Not system error)") # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1354,6 +1387,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1389,6 +1423,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error in DDL log # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1456,6 +1491,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1491,6 +1527,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error on rename of './test/t1#P#p0' to './test/t2' (errno: 0 "Internal error/check (Not system error)") # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1558,6 +1595,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1593,6 +1631,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error in DDL log # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1660,6 +1699,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from 
partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1695,6 +1735,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error on rename of './test/#sqlx-nnnn_nnnn' to './test/t1#P#p0' (errno: 0 "Internal error/check (Not system error)") # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1762,6 +1803,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1797,6 +1839,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error in DDL log # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1864,6 +1907,7 @@ CREATE TABLE t1 (a INT, b VARCHAR(64)) PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -1899,6 +1943,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error in DDL log # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD diff --git a/mysql-test/suite/parts/r/partition_debug_innodb.result b/mysql-test/suite/parts/r/partition_debug_innodb.result index 5af7990c84b..dfb576ae52d 100644 --- a/mysql-test/suite/parts/r/partition_debug_innodb.result +++ b/mysql-test/suite/parts/r/partition_debug_innodb.result @@ -20,6 +20,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -47,11 +48,13 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -85,6 +88,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from 
partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -114,11 +118,13 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -152,6 +158,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -181,11 +188,13 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -219,6 +228,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -248,11 +258,13 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -286,6 +298,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -315,12 +328,14 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -354,6 +369,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -383,12 +399,14 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -422,6 +440,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, 
"Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -451,12 +470,14 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -490,6 +511,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -519,12 +541,14 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -560,6 +584,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -587,12 +612,14 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -628,6 +655,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -655,12 +683,14 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -697,6 +727,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -724,6 +755,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure 
+db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -756,6 +788,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -784,6 +817,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -818,6 +852,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -845,6 +880,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -877,6 +913,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -905,6 +942,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -939,6 +977,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -966,6 +1005,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -998,6 +1038,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1026,6 +1067,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1060,6 +1102,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, 
"Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1087,6 +1130,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1119,6 +1163,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1147,6 +1192,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1181,6 +1227,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1208,6 +1255,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1240,6 +1288,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1268,6 +1317,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1302,6 +1352,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1329,6 +1380,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1361,6 +1413,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State 
before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1389,6 +1442,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1423,6 +1477,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1450,6 +1505,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1482,6 +1538,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1510,6 +1567,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1544,6 +1602,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1571,6 +1630,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -1605,6 +1665,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1633,6 +1694,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -1669,6 +1731,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1696,6 +1759,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after 
failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -1730,6 +1794,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1758,6 +1823,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -1794,6 +1860,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1821,6 +1888,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -1855,6 +1923,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1883,6 +1952,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -1920,6 +1990,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1946,11 +2017,13 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -1984,6 +2057,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2012,11 +2086,13 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2050,6 
+2126,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2078,11 +2155,13 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2116,6 +2195,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2144,11 +2224,13 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1.frm t1.par @@ -2176,6 +2258,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2204,11 +2287,13 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1.frm t1.par @@ -2236,6 +2321,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2264,11 +2350,13 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1.frm t1.par @@ -2296,6 +2384,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2322,11 +2411,13 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State 
after crash recovery +db.opt t1#P#p0.ibd t1.frm t1.par @@ -2354,6 +2445,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2380,10 +2472,12 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1.frm t1.par @@ -2411,6 +2505,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2437,10 +2532,12 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1.frm t1.par @@ -2469,6 +2566,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2495,6 +2593,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2527,6 +2626,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2554,6 +2654,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2588,6 +2689,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2614,6 +2716,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2646,6 +2749,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition 
p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2673,6 +2777,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2707,6 +2812,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2733,6 +2839,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2765,6 +2872,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2792,6 +2900,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2826,6 +2935,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2852,6 +2962,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1.frm t1.par @@ -2878,6 +2989,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2905,6 +3017,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1.frm t1.par @@ -2933,6 +3046,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -2959,6 +3073,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1.frm 
t1.par @@ -2985,6 +3100,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3012,6 +3128,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1.frm t1.par @@ -3040,6 +3157,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3066,6 +3184,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1.frm t1.par @@ -3092,6 +3211,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3119,6 +3239,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1.frm t1.par @@ -3147,6 +3268,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3173,6 +3295,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1.frm t1.par @@ -3199,6 +3322,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3226,6 +3350,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1.frm t1.par @@ -3254,6 +3379,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt 
t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3280,6 +3406,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1.frm t1.par @@ -3306,6 +3433,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3333,6 +3461,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1.frm t1.par @@ -3361,6 +3490,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3387,6 +3517,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1.frm t1.par @@ -3413,6 +3544,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3440,6 +3572,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1.frm t1.par @@ -3470,6 +3603,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3498,11 +3632,13 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3536,6 +3672,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3566,11 +3703,13 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State after crash recovery +db.opt 
t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3604,6 +3743,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3634,11 +3774,13 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3672,6 +3814,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3702,6 +3845,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10#TMP#.ibd t1#P#p10.ibd @@ -3709,6 +3853,7 @@ t1#P#p20#TMP#.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3742,6 +3887,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3772,6 +3918,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10#TMP#.ibd t1#P#p10.ibd @@ -3779,6 +3926,7 @@ t1#P#p20#TMP#.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3812,6 +3960,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3842,6 +3991,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10#TMP#.ibd t1#P#p10.ibd @@ -3849,6 +3999,7 @@ t1#P#p20#TMP#.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3882,6 +4033,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd 
t1#P#p10.ibd t1.frm @@ -3912,6 +4064,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10#TMP#.ibd t1#P#p10.ibd @@ -3919,6 +4072,7 @@ t1#P#p20#TMP#.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -3954,6 +4108,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -3984,6 +4139,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.ibd t1#P#p10#TMP#.ibd t1#P#p10.ibd @@ -3991,6 +4147,7 @@ t1#P#p20#TMP#.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -4026,6 +4183,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4054,6 +4212,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p10#TMP#.ibd t1#P#p10.ibd @@ -4061,6 +4220,7 @@ t1#P#p20#TMP#.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -4096,6 +4256,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4124,6 +4285,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p10#TMP#.ibd t1#P#p10.ibd @@ -4131,6 +4293,7 @@ t1#P#p20#TMP#.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -4166,6 +4329,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4194,12 +4358,14 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) 
+db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -4235,6 +4401,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4263,12 +4430,14 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -4306,6 +4475,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4334,6 +4504,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4366,6 +4537,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4395,6 +4567,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4429,6 +4602,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4457,6 +4631,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4489,6 +4664,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4518,6 +4694,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 
VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4552,6 +4729,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4580,6 +4758,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4612,6 +4791,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4641,6 +4821,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4675,6 +4856,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4703,6 +4885,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4735,6 +4918,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4764,6 +4948,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4798,6 +4983,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4826,6 +5012,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4858,6 
+5045,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4887,6 +5075,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4921,6 +5110,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4949,6 +5139,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -4981,6 +5172,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5010,6 +5202,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5044,6 +5237,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5072,6 +5266,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -5106,6 +5301,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5135,6 +5331,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -5171,6 +5368,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from 
partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5199,6 +5397,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -5233,6 +5432,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5262,6 +5462,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -5298,6 +5499,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5326,6 +5528,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -5360,6 +5563,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5389,6 +5593,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -5425,6 +5630,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5453,6 +5659,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -5487,6 +5694,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition 
p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5516,6 +5724,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -5552,6 +5761,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5580,6 +5790,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -5614,6 +5825,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5643,6 +5855,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -5679,6 +5892,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5707,6 +5921,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -5741,6 +5956,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -5770,6 +5986,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.ibd t1#P#p10.ibd t1#P#p20.ibd @@ -5826,6 +6043,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, 
"Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -5858,6 +6076,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -5865,6 +6084,7 @@ t1.par t2.frm t2.ibd # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -5931,6 +6151,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -5963,6 +6184,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -5970,6 +6192,7 @@ t1.par t2.frm t2.ibd # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6036,6 +6259,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6068,6 +6292,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6075,6 +6300,7 @@ t1.par t2.frm t2.ibd # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6141,6 +6367,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6174,12 +6401,14 @@ ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sqlx-nnnn_nnnn.ibd +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm t1.par t2.frm # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6246,6 +6475,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition 
p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6279,12 +6509,14 @@ ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sqlx-nnnn_nnnn.ibd +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm t1.par t2.frm # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6351,6 +6583,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6384,12 +6617,14 @@ ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sqlx-nnnn_nnnn.ibd +db.opt t1#P#p1.ibd t1.frm t1.par t2.frm t2.ibd # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6456,6 +6691,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6489,12 +6725,14 @@ ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sqlx-nnnn_nnnn.ibd +db.opt t1#P#p1.ibd t1.frm t1.par t2.frm t2.ibd # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6561,6 +6799,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6593,6 +6832,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6600,6 +6840,7 @@ t1.par t2.frm t2.ibd # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6666,6 +6907,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original 
from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before crash +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6698,6 +6940,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6705,6 +6948,7 @@ t1.par t2.frm t2.ibd # State after crash recovery +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6771,6 +7015,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6803,6 +7048,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error in DDL log # State after failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6869,6 +7115,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6901,6 +7148,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error in DDL log # State after failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6967,6 +7215,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -6999,6 +7248,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error on rename of './test/t2' to './test/#sqlx-nnnn_nnnn' (errno: 0 "Internal error/check (Not system error)") # State after failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -7065,6 +7315,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), 
(21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -7097,6 +7348,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error in DDL log # State after failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -7163,6 +7415,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -7195,6 +7448,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error on rename of './test/t1#P#p0' to './test/t2' (errno: 0 "Internal error/check (Not system error)") # State after failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -7261,6 +7515,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -7293,6 +7548,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error in DDL log # State after failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -7359,6 +7615,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -7391,6 +7648,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error on rename of './test/#sqlx-nnnn_nnnn' to './test/t1#P#p0' (errno: 0 "Internal error/check (Not system error)") # State after failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -7457,6 +7715,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -7489,6 +7748,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: 
Error in DDL log # State after failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -7555,6 +7815,7 @@ PARTITION BY RANGE (a) PARTITION p1 VALUES LESS THAN MAXVALUE); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1"); # State before failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm @@ -7587,6 +7848,7 @@ a b ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; ERROR HY000: Error in DDL log # State after failure +db.opt t1#P#p0.ibd t1#P#p1.ibd t1.frm diff --git a/mysql-test/suite/parts/r/partition_debug_myisam.result b/mysql-test/suite/parts/r/partition_debug_myisam.result index ba31418bfb0..fc2dcf7202a 100644 --- a/mysql-test/suite/parts/r/partition_debug_myisam.result +++ b/mysql-test/suite/parts/r/partition_debug_myisam.result @@ -17,6 +17,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -46,6 +47,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -53,6 +55,7 @@ t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -88,6 +91,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -119,6 +123,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -126,6 +131,7 @@ t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -161,6 +167,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -192,6 +199,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -199,6 +207,7 @@ t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -234,6 +243,7 @@ PARTITION BY LIST (a) 
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -265,6 +275,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -272,6 +283,7 @@ t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -307,6 +319,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -338,6 +351,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -347,6 +361,7 @@ t1#P#p20.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -382,6 +397,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -413,6 +429,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -422,6 +439,7 @@ t1#P#p20.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -457,6 +475,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -488,6 +507,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -497,6 +517,7 @@ t1#P#p20.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -532,6 +553,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -563,6 +585,7 @@ ERROR 
HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -572,6 +595,7 @@ t1#P#p20.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -610,6 +634,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -639,6 +664,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -648,6 +674,7 @@ t1#P#p20.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -686,6 +713,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -715,6 +743,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -724,6 +753,7 @@ t1#P#p20.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -763,6 +793,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -792,6 +823,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -826,6 +858,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -856,6 +889,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -892,6 +926,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from 
partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -921,6 +956,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -955,6 +991,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -985,6 +1022,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1021,6 +1059,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1050,6 +1089,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1084,6 +1124,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1114,6 +1155,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1150,6 +1192,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1179,6 +1222,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1213,6 +1257,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before 
failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1243,6 +1288,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1279,6 +1325,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1308,6 +1355,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1342,6 +1390,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1372,6 +1421,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1408,6 +1458,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1437,6 +1488,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1471,6 +1523,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1501,6 +1554,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1537,6 +1591,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1566,6 +1621,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR 
HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1600,6 +1656,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1630,6 +1687,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1666,6 +1724,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1695,6 +1754,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1732,6 +1792,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1762,6 +1823,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1801,6 +1863,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1830,6 +1893,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1867,6 +1931,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1897,6 +1962,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1936,6 +2002,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN 
(11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -1965,6 +2032,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2002,6 +2070,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2032,6 +2101,7 @@ ALTER TABLE t1 ADD PARTITION (PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2072,6 +2142,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2100,6 +2171,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2107,6 +2179,7 @@ t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2142,6 +2215,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2172,6 +2246,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2179,6 +2254,7 @@ t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2214,6 +2290,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2244,6 +2321,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2251,6 +2329,7 @@ 
t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2286,6 +2365,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2316,6 +2396,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2323,6 +2404,7 @@ t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -2351,6 +2433,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2381,6 +2464,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2388,6 +2472,7 @@ t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -2416,6 +2501,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2446,6 +2532,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2453,6 +2540,7 @@ t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -2481,6 +2569,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2509,6 +2598,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2516,6 +2606,7 @@ t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -2544,6 +2635,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, 
"Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2572,11 +2664,13 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -2605,6 +2699,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2633,11 +2728,13 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -2667,6 +2764,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2695,6 +2793,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2729,6 +2828,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2758,6 +2858,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2794,6 +2895,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2822,6 +2924,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2856,6 +2959,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2885,6 +2989,7 @@ LOCK 
TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2921,6 +3026,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2949,6 +3055,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -2983,6 +3090,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3012,6 +3120,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3048,6 +3157,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3076,6 +3186,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -3103,6 +3214,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3132,6 +3244,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -3161,6 +3274,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3189,6 +3303,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -3216,6 +3331,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from 
partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3245,6 +3361,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -3274,6 +3391,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3302,6 +3420,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -3329,6 +3448,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3358,6 +3478,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -3387,6 +3508,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3415,6 +3537,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -3442,6 +3565,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3471,6 +3595,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -3500,6 +3625,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3528,6 +3654,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -3555,6 
+3682,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3584,6 +3712,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -3613,6 +3742,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3641,6 +3771,7 @@ a b ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -3668,6 +3799,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3697,6 +3829,7 @@ LOCK TABLE t1 WRITE; ALTER TABLE t1 DROP PARTITION p10; ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1.frm @@ -3728,6 +3861,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3758,6 +3892,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3765,6 +3900,7 @@ t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3800,6 +3936,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3832,6 +3969,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3839,6 +3977,7 @@ t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3874,6 +4013,7 @@ 
PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3906,6 +4046,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3913,6 +4054,7 @@ t1#P#p10.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3948,6 +4090,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -3980,6 +4123,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10#TMP#.MYD @@ -3991,6 +4135,7 @@ t1#P#p20#TMP#.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4026,6 +4171,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4058,6 +4204,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10#TMP#.MYD @@ -4069,6 +4216,7 @@ t1#P#p20#TMP#.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4104,6 +4252,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4136,6 +4285,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10#TMP#.MYD @@ -4147,6 +4297,7 @@ t1#P#p20#TMP#.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4182,6 +4333,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before 
crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4214,6 +4366,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10#TMP#.MYD @@ -4225,6 +4378,7 @@ t1#P#p20#TMP#.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4263,6 +4417,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4295,6 +4450,7 @@ ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) #sql-t1.frm #sql-t1.par +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10#TMP#.MYD @@ -4306,6 +4462,7 @@ t1#P#p20#TMP#.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4344,6 +4501,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4374,6 +4532,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10#TMP#.MYD @@ -4385,6 +4544,7 @@ t1#P#p20#TMP#.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4423,6 +4583,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4453,6 +4614,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10#TMP#.MYD @@ -4464,6 +4626,7 @@ t1#P#p20#TMP#.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4502,6 +4665,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4532,6 +4696,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during 
query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4541,6 +4706,7 @@ t1#P#p20.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4579,6 +4745,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before crash +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4609,6 +4776,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Lost connection to MySQL server during query # State after crash (before recovery) +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4618,6 +4786,7 @@ t1#P#p20.MYI t1.frm t1.par # State after crash recovery +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4658,6 +4827,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4688,6 +4858,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4722,6 +4893,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4753,6 +4925,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4789,6 +4962,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4819,6 +4993,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4853,6 +5028,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # 
State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4884,6 +5060,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4920,6 +5097,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4950,6 +5128,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -4984,6 +5163,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5015,6 +5195,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5051,6 +5232,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5081,6 +5263,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5115,6 +5298,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5146,6 +5330,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5182,6 +5367,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5212,6 +5398,7 @@ ALTER 
TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5246,6 +5433,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5277,6 +5465,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5313,6 +5502,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5343,6 +5533,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5377,6 +5568,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5408,6 +5600,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5444,6 +5637,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5474,6 +5668,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5511,6 +5706,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5542,6 +5738,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN 
(20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5581,6 +5778,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5611,6 +5809,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5648,6 +5847,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5679,6 +5879,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5718,6 +5919,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5748,6 +5950,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5785,6 +5988,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5816,6 +6020,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5855,6 +6060,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5885,6 +6091,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt 
t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5922,6 +6129,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5953,6 +6161,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -5992,6 +6201,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -6022,6 +6232,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -6059,6 +6270,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -6090,6 +6302,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -6129,6 +6342,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -6159,6 +6373,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -6196,6 +6411,7 @@ PARTITION BY LIST (a) PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19)); INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"); # State before failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD @@ -6227,6 +6443,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29)); ERROR HY000: Unknown error # State after failure +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p10.MYD diff --git 
a/mysql-test/suite/parts/r/partition_debug_sync_innodb.result b/mysql-test/suite/parts/r/partition_debug_sync_innodb.result index 678d1968aa4..4841c93e29f 100644 --- a/mysql-test/suite/parts/r/partition_debug_sync_innodb.result +++ b/mysql-test/suite/parts/r/partition_debug_sync_innodb.result @@ -53,6 +53,7 @@ t1 CREATE TABLE `t1` ( ) ENGINE=InnoDB DEFAULT CHARSET=latin1 PARTITION BY RANGE (`a`) (PARTITION `p0` VALUES LESS THAN MAXVALUE ENGINE = InnoDB) +db.opt t1#P#p0.ibd t1.frm t1.par @@ -79,6 +80,7 @@ connection default; TABLE_SCHEMA TABLE_NAME PARTITION_NAME PARTITION_ORDINAL_POSITION PARTITION_DESCRIPTION TABLE_ROWS test t1 p0 1 10 1 test t1 p10 2 MAXVALUE 3 +db.opt t1#P#p0.ibd t1#P#p10.ibd t1.frm @@ -98,4 +100,5 @@ a 21 33 drop table t1; +db.opt SET DEBUG_SYNC = 'RESET'; diff --git a/mysql-test/suite/parts/r/partition_recover_myisam.result b/mysql-test/suite/parts/r/partition_recover_myisam.result index ccbc9a6c9ef..4b9e3f5c283 100644 --- a/mysql-test/suite/parts/r/partition_recover_myisam.result +++ b/mysql-test/suite/parts/r/partition_recover_myisam.result @@ -18,7 +18,6 @@ a 11 Warnings: Error 145 Table 't1_will_crash' is marked as crashed and should be repaired -Error 1194 Table 't1_will_crash' is marked as crashed and should be repaired Error 1034 1 client is using or hasn't closed the table properly Error 1034 Size of indexfile is: 1024 Should be: 2048 Error 1034 Size of datafile is: 77 Should be: 7 @@ -48,7 +47,6 @@ a 11 Warnings: Error 145 Table 't1_will_crash#P#p1' is marked as crashed and should be repaired -Error 1194 Table 't1_will_crash' is marked as crashed and should be repaired Error 1034 1 client is using or hasn't closed the table properly Error 1034 Size of indexfile is: 1024 Should be: 2048 Error 1034 Size of datafile is: 28 Should be: 7 diff --git a/mysql-test/suite/parts/r/quoting.result b/mysql-test/suite/parts/r/show_create.result similarity index 89% rename from mysql-test/suite/parts/r/quoting.result rename to mysql-test/suite/parts/r/show_create.result index 66606832e77..79ac61d180d 100644 --- a/mysql-test/suite/parts/r/quoting.result +++ b/mysql-test/suite/parts/r/show_create.result @@ -91,3 +91,15 @@ t2 CREATE TABLE "t2" ( PARTITION BY RANGE ("f1") (PARTITION "p1" VALUES LESS THAN MAXVALUE ENGINE = MyISAM) drop table t1, t2; +set sql_mode=default; +create table t_partition (f1 int) partition by hash(f1) partitions 2; +select * from t_partition as tbl; +f1 +show create table t_partition; +Table Create Table +t_partition CREATE TABLE `t_partition` ( + `f1` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 + PARTITION BY HASH (`f1`) +PARTITIONS 2 +drop table t_partition; diff --git a/mysql-test/suite/parts/t/quoting.test b/mysql-test/suite/parts/t/show_create.test similarity index 73% rename from mysql-test/suite/parts/t/quoting.test rename to mysql-test/suite/parts/t/show_create.test index 61af8d2d345..9c43b163790 100644 --- a/mysql-test/suite/parts/t/quoting.test +++ b/mysql-test/suite/parts/t/show_create.test @@ -30,3 +30,12 @@ set sql_mode=ansi_quotes; show create table t1; show create table t2; drop table t1, t2; +set sql_mode=default; + +# +# MDEV-14750 Valgrind Invalid read, ASAN heap-use-after-free in Item_ident::print upon SHOW CREATE on partitioned table +# +create table t_partition (f1 int) partition by hash(f1) partitions 2; +select * from t_partition as tbl; +show create table t_partition; +drop table t_partition; diff --git a/mysql-test/suite/perfschema/r/partition.result b/mysql-test/suite/perfschema/r/partition.result new file mode 
100644 index 00000000000..9bc624268bb --- /dev/null +++ b/mysql-test/suite/perfschema/r/partition.result @@ -0,0 +1,10 @@ +# +# MDEV-10679 +# Server crashes in in mysql_create_frm_image upon query from +# performance schema in ps-protocol mode +# +CREATE TABLE t1 (i INT); +ALTER TABLE t1 ADD PARTITION (PARTITION p VALUES LESS THAN (1)); +ERROR HY000: Partition management on a not partitioned table is not possible +SELECT * FROM performance_schema.events_stages_summary_by_user_by_event_name; +DROP TABLE t1; diff --git a/mysql-test/suite/perfschema/t/partition.test b/mysql-test/suite/perfschema/t/partition.test new file mode 100644 index 00000000000..0b3b204dee7 --- /dev/null +++ b/mysql-test/suite/perfschema/t/partition.test @@ -0,0 +1,16 @@ +--source include/have_perfschema.inc +--source include/have_partition.inc + +--echo # +--echo # MDEV-10679 +--echo # Server crashes in in mysql_create_frm_image upon query from +--echo # performance schema in ps-protocol mode +--echo # + +CREATE TABLE t1 (i INT); +--error ER_PARTITION_MGMT_ON_NONPARTITIONED +ALTER TABLE t1 ADD PARTITION (PARTITION p VALUES LESS THAN (1)); +--disable_result_log +SELECT * FROM performance_schema.events_stages_summary_by_user_by_event_name; +--enable_result_log +DROP TABLE t1; diff --git a/mysql-test/suite/plugins/r/server_audit.result b/mysql-test/suite/plugins/r/server_audit.result index 0d02ae47586..01392760317 100644 --- a/mysql-test/suite/plugins/r/server_audit.result +++ b/mysql-test/suite/plugins/r/server_audit.result @@ -191,6 +191,17 @@ select 2; 2 2 drop table t1; +set global server_audit_events='query_dml_no_select'; +create table t1(id int); +insert into t1 values (1), (2); +select * from t1; +id +1 +2 +select 2; +2 +2 +drop table t1; set global server_audit_events=''; set global server_audit_query_log_limit= 15; select (1), (2), (3), (4); @@ -352,6 +363,7 @@ TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'SET PASSWORD \n# comment\nFOR u1 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'SET PASSWORD FOR u1=',ID TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'CREATE USER u3 IDENTIFIED BY *****',0 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop user u1, u2, u3',0 +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'insert into t1 values (1), (2)',0 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_events=\'\'',0 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global serv',0 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'select (1), (2)',0 diff --git a/mysql-test/suite/plugins/t/server_audit.test b/mysql-test/suite/plugins/t/server_audit.test index 6c5eaffd9a2..4af1ed883e3 100644 --- a/mysql-test/suite/plugins/t/server_audit.test +++ b/mysql-test/suite/plugins/t/server_audit.test @@ -121,6 +121,13 @@ select 2; /*! 
select 2*/; /*comment*/ select 2; drop table t1; +set global server_audit_events='query_dml_no_select'; +create table t1(id int); +insert into t1 values (1), (2); +select * from t1; +select 2; +drop table t1; + set global server_audit_events=''; set global server_audit_query_log_limit= 15; diff --git a/mysql-test/suite/rpl/disabled.def b/mysql-test/suite/rpl/disabled.def index e37ad842790..d4617398c64 100644 --- a/mysql-test/suite/rpl/disabled.def +++ b/mysql-test/suite/rpl/disabled.def @@ -17,3 +17,5 @@ rpl_row_binlog_max_cache_size : MDEV-11092 rpl_blackhole : MDEV-11094 rpl_row_mysqlbinlog : MDEV-11095 rpl_row_index_choice : MDEV-11666 +rpl_parallel2 : fails after MDEV-16172 +rpl_semi_sync_after_sync : fails after MDEV-16172 diff --git a/mysql-test/suite/rpl/include/rpl_implicit_commit_binlog.test b/mysql-test/suite/rpl/include/rpl_implicit_commit_binlog.test index 0b6402308ce..20c79ed4b3b 100644 --- a/mysql-test/suite/rpl/include/rpl_implicit_commit_binlog.test +++ b/mysql-test/suite/rpl/include/rpl_implicit_commit_binlog.test @@ -32,349 +32,97 @@ INSERT INTO tt_2(ddl_case) VALUES(0); --echo # CHECK IMPLICT COMMIT --echo ######################################################################### SET AUTOCOMMIT= 0; -let $ddl_cases= 43; -while ($ddl_cases >= 1) -{ - --echo -b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- - let $in_temporary= no; - let $ok= yes; - # - # In SBR and MIXED modes, the commit event is usually the third event in the - # binary log: - # - # 1: BEGIN - # 2: INSERT - # 3: COMMIT - # 4: DDL EVENT which triggered the previous commmit. - # - if (`select @@binlog_format = 'STATEMENT' || @@binlog_format = 'MIXED'`) - { - let $commit_event_row_number= 3; - } - # - # In RBR mode, the commit event is usually the fourth event in the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: ROW EVENT - # 4: COMMIT - # 5: DDL EVENT which triggered the previous commmit. 
- # - if (`select @@binlog_format = 'ROW'`) - { - let $commit_event_row_number= 5; - } - - let $first_binlog_position= query_get_value("SHOW MASTER STATUS", Position, 1); - --enable_query_log - eval INSERT INTO tt_1(ddl_case) VALUES ($ddl_cases); - if ($ddl_cases == 43) - { - let $cmd= CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "$UDF_EXAMPLE_SO"; - } - if ($ddl_cases == 42) - { - let $cmd= DROP FUNCTION myfunc_int; - } - if ($ddl_cases == 41) - { - let $cmd= LOAD INDEX INTO CACHE nt_1 IGNORE LEAVES; - } - if ($ddl_cases == 40) - { - let $cmd= LOAD INDEX INTO CACHE tt_1, tt_2 IGNORE LEAVES; - } - if ($ddl_cases == 39) - { - let $cmd= ANALYZE TABLE nt_1; - } - if ($ddl_cases == 38) - { - let $cmd= CHECK TABLE nt_1; - } - if ($ddl_cases == 37) - { - let $cmd= OPTIMIZE TABLE nt_1; - } - if ($ddl_cases == 36) - { - let $cmd= REPAIR TABLE nt_1; - } - if ($ddl_cases == 35) - { - let $cmd= LOCK TABLES tt_1 WRITE; - } - if ($ddl_cases == 34) - { - let $cmd= UNLOCK TABLES; - } - if ($ddl_cases == 33) - { - let $cmd= CREATE USER 'user'@'localhost'; - } - if ($ddl_cases == 32) - { - let $cmd= GRANT ALL ON *.* TO 'user'@'localhost'; - } - if ($ddl_cases == 31) - { - let $cmd= SET PASSWORD FOR 'user'@'localhost' = PASSWORD('newpass'); - } - if ($ddl_cases == 30) - { - let $cmd= REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'user'@'localhost'; - } - if ($ddl_cases == 29) - { - let $cmd= RENAME USER 'user'@'localhost' TO 'user_new'@'localhost'; - } - if ($ddl_cases == 28) - { - let $cmd= DROP USER 'user_new'@'localhost'; - } - if ($ddl_cases == 27) - { - let $cmd= CREATE EVENT evt ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO SELECT * FROM tt_1; - } - if ($ddl_cases == 26) - { - let $cmd= ALTER EVENT evt COMMENT 'evt'; - } - if ($ddl_cases == 25) - { - let $cmd= DROP EVENT evt; - } - if ($ddl_cases == 24) - { - let $cmd= CREATE TRIGGER tr AFTER INSERT ON tt_1 FOR EACH ROW UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; - } - if ($ddl_cases == 23) - { - let $cmd= DROP TRIGGER tr; - # - # In RBR mode, due to the trigger the tt_2 is also updated: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: TABLE MAP EVENT - # 4: ROW EVENT - # 5: COMMIT - # 6: DDL EVENT which triggered the previous commmit. - # - if (`select @@binlog_format = 'ROW'`) - { - let $commit_event_row_number= 6; - } - } - if ($ddl_cases == 22) - { - let $cmd= CREATE FUNCTION fc () RETURNS VARCHAR(64) RETURN "fc"; - } - if ($ddl_cases == 21) - { - let $cmd= ALTER FUNCTION fc COMMENT 'fc'; - } - if ($ddl_cases == 20) - { - let $cmd= DROP FUNCTION fc; - } - if ($ddl_cases == 19) - { - let $cmd= CREATE PROCEDURE pc () UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; - } - if ($ddl_cases == 18) - { - let $cmd= ALTER PROCEDURE pc COMMENT 'pc'; - } - if ($ddl_cases == 17) - { - let $cmd= DROP PROCEDURE pc; - } - if ($ddl_cases == 16) - { - let $cmd= CREATE VIEW v AS SELECT * FROM tt_1; - } - if ($ddl_cases == 15) - { - let $cmd= ALTER VIEW v AS SELECT * FROM tt_1; - } - if ($ddl_cases == 14) - { - let $cmd= DROP VIEW v; - } - if ($ddl_cases == 13) - { - let $cmd= CREATE INDEX ix ON tt_1(ddl_case); - } - if ($ddl_cases == 12) - { - let $cmd= DROP INDEX ix ON tt_1; - } - if ($ddl_cases == 11) - { - let $cmd= CREATE TEMPORARY TABLE tt_xx (a int); - let $in_temporary= yes; - # In SBR and MIXED modes, the DDL statement is written to the binary log but - # does not commit the current transaction. 
- # - # 1: BEGIN - # 2: CREATE TEMPORARY - # 3: INSERT - # 4: COMMIT - # - # In RBR the transaction is not committed either and the statement is not - # written to the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: ROW EVENT - # 4: COMMIT - # - if (`select @@binlog_format = 'STATEMENT' || @@binlog_format = 'MIXED'` ) - { - let $commit_event_row_number= 4; - } - } - if ($ddl_cases == 10) - { - let $cmd= ALTER TABLE tt_xx ADD COLUMN (b int); - # - # In MIXED mode, the changes are logged as rows and we have what follows: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: ROW EVENT - # 4: COMMIT - # 5: DDL EVENT which triggered the previous commmit. - # - if (`select @@binlog_format = 'MIXED'`) - { - let $commit_event_row_number= 5; - } - } - if ($ddl_cases == 9) - { - let $cmd= ALTER TABLE tt_xx RENAME new_tt_xx; - # - # In MIXED mode, the changes are logged as rows and we have what follows: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: ROW EVENT - # 4: COMMIT - # 5: DDL EVENT which triggered the previous commmit. - # - if (`select @@binlog_format = 'MIXED'`) - { - let $commit_event_row_number= 5; - } - } - if ($ddl_cases == 8) - { - let $cmd= DROP TEMPORARY TABLE IF EXISTS new_tt_xx; - let $in_temporary= yes; - # - # In SBR and MIXED modes, the DDL statement is written to the binary log - # but does not commit the current transaction: - # - # In SBR, we have what follows: - # - # 1: BEGIN - # 2: INSERT - # 3: DROP TEMPORARY - # 4: COMMIT - # - # In RBR the transaction is not committed either and the statement is not - # written to the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: ROW EVENT - # 4: COMMIT - # - if (`select @@binlog_format = 'STATEMENT'`) - { - let $commit_event_row_number= 4; - } - if (`select @@binlog_format = 'ROW'`) - { - let $commit_event_row_number= 5; - } - # In MIXED mode, the changes are logged as rows and we have what follows: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: ROW EVENT - # 4: DROP TEMPORARY - # 5: COMMIT - # - if (`select @@binlog_format = 'MIXED'`) - { - let $commit_event_row_number= 6; - } - } - if ($ddl_cases == 7) - { - let $cmd= CREATE TABLE tt_xx (a int); - } - if ($ddl_cases == 6) - { - let $cmd= ALTER TABLE tt_xx ADD COLUMN (b int); - } - if ($ddl_cases == 5) - { - let $cmd= RENAME TABLE tt_xx TO new_tt_xx; - } - if ($ddl_cases == 4) - { - let $cmd= TRUNCATE TABLE new_tt_xx; - } - if ($ddl_cases == 3) - { - let $cmd= DROP TABLE IF EXISTS tt_xx, new_tt_xx; - } - if ($ddl_cases == 2) - { - let $cmd= CREATE DATABASE db; - } - if ($ddl_cases == 1) - { - let $cmd= DROP DATABASE IF EXISTS db; - } - --replace_result $UDF_EXAMPLE_SO UDF_EXAMPLE_LIB - --eval $cmd - --disable_query_log - # - # When a temporary table is either created or dropped, there is no implicit - # commit. The flag in_temporary is used to avoid aborting the test in such - # cases. Thus we force the commit. - # - if ($in_temporary == yes) - { - --eval COMMIT - } - let $event_commit= query_get_value("SHOW BINLOG EVENTS FROM $first_binlog_position", Info, $commit_event_row_number); - if (`SELECT SUBSTRING("$event_commit",1,6) != "COMMIT"`) - { - if ($ok == yes) - { - --echo it *does not* commit the current transaction. 
- --echo $cmd - --echo $event_commit - SHOW BINLOG EVENTS; - exit; - } - } +INSERT INTO tt_1(ddl_case) VALUES (43); +replace_result $UDF_EXAMPLE_SO UDF_EXAMPLE_LIB; +eval CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "$UDF_EXAMPLE_SO"; +INSERT INTO tt_1(ddl_case) VALUES (42); +DROP FUNCTION myfunc_int; +INSERT INTO tt_1(ddl_case) VALUES (41); +LOAD INDEX INTO CACHE nt_1 IGNORE LEAVES; +INSERT INTO tt_1(ddl_case) VALUES (40); +LOAD INDEX INTO CACHE tt_1, tt_2 IGNORE LEAVES; +INSERT INTO tt_1(ddl_case) VALUES (39); +ANALYZE TABLE nt_1; +INSERT INTO tt_1(ddl_case) VALUES (38); +CHECK TABLE nt_1; +INSERT INTO tt_1(ddl_case) VALUES (37); +OPTIMIZE TABLE nt_1; +INSERT INTO tt_1(ddl_case) VALUES (36); +REPAIR TABLE nt_1; +INSERT INTO tt_1(ddl_case) VALUES (35); +LOCK TABLES tt_1 WRITE; +INSERT INTO tt_1(ddl_case) VALUES (34); +UNLOCK TABLES; +INSERT INTO tt_1(ddl_case) VALUES (33); +CREATE USER 'user'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (32); +GRANT ALL ON *.* TO 'user'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (31); +SET PASSWORD FOR 'user'@'localhost' = PASSWORD('newpass'); +INSERT INTO tt_1(ddl_case) VALUES (30); +REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'user'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (29); +RENAME USER 'user'@'localhost' TO 'user_new'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (28); +DROP USER 'user_new'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (27); +CREATE EVENT evt ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO SELECT * FROM tt_1; +INSERT INTO tt_1(ddl_case) VALUES (26); +ALTER EVENT evt COMMENT 'evt'; +INSERT INTO tt_1(ddl_case) VALUES (25); +DROP EVENT evt; +INSERT INTO tt_1(ddl_case) VALUES (24); +CREATE TRIGGER tr AFTER INSERT ON tt_1 FOR EACH ROW UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; +INSERT INTO tt_1(ddl_case) VALUES (23); +DROP TRIGGER tr; +INSERT INTO tt_1(ddl_case) VALUES (22); +CREATE FUNCTION fc () RETURNS VARCHAR(64) RETURN "fc"; +INSERT INTO tt_1(ddl_case) VALUES (21); +ALTER FUNCTION fc COMMENT 'fc'; +INSERT INTO tt_1(ddl_case) VALUES (20); +DROP FUNCTION fc; +INSERT INTO tt_1(ddl_case) VALUES (19); +CREATE PROCEDURE pc () UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; +INSERT INTO tt_1(ddl_case) VALUES (18); +ALTER PROCEDURE pc COMMENT 'pc'; +INSERT INTO tt_1(ddl_case) VALUES (17); +DROP PROCEDURE pc; +INSERT INTO tt_1(ddl_case) VALUES (16); +CREATE VIEW v AS SELECT * FROM tt_1; +INSERT INTO tt_1(ddl_case) VALUES (15); +ALTER VIEW v AS SELECT * FROM tt_1; +INSERT INTO tt_1(ddl_case) VALUES (14); +DROP VIEW v; +INSERT INTO tt_1(ddl_case) VALUES (13); +CREATE INDEX ix ON tt_1(ddl_case); +INSERT INTO tt_1(ddl_case) VALUES (12); +DROP INDEX ix ON tt_1; +INSERT INTO tt_1(ddl_case) VALUES (11); +CREATE TEMPORARY TABLE tt_xx (a int); +INSERT INTO tt_1(ddl_case) VALUES (10); +ALTER TABLE tt_xx ADD COLUMN (b int); +INSERT INTO tt_1(ddl_case) VALUES (9); +ALTER TABLE tt_xx RENAME new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (8); +DROP TEMPORARY TABLE IF EXISTS new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (7); +CREATE TABLE tt_xx (a int); +INSERT INTO tt_1(ddl_case) VALUES (6); +ALTER TABLE tt_xx ADD COLUMN (b int); +INSERT INTO tt_1(ddl_case) VALUES (5); +RENAME TABLE tt_xx TO new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (4); +TRUNCATE TABLE new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (3); +DROP TABLE IF EXISTS tt_xx, new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (2); +CREATE DATABASE db; +INSERT INTO tt_1(ddl_case) VALUES (1); +DROP DATABASE IF EXISTS db; + +source 
include/show_binlog_events.inc; - --echo -e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - let $binlog_start= $first_binlog_position; - --echo -b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- - --source include/show_binlog_events.inc - --echo -e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --echo - dec $ddl_cases; -} SET AUTOCOMMIT= 1; --echo ################################################################################### diff --git a/mysql-test/suite/rpl/r/rename.result b/mysql-test/suite/rpl/r/rename.result new file mode 100644 index 00000000000..8220ae1f3b1 --- /dev/null +++ b/mysql-test/suite/rpl/r/rename.result @@ -0,0 +1,36 @@ +include/master-slave.inc +[connection master] +# +# MDEV-16229 Replication aborts with ER_VIEW_SELECT_TMPTABLE after +# half-failed RENAME +# +CREATE TABLE t1 (a INT); +CREATE TEMPORARY TABLE t1 (b INT); +RENAME TABLE t1 TO tmp, tmp TO t1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TEMPORARY TABLE `t1` ( + `b` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +CREATE VIEW v AS SELECT * FROM t1; +ERROR HY000: View's SELECT refers to a temporary table 't1' +RENAME TABLE t1 TO tmp, t1 TO t2; +SHOW CREATE TABLE tmp; +Table Create Table +tmp CREATE TEMPORARY TABLE `tmp` ( + `b` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +CREATE VIEW v AS SELECT * FROM tmp; +ERROR HY000: View's SELECT refers to a temporary table 'tmp' +CREATE VIEW v AS SELECT * FROM t2; +connection slave; +connection master; +DROP VIEW v; +DROP TABLE tmp; +DROP TABLE t2; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_mdev12179.result b/mysql-test/suite/rpl/r/rpl_mdev12179.result index 40059375356..8373eb43774 100644 --- a/mysql-test/suite/rpl/r/rpl_mdev12179.result +++ b/mysql-test/suite/rpl/r/rpl_mdev12179.result @@ -1,5 +1,6 @@ include/rpl_init.inc [topology=1->2] connection server_2; +call mtr.add_suppression("The automatically created table.*name may not be entirely in lowercase"); SET GLOBAL gtid_pos_auto_engines="innodb"; ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first include/stop_slave.inc @@ -152,12 +153,12 @@ a 1 2 *** Verify that mysql.gtid_slave_pos_InnoDB is auto-created *** -SELECT table_name, engine FROM information_schema.tables +SELECT lower(table_name), engine FROM information_schema.tables WHERE table_schema='mysql' AND table_name LIKE 'gtid_slave_pos%' ORDER BY table_name; -table_name engine +lower(table_name) engine gtid_slave_pos MyISAM -gtid_slave_pos_InnoDB InnoDB +gtid_slave_pos_innodb InnoDB include/stop_slave.inc SET sql_log_bin=0; INSERT INTO mysql.gtid_slave_pos SELECT * FROM mysql.gtid_slave_pos_InnoDB; @@ -245,15 +246,24 @@ a 3 4 *** Verify that mysql.gtid_slave_pos_InnoDB is auto-created *** -SELECT table_name, engine FROM information_schema.tables +SELECT lower(table_name), engine FROM information_schema.tables WHERE table_schema='mysql' AND table_name LIKE 'gtid_slave_pos%' ORDER BY table_name; -table_name engine +lower(table_name) engine gtid_slave_pos MyISAM -gtid_slave_pos_InnoDB InnoDB +gtid_slave_pos_innodb InnoDB SELECT domain_id, max(seq_no) FROM mysql.gtid_slave_pos GROUP BY domain_id; domain_id max(seq_no) 0 13 +connection server_2; +*** Restart the slave server to prove 'gtid_slave_pos_innodb' autodiscovery *** +connection server_2; +SELECT max(seq_no) FROM mysql.gtid_slave_pos_InnoDB into 
@seq_no; +connection server_1; +INSERT INTO t2(a) SELECT 1+MAX(a) FROM t2; +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc include/stop_slave.inc SET GLOBAL gtid_pos_auto_engines=""; SET sql_log_bin=0; diff --git a/mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result b/mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result index 9db73e06c0f..f900a8b0e9a 100644 --- a/mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result +++ b/mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result @@ -14,620 +14,330 @@ INSERT INTO tt_2(ddl_case) VALUES(0); # CHECK IMPLICT COMMIT ######################################################################### SET AUTOCOMMIT= 0; --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (43); CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "UDF_EXAMPLE_LIB"; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (43) -master-bin.000001 # Xid # # COMMIT /* XID */ -master-bin.000001 # Gtid # # GTID #-#-# -master-bin.000001 # Query # # use `test`; CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "LIB" --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (42); DROP FUNCTION myfunc_int; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (42) -master-bin.000001 # Xid # # COMMIT /* XID */ -master-bin.000001 # Gtid # # GTID #-#-# -master-bin.000001 # Query # # use `test`; DROP FUNCTION myfunc_int --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (41); LOAD INDEX INTO CACHE nt_1 IGNORE LEAVES; Table Op Msg_type Msg_text test.nt_1 preload_keys status OK --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (41) -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (40); LOAD INDEX INTO CACHE tt_1, tt_2 IGNORE LEAVES; Table Op Msg_type Msg_text test.tt_1 preload_keys note The storage engine for the table doesn't support preload_keys test.tt_2 preload_keys note The storage engine for the table doesn't support preload_keys --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (40) -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << 
-b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (39); ANALYZE TABLE nt_1; Table Op Msg_type Msg_text test.nt_1 analyze status Table is already up to date --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (39) -master-bin.000001 # Xid # # COMMIT /* XID */ -master-bin.000001 # Gtid # # GTID #-#-# -master-bin.000001 # Query # # use `test`; ANALYZE TABLE nt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (38); CHECK TABLE nt_1; Table Op Msg_type Msg_text test.nt_1 check status OK --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (38) -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (37); OPTIMIZE TABLE nt_1; Table Op Msg_type Msg_text test.nt_1 optimize status Table is already up to date --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- +INSERT INTO tt_1(ddl_case) VALUES (36); +REPAIR TABLE nt_1; +Table Op Msg_type Msg_text +test.nt_1 repair status OK +INSERT INTO tt_1(ddl_case) VALUES (35); +LOCK TABLES tt_1 WRITE; +INSERT INTO tt_1(ddl_case) VALUES (34); +UNLOCK TABLES; +INSERT INTO tt_1(ddl_case) VALUES (33); +CREATE USER 'user'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (32); +GRANT ALL ON *.* TO 'user'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (31); +SET PASSWORD FOR 'user'@'localhost' = PASSWORD('newpass'); +INSERT INTO tt_1(ddl_case) VALUES (30); +REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'user'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (29); +RENAME USER 'user'@'localhost' TO 'user_new'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (28); +DROP USER 'user_new'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (27); +CREATE EVENT evt ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO SELECT * FROM tt_1; +INSERT INTO tt_1(ddl_case) VALUES (26); +ALTER EVENT evt COMMENT 'evt'; +INSERT INTO tt_1(ddl_case) VALUES (25); +DROP EVENT evt; +INSERT INTO tt_1(ddl_case) VALUES (24); +CREATE TRIGGER tr AFTER INSERT ON tt_1 FOR EACH ROW UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; +INSERT INTO tt_1(ddl_case) VALUES (23); +DROP TRIGGER tr; +INSERT INTO tt_1(ddl_case) VALUES (22); +CREATE FUNCTION fc () RETURNS VARCHAR(64) RETURN "fc"; +INSERT INTO tt_1(ddl_case) VALUES (21); +ALTER FUNCTION fc COMMENT 'fc'; +INSERT INTO tt_1(ddl_case) VALUES (20); +DROP FUNCTION fc; +INSERT INTO tt_1(ddl_case) VALUES (19); +CREATE PROCEDURE pc () UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; +INSERT INTO tt_1(ddl_case) VALUES (18); +ALTER PROCEDURE pc COMMENT 'pc'; +INSERT INTO tt_1(ddl_case) VALUES (17); +DROP PROCEDURE pc; +INSERT INTO tt_1(ddl_case) VALUES (16); +CREATE VIEW v AS SELECT * FROM tt_1; +INSERT INTO tt_1(ddl_case) VALUES (15); +ALTER VIEW v AS SELECT * FROM tt_1; +INSERT INTO tt_1(ddl_case) VALUES (14); +DROP 
VIEW v; +INSERT INTO tt_1(ddl_case) VALUES (13); +CREATE INDEX ix ON tt_1(ddl_case); +INSERT INTO tt_1(ddl_case) VALUES (12); +DROP INDEX ix ON tt_1; +INSERT INTO tt_1(ddl_case) VALUES (11); +CREATE TEMPORARY TABLE tt_xx (a int); +INSERT INTO tt_1(ddl_case) VALUES (10); +ALTER TABLE tt_xx ADD COLUMN (b int); +INSERT INTO tt_1(ddl_case) VALUES (9); +ALTER TABLE tt_xx RENAME new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (8); +DROP TEMPORARY TABLE IF EXISTS new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (7); +CREATE TABLE tt_xx (a int); +INSERT INTO tt_1(ddl_case) VALUES (6); +ALTER TABLE tt_xx ADD COLUMN (b int); +INSERT INTO tt_1(ddl_case) VALUES (5); +RENAME TABLE tt_xx TO new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (4); +TRUNCATE TABLE new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (3); +DROP TABLE IF EXISTS tt_xx, new_tt_xx; +Warnings: +Note 1051 Unknown table 'test.tt_xx' +INSERT INTO tt_1(ddl_case) VALUES (2); +CREATE DATABASE db; +INSERT INTO tt_1(ddl_case) VALUES (1); +DROP DATABASE IF EXISTS db; include/show_binlog_events.inc Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE tt_1 (ddl_case INT, PRIMARY KEY(ddl_case)) ENGINE = Innodb +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE tt_2 (ddl_case INT, PRIMARY KEY(ddl_case)) ENGINE = Innodb +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE nt_1 (ddl_case INT, PRIMARY KEY(ddl_case)) ENGINE = MyIsam +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES(0) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tt_2(ddl_case) VALUES(0) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (43) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "LIB" +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (42) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP FUNCTION myfunc_int +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (41) +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (40) +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (39) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; ANALYZE TABLE nt_1 +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (38) +master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (37) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; OPTIMIZE TABLE nt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES 
(36); -REPAIR TABLE nt_1; -Table Op Msg_type Msg_text -test.nt_1 repair status OK --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (36) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; REPAIR TABLE nt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (35); -LOCK TABLES tt_1 WRITE; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (35) master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (34); -UNLOCK TABLES; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (34) master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (33); -CREATE USER 'user'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (33) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE USER 'user'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (32); -GRANT ALL ON *.* TO 'user'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (32) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; GRANT ALL ON *.* TO 'user'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (31); -SET PASSWORD FOR 'user'@'localhost' = PASSWORD('newpass'); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (31) master-bin.000001 # Xid # # COMMIT /* XID */ 
master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; SET PASSWORD FOR 'user'@'localhost'='*D8DECEC305209EEFEC43008E1D420E1AA06B19E0' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (30); -REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'user'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (30) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'user'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (29); -RENAME USER 'user'@'localhost' TO 'user_new'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (29) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; RENAME USER 'user'@'localhost' TO 'user_new'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (28); -DROP USER 'user_new'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (28) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP USER 'user_new'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (27); -CREATE EVENT evt ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO SELECT * FROM tt_1; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (27) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` EVENT evt ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO SELECT * FROM tt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (26); -ALTER EVENT evt COMMENT 'evt'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT 
INTO tt_1(ddl_case) VALUES (26) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER EVENT evt COMMENT 'evt' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (25); -DROP EVENT evt; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (25) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP EVENT evt --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (24); -CREATE TRIGGER tr AFTER INSERT ON tt_1 FOR EACH ROW UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (24) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` TRIGGER tr AFTER INSERT ON tt_1 FOR EACH ROW UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (23); -DROP TRIGGER tr; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (23) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP TRIGGER tr --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (22); -CREATE FUNCTION fc () RETURNS VARCHAR(64) RETURN "fc"; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (22) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` FUNCTION `fc`() RETURNS varchar(64) CHARSET latin1 RETURN "fc" --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (21); -ALTER FUNCTION fc COMMENT 'fc'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO 
tt_1(ddl_case) VALUES (21) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER FUNCTION fc COMMENT 'fc' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (20); -DROP FUNCTION fc; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (20) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP FUNCTION fc --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (19); -CREATE PROCEDURE pc () UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (19) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` PROCEDURE `pc`() UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (18); -ALTER PROCEDURE pc COMMENT 'pc'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (18) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER PROCEDURE pc COMMENT 'pc' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (17); -DROP PROCEDURE pc; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (17) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP PROCEDURE pc --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (16); -CREATE VIEW v AS SELECT * FROM tt_1; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (16) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # 
Query # # use `test`; CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS SELECT * FROM tt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (15); -ALTER VIEW v AS SELECT * FROM tt_1; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (15) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS SELECT * FROM tt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (14); -DROP VIEW v; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (14) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP VIEW v --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (13); -CREATE INDEX ix ON tt_1(ddl_case); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (13) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE INDEX ix ON tt_1(ddl_case) --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (12); -DROP INDEX ix ON tt_1; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (12) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP INDEX ix ON tt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (11); -CREATE TEMPORARY TABLE tt_xx (a int); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (11) master-bin.000001 # Query # # use `test`; CREATE TEMPORARY TABLE tt_xx (a int) +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (10) master-bin.000001 # Xid # # COMMIT /* XID */ 
--e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (10); -ALTER TABLE tt_xx ADD COLUMN (b int); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; ALTER TABLE tt_xx ADD COLUMN (b int) master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (10) -master-bin.000001 # Table_map # # table_id: # (test.tt_1) -master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (9) master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (9); -ALTER TABLE tt_xx RENAME new_tt_xx; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; ALTER TABLE tt_xx RENAME new_tt_xx master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (9) -master-bin.000001 # Table_map # # table_id: # (test.tt_1) -master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (8); -DROP TEMPORARY TABLE IF EXISTS new_tt_xx; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (8) -master-bin.000001 # Table_map # # table_id: # (test.tt_1) -master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (8) master-bin.000001 # Query # # DROP TEMPORARY TABLE IF EXISTS `test`.`new_tt_xx` /* generated by server */ -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (7); -CREATE TABLE tt_xx (a int); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (7) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE TABLE tt_xx (a int) --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (6); -ALTER TABLE tt_xx ADD COLUMN (b int); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc 
-Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (6) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER TABLE tt_xx ADD COLUMN (b int) --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (5); -RENAME TABLE tt_xx TO new_tt_xx; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (5) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; RENAME TABLE tt_xx TO new_tt_xx --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (4); -TRUNCATE TABLE new_tt_xx; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (4) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; TRUNCATE TABLE new_tt_xx --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (3); -DROP TABLE IF EXISTS tt_xx, new_tt_xx; -Warnings: -Note 1051 Unknown table 'test.tt_xx' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (3) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS `tt_xx`,`new_tt_xx` /* generated by server */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (2); -CREATE DATABASE db; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (2) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # CREATE DATABASE db --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (1); -DROP DATABASE IF EXISTS db; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (1) 
master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # DROP DATABASE IF EXISTS db --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - +SET AUTOCOMMIT= 1; ################################################################################### # CHECK CONSISTENCY ################################################################################### +connection slave; include/diff_tables.inc [master:tt_1,slave:tt_1] ################################################################################### # CLEAN ################################################################################### +connection master; +DROP TABLE tt_1; +DROP TABLE tt_2; +DROP TABLE nt_1; +connection slave; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result b/mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result index 91e9eef35fd..ef393873b97 100644 --- a/mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result +++ b/mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result @@ -14,13 +14,125 @@ INSERT INTO tt_2(ddl_case) VALUES(0); # CHECK IMPLICT COMMIT ######################################################################### SET AUTOCOMMIT= 0; --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (43); CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "UDF_EXAMPLE_LIB"; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- +INSERT INTO tt_1(ddl_case) VALUES (42); +DROP FUNCTION myfunc_int; +INSERT INTO tt_1(ddl_case) VALUES (41); +LOAD INDEX INTO CACHE nt_1 IGNORE LEAVES; +Table Op Msg_type Msg_text +test.nt_1 preload_keys status OK +INSERT INTO tt_1(ddl_case) VALUES (40); +LOAD INDEX INTO CACHE tt_1, tt_2 IGNORE LEAVES; +Table Op Msg_type Msg_text +test.tt_1 preload_keys note The storage engine for the table doesn't support preload_keys +test.tt_2 preload_keys note The storage engine for the table doesn't support preload_keys +INSERT INTO tt_1(ddl_case) VALUES (39); +ANALYZE TABLE nt_1; +Table Op Msg_type Msg_text +test.nt_1 analyze status Table is already up to date +INSERT INTO tt_1(ddl_case) VALUES (38); +CHECK TABLE nt_1; +Table Op Msg_type Msg_text +test.nt_1 check status OK +INSERT INTO tt_1(ddl_case) VALUES (37); +OPTIMIZE TABLE nt_1; +Table Op Msg_type Msg_text +test.nt_1 optimize status Table is already up to date +INSERT INTO tt_1(ddl_case) VALUES (36); +REPAIR TABLE nt_1; +Table Op Msg_type Msg_text +test.nt_1 repair status OK +INSERT INTO tt_1(ddl_case) VALUES (35); +LOCK TABLES tt_1 WRITE; +INSERT INTO tt_1(ddl_case) VALUES (34); +UNLOCK TABLES; +INSERT INTO tt_1(ddl_case) VALUES (33); +CREATE USER 'user'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (32); +GRANT ALL ON *.* TO 'user'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (31); +SET PASSWORD FOR 'user'@'localhost' = PASSWORD('newpass'); +INSERT INTO tt_1(ddl_case) VALUES (30); +REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'user'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (29); +RENAME USER 'user'@'localhost' TO 'user_new'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (28); +DROP USER 'user_new'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (27); +CREATE EVENT evt ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO SELECT * FROM tt_1; +INSERT INTO tt_1(ddl_case) VALUES (26); +ALTER EVENT evt COMMENT 'evt'; +INSERT INTO tt_1(ddl_case) VALUES (25); +DROP EVENT evt; +INSERT INTO tt_1(ddl_case) VALUES (24); +CREATE TRIGGER tr AFTER INSERT 
ON tt_1 FOR EACH ROW UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; +INSERT INTO tt_1(ddl_case) VALUES (23); +DROP TRIGGER tr; +INSERT INTO tt_1(ddl_case) VALUES (22); +CREATE FUNCTION fc () RETURNS VARCHAR(64) RETURN "fc"; +INSERT INTO tt_1(ddl_case) VALUES (21); +ALTER FUNCTION fc COMMENT 'fc'; +INSERT INTO tt_1(ddl_case) VALUES (20); +DROP FUNCTION fc; +INSERT INTO tt_1(ddl_case) VALUES (19); +CREATE PROCEDURE pc () UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; +INSERT INTO tt_1(ddl_case) VALUES (18); +ALTER PROCEDURE pc COMMENT 'pc'; +INSERT INTO tt_1(ddl_case) VALUES (17); +DROP PROCEDURE pc; +INSERT INTO tt_1(ddl_case) VALUES (16); +CREATE VIEW v AS SELECT * FROM tt_1; +INSERT INTO tt_1(ddl_case) VALUES (15); +ALTER VIEW v AS SELECT * FROM tt_1; +INSERT INTO tt_1(ddl_case) VALUES (14); +DROP VIEW v; +INSERT INTO tt_1(ddl_case) VALUES (13); +CREATE INDEX ix ON tt_1(ddl_case); +INSERT INTO tt_1(ddl_case) VALUES (12); +DROP INDEX ix ON tt_1; +INSERT INTO tt_1(ddl_case) VALUES (11); +CREATE TEMPORARY TABLE tt_xx (a int); +INSERT INTO tt_1(ddl_case) VALUES (10); +ALTER TABLE tt_xx ADD COLUMN (b int); +INSERT INTO tt_1(ddl_case) VALUES (9); +ALTER TABLE tt_xx RENAME new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (8); +DROP TEMPORARY TABLE IF EXISTS new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (7); +CREATE TABLE tt_xx (a int); +INSERT INTO tt_1(ddl_case) VALUES (6); +ALTER TABLE tt_xx ADD COLUMN (b int); +INSERT INTO tt_1(ddl_case) VALUES (5); +RENAME TABLE tt_xx TO new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (4); +TRUNCATE TABLE new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (3); +DROP TABLE IF EXISTS tt_xx, new_tt_xx; +Warnings: +Note 1051 Unknown table 'test.tt_xx' +INSERT INTO tt_1(ddl_case) VALUES (2); +CREATE DATABASE db; +INSERT INTO tt_1(ddl_case) VALUES (1); +DROP DATABASE IF EXISTS db; include/show_binlog_events.inc Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE tt_1 (ddl_case INT, PRIMARY KEY(ddl_case)) ENGINE = Innodb +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE tt_2 (ddl_case INT, PRIMARY KEY(ddl_case)) ENGINE = Innodb +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE nt_1 (ddl_case INT, PRIMARY KEY(ddl_case)) ENGINE = MyIsam +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES(0) +master-bin.000001 # Table_map # # table_id: # (test.tt_1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO tt_2(ddl_case) VALUES(0) +master-bin.000001 # Table_map # # table_id: # (test.tt_2) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (43) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -28,15 +140,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "LIB" --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << 
-b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (42); -DROP FUNCTION myfunc_int; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (42) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -44,84 +147,24 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP FUNCTION myfunc_int --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (41); -LOAD INDEX INTO CACHE nt_1 IGNORE LEAVES; -Table Op Msg_type Msg_text -test.nt_1 preload_keys status OK --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (41) master-bin.000001 # Table_map # # table_id: # (test.tt_1) master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (40); -LOAD INDEX INTO CACHE tt_1, tt_2 IGNORE LEAVES; -Table Op Msg_type Msg_text -test.tt_1 preload_keys note The storage engine for the table doesn't support preload_keys -test.tt_2 preload_keys note The storage engine for the table doesn't support preload_keys --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (40) master-bin.000001 # Table_map # # table_id: # (test.tt_1) master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (39); -ANALYZE TABLE nt_1; -Table Op Msg_type Msg_text -test.nt_1 analyze status Table is already up to date --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (39) master-bin.000001 # Table_map # # table_id: # (test.tt_1) master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ANALYZE TABLE nt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (38); -CHECK TABLE nt_1; -Table Op Msg_type Msg_text -test.nt_1 check status OK --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- 
-include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (38) master-bin.000001 # Table_map # # table_id: # (test.tt_1) master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (37); -OPTIMIZE TABLE nt_1; -Table Op Msg_type Msg_text -test.nt_1 optimize status Table is already up to date --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (37) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -129,17 +172,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; OPTIMIZE TABLE nt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (36); -REPAIR TABLE nt_1; -Table Op Msg_type Msg_text -test.nt_1 repair status OK --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (36) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -147,43 +179,16 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; REPAIR TABLE nt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (35); -LOCK TABLES tt_1 WRITE; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (35) master-bin.000001 # Table_map # # table_id: # (test.tt_1) master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (34); -UNLOCK TABLES; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (34) master-bin.000001 # Table_map # # table_id: # (test.tt_1) master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) 
VALUES (33); -CREATE USER 'user'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (33) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -191,15 +196,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE USER 'user'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (32); -GRANT ALL ON *.* TO 'user'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (32) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -207,15 +203,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; GRANT ALL ON *.* TO 'user'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (31); -SET PASSWORD FOR 'user'@'localhost' = PASSWORD('newpass'); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (31) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -223,15 +210,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; SET PASSWORD FOR 'user'@'localhost'='*D8DECEC305209EEFEC43008E1D420E1AA06B19E0' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (30); -REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'user'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (30) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -239,15 +217,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'user'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (29); -RENAME USER 'user'@'localhost' TO 'user_new'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << 
-b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (29) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -255,15 +224,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; RENAME USER 'user'@'localhost' TO 'user_new'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (28); -DROP USER 'user_new'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (28) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -271,15 +231,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP USER 'user_new'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (27); -CREATE EVENT evt ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO SELECT * FROM tt_1; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (27) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -287,15 +238,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` EVENT evt ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO SELECT * FROM tt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (26); -ALTER EVENT evt COMMENT 'evt'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (26) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -303,15 +245,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER EVENT evt COMMENT 'evt' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (25); -DROP EVENT evt; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# 
master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (25) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -319,15 +252,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP EVENT evt --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (24); -CREATE TRIGGER tr AFTER INSERT ON tt_1 FOR EACH ROW UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (24) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -335,15 +259,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` TRIGGER tr AFTER INSERT ON tt_1 FOR EACH ROW UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (23); -DROP TRIGGER tr; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (23) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -352,15 +267,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP TRIGGER tr --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (22); -CREATE FUNCTION fc () RETURNS VARCHAR(64) RETURN "fc"; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (22) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -369,15 +275,6 @@ master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` FUNCTION `fc`() RETURNS varchar(64) CHARSET latin1 RETURN "fc" --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (21); -ALTER FUNCTION fc COMMENT 'fc'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (21) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -385,15 
+282,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER FUNCTION fc COMMENT 'fc' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (20); -DROP FUNCTION fc; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (20) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -401,15 +289,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP FUNCTION fc --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (19); -CREATE PROCEDURE pc () UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (19) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -418,15 +297,6 @@ master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` PROCEDURE `pc`() UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (18); -ALTER PROCEDURE pc COMMENT 'pc'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (18) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -434,15 +304,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER PROCEDURE pc COMMENT 'pc' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (17); -DROP PROCEDURE pc; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (17) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -450,15 +311,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP PROCEDURE pc --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - 
--b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (16); -CREATE VIEW v AS SELECT * FROM tt_1; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (16) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -466,15 +318,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS SELECT * FROM tt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (15); -ALTER VIEW v AS SELECT * FROM tt_1; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (15) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -482,15 +325,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS SELECT * FROM tt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (14); -DROP VIEW v; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (14) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -498,15 +332,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP VIEW v --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (13); -CREATE INDEX ix ON tt_1(ddl_case); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (13) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -514,15 +339,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE INDEX ix ON tt_1(ddl_case) --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (12); -DROP INDEX ix ON tt_1; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- 
--b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (12) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -530,87 +346,29 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP INDEX ix ON tt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (11); -CREATE TEMPORARY TABLE tt_xx (a int); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (11) master-bin.000001 # Table_map # # table_id: # (test.tt_1) master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (10); -ALTER TABLE tt_xx ADD COLUMN (b int); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (10) master-bin.000001 # Table_map # # table_id: # (test.tt_1) master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (9); -ALTER TABLE tt_xx RENAME new_tt_xx; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (9) master-bin.000001 # Table_map # # table_id: # (test.tt_1) master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (8); -DROP TEMPORARY TABLE IF EXISTS new_tt_xx; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (8) master-bin.000001 # Table_map # # table_id: # (test.tt_1) master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (7); -CREATE TABLE tt_xx (a int); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << 
-b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (7) master-bin.000001 # Table_map # # table_id: # (test.tt_1) master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE TABLE tt_xx (a int) --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (6); -ALTER TABLE tt_xx ADD COLUMN (b int); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (6) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -618,15 +376,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER TABLE tt_xx ADD COLUMN (b int) --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (5); -RENAME TABLE tt_xx TO new_tt_xx; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (5) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -634,15 +383,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; RENAME TABLE tt_xx TO new_tt_xx --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (4); -TRUNCATE TABLE new_tt_xx; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (4) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -650,17 +390,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; TRUNCATE TABLE new_tt_xx --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (3); -DROP TABLE IF EXISTS tt_xx, new_tt_xx; -Warnings: -Note 1051 Unknown table 'test.tt_xx' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (3) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ 
-668,15 +397,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS `tt_xx`,`new_tt_xx` /* generated by server */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (2); -CREATE DATABASE db; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (2) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -684,15 +404,6 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # CREATE DATABASE db --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (1); -DROP DATABASE IF EXISTS db; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # INSERT INTO tt_1(ddl_case) VALUES (1) master-bin.000001 # Table_map # # table_id: # (test.tt_1) @@ -700,13 +411,18 @@ master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # DROP DATABASE IF EXISTS db --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - +SET AUTOCOMMIT= 1; ################################################################################### # CHECK CONSISTENCY ################################################################################### +connection slave; include/diff_tables.inc [master:tt_1,slave:tt_1] ################################################################################### # CLEAN ################################################################################### +connection master; +DROP TABLE tt_1; +DROP TABLE tt_2; +DROP TABLE nt_1; +connection slave; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result b/mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result index 3d5a6964be4..f900a8b0e9a 100644 --- a/mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result +++ b/mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result @@ -14,618 +14,330 @@ INSERT INTO tt_2(ddl_case) VALUES(0); # CHECK IMPLICT COMMIT ######################################################################### SET AUTOCOMMIT= 0; --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (43); CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "UDF_EXAMPLE_LIB"; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (43) -master-bin.000001 # Xid # # COMMIT /* XID */ -master-bin.000001 # Gtid # # GTID #-#-# -master-bin.000001 # Query # # use 
`test`; CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "LIB" --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (42); DROP FUNCTION myfunc_int; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (42) -master-bin.000001 # Xid # # COMMIT /* XID */ -master-bin.000001 # Gtid # # GTID #-#-# -master-bin.000001 # Query # # use `test`; DROP FUNCTION myfunc_int --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (41); LOAD INDEX INTO CACHE nt_1 IGNORE LEAVES; Table Op Msg_type Msg_text test.nt_1 preload_keys status OK --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (41) -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (40); LOAD INDEX INTO CACHE tt_1, tt_2 IGNORE LEAVES; Table Op Msg_type Msg_text test.tt_1 preload_keys note The storage engine for the table doesn't support preload_keys test.tt_2 preload_keys note The storage engine for the table doesn't support preload_keys --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (40) -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (39); ANALYZE TABLE nt_1; Table Op Msg_type Msg_text test.nt_1 analyze status Table is already up to date --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (39) -master-bin.000001 # Xid # # COMMIT /* XID */ -master-bin.000001 # Gtid # # GTID #-#-# -master-bin.000001 # Query # # use `test`; ANALYZE TABLE nt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT INTO tt_1(ddl_case) VALUES (38); CHECK TABLE nt_1; Table Op Msg_type Msg_text test.nt_1 check status OK --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (38) -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- INSERT 
INTO tt_1(ddl_case) VALUES (37); OPTIMIZE TABLE nt_1; Table Op Msg_type Msg_text test.nt_1 optimize status Table is already up to date --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- +INSERT INTO tt_1(ddl_case) VALUES (36); +REPAIR TABLE nt_1; +Table Op Msg_type Msg_text +test.nt_1 repair status OK +INSERT INTO tt_1(ddl_case) VALUES (35); +LOCK TABLES tt_1 WRITE; +INSERT INTO tt_1(ddl_case) VALUES (34); +UNLOCK TABLES; +INSERT INTO tt_1(ddl_case) VALUES (33); +CREATE USER 'user'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (32); +GRANT ALL ON *.* TO 'user'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (31); +SET PASSWORD FOR 'user'@'localhost' = PASSWORD('newpass'); +INSERT INTO tt_1(ddl_case) VALUES (30); +REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'user'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (29); +RENAME USER 'user'@'localhost' TO 'user_new'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (28); +DROP USER 'user_new'@'localhost'; +INSERT INTO tt_1(ddl_case) VALUES (27); +CREATE EVENT evt ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO SELECT * FROM tt_1; +INSERT INTO tt_1(ddl_case) VALUES (26); +ALTER EVENT evt COMMENT 'evt'; +INSERT INTO tt_1(ddl_case) VALUES (25); +DROP EVENT evt; +INSERT INTO tt_1(ddl_case) VALUES (24); +CREATE TRIGGER tr AFTER INSERT ON tt_1 FOR EACH ROW UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; +INSERT INTO tt_1(ddl_case) VALUES (23); +DROP TRIGGER tr; +INSERT INTO tt_1(ddl_case) VALUES (22); +CREATE FUNCTION fc () RETURNS VARCHAR(64) RETURN "fc"; +INSERT INTO tt_1(ddl_case) VALUES (21); +ALTER FUNCTION fc COMMENT 'fc'; +INSERT INTO tt_1(ddl_case) VALUES (20); +DROP FUNCTION fc; +INSERT INTO tt_1(ddl_case) VALUES (19); +CREATE PROCEDURE pc () UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; +INSERT INTO tt_1(ddl_case) VALUES (18); +ALTER PROCEDURE pc COMMENT 'pc'; +INSERT INTO tt_1(ddl_case) VALUES (17); +DROP PROCEDURE pc; +INSERT INTO tt_1(ddl_case) VALUES (16); +CREATE VIEW v AS SELECT * FROM tt_1; +INSERT INTO tt_1(ddl_case) VALUES (15); +ALTER VIEW v AS SELECT * FROM tt_1; +INSERT INTO tt_1(ddl_case) VALUES (14); +DROP VIEW v; +INSERT INTO tt_1(ddl_case) VALUES (13); +CREATE INDEX ix ON tt_1(ddl_case); +INSERT INTO tt_1(ddl_case) VALUES (12); +DROP INDEX ix ON tt_1; +INSERT INTO tt_1(ddl_case) VALUES (11); +CREATE TEMPORARY TABLE tt_xx (a int); +INSERT INTO tt_1(ddl_case) VALUES (10); +ALTER TABLE tt_xx ADD COLUMN (b int); +INSERT INTO tt_1(ddl_case) VALUES (9); +ALTER TABLE tt_xx RENAME new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (8); +DROP TEMPORARY TABLE IF EXISTS new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (7); +CREATE TABLE tt_xx (a int); +INSERT INTO tt_1(ddl_case) VALUES (6); +ALTER TABLE tt_xx ADD COLUMN (b int); +INSERT INTO tt_1(ddl_case) VALUES (5); +RENAME TABLE tt_xx TO new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (4); +TRUNCATE TABLE new_tt_xx; +INSERT INTO tt_1(ddl_case) VALUES (3); +DROP TABLE IF EXISTS tt_xx, new_tt_xx; +Warnings: +Note 1051 Unknown table 'test.tt_xx' +INSERT INTO tt_1(ddl_case) VALUES (2); +CREATE DATABASE db; +INSERT INTO tt_1(ddl_case) VALUES (1); +DROP DATABASE IF EXISTS db; include/show_binlog_events.inc Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE tt_1 (ddl_case INT, PRIMARY KEY(ddl_case)) ENGINE = Innodb +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE 
TABLE tt_2 (ddl_case INT, PRIMARY KEY(ddl_case)) ENGINE = Innodb +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE nt_1 (ddl_case INT, PRIMARY KEY(ddl_case)) ENGINE = MyIsam +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES(0) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tt_2(ddl_case) VALUES(0) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (43) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "LIB" +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (42) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP FUNCTION myfunc_int +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (41) +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (40) +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (39) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; ANALYZE TABLE nt_1 +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (38) +master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (37) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; OPTIMIZE TABLE nt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (36); -REPAIR TABLE nt_1; -Table Op Msg_type Msg_text -test.nt_1 repair status OK --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (36) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; REPAIR TABLE nt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (35); -LOCK TABLES tt_1 WRITE; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (35) master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (34); -UNLOCK TABLES; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << 
-b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (34) master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (33); -CREATE USER 'user'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (33) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE USER 'user'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (32); -GRANT ALL ON *.* TO 'user'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (32) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; GRANT ALL ON *.* TO 'user'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (31); -SET PASSWORD FOR 'user'@'localhost' = PASSWORD('newpass'); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (31) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; SET PASSWORD FOR 'user'@'localhost'='*D8DECEC305209EEFEC43008E1D420E1AA06B19E0' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (30); -REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'user'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (30) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'user'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (29); -RENAME USER 'user'@'localhost' TO 'user_new'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# 
master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (29) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; RENAME USER 'user'@'localhost' TO 'user_new'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (28); -DROP USER 'user_new'@'localhost'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (28) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP USER 'user_new'@'localhost' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (27); -CREATE EVENT evt ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO SELECT * FROM tt_1; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (27) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` EVENT evt ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO SELECT * FROM tt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (26); -ALTER EVENT evt COMMENT 'evt'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (26) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER EVENT evt COMMENT 'evt' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (25); -DROP EVENT evt; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (25) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP EVENT evt --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (24); -CREATE TRIGGER tr AFTER INSERT ON tt_1 FOR EACH ROW UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# 
master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (24) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` TRIGGER tr AFTER INSERT ON tt_1 FOR EACH ROW UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (23); -DROP TRIGGER tr; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (23) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP TRIGGER tr --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (22); -CREATE FUNCTION fc () RETURNS VARCHAR(64) RETURN "fc"; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (22) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` FUNCTION `fc`() RETURNS varchar(64) CHARSET latin1 RETURN "fc" --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (21); -ALTER FUNCTION fc COMMENT 'fc'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (21) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER FUNCTION fc COMMENT 'fc' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (20); -DROP FUNCTION fc; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (20) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP FUNCTION fc --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (19); -CREATE PROCEDURE pc () UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # 
# use `test`; INSERT INTO tt_1(ddl_case) VALUES (19) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` PROCEDURE `pc`() UPDATE tt_2 SET ddl_case = ddl_case WHERE ddl_case= NEW.ddl_case --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (18); -ALTER PROCEDURE pc COMMENT 'pc'; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (18) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER PROCEDURE pc COMMENT 'pc' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (17); -DROP PROCEDURE pc; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (17) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP PROCEDURE pc --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (16); -CREATE VIEW v AS SELECT * FROM tt_1; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (16) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS SELECT * FROM tt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (15); -ALTER VIEW v AS SELECT * FROM tt_1; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (15) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS SELECT * FROM tt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (14); -DROP VIEW v; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO 
tt_1(ddl_case) VALUES (14) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP VIEW v --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (13); -CREATE INDEX ix ON tt_1(ddl_case); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (13) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE INDEX ix ON tt_1(ddl_case) --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (12); -DROP INDEX ix ON tt_1; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (12) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP INDEX ix ON tt_1 --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (11); -CREATE TEMPORARY TABLE tt_xx (a int); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (11) master-bin.000001 # Query # # use `test`; CREATE TEMPORARY TABLE tt_xx (a int) -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (10); -ALTER TABLE tt_xx ADD COLUMN (b int); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (10) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER TABLE tt_xx ADD COLUMN (b int) --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (9); -ALTER TABLE tt_xx RENAME new_tt_xx; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (9) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER TABLE tt_xx RENAME new_tt_xx --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- 
>> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (8); -DROP TEMPORARY TABLE IF EXISTS new_tt_xx; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (8) master-bin.000001 # Query # # DROP TEMPORARY TABLE IF EXISTS `test`.`new_tt_xx` /* generated by server */ -master-bin.000001 # Xid # # COMMIT /* XID */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (7); -CREATE TABLE tt_xx (a int); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (7) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; CREATE TABLE tt_xx (a int) --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (6); -ALTER TABLE tt_xx ADD COLUMN (b int); --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (6) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; ALTER TABLE tt_xx ADD COLUMN (b int) --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (5); -RENAME TABLE tt_xx TO new_tt_xx; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (5) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; RENAME TABLE tt_xx TO new_tt_xx --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (4); -TRUNCATE TABLE new_tt_xx; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (4) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; TRUNCATE TABLE new_tt_xx --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (3); -DROP TABLE IF EXISTS tt_xx, new_tt_xx; -Warnings: -Note 1051 Unknown table 'test.tt_xx' --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- 
>> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (3) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS `tt_xx`,`new_tt_xx` /* generated by server */ --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (2); -CREATE DATABASE db; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (2) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # CREATE DATABASE db --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -INSERT INTO tt_1(ddl_case) VALUES (1); -DROP DATABASE IF EXISTS db; --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- --b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b- -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Query # # use `test`; INSERT INTO tt_1(ddl_case) VALUES (1) master-bin.000001 # Xid # # COMMIT /* XID */ master-bin.000001 # Gtid # # GTID #-#-# master-bin.000001 # Query # # DROP DATABASE IF EXISTS db --e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e- - +SET AUTOCOMMIT= 1; ################################################################################### # CHECK CONSISTENCY ################################################################################### +connection slave; include/diff_tables.inc [master:tt_1,slave:tt_1] ################################################################################### # CLEAN ################################################################################### +connection master; +DROP TABLE tt_1; +DROP TABLE tt_2; +DROP TABLE nt_1; +connection slave; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rename.test b/mysql-test/suite/rpl/t/rename.test new file mode 100644 index 00000000000..ac499157918 --- /dev/null +++ b/mysql-test/suite/rpl/t/rename.test @@ -0,0 +1,33 @@ +--source include/have_binlog_format_mixed.inc +--source include/master-slave.inc + +--echo # +--echo # MDEV-16229 Replication aborts with ER_VIEW_SELECT_TMPTABLE after +--echo # half-failed RENAME +--echo # + +CREATE TABLE t1 (a INT); +CREATE TEMPORARY TABLE t1 (b INT); +RENAME TABLE t1 TO tmp, tmp TO t1; +SHOW CREATE TABLE t1; +--error ER_VIEW_SELECT_TMPTABLE +CREATE VIEW v AS SELECT * FROM t1; + +RENAME TABLE t1 TO tmp, t1 TO t2; +SHOW CREATE TABLE tmp; +SHOW CREATE TABLE t2; +--error ER_VIEW_SELECT_TMPTABLE +CREATE VIEW v AS SELECT * FROM tmp; +CREATE VIEW v AS SELECT * FROM t2; + +--sync_slave_with_master + +# Cleanup + +--connection master + +DROP VIEW v; +DROP TABLE tmp; +DROP TABLE t2; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_mdev12179.test b/mysql-test/suite/rpl/t/rpl_mdev12179.test index a9113c91797..eb0f6c04b42 100644 --- a/mysql-test/suite/rpl/t/rpl_mdev12179.test +++ b/mysql-test/suite/rpl/t/rpl_mdev12179.test @@ -3,6 +3,8 @@ --source include/rpl_init.inc 
--connection server_2 +call mtr.add_suppression("The automatically created table.*name may not be entirely in lowercase"); + --error ER_SLAVE_MUST_STOP SET GLOBAL gtid_pos_auto_engines="innodb"; --source include/stop_slave.inc @@ -161,7 +163,8 @@ let $wait_condition= SELECT EXISTS (SELECT * FROM information_schema.tables WHERE table_schema='mysql' AND table_name='gtid_slave_pos_InnoDB'); --source include/wait_condition.inc -SELECT table_name, engine FROM information_schema.tables +# MDEV-15373 lowercases 'table_name' to satisfy --lower-case-table-names options +SELECT lower(table_name), engine FROM information_schema.tables WHERE table_schema='mysql' AND table_name LIKE 'gtid_slave_pos%' ORDER BY table_name; @@ -225,7 +228,7 @@ let $wait_condition= SELECT EXISTS (SELECT * FROM information_schema.tables WHERE table_schema='mysql' AND table_name='gtid_slave_pos_InnoDB'); --source include/wait_condition.inc -SELECT table_name, engine FROM information_schema.tables +SELECT lower(table_name), engine FROM information_schema.tables WHERE table_schema='mysql' AND table_name LIKE 'gtid_slave_pos%' ORDER BY table_name; SELECT domain_id, max(seq_no) FROM mysql.gtid_slave_pos GROUP BY domain_id; @@ -265,6 +268,39 @@ while (!$done) # Note that at this point, the contents of table t2, as well as the GTID # position, is non-deterministic. +# MDEV-15373 engine gtid_slave_pos table name disobeys lower-case-table-names +# This snippet verifies that engine gtid_slave_pos table is found, +# its data are up-to-date. +--write_file $MYSQLTEST_VARDIR/tmp/mysqld.2.expect +wait +EOF +--connection server_2 +--shutdown_server 30 +--source include/wait_until_disconnected.inc + +--echo *** Restart the slave server to prove 'gtid_slave_pos_innodb' autodiscovery *** +--append_file $MYSQLTEST_VARDIR/tmp/mysqld.2.expect +restart: --skip-slave-start=0 +EOF + +--connection server_2 +--enable_reconnect +--source include/wait_until_connected_again.inc +SELECT max(seq_no) FROM mysql.gtid_slave_pos_InnoDB into @seq_no; + +--connection server_1 +INSERT INTO t2(a) SELECT 1+MAX(a) FROM t2; +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc +if (`SELECT max(seq_no) <> @seq_no + 1 FROM mysql.gtid_slave_pos_InnoDB`) +{ + SELECT * FROM mysql.gtid_slave_pos_InnoDB; + --die Inconsistent table +} +# +# end of MDEV-15373 #--connection server_2 --source include/stop_slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_mdev382.test b/mysql-test/suite/rpl/t/rpl_mdev382.test index cb67052b47d..3ec877cdb1a 100644 --- a/mysql-test/suite/rpl/t/rpl_mdev382.test +++ b/mysql-test/suite/rpl/t/rpl_mdev382.test @@ -2,11 +2,6 @@ --source include/have_binlog_format_statement.inc --source include/master-slave.inc -if (`select plugin_auth_version <= "5.6.10" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB 5.6.10 or earlier -} - # MDEV-382: multiple SQL injections in replication code. # Test previous SQL injection attack against binlog for SAVEPOINT statement. 
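Editorial aside, not part of the patch: the MDEV-15373 change to rpl_mdev12179.test above normalizes the table name because, under --lower-case-table-names, the automatically created per-engine position table may be reported as gtid_slave_pos_innodb rather than gtid_slave_pos_InnoDB (see the added mtr suppression). A minimal sketch of the normalized query as used in that test, shown here only for illustration:

-- Mirrors the SELECT rewritten in rpl_mdev12179.test.
-- lower(table_name) keeps the result identical whether the server reports
-- the autocreated table as gtid_slave_pos_InnoDB or gtid_slave_pos_innodb.
SELECT lower(table_name), engine
FROM information_schema.tables
WHERE table_schema = 'mysql'
  AND table_name LIKE 'gtid_slave_pos%'
ORDER BY table_name;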
diff --git a/mysql-test/suite/rpl/t/rpl_row_triggers.test b/mysql-test/suite/rpl/t/rpl_row_triggers.test index 00f9197ace8..d5f29b9207d 100644 --- a/mysql-test/suite/rpl/t/rpl_row_triggers.test +++ b/mysql-test/suite/rpl/t/rpl_row_triggers.test @@ -1,5 +1,4 @@ -- source include/have_binlog_format_row.inc --- source include/have_rbr_triggers.inc -- source include/have_innodb.inc -- source include/master-slave.inc diff --git a/mysql-test/suite/sql_sequence/debug_sync.opt b/mysql-test/suite/sql_sequence/debug_sync.opt new file mode 100644 index 00000000000..7ba8cab8a92 --- /dev/null +++ b/mysql-test/suite/sql_sequence/debug_sync.opt @@ -0,0 +1 @@ +--loose-debug-sync-timeout=2 diff --git a/mysql-test/suite/sql_sequence/debug_sync.result b/mysql-test/suite/sql_sequence/debug_sync.result new file mode 100644 index 00000000000..516b2343a82 --- /dev/null +++ b/mysql-test/suite/sql_sequence/debug_sync.result @@ -0,0 +1,7 @@ +connect con1,localhost,root,,; +CREATE TEMPORARY SEQUENCE f ENGINE=InnoDB; +disconnect con1; +connection default; +SELECT 'Still alive' AS `Heartbeat`; +Heartbeat +Still alive diff --git a/mysql-test/suite/sql_sequence/debug_sync.test b/mysql-test/suite/sql_sequence/debug_sync.test new file mode 100644 index 00000000000..ecc75f790a3 --- /dev/null +++ b/mysql-test/suite/sql_sequence/debug_sync.test @@ -0,0 +1,15 @@ +--source include/have_innodb.inc +--source include/have_debug_sync.inc + +# +# MDEV-13029 +# Assertion `ds_control' failed in debug_sync upon closing connection after +# creating temporary sequence + +--connect (con1,localhost,root,,) +CREATE TEMPORARY SEQUENCE f ENGINE=InnoDB; +--disconnect con1 +--connection default +--sleep 3 + +SELECT 'Still alive' AS `Heartbeat`; diff --git a/mysql-test/suite/sql_sequence/lock.result b/mysql-test/suite/sql_sequence/lock.result index 05a06b218b3..e92bdbffcf9 100644 --- a/mysql-test/suite/sql_sequence/lock.result +++ b/mysql-test/suite/sql_sequence/lock.result @@ -27,3 +27,13 @@ DROP SEQUENCE s1; ERROR HY000: Table 's1' was locked with a READ lock and can't be updated unlock tables; DROP SEQUENCE s1; +CREATE SEQUENCE seq1; +CREATE SEQUENCE seq2; +LOCK TABLE seq1 WRITE, seq2 WRITE; +INSERT INTO seq1 VALUES (1, 1, 100000, 1, 1, 100, 1, 1); +DROP SEQUENCE seq1, seq2; +CREATE OR REPLACE SEQUENCE s1 ENGINE=MyISAM; +LOCK TABLE s1 WRITE; +TRUNCATE TABLE s1; +ERROR HY000: Storage engine SEQUENCE of the table `test`.`s1` doesn't have this option +DROP SEQUENCE s1; diff --git a/mysql-test/suite/sql_sequence/lock.test b/mysql-test/suite/sql_sequence/lock.test index 2208a1f1ffa..1cb6aa6fb54 100644 --- a/mysql-test/suite/sql_sequence/lock.test +++ b/mysql-test/suite/sql_sequence/lock.test @@ -38,3 +38,27 @@ SELECT NEXTVAL(s); DROP SEQUENCE s1; unlock tables; DROP SEQUENCE s1; + +# +# MDEV-15106 Unexpected ER_WRONG_INSERT_INTO_SEQUENCE upon INSERT with +# multiple locks on sequences +# + +CREATE SEQUENCE seq1; +CREATE SEQUENCE seq2; +LOCK TABLE seq1 WRITE, seq2 WRITE; +INSERT INTO seq1 VALUES (1, 1, 100000, 1, 1, 100, 1, 1); +DROP SEQUENCE seq1, seq2; + +# +# MDEV-15970 +# Assertion `!is_set() || (m_status == DA_OK_BULK && is_bulk_op())' failure and/or +# ER_KEY_NOT_FOUND upon TRUNCATE sequence under lock +# + +CREATE OR REPLACE SEQUENCE s1 ENGINE=MyISAM; +LOCK TABLE s1 WRITE; +--error ER_ILLEGAL_HA +TRUNCATE TABLE s1; +# Cleanup +DROP SEQUENCE s1; diff --git a/mysql-test/suite/sql_sequence/other.result b/mysql-test/suite/sql_sequence/other.result index e3ec94cf2c5..ff58e35772b 100644 --- a/mysql-test/suite/sql_sequence/other.result +++ 
b/mysql-test/suite/sql_sequence/other.result @@ -49,10 +49,6 @@ insert into s1 (next_not_cached_value, minimum_value) values (100,1000); ERROR HY000: Field 'maximum_value' doesn't have a default value insert into s1 values (next value for s1, 1,9223372036854775806,1,1,1000,0,0); ERROR HY000: Table 's1' is specified twice, both as a target for 'INSERT' and as a separate source for data -insert into s1 values (next value for s2, 1,9223372036854775806,1,1,1000,0,0); -ERROR HY000: Wrong INSERT into a SEQUENCE. One can only do single table INSERT into a sequence object (like with mysqldump). If you want to change the SEQUENCE, use ALTER SEQUENCE instead. -insert into s1 select * from s2; -ERROR HY000: Wrong INSERT into a SEQUENCE. One can only do single table INSERT into a sequence object (like with mysqldump). If you want to change the SEQUENCE, use ALTER SEQUENCE instead. insert into s1 values(1000,9223372036854775806,1,1,1,1000,0,0); ERROR HY000: Sequence 'test.s1' values are conflicting insert into s1 values(0,9223372036854775806,1,1,1,1000,0,0); @@ -72,6 +68,20 @@ next_not_cached_value minimum_value maximum_value start_value increment cache_si 2000 1 9223372036854775806 1 1 1000 0 0 insert into s2 values(0, 1, 10, 1, 2, 1, 1, 0); ERROR HY000: Sequence 'test.s2' values are conflicting +select * from s1; +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count +2000 1 9223372036854775806 1 1 1000 0 0 +insert into s1 values (next value for s2, 1,9223372036854775806,1,1,1000,0,0); +select * from s1; +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count +1 1 9223372036854775806 1 1 1000 0 0 +select * from s2; +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count +1001 1 9223372036854775806 1 1 1000 0 0 +insert into s1 select * from s2; +select * from s1; +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count +1001 1 9223372036854775806 1 1 1000 0 0 drop sequence s1,s2; # # UPDATE and DELETE @@ -184,3 +194,19 @@ ERROR HY000: Function or expression 'setval()' cannot be used in the CHECK claus CREATE TABLE t1 (a int, b int as (next value for s1 > 0)); ERROR HY000: Function or expression 'nextval()' cannot be used in the GENERATED ALWAYS AS clause of `b` drop sequence s1; +# +# MDEV-13024: Server crashes in my_store_ptr upon DELETE from +# sequence in multi-table format +# +CREATE SEQUENCE s; +CREATE table t1 (a int); +insert into t1 values (1),(2); +DELETE s FROM s; +ERROR HY000: Storage engine SEQUENCE of the table `test`.`s` doesn't have this option +delete t1,s from s,t1; +ERROR HY000: Storage engine SEQUENCE of the table `test`.`s` doesn't have this option +delete s,t1 from t1,s; +ERROR HY000: Storage engine SEQUENCE of the table `test`.`s` doesn't have this option +DROP SEQUENCE s; +DROP TABLE t1; +# End of 10.3 tests diff --git a/mysql-test/suite/sql_sequence/other.test b/mysql-test/suite/sql_sequence/other.test index ff0db9e158d..5759b195950 100644 --- a/mysql-test/suite/sql_sequence/other.test +++ b/mysql-test/suite/sql_sequence/other.test @@ -38,10 +38,6 @@ create sequence s2; insert into s1 (next_not_cached_value, minimum_value) values (100,1000); --error ER_UPDATE_TABLE_USED insert into s1 values (next value for s1, 1,9223372036854775806,1,1,1000,0,0); ---error ER_WRONG_INSERT_INTO_SEQUENCE -insert into s1 values (next value for s2, 1,9223372036854775806,1,1,1000,0,0); 
---error ER_WRONG_INSERT_INTO_SEQUENCE -insert into s1 select * from s2; --error ER_SEQUENCE_INVALID_DATA insert into s1 values(1000,9223372036854775806,1,1,1,1000,0,0); --error ER_SEQUENCE_INVALID_DATA @@ -53,6 +49,13 @@ select next value for s1; select * from s1; --error ER_SEQUENCE_INVALID_DATA insert into s2 values(0, 1, 10, 1, 2, 1, 1, 0); + +select * from s1; +insert into s1 values (next value for s2, 1,9223372036854775806,1,1,1000,0,0); +select * from s1; +select * from s2; +insert into s1 select * from s2; +select * from s1; drop sequence s1,s2; --echo # @@ -157,3 +160,23 @@ CREATE table t1 (a int check (setval(s1,10))); --error ER_GENERATED_COLUMN_FUNCTION_IS_NOT_ALLOWED CREATE TABLE t1 (a int, b int as (next value for s1 > 0)); drop sequence s1; + + +--echo # +--echo # MDEV-13024: Server crashes in my_store_ptr upon DELETE from +--echo # sequence in multi-table format +--echo # +CREATE SEQUENCE s; +CREATE table t1 (a int); +insert into t1 values (1),(2); +--error ER_ILLEGAL_HA +DELETE s FROM s; +--error ER_ILLEGAL_HA +delete t1,s from s,t1; +--error ER_ILLEGAL_HA +delete s,t1 from t1,s; +DROP SEQUENCE s; +DROP TABLE t1; + + +--echo # End of 10.3 tests diff --git a/mysql-test/suite/sql_sequence/replication_drop.result b/mysql-test/suite/sql_sequence/replication_drop.result new file mode 100644 index 00000000000..1cd7022765e --- /dev/null +++ b/mysql-test/suite/sql_sequence/replication_drop.result @@ -0,0 +1,5 @@ +CREATE SEQUENCE seq ENGINE=InnoDB; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +INSERT INTO seq VALUES (1,1,100,1,1,1,1,1); +ERROR HY000: Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT and at least one table uses a storage engine limited to row-based logging. InnoDB is limited to row-logging when transaction isolation level is READ COMMITTED or READ UNCOMMITTED. +DROP SEQUENCE seq; diff --git a/mysql-test/suite/sql_sequence/replication_drop.test b/mysql-test/suite/sql_sequence/replication_drop.test new file mode 100644 index 00000000000..ca050246391 --- /dev/null +++ b/mysql-test/suite/sql_sequence/replication_drop.test @@ -0,0 +1,17 @@ +# +# Test for MDEV-15812 +# Assertion `m_lock_type == 2' failed in +# handler::~handler on dropping a sequence after +# ER_BINLOG_STMT_MODE_AND_ROW_ENGINE +# + +--source include/have_innodb.inc +--source include/have_binlog_format_statement.inc + +CREATE SEQUENCE seq ENGINE=InnoDB; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +--error ER_BINLOG_STMT_MODE_AND_ROW_ENGINE +INSERT INTO seq VALUES (1,1,100,1,1,1,1,1); + +# Cleanup +DROP SEQUENCE seq; diff --git a/mysql-test/suite/sql_sequence/replication_mixed.result b/mysql-test/suite/sql_sequence/replication_mixed.result new file mode 100644 index 00000000000..f581d59fc30 --- /dev/null +++ b/mysql-test/suite/sql_sequence/replication_mixed.result @@ -0,0 +1,35 @@ +include/master-slave.inc +[connection master] +# +# MDEV-16234 +# CREATE TABLE .. 
SELECT LASTVAL is written to binlog as single +# statement, causes discrepancy between master and slave +# +CREATE SEQUENCE s1 ENGINE=InnoDB; +SELECT NEXTVAL(s1); +NEXTVAL(s1) +1 +CREATE TABLE t1 ENGINE=InnoDB SELECT LASTVAL(s1) AS a; +INSERT INTO t1 VALUES (NEXTVAL(s1)); +INSERT INTO t1 VALUES (LASTVAL(s1)); +SELECT * FROM t1; +a +1 +2 +2 +SELECT * from s1; +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count +1001 1 9223372036854775806 1 1 1000 0 0 +connection slave; +SELECT * FROM t1; +a +1 +2 +2 +SELECT * from s1; +next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count +1001 1 9223372036854775806 1 1 1000 0 0 +connection master; +DROP TABLE t1; +DROP SEQUENCE s1; +include/rpl_end.inc diff --git a/mysql-test/suite/sql_sequence/replication_mixed.test b/mysql-test/suite/sql_sequence/replication_mixed.test new file mode 100644 index 00000000000..0096ab5acd8 --- /dev/null +++ b/mysql-test/suite/sql_sequence/replication_mixed.test @@ -0,0 +1,27 @@ +--source include/have_innodb.inc +--source include/have_binlog_format_mixed.inc +--source include/master-slave.inc + +--echo # +--echo # MDEV-16234 +--echo # CREATE TABLE .. SELECT LASTVAL is written to binlog as single +--echo # statement, causes discrepancy between master and slave +--echo # + +CREATE SEQUENCE s1 ENGINE=InnoDB; +SELECT NEXTVAL(s1); +CREATE TABLE t1 ENGINE=InnoDB SELECT LASTVAL(s1) AS a; +INSERT INTO t1 VALUES (NEXTVAL(s1)); +INSERT INTO t1 VALUES (LASTVAL(s1)); +SELECT * FROM t1; +SELECT * from s1; +--sync_slave_with_master +SELECT * FROM t1; +SELECT * from s1; + +# Cleanup +--connection master +DROP TABLE t1; +DROP SEQUENCE s1; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/sql_sequence/setval.result b/mysql-test/suite/sql_sequence/setval.result index b7cda27cf96..504d460fea5 100644 --- a/mysql-test/suite/sql_sequence/setval.result +++ b/mysql-test/suite/sql_sequence/setval.result @@ -254,3 +254,18 @@ def SETVAL(s1,10) 8 20 2 Y 32896 0 63 SETVAL(s1,10) 10 DROP SEQUENCE s1; +# +# MDEV-15732: Assertion `next_free_value % real_increment == offset && +# next_free_value >= reserved_until' failed in +# sequence_definition::adjust_values upon SETVAL for sequence with +# INCREMENT 0 +# +CREATE SEQUENCE s INCREMENT 0; +SELECT NEXTVAL(s); +NEXTVAL(s) +1 +SELECT SETVAL(s, 10); +SETVAL(s, 10) +10 +DROP SEQUENCE s; +# End of 10.3 tests diff --git a/mysql-test/suite/sql_sequence/setval.test b/mysql-test/suite/sql_sequence/setval.test index 8f8059fdcae..1993bdbe077 100644 --- a/mysql-test/suite/sql_sequence/setval.test +++ b/mysql-test/suite/sql_sequence/setval.test @@ -137,3 +137,20 @@ SELECT SETVAL(s1,10); DROP SEQUENCE s1; --enable_ps_protocol --disable_metadata + +--echo # +--echo # MDEV-15732: Assertion `next_free_value % real_increment == offset && +--echo # next_free_value >= reserved_until' failed in +--echo # sequence_definition::adjust_values upon SETVAL for sequence with +--echo # INCREMENT 0 +--echo # + +CREATE SEQUENCE s INCREMENT 0; +SELECT NEXTVAL(s); +SELECT SETVAL(s, 10); + +# Cleanup +DROP SEQUENCE s; + + +--echo # End of 10.3 tests diff --git a/mysql-test/suite/sql_sequence/temporary.result b/mysql-test/suite/sql_sequence/temporary.result index 69463667210..b5c70fd3a50 100644 --- a/mysql-test/suite/sql_sequence/temporary.result +++ b/mysql-test/suite/sql_sequence/temporary.result @@ -17,3 +17,27 @@ drop temporary sequence s1; CREATE TEMPORARY SEQUENCE s1 ENGINE=InnoDB; INSERT INTO s1 VALUES (1, 1, 
1000, 1, 1, 1, 1, 0); DROP TEMPORARY SEQUENCE s1; +# +# MDEV-13007 ALTER .. ENGINE on temporary sequence may go wrong +# +create temporary sequence s1 engine=aria; +alter table s1 engine myisam; +select nextval(s1); +nextval(s1) +1 +drop temporary sequence s1; +create temporary sequence s1 engine=innodb; +alter table s1 engine myisam; +select nextval(s1); +nextval(s1) +1 +drop temporary sequence s1; +create temporary sequence s1; +alter table s1 engine innodb; +select nextval(s1); +nextval(s1) +1 +select nextval(s1); +nextval(s1) +2 +drop temporary sequence s1; diff --git a/mysql-test/suite/sql_sequence/temporary.test b/mysql-test/suite/sql_sequence/temporary.test index 4943ed66ea4..aeacf6e9497 100644 --- a/mysql-test/suite/sql_sequence/temporary.test +++ b/mysql-test/suite/sql_sequence/temporary.test @@ -23,3 +23,21 @@ drop temporary sequence s1; CREATE TEMPORARY SEQUENCE s1 ENGINE=InnoDB; INSERT INTO s1 VALUES (1, 1, 1000, 1, 1, 1, 1, 0); DROP TEMPORARY SEQUENCE s1; + +--echo # +--echo # MDEV-13007 ALTER .. ENGINE on temporary sequence may go wrong +--echo # + +create temporary sequence s1 engine=aria; +alter table s1 engine myisam; +select nextval(s1); +drop temporary sequence s1; +create temporary sequence s1 engine=innodb; +alter table s1 engine myisam; +select nextval(s1); +drop temporary sequence s1; +create temporary sequence s1; +alter table s1 engine innodb; +select nextval(s1); +select nextval(s1); +drop temporary sequence s1; diff --git a/mysql-test/suite/storage_engine/parts/repair_table.result b/mysql-test/suite/storage_engine/parts/repair_table.result index 727c648ef6f..63131250a01 100644 --- a/mysql-test/suite/storage_engine/parts/repair_table.result +++ b/mysql-test/suite/storage_engine/parts/repair_table.result @@ -101,6 +101,7 @@ INSERT INTO t1 (a,b) VALUES (10,'j'); REPAIR TABLE t1 USE_FRM; Table Op Msg_type Msg_text test.t1 repair status OK +db.opt t1#P#p0.MYD t1#P#p0.MYI t1#P#p1.MYD @@ -125,7 +126,6 @@ a b 15 o Warnings: Error 145 Table './test/t1#P#p0' is marked as crashed and should be repaired -Error 1194 Table 't1' is marked as crashed and should be repaired Error 1034 Number of rows changed from 3 to 2 # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). # If you got a difference in error message, just add it to rdiff file @@ -151,7 +151,6 @@ a b 15 o Warnings: Error 145 Table './test/t1#P#p0' is marked as crashed and should be repaired -Error 1194 Table 't1' is marked as crashed and should be repaired Error 1034 Number of rows changed from 2 to 3 # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). # If you got a difference in error message, just add it to rdiff file @@ -176,7 +175,6 @@ a b 15 o Warnings: Error 145 Table './test/t1#P#p1' is marked as crashed and should be repaired -Error 1194 Table 't1' is marked as crashed and should be repaired Error 1034 Number of rows changed from 4 to 3 # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). # If you got a difference in error message, just add it to rdiff file @@ -205,7 +203,6 @@ a b 15 o Warnings: Error 145 Table './test/t1#P#p1' is marked as crashed and should be repaired -Error 1194 Table 't1' is marked as crashed and should be repaired Error 1034 Number of rows changed from 3 to 4 # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). 
# If you got a difference in error message, just add it to rdiff file diff --git a/mysql-test/suite/storage_engine/repair_table.result b/mysql-test/suite/storage_engine/repair_table.result index d518e47756f..cbd861647ea 100644 --- a/mysql-test/suite/storage_engine/repair_table.result +++ b/mysql-test/suite/storage_engine/repair_table.result @@ -73,6 +73,7 @@ REPAIR TABLE t1 USE_FRM; Table Op Msg_type Msg_text test.t1 repair warning Number of rows changed from 0 to 3 test.t1 repair status OK +db.opt t1.MYD t1.MYI t1.frm diff --git a/mysql-test/suite/sys_vars/inc/secure_timestamp_func.inc b/mysql-test/suite/sys_vars/inc/secure_timestamp_func.inc new file mode 100644 index 00000000000..8bc3e42df59 --- /dev/null +++ b/mysql-test/suite/sys_vars/inc/secure_timestamp_func.inc @@ -0,0 +1,49 @@ +# +# MDEV-15923 option to control who can set session @@timestamp +# +source include/have_binlog_format_statement.inc; +source include/master-slave.inc; + +connection slave; +select @@secure_timestamp; + +### SUPER +disable_abort_on_error; +set timestamp=1234567890.101112; +enable_abort_on_error; +select if(now(6) > 20100101, 'READONLY', 'EDITABLE') as 'SUPER'; + +### non-privileged user +create user foo@127.0.0.1; +connect con2,127.0.0.1,foo,,test,$SLAVE_MYPORT; +disable_abort_on_error; +set timestamp=1234567890.101112; +enable_abort_on_error; +select if(now(6) > 20100101, 'READONLY', 'EDITABLE') as 'non-privileged'; +disconnect con2; +connection slave; +drop user foo@127.0.0.1; + +### replication +connection master; +set time_zone='+00:00'; +set timestamp=1234567890.101112; +select @@timestamp, now(6); + +create table t1 (b varchar(20), a timestamp(6) default current_timestamp(6)); +insert t1 (b) values ('replicated'); +sync_slave_with_master; +create trigger t1rbr before insert on t1 for each row set new.a=now(6); +set @@global.slave_run_triggers_for_rbr= yes; +binlog 'LQfqWg8BAAAA/AAAAAABAAABAAQAMTAuMy42LU1hcmlhREItZGVidWctbG9nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtB+paEzgNAAgAEgAEBAQEEgAA5AAEGggAAAAICAgCAAAACgoKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEEwQADQgICAoKCgFlBcaR'; +binlog '0gKWSRMBAAAAMQAAAHQDAAAAAB8AAAAAAAEABHRlc3QAAnQxAAIPEQMUAAYBQFUzwA==0gKWSRcBAAAAMAAAAKQDAAAAAB8AAAAAAAEAAv/8BmJpbmxvZ0mWAtIBivg3mwo+'; +set @@global.slave_run_triggers_for_rbr= default; +select b, if(a > 20100101, 'READONLY', 'EDITABLE') as 'REPLICATION' from t1; +connection master; +#set binlog_format=row; +#insert t1 (b) values ('binlog'); +#let datadir=`select @@datadir`; +#exec $MYSQL_BINLOG $datadir/master-bin.000001; +drop table t1; + +source include/rpl_end.inc; diff --git a/mysql-test/suite/sys_vars/r/collation_database_func.result b/mysql-test/suite/sys_vars/r/collation_database_func.result index 99f0e253005..8bcf9fea224 100644 --- a/mysql-test/suite/sys_vars/r/collation_database_func.result +++ b/mysql-test/suite/sys_vars/r/collation_database_func.result @@ -54,7 +54,7 @@ utf8_roman_ci utf8_roman_ci utf8 utf8 USE test; SELECT @@collation_database,@@collation_server,@@character_set_database,@@character_set_server; @@collation_database @@collation_server @@character_set_database @@character_set_server -utf8_roman_ci utf8_roman_ci utf8 utf8 +latin1_swedish_ci utf8_roman_ci latin1 utf8 'fill table with some test data'; CREATE TABLE t1(a CHAR(20))CHARACTER SET=latin1; INSERT INTO t1 VALUES('Muffler'),('Müller'),('MX Systems'); diff --git 
a/mysql-test/suite/sys_vars/r/innodb_change_buffering_basic.result b/mysql-test/suite/sys_vars/r/innodb_change_buffering_basic.result index f80ed54100f..92e22c6aa34 100644 --- a/mysql-test/suite/sys_vars/r/innodb_change_buffering_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_change_buffering_basic.result @@ -50,9 +50,11 @@ ERROR HY000: Variable 'innodb_change_buffering' is a GLOBAL variable and should set global innodb_change_buffering=1.1; ERROR 42000: Incorrect argument type to variable 'innodb_change_buffering' set global innodb_change_buffering=1; -ERROR 42000: Incorrect argument type to variable 'innodb_change_buffering' +SELECT @@global.innodb_change_buffering; +@@global.innodb_change_buffering +inserts set global innodb_change_buffering=-2; -ERROR 42000: Incorrect argument type to variable 'innodb_change_buffering' +ERROR 42000: Variable 'innodb_change_buffering' can't be set to the value of '-2' set global innodb_change_buffering=1e1; ERROR 42000: Incorrect argument type to variable 'innodb_change_buffering' set global innodb_change_buffering='some'; diff --git a/mysql-test/suite/sys_vars/r/innodb_flush_method_basic.result b/mysql-test/suite/sys_vars/r/innodb_flush_method_basic.result index 4a85748092d..21d56208141 100644 --- a/mysql-test/suite/sys_vars/r/innodb_flush_method_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_flush_method_basic.result @@ -1,27 +1,27 @@ '#---------------------BS_STVARS_029_01----------------------#' SELECT COUNT(@@GLOBAL.innodb_flush_method); COUNT(@@GLOBAL.innodb_flush_method) -0 -0 Expected +1 +1 Expected '#---------------------BS_STVARS_029_02----------------------#' SET @@GLOBAL.innodb_flush_method=1; ERROR HY000: Variable 'innodb_flush_method' is a read only variable Expected error 'Read only variable' SELECT COUNT(@@GLOBAL.innodb_flush_method); COUNT(@@GLOBAL.innodb_flush_method) -0 -0 Expected +1 +1 Expected '#---------------------BS_STVARS_029_03----------------------#' SELECT @@GLOBAL.innodb_flush_method = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_flush_method'; @@GLOBAL.innodb_flush_method = VARIABLE_VALUE -NULL +1 1 Expected SELECT COUNT(@@GLOBAL.innodb_flush_method); COUNT(@@GLOBAL.innodb_flush_method) -0 -0 Expected +1 +1 Expected SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_flush_method'; @@ -31,13 +31,13 @@ COUNT(VARIABLE_VALUE) '#---------------------BS_STVARS_029_04----------------------#' SELECT @@innodb_flush_method = @@GLOBAL.innodb_flush_method; @@innodb_flush_method = @@GLOBAL.innodb_flush_method -NULL +1 1 Expected '#---------------------BS_STVARS_029_05----------------------#' SELECT COUNT(@@innodb_flush_method); COUNT(@@innodb_flush_method) -0 -0 Expected +1 +1 Expected SELECT COUNT(@@local.innodb_flush_method); ERROR HY000: Variable 'innodb_flush_method' is a GLOBAL variable Expected error 'Variable is a GLOBAL variable' @@ -46,8 +46,8 @@ ERROR HY000: Variable 'innodb_flush_method' is a GLOBAL variable Expected error 'Variable is a GLOBAL variable' SELECT COUNT(@@GLOBAL.innodb_flush_method); COUNT(@@GLOBAL.innodb_flush_method) -0 -0 Expected +1 +1 Expected SELECT innodb_flush_method = @@SESSION.innodb_flush_method; ERROR 42S22: Unknown column 'innodb_flush_method' in 'field list' Expected error 'Readonly variable' diff --git a/mysql-test/suite/sys_vars/r/innodb_flush_method_func.result b/mysql-test/suite/sys_vars/r/innodb_flush_method_func.result new file mode 100644 index 00000000000..a7c8a2d3cc6 --- /dev/null +++ 
b/mysql-test/suite/sys_vars/r/innodb_flush_method_func.result @@ -0,0 +1,15 @@ +call mtr.add_suppression("InnoDB: Failed to set .*DIRECT"); +select @@innodb_flush_method; +@@innodb_flush_method +fsync +create table t(a serial) engine=innodb; +FLUSH TABLES; +select @@innodb_flush_method; +@@innodb_flush_method +O_DIRECT_NO_FSYNC +insert into t values(0); +FLUSH TABLES; +select @@innodb_flush_method; +@@innodb_flush_method +fsync +drop table t; diff --git a/mysql-test/suite/sys_vars/r/old_alter_table_basic.result b/mysql-test/suite/sys_vars/r/old_alter_table_basic.result index 5cc17917242..8b668340d1a 100644 --- a/mysql-test/suite/sys_vars/r/old_alter_table_basic.result +++ b/mysql-test/suite/sys_vars/r/old_alter_table_basic.result @@ -1,45 +1,105 @@ SET @start_global_value = @@global.old_alter_table; SELECT @start_global_value; @start_global_value -0 +DEFAULT select @@global.old_alter_table; @@global.old_alter_table -0 +DEFAULT select @@session.old_alter_table; @@session.old_alter_table -0 +DEFAULT show global variables like 'old_alter_table'; Variable_name Value -old_alter_table OFF +old_alter_table DEFAULT show session variables like 'old_alter_table'; Variable_name Value -old_alter_table OFF +old_alter_table DEFAULT select * from information_schema.global_variables where variable_name='old_alter_table'; VARIABLE_NAME VARIABLE_VALUE -OLD_ALTER_TABLE OFF +OLD_ALTER_TABLE DEFAULT select * from information_schema.session_variables where variable_name='old_alter_table'; VARIABLE_NAME VARIABLE_VALUE -OLD_ALTER_TABLE OFF +OLD_ALTER_TABLE DEFAULT set global old_alter_table=1; -set session old_alter_table=ON; +set session old_alter_table=1; select @@global.old_alter_table; @@global.old_alter_table -1 +COPY select @@session.old_alter_table; @@session.old_alter_table -1 +COPY show global variables like 'old_alter_table'; Variable_name Value -old_alter_table ON +old_alter_table COPY show session variables like 'old_alter_table'; Variable_name Value -old_alter_table ON +old_alter_table COPY select * from information_schema.global_variables where variable_name='old_alter_table'; VARIABLE_NAME VARIABLE_VALUE -OLD_ALTER_TABLE ON +OLD_ALTER_TABLE COPY select * from information_schema.session_variables where variable_name='old_alter_table'; VARIABLE_NAME VARIABLE_VALUE -OLD_ALTER_TABLE ON +OLD_ALTER_TABLE COPY +set global old_alter_table=2; +set session old_alter_table=2; +select @@global.old_alter_table; +@@global.old_alter_table +INPLACE +select @@session.old_alter_table; +@@session.old_alter_table +INPLACE +show global variables like 'old_alter_table'; +Variable_name Value +old_alter_table INPLACE +show session variables like 'old_alter_table'; +Variable_name Value +old_alter_table INPLACE +select * from information_schema.global_variables where variable_name='old_alter_table'; +VARIABLE_NAME VARIABLE_VALUE +OLD_ALTER_TABLE INPLACE +select * from information_schema.session_variables where variable_name='old_alter_table'; +VARIABLE_NAME VARIABLE_VALUE +OLD_ALTER_TABLE INPLACE +set global old_alter_table=3; +set session old_alter_table=3; +select @@global.old_alter_table; +@@global.old_alter_table +NOCOPY +select @@session.old_alter_table; +@@session.old_alter_table +NOCOPY +show global variables like 'old_alter_table'; +Variable_name Value +old_alter_table NOCOPY +show session variables like 'old_alter_table'; +Variable_name Value +old_alter_table NOCOPY +select * from information_schema.global_variables where variable_name='old_alter_table'; +VARIABLE_NAME VARIABLE_VALUE +OLD_ALTER_TABLE NOCOPY +select 
* from information_schema.session_variables where variable_name='old_alter_table'; +VARIABLE_NAME VARIABLE_VALUE +OLD_ALTER_TABLE NOCOPY +set global old_alter_table=4; +set session old_alter_table=4; +select @@global.old_alter_table; +@@global.old_alter_table +INSTANT +select @@session.old_alter_table; +@@session.old_alter_table +INSTANT +show global variables like 'old_alter_table'; +Variable_name Value +old_alter_table INSTANT +show session variables like 'old_alter_table'; +Variable_name Value +old_alter_table INSTANT +select * from information_schema.global_variables where variable_name='old_alter_table'; +VARIABLE_NAME VARIABLE_VALUE +OLD_ALTER_TABLE INSTANT +select * from information_schema.session_variables where variable_name='old_alter_table'; +VARIABLE_NAME VARIABLE_VALUE +OLD_ALTER_TABLE INSTANT set global old_alter_table=1.1; ERROR 42000: Incorrect argument type to variable 'old_alter_table' set global old_alter_table=1e1; @@ -49,4 +109,4 @@ ERROR 42000: Variable 'old_alter_table' can't be set to the value of 'foo' SET @@global.old_alter_table = @start_global_value; SELECT @@global.old_alter_table; @@global.old_alter_table -0 +DEFAULT diff --git a/mysql-test/suite/sys_vars/r/secure_timestamp_no.result b/mysql-test/suite/sys_vars/r/secure_timestamp_no.result new file mode 100644 index 00000000000..34eeb083c28 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/secure_timestamp_no.result @@ -0,0 +1,40 @@ +include/master-slave.inc +[connection master] +connection slave; +select @@secure_timestamp; +@@secure_timestamp +NO +set timestamp=1234567890.101112; +select if(now(6) > 20100101, 'READONLY', 'EDITABLE') as 'SUPER'; +SUPER +EDITABLE +create user foo@127.0.0.1; +connect con2,127.0.0.1,foo,,test,$SLAVE_MYPORT; +set timestamp=1234567890.101112; +select if(now(6) > 20100101, 'READONLY', 'EDITABLE') as 'non-privileged'; +non-privileged +EDITABLE +disconnect con2; +connection slave; +drop user foo@127.0.0.1; +connection master; +set time_zone='+00:00'; +set timestamp=1234567890.101112; +select @@timestamp, now(6); +@@timestamp now(6) +1234567890.101112 2009-02-13 23:31:30.101112 +create table t1 (b varchar(20), a timestamp(6) default current_timestamp(6)); +insert t1 (b) values ('replicated'); +connection slave; +create trigger t1rbr before insert on t1 for each row set new.a=now(6); +set @@global.slave_run_triggers_for_rbr= yes; +binlog 'LQfqWg8BAAAA/AAAAAABAAABAAQAMTAuMy42LU1hcmlhREItZGVidWctbG9nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtB+paEzgNAAgAEgAEBAQEEgAA5AAEGggAAAAICAgCAAAACgoKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEEwQADQgICAoKCgFlBcaR'; +binlog '0gKWSRMBAAAAMQAAAHQDAAAAAB8AAAAAAAEABHRlc3QAAnQxAAIPEQMUAAYBQFUzwA==0gKWSRcBAAAAMAAAAKQDAAAAAB8AAAAAAAEAAv/8BmJpbmxvZ0mWAtIBivg3mwo+'; +set @@global.slave_run_triggers_for_rbr= default; +select b, if(a > 20100101, 'READONLY', 'EDITABLE') as 'REPLICATION' from t1; +b REPLICATION +replicated EDITABLE +binlog EDITABLE +connection master; +drop table t1; +include/rpl_end.inc diff --git a/mysql-test/suite/sys_vars/r/secure_timestamp_rpl.result b/mysql-test/suite/sys_vars/r/secure_timestamp_rpl.result new file mode 100644 index 00000000000..e2d79745a46 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/secure_timestamp_rpl.result @@ -0,0 +1,42 @@ +include/master-slave.inc +[connection master] +connection slave; +select @@secure_timestamp; +@@secure_timestamp +REPLICATION +set timestamp=1234567890.101112; +ERROR HY000: 
The MariaDB server is running with the --secure-timestamp=REPLICATION option so it cannot execute this statement +select if(now(6) > 20100101, 'READONLY', 'EDITABLE') as 'SUPER'; +SUPER +READONLY +create user foo@127.0.0.1; +connect con2,127.0.0.1,foo,,test,$SLAVE_MYPORT; +set timestamp=1234567890.101112; +ERROR HY000: The MariaDB server is running with the --secure-timestamp=REPLICATION option so it cannot execute this statement +select if(now(6) > 20100101, 'READONLY', 'EDITABLE') as 'non-privileged'; +non-privileged +READONLY +disconnect con2; +connection slave; +drop user foo@127.0.0.1; +connection master; +set time_zone='+00:00'; +set timestamp=1234567890.101112; +select @@timestamp, now(6); +@@timestamp now(6) +1234567890.101112 2009-02-13 23:31:30.101112 +create table t1 (b varchar(20), a timestamp(6) default current_timestamp(6)); +insert t1 (b) values ('replicated'); +connection slave; +create trigger t1rbr before insert on t1 for each row set new.a=now(6); +set @@global.slave_run_triggers_for_rbr= yes; +binlog 'LQfqWg8BAAAA/AAAAAABAAABAAQAMTAuMy42LU1hcmlhREItZGVidWctbG9nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtB+paEzgNAAgAEgAEBAQEEgAA5AAEGggAAAAICAgCAAAACgoKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEEwQADQgICAoKCgFlBcaR'; +binlog '0gKWSRMBAAAAMQAAAHQDAAAAAB8AAAAAAAEABHRlc3QAAnQxAAIPEQMUAAYBQFUzwA==0gKWSRcBAAAAMAAAAKQDAAAAAB8AAAAAAAEAAv/8BmJpbmxvZ0mWAtIBivg3mwo+'; +set @@global.slave_run_triggers_for_rbr= default; +select b, if(a > 20100101, 'READONLY', 'EDITABLE') as 'REPLICATION' from t1; +b REPLICATION +replicated EDITABLE +binlog READONLY +connection master; +drop table t1; +include/rpl_end.inc diff --git a/mysql-test/suite/sys_vars/r/secure_timestamp_super.result b/mysql-test/suite/sys_vars/r/secure_timestamp_super.result new file mode 100644 index 00000000000..6fbb5e5fbf0 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/secure_timestamp_super.result @@ -0,0 +1,41 @@ +include/master-slave.inc +[connection master] +connection slave; +select @@secure_timestamp; +@@secure_timestamp +SUPER +set timestamp=1234567890.101112; +select if(now(6) > 20100101, 'READONLY', 'EDITABLE') as 'SUPER'; +SUPER +EDITABLE +create user foo@127.0.0.1; +connect con2,127.0.0.1,foo,,test,$SLAVE_MYPORT; +set timestamp=1234567890.101112; +ERROR 42000: Access denied; you need (at least one of) the SUPER privilege(s) for this operation +select if(now(6) > 20100101, 'READONLY', 'EDITABLE') as 'non-privileged'; +non-privileged +READONLY +disconnect con2; +connection slave; +drop user foo@127.0.0.1; +connection master; +set time_zone='+00:00'; +set timestamp=1234567890.101112; +select @@timestamp, now(6); +@@timestamp now(6) +1234567890.101112 2009-02-13 23:31:30.101112 +create table t1 (b varchar(20), a timestamp(6) default current_timestamp(6)); +insert t1 (b) values ('replicated'); +connection slave; +create trigger t1rbr before insert on t1 for each row set new.a=now(6); +set @@global.slave_run_triggers_for_rbr= yes; +binlog 'LQfqWg8BAAAA/AAAAAABAAABAAQAMTAuMy42LU1hcmlhREItZGVidWctbG9nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtB+paEzgNAAgAEgAEBAQEEgAA5AAEGggAAAAICAgCAAAACgoKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEEwQADQgICAoKCgFlBcaR'; +binlog 
'0gKWSRMBAAAAMQAAAHQDAAAAAB8AAAAAAAEABHRlc3QAAnQxAAIPEQMUAAYBQFUzwA==0gKWSRcBAAAAMAAAAKQDAAAAAB8AAAAAAAEAAv/8BmJpbmxvZ0mWAtIBivg3mwo+'; +set @@global.slave_run_triggers_for_rbr= default; +select b, if(a > 20100101, 'READONLY', 'EDITABLE') as 'REPLICATION' from t1; +b REPLICATION +replicated EDITABLE +binlog EDITABLE +connection master; +drop table t1; +include/rpl_end.inc diff --git a/mysql-test/suite/sys_vars/r/secure_timestamp_yes.result b/mysql-test/suite/sys_vars/r/secure_timestamp_yes.result new file mode 100644 index 00000000000..d317cfc30b2 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/secure_timestamp_yes.result @@ -0,0 +1,42 @@ +include/master-slave.inc +[connection master] +connection slave; +select @@secure_timestamp; +@@secure_timestamp +YES +set timestamp=1234567890.101112; +ERROR HY000: The MariaDB server is running with the --secure-timestamp=YES option so it cannot execute this statement +select if(now(6) > 20100101, 'READONLY', 'EDITABLE') as 'SUPER'; +SUPER +READONLY +create user foo@127.0.0.1; +connect con2,127.0.0.1,foo,,test,$SLAVE_MYPORT; +set timestamp=1234567890.101112; +ERROR HY000: The MariaDB server is running with the --secure-timestamp=YES option so it cannot execute this statement +select if(now(6) > 20100101, 'READONLY', 'EDITABLE') as 'non-privileged'; +non-privileged +READONLY +disconnect con2; +connection slave; +drop user foo@127.0.0.1; +connection master; +set time_zone='+00:00'; +set timestamp=1234567890.101112; +select @@timestamp, now(6); +@@timestamp now(6) +1234567890.101112 2009-02-13 23:31:30.101112 +create table t1 (b varchar(20), a timestamp(6) default current_timestamp(6)); +insert t1 (b) values ('replicated'); +connection slave; +create trigger t1rbr before insert on t1 for each row set new.a=now(6); +set @@global.slave_run_triggers_for_rbr= yes; +binlog 'LQfqWg8BAAAA/AAAAAABAAABAAQAMTAuMy42LU1hcmlhREItZGVidWctbG9nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtB+paEzgNAAgAEgAEBAQEEgAA5AAEGggAAAAICAgCAAAACgoKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEEwQADQgICAoKCgFlBcaR'; +binlog '0gKWSRMBAAAAMQAAAHQDAAAAAB8AAAAAAAEABHRlc3QAAnQxAAIPEQMUAAYBQFUzwA==0gKWSRcBAAAAMAAAAKQDAAAAAB8AAAAAAAEAAv/8BmJpbmxvZ0mWAtIBivg3mwo+'; +set @@global.slave_run_triggers_for_rbr= default; +select b, if(a > 20100101, 'READONLY', 'EDITABLE') as 'REPLICATION' from t1; +b REPLICATION +replicated READONLY +binlog READONLY +connection master; +drop table t1; +include/rpl_end.inc diff --git a/mysql-test/suite/sys_vars/r/slave_run_triggers_for_rbr_basic.result b/mysql-test/suite/sys_vars/r/slave_run_triggers_for_rbr_basic.result deleted file mode 100644 index 02a3cdf27ce..00000000000 --- a/mysql-test/suite/sys_vars/r/slave_run_triggers_for_rbr_basic.result +++ /dev/null @@ -1,45 +0,0 @@ -SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; -SET @@global.slave_run_triggers_for_rbr= NO; -select @@global.slave_run_triggers_for_rbr; -@@global.slave_run_triggers_for_rbr -NO -SET @@global.slave_run_triggers_for_rbr= YES; -select @@global.slave_run_triggers_for_rbr; -@@global.slave_run_triggers_for_rbr -YES -SET @@global.slave_run_triggers_for_rbr= LOGGING; -select @@global.slave_run_triggers_for_rbr; -@@global.slave_run_triggers_for_rbr -LOGGING -SET @@global.slave_run_triggers_for_rbr= default; -select @@global.slave_run_triggers_for_rbr; -@@global.slave_run_triggers_for_rbr -NO -SET @@global.slave_run_triggers_for_rbr= 0; -select 
@@global.slave_run_triggers_for_rbr; -@@global.slave_run_triggers_for_rbr -NO -SET @@global.slave_run_triggers_for_rbr= 1; -select @@global.slave_run_triggers_for_rbr; -@@global.slave_run_triggers_for_rbr -YES -SET @@global.slave_run_triggers_for_rbr= 2; -select @@global.slave_run_triggers_for_rbr; -@@global.slave_run_triggers_for_rbr -LOGGING -SET @@global.slave_run_triggers_for_rbr= 3; -ERROR 42000: Variable 'slave_run_triggers_for_rbr' can't be set to the value of '3' -select @@global.slave_run_triggers_for_rbr; -@@global.slave_run_triggers_for_rbr -LOGGING -SET @@global.slave_run_triggers_for_rbr= "N"; -ERROR 42000: Variable 'slave_run_triggers_for_rbr' can't be set to the value of 'N' -select @@global.slave_run_triggers_for_rbr; -@@global.slave_run_triggers_for_rbr -LOGGING -SET @@global.slave_run_triggers_for_rbr= -1; -ERROR 42000: Variable 'slave_run_triggers_for_rbr' can't be set to the value of '-1' -select @@global.slave_run_triggers_for_rbr; -@@global.slave_run_triggers_for_rbr -LOGGING -SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr; diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff index 3d9d6d27fb3..d99d0aeaf35 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff @@ -134,15 +134,6 @@ VARIABLE_COMMENT Maximum number of seconds that semaphore times out in InnoDB. NUMERIC_MIN_VALUE 1 NUMERIC_MAX_VALUE 4294967295 -@@ -922,7 +922,7 @@ - GLOBAL_VALUE_ORIGIN COMPILE-TIME - DEFAULT_VALUE 100 - VARIABLE_SCOPE GLOBAL --VARIABLE_TYPE BIGINT -+VARIABLE_TYPE INT - VARIABLE_COMMENT Percentage of B-tree page filled during bulk insert - NUMERIC_MIN_VALUE 10 - NUMERIC_MAX_VALUE 100 @@ -936,7 +936,7 @@ GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE 0 @@ -302,8 +293,8 @@ GLOBAL_VALUE_ORIGIN CONFIG DEFAULT_VALUE 16777216 VARIABLE_SCOPE GLOBAL --VARIABLE_TYPE BIGINT -+VARIABLE_TYPE INT +-VARIABLE_TYPE BIGINT UNSIGNED ++VARIABLE_TYPE INT UNSIGNED VARIABLE_COMMENT The size of the buffer which InnoDB uses to write log to the log files on disk. NUMERIC_MIN_VALUE 262144 -NUMERIC_MAX_VALUE 9223372036854775807 @@ -368,8 +359,8 @@ GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE 0 VARIABLE_SCOPE GLOBAL --VARIABLE_TYPE BIGINT -+VARIABLE_TYPE INT +-VARIABLE_TYPE BIGINT UNSIGNED ++VARIABLE_TYPE INT UNSIGNED VARIABLE_COMMENT How many files at the maximum InnoDB keeps open at the same time. NUMERIC_MIN_VALUE 0 -NUMERIC_MAX_VALUE 9223372036854775807 diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result index 5e73ee465c4..d2153cc560d 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result @@ -1,6 +1,7 @@ select * from information_schema.system_variables where variable_name like 'innodb%' and variable_name not in ( +'innodb_version', # always the same as the server version 'innodb_disallow_writes', # only available WITH_WSREP 'innodb_numa_interleave', # only available WITH_NUMA 'innodb_sched_priority_cleaner', # linux only @@ -321,7 +322,7 @@ GLOBAL_VALUE 8388608 GLOBAL_VALUE_ORIGIN CONFIG DEFAULT_VALUE 134217728 VARIABLE_SCOPE GLOBAL -VARIABLE_TYPE BIGINT +VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT The size of the memory buffer InnoDB uses to cache data and indexes of its tables. 
NUMERIC_MIN_VALUE 5242880 NUMERIC_MAX_VALUE 9223372036854775807 @@ -363,12 +364,12 @@ GLOBAL_VALUE all GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE all VARIABLE_SCOPE GLOBAL -VARIABLE_TYPE VARCHAR -VARIABLE_COMMENT Buffer changes to reduce random access: OFF, ON, inserting, deleting, changing, or purging. +VARIABLE_TYPE ENUM +VARIABLE_COMMENT Buffer changes to secondary indexes. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST none,inserts,deletes,changes,purges,all READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_CHANGE_BUFFERING_DEBUG @@ -543,7 +544,7 @@ VARIABLE_NAME INNODB_DATA_FILE_PATH SESSION_VALUE NULL GLOBAL_VALUE ibdata1:12M:autoextend GLOBAL_VALUE_ORIGIN CONFIG -DEFAULT_VALUE +DEFAULT_VALUE ibdata1:12M:autoextend VARIABLE_SCOPE GLOBAL VARIABLE_TYPE VARCHAR VARIABLE_COMMENT Path to individual files and their sizes. @@ -923,7 +924,7 @@ GLOBAL_VALUE 100 GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE 100 VARIABLE_SCOPE GLOBAL -VARIABLE_TYPE BIGINT +VARIABLE_TYPE INT UNSIGNED VARIABLE_COMMENT Percentage of B-tree page filled during bulk insert NUMERIC_MIN_VALUE 10 NUMERIC_MAX_VALUE 100 @@ -989,16 +990,16 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_FLUSH_METHOD SESSION_VALUE NULL -GLOBAL_VALUE +GLOBAL_VALUE fsync GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE +DEFAULT_VALUE fsync VARIABLE_SCOPE GLOBAL -VARIABLE_TYPE VARCHAR +VARIABLE_TYPE ENUM VARIABLE_COMMENT With which method to flush data. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST fsync,O_DSYNC,littlesync,nosync,O_DIRECT,O_DIRECT_NO_FSYNC READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_FLUSH_NEIGHBORS @@ -1357,7 +1358,7 @@ GLOBAL_VALUE 1048576 GLOBAL_VALUE_ORIGIN CONFIG DEFAULT_VALUE 16777216 VARIABLE_SCOPE GLOBAL -VARIABLE_TYPE BIGINT +VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT The size of the buffer which InnoDB uses to write log to the log files on disk. NUMERIC_MIN_VALUE 262144 NUMERIC_MAX_VALUE 9223372036854775807 @@ -1679,7 +1680,7 @@ GLOBAL_VALUE 300 GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE 0 VARIABLE_SCOPE GLOBAL -VARIABLE_TYPE BIGINT +VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT How many files at the maximum InnoDB keeps open at the same time. NUMERIC_MIN_VALUE 0 NUMERIC_MAX_VALUE 9223372036854775807 @@ -2251,7 +2252,7 @@ VARIABLE_NAME INNODB_TEMP_DATA_FILE_PATH SESSION_VALUE NULL GLOBAL_VALUE ibtmp1:12M:autoextend GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE +DEFAULT_VALUE ibtmp1:12M:autoextend VARIABLE_SCOPE GLOBAL VARIABLE_TYPE VARCHAR VARIABLE_COMMENT Path to files and their sizes making temp-tablespace. 
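Side note (not applied by this patch): the hunks above record innodb_change_buffering and innodb_flush_method being reported as ENUM instead of VARCHAR. A quick way to inspect the new metadata, sketched against the same information_schema.system_variables table this result file is generated from:

    # Expected to show VARIABLE_TYPE ENUM with the
    # fsync,O_DSYNC,littlesync,nosync,O_DIRECT,O_DIRECT_NO_FSYNC list.
    SELECT variable_type, enum_value_list
      FROM information_schema.system_variables
      WHERE variable_name = 'INNODB_FLUSH_METHOD';
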
@@ -2401,20 +2402,6 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE -VARIABLE_NAME INNODB_VERSION -SESSION_VALUE NULL -GLOBAL_VALUE 5.7.21 -GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE NULL -VARIABLE_SCOPE GLOBAL -VARIABLE_TYPE VARCHAR -VARIABLE_COMMENT InnoDB version -NUMERIC_MIN_VALUE NULL -NUMERIC_MAX_VALUE NULL -NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL -READ_ONLY YES -COMMAND_LINE_ARGUMENT NULL VARIABLE_NAME INNODB_WRITE_IO_THREADS SESSION_VALUE NULL GLOBAL_VALUE 2 diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result index a7d92473dc1..feba4a4d248 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result @@ -26,6 +26,20 @@ variable_name not in ( 'version_malloc_library', 'version_ssl_library', 'version' ) order by variable_name; +VARIABLE_NAME ALTER_ALGORITHM +SESSION_VALUE DEFAULT +GLOBAL_VALUE DEFAULT +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE DEFAULT +VARIABLE_SCOPE SESSION +VARIABLE_TYPE ENUM +VARIABLE_COMMENT Specify the alter table algorithm +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST DEFAULT,COPY,INPLACE,NOCOPY,INSTANT +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME AUTOCOMMIT SESSION_VALUE ON GLOBAL_VALUE ON @@ -2589,17 +2603,17 @@ ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME OLD_ALTER_TABLE -SESSION_VALUE OFF -GLOBAL_VALUE OFF +SESSION_VALUE DEFAULT +GLOBAL_VALUE DEFAULT GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE OFF +DEFAULT_VALUE DEFAULT VARIABLE_SCOPE SESSION -VARIABLE_TYPE BOOLEAN -VARIABLE_COMMENT Use old, non-optimized alter table +VARIABLE_TYPE ENUM +VARIABLE_COMMENT Alias for alter_algorithm. Deprecated. Use --alter-algorithm instead. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST OFF,ON +ENUM_VALUE_LIST DEFAULT,COPY,INPLACE,NOCOPY,INSTANT READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME OLD_MODE @@ -3526,6 +3540,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME SECURE_TIMESTAMP +SESSION_VALUE NULL +GLOBAL_VALUE NO +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE NO +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE ENUM +VARIABLE_COMMENT Restricts direct setting of a session timestamp. Possible levels are: YES - timestamp cannot deviate from the system clock, REPLICATION - replication thread can adjust timestamp to match the master's, SUPER - a user with this privilege and a replication thread can adjust timestamp, NO - historical behavior, anyone can modify session timestamp +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST NO,SUPER,REPLICATION,YES +READ_ONLY YES +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME SERVER_ID SESSION_VALUE 1 GLOBAL_VALUE 1 @@ -4065,7 +4093,7 @@ GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE ERROR VARIABLE_SCOPE SESSION VARIABLE_TYPE ENUM -VARIABLE_COMMENT Versioning ALTER TABLE mode. ERROR: Fail ALTER with error; KEEP: Keep historical system rows and subject them to ALTER; +VARIABLE_COMMENT Versioning ALTER TABLE mode. 
ERROR: Fail ALTER with error; KEEP: Keep historical system rows and subject them to ALTER NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result index f6f88692ff2..727e7ee729b 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result @@ -26,6 +26,20 @@ variable_name not in ( 'version_malloc_library', 'version_ssl_library', 'version' ) order by variable_name; +VARIABLE_NAME ALTER_ALGORITHM +SESSION_VALUE DEFAULT +GLOBAL_VALUE DEFAULT +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE DEFAULT +VARIABLE_SCOPE SESSION +VARIABLE_TYPE ENUM +VARIABLE_COMMENT Specify the alter table algorithm +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST DEFAULT,COPY,INPLACE,NOCOPY,INSTANT +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME AUTOCOMMIT SESSION_VALUE ON GLOBAL_VALUE ON @@ -2799,17 +2813,17 @@ ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME OLD_ALTER_TABLE -SESSION_VALUE OFF -GLOBAL_VALUE OFF +SESSION_VALUE DEFAULT +GLOBAL_VALUE DEFAULT GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE OFF +DEFAULT_VALUE DEFAULT VARIABLE_SCOPE SESSION -VARIABLE_TYPE BOOLEAN -VARIABLE_COMMENT Use old, non-optimized alter table +VARIABLE_TYPE ENUM +VARIABLE_COMMENT Alias for alter_algorithm. Deprecated. Use --alter-algorithm instead. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST OFF,ON +ENUM_VALUE_LIST DEFAULT,COPY,INPLACE,NOCOPY,INSTANT READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME OLD_MODE @@ -4142,6 +4156,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME SECURE_TIMESTAMP +SESSION_VALUE NULL +GLOBAL_VALUE NO +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE NO +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE ENUM +VARIABLE_COMMENT Restricts direct setting of a session timestamp. Possible levels are: YES - timestamp cannot deviate from the system clock, REPLICATION - replication thread can adjust timestamp to match the master's, SUPER - a user with this privilege and a replication thread can adjust timestamp, NO - historical behavior, anyone can modify session timestamp +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST NO,SUPER,REPLICATION,YES +READ_ONLY YES +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME SERVER_ID SESSION_VALUE 1 GLOBAL_VALUE 1 @@ -5017,7 +5045,7 @@ GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE ERROR VARIABLE_SCOPE SESSION VARIABLE_TYPE ENUM -VARIABLE_COMMENT Versioning ALTER TABLE mode. ERROR: Fail ALTER with error; KEEP: Keep historical system rows and subject them to ALTER; +VARIABLE_COMMENT Versioning ALTER TABLE mode. 
ERROR: Fail ALTER with error; KEEP: Keep historical system rows and subject them to ALTER NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL diff --git a/mysql-test/suite/sys_vars/t/innodb_change_buffering_basic.test b/mysql-test/suite/sys_vars/t/innodb_change_buffering_basic.test index aba3b1e3479..19f0890feff 100644 --- a/mysql-test/suite/sys_vars/t/innodb_change_buffering_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_change_buffering_basic.test @@ -48,9 +48,9 @@ set @@session.innodb_change_buffering='some'; # --error ER_WRONG_TYPE_FOR_VAR set global innodb_change_buffering=1.1; ---error ER_WRONG_TYPE_FOR_VAR set global innodb_change_buffering=1; ---error ER_WRONG_TYPE_FOR_VAR +SELECT @@global.innodb_change_buffering; +--error ER_WRONG_VALUE_FOR_VAR set global innodb_change_buffering=-2; --error ER_WRONG_TYPE_FOR_VAR set global innodb_change_buffering=1e1; diff --git a/mysql-test/suite/sys_vars/t/innodb_flush_method_basic.test b/mysql-test/suite/sys_vars/t/innodb_flush_method_basic.test index 9f99c1305fd..76a9ed9cefd 100644 --- a/mysql-test/suite/sys_vars/t/innodb_flush_method_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_flush_method_basic.test @@ -29,7 +29,7 @@ # Displaying default value # #################################################################### SELECT COUNT(@@GLOBAL.innodb_flush_method); ---echo 0 Expected +--echo 1 Expected --echo '#---------------------BS_STVARS_029_02----------------------#' @@ -42,7 +42,7 @@ SET @@GLOBAL.innodb_flush_method=1; --echo Expected error 'Read only variable' SELECT COUNT(@@GLOBAL.innodb_flush_method); ---echo 0 Expected +--echo 1 Expected @@ -60,7 +60,7 @@ WHERE VARIABLE_NAME='innodb_flush_method'; --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_flush_method); ---echo 0 Expected +--echo 1 Expected --disable_warnings SELECT COUNT(VARIABLE_VALUE) @@ -86,7 +86,7 @@ SELECT @@innodb_flush_method = @@GLOBAL.innodb_flush_method; ################################################################################ SELECT COUNT(@@innodb_flush_method); ---echo 0 Expected +--echo 1 Expected --Error ER_INCORRECT_GLOBAL_LOCAL_VAR SELECT COUNT(@@local.innodb_flush_method); @@ -97,7 +97,7 @@ SELECT COUNT(@@SESSION.innodb_flush_method); --echo Expected error 'Variable is a GLOBAL variable' SELECT COUNT(@@GLOBAL.innodb_flush_method); ---echo 0 Expected +--echo 1 Expected --Error ER_BAD_FIELD_ERROR SELECT innodb_flush_method = @@SESSION.innodb_flush_method; diff --git a/mysql-test/suite/sys_vars/t/innodb_flush_method_func.test b/mysql-test/suite/sys_vars/t/innodb_flush_method_func.test new file mode 100644 index 00000000000..aad91d0f4a3 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_flush_method_func.test @@ -0,0 +1,26 @@ +--source include/have_innodb.inc +# Embedded server tests do not support restarting. 
+--source include/not_embedded.inc + +call mtr.add_suppression("InnoDB: Failed to set .*DIRECT"); +--replace_result unbuffered fsync +select @@innodb_flush_method; + +create table t(a serial) engine=innodb; + +# kill and restart +FLUSH TABLES; +let $shutdown_timeout= 0; +let $restart_parameters=--innodb-flush-method=5; +--source include/restart_mysqld.inc +select @@innodb_flush_method; + +insert into t values(0); +# kill and restart +FLUSH TABLES; +let $shutdown_timeout= 0; +let $restart_parameters=--innodb-flush-method=0; +--source include/restart_mysqld.inc +select @@innodb_flush_method; + +drop table t; diff --git a/mysql-test/suite/sys_vars/t/innodb_stats_include_delete_marked_basic.test b/mysql-test/suite/sys_vars/t/innodb_stats_include_delete_marked_basic.test index a3a9b99a133..2a3a0f9b44e 100644 --- a/mysql-test/suite/sys_vars/t/innodb_stats_include_delete_marked_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_stats_include_delete_marked_basic.test @@ -18,11 +18,6 @@ --source include/have_innodb.inc -if (`select plugin_auth_version <= "5.7.17" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not present before MySQL 5.7.17 or 5.6.35 -} - #################################################################### # Display default value # #################################################################### diff --git a/mysql-test/suite/sys_vars/t/old_alter_table_basic.test b/mysql-test/suite/sys_vars/t/old_alter_table_basic.test index cce180fea67..9a6cb4779e5 100644 --- a/mysql-test/suite/sys_vars/t/old_alter_table_basic.test +++ b/mysql-test/suite/sys_vars/t/old_alter_table_basic.test @@ -20,7 +20,7 @@ select * from information_schema.session_variables where variable_name='old_alte # show that it's writable # set global old_alter_table=1; -set session old_alter_table=ON; +set session old_alter_table=1; select @@global.old_alter_table; select @@session.old_alter_table; show global variables like 'old_alter_table'; @@ -28,6 +28,32 @@ show session variables like 'old_alter_table'; select * from information_schema.global_variables where variable_name='old_alter_table'; select * from information_schema.session_variables where variable_name='old_alter_table'; +set global old_alter_table=2; +set session old_alter_table=2; +select @@global.old_alter_table; +select @@session.old_alter_table; +show global variables like 'old_alter_table'; +show session variables like 'old_alter_table'; +select * from information_schema.global_variables where variable_name='old_alter_table'; +select * from information_schema.session_variables where variable_name='old_alter_table'; + +set global old_alter_table=3; +set session old_alter_table=3; +select @@global.old_alter_table; +select @@session.old_alter_table; +show global variables like 'old_alter_table'; +show session variables like 'old_alter_table'; +select * from information_schema.global_variables where variable_name='old_alter_table'; +select * from information_schema.session_variables where variable_name='old_alter_table'; + +set global old_alter_table=4; +set session old_alter_table=4; +select @@global.old_alter_table; +select @@session.old_alter_table; +show global variables like 'old_alter_table'; +show session variables like 'old_alter_table'; +select * from information_schema.global_variables where variable_name='old_alter_table'; +select * from information_schema.session_variables where variable_name='old_alter_table'; # # incorrect types # diff --git a/mysql-test/suite/sys_vars/t/secure_timestamp_no-slave.opt 
b/mysql-test/suite/sys_vars/t/secure_timestamp_no-slave.opt new file mode 100644 index 00000000000..4c9000c0ebe --- /dev/null +++ b/mysql-test/suite/sys_vars/t/secure_timestamp_no-slave.opt @@ -0,0 +1 @@ +--secure-timestamp=no diff --git a/mysql-test/suite/sys_vars/t/secure_timestamp_no.test b/mysql-test/suite/sys_vars/t/secure_timestamp_no.test new file mode 100644 index 00000000000..8871666abd0 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/secure_timestamp_no.test @@ -0,0 +1,4 @@ +# +# MDEV-15923 option to control who can set session @@timestamp +# +source inc/secure_timestamp_func.inc; diff --git a/mysql-test/suite/sys_vars/t/secure_timestamp_rpl-slave.opt b/mysql-test/suite/sys_vars/t/secure_timestamp_rpl-slave.opt new file mode 100644 index 00000000000..13754861d0f --- /dev/null +++ b/mysql-test/suite/sys_vars/t/secure_timestamp_rpl-slave.opt @@ -0,0 +1 @@ +--secure-timestamp=replication diff --git a/mysql-test/suite/sys_vars/t/secure_timestamp_rpl.test b/mysql-test/suite/sys_vars/t/secure_timestamp_rpl.test new file mode 100644 index 00000000000..8871666abd0 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/secure_timestamp_rpl.test @@ -0,0 +1,4 @@ +# +# MDEV-15923 option to control who can set session @@timestamp +# +source inc/secure_timestamp_func.inc; diff --git a/mysql-test/suite/sys_vars/t/secure_timestamp_super-slave.opt b/mysql-test/suite/sys_vars/t/secure_timestamp_super-slave.opt new file mode 100644 index 00000000000..f747ec6c0c9 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/secure_timestamp_super-slave.opt @@ -0,0 +1 @@ +--secure-timestamp=super diff --git a/mysql-test/suite/sys_vars/t/secure_timestamp_super.test b/mysql-test/suite/sys_vars/t/secure_timestamp_super.test new file mode 100644 index 00000000000..8871666abd0 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/secure_timestamp_super.test @@ -0,0 +1,4 @@ +# +# MDEV-15923 option to control who can set session @@timestamp +# +source inc/secure_timestamp_func.inc; diff --git a/mysql-test/suite/sys_vars/t/secure_timestamp_yes-slave.opt b/mysql-test/suite/sys_vars/t/secure_timestamp_yes-slave.opt new file mode 100644 index 00000000000..a74d68957ef --- /dev/null +++ b/mysql-test/suite/sys_vars/t/secure_timestamp_yes-slave.opt @@ -0,0 +1 @@ +--secure-timestamp=yes diff --git a/mysql-test/suite/sys_vars/t/secure_timestamp_yes.test b/mysql-test/suite/sys_vars/t/secure_timestamp_yes.test new file mode 100644 index 00000000000..8871666abd0 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/secure_timestamp_yes.test @@ -0,0 +1,4 @@ +# +# MDEV-15923 option to control who can set session @@timestamp +# +source inc/secure_timestamp_func.inc; diff --git a/mysql-test/suite/sys_vars/t/slave_run_triggers_for_rbr_basic.test b/mysql-test/suite/sys_vars/t/slave_run_triggers_for_rbr_basic.test deleted file mode 100644 index ac5296677b9..00000000000 --- a/mysql-test/suite/sys_vars/t/slave_run_triggers_for_rbr_basic.test +++ /dev/null @@ -1,30 +0,0 @@ - --- source include/have_rbr_triggers.inc - -SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; -SET @@global.slave_run_triggers_for_rbr= NO; -select @@global.slave_run_triggers_for_rbr; -SET @@global.slave_run_triggers_for_rbr= YES; -select @@global.slave_run_triggers_for_rbr; -SET @@global.slave_run_triggers_for_rbr= LOGGING; -select @@global.slave_run_triggers_for_rbr; -SET @@global.slave_run_triggers_for_rbr= default; -select @@global.slave_run_triggers_for_rbr; -SET @@global.slave_run_triggers_for_rbr= 0; -select @@global.slave_run_triggers_for_rbr; -SET 
@@global.slave_run_triggers_for_rbr= 1; -select @@global.slave_run_triggers_for_rbr; -SET @@global.slave_run_triggers_for_rbr= 2; -select @@global.slave_run_triggers_for_rbr; ---error ER_WRONG_VALUE_FOR_VAR -SET @@global.slave_run_triggers_for_rbr= 3; -select @@global.slave_run_triggers_for_rbr; ---error ER_WRONG_VALUE_FOR_VAR -SET @@global.slave_run_triggers_for_rbr= "N"; -select @@global.slave_run_triggers_for_rbr; ---error ER_WRONG_VALUE_FOR_VAR -SET @@global.slave_run_triggers_for_rbr= -1; -select @@global.slave_run_triggers_for_rbr; - - -SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr; diff --git a/mysql-test/suite/sys_vars/t/sysvars_innodb.test b/mysql-test/suite/sys_vars/t/sysvars_innodb.test index fe91835eac5..9be60723f85 100644 --- a/mysql-test/suite/sys_vars/t/sysvars_innodb.test +++ b/mysql-test/suite/sys_vars/t/sysvars_innodb.test @@ -3,16 +3,12 @@ --source include/not_valgrind.inc --source include/word_size.inc -if (`select plugin_auth_version <= "5.6.34-79.1" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in XtraDB as of 10.1.21-MariaDB or earlier -} - --vertical_results --replace_regex /^\/\S+/PATH/ /\.\//PATH/ select * from information_schema.system_variables where variable_name like 'innodb%' and variable_name not in ( + 'innodb_version', # always the same as the server version 'innodb_disallow_writes', # only available WITH_WSREP 'innodb_numa_interleave', # only available WITH_NUMA 'innodb_sched_priority_cleaner', # linux only diff --git a/mysql-test/suite/vcol/r/binlog.result b/mysql-test/suite/vcol/r/binlog.result new file mode 100644 index 00000000000..35bfb50ba2f --- /dev/null +++ b/mysql-test/suite/vcol/r/binlog.result @@ -0,0 +1,70 @@ +include/master-slave.inc +[connection master] +CREATE TABLE t1 ( +pk SERIAL, +vcol_date DATE AS (col_date) PERSISTENT, +vcol_int INT AS (col_int) VIRTUAL, +vcol_year YEAR AS (col_year) PERSISTENT, +vcol_blob BLOB AS (col_blob) VIRTUAL, +col_date DATE, +col_int INT NULL, +col_blob BLOB NULL, +col_year YEAR, +PRIMARY KEY(pk) +) ENGINE=InnoDB; +INSERT INTO t1 (col_date,col_int,col_blob,col_year) VALUES ('2010-04-24',5,'foo',1981); +SET SQL_MODE=''; +set binlog_row_image="FULL"; +CREATE VIEW v1 AS SELECT * FROM t1; +REPLACE INTO v1 SELECT pk, vcol_date, vcol_int, vcol_year, vcol_blob, col_date, col_int, col_blob, 1982 FROM t1; +Warnings: +Warning 1906 The value specified for generated column 'vcol_date' in table 't1' ignored +Warning 1906 The value specified for generated column 'vcol_int' in table 't1' ignored +Warning 1906 The value specified for generated column 'vcol_year' in table 't1' ignored +Warning 1906 The value specified for generated column 'vcol_blob' in table 't1' ignored +select col_date,col_int,col_blob,col_year from v1; +col_date col_int col_blob col_year +2010-04-24 5 foo 1982 +connection slave; +select col_date,col_int,col_blob,col_year from v1; +col_date col_int col_blob col_year +2010-04-24 5 foo 1982 +connection master; +DROP VIEW v1; +set binlog_row_image="MINIMAL"; +CREATE VIEW v1 AS SELECT * FROM t1; +REPLACE INTO v1 SELECT pk, vcol_date, vcol_int, vcol_year, vcol_blob, col_date, col_int, col_blob, 1983 FROM t1; +Warnings: +Warning 1906 The value specified for generated column 'vcol_date' in table 't1' ignored +Warning 1906 The value specified for generated column 'vcol_int' in table 't1' ignored +Warning 1906 The value specified for generated column 'vcol_year' in table 't1' ignored +Warning 1906 The value specified for generated column 'vcol_blob' in 
table 't1' ignored +select col_date,col_int,col_blob,col_year from v1; +col_date col_int col_blob col_year +2010-04-24 5 foo 1983 +connection slave; +select col_date,col_int,col_blob,col_year from v1; +col_date col_int col_blob col_year +2010-04-24 5 foo 1983 +connection master; +DROP VIEW v1; +set @@binlog_row_image="NOBLOB"; +CREATE VIEW v1 AS SELECT * FROM t1; +REPLACE INTO v1 SELECT pk, vcol_date, vcol_int, vcol_year, vcol_blob, col_date, col_int, col_blob, 1984 FROM t1; +Warnings: +Warning 1906 The value specified for generated column 'vcol_date' in table 't1' ignored +Warning 1906 The value specified for generated column 'vcol_int' in table 't1' ignored +Warning 1906 The value specified for generated column 'vcol_year' in table 't1' ignored +Warning 1906 The value specified for generated column 'vcol_blob' in table 't1' ignored +select col_date,col_int,col_blob,col_year from v1; +col_date col_int col_blob col_year +2010-04-24 5 foo 1984 +connection slave; +select col_date,col_int,col_blob,col_year from v1; +col_date col_int col_blob col_year +2010-04-24 5 foo 1984 +connection master; +DROP VIEW v1; +set @@binlog_row_image=default; +DROP TABLE t1; +include/rpl_end.inc diff --git a/mysql-test/suite/vcol/r/partition.result b/mysql-test/suite/vcol/r/partition.result index 349deed653d..6afc57be067 100644 --- a/mysql-test/suite/vcol/r/partition.result +++ b/mysql-test/suite/vcol/r/partition.result @@ -18,3 +18,13 @@ x 15 22 DROP TABLE t1; +create table t1 (i int, v int as (i) virtual) +partition by range columns (i) +subpartition by hash(v) subpartitions 3 ( +partition p1 values less than (3), +partition pn values less than (maxvalue)); +insert t1 set i= 0; +set statement sql_mode= '' for update t1 set i= 1, v= 2; +Warnings: +Warning 1906 The value specified for generated column 'v' in table 't1' ignored +drop table t1; diff --git a/mysql-test/suite/vcol/r/update.result b/mysql-test/suite/vcol/r/update.result index 5c7905cf547..5a6355e1773 100644 --- a/mysql-test/suite/vcol/r/update.result +++ b/mysql-test/suite/vcol/r/update.result @@ -9,8 +9,20 @@ a b c 2 3 4 drop table t1; create table t1 (a int, c int as(a), p varchar(20) as(y), y char(20), index (p,c)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `c` int(11) GENERATED ALWAYS AS (`a`) VIRTUAL, + `p` varchar(20) GENERATED ALWAYS AS (`y`) VIRTUAL, + `y` char(20) DEFAULT NULL, + KEY `p` (`p`,`c`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 insert into t1 (a,y) values(1, "yyy"); update t1 set a = 100 where a = 1; +check table t1; +Table Op Msg_type Msg_text +test.t1 check status OK drop table t1; create table t1 ( a varchar(10000), diff --git a/mysql-test/suite/vcol/r/update_binlog.result b/mysql-test/suite/vcol/r/update_binlog.result new file mode 100644 index 00000000000..d4102fc460a --- /dev/null +++ b/mysql-test/suite/vcol/r/update_binlog.result @@ -0,0 +1,361 @@ +set binlog_row_image="FULL"; +set @@default_storage_engine="myisam"; +create table t1 (a int, b int as (a+1), c int as (b+1) stored); +insert t1 set a=1; +select * from t1; +a b c +1 2 3 +update t1 set a=2; +select * from t1; +a b c +2 3 4 +drop table t1; +create table t1 (a int, c int as(a), p varchar(20) as(y), y char(20), index (p,c)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `c` int(11) GENERATED ALWAYS AS (`a`) VIRTUAL, + `p` varchar(20) GENERATED ALWAYS AS (`y`) VIRTUAL, + `y` char(20) DEFAULT NULL, + KEY `p` (`p`,`c`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +insert 
into t1 (a,y) values(1, "yyy"); +update t1 set a = 100 where a = 1; +check table t1; +Table Op Msg_type Msg_text +test.t1 check status OK +drop table t1; +create table t1 ( +a varchar(10000), +b varchar(3000), +c varchar(14000) generated always as (concat(a,b)) virtual, +d varchar(5000) generated always as (b) virtual, +e int(11) generated always as (10) virtual, +h int(11) not null primary key, +index(c(100), d(20))); +insert t1 (a,b,h) values (repeat('g', 10000), repeat('x', 2800), 1); +update t1 set a = repeat(cast(1 as char), 2000); +drop table t1; +create table t1 ( +a varchar(10000), +b varchar(3000), +c varchar(14000) generated always as (concat(a,b)) virtual, +i varchar(5000) generated always as (b) virtual, +d varchar(5000) generated always as (i) virtual, +e int(11) generated always as (10) virtual, +h int(11) not null primary key, +index(c(100), d(20))); +insert t1 (a,b,h) values (repeat('g', 10000), repeat('x', 2800), 1); +update t1 set a = repeat(cast(1 as char), 2000); +drop table t1; +create table t1(a blob not null, b int, c varbinary (10) generated always as (a) virtual, unique (c(9))); +insert t1 (a,b) values ('a', 1); +replace t1 set a = 'a',b =1; +insert t1 (a,b) values ('a', 1) on duplicate key update a='b', b=2; +select * from t1; +a b c +b 2 b +drop table t1; +create table t (a int primary key, b int, c int as (b), index (c)); +insert t (a,b) values (9,0); +create table t2 select * from t; +update t, t2 set t.b=10 where t.a=t2.a; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c +9 10 10 +drop table t, t2; +create table t1 (a int, b int, c int, d int, e int); +insert t1 values (1,2,3,4,5), (1,2,3,4,5); +SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR +create table t (a int primary key, +b int, c blob as (b), index (c(57)), +d blob, e blob as (d), index (e(57))) +replace select * from t1; +Warnings: +Warning 1906 The value specified for generated column 'c' in table 't' ignored +Warning 1906 The value specified for generated column 'e' in table 't' ignored +Warning 1906 The value specified for generated column 'c' in table 't' ignored +Warning 1906 The value specified for generated column 'e' in table 't' ignored +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +1 2 2 4 4 +update t set a=10, b=1, d=1; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 1 1 1 1 +replace t (a,b,d) values (10,2,2); +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 2 2 2 2 +insert t(a,b,d) values (10) on duplicate key update b=3; +ERROR 21S01: Column count doesn't match value count at row 1 +insert t(a,b,d) values (10,2,2) on duplicate key update b=3, d=3; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 3 3 3 3 +replace t (a,b,d) select 10,4,4; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 4 4 4 4 +insert t(a,b,d) select 10,4,4 on duplicate key update b=5, d=5; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 5 5 5 5 +replace delayed t (a,b,d) values (10,6,6); +flush tables; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 6 6 6 6 +insert delayed t(a,b,d) values (10,6,6) on duplicate key update b=7, d=7; +flush tables; +check table t; +Table Op Msg_type Msg_text +test.t check status OK 
+select * from t; +a b c d e +10 7 7 7 7 +load data infile 'MYSQLTEST_VARDIR/tmp/vblobs.txt' replace into table t; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 8 8 8 8 +update t set a=11, b=9, d=9 where a>5; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +11 9 9 9 9 +create table t2 select * from t; +update t, t2 set t.b=10, t.d=10 where t.a=t2.a; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +11 10 10 10 10 +update t, t tt set t.b=11, tt.d=11 where t.a=tt.a; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +11 11 11 11 11 +drop table t, t1, t2; +create table t (f1 int, f2 int, f3 int as (f1*2) virtual, key(f3,f2)); +insert into t (f1,f2) values (1,1),(2,2); +create view v as +select a2.f1, a2.f2, a1.f3 +from t a1, t a2 +where a2.f3 <> 0 +with local check option; +update v set f3 = 52; +drop view v; +drop table t; +set binlog_row_image="MINIMAL"; +create table t1 (a int, b int as (a+1), c int as (b+1) stored); +insert t1 set a=1; +select * from t1; +a b c +1 2 3 +update t1 set a=2; +select * from t1; +a b c +2 3 4 +drop table t1; +create table t1 (a int, c int as(a), p varchar(20) as(y), y char(20), index (p,c)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `c` int(11) GENERATED ALWAYS AS (`a`) VIRTUAL, + `p` varchar(20) GENERATED ALWAYS AS (`y`) VIRTUAL, + `y` char(20) DEFAULT NULL, + KEY `p` (`p`,`c`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +insert into t1 (a,y) values(1, "yyy"); +update t1 set a = 100 where a = 1; +check table t1; +Table Op Msg_type Msg_text +test.t1 check status OK +drop table t1; +create table t1 ( +a varchar(10000), +b varchar(3000), +c varchar(14000) generated always as (concat(a,b)) virtual, +d varchar(5000) generated always as (b) virtual, +e int(11) generated always as (10) virtual, +h int(11) not null primary key, +index(c(100), d(20))); +insert t1 (a,b,h) values (repeat('g', 10000), repeat('x', 2800), 1); +update t1 set a = repeat(cast(1 as char), 2000); +drop table t1; +create table t1 ( +a varchar(10000), +b varchar(3000), +c varchar(14000) generated always as (concat(a,b)) virtual, +i varchar(5000) generated always as (b) virtual, +d varchar(5000) generated always as (i) virtual, +e int(11) generated always as (10) virtual, +h int(11) not null primary key, +index(c(100), d(20))); +insert t1 (a,b,h) values (repeat('g', 10000), repeat('x', 2800), 1); +update t1 set a = repeat(cast(1 as char), 2000); +drop table t1; +create table t1(a blob not null, b int, c varbinary (10) generated always as (a) virtual, unique (c(9))); +insert t1 (a,b) values ('a', 1); +replace t1 set a = 'a',b =1; +insert t1 (a,b) values ('a', 1) on duplicate key update a='b', b=2; +select * from t1; +a b c +b 2 b +drop table t1; +create table t (a int primary key, b int, c int as (b), index (c)); +insert t (a,b) values (9,0); +create table t2 select * from t; +update t, t2 set t.b=10 where t.a=t2.a; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c +9 10 10 +drop table t, t2; +create table t1 (a int, b int, c int, d int, e int); +insert t1 values (1,2,3,4,5), (1,2,3,4,5); +SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR +create table t (a int primary key, +b int, c blob as (b), index (c(57)), +d blob, e blob as (d), index (e(57))) +replace select * from t1; +Warnings: +Warning 1906 The 
value specified for generated column 'c' in table 't' ignored +Warning 1906 The value specified for generated column 'e' in table 't' ignored +Warning 1906 The value specified for generated column 'c' in table 't' ignored +Warning 1906 The value specified for generated column 'e' in table 't' ignored +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +1 2 2 4 4 +update t set a=10, b=1, d=1; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 1 1 1 1 +replace t (a,b,d) values (10,2,2); +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 2 2 2 2 +insert t(a,b,d) values (10) on duplicate key update b=3; +ERROR 21S01: Column count doesn't match value count at row 1 +insert t(a,b,d) values (10,2,2) on duplicate key update b=3, d=3; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 3 3 3 3 +replace t (a,b,d) select 10,4,4; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 4 4 4 4 +insert t(a,b,d) select 10,4,4 on duplicate key update b=5, d=5; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 5 5 5 5 +replace delayed t (a,b,d) values (10,6,6); +flush tables; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 6 6 6 6 +insert delayed t(a,b,d) values (10,6,6) on duplicate key update b=7, d=7; +flush tables; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 7 7 7 7 +load data infile 'MYSQLTEST_VARDIR/tmp/vblobs.txt' replace into table t; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +10 8 8 8 8 +update t set a=11, b=9, d=9 where a>5; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +11 9 9 9 9 +create table t2 select * from t; +update t, t2 set t.b=10, t.d=10 where t.a=t2.a; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +11 10 10 10 10 +update t, t tt set t.b=11, tt.d=11 where t.a=tt.a; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +a b c d e +11 11 11 11 11 +drop table t, t1, t2; +create table t (f1 int, f2 int, f3 int as (f1*2) virtual, key(f3,f2)); +insert into t (f1,f2) values (1,1),(2,2); +create view v as +select a2.f1, a2.f2, a1.f3 +from t a1, t a2 +where a2.f3 <> 0 +with local check option; +update v set f3 = 52; +drop view v; +drop table t; diff --git a/mysql-test/suite/vcol/t/binlog.test b/mysql-test/suite/vcol/t/binlog.test new file mode 100644 index 00000000000..95bb4df4cc5 --- /dev/null +++ b/mysql-test/suite/vcol/t/binlog.test @@ -0,0 +1,55 @@ +--source include/have_innodb.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +# +# MDEV-15243 +# Server crashes in in Field_blob::pack upon REPLACE into view with virtual +# columns with binlog enabled +# + +CREATE TABLE t1 ( + pk SERIAL, + vcol_date DATE AS (col_date) PERSISTENT, + vcol_int INT AS (col_int) VIRTUAL, + vcol_year YEAR AS (col_year) PERSISTENT, + vcol_blob BLOB AS (col_blob) VIRTUAL, + col_date DATE, + col_int INT NULL, + col_blob BLOB NULL, + col_year YEAR, + PRIMARY KEY(pk) +) ENGINE=InnoDB; + +INSERT INTO t1 (col_date,col_int,col_blob,col_year) VALUES ('2010-04-24',5,'foo',1981); +SET SQL_MODE=''; + +set 
binlog_row_image="FULL"; +CREATE VIEW v1 AS SELECT * FROM t1; +REPLACE INTO v1 SELECT pk, vcol_date, vcol_int, vcol_year, vcol_blob, col_date, col_int, col_blob, 1982 FROM t1; +select col_date,col_int,col_blob,col_year from v1; +sync_slave_with_master; +select col_date,col_int,col_blob,col_year from v1; +connection master; +DROP VIEW v1; +set binlog_row_image="MINIMAL"; +CREATE VIEW v1 AS SELECT * FROM t1; +REPLACE INTO v1 SELECT pk, vcol_date, vcol_int, vcol_year, vcol_blob, col_date, col_int, col_blob, 1983 FROM t1; +select col_date,col_int,col_blob,col_year from v1; +sync_slave_with_master; +select col_date,col_int,col_blob,col_year from v1; +connection master; +DROP VIEW v1; +set @@binlog_row_image="NOBLOB"; +CREATE VIEW v1 AS SELECT * FROM t1; +REPLACE INTO v1 SELECT pk, vcol_date, vcol_int, vcol_year, vcol_blob, col_date, col_int, col_blob, 1984 FROM t1; +select col_date,col_int,col_blob,col_year from v1; +sync_slave_with_master; +select col_date,col_int,col_blob,col_year from v1; +connection master; +DROP VIEW v1; +set @@binlog_row_image=default; + +DROP TABLE t1; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/vcol/t/partition.test b/mysql-test/suite/vcol/t/partition.test index 67cda6b6d8b..889724fb1c5 100644 --- a/mysql-test/suite/vcol/t/partition.test +++ b/mysql-test/suite/vcol/t/partition.test @@ -18,3 +18,15 @@ INSERT t1 (id, store_id) VALUES(1, 2), (3, 4), (3, 12), (4, 18); CREATE INDEX idx ON t1(x); SELECT x FROM t1; DROP TABLE t1; + +# +# MDEV-15626 Assertion on update virtual column in partitioned table +# +create table t1 (i int, v int as (i) virtual) +partition by range columns (i) +subpartition by hash(v) subpartitions 3 ( + partition p1 values less than (3), + partition pn values less than (maxvalue)); +insert t1 set i= 0; +set statement sql_mode= '' for update t1 set i= 1, v= 2; +drop table t1; diff --git a/mysql-test/suite/vcol/t/update.test b/mysql-test/suite/vcol/t/update.test index 1797bdd501e..53189ee3219 100644 --- a/mysql-test/suite/vcol/t/update.test +++ b/mysql-test/suite/vcol/t/update.test @@ -15,8 +15,10 @@ drop table t1; # this tests TABLE::mark_columns_needed_for_update() # create table t1 (a int, c int as(a), p varchar(20) as(y), y char(20), index (p,c)); +show create table t1; insert into t1 (a,y) values(1, "yyy"); update t1 set a = 100 where a = 1; +check table t1; drop table t1; # diff --git a/mysql-test/suite/vcol/t/update_binlog.test b/mysql-test/suite/vcol/t/update_binlog.test new file mode 100644 index 00000000000..458aac480c7 --- /dev/null +++ b/mysql-test/suite/vcol/t/update_binlog.test @@ -0,0 +1,14 @@ +# +# Check that vcol update works with binlog enabled +# + +--source include/have_binlog_format_row.inc + +set binlog_row_image="FULL"; +set @@default_storage_engine="myisam"; + +--source update.test + +set binlog_row_image="MINIMAL"; + +--source update.test diff --git a/mysql-test/suite/versioning/common.inc b/mysql-test/suite/versioning/common.inc index 13732146b64..137480e1f62 100644 --- a/mysql-test/suite/versioning/common.inc +++ b/mysql-test/suite/versioning/common.inc @@ -6,7 +6,7 @@ select ifnull(max(transaction_id), 0) into @start_trx_id from mysql.transaction_ set @test_start=now(6); delimiter ~~; -create procedure if not exists verify_vtq() +create procedure if not exists verify_trt() begin set @i= 0; select @@ -22,7 +22,7 @@ begin from mysql.transaction_registry; end~~ -create procedure if not exists verify_vtq_dummy(recs int) +create procedure if not exists verify_trt_dummy(recs int) begin declare i int default 1; 
create temporary table tmp (No int, A bool, B bool, C bool, D bool); diff --git a/mysql-test/suite/versioning/common_finish.inc b/mysql-test/suite/versioning/common_finish.inc index 5e4b5401e03..6e568013827 100644 --- a/mysql-test/suite/versioning/common_finish.inc +++ b/mysql-test/suite/versioning/common_finish.inc @@ -1,6 +1,6 @@ --disable_query_log -drop procedure verify_vtq; -drop procedure verify_vtq_dummy; +drop procedure verify_trt; +drop procedure verify_trt_dummy; drop function current_row; drop function check_row; --enable_query_log diff --git a/mysql-test/suite/versioning/disabled.def b/mysql-test/suite/versioning/disabled.def index 11e45360f19..888298bbb09 100644 --- a/mysql-test/suite/versioning/disabled.def +++ b/mysql-test/suite/versioning/disabled.def @@ -9,4 +9,3 @@ # Do not use any TAB characters for whitespace. # ############################################################################## -cte: MDEV-14820 diff --git a/mysql-test/suite/versioning/r/alter.result b/mysql-test/suite/versioning/r/alter.result index a6f1bb38a76..fafcf3c30b0 100644 --- a/mysql-test/suite/versioning/r/alter.result +++ b/mysql-test/suite/versioning/r/alter.result @@ -265,14 +265,14 @@ t CREATE TABLE `t` ( ) ENGINE=InnoDB DEFAULT CHARSET=latin1 WITH SYSTEM VERSIONING alter table t drop system versioning; insert into t values(1); -call verify_vtq; +call verify_trt; No A B C D alter table t add column trx_start bigint(20) unsigned as row start invisible, add column trx_end bigint(20) unsigned as row end invisible, add period for system_time(trx_start, trx_end), add system versioning; -call verify_vtq; +call verify_trt; No A B C D 1 1 1 1 1 show create table t; @@ -284,13 +284,13 @@ t CREATE TABLE `t` ( PERIOD FOR SYSTEM_TIME (`trx_start`, `trx_end`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 WITH SYSTEM VERSIONING alter table t drop column trx_start, drop column trx_end; -call verify_vtq; +call verify_trt; No A B C D alter table t drop system versioning, algorithm=copy; -call verify_vtq; +call verify_trt; No A B C D alter table t add system versioning, algorithm=copy; -call verify_vtq; +call verify_trt; No A B C D show create table t; Table Create Table @@ -312,7 +312,7 @@ t CREATE TABLE `t` ( select * from t; a b 2 NULL -call verify_vtq; +call verify_trt; No A B C D alter table t drop column b, algorithm=copy; show create table t; @@ -324,7 +324,7 @@ select * from t for system_time all; a 2 1 -call verify_vtq; +call verify_trt; No A B C D alter table t drop system versioning, algorithm=copy; show create table t; @@ -332,7 +332,7 @@ Table Create Table t CREATE TABLE `t` ( `a` int(11) DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 -call verify_vtq; +call verify_trt; No A B C D create or replace table t (a int); insert t values (1),(2),(3),(4); @@ -454,8 +454,6 @@ t CREATE TABLE `t` ( `c` text DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 WITH SYSTEM VERSIONING alter table t add fulltext key (c); -Warnings: -Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID create or replace table t (a int) with system versioning; alter table t drop column a; ERROR HY000: Table `t` must have at least one versioned column @@ -526,5 +524,9 @@ ERROR HY000: System versioning tables in the `mysql` database are not suported alter table user add system versioning; ERROR HY000: System versioning tables in the `mysql` database are not suported use test; +# MDEV-15956 Strange ER_UNSUPPORTED_ACTION_ON_GENERATED_COLUMN upon ALTER on versioning column +create or replace table t1 (i int, j int as (i), s 
timestamp(6) as row start, e timestamp(6) as row end, period for system_time(s,e)) with system versioning; +alter table t1 modify s timestamp(6) as row start; +ERROR HY000: Can not change system versioning field `s` drop database test; create database test; diff --git a/mysql-test/suite/versioning/r/commit_id.result b/mysql-test/suite/versioning/r/commit_id.result index 1f4c1344f7d..abf2eaf91ba 100644 --- a/mysql-test/suite/versioning/r/commit_id.result +++ b/mysql-test/suite/versioning/r/commit_id.result @@ -30,39 +30,39 @@ transaction_id = @tx2 1 set @ts3= now(6); select -vtq_trx_id(@ts0) < @tx0 as A, -vtq_trx_id(@ts0, true) = @tx0 as B, -vtq_trx_id(@ts1) = @tx0 as C, -vtq_trx_id(@ts1, true) = @tx1 as D, -vtq_trx_id(@ts2) = @tx1 as E, -vtq_trx_id(@ts2, true) = @tx2 as F, -vtq_trx_id(@ts3) = @tx2 as G, -vtq_trx_id(@ts3, true) is null as H; +trt_trx_id(@ts0) < @tx0 as A, +trt_trx_id(@ts0, true) = @tx0 as B, +trt_trx_id(@ts1) = @tx0 as C, +trt_trx_id(@ts1, true) = @tx1 as D, +trt_trx_id(@ts2) = @tx1 as E, +trt_trx_id(@ts2, true) = @tx2 as F, +trt_trx_id(@ts3) = @tx2 as G, +trt_trx_id(@ts3, true) is null as H; A B C D E F G H 1 1 1 1 1 1 1 1 select -vtq_commit_id(@ts0) < @tx0 as A, -vtq_commit_id(@ts0, true) = vtq_commit_id(null, @tx0) as B, -vtq_commit_id(@ts1) = vtq_commit_id(null, @tx0) as C, -vtq_commit_id(@ts1, true) = vtq_commit_id(null, @tx1) as D, -vtq_commit_id(@ts2) = vtq_commit_id(null, @tx1) as E, -vtq_commit_id(@ts2, true) = vtq_commit_id(null, @tx2) as F, -vtq_commit_id(@ts3) = vtq_commit_id(null, @tx2) as G, -vtq_commit_id(@ts3, true) is null as H; +trt_commit_id(@ts0) < @tx0 as A, +trt_commit_id(@ts0, true) = trt_commit_id(null, @tx0) as B, +trt_commit_id(@ts1) = trt_commit_id(null, @tx0) as C, +trt_commit_id(@ts1, true) = trt_commit_id(null, @tx1) as D, +trt_commit_id(@ts2) = trt_commit_id(null, @tx1) as E, +trt_commit_id(@ts2, true) = trt_commit_id(null, @tx2) as F, +trt_commit_id(@ts3) = trt_commit_id(null, @tx2) as G, +trt_commit_id(@ts3, true) is null as H; A B C D E F G H 1 1 1 1 1 1 1 1 select -vtq_trx_sees(@tx1, @tx0) as A, -not vtq_trx_sees(@tx0, @tx1) as B, -vtq_trx_sees_eq(@tx1, @tx1) as C, -not vtq_trx_sees(@tx1, @tx1) as D, -vtq_trx_sees(@tx2, 0) as E, -vtq_trx_sees(-1, @tx2) as F; +trt_trx_sees(@tx1, @tx0) as A, +not trt_trx_sees(@tx0, @tx1) as B, +trt_trx_sees_eq(@tx1, @tx1) as C, +not trt_trx_sees(@tx1, @tx1) as D, +trt_trx_sees(@tx2, 0) as E, +trt_trx_sees(-1, @tx2) as F; A B C D E F 1 1 1 1 1 1 -select vtq_trx_sees(0, @tx2); -vtq_trx_sees(0, @tx2) -NULL +select trt_trx_sees(0, @tx2); +trt_trx_sees(0, @tx2) +0 set transaction isolation level read uncommitted; insert into t1 values (); select sys_trx_start from t1 where id = last_insert_id() into @tx3; @@ -88,7 +88,7 @@ select isolation_level = 'REPEATABLE-READ' from mysql.transaction_registry where isolation_level = 'REPEATABLE-READ' 1 drop table t1; -call verify_vtq; +call verify_trt; No A B C D 1 1 1 1 1 2 1 1 1 1 diff --git a/mysql-test/suite/versioning/r/cte.result b/mysql-test/suite/versioning/r/cte.result index fda5e086be2..fc070a70120 100644 --- a/mysql-test/suite/versioning/r/cte.result +++ b/mysql-test/suite/versioning/r/cte.result @@ -1,9 +1,9 @@ +set time_zone="+00:00"; set default_storage_engine=innodb; create or replace table dept ( -dept_id int(10) primary key, +dept_id int(10) primary key, name varchar(100) -) -with system versioning; +) with system versioning; create or replace table emp ( emp_id int(10) primary key, dept_id int(10) not null, @@ -18,16 +18,51 @@ constraint `mgr-fk` foreign key 
(mgr) references emp (emp_id) on delete restrict on update restrict -) -with system versioning; +) with system versioning; insert into dept (dept_id, name) values (10, "accounting"); -insert into emp (emp_id, name, salary, dept_id, mgr) values +insert into emp (emp_id, name, salary, dept_id, mgr) values (1, "bill", 1000, 10, null), (20, "john", 500, 10, 1), (30, "jane", 750, 10,1 ); -select max(sys_trx_start) into @ts_1 from emp; +select row_start into @ts_1 from emp where name="jane"; update emp set mgr=30 where name ="john"; -select sys_trx_start into @ts_2 from emp where name="john"; +explain extended +with ancestors as ( +select e.emp_id, e.name, e.mgr, e.salary from emp as e where name = 'bill' + union +select e.emp_id, e.name, e.mgr, e.salary from emp as e +) select * from ancestors for system_time as of @ts_1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY ALL NULL NULL NULL NULL 8 100.00 +2 DERIVED e ALL NULL NULL NULL NULL 4 100.00 Using where +3 UNION e ALL NULL NULL NULL NULL 4 100.00 Using where +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 with ancestors as (/* select#2 */ select `test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME ALL `e` where `test`.`e`.`name` = 'bill' and `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1` union /* select#3 */ select `test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME ALL `e` where `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1`)/* select#1 */ select `ancestors`.`emp_id` AS `emp_id`,`ancestors`.`name` AS `name`,`ancestors`.`mgr` AS `mgr`,`ancestors`.`salary` AS `salary` from `ancestors` +select row_start into @ts_2 from emp where name="john"; +explain extended /* All report to 'Bill' */ +with recursive +ancestors +as +( +select e.emp_id, e.name, e.mgr, e.salary +from emp for system_time as of timestamp @ts_1 as e +where name = 'bill' + union +select e.emp_id, e.name, e.mgr, e.salary +from emp for system_time as of timestamp @ts_1 as e, +ancestors as a +where e.mgr = a.emp_id +) +select * from ancestors; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY ALL NULL NULL NULL NULL 4 100.00 +2 DERIVED e ALL NULL NULL NULL NULL 4 100.00 Using where +3 RECURSIVE UNION e ALL mgr-fk NULL NULL NULL 4 100.00 Using where +3 RECURSIVE UNION ref key0 key0 5 test.e.mgr 2 100.00 +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 with recursive ancestors as (/* select#2 */ select `test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME ALL `e` where `test`.`e`.`name` = 'bill' and `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1` union /* select#3 */ select `test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME ALL `e` join `ancestors` `a` where `a`.`emp_id` = `test`.`e`.`mgr` and `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1`)/* select#1 */ select `ancestors`.`emp_id` AS `emp_id`,`ancestors`.`name` AS `name`,`ancestors`.`mgr` AS `mgr`,`ancestors`.`salary` AS `salary` from `ancestors` /* All report to 'Bill' */ with recursive ancestors @@ -36,7 +71,7 @@ as select 
e.emp_id, e.name, e.mgr, e.salary from emp for system_time as of timestamp @ts_1 as e where name = 'bill' - union + union select e.emp_id, e.name, e.mgr, e.salary from emp for system_time as of timestamp @ts_1 as e, ancestors as a @@ -47,25 +82,186 @@ emp_id name mgr salary 1 bill NULL 1000 20 john 1 500 30 jane 1 750 -/* Expected 3 rows */ +explain extended with recursive +ancestors +as +( +select e.emp_id, e.name, e.mgr, e.salary +from emp as e +where name = 'bill' + union +select e.emp_id, e.name, e.mgr, e.salary +from emp as e, +ancestors as a +where e.mgr = a.emp_id +) +select * from ancestors for system_time as of timestamp @ts_1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY ALL NULL NULL NULL NULL 4 100.00 +2 DERIVED e ALL NULL NULL NULL NULL 4 100.00 Using where +3 RECURSIVE UNION e ALL mgr-fk NULL NULL NULL 4 100.00 Using where +3 RECURSIVE UNION ref key0 key0 5 test.e.mgr 2 100.00 +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 with recursive ancestors as (/* select#2 */ select `test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME ALL `e` where `test`.`e`.`name` = 'bill' and `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1` union /* select#3 */ select `test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME ALL `e` join `ancestors` `a` where `a`.`emp_id` = `test`.`e`.`mgr` and `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1`)/* select#1 */ select `ancestors`.`emp_id` AS `emp_id`,`ancestors`.`name` AS `name`,`ancestors`.`mgr` AS `mgr`,`ancestors`.`salary` AS `salary` from `ancestors` with recursive ancestors as ( select e.emp_id, e.name, e.mgr, e.salary -from emp for system_time as of timestamp @ts_2 as e +from emp as e where name = 'bill' - union + union select e.emp_id, e.name, e.mgr, e.salary -from emp for system_time as of timestamp @ts_2 as e, +from emp as e, ancestors as a where e.mgr = a.emp_id ) -select * from ancestors; +select * from ancestors for system_time as of timestamp @ts_1; emp_id name mgr salary 1 bill NULL 1000 +20 john 1 500 30 jane 1 750 -20 john 30 500 +explain extended with recursive +ancestors +as +( +select e.emp_id, e.name, e.mgr, e.salary +from emp as e +where name = 'bill' + union +select e.emp_id, e.name, e.mgr, e.salary +from emp as e, +ancestors as a +where e.mgr = a.emp_id +) +select name from emp where emp_id in (select emp_id from ancestors for system_time as of timestamp @ts_1); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY ALL distinct_key NULL NULL NULL 4 100.00 +1 PRIMARY emp ALL PRIMARY NULL NULL NULL 4 75.00 Using where; Using join buffer (flat, BNL join) +4 MATERIALIZED ALL NULL NULL NULL NULL 4 100.00 +2 DERIVED e ALL NULL NULL NULL NULL 4 100.00 Using where +3 RECURSIVE UNION e ALL mgr-fk NULL NULL NULL 4 100.00 Using where +3 RECURSIVE UNION ref key0 key0 5 test.e.mgr 2 100.00 +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 with recursive ancestors as (/* select#2 */ select `test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME ALL `e` where `test`.`e`.`name` = 'bill' and `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1` union /* select#3 */ select 
`test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME ALL `e` join `ancestors` `a` where `a`.`emp_id` = `test`.`e`.`mgr` and `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1`)/* select#1 */ select `test`.`emp`.`name` AS `name` from `test`.`emp` FOR SYSTEM_TIME ALL semi join (`ancestors`) where `test`.`emp`.`emp_id` = `ancestors`.`emp_id` and `test`.`emp`.`row_end` = TIMESTAMP'2038-01-19 03:14:07.999999' +with recursive +ancestors +as +( +select e.emp_id, e.name, e.mgr, e.salary +from emp as e +where name = 'bill' + union +select e.emp_id, e.name, e.mgr, e.salary +from emp as e, +ancestors as a +where e.mgr = a.emp_id +) +select name from emp where emp_id in (select emp_id from ancestors for system_time as of timestamp @ts_1); +name +bill +john +jane +with recursive +ancestors +as +( +select e.emp_id, e.name, e.mgr, e.salary +from emp as e +where name = 'bill' + union +select e.emp_id, e.name, e.mgr, e.salary +from emp as e, +ancestors as a +where e.mgr = a.emp_id +) +select * from ancestors for system_time as of @ts_2, +ancestors for system_time as of @ts_2 a2; +emp_id name mgr salary emp_id name mgr salary +1 bill NULL 1000 1 bill NULL 1000 +30 jane 1 750 1 bill NULL 1000 +20 john 30 500 1 bill NULL 1000 +1 bill NULL 1000 30 jane 1 750 +30 jane 1 750 30 jane 1 750 +20 john 30 500 30 jane 1 750 +1 bill NULL 1000 20 john 30 500 +30 jane 1 750 20 john 30 500 +20 john 30 500 20 john 30 500 +with recursive +ancestors +as +( +select e.emp_id, e.name, e.mgr, e.salary +from emp as e +where name = 'bill' + union +select e.emp_id, e.name, e.mgr, e.salary +from emp as e, +ancestors as a +where e.mgr = a.emp_id +) +select * from ancestors for system_time as of @ts_2, +ancestors for system_time as of now() a2; +ERROR HY000: Conflicting FOR SYSTEM_TIME clauses in WITH RECURSIVE +with recursive +ancestors +as +( +select e.emp_id, e.name, e.mgr, e.salary +from emp as e +where name = 'bill' + union +select e.emp_id, e.name, e.mgr, e.salary +from emp as e, +ancestors as a +where e.mgr = a.emp_id +) +select * from ancestors, +ancestors for system_time as of @ts_2 a2; +ERROR HY000: Conflicting FOR SYSTEM_TIME clauses in WITH RECURSIVE +with recursive +ancestors +as +( +select e.emp_id, e.name, e.mgr, e.salary +from emp as e +where name = 'bill' + union +select e.emp_id, e.name, e.mgr, e.salary +from emp as e, +ancestors as a +where e.mgr = a.emp_id +) +select * from ancestors for system_time as of @ts_2, +ancestors a2; +ERROR HY000: Conflicting FOR SYSTEM_TIME clauses in WITH RECURSIVE +with recursive +ancestors +as +( +select e.emp_id, e.name, e.mgr, e.salary +from emp as e +where name = 'bill' + union +select e.emp_id, e.name, e.mgr, e.salary +from emp as e, +ancestors as a +where e.mgr = a.emp_id +) +select * from ancestors for system_time as of @ts_2 +where emp_id in (select * from ancestors); +ERROR HY000: Conflicting FOR SYSTEM_TIME clauses in WITH RECURSIVE +# SYSTEM_TIME to internal recursive instance is prohibited +with recursive cte as +( +select * from emp +union all +select * from cte for system_time as of @ts_1 +) +select * from cte; +ERROR HY000: Table `cte` is not system-versioned create or replace table emp ( emp_id int, name varchar(127), mgr int) with system versioning; create or replace table addr ( emp_id int, address varchar(100)) with system versioning; insert emp values (1, 'bill', 0), (2, 'bill', 1), (3, 'kate', 1); diff --git 
a/mysql-test/suite/versioning/r/foreign,trx_id.rdiff b/mysql-test/suite/versioning/r/foreign,trx_id.rdiff deleted file mode 100644 index c2c66ed11e1..00000000000 --- a/mysql-test/suite/versioning/r/foreign,trx_id.rdiff +++ /dev/null @@ -1,166 +0,0 @@ ---- suite/versioning/r/foreign.result -+++ suite/versioning/r/foreign,trx_id.reject -@@ -6,8 +6,8 @@ - ) engine innodb; - create table child( - parent_id int, --sys_start timestamp(6) as row start invisible, --sys_end timestamp(6) as row end invisible, -+sys_start bigint(20) unsigned as row start invisible, -+sys_end bigint(20) unsigned as row end invisible, - period for system_time(sys_start, sys_end), - foreign key(parent_id) references parent(id) - on delete restrict -@@ -39,8 +39,8 @@ - ) engine innodb; - create table child( - parent_id int(10) unsigned primary key, --sys_start timestamp(6) as row start invisible, --sys_end timestamp(6) as row end invisible, -+sys_start bigint(20) unsigned as row start invisible, -+sys_end bigint(20) unsigned as row end invisible, - period for system_time(sys_start, sys_end), - foreign key(parent_id) references parent(id) - ) engine innodb with system versioning; -@@ -58,19 +58,39 @@ - ) engine innodb; - create table child( - parent_id int, --sys_start timestamp(6) as row start invisible, --sys_end timestamp(6) as row end invisible, -+sys_start bigint(20) unsigned as row start invisible, -+sys_end bigint(20) unsigned as row end invisible, - period for system_time(sys_start, sys_end), - foreign key(parent_id) references parent(id) - on delete cascade - on update cascade - ) engine innodb with system versioning; --ERROR HY000: CASCADE is not supported for TIMESTAMP(6) AS ROW START/END system-versioned tables -+insert into parent values(1); -+insert into child values(1); -+delete from parent where id = 1; -+delete from child where parent_id = 1; -+delete from parent where id = 1; -+select * from child; -+parent_id -+select * from child for system_time all; -+parent_id -+1 -+insert into parent values(1); -+insert into child values(1); -+update parent set id = id + 1; -+select * from child; -+parent_id -+2 -+select * from child for system_time all; -+parent_id -+1 -+2 -+drop table child; - drop table parent; - create or replace table parent ( - id int primary key, --sys_start timestamp(6) as row start invisible, --sys_end timestamp(6) as row end invisible, -+sys_start bigint(20) unsigned as row start invisible, -+sys_end bigint(20) unsigned as row end invisible, - period for system_time(sys_start, sys_end) - ) with system versioning - engine innodb; -@@ -97,8 +117,8 @@ - create or replace table child ( - id int primary key, - parent_id int not null, --row_start timestamp(6) as row start invisible, --row_end timestamp(6) as row end invisible, -+row_start bigint(20) unsigned as row start invisible, -+row_end bigint(20) unsigned as row end invisible, - period for system_time(row_start, row_end), - constraint `parent-fk` - foreign key (parent_id) references parent (id) -@@ -106,7 +126,18 @@ - on update restrict - ) with system versioning - engine innodb; --ERROR HY000: CASCADE is not supported for TIMESTAMP(6) AS ROW START/END system-versioned tables -+insert into parent (id) values (3); -+insert into child (id, parent_id) values (3, 3); -+## FIXME: #415 update of foreign constraints is disabled -+delete from child; -+## FIXME END -+delete from parent; -+select * from child; -+id parent_id -+select *, row_start < row_end, row_end < MAXVAL from child for system_time all; -+id parent_id row_start < row_end row_end < 
MAXVAL -+3 3 1 1 -+drop table child; - drop table parent; - ################# - # Test SET NULL # -@@ -116,22 +147,39 @@ - ) engine innodb; - create table child( - parent_id int, --sys_start timestamp(6) as row start invisible, --sys_end timestamp(6) as row end invisible, -+sys_start bigint(20) unsigned as row start invisible, -+sys_end bigint(20) unsigned as row end invisible, - period for system_time(sys_start, sys_end), - foreign key(parent_id) references parent(id) - on delete set null - on update set null - ) engine innodb with system versioning; --ERROR HY000: SET NULL is not supported for TIMESTAMP(6) AS ROW START/END system-versioned tables -+insert into parent values(1); -+insert into child values(1); -+delete from child; -+insert into child values(1); -+## FIXME: #415 update of foreign constraints is disabled -+delete from child where parent_id = 1; -+## FIXME END -+delete from parent where id = 1; -+select * from child; -+parent_id -+select * from child for system_time from timestamp 0 to timestamp now(6); -+parent_id -+1 -+1 -+delete from child; -+insert into parent values(1); -+insert into child values(1); -+drop table child; - drop table parent; - ########################### - # Parent table is foreign # - ########################### - create or replace table parent( - id int unique key, --sys_start timestamp(6) as row start invisible, --sys_end timestamp(6) as row end invisible, -+sys_start bigint(20) unsigned as row start invisible, -+sys_end bigint(20) unsigned as row end invisible, - period for system_time(sys_start, sys_end) - ) engine innodb with system versioning; - create or replace table child( -@@ -162,16 +210,16 @@ - create or replace table a ( - cola int(10) primary key, - v_cola int(10) as (cola mod 10) virtual, --sys_start timestamp(6) as row start invisible, --sys_end timestamp(6) as row end invisible, -+sys_start bigint(20) unsigned as row start invisible, -+sys_end bigint(20) unsigned as row end invisible, - period for system_time(sys_start, sys_end) - ) engine=innodb with system versioning; - create index v_cola on a (v_cola); - create or replace table b( - cola int(10), - v_cola int(10), --sys_start timestamp(6) as row start invisible, --sys_end timestamp(6) as row end invisible, -+sys_start bigint(20) unsigned as row start invisible, -+sys_end bigint(20) unsigned as row end invisible, - period for system_time(sys_start, sys_end) - ) engine=innodb with system versioning; - alter table b add constraint `v_cola_fk` diff --git a/mysql-test/suite/versioning/r/insert2.result b/mysql-test/suite/versioning/r/insert2.result index 1a8131130b1..03f8e5875a8 100644 --- a/mysql-test/suite/versioning/r/insert2.result +++ b/mysql-test/suite/versioning/r/insert2.result @@ -71,24 +71,6 @@ i c current_row 1 foo 1 drop table t1; drop table t2; -set timestamp=1000000019; -select now() < sysdate(); -now() < sysdate() -1 -create table t1 (a int) with system versioning; -insert t1 values (1); -set @a=sysdate(6); -select * from t1 for system_time as of now(6); -a -select * from t1 for system_time as of sysdate(6); -a -1 -update t1 set a=2; -delete from t1; -select *, row_start > @a, row_end > @a from t1 for system_time all; -a row_start > @a row_end > @a -1 0 1 -2 1 1 # # MDEV-14871 Server crashes in fill_record / fill_record_n_invoke_before_triggers upon inserting into versioned table with trigger # diff --git a/mysql-test/suite/versioning/r/partition.result b/mysql-test/suite/versioning/r/partition.result index e44c5279e60..bfec0ce2d4b 100644 --- 
a/mysql-test/suite/versioning/r/partition.result +++ b/mysql-test/suite/versioning/r/partition.result @@ -479,6 +479,28 @@ insert into t1 values (1),(2),(3); update t1 set a = 4; delete from t1; delete from t1 where a is not null; +# MDEV-14823 Wrong error message upon selecting from a system_time partition +create or replace table t1 (i int) with system versioning partition by system_time limit 10 (partition p0 history, partition pn current); +select * from t1 partition (p0) for system_time all; +ERROR HY000: SYSTEM_TIME partitions in table `t1` does not support historical query +# MDEV-15380 Index for versioned table gets corrupt after partitioning and DELETE +create or replace table t1 (pk int primary key) +engine=myisam +with system versioning +partition by key() partitions 3; +set timestamp=1523466002.799571; +insert into t1 values (11),(12); +set timestamp=1523466004.169435; +delete from t1 where pk in (11, 12); +Same test but for Aria storage engine +create or replace table t1 (pk int primary key) +engine=aria +with system versioning +partition by key() partitions 3; +set timestamp=1523466002.799571; +insert into t1 values (11),(12); +set timestamp=1523466004.169435; +delete from t1 where pk in (11, 12); # Test cleanup drop database test; create database test; diff --git a/mysql-test/suite/versioning/r/rpl_stmt.result b/mysql-test/suite/versioning/r/partition_rotation.result similarity index 91% rename from mysql-test/suite/versioning/r/rpl_stmt.result rename to mysql-test/suite/versioning/r/partition_rotation.result index 5c725bae6f6..7e25f122238 100644 --- a/mysql-test/suite/versioning/r/rpl_stmt.result +++ b/mysql-test/suite/versioning/r/partition_rotation.result @@ -1,5 +1,3 @@ -include/master-slave.inc -[connection master] set timestamp=unix_timestamp('2001-02-03 10:20:30'); create or replace table t1 (i int) with system versioning partition by system_time interval 1 day @@ -10,22 +8,23 @@ insert t1 values (1); delete from t1; set timestamp=unix_timestamp('2001-02-04 10:20:50'); insert t1 values (2); +Warnings: +Warning 4114 Versioned table `test`.`t1`: partition `p1` is full, add more HISTORY partitions delete from t1; -connection slave; +Warnings: +Warning 4114 Versioned table `test`.`t1`: partition `p1` is full, add more HISTORY partitions select subpartition_name,partition_description,table_rows from information_schema.partitions where table_schema='test' and table_name='t1'; subpartition_name partition_description table_rows p1sp0 2001-02-04 10:20:30 1 p1sp1 2001-02-04 10:20:30 1 pnsp0 CURRENT 0 pnsp1 CURRENT 0 -connection master; set timestamp=unix_timestamp('2001-02-04 10:20:55'); alter table t1 add partition (partition p0 history, partition p2 history); set timestamp=unix_timestamp('2001-02-04 10:30:00'); insert t1 values (4),(5); set timestamp=unix_timestamp('2001-02-04 10:30:10'); update t1 set i=6 where i=5; -connection slave; select subpartition_name,partition_description,table_rows from information_schema.partitions where table_schema='test' and table_name='t1'; subpartition_name partition_description table_rows p1sp0 2001-02-04 10:20:30 1 @@ -56,6 +55,4 @@ i explain partitions select * from t1 for system_time all where row_end = @ts; id select_type table partitions type possible_keys key key_len ref rows Extra 1 SIMPLE t1 p1_p1sp0,p1_p1sp1 # NULL NULL NULL NULL # # -connection master; drop table t1; -include/rpl_end.inc diff --git a/mysql-test/suite/versioning/r/select.result b/mysql-test/suite/versioning/r/select.result index ff13b0ebe76..80c408980ec 100644 --- 
a/mysql-test/suite/versioning/r/select.result +++ b/mysql-test/suite/versioning/r/select.result @@ -339,6 +339,8 @@ select x from t1 for system_time as of timestamp @ts; x 1 set @ts= timestamp'1-1-1 0:0:0'; +select x from t1 for system_time as of timestamp @ts; +x ## TRANSACTION specifier select x from t1 for system_time as of transaction @trx_start; x @@ -390,7 +392,7 @@ create or replace table t1 (x int) with system versioning; select * from t1 for system_time as of current_timestamp; x select * from t1 for system_time as of now; -ERROR 42S22: Unknown column 'now' in 'on clause' +ERROR 42S22: Unknown column 'now' in 'FOR SYSTEM_TIME' ### Issue #405, NATURAL JOIN failure create or replace table t1 (a int) with system versioning; create or replace table t2 (b int); @@ -512,7 +514,31 @@ Warnings: Note 1003 select `test`.`t1`.`f1` AS `f1` from `test`.`t1` FOR SYSTEM_TIME ALL join `test`.`t2` left join (`test`.`t3` left join `test`.`t4` FOR SYSTEM_TIME ALL on(`test`.`t4`.`f4` = `test`.`t2`.`f2` and `test`.`t4`.`row_end` = TIMESTAMP'2038-01-19 03:14:07.999999')) on(`test`.`t3`.`f3` = `test`.`t2`.`f2`) where `test`.`t1`.`row_end` = TIMESTAMP'2038-01-19 03:14:07.999999' drop view v1; drop table t1, t2, t3, t4; -call verify_vtq_dummy(34); +# +# MDEV-15980 FOR SYSTEM_TIME BETWEEN and FROM .. TO work with negative intervals +# +create or replace table t1 ( +a int, +row_start SYS_DATATYPE as row start invisible, +row_end SYS_DATATYPE as row end invisible, +period for system_time (row_start, row_end) +) with system versioning; +insert into t1 values (1); +delete from t1; +select row_start from t1 for system_time all into @t1; +select row_end from t1 for system_time all into @t2; +select * from t1 for system_time between @t1 and @t2; +a +1 +select * from t1 for system_time between @t2 and @t1; +a +select * from t1 for system_time from @t1 to @t2; +a +1 +select * from t1 for system_time from @t2 to @t1; +a +drop table t1; +call verify_trt_dummy(34); No A B C D 1 1 1 1 1 2 1 1 1 1 diff --git a/mysql-test/suite/versioning/r/select2,trx_id.rdiff b/mysql-test/suite/versioning/r/select2,trx_id.rdiff index 6b1d6527a21..a657b94c031 100644 --- a/mysql-test/suite/versioning/r/select2,trx_id.rdiff +++ b/mysql-test/suite/versioning/r/select2,trx_id.rdiff @@ -5,7 +5,7 @@ insert into t1(x, y) values(3, 33); select sys_start from t1 where x = 3 and y = 33 into @t1; +set @x1= @t1; -+select vtq_commit_ts(@x1) into @t1; ++select trt_commit_ts(@x1) into @t1; select x, y from t1; x y 0 100 diff --git a/mysql-test/suite/versioning/r/select2.result b/mysql-test/suite/versioning/r/select2.result index 9267ab8c913..bb5c82ee444 100644 --- a/mysql-test/suite/versioning/r/select2.result +++ b/mysql-test/suite/versioning/r/select2.result @@ -332,5 +332,13 @@ select * from (select * from t1 for system_time all, t2 for system_time all) for ERROR HY000: Table `t` is not system-versioned select * from (t1 for system_time all join t2 for system_time all) for system_time all; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1 +# MDEV-16043 Assertion thd->Item_change_list::is_empty() failed in mysql_parse upon SELECT from a view reading from a versioned table +create or replace table t1 (a int) with system versioning; +create or replace view v1 as select * from t1; +prepare stmt from "select * from t1 where exp( '20010609211642053929' )"; +execute stmt; +ERROR 22003: DOUBLE value is out of range in 
'exp('20010609211642053929')' +select a from v1; +a drop view v1; drop table t1, t2; diff --git a/mysql-test/suite/versioning/r/sysvars.result b/mysql-test/suite/versioning/r/sysvars.result index 22f53ca6fbe..b23742462d1 100644 --- a/mysql-test/suite/versioning/r/sysvars.result +++ b/mysql-test/suite/versioning/r/sysvars.result @@ -126,4 +126,7 @@ select * from t for system_time between '0-0-0' and current_timestamp(6); a 2 1 +show status like "Feature_system_versioning"; +Variable_name Value +Feature_system_versioning 2 drop table t; diff --git a/mysql-test/suite/versioning/r/truncate.result b/mysql-test/suite/versioning/r/truncate.result index 181b120eafb..308501915dc 100644 --- a/mysql-test/suite/versioning/r/truncate.result +++ b/mysql-test/suite/versioning/r/truncate.result @@ -1,7 +1,12 @@ create table t (a int); delete history from t before system_time now(); ERROR HY000: Table `t` is not system-versioned -create or replace table t (a int) with system versioning; +create or replace table t ( +a int, +row_start SYS_TYPE as row start invisible, +row_end SYS_TYPE as row end invisible, +period for system_time (row_start, row_end)) +with system versioning; insert into t values (1); update t set a=2; set @test = 'correct'; @@ -12,7 +17,12 @@ select @test from t; @test correct drop table t; -create table t (a int) with system versioning; +create or replace table t ( +a int, +row_start SYS_TYPE as row start invisible, +row_end SYS_TYPE as row end invisible, +period for system_time (row_start, row_end)) +with system versioning; insert into t values (1), (2); update t set a=11 where a=1; set @ts1=now(6); @@ -48,7 +58,6 @@ drop procedure truncate_sp; # Truncate partitioned create or replace table t (a int) with system versioning -engine myisam partition by system_time limit 1 ( partition p0 history, partition p1 history, @@ -61,7 +70,12 @@ select * from t for system_time all; a 3 # VIEW -create or replace table t (i int) with system versioning; +create or replace table t ( +i int, +row_start SYS_TYPE as row start invisible, +row_end SYS_TYPE as row end invisible, +period for system_time (row_start, row_end)) +with system versioning; delete history from t; create or replace view v as select * from t; delete history from v; @@ -86,3 +100,5 @@ ERROR 42S02: 'v' is a view unlock tables; drop view v; drop table t; +drop database test; +create database test; diff --git a/mysql-test/suite/versioning/r/trx_id.result b/mysql-test/suite/versioning/r/trx_id.result index c8c9b69ca15..7b2ea04985d 100644 --- a/mysql-test/suite/versioning/r/trx_id.result +++ b/mysql-test/suite/versioning/r/trx_id.result @@ -153,5 +153,229 @@ select x, row_start < row_end from t1 for system_time all; x row_start < row_end 4 1 2 1 +# MDEV-16010 Too many rows with AS OF point_in_the_past_or_NULL +create or replace table t1 ( +x int, +row_start bigint unsigned as row start invisible, +row_end bigint unsigned as row end invisible, +period for system_time (row_start, row_end) +) with system versioning engine innodb; +insert into t1 (x) values (1); +delete from t1; +select * from t1 for system_time as of timestamp'1990-1-1 00:00'; +x +select * from t1 for system_time as of NULL; +x +# MDEV-16024 transaction_registry.begin_timestamp is wrong for explicit transactions +create or replace table t1 ( +x int(11) default null, +row_start bigint(20) unsigned generated always as row start invisible, +row_end bigint(20) unsigned generated always as row end invisible, +period for system_time (row_start, row_end) +) engine=innodb with system 
versioning; +begin; +set @ts1= now(6); +insert into t1 values (1); +commit; +select row_start from t1 into @trx_id; +select trt_begin_ts(@trx_id) <= @ts1 as BEGIN_TS_GOOD; +BEGIN_TS_GOOD +1 drop database test; create database test; +use test; +# +# MDEV-16100 FOR SYSTEM_TIME erroneously resolves string user variables as transaction IDs +# +CREATE TABLE t1 ( +x INT, +sys_trx_start BIGINT UNSIGNED AS ROW START, +sys_trx_end BIGINT UNSIGNED AS ROW END, +PERIOD FOR SYSTEM_TIME (sys_trx_start, sys_trx_end) +) WITH SYSTEM VERSIONING ENGINE=INNODB; +INSERT INTO t1 (x) VALUES (1); +SET @ts= DATE_ADD(NOW(), INTERVAL 1 YEAR); +EXPLAIN EXTENDED SELECT x FROM t1 FOR SYSTEM_TIME AS OF TRANSACTION @ts; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 1 100.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`x` AS `x` from `test`.`t1` FOR SYSTEM_TIME ALL where trt_trx_sees(`test`.`t1`.`sys_trx_end`,@`ts`) and trt_trx_sees_eq(@`ts`,`test`.`t1`.`sys_trx_start`) +EXPLAIN EXTENDED SELECT x FROM t1 FOR SYSTEM_TIME AS OF TIMESTAMP @ts; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 1 100.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`x` AS `x` from `test`.`t1` FOR SYSTEM_TIME ALL where trt_trx_sees(`test`.`t1`.`sys_trx_end`,(trt_trx_id(@`ts`))) and trt_trx_sees_eq((trt_trx_id(@`ts`)),`test`.`t1`.`sys_trx_start`) +EXPLAIN EXTENDED SELECT x FROM t1 FOR SYSTEM_TIME AS OF @ts; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 1 100.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`x` AS `x` from `test`.`t1` FOR SYSTEM_TIME ALL where trt_trx_sees(`test`.`t1`.`sys_trx_end`,(trt_trx_id(@`ts`))) and trt_trx_sees_eq((trt_trx_id(@`ts`)),`test`.`t1`.`sys_trx_start`) +DROP TABLE t1; +# +# Testing AS OF with expressions of various kinds and data types +# +CREATE TABLE t1 +( +x INT, +sys_trx_start BIGINT UNSIGNED AS ROW START INVISIBLE, +sys_trx_end BIGINT UNSIGNED AS ROW END INVISIBLE, +PERIOD FOR SYSTEM_TIME (sys_trx_start, sys_trx_end) +) WITH SYSTEM VERSIONING; +INSERT INTO t1 VALUES (1); +CREATE TABLE t2 +( +x INT, +sys_trx_start TIMESTAMP(6) AS ROW START INVISIBLE, +sys_trx_end TIMESTAMP(6) AS ROW END INVISIBLE, +PERIOD FOR SYSTEM_TIME (sys_trx_start, sys_trx_end) +) WITH SYSTEM VERSIONING; +INSERT INTO t2 VALUES (1); +# +# ROW is not supported +# +SELECT * FROM t1 FOR SYSTEM_TIME AS OF (1,1); +ERROR HY000: Illegal parameter data type row for operation 'FOR SYSTEM_TIME' +SELECT * FROM t2 FOR SYSTEM_TIME AS OF (1,1); +ERROR HY000: Illegal parameter data type row for operation 'FOR SYSTEM_TIME' +# +# DOUBLE is not supported, use explicit CAST +# +SELECT * FROM t1 FOR SYSTEM_TIME AS OF RAND(); +ERROR HY000: Illegal parameter data type double for operation 'FOR SYSTEM_TIME' +SELECT * FROM t1 FOR SYSTEM_TIME AS OF (RAND()); +ERROR HY000: Illegal parameter data type double for operation 'FOR SYSTEM_TIME' +SELECT * FROM t1 FOR SYSTEM_TIME AS OF COALESCE(RAND()); +ERROR HY000: Illegal parameter data type double for operation 'FOR SYSTEM_TIME' +SELECT * FROM t2 FOR SYSTEM_TIME AS OF RAND(); +ERROR HY000: Illegal parameter data type double for operation 'FOR SYSTEM_TIME' +SELECT * FROM t2 FOR SYSTEM_TIME AS OF (RAND()); +ERROR HY000: Illegal parameter data type double for operation 'FOR SYSTEM_TIME' +SELECT * FROM t2 FOR SYSTEM_TIME AS OF COALESCE(RAND()); +ERROR HY000: Illegal parameter data type double for 
operation 'FOR SYSTEM_TIME' +# +# DECIMAL is not supported, use explicit CAST +# +SELECT * FROM t1 FOR SYSTEM_TIME AS OF 10.1; +ERROR HY000: Illegal parameter data type decimal for operation 'FOR SYSTEM_TIME' +SELECT * FROM t1 FOR SYSTEM_TIME AS OF COALESCE(10.1); +ERROR HY000: Illegal parameter data type decimal for operation 'FOR SYSTEM_TIME' +SELECT * FROM t2 FOR SYSTEM_TIME AS OF 10.1; +ERROR HY000: Illegal parameter data type decimal for operation 'FOR SYSTEM_TIME' +SELECT * FROM t2 FOR SYSTEM_TIME AS OF COALESCE(10.1); +ERROR HY000: Illegal parameter data type decimal for operation 'FOR SYSTEM_TIME' +# +# YEAR is not supported, use explicit CAST +# +BEGIN NOT ATOMIC +DECLARE var YEAR; +SELECT * FROM t1 FOR SYSTEM_TIME AS OF var; +END; +$$ +ERROR HY000: Illegal parameter data type year for operation 'FOR SYSTEM_TIME' +BEGIN NOT ATOMIC +DECLARE var YEAR; +SELECT * FROM t2 FOR SYSTEM_TIME AS OF var; +END; +$$ +ERROR HY000: Illegal parameter data type year for operation 'FOR SYSTEM_TIME' +# +# ENUM is not supported, use explicit CAST +# +BEGIN NOT ATOMIC +DECLARE var ENUM('xxx') DEFAULT 'xxx'; +SELECT * FROM t1 FOR SYSTEM_TIME AS OF var; +END; +$$ +ERROR HY000: Illegal parameter data type enum for operation 'FOR SYSTEM_TIME' +BEGIN NOT ATOMIC +DECLARE var ENUM('xxx') DEFAULT 'xxx'; +SELECT * FROM t2 FOR SYSTEM_TIME AS OF var; +END; +$$ +ERROR HY000: Illegal parameter data type enum for operation 'FOR SYSTEM_TIME' +# +# SET is not supported, use explicit CAST +# +BEGIN NOT ATOMIC +DECLARE var SET('xxx') DEFAULT 'xxx'; +SELECT * FROM t1 FOR SYSTEM_TIME AS OF var; +END; +$$ +ERROR HY000: Illegal parameter data type set for operation 'FOR SYSTEM_TIME' +BEGIN NOT ATOMIC +DECLARE var SET('xxx') DEFAULT 'xxx'; +SELECT * FROM t2 FOR SYSTEM_TIME AS OF var; +END; +$$ +ERROR HY000: Illegal parameter data type set for operation 'FOR SYSTEM_TIME' +# +# BIT is resolved to TRANSACTION +# +BEGIN NOT ATOMIC +DECLARE var BIT(10); +SELECT * FROM t1 FOR SYSTEM_TIME AS OF var; +END; +$$ +x +BEGIN NOT ATOMIC +DECLARE var BIT(10); +SELECT * FROM t2 FOR SYSTEM_TIME AS OF var; +END; +$$ +ERROR HY000: Transaction system versioning for `t2` is not supported +# +# String literals resolve to TIMESTAMP +# +SELECT * FROM t1 FOR SYSTEM_TIME AS OF '2038-12-30 00:00:00'; +x +1 +SELECT * FROM t2 FOR SYSTEM_TIME AS OF '2038-12-30 00:00:00'; +x +# +# HEX hybrids resolve to TRANSACTION +# +SELECT * FROM t1 FOR SYSTEM_TIME AS OF (0x60); +x +1 +SELECT * FROM t2 FOR SYSTEM_TIME AS OF (0x60); +ERROR HY000: Transaction system versioning for `t2` is not supported +# +# BIT literals resolve to TRANSACTION +# +SELECT * FROM t1 FOR SYSTEM_TIME AS OF (b'1100000'); +x +1 +SELECT * FROM t2 FOR SYSTEM_TIME AS OF (b'1100000'); +ERROR HY000: Transaction system versioning for `t2` is not supported +DROP TABLE t1, t2; +# +# MDEV-16094 Crash when using AS OF with a stored function +# +CREATE FUNCTION fts() RETURNS DATETIME RETURN '2001-01-01 10:20:30'; +CREATE FUNCTION ftx() RETURNS BIGINT UNSIGNED RETURN 1; +CREATE TABLE ttx +( +x INT, +start_timestamp BIGINT UNSIGNED GENERATED ALWAYS AS ROW START, +end_timestamp BIGINT UNSIGNED GENERATED ALWAYS AS ROW END, +PERIOD FOR SYSTEM_TIME(start_timestamp, end_timestamp) +) ENGINE=InnoDB WITH SYSTEM VERSIONING; +CREATE TABLE tts +( +x INT, +start_timestamp TIMESTAMP(6) GENERATED ALWAYS AS ROW START, +end_timestamp TIMESTAMP(6) GENERATED ALWAYS AS ROW END, +PERIOD FOR SYSTEM_TIME(start_timestamp, end_timestamp) +) ENGINE=InnoDB WITH SYSTEM VERSIONING; +SELECT * FROM tts FOR SYSTEM_TIME AS OF fts(); +x 
start_timestamp end_timestamp +SELECT * FROM tts FOR SYSTEM_TIME AS OF ftx(); +ERROR HY000: Transaction system versioning for `tts` is not supported +SELECT * FROM ttx FOR SYSTEM_TIME AS OF fts(); +x start_timestamp end_timestamp +SELECT * FROM ttx FOR SYSTEM_TIME AS OF ftx(); +x start_timestamp end_timestamp +DROP TABLE tts; +DROP TABLE ttx; +DROP FUNCTION fts; +DROP FUNCTION ftx; diff --git a/mysql-test/suite/versioning/t/alter.test b/mysql-test/suite/versioning/t/alter.test index 83f8898c672..d570b6f4259 100644 --- a/mysql-test/suite/versioning/t/alter.test +++ b/mysql-test/suite/versioning/t/alter.test @@ -172,23 +172,23 @@ alter table t drop system versioning; insert into t values(1); -call verify_vtq; +call verify_trt; alter table t add column trx_start bigint(20) unsigned as row start invisible, add column trx_end bigint(20) unsigned as row end invisible, add period for system_time(trx_start, trx_end), add system versioning; -call verify_vtq; +call verify_trt; show create table t; alter table t drop column trx_start, drop column trx_end; -call verify_vtq; +call verify_trt; alter table t drop system versioning, algorithm=copy; -call verify_vtq; +call verify_trt; alter table t add system versioning, algorithm=copy; -call verify_vtq; +call verify_trt; show create table t; @@ -198,31 +198,31 @@ select * from t for system_time all; alter table t add column b int, algorithm=copy; show create table t; select * from t; -call verify_vtq; +call verify_trt; alter table t drop column b, algorithm=copy; show create table t; select * from t for system_time all; -call verify_vtq; +call verify_trt; ## FIXME: #414 IB: inplace for VERS_TIMESTAMP versioning if (0) { alter table t drop system versioning, algorithm=inplace; -call verify_vtq; +call verify_trt; alter table t add system versioning, algorithm=inplace; -call verify_vtq; +call verify_trt; show create table t; update t set a= 1; select * from t for system_time all; -call verify_vtq; +call verify_trt; alter table t add column b int, algorithm=inplace; show create table t; select * from t; -call verify_vtq; +call verify_trt; alter table t drop column b, algorithm=inplace; show create table t; @@ -232,7 +232,7 @@ select * from t for system_time all; alter table t drop system versioning, algorithm=copy; show create table t; -call verify_vtq; +call verify_trt; # nullable autoinc test w/o versioning create or replace table t (a int); @@ -341,7 +341,7 @@ select * from t; show create table t; -call verify_vtq; +call verify_trt; } create or replace table t (a int) with system versioning; @@ -449,5 +449,10 @@ create or replace table t (x int) with system versioning; alter table user add system versioning; use test; +--echo # MDEV-15956 Strange ER_UNSUPPORTED_ACTION_ON_GENERATED_COLUMN upon ALTER on versioning column +create or replace table t1 (i int, j int as (i), s timestamp(6) as row start, e timestamp(6) as row end, period for system_time(s,e)) with system versioning; +--error ER_VERS_ALTER_SYSTEM_FIELD +alter table t1 modify s timestamp(6) as row start; + drop database test; create database test; diff --git a/mysql-test/suite/versioning/t/commit_id.test b/mysql-test/suite/versioning/t/commit_id.test index 68d73416054..0f9cf1eb391 100644 --- a/mysql-test/suite/versioning/t/commit_id.test +++ b/mysql-test/suite/versioning/t/commit_id.test @@ -10,7 +10,7 @@ with system versioning engine innodb; -# VTQ_TRX_ID, VTQ_COMMIT_ID, VTQ_TRX_SEES # +# TRT_TRX_ID, TRT_COMMIT_ID, TRT_TRX_SEES # insert into t1 values (); @@ -36,36 +36,36 @@ order by transaction_id 
desc limit 1; set @ts3= now(6); select - vtq_trx_id(@ts0) < @tx0 as A, - vtq_trx_id(@ts0, true) = @tx0 as B, - vtq_trx_id(@ts1) = @tx0 as C, - vtq_trx_id(@ts1, true) = @tx1 as D, - vtq_trx_id(@ts2) = @tx1 as E, - vtq_trx_id(@ts2, true) = @tx2 as F, - vtq_trx_id(@ts3) = @tx2 as G, - vtq_trx_id(@ts3, true) is null as H; + trt_trx_id(@ts0) < @tx0 as A, + trt_trx_id(@ts0, true) = @tx0 as B, + trt_trx_id(@ts1) = @tx0 as C, + trt_trx_id(@ts1, true) = @tx1 as D, + trt_trx_id(@ts2) = @tx1 as E, + trt_trx_id(@ts2, true) = @tx2 as F, + trt_trx_id(@ts3) = @tx2 as G, + trt_trx_id(@ts3, true) is null as H; select - vtq_commit_id(@ts0) < @tx0 as A, - vtq_commit_id(@ts0, true) = vtq_commit_id(null, @tx0) as B, - vtq_commit_id(@ts1) = vtq_commit_id(null, @tx0) as C, - vtq_commit_id(@ts1, true) = vtq_commit_id(null, @tx1) as D, - vtq_commit_id(@ts2) = vtq_commit_id(null, @tx1) as E, - vtq_commit_id(@ts2, true) = vtq_commit_id(null, @tx2) as F, - vtq_commit_id(@ts3) = vtq_commit_id(null, @tx2) as G, - vtq_commit_id(@ts3, true) is null as H; + trt_commit_id(@ts0) < @tx0 as A, + trt_commit_id(@ts0, true) = trt_commit_id(null, @tx0) as B, + trt_commit_id(@ts1) = trt_commit_id(null, @tx0) as C, + trt_commit_id(@ts1, true) = trt_commit_id(null, @tx1) as D, + trt_commit_id(@ts2) = trt_commit_id(null, @tx1) as E, + trt_commit_id(@ts2, true) = trt_commit_id(null, @tx2) as F, + trt_commit_id(@ts3) = trt_commit_id(null, @tx2) as G, + trt_commit_id(@ts3, true) is null as H; select - vtq_trx_sees(@tx1, @tx0) as A, - not vtq_trx_sees(@tx0, @tx1) as B, - vtq_trx_sees_eq(@tx1, @tx1) as C, - not vtq_trx_sees(@tx1, @tx1) as D, - vtq_trx_sees(@tx2, 0) as E, - vtq_trx_sees(-1, @tx2) as F; + trt_trx_sees(@tx1, @tx0) as A, + not trt_trx_sees(@tx0, @tx1) as B, + trt_trx_sees_eq(@tx1, @tx1) as C, + not trt_trx_sees(@tx1, @tx1) as D, + trt_trx_sees(@tx2, 0) as E, + trt_trx_sees(-1, @tx2) as F; -select vtq_trx_sees(0, @tx2); +select trt_trx_sees(0, @tx2); -# VTQ_ISO_LEVEL # +# TRT_ISO_LEVEL # set transaction isolation level read uncommitted; insert into t1 values (); @@ -89,6 +89,6 @@ select isolation_level = 'REPEATABLE-READ' from mysql.transaction_registry where drop table t1; -call verify_vtq; +call verify_trt; -- source suite/versioning/common_finish.inc diff --git a/mysql-test/suite/versioning/t/cte.test b/mysql-test/suite/versioning/t/cte.test index 9df0bb3dfba..0055b2100d3 100644 --- a/mysql-test/suite/versioning/t/cte.test +++ b/mysql-test/suite/versioning/t/cte.test @@ -1,10 +1,10 @@ -- source include/have_innodb.inc +set time_zone="+00:00"; set default_storage_engine=innodb; create or replace table dept ( - dept_id int(10) primary key, + dept_id int(10) primary key, name varchar(100) -) -with system versioning; +) with system versioning; create or replace table emp ( emp_id int(10) primary key, @@ -20,21 +20,29 @@ create or replace table emp ( foreign key (mgr) references emp (emp_id) on delete restrict on update restrict -) -with system versioning; +) with system versioning; insert into dept (dept_id, name) values (10, "accounting"); -insert into emp (emp_id, name, salary, dept_id, mgr) values +insert into emp (emp_id, name, salary, dept_id, mgr) values (1, "bill", 1000, 10, null), (20, "john", 500, 10, 1), (30, "jane", 750, 10,1 ); -select max(sys_trx_start) into @ts_1 from emp; +select row_start into @ts_1 from emp where name="jane"; update emp set mgr=30 where name ="john"; -select sys_trx_start into @ts_2 from emp where name="john"; +explain extended +with ancestors as ( + select e.emp_id, e.name, e.mgr, e.salary 
from emp as e where name = 'bill' + union + select e.emp_id, e.name, e.mgr, e.salary from emp as e +) select * from ancestors for system_time as of @ts_1; + +select row_start into @ts_2 from emp where name="john"; + +let $q= /* All report to 'Bill' */ with recursive ancestors @@ -43,7 +51,7 @@ as select e.emp_id, e.name, e.mgr, e.salary from emp for system_time as of timestamp @ts_1 as e where name = 'bill' - union + union select e.emp_id, e.name, e.mgr, e.salary from emp for system_time as of timestamp @ts_1 as e, ancestors as a @@ -51,21 +59,138 @@ as ) select * from ancestors; -/* Expected 3 rows */ +eval explain extended $q; +eval $q; + +let $q=with recursive +ancestors +as +( + select e.emp_id, e.name, e.mgr, e.salary + from emp as e + where name = 'bill' + union + select e.emp_id, e.name, e.mgr, e.salary + from emp as e, + ancestors as a + where e.mgr = a.emp_id +) +select * from ancestors for system_time as of timestamp @ts_1; + +eval explain extended $q; +eval $q; + +let $q=with recursive +ancestors +as +( + select e.emp_id, e.name, e.mgr, e.salary + from emp as e + where name = 'bill' + union + select e.emp_id, e.name, e.mgr, e.salary + from emp as e, + ancestors as a + where e.mgr = a.emp_id +) +select name from emp where emp_id in (select emp_id from ancestors for system_time as of timestamp @ts_1); + +eval explain extended $q; +eval $q; + with recursive ancestors as ( select e.emp_id, e.name, e.mgr, e.salary - from emp for system_time as of timestamp @ts_2 as e + from emp as e where name = 'bill' - union + union select e.emp_id, e.name, e.mgr, e.salary - from emp for system_time as of timestamp @ts_2 as e, + from emp as e, ancestors as a where e.mgr = a.emp_id ) -select * from ancestors; +select * from ancestors for system_time as of @ts_2, + ancestors for system_time as of @ts_2 a2; + +--error ER_CONFLICTING_FOR_SYSTEM_TIME +with recursive +ancestors +as +( + select e.emp_id, e.name, e.mgr, e.salary + from emp as e + where name = 'bill' + union + select e.emp_id, e.name, e.mgr, e.salary + from emp as e, + ancestors as a + where e.mgr = a.emp_id +) +select * from ancestors for system_time as of @ts_2, + ancestors for system_time as of now() a2; + +--error ER_CONFLICTING_FOR_SYSTEM_TIME +with recursive +ancestors +as +( + select e.emp_id, e.name, e.mgr, e.salary + from emp as e + where name = 'bill' + union + select e.emp_id, e.name, e.mgr, e.salary + from emp as e, + ancestors as a + where e.mgr = a.emp_id +) +select * from ancestors, + ancestors for system_time as of @ts_2 a2; + +--error ER_CONFLICTING_FOR_SYSTEM_TIME +with recursive +ancestors +as +( + select e.emp_id, e.name, e.mgr, e.salary + from emp as e + where name = 'bill' + union + select e.emp_id, e.name, e.mgr, e.salary + from emp as e, + ancestors as a + where e.mgr = a.emp_id +) +select * from ancestors for system_time as of @ts_2, + ancestors a2; + +--error ER_CONFLICTING_FOR_SYSTEM_TIME +with recursive +ancestors +as +( + select e.emp_id, e.name, e.mgr, e.salary + from emp as e + where name = 'bill' + union + select e.emp_id, e.name, e.mgr, e.salary + from emp as e, + ancestors as a + where e.mgr = a.emp_id +) +select * from ancestors for system_time as of @ts_2 + where emp_id in (select * from ancestors); + +--echo # SYSTEM_TIME to internal recursive instance is prohibited +--error ER_VERS_NOT_VERSIONED +with recursive cte as +( + select * from emp + union all + select * from cte for system_time as of @ts_1 +) +select * from cte; create or replace table emp ( emp_id int, name varchar(127), mgr int) with system 
versioning; create or replace table addr ( emp_id int, address varchar(100)) with system versioning; diff --git a/mysql-test/suite/versioning/t/insert2.test b/mysql-test/suite/versioning/t/insert2.test index b65f3c55b94..1e7d2166064 100644 --- a/mysql-test/suite/versioning/t/insert2.test +++ b/mysql-test/suite/versioning/t/insert2.test @@ -1,6 +1,6 @@ --source include/have_innodb.inc -# VTQ test +# TRT test create table t1( x int unsigned, @@ -69,24 +69,6 @@ select i, c, e>TIMESTAMP'2038-01-01 00:00:00' AS current_row from t1; drop table t1; drop table t2; -# -# MDEV-14788 System versioning cannot be based on local timestamps, as it is now -# -set timestamp=1000000019; -select now() < sysdate(); -create table t1 (a int) with system versioning; -insert t1 values (1); - ---source suite/versioning/wait_system_clock.inc -set @a=sysdate(6); - -select * from t1 for system_time as of now(6); -select * from t1 for system_time as of sysdate(6); -update t1 set a=2; -delete from t1; ---sorted_result -select *, row_start > @a, row_end > @a from t1 for system_time all; - --echo # --echo # MDEV-14871 Server crashes in fill_record / fill_record_n_invoke_before_triggers upon inserting into versioned table with trigger --echo # diff --git a/mysql-test/suite/versioning/t/partition.test b/mysql-test/suite/versioning/t/partition.test index a9ea928b167..d9e784b082b 100644 --- a/mysql-test/suite/versioning/t/partition.test +++ b/mysql-test/suite/versioning/t/partition.test @@ -425,6 +425,30 @@ update t1 set a = 4; delete from t1; delete from t1 where a is not null; +--echo # MDEV-14823 Wrong error message upon selecting from a system_time partition +create or replace table t1 (i int) with system versioning partition by system_time limit 10 (partition p0 history, partition pn current); +--error ER_VERS_QUERY_IN_PARTITION +select * from t1 partition (p0) for system_time all; + +--echo # MDEV-15380 Index for versioned table gets corrupt after partitioning and DELETE +create or replace table t1 (pk int primary key) + engine=myisam + with system versioning + partition by key() partitions 3; +set timestamp=1523466002.799571; +insert into t1 values (11),(12); +set timestamp=1523466004.169435; +delete from t1 where pk in (11, 12); +--echo Same test but for Aria storage engine +create or replace table t1 (pk int primary key) + engine=aria + with system versioning + partition by key() partitions 3; +set timestamp=1523466002.799571; +insert into t1 values (11),(12); +set timestamp=1523466004.169435; +delete from t1 where pk in (11, 12); + --echo # Test cleanup drop database test; create database test; diff --git a/mysql-test/suite/versioning/t/rpl_stmt.test b/mysql-test/suite/versioning/t/partition_rotation.test similarity index 81% rename from mysql-test/suite/versioning/t/rpl_stmt.test rename to mysql-test/suite/versioning/t/partition_rotation.test index c585c6082d0..4937a2a069b 100644 --- a/mysql-test/suite/versioning/t/rpl_stmt.test +++ b/mysql-test/suite/versioning/t/partition_rotation.test @@ -1,17 +1,8 @@ --source include/have_partition.inc ---source include/have_binlog_format_statement.inc ---source include/master-slave.inc - -# -# The test below isn't a replication test as such, -# but it uses replication to get custom timestamps and repeatable -# behavior into versioning. 
-# # # partition rotation # -disable_warnings; set timestamp=unix_timestamp('2001-02-03 10:20:30'); create or replace table t1 (i int) with system versioning partition by system_time interval 1 day @@ -21,17 +12,16 @@ set timestamp=unix_timestamp('2001-02-03 10:20:40'); insert t1 values (1); delete from t1; set timestamp=unix_timestamp('2001-02-04 10:20:50'); insert t1 values (2); delete from t1; -enable_warnings; -sync_slave_with_master; + select subpartition_name,partition_description,table_rows from information_schema.partitions where table_schema='test' and table_name='t1'; -connection master; + set timestamp=unix_timestamp('2001-02-04 10:20:55'); alter table t1 add partition (partition p0 history, partition p2 history); set timestamp=unix_timestamp('2001-02-04 10:30:00'); insert t1 values (4),(5); set timestamp=unix_timestamp('2001-02-04 10:30:10'); update t1 set i=6 where i=5; -sync_slave_with_master; + select subpartition_name,partition_description,table_rows from information_schema.partitions where table_schema='test' and table_name='t1'; --echo ## pruning check @@ -47,7 +37,4 @@ select * from t1 for system_time all where row_end = @ts; --replace_column 5 # 10 # 11 # explain partitions select * from t1 for system_time all where row_end = @ts; -connection master; drop table t1; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/versioning/t/select.test b/mysql-test/suite/versioning/t/select.test index ac8406320e8..bb154f0b248 100644 --- a/mysql-test/suite/versioning/t/select.test +++ b/mysql-test/suite/versioning/t/select.test @@ -41,7 +41,7 @@ if ($MTR_COMBINATION_TRX_ID) { --disable_query_log set @x1= @t1; - select vtq_commit_ts(@x1) into @t1; + select trt_commit_ts(@x1) into @t1; --enable_query_log } @@ -221,6 +221,8 @@ select x from t1 for system_time as of timestamp @ts; set @ts= timestamp'1-1-1 0:0:0'; +select x from t1 for system_time as of timestamp @ts; + --echo ## TRANSACTION specifier select x from t1 for system_time as of transaction @trx_start; @@ -326,6 +328,26 @@ select f1 from t1 join t2 left join t3 left join t4 on f3 = f4 on f3 = f2; drop view v1; drop table t1, t2, t3, t4; -call verify_vtq_dummy(34); +--echo # +--echo # MDEV-15980 FOR SYSTEM_TIME BETWEEN and FROM .. 
TO work with negative intervals +--echo # +--replace_result $sys_datatype_expl SYS_DATATYPE +eval create or replace table t1 ( + a int, + row_start $sys_datatype_expl as row start invisible, + row_end $sys_datatype_expl as row end invisible, + period for system_time (row_start, row_end) +) with system versioning; +insert into t1 values (1); +delete from t1; +select row_start from t1 for system_time all into @t1; +select row_end from t1 for system_time all into @t2; +select * from t1 for system_time between @t1 and @t2; +select * from t1 for system_time between @t2 and @t1; +select * from t1 for system_time from @t1 to @t2; +select * from t1 for system_time from @t2 to @t1; +drop table t1; + +call verify_trt_dummy(34); -- source suite/versioning/common_finish.inc diff --git a/mysql-test/suite/versioning/t/select2.test b/mysql-test/suite/versioning/t/select2.test index bedda892c82..d1b73fa799b 100644 --- a/mysql-test/suite/versioning/t/select2.test +++ b/mysql-test/suite/versioning/t/select2.test @@ -31,7 +31,7 @@ insert into t1(x, y) values(3, 33); select sys_start from t1 where x = 3 and y = 33 into @t1; if($MTR_COMBINATION_TRX_ID) { set @x1= @t1; - select vtq_commit_ts(@x1) into @t1; + select trt_commit_ts(@x1) into @t1; } select x, y from t1; @@ -202,6 +202,14 @@ select * from (select * from t1 for system_time all, t2 for system_time all) for --error ER_PARSE_ERROR select * from (t1 for system_time all join t2 for system_time all) for system_time all; +--echo # MDEV-16043 Assertion thd->Item_change_list::is_empty() failed in mysql_parse upon SELECT from a view reading from a versioned table +create or replace table t1 (a int) with system versioning; +create or replace view v1 as select * from t1; +prepare stmt from "select * from t1 where exp( '20010609211642053929' )"; +--error ER_DATA_OUT_OF_RANGE +execute stmt; +select a from v1; + drop view v1; drop table t1, t2; diff --git a/mysql-test/suite/versioning/t/sysvars.test b/mysql-test/suite/versioning/t/sysvars.test index 08b520e959e..160af12fe02 100644 --- a/mysql-test/suite/versioning/t/sysvars.test +++ b/mysql-test/suite/versioning/t/sysvars.test @@ -84,4 +84,6 @@ select * from t for system_time all; select * from t for system_time from '0-0-0' to current_timestamp(6); select * from t for system_time between '0-0-0' and current_timestamp(6); +show status like "Feature_system_versioning"; + drop table t; diff --git a/mysql-test/suite/versioning/t/truncate.test b/mysql-test/suite/versioning/t/truncate.test index f52d52d81ea..4e039a76567 100644 --- a/mysql-test/suite/versioning/t/truncate.test +++ b/mysql-test/suite/versioning/t/truncate.test @@ -1,3 +1,4 @@ +--source suite/versioning/common.inc --source include/have_partition.inc --source suite/versioning/engines.inc @@ -6,7 +7,13 @@ create table t (a int); delete history from t before system_time now(); # TRUNCATE is not DELETE and trigger must not be called. 
-create or replace table t (a int) with system versioning; +--replace_result $sys_datatype_expl SYS_TYPE +eval create or replace table t ( + a int, + row_start $sys_datatype_expl as row start invisible, + row_end $sys_datatype_expl as row end invisible, + period for system_time (row_start, row_end)) +with system versioning; insert into t values (1); update t set a=2; set @test = 'correct'; @@ -16,7 +23,13 @@ delete history from t; select @test from t; drop table t; -create table t (a int) with system versioning; +--replace_result $sys_datatype_expl SYS_TYPE +eval create or replace table t ( + a int, + row_start $sys_datatype_expl as row start invisible, + row_end $sys_datatype_expl as row end invisible, + period for system_time (row_start, row_end)) +with system versioning; insert into t values (1), (2); update t set a=11 where a=1; --real_sleep 0.01 @@ -45,7 +58,6 @@ drop procedure truncate_sp; --echo # Truncate partitioned create or replace table t (a int) with system versioning -engine myisam partition by system_time limit 1 ( partition p0 history, partition p1 history, @@ -57,7 +69,13 @@ delete history from t; select * from t for system_time all; --echo # VIEW -create or replace table t (i int) with system versioning; +--replace_result $sys_datatype_expl SYS_TYPE +eval create or replace table t ( + i int, + row_start $sys_datatype_expl as row start invisible, + row_end $sys_datatype_expl as row end invisible, + period for system_time (row_start, row_end)) +with system versioning; delete history from t; create or replace view v as select * from t; --error ER_IT_IS_A_VIEW @@ -88,3 +106,6 @@ delete history from v before system_time now(6); unlock tables; drop view v; drop table t; + +drop database test; +create database test; diff --git a/mysql-test/suite/versioning/t/trx_id.opt b/mysql-test/suite/versioning/t/trx_id.opt new file mode 100644 index 00000000000..412290a7585 --- /dev/null +++ b/mysql-test/suite/versioning/t/trx_id.opt @@ -0,0 +1 @@ +--plugin-load-add=test_versioning diff --git a/mysql-test/suite/versioning/t/trx_id.test b/mysql-test/suite/versioning/t/trx_id.test index 45d453dc7d0..aab28d1057c 100644 --- a/mysql-test/suite/versioning/t/trx_id.test +++ b/mysql-test/suite/versioning/t/trx_id.test @@ -141,5 +141,273 @@ update t1 set x= 4; commit; select x, row_start < row_end from t1 for system_time all; +--echo # MDEV-16010 Too many rows with AS OF point_in_the_past_or_NULL +create or replace table t1 ( + x int, + row_start bigint unsigned as row start invisible, + row_end bigint unsigned as row end invisible, + period for system_time (row_start, row_end) +) with system versioning engine innodb; +insert into t1 (x) values (1); +delete from t1; +select * from t1 for system_time as of timestamp'1990-1-1 00:00'; +select * from t1 for system_time as of NULL; + +--echo # MDEV-16024 transaction_registry.begin_timestamp is wrong for explicit transactions +create or replace table t1 ( + x int(11) default null, + row_start bigint(20) unsigned generated always as row start invisible, + row_end bigint(20) unsigned generated always as row end invisible, + period for system_time (row_start, row_end) +) engine=innodb with system versioning; +begin; +set @ts1= now(6); +--sleep 0.01 +insert into t1 values (1); +commit; + +select row_start from t1 into @trx_id; +select trt_begin_ts(@trx_id) <= @ts1 as BEGIN_TS_GOOD; + drop database test; create database test; +use test; + + +--echo # +--echo # MDEV-16100 FOR SYSTEM_TIME erroneously resolves string user variables as transaction IDs +--echo # + 
+CREATE TABLE t1 ( + x INT, + sys_trx_start BIGINT UNSIGNED AS ROW START, + sys_trx_end BIGINT UNSIGNED AS ROW END, + PERIOD FOR SYSTEM_TIME (sys_trx_start, sys_trx_end) +) WITH SYSTEM VERSIONING ENGINE=INNODB; +INSERT INTO t1 (x) VALUES (1); +SET @ts= DATE_ADD(NOW(), INTERVAL 1 YEAR); +EXPLAIN EXTENDED SELECT x FROM t1 FOR SYSTEM_TIME AS OF TRANSACTION @ts; +EXPLAIN EXTENDED SELECT x FROM t1 FOR SYSTEM_TIME AS OF TIMESTAMP @ts; +EXPLAIN EXTENDED SELECT x FROM t1 FOR SYSTEM_TIME AS OF @ts; +DROP TABLE t1; + + +--echo # +--echo # Testing AS OF with expressions of various kinds and data types +--echo # + +CREATE TABLE t1 +( + x INT, + sys_trx_start BIGINT UNSIGNED AS ROW START INVISIBLE, + sys_trx_end BIGINT UNSIGNED AS ROW END INVISIBLE, + PERIOD FOR SYSTEM_TIME (sys_trx_start, sys_trx_end) +) WITH SYSTEM VERSIONING; +INSERT INTO t1 VALUES (1); + +CREATE TABLE t2 +( + x INT, + sys_trx_start TIMESTAMP(6) AS ROW START INVISIBLE, + sys_trx_end TIMESTAMP(6) AS ROW END INVISIBLE, + PERIOD FOR SYSTEM_TIME (sys_trx_start, sys_trx_end) +) WITH SYSTEM VERSIONING; +INSERT INTO t2 VALUES (1); + +--echo # +--echo # ROW is not supported +--echo # + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT * FROM t1 FOR SYSTEM_TIME AS OF (1,1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT * FROM t2 FOR SYSTEM_TIME AS OF (1,1); + + +--echo # +--echo # DOUBLE is not supported, use explicit CAST +--echo # + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT * FROM t1 FOR SYSTEM_TIME AS OF RAND(); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT * FROM t1 FOR SYSTEM_TIME AS OF (RAND()); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT * FROM t1 FOR SYSTEM_TIME AS OF COALESCE(RAND()); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT * FROM t2 FOR SYSTEM_TIME AS OF RAND(); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT * FROM t2 FOR SYSTEM_TIME AS OF (RAND()); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT * FROM t2 FOR SYSTEM_TIME AS OF COALESCE(RAND()); + + +--echo # +--echo # DECIMAL is not supported, use explicit CAST +--echo # + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT * FROM t1 FOR SYSTEM_TIME AS OF 10.1; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT * FROM t1 FOR SYSTEM_TIME AS OF COALESCE(10.1); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT * FROM t2 FOR SYSTEM_TIME AS OF 10.1; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT * FROM t2 FOR SYSTEM_TIME AS OF COALESCE(10.1); + + +--echo # +--echo # YEAR is not supported, use explicit CAST +--echo # + +DELIMITER $$; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +BEGIN NOT ATOMIC + DECLARE var YEAR; + SELECT * FROM t1 FOR SYSTEM_TIME AS OF var; +END; +$$ +DELIMITER ;$$ + +DELIMITER $$; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +BEGIN NOT ATOMIC + DECLARE var YEAR; + SELECT * FROM t2 FOR SYSTEM_TIME AS OF var; +END; +$$ +DELIMITER ;$$ + + +--echo # +--echo # ENUM is not supported, use explicit CAST +--echo # + +DELIMITER $$; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +BEGIN NOT ATOMIC + DECLARE var ENUM('xxx') DEFAULT 'xxx'; + SELECT * FROM t1 FOR SYSTEM_TIME AS OF var; +END; +$$ +DELIMITER ;$$ + + +DELIMITER $$; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +BEGIN NOT ATOMIC + DECLARE var ENUM('xxx') DEFAULT 'xxx'; + SELECT * FROM t2 FOR SYSTEM_TIME AS OF var; +END; +$$ +DELIMITER ;$$ + + +--echo # +--echo # SET is not supported, use explicit CAST 
+--echo # + +DELIMITER $$; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +BEGIN NOT ATOMIC + DECLARE var SET('xxx') DEFAULT 'xxx'; + SELECT * FROM t1 FOR SYSTEM_TIME AS OF var; +END; +$$ +DELIMITER ;$$ + +DELIMITER $$; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +BEGIN NOT ATOMIC + DECLARE var SET('xxx') DEFAULT 'xxx'; + SELECT * FROM t2 FOR SYSTEM_TIME AS OF var; +END; +$$ +DELIMITER ;$$ + + +--echo # +--echo # BIT is resolved to TRANSACTION +--echo # + +DELIMITER $$; +BEGIN NOT ATOMIC + DECLARE var BIT(10); + SELECT * FROM t1 FOR SYSTEM_TIME AS OF var; +END; +$$ +DELIMITER ;$$ + +DELIMITER $$; +--error ER_VERS_ENGINE_UNSUPPORTED +BEGIN NOT ATOMIC + DECLARE var BIT(10); + SELECT * FROM t2 FOR SYSTEM_TIME AS OF var; +END; +$$ +DELIMITER ;$$ + + +--echo # +--echo # String literals resolve to TIMESTAMP +--echo # + +SELECT * FROM t1 FOR SYSTEM_TIME AS OF '2038-12-30 00:00:00'; +SELECT * FROM t2 FOR SYSTEM_TIME AS OF '2038-12-30 00:00:00'; + + +--echo # +--echo # HEX hybrids resolve to TRANSACTION +--echo # + +SELECT * FROM t1 FOR SYSTEM_TIME AS OF (0x60); +--error ER_VERS_ENGINE_UNSUPPORTED +SELECT * FROM t2 FOR SYSTEM_TIME AS OF (0x60); + + +--echo # +--echo # BIT literals resolve to TRANSACTION +--echo # + +SELECT * FROM t1 FOR SYSTEM_TIME AS OF (b'1100000'); +--error ER_VERS_ENGINE_UNSUPPORTED +SELECT * FROM t2 FOR SYSTEM_TIME AS OF (b'1100000'); + +DROP TABLE t1, t2; + + +--echo # +--echo # MDEV-16094 Crash when using AS OF with a stored function +--echo # + +CREATE FUNCTION fts() RETURNS DATETIME RETURN '2001-01-01 10:20:30'; +CREATE FUNCTION ftx() RETURNS BIGINT UNSIGNED RETURN 1; + +CREATE TABLE ttx +( + x INT, + start_timestamp BIGINT UNSIGNED GENERATED ALWAYS AS ROW START, + end_timestamp BIGINT UNSIGNED GENERATED ALWAYS AS ROW END, + PERIOD FOR SYSTEM_TIME(start_timestamp, end_timestamp) +) ENGINE=InnoDB WITH SYSTEM VERSIONING; + +CREATE TABLE tts +( + x INT, + start_timestamp TIMESTAMP(6) GENERATED ALWAYS AS ROW START, + end_timestamp TIMESTAMP(6) GENERATED ALWAYS AS ROW END, + PERIOD FOR SYSTEM_TIME(start_timestamp, end_timestamp) +) ENGINE=InnoDB WITH SYSTEM VERSIONING; + +SELECT * FROM tts FOR SYSTEM_TIME AS OF fts(); +--error ER_VERS_ENGINE_UNSUPPORTED +SELECT * FROM tts FOR SYSTEM_TIME AS OF ftx(); +SELECT * FROM ttx FOR SYSTEM_TIME AS OF fts(); +SELECT * FROM ttx FOR SYSTEM_TIME AS OF ftx(); + +DROP TABLE tts; +DROP TABLE ttx; +DROP FUNCTION fts; +DROP FUNCTION ftx; diff --git a/mysql-test/unstable-tests b/mysql-test/unstable-tests index 288df9233a5..dea3f3c2764 100644 --- a/mysql-test/unstable-tests +++ b/mysql-test/unstable-tests @@ -23,166 +23,162 @@ # ############################################################################## +# Based on bb-10.3-release 037df4189f1adf53e87d6b7134408a547e83bbae -main.alter_table : Modified in 10.2.8 -main.alter_table_online : Modified in 10.2.8 -main.analyze_format_json : MDEV-11866 - Mismatch; modified in 10.2.8 +main.alter_table : Modified in 10.3.7 +main.alter_table_errors : Added in 10.3.7 main.analyze_stmt_slow_query_log : MDEV-12237 - Wrong result -main.analyze_stmt_orderby : MDEV-11866 - Mismatch -main.binary_to_hex : Added in 10.2.8 -main.bootstrap : Modified in 10.2.7 -main.check_constraint : Modified in 10.2.8 -main.client_xml : Modified in 10.2.7 -main.count_distinct : Modified in 10.2.8 -main.create_drop_event : Modified in 10.2.8 -main.cte_nonrecursive : Modified in 10.2.7 -main.ctype_ucs : Modified in 10.2.7 -main.derived_cond_pushdown : Modified in 10.2.7 -main.derived_view : Modified in 10.2.7 
-main.drop-no_root : MDEV-12633 - Valgrind -main.errors : Modified in 10.2.8 +main.ansi : Modified in 10.3.7 +main.assign_key_cache : Added in 10.2.15 +main.assign_key_cache_debug : Added in 10.2.15 +main.auth_named_pipe : MDEV-14724 - System error 2 +main.check_constraint : Modified in 10.3.7 +main.connect : MDEV-16270 - Wrong result; modified in 10.2.15 +main.connect_debug : Added in 10.2.15 +main.connect2 : MDEV-13885 - Server crash +main.create_drop_event : MDEV-16271 - Wrong result +main.create_or_replace : Modified in 10.3.7 +main.cte_recursive : Modified in 10.3.7 +main.ctype_ucs : Modified in 10.2.15 +main.ctype_utf8mb4 : Modified in 10.2.15 +main.custom_aggregate_functions : Modified in 10.3.7 +main.derived_cond_pushdown : Modified in 10.3.7 +main.distinct : MDEV-14194 - Crash; modified in 10.3.7 +main.drop_bad_db_type : MDEV-15676 - Wrong result main.events_2 : MDEV-13277 - Crash -main.func_concat : Modified in 10.2.7 -main.func_crypt : Modified in 10.2.7 -main.func_group : Modified in 10.2.7 -main.func_json : MDEV-11648 - Crash, valgrind; modified in 10.2.8 -main.func_regexp_pcre : MDEV-13412 - Crash; modified in 10.2.8 -main.gis : Modified in 10.2.8 -main.gis-json : Modified in 10.2.8 -main.group_by : Modified in 10.2.8 -main.index_merge_innodb : MDEV-7142 - Plan mismatch; include file modified in 10.2.8 -main.index_merge_myisam : Include file modified in 10.2.8 -main.innodb_ext_key : Modified in 10.2.7 +main.events_slowlog : MDEV-12821 - Wrong result +main.explain_slowquerylog : Modified in 10.3.7 +main.func_str : Modified in 10.2.15 +main.grant_not_windows : Added in 10.3.7 +main.index_merge_innodb : MDEV-7142 - Plan mismatch main.innodb_mysql_lock : MDEV-7861 - Wrong result -main.join_outer : Modified in 10.2.7 +main.insert_select : Modified in 10.3.7 +main.invisible_field_grant_completely : Added in 10.3.7 +main.invisible_field_grant_system : Added in 10.3.7 main.kill-2 : MDEV-13257 - Wrong result -main.loadxml : Data file modified in 10.2.8 +main.kill_processlist-6619 : MDEV-10793 - Wrong result +main.lock : Modified in 10.2.15 main.log_slow : MDEV-13263 - Wrong result -main.mdl : Added in 10.2.7 -main.mdl_sync : Modified in 10.2.7 -main.myisam_debug : Modified in 10.2.7 -main.mysql : Modified in 10.2.7 -main.mysql_client_test : MDEV-12633 - Valgrind -main.mysql_client_test_comp : MDEV-12633 - Valgrind -main.mysql_client_test_nonblock : MDEV-12633 - Valgrind, CONC-208 - Error on Power -main.mysql_upgrade : Modified in 10.2.8 +main.mdev375 : Modified in 10.2.15 +main.mdev-504 : MDEV-15171 - warning +main.multi_update : Modified in 10.3.7 +main.myisam : Modified in 10.2.15 +main.myisam_recover : Modified in 10.2.15 +main.mysql : Modified in 10.3.7 +main.mysql_client_test_nonblock : CONC-208 - Error on Power; MDEV-15096 - exec failed +main.mysql_cp932 : Modified in 10.3.7 +main.mysql_upgrade_noengine : MDEV-14355 - Wrong result main.mysql_upgrade_ssl : MDEV-13492 - Unknown SSL error -main.mysqlcheck : MDEV-12633 - Valgrind -main.mysqld--help : Modified in 10.2.8 +main.mysqldump : MDEV-14800 - Stack smashing detected main.mysqld_option_err : MDEV-12747 - Timeout main.mysqlhotcopy_myisam : MDEV-10995 - Hang on debug -main.mysqltest : Modified in 10.2.7 +main.mysqltest : MDEV-13887 - Wrong result main.openssl_1 : MDEV-13492 - Unknown SSL error -main.order_by : Modified in 10.2.7 -main.partition_alter : Modified in 10.2.7 -main.read_only : Modified in 10.2.8 +main.order_by_optimizer_innodb : MDEV-10683 - Wrong result +main.parser : Modified in 10.3.7 +main.partition_debug_sync : 
MDEV-15669 - Deadlock found when trying to get lock +main.partition_innodb : Modified in 10.3.7 +main.partition_list : Modified in 10.2.15 +main.ps : MDEV-11017 - Wrong result; modified in 10.2.15 +main.query_cache : MDEV-16180 - Wrong result +main.query_cache_debug : MDEV-15281 - Query cache is disabled +main.range_vs_index_merge_innodb : MDEV-15283 - Server has gone away +main.read_only_innodb : Modified in 10.2.15 +main.select : MDEV-15430 - Wrong result with clang-4 +main.select_jcl6 : MDEV-15430 - Wrong result with clang-4 +main.select_pkeycache : MDEV-15430 - Wrong result with clang-4 +main.set_statement : MDEV-13183 - Wrong result main.shm : MDEV-12727 - Mismatch, ERROR 2013 -main.show_check : MDEV-12633 - Valgrind -main.sp : MDEV-7866 - Mismatch; modified in 10.2.8 -main.sp-destruct : Modified in 10.2.8 -main.sp-security : Modified in 10.2.8 -main.ssl_7937 : MDEV-11546 - Timeout on Windows +main.show_explain : MDEV-10674 - Wrong result code +main.sp : MDEV-7866 - Mismatch; modified in 10.3.7 +main.sp-code : Modified in 10.3.7 +main.sp-destruct : Modified in 10.2.15 +main.sp-innodb : Modified in 10.2.15 main.ssl_ca : MDEV-10895 - SSL connection error on Power +main.ssl_cert_verify : MDEV-13735 - Server crash main.ssl_connect : MDEV-13492 - Unknown SSL error main.ssl_timeout : MDEV-11244 - Crash main.stat_tables_par : MDEV-13266 - Wrong result -main.statistics : Modified in 10.2.8 +main.statistics : Modified in 10.2.15 main.status : MDEV-13255 - Wrong result -main.subselect : Modified in 10.2.8 -main.subselect_innodb : Modified in 10.2.7 -main.subselect_mat_cost_bugs : Modified in 10.2.7 -main.subselect_nulls : Modified in 10.2.8 -main.subselect_sj : Modified in 10.2.7 -main.subselect_sj_mat : Modified in 10.2.7 -main.subselect_sj2_mat : Modified in 10.2.7 -main.trigger : Modified in 10.2.7 -main.type_json : Modified in 10.2.8 -main.type_num : Modified in 10.2.8 -main.union : Modified in 10.2.8 -main.view : Modified in 10.2.8 -main.win : Modified in 10.2.8 -main.win_insert_select : Added in 10.2.7 +main.subselect4 : Modified in 10.2.15 +main.subselect-crash_15755 : Added in 10.3.7 +main.subselect_innodb : MDEV-10614 - Wrong result +main.subselect_sj : Modified in 10.2.15 +main.symlink-myisam-11902 : MDEV-15098 - Error 40 from storage engine +main.tc_heuristic_recover : MDEV-14189 - Wrong result +main.type_blob : MDEV-15195 - Wrong result +main.type_datetime_hires : MDEV-15430 - Wrong result with clang-4 +main.type_float : MDEV-15430 - Wrong result with clang-4 +main.type_time_hires : MDEV-15430 - Wrong result with clang-4 +main.type_timestamp_hires : MDEV-15430 - Wrong result with clang-4 +main.userstat : MDEV-12904 - SSL errors +main.variables : Modified in 10.2.15 +main.view : Modified in 10.2.15 +main.win : Modified in 10.2.15 #---------------------------------------------------------------- +archive.archive_bitfield : MDEV-11771 - table is marked as crashed archive.mysqlhotcopy_archive : MDEV-10995 - Hang on debug #---------------------------------------------------------------- binlog.binlog_commit_wait : MDEV-10150 - Mismatch -binlog.binlog_innodb : Modified in 10.2.8 -binlog.binlog_parallel_replication_marks_row : Added in 10.2.7 -binlog.binlog_parallel_replication_marks_stm_mix : Added in 10.2.7 -binlog.binlog_unsafe : Modified in 10.2.8 -binlog.flashback : Modified in 10.2.7 -binlog.mysqladmin : Added in 10.2.7 +binlog.binlog_flush_binlogs_delete_domain : MDEV-14431 - Wrong exit code +binlog.binlog_xa_recover : MDEV-8517 - Extra checkpoint 
#---------------------------------------------------------------- binlog_encryption.binlog_xa_recover : MDEV-12908 - Extra checkpoint +binlog_encryption.encrypted_master : MDEV-14201 - Extra warnings +binlog_encryption.encrypted_master_switch_to_unencrypted : MDEV-14190 - Can't init tc log +binlog_encryption.encryption_combo : MDEV-14199 - Table is marked as crashed binlog_encryption.rpl_binlog_errors : MDEV-12742 - Crash binlog_encryption.rpl_parallel : MDEV-10653 - Timeout in include +binlog_encryption.rpl_relayrotate : MDEV-15194 - Timeout binlog_encryption.rpl_semi_sync : MDEV-11673 - Valgrind binlog_encryption.rpl_skip_replication : MDEV-13571 - Unexpected warning +binlog_encryption.rpl_ssl : MDEV-14507 - Timeouts binlog_encryption.rpl_stm_relay_ign_space : MDEV-13278 - Wrong result (test assertion) +binlog_encryption.rpl_sync : MDEV-13830 - Assertion failure +binlog_encryption.rpl_typeconv : Include file modified in 10.2.15 #---------------------------------------------------------------- -connect.alter_xml : Modified in 10.2.8 -connect.alter_xml2 : Added in 10.2.8 -connect.dir : Modified in 10.2.8 -connect.infoschema-9739 : Modified in 10.2.8 -connect.infoschema2-9739 : Added in 10.2.8 -connect.jdbc_new : Modified in 10.2.8 -connect.json : Enabled in 10.2.8 -connect.json_java_2 : Added in 10.2.8 -connect.json_java_3 : Added in 10.2.8 -connect.json_mongo_c : Added in 10.2.8 -connect.json_udf : Enabled in 10.2.8 -connect.json_udf_bin : Enabled in 10.2.8 -connect.mongo_c : Added in 10.2.8 -connect.mongo_java_2 : Added in 10.2.8 -connect.mongo_java_3 : Added in 10.2.8 -connect.mul_new : Added in 10.2.8 -connect.mysql_exec : Modified in 10.2.8 -connect.tbl : MDEV-10179 - Mismatch, MDEV-9844 - Valgrind, crash; modified in 10.2.8 -connect.tbl_thread : MDEV-10179 - Mismatch, MDEV-9844 - Valgrind, crash; added in 10.2.8 +compat/oracle.column_compression : Added in 10.3.7 +compat/oracle.func_concat : Modified in 10.3.7 +compat/oracle.gis : Added in 10.3.7 +compat/oracle.parser : Modified in 10.3.7 +compat/oracle.table_value_constr : Added in 10.3.7 +compat/oracle.versioning : Added in 10.3.7 +compat/oracle.win : Added in 10.3.7 + +#---------------------------------------------------------------- + +connect.pivot : MDEV-14803 - Failed to discover table connect.vcol : MDEV-12374 - Fails on Windows -connect.xml : Modified in 10.2.8 -connect.xml2 : Added in 10.2.8 -connect.xml2_grant : Added in 10.2.8 -connect.xml2_html : Added in 10.2.8 -connect.xml2_mdev5261 : Added in 10.2.8 -connect.xml2_mult : Added in 10.2.8 -connect.xml2_zip : Added in 10.2.8 -connect.xml_grant : Modified in 10.2.8 -connect.xml_html : Modified in 10.2.8 -connect.xml_mdev5261 : Modified in 10.2.8 -connect.xml_mult : Modified in 10.2.8 -connect.xml_zip : Modified in 10.2.8 -connect.zip : Modified in 10.2.8 #---------------------------------------------------------------- -csv.read_only : Added in 10.2.7 - -#---------------------------------------------------------------- - -encryption.create_or_replace : MDEV-9359, MDEV-13516 - Assertion failure, MDEV-12694 - Timeout -encryption.innochecksum : Modified in 10.2.8 -encryption.innodb-checksum-algorithm : Added in 10.2.7 -encryption.innodb-compressed-blob : Modified in 10.2.7 -encryption.innodb-discard-import-change : MDEV-12632 - Valgrind +encryption.create_or_replace : MDEV-12694 - Timeout; MDEV-16115 - Trying to access tablespace +encryption.debug_key_management : MDEV-13841 - Timeout +encryption.encrypt_and_grep : MDEV-13765 - Wrong result +encryption.innochecksum : 
MDEV-13644 - Assertion failure +encryption.innodb-compressed-blob : MDEV-14728 - Unable to get certificate +encryption.innodb_encrypt_log : MDEV-13725 - Wrong result +encryption.innodb_encryption : MDEV-15675 - Timeout encryption.innodb-encryption-alter : MDEV-13566 - Lock wait timeout -encryption.innodb_encryption_discard_import : MDEV-12903 - Wrong result -encryption.innodb_encryption_filekeys : MDEV-9962 - Timeout -encryption.innodb_encrypt_log : MDEV-13253 - Wrong result; modified in 10.2.8 -encryption.innodb_encrypt_log_corruption : MDEV-13253 - Wrong result -encryption.innodb_encryption-page-compression : Re-enabled in 10.2.7 -encryption.innodb_encryption_tables : MDEV-9359 - Assertion failure -encryption.innodb-first-page-read : Added in 10.2.7 -encryption.innodb-key-rotation-disable : Modified in 10.2.7 -encryption.second_plugin-12863 : Added in 10.2.8 +encryption.innodb_encryption_discard_import : MDEV-16116 - Wrong result; modified in 10.2.15 +encryption.innodb_encryption_filekeys : MDEV-15673 - Timeout; modified in 10.2.15 +encryption.innodb_encryption-page-compression : MDEV-12630 - crash or assertion failure +encryption.innodb_encryption_row_compressed : MDEV-16113 - Crash +encryption.innodb-first-page-read : MDEV-14356 - Timeout in wait condition +encryption.innodb_lotoftables : Modified in 10.2.15 +encryption.innodb-redo-badkey : MDEV-13893 - Page cannot be decrypted; include file modified in 10.2.15 +encryption.innodb-redo-nokeys : Include file modified in 10.2.15 +encryption.innodb-remove-encryption : Added in 10.2.15 +encryption.innodb-spatial-index : MDEV-13746 - Wrong result #---------------------------------------------------------------- @@ -190,166 +186,179 @@ engines/rr_trx.* : MDEV-10998 - Not maintained #---------------------------------------------------------------- -federated.assisted_discovery : Modified in 10.2.7 +federated.assisted_discovery : Modified in 10.3.7 +federated.federated_bug_585688 : MDEV-14805 - Server crash, MDEV-12907 - Valgrind federated.federated_innodb : MDEV-10617 - Wrong checksum federated.federated_transactions : MDEV-10617 - Wrong checksum federated.federatedx : MDEV-10617 - Wrong checksum -federated.net_thd_crash-12725 : Added in 10.2.8 +federated.federatedx_versioning : Added in 10.3.7 +federated.timestamps : Added in 10.3.7 #---------------------------------------------------------------- -funcs_1.is_character_sets : Modified in 10.2.8 -funcs_1.is_coll_char_set_appl : Modified in 10.2.8 -funcs_1.is_collations : Modified in 10.2.8 -funcs_1.is_engines : Modified in 10.2.8 -funcs_1.is_events : Modified in 10.2.8 -funcs_1.is_key_column_usage : Include file modified in 10.2.8 -funcs_1.is_key_column_usage_embedded : Modified in 10.2.8 -funcs_1.is_routines_embedded : Modified in 10.2.8 -funcs_1.is_schemata_embedded : Modified in 10.2.8 -funcs_1.is_table_constraints : Modified in 10.2.8 -funcs_1.is_tables_embedded : Modified in 10.2.8 -funcs_1.is_triggers_embedded : Modified in 10.2.8 -funcs_1.is_views_embedded : Modified in 10.2.8 +funcs_1.processlist_val_no_prot : MDEV-11223 - Wrong result funcs_2/charset.* : MDEV-10999 - Not maintained #---------------------------------------------------------------- -galera.galera_defaults : Modified in 10.2.7 -galera.MW-309 : Added in 10.2.7 -galera.MW-369 : Added in 10.2.7 +galera.* : Suite is not stable yet +galera_3nodes.* : Suite is not stable yet #---------------------------------------------------------------- -gcol.gcol_bugfixes : Modified in 10.2.8 -gcol.gcol_keys_innodb : Modified in 10.2.8 
-gcol.gcol_keys_myisam : Include file modified in 10.2.8 -gcol.gcol_rollback : Modified in 10.2.8 -gcol.innodb_virtual_basic : Modified in 10.2.8 -gcol.innodb_virtual_debug_purge : MDEV-13568 - Wrong result; modified in 10.2.8 -gcol.innodb_virtual_purge : Modified in 10.2.8 +gcol.innodb_virtual_debug : MDEV-14134 - Crash, assertion failure +gcol.innodb_virtual_fk : Modified in 10.2.15 +gcol.innodb_virtual_index : Modified in 10.3.7 #---------------------------------------------------------------- -innodb.101_compatibility : MDEV-13570 - Crash; perl file modified in 10.2.7 -innodb.alter_missing_tablespace : Modified in 10.2.7 -innodb.deadlock_detect : MDEV-13262 - Wrong error code -innodb.defrag_mdl-9155 : MDEV-11336 - Timeout -innodb.doublewrite : Modified in 10.2.7 -innodb.drop_table_background : Added in 10.2.7 -innodb.foreign_key : Modified in 10.2.8 -innodb.group_commit_binlog_pos : Modified in 10.2.7 -innodb.group_commit_binlog_pos_no_optimize_thread : Modified in 10.2.7 -innodb.ibuf_not_empty : MDEV-12741 - Tablespace error -innodb.index_merge_threshold : Modified in 10.2.8 -innodb.innodb-32k : Opt file modified in 10.2.8 -innodb.innodb-32k-crash : Opt file modified in 10.2.8 -innodb.innodb-64k : Modified in 10.2.8 -innodb.innodb-64k-crash : Opt file modified in 10.2.8 -innodb.innodb-alter-debug : Modified in 10.2.7 -innodb.innodb-alter-nullable : Modified in 10.2.7 -innodb.innodb-alter-table : Modified in 10.2.7 -innodb.innodb-alter-tempfile : Modified in 10.2.7 -innodb.innodb-alter-timestamp : Modified in 10.2.7 +handler.heap : Modified in 10.3.7 +handler.innodb : Modified in 10.3.7 +handler.interface : Modified in 10.3.7 + +#---------------------------------------------------------------- + +innodb.101_compatibility : MDEV-13891 - Wrong result +innodb.alter_copy : MDEV-16181 - Assertion failure +innodb.alter_foreign_crash : Added in 10.3.7 +innodb.alter_kill : MDEV-16273 - Unknown storage engine 'InnoDB'; added in 10.3.7 +innodb.alter_missing_tablespace : Modified in 10.2.15 +innodb.alter_partitioned : Added in 10.3.7 +innodb.alter_partitioned_debug : Added in 10.2.15 +innodb.alter_partitioned_xa : Added in 10.2.15 +innodb.alter_rename_files : Added in 10.3.7 +innodb.analyze_table : Added in 10.3.7 +innodb.autoinc_persist : MDEV-15282 - Assertion failure +innodb.doublewrite : MDEV-12905 - Server crash; include file modified in 10.2.15 +innodb.foreign_key : Modified in 10.2.15 +innodb.group_commit_crash : MDEV-14191 - InnoDB registration failed +innodb.group_commit_crash_no_optimize_thread : MDEV-13830 - Assertion failure +innodb.innodb-64k-crash : MDEV-13872 - Failure and crash on startup +innodb.innodb-alter : Modified in 10.2.15 +innodb.innodb-alter-nullable : Modified in 10.2.15 +innodb.innodb-alter-tempfile : MDEV-15285 - Table already exists +innodb.innodb_bug13510739 : Modified in 10.3.7 innodb.innodb_bug14147491 : MDEV-11808 - Index is corrupt -innodb.innodb_bug53290 : MDEV-12634 - Valgrind -innodb.innodb_defragment : MDEV-11336 - Mismatch -innodb.innodb_defragment_fill_factor : Modified in 10.2.8 -innodb.innodb_defragment_small : MDEV-11336 - Mismatch -innodb.innodb_defrag_binlog : MDEV-11336 - Mismatch -innodb.innodb_defrag_concurrent : MDEV-11336 - Assertion failure, mismatch -innodb.innodb_defrag_stats : MDEV-11336 - Mismatch -innodb.innodb-enlarge-blob : Added in 10.2.8 -innodb.innodb_force_recovery : Modified in 10.2.7 +innodb.innodb_bug27216817 : Added in 10.2.15 +innodb.innodb_bug30423 : MDEV-7311 - Wrong result +innodb.innodb_bug48024 : MDEV-14352 - Assertion 
failure +innodb.innodb_bug54044 : Modified in 10.3.7 +innodb.innodb_bug59641 : MDEV-13830 - Assertion failure +innodb.innodb_bulk_create_index_replication : MDEV-15273 - Slave failed to start +innodb.innodb_defrag_stats_many_tables : MDEV-14198 - Table is full innodb.innodb-get-fk : MDEV-13276 - Server crash +innodb.innodb-index : Modified in 10.2.15 +innodb.innodb-index-online : MDEV-14809 - Cannot save statistics innodb.innodb_information_schema : MDEV-8851 - Wrong result -innodb.innodb_max_recordsize_32k : Added in 10.2.8 -innodb.innodb_max_recordsize_64k : Added in 10.2.8 -innodb.innodb-page_compression_default : Modified in 10.2.7 -innodb.innodb-page_compression_snappy : Modified in 10.2.7 -innodb.innodb_stats_persistent : Added in 10.2.7 +innodb.innodb_information_schema_buffer : MDEV-16267 - Wrong result +innodb.innodb-isolation : Modified in 10.2.15 +innodb.innodb_max_recordsize_32k : MDEV-14801 - Operation failed +innodb.innodb_max_recordsize_64k : MDEV-15203 - Wrong result +innodb.innodb-online-alter-gis : Modified in 10.3.7 +innodb.innodb-page_compression_default : MDEV-13644 - Assertion failure +innodb.innodb-page_compression_lzma : MDEV-14353 - Wrong result +innodb.innodb_prefix_index_restart_server : Modified in 10.2.15 +innodb.innodb_stats_persistent_debug : MDEV-14801 - Operation failed +innodb.innodb-table-online : MDEV-13894 - Wrong result innodb.innodb_sys_semaphore_waits : MDEV-10331 - Semaphore wait -innodb.innodb-virtual-columns2 : Added in 10.2.6 -innodb.innodb-wl5522-debug : Modified in 10.2.6 -innodb.insert_debug : Modified in 10.2.6 +innodb.innodb-wl5522-debug : MDEV-14200 - Wrong errno +innodb.innodb_zip_innochecksum2 : MDEV-13882 - Extra warnings +innodb.innodb_zip_innochecksum3 : MDEV-14486 - Resource temporarily unavailable +innodb.log_alter_table : Include file modified in 10.2.15 innodb.log_corruption : MDEV-13251 - Wrong result -innodb.log_data_file_size : Modified in 10.2.7 -innodb.log_file : Modified in 10.2.7 -innodb.log_file_name : Modified in 10.2.7 -innodb.log_file_size : MDEV-13471 - Crash; modified in 10.2.7 -innodb.purge_thread_shutdown : Added in 10.2.8 -innodb.read_only_recovery : Modified in 10.2.8 -innodb.rename_table : Added in 10.2.8 -innodb.row_format_redundant : Added in 10.2.7 -innodb.table_flags : MDEV-13572 - Wrong result; added in 10.2.7 -innodb.temporary_table : MDEV-13265 - Wrong result; modified in 10.2.7 -innodb.truncate_debug : MDEV-13256 - Timeout -innodb.truncate_purge_debug : Modified in 10.2.7 +innodb.log_data_file_size : MDEV-14204 - Server failed to start; include file modified in 10.2.15 +innodb.log_file_name : MDEV-14193 - Exception; include file modified in 10.2.15 +innodb.log_file_name_debug : Include file modified in 10.2.15 +innodb.log_file_size : MDEV-15668 - Not found pattern +innodb.mdev-15707 : Added in 10.2.15 +innodb.monitor : MDEV-16179 - Wrong result +innodb.purge_secondary : MDEV-15681 - Wrong result +innodb.purge_thread_shutdown : MDEV-13792 - Wrong result +innodb.read_only_recovery : MDEV-13886 - Server crash +innodb.recovery_shutdown : MDEV-15671 - Checksum mismatch in datafile +innodb.row_format_redundant : MDEV-15192 - Trying to access missing tablespace +innodb.stored_fk : Added in 10.2.15 +innodb.table_definition_cache_debug : MDEV-14206 - Extra warning +innodb.table_flags : MDEV-13572 - Wrong result +innodb.temp_table_savepoint : MDEV-16182 - Wrong result +innodb.temporary_table : MDEV-13265 - Wrong result; modified in 10.3.7 +innodb.tmpdir : Modified in 10.3.7 +innodb.undo_log : Modified in 10.2.15 
+innodb.update_time : MDEV-14804 - Wrong result +innodb.xa_recovery : MDEV-15279 - mysqld got exception +innodb_fts.basic : Added in 10.2.15 +innodb_fts.fulltext2 : MDEV-14727 - Long semaphore wait; modified in 10.3.7 innodb_fts.fulltext_misc : MDEV-12636 - Valgrind -innodb_fts.innodb_fts_plugin : Modified in 10.2.7 +innodb_fts.fulltext_table_evict : Added in 10.2.15 +innodb_fts.fulltext_var : Modified in 10.3.7 +innodb_fts.innodb_fts_misc : Modified in 10.3.7 +innodb_fts.innodb_fts_plugin : MDEV-13888 - Errors in server log innodb_fts.innodb_fts_stopword_charset : MDEV-13259 - Table crashed +innodb_fts.sync : MDEV-14808 - Wrong result -innodb_gis.1 : Modified in 10.2.8 -innodb_gis.gis : Modified in 10.2.8 +innodb_gis.rtree_compress2 : MDEV-16269 - Wrong result +innodb_gis.rtree_concurrent_srch : MDEV-15284 - Wrong result with embedded +innodb_gis.rtree_purge : MDEV-15275 - Timeout +innodb_gis.rtree_recovery : MDEV-15274 - Error on check +innodb_gis.rtree_split : MDEV-14208 - Too many arguments +innodb_gis.rtree_undo : MDEV-14456 - Timeout in include file +innodb_gis.types : MDEV-15679 - Table is marked as crashed; modified in 10.2.15 -innodb_zip.bug36169 : Modified in 10.2.7 -innodb_zip.bug36172 : Modified in 10.2.7 -innodb_zip.bug52745 : Modified in 10.2.7 -innodb_zip.bug53591 : Modified in 10.2.7 -innodb_zip.bug56680 : Modified in 10.2.7 -innodb_zip.cmp_drop_table : Modified in 10.2.7 -innodb_zip.create_options : Modified in 10.2.7 -innodb_zip.innochecksum : Modified in 10.2.7 -innodb_zip.innochecksum_2 : Modified in 10.2.7 -innodb_zip.innochecksum_3 : MDEV-13279 - Extra warnings; modified in 10.2.8 -innodb_zip.innodb-zip : Modified in 10.2.7 -innodb_zip.recover : Modified in 10.2.7 -innodb_zip.restart : Modified in 10.2.7 +innodb_zip.cmp_per_index : MDEV-14490 - Table is marked as crashed; modified in 10.2.15 +innodb_zip.innochecksum_3 : MDEV-13279 - Extra warnings +innodb_zip.wl6470_1 : MDEV-14240 - Assertion failure innodb_zip.wl6501_1 : MDEV-10891 - Can't create UNIX socket -innodb_zip.wl5522_debug_zip : MDEV-11600 - Operating system error number 2; modified in 10.2.7 -innodb_zip.wl5522_zip : Modified in 10.2.7 -innodb_zip.wl6344_compress_level : Modified in 10.2.7 -innodb_zip.wl6501_scale_1 : MDEV-13254 - Timeout +innodb_zip.wl5522_debug_zip : MDEV-11600 - Operating system error number 2 +innodb_zip.wl6501_scale_1 : MDEV-13254 - Timeout, MDEV-14104 - Error 192 #---------------------------------------------------------------- +maria.alter : Modified in 10.3.7 maria.insert_select : MDEV-12757 - Timeout +maria.lock : Modified in 10.3.7 +maria.maria : MDEV-14430 - Extra warning #---------------------------------------------------------------- -mariabackup.* : suite.pm and .opt modified in 10.2.7 - -mariabackup.full_backup : Modified in 10.2.7 -mariabackup.huge_lsn : Added in 10.2.8 -mariabackup.incremental_backup : Modified in 10.2.8 -mariabackup.incremental_encrypted : Modified in 10.2.7 -mariabackup.partial : Modified in 10.2.7 -mariabackup.partial_exclude : Modified in 10.2.7 -mariabackup.small_ibd : Modified in 10.2.7 -mariabackup.xb_aws_key_management : Modified in 10.2.7 -mariabackup.xb_compressed_encrypted : Modified in 10.2.7 -mariabackup.xb_file_key_management : Modified in 10.2.7 -mariabackup.xb_partition : Modified in 10.2.7 -mariabackup.xbstream : Modified in 10.2.7 +mariabackup.absolute_ibdata_paths : Added in 10.2.15 +mariabackup.apply-log-only : MDEV-14192 - Assertion failure +mariabackup.apply-log-only-incr : MDEV-14192 - Assertion failure +mariabackup.backup_ssl : 
Added in 10.2.15 +mariabackup.data_directory : MDEV-15270 - Error on exec +mariabackup.incremental_backup : MDEV-14192 - Assertion failure +mariabackup.incremental_encrypted : MDEV-14188 - Wrong result, MDEV-15667 - timeout +mariabackup.mdev-14447 : MDEV-15201 - Timeout +mariabackup.partial_exclude : MDEV-15270 - Error on exec +mariabackup.unsupported_redo : Modified in 10.2.15 +mariabackup.xbstream : MDEV-14192 - Crash +mariabackup.xb_aws_key_management : MDEV-15680 - Error: xtrabackup_copy_logfile() failed +mariabackup.xb_compressed_encrypted : MDEV-14812 - Segmentation fault +mariabackup.xb_history : MDEV-16268 - Error on exec +mariabackup.xb_page_compress : MDEV-14810 - status: 1, errno: 11 #---------------------------------------------------------------- +mroonga/storage.* : MDEV-16275 - Wrong result + mroonga/storage.index_multiple_column_unique_datetime_index_read : MDEV-8643 - Valgrind #---------------------------------------------------------------- -multi_source.info_logs : MDEV-12629 - Valgrind -multi_source.mdev-9544 : Added in 10.2.7 +multi_source.gtid : MDEV-14202 - Crash +multi_source.info_logs : MDEV-12629 - Valgrind, MDEV-10042 - wrong result multi_source.reset_slave : MDEV-10690 - Wrong result multi_source.simple : MDEV-4633 - Wrong result #---------------------------------------------------------------- -parts.longname : Added in 10.2.7 -parts.partition_debug_innodb : MDEV-10891 - Can't create UNIX socket -parts.quoting : Added in 10.2.7 +parts.partition_alter_innodb : Include file modified in 10.2.15 +parts.partition_alter_maria : Include file modified in 10.2.15 +parts.partition_alter_myisam : Include file modified in 10.2.15 +parts.partition_auto_increment_maria : MDEV-14430 - Extra warning +parts.partition_debug_innodb : MDEV-10891 - Can't create UNIX socket; MDEV-15095 - Table doesn't exist +parts.show_create : Added in 10.3.7 #---------------------------------------------------------------- @@ -357,16 +366,21 @@ percona.* : MDEV-10997 - Not maintained #---------------------------------------------------------------- -perfschema.bad_option_2 : Modified in 10.2.7 +perfschema.bad_option_1 : MDEV-13892 - Timeout perfschema.bad_option_3 : MDEV-12728 - Timeout on Power +perfschema.bad_option_5 : MDEV-14197 - Timeout +perfschema.dml_file_instances : MDEV-15179 - Wrong result perfschema.hostcache_ipv4_addrinfo_again_allow : MDEV-12759 - Crash +perfschema.hostcache_ipv4_max_con : Modified in 10.2.15 perfschema.hostcache_ipv6_addrinfo_again_allow : MDEV-12752 - Crash perfschema.hostcache_ipv6_addrinfo_bad_allow : MDEV-13260 - Crash +perfschema.hostcache_ipv6_max_con : Modified in 10.2.15 perfschema.hostcache_ipv6_ssl : MDEV-10696 - Crash -perfschema.privilege_table_io : Modified in 10.2.8 perfschema.setup_actors : MDEV-10679 - Crash +perfschema.socket_connect : MDEV-15677 - Wrong result +perfschema.socket_summary_by_event_name_func : MDEV-10622 - Wrong result perfschema.stage_mdl_procedure : MDEV-11545 - Missing row -perfschema.start_server_1_digest : Added in 10.2.7 +perfschema.threads_mysql : MDEV-10677 - Wrong result #---------------------------------------------------------------- @@ -375,44 +389,74 @@ perfschema_stress.* : MDEV-10996 - Not maintained #---------------------------------------------------------------- plugins.feedback_plugin_send : MDEV-7932, MDEV-11118 - Connection problems and such +plugins.server_audit : Modified in 10.2.15 +plugins.thread_pool_server_audit : MDEV-14295 - Wrong result #---------------------------------------------------------------- 
-rocksdb.* : MyRocks is alpha-quality and tests are unstable +rocksdb.* : MDEV-12474 and more, tests are unstable #---------------------------------------------------------------- -roles.current_role_view-12666 : Added in 10.2.7 -roles.show_create_database-10463 : Added in 10.2.7 - -#---------------------------------------------------------------- - -rpl.circular_serverid0 : Added in 10.2.7 rpl.rpl_binlog_errors : MDEV-12742 - Crash rpl.rpl_binlog_index : MDEV-9501 - Failed registering on master -rpl.rpl_domain_id_filter_io_crash : MDEV-12729 - Timeout in include file +rpl.rpl_colSize : MDEV-16112 - Server crash +rpl.rpl_ctype_latin1 : MDEV-14813 - Wrong result on Mac +rpl.rpl_domain_id_filter_io_crash : MDEV-12729 - Timeout in include file, MDEV-13677 - Server crash rpl.rpl_domain_id_filter_restart : MDEV-10684 - Wrong result -rpl.rpl_gtid_crash : MDEV-9501 - Failed registering on master +rpl.rpl_extra_col_master_myisam : MDEV-14203 - Extra warning +rpl.rpl_gtid_crash : MDEV-9501 - Failed registering on master, MDEV-13643 - Lost connection +rpl.rpl_gtid_delete_domain : MDEV-14463 - Timeout rpl.rpl_gtid_errorhandling : MDEV-13261 - Crash -rpl.rpl_gtid_stop_start : MDEV-11621 - Table marked as crashed, MDEV-12731 - Valgrind +rpl.rpl_gtid_reconnect : MDEV-14497 - Crash +rpl.rpl_insert_id : MDEV-15197 - Wrong result rpl.rpl_mariadb_slave_capability : MDEV-11018 - Extra lines in binlog -rpl.rpl_mdev-11092 : Added in 10.2.7 -rpl.rpl_parallel : MDEV-12730 - Assertion failure +rpl.rpl_mdev12179 : Modified in 10.3.7 +rpl.rpl_mdev382 : Modified in 10.3.7 +rpl.rpl_mdev6020 : MDEV-15272 - Server crash +rpl.rpl_mixed_mixing_engines : MDEV-14489 - Sync slave with master failed +rpl.rpl_non_direct_mixed_mixing_engines : MDEV-14489 - Sync slave with master failed +rpl.rpl_non_direct_row_mixing_engines : MDEV-14491 - Long semaphore wait +rpl.rpl_non_direct_stm_mixing_engines : MDEV-14489 - Failed sync_slave_with_master +rpl.rpl_parallel : MDEV-10653 - Timeouts +rpl.rpl_parallel_conflicts : MDEV-15272 - Server crash rpl.rpl_parallel_mdev6589 : MDEV-12979 - Assertion failure -rpl.rpl_parallel_optimistic : Modified in 10.2.7 -rpl.rpl_parallel_optimistic_nobinlog : MDEV-12746 - Timeouts, mismatch +rpl.rpl_parallel_optimistic : MDEV-15278 - Failed to sync with master +rpl.rpl_parallel_optimistic_nobinlog : MDEV-15278 - Failed to sync with master rpl.rpl_parallel_retry : MDEV-11119 - Crash -rpl.rpl_temporal_mysql56_to_mariadb53 : MDEV-9501 - Failed registering on master -rpl.rpl_reset_slave_fail : Added in 10.2.8 -rpl.rpl_semi_sync_uninstall_plugin : MDEV-10892 - Assertion failure +rpl.rpl_parallel_temptable : MDEV-10356 - Crash +rpl.rpl_row_basic_2myisam : MDEV-13875 - command "diff_files" failed +rpl.rpl_row_drop_create_temp_table : MDEV-14487 - Wrong result +rpl.rpl_row_img_eng_min : MDEV-13875 - diff_files failed +rpl.rpl_row_img_eng_noblob : MDEV-13875 - command "diff_files" failed +rpl.rpl_row_index_choice : MDEV-15196 - Slave crash +rpl.rpl_row_mixing_engines : MDEV-14491 - Long semaphore wait +rpl.rpl_row_until : MDEV-14052 - Master will not send events with checksum +rpl.rpl_semi_sync : MDEV-11220 - Wrong result +rpl.rpl_semi_sync_after_sync : MDEV-14366 - Wrong result +rpl.rpl_semi_sync_after_sync_row : MDEV-14366 - Wrong result +rpl.rpl_semi_sync_uninstall_plugin : MDEV-7140 - Assorted failures +rpl.rpl_semisync_ali_issues : MDEV-16272 - Wrong result rpl.rpl_set_statement_default_master : MDEV-13258 - Extra warning +rpl.rpl_show_slave_hosts : MDEV-10681 - Crash rpl.rpl_skip_replication : MDEV-13258 - 
Extra warning rpl.rpl_slave_grp_exec : MDEV-10514 - Deadlock +rpl.rpl_slave_load_tmpdir_not_exist : MDEV-14203 - Extra warning rpl.rpl_slow_query_log : MDEV-13250 - Test abort rpl.rpl_sp_effects : MDEV-13249 - Crash rpl.rpl_start_stop_slave : MDEV-13567 - Sync slave timeout +rpl.rpl_stm_reset_slave : MDEV-16274 - Connection attributes were truncated +rpl.rpl_mixed_implicit_commit_binlog : Include file modified in 10.2.15 +rpl.rpl_row_implicit_commit_binlog : Include file modified in 10.2.15 +rpl.rpl_stm_implicit_commit_binlog : Include file modified in 10.2.15 +rpl.rpl_stm_mixing_engines : MDEV-14489 - Sync slave with master failed rpl.rpl_stm_multi_query : MDEV-9501 - Failed registering on master -rpl.rpl_upgrade_master_info : MDEV-11620 - Table marked as crashed +rpl.rpl_stm_relay_ign_space : MDEV-14360 - Test assertion +rpl.rpl_stm_stop_middle_group : MDEV-13791 - Server crash +rpl.rpl_sync : MDEV-13830 - Assertion failure +rpl.rpl_temporal_mysql56_to_mariadb53 : MDEV-9501 - Failed registering on master +rpl.rpl_typeconv : Include file modified in 10.2.15 +rpl.sec_behind_master-5114 : MDEV-13878 - Wrong result rpl/extra/rpl_tests.* : MDEV-10994 - Not maintained @@ -420,6 +464,10 @@ rpl/extra/rpl_tests.* : MDEV-10994 - Not maintained spider.basic_sql : MDEV-11186 - Internal check fails +spider/bg.direct_aggregate : MDEV-7098 - Packets out of order +spider/bg.ha_part : MDEV-7914 - Crash (only fixed in 10.3) +spider/bg.spider3_fixes : MDEV-12639 - Syntax error + spider/handler.* : MDEV-10990 - Not maintained #---------------------------------------------------------------- @@ -433,90 +481,67 @@ storage_engine.* : Not always timely maintained #---------------------------------------------------------------- -sys_vars.back_log_basic : Modified in 10.2.8 -sys_vars.delay_key_write_func : Modified in 10.2.7 -sys_vars.innodb_sched_priority_cleaner_basic : Modified in 10.2.7 -sys_vars.rpl_init_slave_func : MDEV-10149 - Test assertion -sys_vars.tmp_disk_table_size_basic : Added in 10.2.7 -sys_vars.tmp_disk_table_size_func : Added in 10.2.7 -sys_vars.tmp_memory_table_size_basic : Added in 10.2.7 +sys_vars.innodb_buffer_pool_dump_at_shutdown_basic : MDEV-14280 - Unexpected error +sys_vars.innodb_stats_include_delete_marked_basic : Modified in 10.3.7 +sys_vars.max_prepared_stmt_count_basic : Modified in 10.2.15 +sys_vars.rpl_init_slave_func : MDEV-10149 - Test assertion +sys_vars.slow_query_log_func : MDEV-14273 - Wrong result +sys_vars.sysvars_innodb : Modified in 10.3.7 +sys_vars.sysvars_server_embedded : Opt file added in 10.2.15 +sys_vars.sysvars_server_notembedded : Opt file added in 10.2.15 +sys_vars.thread_cache_size_func : MDEV-11775 - Wrong result #---------------------------------------------------------------- -tokudb.background_job_manager : Modified in 10.2.7 -tokudb.bug-1657908 : Added in 10.2.7 tokudb.change_column_all_1000_10 : MDEV-12640 - Lost connection tokudb.change_column_bin : MDEV-12640 - Lost connection tokudb.change_column_char : MDEV-12822 - Lost connection -tokudb.dir_cmd : Added in 10.2.7 +tokudb.dir_per_db : MDEV-11537 - Wrong result +tokudb.hotindex-insert-0 : MDEV-15271 - Timeout +tokudb.hotindex-insert-2 : MDEV-15271 - Timeout tokudb.hotindex-insert-bigchar : MDEV-12640 - Crash +tokudb.hotindex-update-0 : MDEV-15198 - Timeout tokudb.hotindex-update-1 : MDEV-12640 - Crash -tokudb.kill_query_blocked_in_lt : Added in 10.2.8 -tokudb.locks-select-update-3 : Modified in 10.2.8 tokudb.rows-32m-rand-insert : MDEV-12640 - Crash tokudb.rows-32m-seq-insert : MDEV-12640 - Crash 
+tokudb.savepoint-5 : MDEV-15280 - Wrong result +tokudb.type_datetime : MDEV-15193 - Wrong result -tokudb_mariadb.mdev12972 : Modified in 10.2.8 -tokudb_mariadb.mdev6657 : MDEV-12737 - Mismatch or valgrind +tokudb_alter_table.hcad_all_add2 : MDEV-15269 - Timeout -tokudb_backup.* : MDEV-11001 - Missing include file; suite.pm modified in 10.2.7 +tokudb_bugs.db917 : Modified in 10.2.15 +tokudb_bugs.xa : MDEV-11804 - Lock wait timeout + +tokudb_backup.* : MDEV-11001 - Missing include file tokudb_sys_vars.* : MDEV-11001 - Missing include file tokudb_rpl.* : MDEV-11001 - Missing include file -tokudb_backup.backup_master_info : Added in 10.2.7 -tokudb_backup.backup_master_state : Added in 10.2.7 -tokudb_backup.empty_slave_info_file : Added in 10.2.7 -tokudb_backup.innodb_use_native_aio_enabled : Added in 10.2.7 -tokudb_backup.rpl_safe_slave : Include file modified in 10.2.8 -tokudb_backup.rpl_tokudb_commit_sync : Added in 10.2.7 - -tokudb_bugs.db233 : Modified in 10.2.7 -tokudb_bugs.leak172 : Modified in 10.2.7 - tokudb_parts.partition_alter4_tokudb : MDEV-12640 - Lost connection -rpl-tokudb.rpl_not_null_tokudb : Modified in 10.2.8 -rpl-tokudb.rpl_parallel_tokudb_delete_pk : Opt file modified in 10.2.8 -rpl-tokudb.rpl_parallel_tokudb_update_pk_uc0_lookup0 : Modified in 10.2.8 -rpl-tokudb.rpl_parallel_tokudb_write_pk : Modified in 10.2.8 -rpl-tokudb.rpl_rfr_disable_on_expl_pk_absence : Added in 10.2.8 -rpl-tokudb.rpl_row_basic_3tokudb : Modified in 10.2.8 -rpl-tokudb.rpl_tokudb_commit_after_flush : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_insert_id : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_insert_id_pk : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_multi_update : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_multi_update2 : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_multi_update3 : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_rfr_partition_table : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_row_crash_safe : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_row_img_blobs : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_row_img_eng_full : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_row_img_eng_min : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_row_img_eng_noblob : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_row_img_idx_full : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_row_img_idx_min : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_row_img_idx_noblob : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_row_log : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_row_lower_case_table_names : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_row_sp003 : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_row_sp006 : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_row_trig004 : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_stm_log : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_stm_mixed_crash_safe : Added in 10.2.8 -rpl-tokudb.rpl_tokudb_stm_mixed_lower_case_table_names : Added in 10.2.8 - #---------------------------------------------------------------- +unit.conc_basic-t : MDEV-15286 - not ok 7 - test_reconnect_maxpackage +unit.conc_misc : MDEV-14811 - not ok 12 - test_conc49 unit.conc_ps_bugs : MDEV-13252 - not ok 44 test_bug4236 +unit.lf : MDEV-12897 - Signal 11 thrown +unit.my_atomic : MDEV-15670 - Signal 11 thrown #---------------------------------------------------------------- -vcol.cross_db : Added in 10.2.7 -vcol.upgrade : Added in 10.2.7 +vcol.partition : Modified in 10.2.15 #---------------------------------------------------------------- -wsrep.pool_of_threads : MDEV-12234 - GLIBCXX_3.4.20 not found; modified in 10.2.7 +versioning.partition : Modified in 10.3.7 +versioning.truncate : Modified in 10.3.7 + 
+#---------------------------------------------------------------- + +wsrep.binlog_format : MDEV-11532 - Could not execute check-testcase +wsrep.foreign_key : MDEV-14725 - WSREP has not yet prepared node +wsrep.mdev_6832 : MDEV-14195 - Check testcase failed +wsrep.pool_of_threads : MDEV-12234 - GLIBCXX_3.4.20 not found +wsrep.variables : MDEV-14311 - Wrong result wsrep_info.plugin : MDEV-13569 - No nodes coming from prim view diff --git a/mysys/CMakeLists.txt b/mysys/CMakeLists.txt index 6988d66376d..93fca192b3a 100644 --- a/mysys/CMakeLists.txt +++ b/mysys/CMakeLists.txt @@ -43,7 +43,7 @@ SET(MYSYS_SOURCES array.c charset-def.c charset.c checksum.c my_default.c my_getncpus.c my_safehash.c my_chmod.c my_rnd.c my_uuid.c wqueue.c waiting_threads.c ma_dyncol.c ../sql-common/my_time.c my_rdtsc.c my_context.c psi_noop.c - my_atomic_writes.c + my_atomic_writes.c my_likely.c file_logger.c my_dlerror.c) IF (WIN32) @@ -74,7 +74,7 @@ ENDIF() ADD_CONVENIENCE_LIBRARY(mysys ${MYSYS_SOURCES}) TARGET_LINK_LIBRARIES(mysys dbug strings mysys_ssl ${ZLIB_LIBRARY} - ${LIBNSL} ${LIBM} ${LIBRT} ${LIBDL} ${LIBSOCKET} ${LIBEXECINFO} ${CRC32_VPMSUM_LIBRARY}) + ${LIBNSL} ${LIBM} ${LIBRT} ${LIBDL} ${LIBSOCKET} ${LIBEXECINFO} ${CRC32_LIBRARY}) DTRACE_INSTRUMENT(mysys) IF(HAVE_BFD_H) diff --git a/mysys/file_logger.c b/mysys/file_logger.c index 35a077c4391..3565397c79a 100644 --- a/mysys/file_logger.c +++ b/mysys/file_logger.c @@ -227,7 +227,7 @@ int logger_printf(LOGGER_HANDLE *log, const char *fmt, ...) void logger_init_mutexes() { #ifdef HAVE_PSI_INTERFACE - if (PSI_server) + if (unlikely(PSI_server)) PSI_server->register_mutex("sql_logger", mutex_list, 1); #endif } diff --git a/mysys/hash.c b/mysys/hash.c index d9952afe318..d89f175c161 100644 --- a/mysys/hash.c +++ b/mysys/hash.c @@ -748,7 +748,7 @@ my_bool my_hash_update(HASH *hash, uchar *record, uchar *old_key, } -uchar *my_hash_element(HASH *hash, ulong idx) +uchar *my_hash_element(HASH *hash, size_t idx) { if (idx < hash->records) return dynamic_element(&hash->array,idx,HASH_LINK*)->data; diff --git a/mysys/lf_hash.c b/mysys/lf_hash.c index a7c07679993..60dcfd8bed4 100644 --- a/mysys/lf_hash.c +++ b/mysys/lf_hash.c @@ -1,5 +1,5 @@ -/* Copyright (c) 2006, 2010, Oracle and/or its affiliates. - Copyright (c) 2009, 2016, MariaDB +/* Copyright (c) 2006, 2018, Oracle and/or its affiliates. 
+ Copyright (c) 2009, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -102,8 +102,8 @@ retry: do { /* PTR() isn't necessary below, head is a dummy node */ cursor->curr= (LF_SLIST *)(*cursor->prev); lf_pin(pins, 1, cursor->curr); - } while (*cursor->prev != (intptr)cursor->curr && LF_BACKOFF()); - + } while (my_atomic_loadptr((void**)cursor->prev) != cursor->curr && + LF_BACKOFF()); for (;;) { if (unlikely(!cursor->curr)) diff --git a/mysys/mf_cache.c b/mysys/mf_cache.c index a3426889a82..478900ddb2a 100644 --- a/mysys/mf_cache.c +++ b/mysys/mf_cache.c @@ -20,36 +20,6 @@ #include "my_static.h" #include "mysys_err.h" -/** - Remove an open tempfile so that it doesn't survive if we crash - - If the operating system doesn't support this, just remember - the file name for later removal -*/ - -static my_bool cache_remove_open_tmp(IO_CACHE *cache __attribute__((unused)), - const char *name) -{ -#if O_TEMPORARY == 0 -#if !defined(CANT_DELETE_OPEN_FILES) - /* The following should always succeed */ - (void) my_delete(name,MYF(MY_WME | ME_NOINPUT)); -#else - int length; - if (!(cache->file_name= - (char*) my_malloc((length=strlen(name)+1),MYF(MY_WME)))) - { - my_close(cache->file,MYF(0)); - cache->file = -1; - errno=my_errno=ENOMEM; - return 1; - } - memcpy(cache->file_name,name,length); -#endif -#endif /* O_TEMPORARY == 0 */ - return 0; -} - /** Open tempfile cached by IO_CACHE @@ -88,14 +58,11 @@ my_bool real_open_cached_file(IO_CACHE *cache) char name_buff[FN_REFLEN]; int error=1; DBUG_ENTER("real_open_cached_file"); - if ((cache->file=create_temp_file(name_buff, cache->dir, + if ((cache->file= create_temp_file(name_buff, cache->dir, cache->prefix[0] ? cache->prefix : 0, - (O_RDWR | O_BINARY | O_TRUNC | - O_TEMPORARY | O_SHORT_LIVED), - MYF(MY_WME))) >= 0) + O_BINARY, MYF(MY_WME | MY_TEMPORARY))) >= 0) { error=0; - cache_remove_open_tmp(cache, name_buff); } DBUG_RETURN(error); } diff --git a/mysys/mf_tempfile.c b/mysys/mf_tempfile.c index 62b3e09747f..0ff7066fd95 100644 --- a/mysys/mf_tempfile.c +++ b/mysys/mf_tempfile.c @@ -22,7 +22,9 @@ #include #endif - +#ifdef HAVE_MKOSTEMP +#define mkstemp(A) mkostemp(A, O_CLOEXEC) +#endif /* @brief @@ -45,8 +47,11 @@ implementation, it's main use is to generate a file with a name that does not already exist. - When passing O_TEMPORARY flag in "mode" the file should - be automatically deleted + When passing MY_TEMPORARY flag in MyFlags the file is automatically deleted + + "mode" bits that always must be used for newly created files with + unique file names (O_EXCL | O_TRUNC | O_CREAT | O_RDWR) are added + automatically, and shouldn't be specified by the caller. The implementation using mkstemp should be considered the reference implementation when adding a new or modifying an @@ -55,51 +60,55 @@ */ File create_temp_file(char *to, const char *dir, const char *prefix, - int mode __attribute__((unused)), - myf MyFlags __attribute__((unused))) + int mode, myf MyFlags) { File file= -1; -#ifdef __WIN__ - TCHAR path_buf[MAX_PATH-14]; -#endif DBUG_ENTER("create_temp_file"); DBUG_PRINT("enter", ("dir: %s, prefix: %s", dir, prefix)); -#if defined (__WIN__) + DBUG_ASSERT((mode & (O_EXCL | O_TRUNC | O_CREAT | O_RDWR)) == 0); - /* - Use GetTempPath to determine path for temporary files. - This is because the documentation for GetTempFileName - has the following to say about this parameter: - "If this parameter is NULL, the function fails." 
- */ - if (!dir) - { - if(GetTempPath(sizeof(path_buf), path_buf) > 0) - dir = path_buf; - } - /* - Use GetTempFileName to generate a unique filename, create - the file and release it's handle - - uses up to the first three letters from prefix - */ - if (GetTempFileName(dir, prefix, 0, to) == 0) - DBUG_RETURN(-1); + mode|= O_TRUNC | O_CREAT | O_RDWR; /* not O_EXCL, see Windows code below */ - DBUG_PRINT("info", ("name: %s", to)); - - /* - Open the file without the "open only if file doesn't already exist" - since the file has already been created by GetTempFileName - */ - if ((file= my_open(to, (mode & ~O_EXCL), MyFlags)) < 0) +#ifdef _WIN32 { - /* Open failed, remove the file created by GetTempFileName */ - int tmp= my_errno; - (void) my_delete(to, MYF(0)); - my_errno= tmp; - } + TCHAR path_buf[MAX_PATH-14]; + /* + Use GetTempPath to determine path for temporary files. + This is because the documentation for GetTempFileName + has the following to say about this parameter: + "If this parameter is NULL, the function fails." + */ + if (!dir) + { + if(GetTempPath(sizeof(path_buf), path_buf) > 0) + dir = path_buf; + } + /* + Use GetTempFileName to generate a unique filename, create + the file and release it's handle + - uses up to the first three letters from prefix + */ + if (GetTempFileName(dir, prefix, 0, to) == 0) + DBUG_RETURN(-1); + DBUG_PRINT("info", ("name: %s", to)); + + if (MyFlags & MY_TEMPORARY) + mode|= O_SHORT_LIVED | O_TEMPORARY; + + /* + Open the file without O_EXCL flag + since the file has already been created by GetTempFileName + */ + if ((file= my_open(to, mode, MyFlags)) < 0) + { + /* Open failed, remove the file created by GetTempFileName */ + int tmp= my_errno; + (void) my_delete(to, MYF(0)); + my_errno= tmp; + } + } #elif defined(HAVE_MKSTEMP) { char prefix_buff[30]; @@ -119,8 +128,8 @@ File create_temp_file(char *to, const char *dir, const char *prefix, } strmov(convert_dirname(to,dir,NullS),prefix_buff); org_file=mkstemp(to); - if (mode & O_TEMPORARY) - (void) my_delete(to, MYF(MY_WME | ME_NOINPUT)); + if (org_file >= 0 && (MyFlags & MY_TEMPORARY)) + (void) my_delete(to, MYF(MY_WME)); file=my_register_filename(org_file, to, FILE_BY_MKSTEMP, EE_CANTCREATEFILE, MyFlags); /* If we didn't manage to register the name, remove the temp file */ @@ -132,46 +141,10 @@ File create_temp_file(char *to, const char *dir, const char *prefix, my_errno=tmp; } } -#elif defined(HAVE_TEMPNAM) - { - extern char **environ; - - char *res,**old_env,*temp_env[1]; - if (dir && !dir[0]) - { /* Change empty string to current dir */ - to[0]= FN_CURLIB; - to[1]= 0; - dir=to; - } - - old_env= (char**) environ; - if (dir) - { /* Don't use TMPDIR if dir is given */ - environ=(const char**) temp_env; - temp_env[0]=0; - } - - if ((res=tempnam((char*) dir, (char*) prefix))) - { - strmake(to,res,FN_REFLEN-1); - (*free)(res); - file=my_create(to,0, - (int) (O_RDWR | O_BINARY | O_TRUNC | O_EXCL | O_NOFOLLOW | - O_TEMPORARY | O_SHORT_LIVED), - MYF(MY_WME)); - - } - else - { - DBUG_PRINT("error",("Got error: %d from tempnam",errno)); - } - - environ=(const char**) old_env; - } #else #error No implementation found for create_temp_file #endif if (file >= 0) - thread_safe_increment(my_tmp_file_created,&THR_LOCK_open); + statistic_increment(my_tmp_file_created,&THR_LOCK_open); DBUG_RETURN(file); } diff --git a/mysys/my_default.c b/mysys/my_default.c index b7819ef99f0..f2d5bde41d5 100644 --- a/mysys/my_default.c +++ b/mysys/my_default.c @@ -1099,10 +1099,12 @@ void print_defaults(const char *conf_file, const char 
**groups) } } puts("\nThe following options may be given as the first argument:\n\ ---print-defaults Print the program argument list and exit.\n\ ---no-defaults Don't read default options from any option file.\n\ ---defaults-file=# Only read default options from the given file #.\n\ ---defaults-extra-file=# Read this file after the global files are read."); +--print-defaults Print the program argument list and exit.\n\ +--no-defaults Don't read default options from any option file.\n\ +The following specify which files/extra groups are read (specified before remaining options):\n\ +--defaults-file=# Only read default options from the given file #.\n\ +--defaults-extra-file=# Read this file after the global files are read.\n\ +--defaults-group-suffix=# Additionally read default groups with # appended as a suffix."); } diff --git a/mysys/my_fopen.c b/mysys/my_fopen.c index 213eac82040..59baeaec744 100644 --- a/mysys/my_fopen.c +++ b/mysys/my_fopen.c @@ -61,15 +61,13 @@ FILE *my_fopen(const char *filename, int flags, myf MyFlags) int filedesc= my_fileno(fd); if ((uint)filedesc >= my_file_limit) { - thread_safe_increment(my_stream_opened,&THR_LOCK_open); + statistic_increment(my_stream_opened,&THR_LOCK_open); DBUG_RETURN(fd); /* safeguard */ } - mysql_mutex_lock(&THR_LOCK_open); my_file_info[filedesc].name= (char*) my_strdup(filename,MyFlags); - my_stream_opened++; - my_file_total_opened++; + statistic_increment(my_stream_opened, &THR_LOCK_open); + statistic_increment(my_file_total_opened, &THR_LOCK_open); my_file_info[filedesc].type= STREAM_BY_FOPEN; - mysql_mutex_unlock(&THR_LOCK_open); DBUG_PRINT("exit",("stream: %p", fd)); DBUG_RETURN(fd); } @@ -161,13 +159,22 @@ FILE *my_freopen(const char *path, const char *mode, FILE *stream) int my_fclose(FILE *fd, myf MyFlags) { int err,file; + char *name= NULL; DBUG_ENTER("my_fclose"); DBUG_PRINT("my",("stream: %p MyFlags: %lu", fd, MyFlags)); - mysql_mutex_lock(&THR_LOCK_open); file= my_fileno(fd); + if ((uint) file < my_file_limit && my_file_info[file].type != UNOPEN) + { + name= my_file_info[file].name; + my_file_info[file].name= NULL; + my_file_info[file].type= UNOPEN; + } #ifndef _WIN32 - err= fclose(fd); + do + { + err= fclose(fd); + } while (err == -1 && errno == EINTR); #else err= my_win_fclose(fd); #endif @@ -176,16 +183,15 @@ int my_fclose(FILE *fd, myf MyFlags) my_errno=errno; if (MyFlags & (MY_FAE | MY_WME)) my_error(EE_BADCLOSE, MYF(ME_BELL+ME_WAITTANG), - my_filename(file),errno); + name,errno); } else - my_stream_opened--; - if ((uint) file < my_file_limit && my_file_info[file].type != UNOPEN) + statistic_decrement(my_stream_opened, &THR_LOCK_open); + + if (name) { - my_file_info[file].type = UNOPEN; - my_free(my_file_info[file].name); + my_free(name); } - mysql_mutex_unlock(&THR_LOCK_open); DBUG_RETURN(err); } /* my_fclose */ @@ -215,13 +221,12 @@ FILE *my_fdopen(File Filedes, const char *name, int Flags, myf MyFlags) } else { - mysql_mutex_lock(&THR_LOCK_open); - my_stream_opened++; + statistic_increment(my_stream_opened, &THR_LOCK_open); if ((uint) Filedes < (uint) my_file_limit) { if (my_file_info[Filedes].type != UNOPEN) { - my_file_opened--; /* File is opened with my_open ! */ + statistic_decrement(my_file_opened, &THR_LOCK_open); /* File is opened with my_open ! 
*/ } else { @@ -229,7 +234,6 @@ FILE *my_fdopen(File Filedes, const char *name, int Flags, myf MyFlags) } my_file_info[Filedes].type = STREAM_BY_FDOPEN; } - mysql_mutex_unlock(&THR_LOCK_open); } DBUG_PRINT("exit",("stream: %p", fd)); diff --git a/mysys/my_init.c b/mysys/my_init.c index 9bd1185a3bf..972750da0ff 100644 --- a/mysys/my_init.c +++ b/mysys/my_init.c @@ -121,6 +121,9 @@ my_bool my_init(void) DBUG_PRINT("exit", ("home: '%s'", home_dir)); #ifdef __WIN__ win32_init_tcp_ip(); +#endif +#ifdef CHECK_UNLIKELY + init_my_likely(); #endif DBUG_RETURN(0); } @@ -155,17 +158,36 @@ void my_end(int infoflag) } if ((infoflag & MY_CHECK_ERROR) || print_info) + { /* Test if some file is left open */ + char ebuff[512]; + uint i, open_files, open_streams; - { /* Test if some file is left open */ - if (my_file_opened | my_stream_opened) + for (open_streams= open_files= i= 0 ; i < my_file_limit ; i++) + { + if (my_file_info[i].type == UNOPEN) + continue; + if (my_file_info[i].type == STREAM_BY_FOPEN || + my_file_info[i].type == STREAM_BY_FDOPEN) + open_streams++; + else + open_files++; + +#ifdef EXTRA_DEBUG + fprintf(stderr, EE(EE_FILE_NOT_CLOSED), my_file_info[i].name, i); + fputc('\n', stderr); +#endif + } + if (open_files || open_streams) { - char ebuff[512]; my_snprintf(ebuff, sizeof(ebuff), EE(EE_OPEN_WARNING), - my_file_opened, my_stream_opened); + open_files, open_streams); my_message_stderr(EE_OPEN_WARNING, ebuff, ME_BELL); DBUG_PRINT("error", ("%s", ebuff)); - my_print_open_files(); } + +#ifdef CHECK_UNLIKELY + end_my_likely(info_file); +#endif } free_charsets(); my_error_unregister_all(); diff --git a/mysys/my_likely.c b/mysys/my_likely.c new file mode 100644 index 00000000000..c6fca5b7146 --- /dev/null +++ b/mysys/my_likely.c @@ -0,0 +1,173 @@ +/* Copyright (c) 2018, MariaDB Corporation Ab. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + +/* + Checks that my_likely/my_unlikely is correctly used + + Note that we can't use mysql_mutex or my_malloc here as these + uses likely() macros and the likely_mutex would be used twice +*/ + +#include "mysys_priv.h" +#include +#include + +#ifndef CHECK_UNLIKEY +my_bool likely_inited= 0; + +typedef struct st_likely_entry +{ + const char *key; + size_t key_length; + uint line; + ulonglong ok,fail; +} LIKELY_ENTRY; + +static uchar *get_likely_key(LIKELY_ENTRY *part, size_t *length, + my_bool not_used __attribute__((unused))) +{ + *length= part->key_length; + return (uchar*) part->key; +} + +pthread_mutex_t likely_mutex; +HASH likely_hash; + +void init_my_likely() +{ + /* Allocate big enough to avoid malloc calls */ + my_hash_init2(&likely_hash, 10000, &my_charset_bin, + 1024, 0, 0, + (my_hash_get_key) get_likely_key, 0, + free, HASH_UNIQUE); + likely_inited= 1; + pthread_mutex_init(&likely_mutex, MY_MUTEX_INIT_FAST); +} + +static int likely_cmp(LIKELY_ENTRY **a, LIKELY_ENTRY **b) +{ + int cmp; + if ((cmp= strcmp((*a)->key, (*b)->key))) + return cmp; + return (int) ((*a)->line - (*b)->line); +} + + +void end_my_likely(FILE *out) +{ + uint i; + FILE *likely_file; + my_bool do_close= 0; + LIKELY_ENTRY **sort_ptr= 0; + + likely_inited= 0; + + if (!(likely_file= out)) + { + char name[80]; + sprintf(name, "/tmp/unlikely-%lu.out", (ulong) getpid()); + if ((likely_file= my_fopen(name, O_TRUNC | O_WRONLY, MYF(MY_WME)))) + do_close= 1; + else + likely_file= stderr; + } + fflush(likely_file); + fputs("Wrong likely/unlikely usage:\n", likely_file); + if (!(sort_ptr= (LIKELY_ENTRY**) + malloc(sizeof(LIKELY_ENTRY*) *likely_hash.records))) + { + fprintf(stderr, "ERROR: Out of memory in end_my_likely\n"); + goto err; + } + + for (i=0 ; i < likely_hash.records ; i++) + sort_ptr[i]= (LIKELY_ENTRY *) my_hash_element(&likely_hash, i); + + my_qsort(sort_ptr, likely_hash.records, sizeof(LIKELY_ENTRY*), + (qsort_cmp) likely_cmp); + + for (i=0 ; i < likely_hash.records ; i++) + { + LIKELY_ENTRY *entry= sort_ptr[i]; + if (entry->fail > entry->ok) + fprintf(likely_file, + "%50s line: %6u ok: %8lld fail: %8lld\n", + entry->key, entry->line, entry->ok, entry->fail); + } + fputs("\n", likely_file); + fflush(likely_file); +err: + free((void*) sort_ptr); + if (do_close) + my_fclose(likely_file, MYF(MY_WME)); + pthread_mutex_destroy(&likely_mutex); + my_hash_free(&likely_hash); +} + + +static LIKELY_ENTRY *my_likely_find(const char *file_name, uint line) +{ + char key[80], *pos; + LIKELY_ENTRY *entry; + size_t length; + + if (!likely_inited) + return 0; + + pos= strnmov(key, file_name, sizeof(key)-4); + int3store(pos+1, line); + length= (size_t) (pos-key)+4; + + pthread_mutex_lock(&likely_mutex); + if (!(entry= (LIKELY_ENTRY*) my_hash_search(&likely_hash, (uchar*) key, + length))) + { + if (!(entry= (LIKELY_ENTRY *) malloc(sizeof(*entry) + length))) + return 0; + entry->key= (char*) (entry+1); + memcpy((void*) entry->key, key, length); + entry->key_length= length; + entry->line= line; + entry->ok= entry->fail= 0; + + if (my_hash_insert(&likely_hash, (void*) entry)) + { + pthread_mutex_unlock(&likely_mutex); + free(entry); + return 0; + } + } + pthread_mutex_unlock(&likely_mutex); + return entry; +} + + +int my_likely_ok(const char *file_name, uint line) +{ + LIKELY_ENTRY *entry= my_likely_find(file_name, line); + 
if (entry) + entry->ok++; + return 0; +} + + +int my_likely_fail(const char *file_name, uint line) +{ + LIKELY_ENTRY *entry= my_likely_find(file_name, line); + if (entry) + entry->fail++; + return 0; +} +#endif /* CHECK_UNLIKEY */ diff --git a/mysys/my_open.c b/mysys/my_open.c index 3999810eb2e..54e53089da9 100644 --- a/mysys/my_open.c +++ b/mysys/my_open.c @@ -76,12 +76,18 @@ File my_open(const char *FileName, int Flags, myf MyFlags) int my_close(File fd, myf MyFlags) { int err; + char *name= NULL; DBUG_ENTER("my_close"); DBUG_PRINT("my",("fd: %d MyFlags: %lu",fd, MyFlags)); if (!(MyFlags & (MY_WME | MY_FAE))) MyFlags|= my_global_flags; - mysql_mutex_lock(&THR_LOCK_open); + if ((uint) fd < my_file_limit && my_file_info[fd].type != UNOPEN) + { + name= my_file_info[fd].name; + my_file_info[fd].name= NULL; + my_file_info[fd].type= UNOPEN; + } #ifndef _WIN32 do { @@ -96,15 +102,13 @@ int my_close(File fd, myf MyFlags) my_errno=errno; if (MyFlags & (MY_FAE | MY_WME)) my_error(EE_BADCLOSE, MYF(ME_BELL | ME_WAITTANG | (MyFlags & (ME_JUST_INFO | ME_NOREFRESH))), - my_filename(fd),errno); + name,errno); } - if ((uint) fd < my_file_limit && my_file_info[fd].type != UNOPEN) + if (name) { - my_free(my_file_info[fd].name); - my_file_info[fd].type = UNOPEN; + my_free(name); } - my_file_opened--; - mysql_mutex_unlock(&THR_LOCK_open); + statistic_decrement(my_file_opened, &THR_LOCK_open); DBUG_RETURN(err); } /* my_close */ @@ -134,15 +138,13 @@ File my_register_filename(File fd, const char *FileName, enum file_type { if ((uint) fd >= my_file_limit) { - thread_safe_increment(my_file_opened,&THR_LOCK_open); + statistic_increment(my_file_opened,&THR_LOCK_open); DBUG_RETURN(fd); /* safeguard */ } - mysql_mutex_lock(&THR_LOCK_open); my_file_info[fd].name = (char*) my_strdup(FileName, MyFlags); - my_file_opened++; - my_file_total_opened++; + statistic_increment(my_file_opened,&THR_LOCK_open); + statistic_increment(my_file_total_opened,&THR_LOCK_open); my_file_info[fd].type = type_of_file; - mysql_mutex_unlock(&THR_LOCK_open); DBUG_PRINT("exit",("fd: %d",fd)); DBUG_RETURN(fd); } @@ -159,26 +161,3 @@ File my_register_filename(File fd, const char *FileName, enum file_type } DBUG_RETURN(-1); } - - - - -#ifdef EXTRA_DEBUG - -void my_print_open_files(void) -{ - if (my_file_opened | my_stream_opened) - { - uint i; - for (i= 0 ; i < my_file_limit ; i++) - { - if (my_file_info[i].type != UNOPEN) - { - fprintf(stderr, EE(EE_FILE_NOT_CLOSED), my_file_info[i].name, i); - fputc('\n', stderr); - } - } - } -} - -#endif diff --git a/mysys/my_winfile.c b/mysys/my_winfile.c index ad87bf718d2..0762a95e827 100644 --- a/mysys/my_winfile.c +++ b/mysys/my_winfile.c @@ -528,7 +528,7 @@ FILE *my_win_fopen(const char *filename, const char *type) { FILE *file; int flags= 0; - DBUG_ENTER("my_win_open"); + DBUG_ENTER("my_win_fopen"); /* If we are not creating, then we need to use my_access to make sure @@ -585,7 +585,7 @@ int my_win_fclose(FILE *file) { File fd; - DBUG_ENTER("my_win_close"); + DBUG_ENTER("my_win_fclose"); fd= my_fileno(file); if(fd < 0) DBUG_RETURN(-1); diff --git a/mysys/psi_noop.c b/mysys/psi_noop.c index 6eecf56f797..f8fa2c92f63 100644 --- a/mysys/psi_noop.c +++ b/mysys/psi_noop.c @@ -763,6 +763,13 @@ struct PSI_bootstrap *PSI_hook= NULL; PSI *PSI_server= & PSI_noop; +/** + Global performance schema flag. + Indicate if the performance schema is enabled. + This flag is set at startup, and never changes. 
+*/ +my_bool pfs_enabled= FALSE; + void set_psi_server(PSI *psi) { PSI_server= psi; diff --git a/mysys/ptr_cmp.c b/mysys/ptr_cmp.c index 6d853a8db25..1880e60a811 100644 --- a/mysys/ptr_cmp.c +++ b/mysys/ptr_cmp.c @@ -27,14 +27,18 @@ * written in assembler. for example one in /usr/lib/libc/libc_hwcap*.so.1. * on Solaris, or on Windows inside C runtime library. * - * On Solaris, native implementation is also usually faster than the - * built-in memcmp supplied by GCC, so it is recommended to build + * On Solaris, native implementation is also usually faster than the + * built-in memcmp supplied by GCC, so it is recommended to build * with "-fno-builtin-memcmp" in CFLAGS if building with GCC on Solaris. */ -#if defined (__sun) || defined (_WIN32) +/* + Daniel Black's tests show that libc memcmp is generally faster than + ptr_cmp() at least on x86 and power8 platforms, so we use the libc + code as default for now +*/ + #define USE_NATIVE_MEMCMP 1 -#endif #ifdef USE_NATIVE_MEMCMP @@ -45,23 +49,19 @@ static int native_compare(size_t *length, unsigned char **a, unsigned char **b) return memcmp(*a, *b, *length); } -#else /* USE_NATIVE_MEMCMP */ +qsort2_cmp get_ptr_compare (size_t size __attribute__((unused))) +{ + return (qsort2_cmp) native_compare; +} + +#else /* USE_NATIVE_MEMCMP */ static int ptr_compare(size_t *compare_length, uchar **a, uchar **b); static int ptr_compare_0(size_t *compare_length, uchar **a, uchar **b); static int ptr_compare_1(size_t *compare_length, uchar **a, uchar **b); static int ptr_compare_2(size_t *compare_length, uchar **a, uchar **b); static int ptr_compare_3(size_t *compare_length, uchar **a, uchar **b); -#endif /* __sun */ - /* Get a pointer to a optimal byte-compare function for a given size */ - -#ifdef USE_NATIVE_MEMCMP -qsort2_cmp get_ptr_compare (size_t size __attribute__((unused))) -{ - return (qsort2_cmp) native_compare; -} -#else qsort2_cmp get_ptr_compare (size_t size) { if (size < 4) @@ -74,9 +74,6 @@ qsort2_cmp get_ptr_compare (size_t size) } return 0; /* Impossible */ } -#endif /* USE_NATIVE_MEMCMP */ - - /* Compare two keys to see which is smaller. Loop unrolled to make it quick !! */ #define cmp(N) if (first[N] != last[N]) return (int) first[N] - (int) last[N] -#ifndef USE_NATIVE_MEMCMP - static int ptr_compare(size_t *compare_length, uchar **a, uchar **b) { size_t length= *compare_length; @@ -189,7 +184,7 @@ static int ptr_compare_3(size_t *compare_length,uchar **a, uchar **b) return (0); } -#endif /* !__sun */ +#endif /* USE_NATIVE_MEMCMP */ void my_store_ptr(uchar *buff, size_t pack_length, my_off_t pos) { @@ -227,4 +222,3 @@ my_off_t my_get_ptr(uchar *ptr, size_t pack_length) } return pos; } - diff --git a/mysys_ssl/openssl.c b/mysys_ssl/openssl.c index f3dc8f4277c..2cf37caccbb 100644 --- a/mysys_ssl/openssl.c +++ b/mysys_ssl/openssl.c @@ -61,8 +61,8 @@ int check_openssl_compatibility() return 1; alloc_size= alloc_count= 0; - md5_ctx= EVP_MD_CTX_create(); - EVP_MD_CTX_destroy(md5_ctx); + md5_ctx= EVP_MD_CTX_new(); + EVP_MD_CTX_free(md5_ctx); if (alloc_count != 1 || !alloc_size || alloc_size > EVP_MD_CTX_SIZE) return 1; diff --git a/pcre/AUTHORS b/pcre/AUTHORS index 291657caef1..eb9b1a44b34 100644 --- a/pcre/AUTHORS +++ b/pcre/AUTHORS @@ -8,7 +8,7 @@ Email domain: cam.ac.uk University of Cambridge Computing Service, Cambridge, England.
-Copyright (c) 1997-2017 University of Cambridge +Copyright (c) 1997-2018 University of Cambridge All rights reserved @@ -19,7 +19,7 @@ Written by: Zoltan Herczeg Email local part: hzmester Emain domain: freemail.hu -Copyright(c) 2010-2017 Zoltan Herczeg +Copyright(c) 2010-2018 Zoltan Herczeg All rights reserved. @@ -30,7 +30,7 @@ Written by: Zoltan Herczeg Email local part: hzmester Emain domain: freemail.hu -Copyright(c) 2009-2017 Zoltan Herczeg +Copyright(c) 2009-2018 Zoltan Herczeg All rights reserved. diff --git a/pcre/ChangeLog b/pcre/ChangeLog index 590a7542885..7b53195f6a6 100644 --- a/pcre/ChangeLog +++ b/pcre/ChangeLog @@ -4,6 +4,59 @@ ChangeLog for PCRE Note that the PCRE 8.xx series (PCRE1) is now in a bugfix-only state. All development is happening in the PCRE2 10.xx series. + +Version 8.42 20-March-2018 +-------------------------- + +1. Fixed a MIPS issue in the JIT compiler reported by Joshua Kinard. + +2. Fixed outdated real_pcre definitions in pcre.h.in (patch by Evgeny Kotkov). + +3. pcregrep was truncating components of file names to 128 characters when +processing files with the -r option, and also (some very odd code) truncating +path names to 512 characters. There is now a check on the absolute length of +full path file names, which may be up to 2047 characters long. + +4. Using pcre_dfa_exec(), in UTF mode when UCP support was not defined, there +was the possibility of a false positive match when caselessly matching a "not +this character" item such as [^\x{1234}] (with a code point greater than 127) +because the "other case" variable was not being initialized. + +5. Although pcre_jit_exec checks whether the pattern is compiled +in a given mode, it was also expected that at least one mode is available. +This is fixed and pcre_jit_exec returns with PCRE_ERROR_JIT_BADOPTION +when the pattern is not optimized by JIT at all. + +6. The line number and related variables such as match counts in pcregrep +were all int variables, causing overflow when files with more than 2147483647 +lines were processed (assuming 32-bit ints). They have all been changed to +unsigned long ints. + +7. If a backreference with a minimum repeat count of zero was first in a +pattern, apart from assertions, an incorrect first matching character could be +recorded. For example, for the pattern /(?=(a))\1?b/, "b" was incorrectly set +as the first character of a match. + +8. Fix out-of-bounds read for partial matching of /./ against an empty string +when the newline type is CRLF. + +9. When matching using the the REG_STARTEND feature of the POSIX API with a +non-zero starting offset, unset capturing groups with lower numbers than a +group that did capture something were not being correctly returned as "unset" +(that is, with offset values of -1). + +10. Matching the pattern /(*UTF)\C[^\v]+\x80/ against an 8-bit string +containing multi-code-unit characters caused bad behaviour and possibly a +crash. This issue was fixed for other kinds of repeat in release 8.37 by change +38, but repeating character classes were overlooked. + +11. A small fix to pcregrep to avoid compiler warnings for -Wformat-overflow=2. + +12. Added --enable-jit=auto support to configure.ac. + +13. Fix misleading error message in configure.ac. 
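As an aside on item 9 above: a minimal sketch of the REG_STARTEND scenario through the POSIX wrapper API (regcomp/regexec, as also exposed by pcreposix). The pattern, subject string and offsets below are made up for illustration only and are not taken from the PCRE test suite.

#include <regex.h>
#include <stdio.h>

int main(void)
{
  regex_t re;
  regmatch_t m[3];
  const char *subject= "xxab";

  /* Group 1 may not take part in the match; group 2 always does. */
  if (regcomp(&re, "(z)?(a)b", REG_EXTENDED) != 0)
    return 1;

  /* REG_STARTEND: match only subject[2..4), i.e. "ab", so matching
     starts at a non-zero offset. */
  m[0].rm_so= 2;
  m[0].rm_eo= 4;
  if (regexec(&re, subject, 3, m, REG_STARTEND) == 0)
  {
    /* With the fix, the unset group 1 is reported as -1/-1 even though
       the higher-numbered group 2 did capture something. */
    printf("group1: %d..%d\n", (int) m[1].rm_so, (int) m[1].rm_eo);
    printf("group2: %d..%d\n", (int) m[2].rm_so, (int) m[2].rm_eo);
  }
  regfree(&re);
  return 0;
}

Before the fix described in item 9, such lower-numbered unset groups could come back with stale non-negative offsets instead of -1.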
+ + Version 8.41 05-July-2017 ------------------------- diff --git a/pcre/INSTALL b/pcre/INSTALL index 2099840756e..8865734f81b 100644 --- a/pcre/INSTALL +++ b/pcre/INSTALL @@ -1,8 +1,8 @@ Installation Instructions ************************* -Copyright (C) 1994-1996, 1999-2002, 2004-2013 Free Software Foundation, -Inc. + Copyright (C) 1994-1996, 1999-2002, 2004-2016 Free Software +Foundation, Inc. Copying and distribution of this file, with or without modification, are permitted in any medium without royalty provided the copyright @@ -12,97 +12,96 @@ without warranty of any kind. Basic Installation ================== - Briefly, the shell command `./configure && make && make install' + Briefly, the shell command './configure && make && make install' should configure, build, and install this package. The following -more-detailed instructions are generic; see the `README' file for +more-detailed instructions are generic; see the 'README' file for instructions specific to this package. Some packages provide this -`INSTALL' file but do not implement all of the features documented +'INSTALL' file but do not implement all of the features documented below. The lack of an optional feature in a given package is not necessarily a bug. More recommendations for GNU packages can be found in *note Makefile Conventions: (standards)Makefile Conventions. - The `configure' shell script attempts to guess correct values for + The 'configure' shell script attempts to guess correct values for various system-dependent variables used during compilation. It uses -those values to create a `Makefile' in each directory of the package. -It may also create one or more `.h' files containing system-dependent -definitions. Finally, it creates a shell script `config.status' that +those values to create a 'Makefile' in each directory of the package. +It may also create one or more '.h' files containing system-dependent +definitions. Finally, it creates a shell script 'config.status' that you can run in the future to recreate the current configuration, and a -file `config.log' containing compiler output (useful mainly for -debugging `configure'). +file 'config.log' containing compiler output (useful mainly for +debugging 'configure'). - It can also use an optional file (typically called `config.cache' -and enabled with `--cache-file=config.cache' or simply `-C') that saves -the results of its tests to speed up reconfiguring. Caching is -disabled by default to prevent problems with accidental use of stale -cache files. + It can also use an optional file (typically called 'config.cache' and +enabled with '--cache-file=config.cache' or simply '-C') that saves the +results of its tests to speed up reconfiguring. Caching is disabled by +default to prevent problems with accidental use of stale cache files. If you need to do unusual things to compile the package, please try -to figure out how `configure' could check whether to do them, and mail -diffs or instructions to the address given in the `README' so they can +to figure out how 'configure' could check whether to do them, and mail +diffs or instructions to the address given in the 'README' so they can be considered for the next release. If you are using the cache, and at -some point `config.cache' contains results you don't want to keep, you +some point 'config.cache' contains results you don't want to keep, you may remove or edit it. - The file `configure.ac' (or `configure.in') is used to create -`configure' by a program called `autoconf'. 
You need `configure.ac' if -you want to change it or regenerate `configure' using a newer version -of `autoconf'. + The file 'configure.ac' (or 'configure.in') is used to create +'configure' by a program called 'autoconf'. You need 'configure.ac' if +you want to change it or regenerate 'configure' using a newer version of +'autoconf'. The simplest way to compile this package is: - 1. `cd' to the directory containing the package's source code and type - `./configure' to configure the package for your system. + 1. 'cd' to the directory containing the package's source code and type + './configure' to configure the package for your system. - Running `configure' might take a while. While running, it prints + Running 'configure' might take a while. While running, it prints some messages telling which features it is checking for. - 2. Type `make' to compile the package. + 2. Type 'make' to compile the package. - 3. Optionally, type `make check' to run any self-tests that come with + 3. Optionally, type 'make check' to run any self-tests that come with the package, generally using the just-built uninstalled binaries. - 4. Type `make install' to install the programs and any data files and + 4. Type 'make install' to install the programs and any data files and documentation. When installing into a prefix owned by root, it is recommended that the package be configured and built as a regular - user, and only the `make install' phase executed with root + user, and only the 'make install' phase executed with root privileges. - 5. Optionally, type `make installcheck' to repeat any self-tests, but + 5. Optionally, type 'make installcheck' to repeat any self-tests, but this time using the binaries in their final installed location. This target does not install anything. Running this target as a - regular user, particularly if the prior `make install' required + regular user, particularly if the prior 'make install' required root privileges, verifies that the installation completed correctly. 6. You can remove the program binaries and object files from the - source code directory by typing `make clean'. To also remove the - files that `configure' created (so you can compile the package for - a different kind of computer), type `make distclean'. There is - also a `make maintainer-clean' target, but that is intended mainly + source code directory by typing 'make clean'. To also remove the + files that 'configure' created (so you can compile the package for + a different kind of computer), type 'make distclean'. There is + also a 'make maintainer-clean' target, but that is intended mainly for the package's developers. If you use it, you may have to get all sorts of other programs in order to regenerate files that came with the distribution. - 7. Often, you can also type `make uninstall' to remove the installed + 7. Often, you can also type 'make uninstall' to remove the installed files again. In practice, not all packages have tested that uninstallation works correctly, even though it is required by the GNU Coding Standards. - 8. Some packages, particularly those that use Automake, provide `make + 8. Some packages, particularly those that use Automake, provide 'make distcheck', which can by used by developers to test that all other - targets like `make install' and `make uninstall' work correctly. + targets like 'make install' and 'make uninstall' work correctly. This target is generally not run by end users. 
Compilers and Options ===================== Some systems require unusual options for compilation or linking that -the `configure' script does not know about. Run `./configure --help' +the 'configure' script does not know about. Run './configure --help' for details on some of the pertinent environment variables. - You can give `configure' initial values for configuration parameters -by setting variables in the command line or in the environment. Here -is an example: + You can give 'configure' initial values for configuration parameters +by setting variables in the command line or in the environment. Here is +an example: ./configure CC=c99 CFLAGS=-g LIBS=-lposix @@ -113,21 +112,21 @@ Compiling For Multiple Architectures You can compile the package for more than one kind of computer at the same time, by placing the object files for each architecture in their -own directory. To do this, you can use GNU `make'. `cd' to the +own directory. To do this, you can use GNU 'make'. 'cd' to the directory where you want the object files and executables to go and run -the `configure' script. `configure' automatically checks for the -source code in the directory that `configure' is in and in `..'. This -is known as a "VPATH" build. +the 'configure' script. 'configure' automatically checks for the source +code in the directory that 'configure' is in and in '..'. This is known +as a "VPATH" build. - With a non-GNU `make', it is safer to compile the package for one + With a non-GNU 'make', it is safer to compile the package for one architecture at a time in the source code directory. After you have -installed the package for one architecture, use `make distclean' before +installed the package for one architecture, use 'make distclean' before reconfiguring for another architecture. On MacOS X 10.5 and later systems, you can create libraries and executables that work on multiple system types--known as "fat" or -"universal" binaries--by specifying multiple `-arch' options to the -compiler but only a single `-arch' option to the preprocessor. Like +"universal" binaries--by specifying multiple '-arch' options to the +compiler but only a single '-arch' option to the preprocessor. Like this: ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ @@ -136,105 +135,104 @@ this: This is not guaranteed to produce working output in all cases, you may have to build one architecture at a time and combine the results -using the `lipo' tool if you have problems. +using the 'lipo' tool if you have problems. Installation Names ================== - By default, `make install' installs the package's commands under -`/usr/local/bin', include files under `/usr/local/include', etc. You -can specify an installation prefix other than `/usr/local' by giving -`configure' the option `--prefix=PREFIX', where PREFIX must be an + By default, 'make install' installs the package's commands under +'/usr/local/bin', include files under '/usr/local/include', etc. You +can specify an installation prefix other than '/usr/local' by giving +'configure' the option '--prefix=PREFIX', where PREFIX must be an absolute file name. You can specify separate installation prefixes for architecture-specific files and architecture-independent files. If you -pass the option `--exec-prefix=PREFIX' to `configure', the package uses +pass the option '--exec-prefix=PREFIX' to 'configure', the package uses PREFIX as the prefix for installing programs and libraries. Documentation and other data files still use the regular prefix. 
In addition, if you use an unusual directory layout you can give -options like `--bindir=DIR' to specify different values for particular -kinds of files. Run `configure --help' for a list of the directories -you can set and what kinds of files go in them. In general, the -default for these options is expressed in terms of `${prefix}', so that -specifying just `--prefix' will affect all of the other directory +options like '--bindir=DIR' to specify different values for particular +kinds of files. Run 'configure --help' for a list of the directories +you can set and what kinds of files go in them. In general, the default +for these options is expressed in terms of '${prefix}', so that +specifying just '--prefix' will affect all of the other directory specifications that were not explicitly provided. The most portable way to affect installation locations is to pass the -correct locations to `configure'; however, many packages provide one or +correct locations to 'configure'; however, many packages provide one or both of the following shortcuts of passing variable assignments to the -`make install' command line to change installation locations without +'make install' command line to change installation locations without having to reconfigure or recompile. The first method involves providing an override variable for each -affected directory. For example, `make install +affected directory. For example, 'make install prefix=/alternate/directory' will choose an alternate location for all directory configuration variables that were expressed in terms of -`${prefix}'. Any directories that were specified during `configure', -but not in terms of `${prefix}', must each be overridden at install -time for the entire installation to be relocated. The approach of -makefile variable overrides for each directory variable is required by -the GNU Coding Standards, and ideally causes no recompilation. -However, some platforms have known limitations with the semantics of -shared libraries that end up requiring recompilation when using this -method, particularly noticeable in packages that use GNU Libtool. +'${prefix}'. Any directories that were specified during 'configure', +but not in terms of '${prefix}', must each be overridden at install time +for the entire installation to be relocated. The approach of makefile +variable overrides for each directory variable is required by the GNU +Coding Standards, and ideally causes no recompilation. However, some +platforms have known limitations with the semantics of shared libraries +that end up requiring recompilation when using this method, particularly +noticeable in packages that use GNU Libtool. - The second method involves providing the `DESTDIR' variable. For -example, `make install DESTDIR=/alternate/directory' will prepend -`/alternate/directory' before all installation names. The approach of -`DESTDIR' overrides is not required by the GNU Coding Standards, and + The second method involves providing the 'DESTDIR' variable. For +example, 'make install DESTDIR=/alternate/directory' will prepend +'/alternate/directory' before all installation names. The approach of +'DESTDIR' overrides is not required by the GNU Coding Standards, and does not work on platforms that have drive letters. On the other hand, it does better at avoiding recompilation issues, and works well even -when some directory options were not specified in terms of `${prefix}' -at `configure' time. +when some directory options were not specified in terms of '${prefix}' +at 'configure' time. 
Optional Features ================= If the package supports it, you can cause programs to be installed -with an extra prefix or suffix on their names by giving `configure' the -option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. +with an extra prefix or suffix on their names by giving 'configure' the +option '--program-prefix=PREFIX' or '--program-suffix=SUFFIX'. - Some packages pay attention to `--enable-FEATURE' options to -`configure', where FEATURE indicates an optional part of the package. -They may also pay attention to `--with-PACKAGE' options, where PACKAGE -is something like `gnu-as' or `x' (for the X Window System). The -`README' should mention any `--enable-' and `--with-' options that the + Some packages pay attention to '--enable-FEATURE' options to +'configure', where FEATURE indicates an optional part of the package. +They may also pay attention to '--with-PACKAGE' options, where PACKAGE +is something like 'gnu-as' or 'x' (for the X Window System). The +'README' should mention any '--enable-' and '--with-' options that the package recognizes. - For packages that use the X Window System, `configure' can usually + For packages that use the X Window System, 'configure' can usually find the X include and library files automatically, but if it doesn't, -you can use the `configure' options `--x-includes=DIR' and -`--x-libraries=DIR' to specify their locations. +you can use the 'configure' options '--x-includes=DIR' and +'--x-libraries=DIR' to specify their locations. Some packages offer the ability to configure how verbose the -execution of `make' will be. For these packages, running `./configure +execution of 'make' will be. For these packages, running './configure --enable-silent-rules' sets the default to minimal output, which can be -overridden with `make V=1'; while running `./configure +overridden with 'make V=1'; while running './configure --disable-silent-rules' sets the default to verbose, which can be -overridden with `make V=0'. +overridden with 'make V=0'. Particular systems ================== - On HP-UX, the default C compiler is not ANSI C compatible. If GNU -CC is not installed, it is recommended to use the following options in + On HP-UX, the default C compiler is not ANSI C compatible. If GNU CC +is not installed, it is recommended to use the following options in order to use an ANSI C compiler: ./configure CC="cc -Ae -D_XOPEN_SOURCE=500" and if that doesn't work, install pre-built binaries of GCC for HP-UX. - HP-UX `make' updates targets which have the same time stamps as -their prerequisites, which makes it generally unusable when shipped -generated files such as `configure' are involved. Use GNU `make' -instead. + HP-UX 'make' updates targets which have the same time stamps as their +prerequisites, which makes it generally unusable when shipped generated +files such as 'configure' are involved. Use GNU 'make' instead. On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot -parse its `' header file. The option `-nodtk' can be used as -a workaround. If GNU CC is not installed, it is therefore recommended -to try +parse its '' header file. The option '-nodtk' can be used as a +workaround. If GNU CC is not installed, it is therefore recommended to +try ./configure CC="cc" @@ -242,26 +240,26 @@ and if that doesn't work, try ./configure CC="cc -nodtk" - On Solaris, don't put `/usr/ucb' early in your `PATH'. This + On Solaris, don't put '/usr/ucb' early in your 'PATH'. 
This directory contains several dysfunctional programs; working variants of -these programs are available in `/usr/bin'. So, if you need `/usr/ucb' -in your `PATH', put it _after_ `/usr/bin'. +these programs are available in '/usr/bin'. So, if you need '/usr/ucb' +in your 'PATH', put it _after_ '/usr/bin'. - On Haiku, software installed for all users goes in `/boot/common', -not `/usr/local'. It is recommended to use the following options: + On Haiku, software installed for all users goes in '/boot/common', +not '/usr/local'. It is recommended to use the following options: ./configure --prefix=/boot/common Specifying the System Type ========================== - There may be some features `configure' cannot figure out + There may be some features 'configure' cannot figure out automatically, but needs to determine by the type of machine the package will run on. Usually, assuming the package is built to be run on the -_same_ architectures, `configure' can figure that out, but if it prints +_same_ architectures, 'configure' can figure that out, but if it prints a message saying it cannot guess the machine type, give it the -`--build=TYPE' option. TYPE can either be a short name for the system -type, such as `sun4', or a canonical name which has the form: +'--build=TYPE' option. TYPE can either be a short name for the system +type, such as 'sun4', or a canonical name which has the form: CPU-COMPANY-SYSTEM @@ -270,101 +268,101 @@ where SYSTEM can have one of these forms: OS KERNEL-OS - See the file `config.sub' for the possible values of each field. If -`config.sub' isn't included in this package, then this package doesn't + See the file 'config.sub' for the possible values of each field. If +'config.sub' isn't included in this package, then this package doesn't need to know the machine type. If you are _building_ compiler tools for cross-compiling, you should -use the option `--target=TYPE' to select the type of system they will +use the option '--target=TYPE' to select the type of system they will produce code for. If you want to _use_ a cross compiler, that generates code for a platform different from the build platform, you should specify the "host" platform (i.e., that on which the generated programs will -eventually be run) with `--host=TYPE'. +eventually be run) with '--host=TYPE'. Sharing Defaults ================ - If you want to set default values for `configure' scripts to share, -you can create a site shell script called `config.site' that gives -default values for variables like `CC', `cache_file', and `prefix'. -`configure' looks for `PREFIX/share/config.site' if it exists, then -`PREFIX/etc/config.site' if it exists. Or, you can set the -`CONFIG_SITE' environment variable to the location of the site script. -A warning: not all `configure' scripts look for a site script. + If you want to set default values for 'configure' scripts to share, +you can create a site shell script called 'config.site' that gives +default values for variables like 'CC', 'cache_file', and 'prefix'. +'configure' looks for 'PREFIX/share/config.site' if it exists, then +'PREFIX/etc/config.site' if it exists. Or, you can set the +'CONFIG_SITE' environment variable to the location of the site script. +A warning: not all 'configure' scripts look for a site script. Defining Variables ================== Variables not defined in a site shell script can be set in the -environment passed to `configure'. However, some packages may run +environment passed to 'configure'. 
However, some packages may run configure again during the build, and the customized values of these variables may be lost. In order to avoid this problem, you should set -them in the `configure' command line, using `VAR=value'. For example: +them in the 'configure' command line, using 'VAR=value'. For example: ./configure CC=/usr/local2/bin/gcc -causes the specified `gcc' to be used as the C compiler (unless it is +causes the specified 'gcc' to be used as the C compiler (unless it is overridden in the site shell script). -Unfortunately, this technique does not work for `CONFIG_SHELL' due to -an Autoconf limitation. Until the limitation is lifted, you can use -this workaround: +Unfortunately, this technique does not work for 'CONFIG_SHELL' due to an +Autoconf limitation. Until the limitation is lifted, you can use this +workaround: CONFIG_SHELL=/bin/bash ./configure CONFIG_SHELL=/bin/bash -`configure' Invocation +'configure' Invocation ====================== - `configure' recognizes the following options to control how it + 'configure' recognizes the following options to control how it operates. -`--help' -`-h' - Print a summary of all of the options to `configure', and exit. +'--help' +'-h' + Print a summary of all of the options to 'configure', and exit. -`--help=short' -`--help=recursive' +'--help=short' +'--help=recursive' Print a summary of the options unique to this package's - `configure', and exit. The `short' variant lists options used - only in the top level, while the `recursive' variant lists options - also present in any nested packages. + 'configure', and exit. The 'short' variant lists options used only + in the top level, while the 'recursive' variant lists options also + present in any nested packages. -`--version' -`-V' - Print the version of Autoconf used to generate the `configure' +'--version' +'-V' + Print the version of Autoconf used to generate the 'configure' script, and exit. -`--cache-file=FILE' +'--cache-file=FILE' Enable the cache: use and save the results of the tests in FILE, - traditionally `config.cache'. FILE defaults to `/dev/null' to + traditionally 'config.cache'. FILE defaults to '/dev/null' to disable caching. -`--config-cache' -`-C' - Alias for `--cache-file=config.cache'. +'--config-cache' +'-C' + Alias for '--cache-file=config.cache'. -`--quiet' -`--silent' -`-q' +'--quiet' +'--silent' +'-q' Do not print messages saying which checks are being made. To - suppress all normal output, redirect it to `/dev/null' (any error + suppress all normal output, redirect it to '/dev/null' (any error messages will still be shown). -`--srcdir=DIR' +'--srcdir=DIR' Look for the package's source code in directory DIR. Usually - `configure' can determine that directory automatically. + 'configure' can determine that directory automatically. -`--prefix=DIR' - Use DIR as the installation prefix. *note Installation Names:: - for more details, including other options available for fine-tuning - the installation locations. +'--prefix=DIR' + Use DIR as the installation prefix. *note Installation Names:: for + more details, including other options available for fine-tuning the + installation locations. -`--no-create' -`-n' +'--no-create' +'-n' Run the configure checks, but stop before creating any output files. -`configure' also accepts some other, not widely useful, options. Run -`configure --help' for more details. +'configure' also accepts some other, not widely useful, options. Run +'configure --help' for more details. 
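Putting the preceding points together, a typical invocation might look like the following sketch; the compiler path comes from the example above, and the prefix is illustrative only:

     ./configure CC=/usr/local2/bin/gcc --config-cache --prefix=/usr/local
     make
     make install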
diff --git a/pcre/LICENCE b/pcre/LICENCE index dd9071a8dd8..f6ef7fd7664 100644 --- a/pcre/LICENCE +++ b/pcre/LICENCE @@ -25,7 +25,7 @@ Email domain: cam.ac.uk University of Cambridge Computing Service, Cambridge, England. -Copyright (c) 1997-2017 University of Cambridge +Copyright (c) 1997-2018 University of Cambridge All rights reserved. @@ -36,7 +36,7 @@ Written by: Zoltan Herczeg Email local part: hzmester Emain domain: freemail.hu -Copyright(c) 2010-2017 Zoltan Herczeg +Copyright(c) 2010-2018 Zoltan Herczeg All rights reserved. @@ -47,7 +47,7 @@ Written by: Zoltan Herczeg Email local part: hzmester Emain domain: freemail.hu -Copyright(c) 2009-2017 Zoltan Herczeg +Copyright(c) 2009-2018 Zoltan Herczeg All rights reserved. diff --git a/pcre/NEWS b/pcre/NEWS index 36be07cb880..09b4ad36003 100644 --- a/pcre/NEWS +++ b/pcre/NEWS @@ -1,6 +1,12 @@ News about PCRE releases ------------------------ +Release 8.42 20-March-2018 +-------------------------- + +This is a bug-fix release. + + Release 8.41 13-June-2017 ------------------------- diff --git a/pcre/NON-AUTOTOOLS-BUILD b/pcre/NON-AUTOTOOLS-BUILD index 3910059106b..37f6164475b 100644 --- a/pcre/NON-AUTOTOOLS-BUILD +++ b/pcre/NON-AUTOTOOLS-BUILD @@ -760,13 +760,14 @@ The character code used is EBCDIC, not ASCII or Unicode. In z/OS, UNIX APIs and applications can be supported through UNIX System Services, and in such an environment PCRE can be built in the same way as in other systems. However, in native z/OS (without UNIX System Services) and in z/VM, special ports are -required. For details, please see this web site: +required. PCRE1 version 8.39 is available in file 882 on this site: - http://www.zaconsultants.net + http://www.cbttape.org -You may download PCRE from WWW.CBTTAPE.ORG, file 882.  Everything, source and -executable, is in EBCDIC and native z/OS file formats and this is the -recommended download site. +Everything, source and executable, is in EBCDIC and native z/OS file formats. +However, this software is not maintained and will not be upgraded. If you are +new to PCRE you should be looking at PCRE2 (version 10.30 or later). -========================== -Last Updated: 25 June 2015 +=============================== +Last Updated: 13 September 2017 +=============================== diff --git a/pcre/configure.ac b/pcre/configure.ac index 718a18508c9..dcdef6a9427 100644 --- a/pcre/configure.ac +++ b/pcre/configure.ac @@ -9,18 +9,18 @@ dnl The PCRE_PRERELEASE feature is for identifying release candidates. It might dnl be defined as -RC2, for example. For real releases, it should be empty. m4_define(pcre_major, [8]) -m4_define(pcre_minor, [41]) +m4_define(pcre_minor, [42]) m4_define(pcre_prerelease, []) -m4_define(pcre_date, [2017-07-05]) +m4_define(pcre_date, [2018-03-20]) # NOTE: The CMakeLists.txt file searches for the above variables in the first # 50 lines of this file. Please update that if the variables above are moved. # Libtool shared library interface versions (current:revision:age) -m4_define(libpcre_version, [3:9:2]) -m4_define(libpcre16_version, [2:9:2]) -m4_define(libpcre32_version, [0:9:0]) -m4_define(libpcreposix_version, [0:5:0]) +m4_define(libpcre_version, [3:10:2]) +m4_define(libpcre16_version, [2:10:2]) +m4_define(libpcre32_version, [0:10:0]) +m4_define(libpcreposix_version, [0:6:0]) m4_define(libpcrecpp_version, [0:1:0]) AC_PREREQ(2.57) @@ -155,6 +155,18 @@ AC_ARG_ENABLE(jit, [enable Just-In-Time compiling support]), , enable_jit=no) +# This code enables JIT if the hardware supports it. 
+ +if test "$enable_jit" = "auto"; then + AC_LANG(C) + AC_COMPILE_IFELSE([AC_LANG_SOURCE([[ + #define SLJIT_CONFIG_AUTO 1 + #include "sljit/sljitConfigInternal.h" + #if (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) + #error unsupported + #endif]])], enable_jit=yes, enable_jit=no) +fi + # Handle --disable-pcregrep-jit (enabled by default) AC_ARG_ENABLE(pcregrep-jit, AS_HELP_STRING([--disable-pcregrep-jit], @@ -469,7 +481,7 @@ pcre_have_type_traits="0" pcre_have_bits_type_traits="0" if test "x$enable_cpp" = "xyes" -a -z "$CXX"; then - AC_MSG_ERROR([You need a C++ compiler for C++ support.]) + AC_MSG_ERROR([Invalid C++ compiler or C++ compiler flags]) fi if test "x$enable_cpp" = "xyes" -a -n "$CXX" diff --git a/pcre/doc/html/NON-AUTOTOOLS-BUILD.txt b/pcre/doc/html/NON-AUTOTOOLS-BUILD.txt index 3910059106b..37f6164475b 100644 --- a/pcre/doc/html/NON-AUTOTOOLS-BUILD.txt +++ b/pcre/doc/html/NON-AUTOTOOLS-BUILD.txt @@ -760,13 +760,14 @@ The character code used is EBCDIC, not ASCII or Unicode. In z/OS, UNIX APIs and applications can be supported through UNIX System Services, and in such an environment PCRE can be built in the same way as in other systems. However, in native z/OS (without UNIX System Services) and in z/VM, special ports are -required. For details, please see this web site: +required. PCRE1 version 8.39 is available in file 882 on this site: - http://www.zaconsultants.net + http://www.cbttape.org -You may download PCRE from WWW.CBTTAPE.ORG, file 882.  Everything, source and -executable, is in EBCDIC and native z/OS file formats and this is the -recommended download site. +Everything, source and executable, is in EBCDIC and native z/OS file formats. +However, this software is not maintained and will not be upgraded. If you are +new to PCRE you should be looking at PCRE2 (version 10.30 or later). -========================== -Last Updated: 25 June 2015 +=============================== +Last Updated: 13 September 2017 +=============================== diff --git a/pcre/pcre.h.in b/pcre/pcre.h.in index 667a45ed575..d4d78926984 100644 --- a/pcre/pcre.h.in +++ b/pcre/pcre.h.in @@ -321,11 +321,11 @@ these bits, just add new ones on the end, in order to remain compatible. */ /* Types */ -struct real_pcre; /* declaration; the definition is private */ -typedef struct real_pcre pcre; +struct real_pcre8_or_16; /* declaration; the definition is private */ +typedef struct real_pcre8_or_16 pcre; -struct real_pcre16; /* declaration; the definition is private */ -typedef struct real_pcre16 pcre16; +struct real_pcre8_or_16; /* declaration; the definition is private */ +typedef struct real_pcre8_or_16 pcre16; struct real_pcre32; /* declaration; the definition is private */ typedef struct real_pcre32 pcre32; diff --git a/pcre/pcre_compile.c b/pcre/pcre_compile.c index 1a916693e69..9b9da46f0d0 100644 --- a/pcre/pcre_compile.c +++ b/pcre/pcre_compile.c @@ -8063,7 +8063,7 @@ for (;; ptr++) single group (i.e. not to a duplicated name. */ HANDLE_REFERENCE: - if (firstcharflags == REQ_UNSET) firstcharflags = REQ_NONE; + if (firstcharflags == REQ_UNSET) zerofirstcharflags = firstcharflags = REQ_NONE; previous = code; item_hwm_offset = cd->hwm - cd->start_workspace; *code++ = ((options & PCRE_CASELESS) != 0)? 
OP_REFI : OP_REF; diff --git a/pcre/pcre_dfa_exec.c b/pcre/pcre_dfa_exec.c index bc09ced3a7c..f333381d088 100644 --- a/pcre/pcre_dfa_exec.c +++ b/pcre/pcre_dfa_exec.c @@ -2287,12 +2287,14 @@ for (;;) case OP_NOTI: if (clen > 0) { - unsigned int otherd; + pcre_uint32 otherd; #ifdef SUPPORT_UTF if (utf && d >= 128) { #ifdef SUPPORT_UCP otherd = UCD_OTHERCASE(d); +#else + otherd = d; #endif /* SUPPORT_UCP */ } else diff --git a/pcre/pcre_exec.c b/pcre/pcre_exec.c index fa84d924a4c..93256d32455 100644 --- a/pcre/pcre_exec.c +++ b/pcre/pcre_exec.c @@ -6,7 +6,7 @@ and semantics are as close as possible to those of the Perl 5 language. Written by Philip Hazel - Copyright (c) 1997-2014 University of Cambridge + Copyright (c) 1997-2018 University of Cambridge ----------------------------------------------------------------------------- Redistribution and use in source and binary forms, with or without @@ -2313,7 +2313,7 @@ for (;;) case OP_ANY: if (IS_NEWLINE(eptr)) RRETURN(MATCH_NOMATCH); if (md->partial != 0 && - eptr + 1 >= md->end_subject && + eptr == md->end_subject - 1 && NLBLOCK->nltype == NLTYPE_FIXED && NLBLOCK->nllen == 2 && UCHAR21TEST(eptr) == NLBLOCK->nl[0]) @@ -3061,7 +3061,7 @@ for (;;) { RMATCH(eptr, ecode, offset_top, md, eptrb, RM18); if (rrc != MATCH_NOMATCH) RRETURN(rrc); - if (eptr-- == pp) break; /* Stop if tried at original pos */ + if (eptr-- <= pp) break; /* Stop if tried at original pos */ BACKCHAR(eptr); } } @@ -3218,7 +3218,7 @@ for (;;) { RMATCH(eptr, ecode, offset_top, md, eptrb, RM21); if (rrc != MATCH_NOMATCH) RRETURN(rrc); - if (eptr-- == pp) break; /* Stop if tried at original pos */ + if (eptr-- <= pp) break; /* Stop if tried at original pos */ #ifdef SUPPORT_UTF if (utf) BACKCHAR(eptr); #endif diff --git a/pcre/pcre_jit_compile.c b/pcre/pcre_jit_compile.c index 249edbe8e7f..2bad74b0231 100644 --- a/pcre/pcre_jit_compile.c +++ b/pcre/pcre_jit_compile.c @@ -164,7 +164,6 @@ typedef struct jit_arguments { const pcre_uchar *begin; const pcre_uchar *end; int *offsets; - pcre_uchar *uchar_ptr; pcre_uchar *mark_ptr; void *callout_data; /* Everything else after. */ @@ -214,7 +213,7 @@ enum control_types { type_then_trap = 1 }; -typedef int (SLJIT_CALL *jit_function)(jit_arguments *args); +typedef int (SLJIT_FUNC *jit_function)(jit_arguments *args); /* The following structure is the key data type for the recursive code generator. It is allocated by compile_matchingpath, and contains @@ -489,9 +488,24 @@ typedef struct compare_context { /* Used for accessing the elements of the stack. */ #define STACK(i) ((i) * (int)sizeof(sljit_sw)) +#ifdef SLJIT_PREF_SHIFT_REG +#if SLJIT_PREF_SHIFT_REG == SLJIT_R2 +/* Nothing. */ +#elif SLJIT_PREF_SHIFT_REG == SLJIT_R3 +#define SHIFT_REG_IS_R3 +#else +#error "Unsupported shift register" +#endif +#endif + #define TMP1 SLJIT_R0 +#ifdef SHIFT_REG_IS_R3 +#define TMP2 SLJIT_R3 +#define TMP3 SLJIT_R2 +#else #define TMP2 SLJIT_R2 #define TMP3 SLJIT_R3 +#endif #define STR_PTR SLJIT_S0 #define STR_END SLJIT_S1 #define STACK_TOP SLJIT_R1 @@ -520,13 +534,10 @@ the start pointers when the end of the capturing group has not yet reached. 
*/ #if defined COMPILE_PCRE8 #define MOV_UCHAR SLJIT_MOV_U8 -#define MOVU_UCHAR SLJIT_MOVU_U8 #elif defined COMPILE_PCRE16 #define MOV_UCHAR SLJIT_MOV_U16 -#define MOVU_UCHAR SLJIT_MOVU_U16 #elif defined COMPILE_PCRE32 #define MOV_UCHAR SLJIT_MOV_U32 -#define MOVU_UCHAR SLJIT_MOVU_U32 #else #error Unsupported compiling mode #endif @@ -2383,12 +2394,25 @@ if (length < 8) } else { - GET_LOCAL_BASE(SLJIT_R1, 0, OVECTOR_START); - OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1); - loop = LABEL(); - OP1(SLJIT_MOVU, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw), SLJIT_R0, 0); - OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1); - JUMPTO(SLJIT_NOT_ZERO, loop); + if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw)) == SLJIT_SUCCESS) + { + GET_LOCAL_BASE(SLJIT_R1, 0, OVECTOR_START); + OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1); + loop = LABEL(); + sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw)); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, loop); + } + else + { + GET_LOCAL_BASE(SLJIT_R1, 0, OVECTOR_START + sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1); + loop = LABEL(); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_R1), 0, SLJIT_R0, 0); + OP2(SLJIT_ADD, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, sizeof(sljit_sw)); + OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, loop); + } } } @@ -2421,12 +2445,25 @@ if (length < 8) } else { - GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + sizeof(sljit_sw)); - OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2); - loop = LABEL(); - OP1(SLJIT_MOVU, SLJIT_MEM1(TMP2), sizeof(sljit_sw), TMP1, 0); - OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1); - JUMPTO(SLJIT_NOT_ZERO, loop); + if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw)) == SLJIT_SUCCESS) + { + GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + sizeof(sljit_sw)); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2); + loop = LABEL(); + sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); + OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, loop); + } + else + { + GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + 2 * sizeof(sljit_sw)); + OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2); + loop = LABEL(); + OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, TMP1, 0); + OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, SLJIT_IMM, sizeof(sljit_sw)); + OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1); + JUMPTO(SLJIT_NOT_ZERO, loop); + } } OP1(SLJIT_MOV, STACK_TOP, 0, ARGUMENTS, 0); @@ -2436,10 +2473,10 @@ if (common->control_head_ptr != 0) OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_IMM, 0); OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), SLJIT_OFFSETOF(jit_arguments, stack)); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->start_ptr); -OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), SLJIT_OFFSETOF(struct sljit_stack, base)); +OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), SLJIT_OFFSETOF(struct sljit_stack, end)); } -static sljit_sw SLJIT_CALL do_search_mark(sljit_sw *current, const pcre_uchar *skip_arg) +static sljit_sw SLJIT_FUNC do_search_mark(sljit_sw *current, const pcre_uchar *skip_arg) { while (current != 
NULL) { @@ -2460,7 +2497,7 @@ while (current != NULL) SLJIT_ASSERT(current[0] == 0 || current < (sljit_sw*)current[0]); current = (sljit_sw*)current[0]; } -return -1; +return 0; } static SLJIT_INLINE void copy_ovector(compiler_common *common, int topbracket) @@ -2468,6 +2505,7 @@ static SLJIT_INLINE void copy_ovector(compiler_common *common, int topbracket) DEFINE_COMPILER; struct sljit_label *loop; struct sljit_jump *early_quit; +BOOL has_pre; /* At this point we can freely use all registers. */ OP1(SLJIT_MOV, SLJIT_S2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(1)); @@ -2481,17 +2519,30 @@ if (common->mark_ptr != 0) OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, mark_ptr), SLJIT_R2, 0); OP2(SLJIT_SUB, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, offsets), SLJIT_IMM, sizeof(int)); OP1(SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, begin)); -GET_LOCAL_BASE(SLJIT_S0, 0, OVECTOR_START); + +has_pre = sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw)) == SLJIT_SUCCESS; +GET_LOCAL_BASE(SLJIT_S0, 0, OVECTOR_START - (has_pre ? sizeof(sljit_sw) : 0)); + /* Unlikely, but possible */ early_quit = CMP(SLJIT_EQUAL, SLJIT_R1, 0, SLJIT_IMM, 0); loop = LABEL(); -OP2(SLJIT_SUB, SLJIT_S1, 0, SLJIT_MEM1(SLJIT_S0), 0, SLJIT_R0, 0); -OP2(SLJIT_ADD, SLJIT_S0, 0, SLJIT_S0, 0, SLJIT_IMM, sizeof(sljit_sw)); + +if (has_pre) + sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw)); +else + { + OP1(SLJIT_MOV, SLJIT_S1, 0, SLJIT_MEM1(SLJIT_S0), 0); + OP2(SLJIT_ADD, SLJIT_S0, 0, SLJIT_S0, 0, SLJIT_IMM, sizeof(sljit_sw)); + } + +OP2(SLJIT_ADD, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, sizeof(int)); +OP2(SLJIT_SUB, SLJIT_S1, 0, SLJIT_S1, 0, SLJIT_R0, 0); /* Copy the integer value to the output buffer */ #if defined COMPILE_PCRE16 || defined COMPILE_PCRE32 OP2(SLJIT_ASHR, SLJIT_S1, 0, SLJIT_S1, 0, SLJIT_IMM, UCHAR_SHIFT); #endif -OP1(SLJIT_MOVU_S32, SLJIT_MEM1(SLJIT_R2), sizeof(int), SLJIT_S1, 0); + +OP1(SLJIT_MOV_S32, SLJIT_MEM1(SLJIT_R2), 0, SLJIT_S1, 0); OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1); JUMPTO(SLJIT_NOT_ZERO, loop); JUMPHERE(early_quit); @@ -2499,14 +2550,29 @@ JUMPHERE(early_quit); /* Calculate the return value, which is the maximum ovector value. */ if (topbracket > 1) { - GET_LOCAL_BASE(SLJIT_R0, 0, OVECTOR_START + topbracket * 2 * sizeof(sljit_sw)); - OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, topbracket + 1); + if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw))) == SLJIT_SUCCESS) + { + GET_LOCAL_BASE(SLJIT_R0, 0, OVECTOR_START + topbracket * 2 * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, topbracket + 1); - /* OVECTOR(0) is never equal to SLJIT_S2. */ - loop = LABEL(); - OP1(SLJIT_MOVU, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw))); - OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1); - CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop); + /* OVECTOR(0) is never equal to SLJIT_S2. 
*/ + loop = LABEL(); + sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw))); + OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1); + CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop); + } + else + { + GET_LOCAL_BASE(SLJIT_R0, 0, OVECTOR_START + (topbracket - 1) * 2 * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, topbracket + 1); + + /* OVECTOR(0) is never equal to SLJIT_S2. */ + loop = LABEL(); + OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), 0); + OP2(SLJIT_SUB, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 2 * (sljit_sw)sizeof(sljit_sw)); + OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1); + CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop); + } OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_R1, 0); } else @@ -5167,93 +5233,190 @@ OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL); sljit_emit_fast_return(compiler, RETURN_ADDR, 0); } -#define CHAR1 STR_END -#define CHAR2 STACK_TOP - static void do_casefulcmp(compiler_common *common) { DEFINE_COMPILER; struct sljit_jump *jump; struct sljit_label *label; +int char1_reg; +int char2_reg; -sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); +if (sljit_get_register_index(TMP3) < 0) + { + char1_reg = STR_END; + char2_reg = STACK_TOP; + } +else + { + char1_reg = TMP3; + char2_reg = RETURN_ADDR; + } + +sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0); OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP2, 0); -OP1(SLJIT_MOV, TMP3, 0, CHAR1, 0); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, CHAR2, 0); -OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); -OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); -label = LABEL(); -OP1(MOVU_UCHAR, CHAR1, 0, SLJIT_MEM1(TMP1), IN_UCHARS(1)); -OP1(MOVU_UCHAR, CHAR2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); -jump = CMP(SLJIT_NOT_EQUAL, CHAR1, 0, CHAR2, 0); -OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); -JUMPTO(SLJIT_NOT_ZERO, label); +if (char1_reg == STR_END) + { + OP1(SLJIT_MOV, TMP3, 0, char1_reg, 0); + OP1(SLJIT_MOV, RETURN_ADDR, 0, char2_reg, 0); + } -JUMPHERE(jump); -OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); -OP1(SLJIT_MOV, CHAR1, 0, TMP3, 0); -OP1(SLJIT_MOV, CHAR2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); -sljit_emit_fast_return(compiler, RETURN_ADDR, 0); +if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) + { + label = LABEL(); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0); + OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); + JUMPTO(SLJIT_NOT_ZERO, label); + + JUMPHERE(jump); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); + } +else if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) + { + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + + label = LABEL(); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0); + OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, 
IN_UCHARS(1)); + JUMPTO(SLJIT_NOT_ZERO, label); + + JUMPHERE(jump); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + } +else + { + label = LABEL(); + OP1(MOV_UCHAR, char1_reg, 0, SLJIT_MEM1(TMP1), 0); + OP1(MOV_UCHAR, char2_reg, 0, SLJIT_MEM1(STR_PTR), 0); + OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0); + OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); + JUMPTO(SLJIT_NOT_ZERO, label); + + JUMPHERE(jump); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); + } + +if (char1_reg == STR_END) + { + OP1(SLJIT_MOV, char1_reg, 0, TMP3, 0); + OP1(SLJIT_MOV, char2_reg, 0, RETURN_ADDR, 0); + } + +sljit_emit_fast_return(compiler, TMP1, 0); } -#define LCC_TABLE STACK_LIMIT - static void do_caselesscmp(compiler_common *common) { DEFINE_COMPILER; struct sljit_jump *jump; struct sljit_label *label; +int char1_reg = STR_END; +int char2_reg; +int lcc_table; +int opt_type = 0; -sljit_emit_fast_enter(compiler, RETURN_ADDR, 0); +if (sljit_get_register_index(TMP3) < 0) + { + char2_reg = STACK_TOP; + lcc_table = STACK_LIMIT; + } +else + { + char2_reg = RETURN_ADDR; + lcc_table = TMP3; + } + +if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) + opt_type = 1; +else if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) + opt_type = 2; + +sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0); OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP2, 0); -OP1(SLJIT_MOV, TMP3, 0, LCC_TABLE, 0); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, CHAR1, 0); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, CHAR2, 0); -OP1(SLJIT_MOV, LCC_TABLE, 0, SLJIT_IMM, common->lcc); -OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); -OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, char1_reg, 0); + +if (char2_reg == STACK_TOP) + { + OP1(SLJIT_MOV, TMP3, 0, char2_reg, 0); + OP1(SLJIT_MOV, RETURN_ADDR, 0, lcc_table, 0); + } + +OP1(SLJIT_MOV, lcc_table, 0, SLJIT_IMM, common->lcc); + +if (opt_type == 1) + { + label = LABEL(); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + } +else if (opt_type == 2) + { + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); + OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + + label = LABEL(); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); + sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + } +else + { + label = LABEL(); + OP1(MOV_UCHAR, char1_reg, 0, SLJIT_MEM1(TMP1), 0); + OP1(MOV_UCHAR, char2_reg, 0, SLJIT_MEM1(STR_PTR), 0); + OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); + } -label = LABEL(); -OP1(MOVU_UCHAR, CHAR1, 0, SLJIT_MEM1(TMP1), IN_UCHARS(1)); -OP1(MOVU_UCHAR, CHAR2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); #ifndef COMPILE_PCRE8 -jump = CMP(SLJIT_GREATER, CHAR1, 0, SLJIT_IMM, 255); +jump = CMP(SLJIT_GREATER, char1_reg, 0, SLJIT_IMM, 255); #endif -OP1(SLJIT_MOV_U8, CHAR1, 0, SLJIT_MEM2(LCC_TABLE, CHAR1), 0); +OP1(SLJIT_MOV_U8, char1_reg, 0, 
SLJIT_MEM2(lcc_table, char1_reg), 0); #ifndef COMPILE_PCRE8 JUMPHERE(jump); -jump = CMP(SLJIT_GREATER, CHAR2, 0, SLJIT_IMM, 255); +jump = CMP(SLJIT_GREATER, char2_reg, 0, SLJIT_IMM, 255); #endif -OP1(SLJIT_MOV_U8, CHAR2, 0, SLJIT_MEM2(LCC_TABLE, CHAR2), 0); +OP1(SLJIT_MOV_U8, char2_reg, 0, SLJIT_MEM2(lcc_table, char2_reg), 0); #ifndef COMPILE_PCRE8 JUMPHERE(jump); #endif -jump = CMP(SLJIT_NOT_EQUAL, CHAR1, 0, CHAR2, 0); + +if (opt_type == 0) + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + +jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0); OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); JUMPTO(SLJIT_NOT_ZERO, label); JUMPHERE(jump); -OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); -OP1(SLJIT_MOV, LCC_TABLE, 0, TMP3, 0); -OP1(SLJIT_MOV, CHAR1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); -OP1(SLJIT_MOV, CHAR2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1); -sljit_emit_fast_return(compiler, RETURN_ADDR, 0); -} +OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); -#undef LCC_TABLE -#undef CHAR1 -#undef CHAR2 +if (opt_type == 2) + OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); + +if (char2_reg == STACK_TOP) + { + OP1(SLJIT_MOV, char2_reg, 0, TMP3, 0); + OP1(SLJIT_MOV, lcc_table, 0, RETURN_ADDR, 0); + } + +OP1(SLJIT_MOV, char1_reg, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1); +sljit_emit_fast_return(compiler, TMP1, 0); +} #if defined SUPPORT_UTF && defined SUPPORT_UCP -static const pcre_uchar * SLJIT_CALL do_utf_caselesscmp(pcre_uchar *src1, jit_arguments *args, pcre_uchar *end1) +static const pcre_uchar * SLJIT_FUNC do_utf_caselesscmp(pcre_uchar *src1, pcre_uchar *src2, pcre_uchar *end1, pcre_uchar *end2) { /* This function would be ineffective to do in JIT level. */ sljit_u32 c1, c2; -const pcre_uchar *src2 = args->uchar_ptr; -const pcre_uchar *end2 = args->end; const ucd_record *ur; const sljit_u32 *pp; @@ -6776,32 +6939,37 @@ else #if defined SUPPORT_UTF && defined SUPPORT_UCP if (common->utf && *cc == OP_REFI) { - SLJIT_ASSERT(TMP1 == SLJIT_R0 && STACK_TOP == SLJIT_R1 && TMP2 == SLJIT_R2); + SLJIT_ASSERT(TMP1 == SLJIT_R0 && STACK_TOP == SLJIT_R1); if (ref) - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1)); + OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1)); else - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); if (withchecks) - jump = CMP(SLJIT_EQUAL, TMP1, 0, TMP2, 0); + jump = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_R2, 0); - /* Needed to save important temporary registers. */ + /* No free saved registers so save data on stack. 
*/ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0); - OP1(SLJIT_MOV, SLJIT_R1, 0, ARGUMENTS, 0); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(jit_arguments, uchar_ptr), STR_PTR, 0); - sljit_emit_ijump(compiler, SLJIT_CALL3, SLJIT_IMM, SLJIT_FUNC_OFFSET(do_utf_caselesscmp)); + OP1(SLJIT_MOV, SLJIT_R1, 0, STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_R3, 0, STR_END, 0); + sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW) | SLJIT_ARG3(SW) | SLJIT_ARG4(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(do_utf_caselesscmp)); OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); + OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_RETURN_REG, 0); + if (common->mode == JIT_COMPILE) add_jump(compiler, backtracks, CMP(SLJIT_LESS_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1)); else { - add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0)); - nopartial = CMP(SLJIT_NOT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1); + OP2(SLJIT_SUB | SLJIT_SET_Z | SLJIT_SET_LESS, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1); + + add_jump(compiler, backtracks, JUMP(SLJIT_LESS)); + + nopartial = JUMP(SLJIT_NOT_EQUAL); + OP1(SLJIT_MOV, STR_PTR, 0, STR_END, 0); check_partial(common, FALSE); add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); JUMPHERE(nopartial); } - OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_RETURN_REG, 0); } else #endif /* SUPPORT_UTF && SUPPORT_UCP */ @@ -7125,7 +7293,7 @@ add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IM return cc + 1 + LINK_SIZE; } -static int SLJIT_CALL do_callout(struct jit_arguments *arguments, PUBL(callout_block) *callout_block, pcre_uchar **jit_ovector) +static sljit_s32 SLJIT_FUNC do_callout(struct jit_arguments *arguments, PUBL(callout_block) *callout_block, pcre_uchar **jit_ovector) { const pcre_uchar *begin = arguments->begin; int *offset_vector = arguments->offsets; @@ -7207,18 +7375,17 @@ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0); /* SLJIT_R0 = arguments */ OP1(SLJIT_MOV, SLJIT_R1, 0, STACK_TOP, 0); GET_LOCAL_BASE(SLJIT_R2, 0, OVECTOR_START); -sljit_emit_ijump(compiler, SLJIT_CALL3, SLJIT_IMM, SLJIT_FUNC_OFFSET(do_callout)); -OP1(SLJIT_MOV_S32, SLJIT_RETURN_REG, 0, SLJIT_RETURN_REG, 0); +sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(S32) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW) | SLJIT_ARG3(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(do_callout)); OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); free_stack(common, CALLOUT_ARG_SIZE / sizeof(sljit_sw)); /* Check return value. 
*/ -OP2(SLJIT_SUB | SLJIT_SET_Z | SLJIT_SET_SIG_GREATER, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0); -add_jump(compiler, &backtrack->topbacktracks, JUMP(SLJIT_SIG_GREATER)); +OP2(SLJIT_SUB32 | SLJIT_SET_Z | SLJIT_SET_SIG_GREATER, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0); +add_jump(compiler, &backtrack->topbacktracks, JUMP(SLJIT_SIG_GREATER32)); if (common->forced_quit_label == NULL) - add_jump(compiler, &common->forced_quit, JUMP(SLJIT_NOT_EQUAL) /* SIG_LESS */); + add_jump(compiler, &common->forced_quit, JUMP(SLJIT_NOT_EQUAL32) /* SIG_LESS */); else - JUMPTO(SLJIT_NOT_EQUAL /* SIG_LESS */, common->forced_quit_label); + JUMPTO(SLJIT_NOT_EQUAL32 /* SIG_LESS */, common->forced_quit_label); return cc + 2 + 2 * LINK_SIZE; } @@ -10439,11 +10606,11 @@ if (opcode == OP_SKIP_ARG) OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0); OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, (sljit_sw)(current->cc + 2)); - sljit_emit_ijump(compiler, SLJIT_CALL2, SLJIT_IMM, SLJIT_FUNC_OFFSET(do_search_mark)); + sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(do_search_mark)); OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); OP1(SLJIT_MOV, STR_PTR, 0, TMP1, 0); - add_jump(compiler, &common->reset_match, CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, -1)); + add_jump(compiler, &common->reset_match, CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0)); return; } @@ -11031,7 +11198,7 @@ if (!compiler) common->compiler = compiler; /* Main pcre_jit_exec entry. */ -sljit_emit_enter(compiler, 0, 1, 5, 5, 0, 0, private_data_size); +sljit_emit_enter(compiler, 0, SLJIT_ARG1(SW), 5, 5, 0, 0, private_data_size); /* Register init. */ reset_ovector(common, (re->top_bracket + 1) * 2); @@ -11044,8 +11211,8 @@ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, str)) OP1(SLJIT_MOV, STR_END, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, end)); OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack)); OP1(SLJIT_MOV_U32, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, limit_match)); -OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, base)); -OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, limit)); +OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, end)); +OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, start)); OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 1); OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LIMIT_MATCH, TMP1, 0); @@ -11251,20 +11418,22 @@ common->quit_label = quit_label; set_jumps(common->stackalloc, LABEL()); /* RETURN_ADDR is not a saved register. 
*/ sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0); -OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, TMP2, 0); -OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); -OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack)); -OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, top), STACK_TOP, 0); -OP2(SLJIT_SUB, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, limit), SLJIT_IMM, STACK_GROWTH_RATE); -sljit_emit_ijump(compiler, SLJIT_CALL2, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_stack_resize)); -jump = CMP(SLJIT_NOT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0); -OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0); -OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack)); -OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, top)); -OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, limit)); -OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1); -sljit_emit_fast_return(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0); +SLJIT_ASSERT(TMP1 == SLJIT_R0 && STACK_TOP == SLJIT_R1); + +OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, STACK_TOP, 0); +OP1(SLJIT_MOV, SLJIT_R0, 0, ARGUMENTS, 0); +OP2(SLJIT_SUB, SLJIT_R1, 0, STACK_LIMIT, 0, SLJIT_IMM, STACK_GROWTH_RATE); +OP1(SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, stack)); +OP1(SLJIT_MOV, STACK_LIMIT, 0, TMP2, 0); + +sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_stack_resize)); +jump = CMP(SLJIT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0); +OP1(SLJIT_MOV, TMP2, 0, STACK_LIMIT, 0); +OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_RETURN_REG, 0); +OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); +OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1); +sljit_emit_fast_return(compiler, TMP1, 0); /* Allocation failed. */ JUMPHERE(jump); @@ -11409,9 +11578,9 @@ union { sljit_u8 local_space[MACHINE_STACK_SIZE]; struct sljit_stack local_stack; -local_stack.max_limit = local_space; -local_stack.limit = local_space; -local_stack.base = local_space + MACHINE_STACK_SIZE; +local_stack.min_start = local_space; +local_stack.start = local_space; +local_stack.end = local_space + MACHINE_STACK_SIZE; local_stack.top = local_space + MACHINE_STACK_SIZE; arguments->stack = &local_stack; convert_executable_func.executable_func = executable_func; @@ -11529,7 +11698,7 @@ if ((options & PCRE_PARTIAL_HARD) != 0) else if ((options & PCRE_PARTIAL_SOFT) != 0) mode = JIT_PARTIAL_SOFT_COMPILE; -if (functions->executable_funcs[mode] == NULL) +if (functions == NULL || functions->executable_funcs[mode] == NULL) return PCRE_ERROR_JIT_BADOPTION; /* Sanity checks should be handled by pcre_exec. 
*/ diff --git a/pcre/pcregrep.c b/pcre/pcregrep.c index a58ff823509..79d9e286c75 100644 --- a/pcre/pcregrep.c +++ b/pcre/pcregrep.c @@ -1387,8 +1387,8 @@ Returns: nothing */ static void -do_after_lines(int lastmatchnumber, char *lastmatchrestart, char *endptr, - char *printname) +do_after_lines(unsigned long int lastmatchnumber, char *lastmatchrestart, + char *endptr, char *printname) { if (after_context > 0 && lastmatchnumber > 0) { @@ -1398,7 +1398,7 @@ if (after_context > 0 && lastmatchnumber > 0) int ellength; char *pp = lastmatchrestart; if (printname != NULL) fprintf(stdout, "%s-", printname); - if (number) fprintf(stdout, "%d-", lastmatchnumber++); + if (number) fprintf(stdout, "%lu-", lastmatchnumber++); pp = end_of_line(pp, endptr, &ellength); FWRITE(lastmatchrestart, 1, pp - lastmatchrestart, stdout); lastmatchrestart = pp; @@ -1502,11 +1502,11 @@ static int pcregrep(void *handle, int frtype, char *filename, char *printname) { int rc = 1; -int linenumber = 1; -int lastmatchnumber = 0; -int count = 0; int filepos = 0; int offsets[OFFSET_SIZE]; +unsigned long int linenumber = 1; +unsigned long int lastmatchnumber = 0; +unsigned long int count = 0; char *lastmatchrestart = NULL; char *ptr = main_buffer; char *endptr; @@ -1609,7 +1609,7 @@ while (ptr < endptr) if (endlinelength == 0 && t == main_buffer + bufsize) { - fprintf(stderr, "pcregrep: line %d%s%s is too long for the internal buffer\n" + fprintf(stderr, "pcregrep: line %lu%s%s is too long for the internal buffer\n" "pcregrep: check the --buffer-size option\n", linenumber, (filename == NULL)? "" : " of file ", @@ -1747,7 +1747,7 @@ while (ptr < endptr) prevoffsets[1] = offsets[1]; if (printname != NULL) fprintf(stdout, "%s:", printname); - if (number) fprintf(stdout, "%d:", linenumber); + if (number) fprintf(stdout, "%lu:", linenumber); /* Handle --line-offsets */ @@ -1862,7 +1862,7 @@ while (ptr < endptr) { char *pp = lastmatchrestart; if (printname != NULL) fprintf(stdout, "%s-", printname); - if (number) fprintf(stdout, "%d-", lastmatchnumber++); + if (number) fprintf(stdout, "%lu-", lastmatchnumber++); pp = end_of_line(pp, endptr, &ellength); FWRITE(lastmatchrestart, 1, pp - lastmatchrestart, stdout); lastmatchrestart = pp; @@ -1902,7 +1902,7 @@ while (ptr < endptr) int ellength; char *pp = p; if (printname != NULL) fprintf(stdout, "%s-", printname); - if (number) fprintf(stdout, "%d-", linenumber - linecount--); + if (number) fprintf(stdout, "%lu-", linenumber - linecount--); pp = end_of_line(pp, endptr, &ellength); FWRITE(p, 1, pp - p, stdout); p = pp; @@ -1916,7 +1916,7 @@ while (ptr < endptr) endhyphenpending = TRUE; if (printname != NULL) fprintf(stdout, "%s:", printname); - if (number) fprintf(stdout, "%d:", linenumber); + if (number) fprintf(stdout, "%lu:", linenumber); /* In multiline mode, we want to print to the end of the line in which the end of the matched string is found, so we adjust linelength and the @@ -2112,7 +2112,7 @@ if (count_only && !quiet) { if (printname != NULL && filenames != FN_NONE) fprintf(stdout, "%s:", printname); - fprintf(stdout, "%d\n", count); + fprintf(stdout, "%lu\n", count); } } @@ -2234,7 +2234,7 @@ if (isdirectory(pathname)) if (dee_action == dee_RECURSE) { - char buffer[1024]; + char buffer[2048]; char *nextfile; directory_type *dir = opendirectory(pathname); @@ -2249,7 +2249,14 @@ if (isdirectory(pathname)) while ((nextfile = readdirectory(dir)) != NULL) { int frc; - sprintf(buffer, "%.512s%c%.128s", pathname, FILESEP, nextfile); + size_t fnlength = strlen(pathname) + 
strlen(nextfile) + 2; + if (fnlength > 2048) + { + fprintf(stderr, "pcre2grep: recursive filename is too long\n"); + rc = 2; + break; + } + sprintf(buffer, "%s%c%s", pathname, FILESEP, nextfile); frc = grep_or_recurse(buffer, dir_recurse, FALSE); if (frc > 1) rc = frc; else if (frc == 0 && rc == 1) rc = 0; @@ -2520,7 +2527,14 @@ if ((popts & PO_FIXED_STRINGS) != 0) } } -sprintf(buffer, "%s%.*s%s", prefix[popts], patlen, ps, suffix[popts]); +if (snprintf(buffer, PATBUFSIZE, "%s%.*s%s", prefix[popts], patlen, ps, + suffix[popts]) > PATBUFSIZE) + { + fprintf(stderr, "pcregrep: Buffer overflow while compiling \"%s\"\n", + ps); + return FALSE; + } + p->compiled = pcre_compile(buffer, options, &error, &errptr, pcretables); if (p->compiled != NULL) return TRUE; @@ -2756,8 +2770,15 @@ for (i = 1; i < argc; i++) int arglen = (argequals == NULL || equals == NULL)? (int)strlen(arg) : (int)(argequals - arg); - sprintf(buff1, "%.*s", baselen, op->long_name); - sprintf(buff2, "%s%.*s", buff1, fulllen - baselen - 2, opbra + 1); + if (snprintf(buff1, sizeof(buff1), "%.*s", baselen, op->long_name) > + (int)sizeof(buff1) || + snprintf(buff2, sizeof(buff2), "%s%.*s", buff1, + fulllen - baselen - 2, opbra + 1) > (int)sizeof(buff2)) + { + fprintf(stderr, "pcregrep: Buffer overflow when parsing %s option\n", + op->long_name); + pcregrep_exit(2); + } if (strncmp(arg, buff1, arglen) == 0 || strncmp(arg, buff2, arglen) == 0) diff --git a/pcre/pcreposix.c b/pcre/pcreposix.c index 7b404a71100..a76d6bfca45 100644 --- a/pcre/pcreposix.c +++ b/pcre/pcreposix.c @@ -6,7 +6,7 @@ and semantics are as close as possible to those of the Perl 5 language. Written by Philip Hazel - Copyright (c) 1997-2017 University of Cambridge + Copyright (c) 1997-2018 University of Cambridge ----------------------------------------------------------------------------- Redistribution and use in source and binary forms, with or without @@ -389,8 +389,8 @@ if (rc >= 0) { for (i = 0; i < (size_t)rc; i++) { - pmatch[i].rm_so = ovector[i*2] + so; - pmatch[i].rm_eo = ovector[i*2+1] + so; + pmatch[i].rm_so = (ovector[i*2] < 0)? -1 : ovector[i*2] + so; + pmatch[i].rm_eo = (ovector[i*2+1] < 0)? -1: ovector[i*2+1] + so; } if (allocated_ovector) free(ovector); for (; i < nmatch; i++) pmatch[i].rm_so = pmatch[i].rm_eo = -1; diff --git a/pcre/testdata/testinput2 b/pcre/testdata/testinput2 index 08c6f39a565..8ba4dc4ddab 100644 --- a/pcre/testdata/testinput2 +++ b/pcre/testdata/testinput2 @@ -4249,4 +4249,12 @@ backtracking verbs. 
--/ /(?=.*[A-Z])/I +"(?<=(a))\1?b" + ab + aaab + +"(?=(a))\1?b" + ab + aaab + /-- End of testinput2 --/ diff --git a/pcre/testdata/testinput5 b/pcre/testdata/testinput5 index 28561a93572..c94008c3f29 100644 --- a/pcre/testdata/testinput5 +++ b/pcre/testdata/testinput5 @@ -798,4 +798,10 @@ /(?<=\K\x{17f})/8G+ \x{17f}\x{17f}\x{17f}\x{17f}\x{17f} +/\C[^\v]+\x80/8 + [Aá¿»BÅ€C] + +/\C[^\d]+\x80/8 + [Aá¿»BÅ€C] + /-- End of testinput5 --/ diff --git a/pcre/testdata/testoutput2 b/pcre/testdata/testoutput2 index 811bbefc84c..61ed8d9d4e4 100644 --- a/pcre/testdata/testoutput2 +++ b/pcre/testdata/testoutput2 @@ -14705,4 +14705,20 @@ No options No first char No need char +"(?<=(a))\1?b" + ab + 0: b + 1: a + aaab + 0: ab + 1: a + +"(?=(a))\1?b" + ab + 0: ab + 1: a + aaab + 0: ab + 1: a + /-- End of testinput2 --/ diff --git a/pcre/testdata/testoutput5 b/pcre/testdata/testoutput5 index bab989ca7e5..090e1e1c85f 100644 --- a/pcre/testdata/testoutput5 +++ b/pcre/testdata/testoutput5 @@ -1942,4 +1942,12 @@ Need char = 'z' 0: \x{17f} 0+ +/\C[^\v]+\x80/8 + [Aá¿»BÅ€C] +No match + +/\C[^\d]+\x80/8 + [Aá¿»BÅ€C] +No match + /-- End of testinput5 --/ diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c index 16e506626e5..72d5a91d59d 100644 --- a/plugin/server_audit/server_audit.c +++ b/plugin/server_audit/server_audit.c @@ -15,7 +15,7 @@ #define PLUGIN_VERSION 0x104 -#define PLUGIN_STR_VERSION "1.4.3" +#define PLUGIN_STR_VERSION "1.4.4" #define _my_thread_var loc_thread_var @@ -359,16 +359,17 @@ static MYSQL_SYSVAR_STR(excl_users, excl_users, PLUGIN_VAR_RQCMDARG, /* bits in the event filter. */ #define EVENT_CONNECT 1 #define EVENT_QUERY_ALL 2 -#define EVENT_QUERY 58 +#define EVENT_QUERY 122 #define EVENT_TABLE 4 #define EVENT_QUERY_DDL 8 #define EVENT_QUERY_DML 16 #define EVENT_QUERY_DCL 32 +#define EVENT_QUERY_DML_NO_SELECT 64 static const char *event_names[]= { "CONNECT", "QUERY", "TABLE", "QUERY_DDL", "QUERY_DML", "QUERY_DCL", - NULL + "QUERY_DML_NO_SELECT", NULL }; static TYPELIB events_typelib= { @@ -376,7 +377,7 @@ static TYPELIB events_typelib= }; static MYSQL_SYSVAR_SET(events, events, PLUGIN_VAR_RQCMDARG, "Specifies the set of events to monitor. 
Can be CONNECT, QUERY, TABLE," - " QUERY_DDL, QUERY_DML, QUERY_DCL.", + " QUERY_DDL, QUERY_DML, QUERY_DML_NO_SELECT, QUERY_DCL.", NULL, NULL, 0, &events_typelib); #define OUTPUT_SYSLOG 0 #define OUTPUT_FILE 1 @@ -854,6 +855,21 @@ struct sa_keyword dml_keywords[]= }; +struct sa_keyword dml_no_select_keywords[]= +{ + {2, "DO", 0, SQLCOM_DML}, + {4, "CALL", 0, SQLCOM_DML}, + {4, "LOAD", &data_word, SQLCOM_DML}, + {4, "LOAD", &xml_word, SQLCOM_DML}, + {6, "DELETE", 0, SQLCOM_DML}, + {6, "INSERT", 0, SQLCOM_DML}, + {6, "UPDATE", 0, SQLCOM_DML}, + {7, "HANDLER", 0, SQLCOM_DML}, + {7, "REPLACE", 0, SQLCOM_DML}, + {0, NULL, 0, SQLCOM_DML} +}; + + struct sa_keyword dcl_keywords[]= { {6, "CREATE", &user_word, SQLCOM_DCL}, @@ -1635,6 +1651,11 @@ static int log_statement_ex(const struct connection_info *cn, if (filter_query_type(query, dml_keywords)) goto do_log_query; } + if (events & EVENT_QUERY_DML_NO_SELECT) + { + if (filter_query_type(query, dml_no_select_keywords)) + goto do_log_query; + } if (events & EVENT_QUERY_DCL) { if (filter_query_type(query, dcl_keywords)) diff --git a/plugin/versioning/versioning.cc b/plugin/versioning/versioning.cc index e48f74bc4cb..7e3c29e1494 100644 --- a/plugin/versioning/versioning.cc +++ b/plugin/versioning/versioning.cc @@ -24,26 +24,26 @@ #include "item.h" #include "vers_utils.h" -/* System Versioning: VTQ_TRX_ID(), VTQ_COMMIT_ID(), VTQ_BEGIN_TS(), VTQ_COMMIT_TS(), VTQ_ISO_LEVEL() */ -template -class Create_func_vtq : public Create_native_func +/* System Versioning: TRT_TRX_ID(), TRT_COMMIT_ID(), TRT_BEGIN_TS(), TRT_COMMIT_TS(), TRT_ISO_LEVEL() */ +template +class Create_func_trt : public Create_native_func { public: virtual Item *create_native(THD *thd, LEX_CSTRING *name, List *item_list); - static Create_func_vtq s_singleton; + static Create_func_trt s_singleton; protected: - Create_func_vtq() {} - virtual ~Create_func_vtq() {} + Create_func_trt() {} + virtual ~Create_func_trt() {} }; -template -Create_func_vtq Create_func_vtq::s_singleton; +template +Create_func_trt Create_func_trt::s_singleton; -template +template Item* -Create_func_vtq::create_native(THD *thd, LEX_CSTRING *name, +Create_func_trt::create_native(THD *thd, LEX_CSTRING *name, List *item_list) { Item *func= NULL; @@ -56,16 +56,16 @@ Create_func_vtq::create_native(THD *thd, LEX_CSTRING *name, case 1: { Item *param_1= item_list->pop(); - switch (VTQ_FIELD) + switch (TRT_FIELD) { case TR_table::FLD_BEGIN_TS: case TR_table::FLD_COMMIT_TS: - func= new (thd->mem_root) Item_func_vtq_ts(thd, param_1, VTQ_FIELD); + func= new (thd->mem_root) Item_func_trt_ts(thd, param_1, TRT_FIELD); break; case TR_table::FLD_TRX_ID: case TR_table::FLD_COMMIT_ID: case TR_table::FLD_ISO_LEVEL: - func= new (thd->mem_root) Item_func_vtq_id(thd, param_1, VTQ_FIELD); + func= new (thd->mem_root) Item_func_trt_id(thd, param_1, TRT_FIELD); break; default: DBUG_ASSERT(0); @@ -76,11 +76,11 @@ Create_func_vtq::create_native(THD *thd, LEX_CSTRING *name, { Item *param_1= item_list->pop(); Item *param_2= item_list->pop(); - switch (VTQ_FIELD) + switch (TRT_FIELD) { case TR_table::FLD_TRX_ID: case TR_table::FLD_COMMIT_ID: - func= new (thd->mem_root) Item_func_vtq_id(thd, param_1, param_2, VTQ_FIELD); + func= new (thd->mem_root) Item_func_trt_id(thd, param_1, param_2, TRT_FIELD); break; default: goto error; @@ -98,8 +98,8 @@ Create_func_vtq::create_native(THD *thd, LEX_CSTRING *name, return func; }; -template -class Create_func_vtq_trx_sees : public Create_native_func +template +class Create_func_trt_trx_sees : public Create_native_func 
{ public: virtual Item *create_native(THD *thd, LEX_CSTRING *name, List *item_list) @@ -115,7 +115,7 @@ public: { Item *param_1= item_list->pop(); Item *param_2= item_list->pop(); - func= new (thd->mem_root) Item_func_vtq_trx_seesX(thd, param_1, param_2); + func= new (thd->mem_root) Item_func_trt_trx_seesX(thd, param_1, param_2); break; } default: @@ -126,27 +126,27 @@ public: return func; } - static Create_func_vtq_trx_sees s_singleton; + static Create_func_trt_trx_sees s_singleton; protected: - Create_func_vtq_trx_sees() {} - virtual ~Create_func_vtq_trx_sees() {} + Create_func_trt_trx_sees() {} + virtual ~Create_func_trt_trx_sees() {} }; template -Create_func_vtq_trx_sees Create_func_vtq_trx_sees::s_singleton; +Create_func_trt_trx_sees Create_func_trt_trx_sees::s_singleton; #define BUILDER(F) & F::s_singleton static Native_func_registry func_array[] = { - { { C_STRING_WITH_LEN("VTQ_BEGIN_TS") }, BUILDER(Create_func_vtq)}, - { { C_STRING_WITH_LEN("VTQ_COMMIT_ID") }, BUILDER(Create_func_vtq)}, - { { C_STRING_WITH_LEN("VTQ_COMMIT_TS") }, BUILDER(Create_func_vtq)}, - { { C_STRING_WITH_LEN("VTQ_ISO_LEVEL") }, BUILDER(Create_func_vtq)}, - { { C_STRING_WITH_LEN("VTQ_TRX_ID") }, BUILDER(Create_func_vtq)}, - { { C_STRING_WITH_LEN("VTQ_TRX_SEES") }, BUILDER(Create_func_vtq_trx_sees)}, - { { C_STRING_WITH_LEN("VTQ_TRX_SEES_EQ") }, BUILDER(Create_func_vtq_trx_sees)}, + { { C_STRING_WITH_LEN("TRT_BEGIN_TS") }, BUILDER(Create_func_trt)}, + { { C_STRING_WITH_LEN("TRT_COMMIT_ID") }, BUILDER(Create_func_trt)}, + { { C_STRING_WITH_LEN("TRT_COMMIT_TS") }, BUILDER(Create_func_trt)}, + { { C_STRING_WITH_LEN("TRT_ISO_LEVEL") }, BUILDER(Create_func_trt)}, + { { C_STRING_WITH_LEN("TRT_TRX_ID") }, BUILDER(Create_func_trt)}, + { { C_STRING_WITH_LEN("TRT_TRX_SEES") }, BUILDER(Create_func_trt_trx_sees)}, + { { C_STRING_WITH_LEN("TRT_TRX_SEES_EQ") }, BUILDER(Create_func_trt_trx_sees)}, { {0, 0}, NULL} }; diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index fc133f53b37..d7f7906f713 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -95,6 +95,7 @@ INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/mysql_system_tables.sql ${CMAKE_CURRENT_SOURCE_DIR}/mysql_system_tables_data.sql ${CMAKE_CURRENT_SOURCE_DIR}/mysql_performance_tables.sql + ${CMAKE_CURRENT_SOURCE_DIR}/mysql_test_db.sql ${CMAKE_CURRENT_SOURCE_DIR}/fill_help_tables.sql ${CMAKE_CURRENT_SOURCE_DIR}/mysql_test_data_timezone.sql ${CMAKE_CURRENT_SOURCE_DIR}/mysql_to_mariadb.sql diff --git a/scripts/make_win_bin_dist b/scripts/make_win_bin_dist deleted file mode 100755 index 2415d4017e4..00000000000 --- a/scripts/make_win_bin_dist +++ /dev/null @@ -1,416 +0,0 @@ -#!/bin/sh -# Copyright (c) 2006, 2011, Oracle and/or its affiliates. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -# Exit if failing to copy, we want exact specifications, not -# just "what happen to be built". 
-set -e - -# ---------------------------------------------------------------------- -# Read first argument that is the base name of the resulting TAR file. -# See usage() function below for a description on the arguments. -# -# NOTE: We will read the rest of the command line later on. -# NOTE: Pattern matching with "{..,..}" can't be used, not portable. -# ---------------------------------------------------------------------- - -# FIXME why "libmysql.dll" installed both in "bin" and "lib/opt"? - -usage() -{ - echo <{help}; - - @args = @ARGV if $pick_args; - - @ARGV = @saved_ARGV; # Set back ARGV -} - -############################################################################## -# -# Try to find a specific file within --basedir which can either be a binary -# release or installed source directory and return the path. -# -############################################################################## - -sub find_in_basedir -{ - my $opt = shift; - my $mode = shift; # "dir" or "file" - my $files = shift; - - foreach my $file ( @{ref($files) ? $files : [$files]} ) - { - foreach my $dir ( @_ ) - { - foreach my $part ( "$file","$file.exe","release/$file.exe", - "debug/$file.exe","relwithdebinfo/$file.exe" ) - { - my $path = "$opt->{basedir}/$dir/$part"; - if ( -f $path ) - { - return $mode eq "dir" ? dirname($path) : $path; - } - } - } - } -} - -############################################################################## -# -# Just a function to write out an error report -# -############################################################################## - -sub cannot_find_file -{ - my $file = shift; - - print "FATAL ERROR: Could not find $file\n"; - print "\n"; - print "If you compiled from source, you need to run 'make install' to\n"; - print "copy the software into the correct location ready for operation.\n"; - print "\n"; - print "If you are using a binary release, you must either be at the top\n"; - print "level of the extracted archive, or pass the --basedir option\n"; - print "pointing to that location.\n"; - print "\n"; - - exit 1; -} - -############################################################################## -# -# Form a command line that can handle spaces in paths and arguments -# -############################################################################## - -# FIXME this backslash escaping needed if using '"..."' ? -# This regexp makes sure that any special chars are quoted, -# so the arg gets passed exactly to the server. -# XXX: This is broken; true fix requires using eval and proper -# quoting of every single arg ($opt->{basedir}, $opt->{ldata}, etc.) -# join(" ", map {s/([^\w\_\.\-])/\\$1/g} - -sub quote_options { - my @cmd; - foreach my $opt ( @_ ) - { - next unless $opt; # If undefined or empty, just skip - push(@cmd, "\"$opt\""); # Quote argument - } - return join(" ", @cmd); -} - -############################################################################## -# -# Ok, let's go. We first need to parse arguments which are required by -# my_print_defaults so that we can execute it first, then later re-parse -# the command line to add any extra bits that we need. -# -############################################################################## - -my $opt = {}; -parse_arguments($opt, @ARGV); - -# ---------------------------------------------------------------------- -# We can now find my_print_defaults. 
This script supports: -# -# --srcdir=path pointing to compiled source tree -# --basedir=path pointing to installed binary location -# -# or default to compiled-in locations. -# ---------------------------------------------------------------------- - -my $print_defaults; - -if ( $opt->{srcdir} and $opt->{basedir} ) -{ - error("Specify either --basedir or --srcdir, not both"); -} -if ( $opt->{srcdir} ) -{ - $opt->{builddir} = $opt->{srcdir} unless $opt->{builddir}; - $print_defaults = "$opt->{builddir}/extra/my_print_defaults"; -} -elsif ( $opt->{basedir} ) -{ - $print_defaults = find_in_basedir($opt,"file","my_print_defaults","bin","extra"); -} -else -{ - $print_defaults='@bindir@/my_print_defaults'; -} - --x $print_defaults or -f "$print_defaults.exe" - or cannot_find_file($print_defaults); - -# ---------------------------------------------------------------------- -# Now we can get arguments from the groups [mysqld] and [mysql_install_db] -# in the my.cfg file, then re-run to merge with command line arguments. -# ---------------------------------------------------------------------- - -my @default_options; -my $cmd = quote_options($print_defaults,$opt->{'defaults-file'}, - "mysqld","mariadb","mysql_install_db","server","client-server"); -open(PIPE, "$cmd |") or error($opt,"can't run $cmd: $!"); -while ( ) -{ - chomp; - next unless /\S/; - push(@default_options, $_); -} -close PIPE; -$opt = {}; # Reset the arguments FIXME ? -parse_arguments($opt, @default_options); -parse_arguments($opt, 'PICK-ARGS-FROM-ARGV', @ARGV); - -# ---------------------------------------------------------------------- -# Configure paths to support files -# ---------------------------------------------------------------------- - -# FIXME $extra_bindir is not used -my ($bindir,$extra_bindir,$mysqld,$srcpkgdatadir,$buildpkgdatadir,$mysqld_opt, - $scriptdir); - -if ( $opt->{srcdir} ) -{ - $opt->{basedir} = $opt->{builddir}; - $bindir = "$opt->{basedir}/client"; - $extra_bindir = "$opt->{basedir}/extra"; - $mysqld = "$opt->{basedir}/sql/mysqld"; - $mysqld_opt = "--language=$opt->{srcdir}/sql/share/english"; - $srcpkgdatadir = "$opt->{srcdir}/scripts"; - $buildpkgdatadir = "$opt->{builddir}/scripts"; - $scriptdir = "$opt->{srcdir}/scripts"; -} -elsif ( $opt->{basedir} ) -{ - $bindir = "$opt->{basedir}/bin"; - $extra_bindir = $bindir; - $mysqld = find_in_basedir($opt,"file",["mysqld-nt","mysqld"], - "libexec","sbin","bin") || # ,"sql" - find_in_basedir($opt,"file","mysqld-nt", - "bin"); # ,"sql" - $srcpkgdatadir = find_in_basedir($opt,"dir","fill_help_tables.sql", - "share","share/mysql"); # ,"scripts" - $buildpkgdir = $srcpkgdatadir; - $scriptdir = "$opt->{basedir}/scripts"; -} -else -{ - $opt->{basedir} = '@prefix@'; - $bindir = '@bindir@'; - $extra_bindir = $bindir; - $mysqld = '@libexecdir@/mysqld'; - $srcpkgdatadir = '@pkgdatadir@'; - $buildpkgdatadir = '@pkgdatadir@'; - $scriptdir = '@scriptdir@'; -} - -unless ( $opt->{ldata} ) -{ - $opt->{ldata} = '@localstatedir@'; -} - - -# ---------------------------------------------------------------------- -# Set up paths to SQL scripts required for bootstrap -# ---------------------------------------------------------------------- - -my $fill_help_tables = "$srcpkgdatadir/fill_help_tables.sql"; -my $create_system_tables = "$srcpkgdatadir/mysql_system_tables.sql"; -my $fill_system_tables = "$srcpkgdatadir/mysql_system_tables_data.sql"; -my $maria_add_gis_sp = "$buildpkgdatadir/maria_add_gis_sp_bootstrap.sql"; - -foreach my $f ( 
$fill_help_tables,$create_system_tables,$fill_system_tables,$maria_add_gis_sp ) -{ - -f $f or cannot_find_file($f); -} - --x $mysqld or -f "$mysqld.exe" or cannot_find_file($mysqld); -# Try to determine the hostname -my $hostname = hostname(); - -# ---------------------------------------------------------------------- -# Check if hostname is valid -# ---------------------------------------------------------------------- - -my $resolved; -if ( !$opt->{'cross-bootstrap'} and !$opt->{rpm} and !$opt->{force} ) -{ - my $resolveip = "$extra_bindir/resolveip"; - - $resolved = `$resolveip $hostname 2>&1`; - if ( $? != 0 ) - { - $resolved=`$resolveip localhost 2>&1`; - if ( $? != 0 ) - { - error($opt, - "Neither host '$hostname' nor 'localhost' could be looked up with", - "$resolveip", - "Please configure the 'hostname' command to return a correct", - "hostname.", - "If you want to solve this at a later stage, restart this script", - "with the --force option"); - } - warning($opt, - "The host '$hostname' could not be looked up with $resolveip.", - "This probably means that your libc libraries are not 100 % compatible", - "with this binary MySQL version. The MySQL daemon, mysqld, should work", - "normally with the exception that host name resolving will not work.", - "This means that you should use IP addresses instead of hostnames", - "when specifying MySQL privileges !"); - } -} - -# FIXME what does this really mean.... -if ( $opt->{'skip-name-resolve'} and $resolved and $resolved =~ /\s/ ) -{ - $hostname = (split(' ', $resolved))[5]; -} - -# ---------------------------------------------------------------------- -# Create database directories mysql & test -# ---------------------------------------------------------------------- - -foreach my $dir ( $opt->{ldata}, "$opt->{ldata}/mysql", "$opt->{ldata}/test" ) -{ - # FIXME not really the same as original "mkdir -p", but ok? - mkdir($dir, 0700) unless -d $dir; - chown($opt->{user}, $dir) if -w "/" and !$opt->{user}; -} - -push(@args, "--user=$opt->{user}") if $opt->{user}; - -# ---------------------------------------------------------------------- -# Configure mysqld command line -# ---------------------------------------------------------------------- - -# FIXME use --init-file instead of --bootstrap ?! - -my $mysqld_bootstrap = $ENV{MYSQLD_BOOTSTRAP} || $mysqld; -my $mysqld_install_cmd_line = quote_options($mysqld_bootstrap, - $opt->{'defaults-file'}, - $mysqld_opt, - "--bootstrap", - "--basedir=$opt->{basedir}", - "--datadir=$opt->{ldata}", - "--log-warnings=0", - "--max_allowed_packet=8M", - "--default-storage-engine=MyISAM", - "--net_buffer_length=16K", - "--enforce-storage-engine=\"\"", - @args, - ); - -# ---------------------------------------------------------------------- -# Create the system and help tables by passing them to "mysqld --bootstrap" -# ---------------------------------------------------------------------- - -report_verbose_wait($opt,"Installing MySQL system tables..."); - -open(SQL, $create_system_tables) - or error($opt,"can't open $create_system_tables for reading: $!"); -open(SQL2, $fill_system_tables) - or error($opt,"can't open $fill_system_tables for reading: $!"); -# FIXME > /dev/null ? -if ( open(PIPE, "| $mysqld_install_cmd_line") ) -{ - print PIPE "use mysql;\n"; - while ( ) - { - # When doing a "cross bootstrap" install, no reference to the current - # host should be added to the system tables. So we filter out any - # lines which contain the current host name. 
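The removed Perl installer code above builds a mysqld --bootstrap command line and pipes the system-table SQL into it; for a cross-bootstrap run it drops every SQL line that mentions @current_hostname so the generated data files carry no reference to the build host. A minimal C++ sketch of that line filter follows; the function and stream names are illustrative, only the @current_hostname marker comes from the scripts.

```cpp
// Sketch of the cross-bootstrap filter: copy the SQL stream line by line,
// dropping lines that reference the build host.
#include <iostream>
#include <sstream>
#include <string>

void filter_bootstrap_sql(std::istream &in, std::ostream &out,
                          bool cross_bootstrap) {
  std::string line;
  while (std::getline(in, line)) {
    if (cross_bootstrap && line.find("@current_hostname") != std::string::npos)
      continue;                       // host-specific row: keep it out
    out << line << '\n';
  }
}

int main() {
  std::istringstream sql(
      "INSERT INTO tmp_user_nopasswd VALUES ('localhost','root');\n"
      "REPLACE INTO tmp_user_nopasswd SELECT @current_hostname,'root';\n");
  filter_bootstrap_sql(sql, std::cout, /*cross_bootstrap=*/true);
}
```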
- next if $opt->{'cross-bootstrap'} and /\@current_hostname/; - - print PIPE $_; - } - while ( ) - { - # TODO: make it similar to the above condition when we're sure - # @@hostname returns a fqdn - # When doing a "cross bootstrap" install, no reference to the current - # host should be added to the system tables. So we filter out any - # lines which contain the current host name. - next if /\@current_hostname/; - - print PIPE $_; - } - close PIPE; - close SQL; - close SQL2; - - report_verbose($opt,"OK"); - - # ---------------------------------------------------------------------- - # Pipe fill_help_tables.sql to "mysqld --bootstrap" - # ---------------------------------------------------------------------- - - report_verbose_wait($opt,"Filling help tables..."); - open(SQL, $fill_help_tables) - or error($opt,"can't open $fill_help_tables for reading: $!"); - # FIXME > /dev/null ? - if ( open(PIPE, "| $mysqld_install_cmd_line") ) - { - print PIPE "use mysql;\n"; - while ( ) - { - print PIPE $_; - } - close PIPE; - close SQL; - - report_verbose($opt,"OK"); - } - else - { - warning($opt,"HELP FILES ARE NOT COMPLETELY INSTALLED!", - "The \"HELP\" command might not work properly"); - } - - # ---------------------------------------------------------------------- - # Pipe maria_add_gis_sp.sql to "mysqld --bootstrap" - # ---------------------------------------------------------------------- - - report_verbose_wait($opt,"Creating OpenGIS required SP-s..."); - open(SQL, $maria_add_gis_sp) - or error($opt,"can't open $maria_add_gis_sp for reading: $!"); - # FIXME > /dev/null ? - if ( open(PIPE, "| $mysqld_install_cmd_line") ) - { - print PIPE "use mysql;\n"; - while ( ) - { - print PIPE $_; - } - close PIPE; - close SQL; - - report_verbose($opt,"OK"); - } - else - { - warning($opt,"OPENGIS REQUIRED SP-S WERE NOT COMPLETELY INSTALLED!", - "GIS extentions might not work properly"); - } - - report_verbose($opt,"To start mysqld at boot time you have to copy", - "support-files/mysql.server to the right place " . - "for your system"); - - if ( !$opt->{'cross-bootstrap'} ) - { - # This is not a true installation on a running system. The end user must - # set a password after installing the data files on the real host system. - # At this point, there is no end user, so it does not make sense to print - # this reminder. - report($opt, - "PLEASE REMEMBER TO SET A PASSWORD FOR THE MySQL root USER !", - "To do so, start the server, then issue the following commands:", - "", - " $bindir/mysqladmin -u root password 'new-password'", - " $bindir/mysqladmin -u root -h $hostname password 'new-password'", - "", - "Alternatively you can run:", - "", - " $bindir/mysql_secure_installation", - "", - "which will also give you the option of removing the test", - "databases and anonymous user created by default. This is", - "strongly recommended for production servers.", - "", - "See the manual for more instructions."); - - if ( !$opt->{rpm} ) - { - report($opt, - "You can start the MySQL daemon with:", - "", - " cd " . '@prefix@' . 
" ; $bindir/mysqld_safe &", - "", - "You can test the MySQL daemon with mysql-test-run.pl", - "", - " cd mysql-test ; perl mysql-test-run.pl"); - } - report($opt, - "Please report any problems at http://bugs.mysql.com/", - "", - "The latest information about MySQL is available on the web at", - "", - " http://www.mysql.com", - "", - "Support MySQL by buying support/licenses at http://shop.mysql.com"); - } - exit 0 -} -else -{ - error($opt, - "Installation of system tables failed!", - "", - "Examine the logs in $opt->{ldata} for more information.", - "You can try to start the mysqld daemon with:", - "$mysqld --skip-grant &", - "and use the command line tool", - "$bindir/mysql to connect to the mysql", - "database and look at the grant tables:", - "", - "shell> $bindir/mysql -u root mysql", - "mysql> show tables;", - "", - "Try 'mysqld --help' if you have problems with paths. Using --log", - "gives you a log in $opt->{ldata} that may be helpful.", - "", - "The latest information about MySQL is available on the web at", - "http://www.mysql.com", - "Please consult the MySQL manual section: 'Problems running mysql_install_db',", - "and the manual section that describes problems on your OS.", - "Another information source is the MySQL email archive.", - "", - "Please check all of the above before submitting a bug report", - "at http://bugs.mysql.com/") -} - -############################################################################## -# -# Misc -# -############################################################################## - -sub report_verbose -{ - my $opt = shift; - my $text = shift; - - report_verbose_wait($opt, $text, @_); - print "\n\n"; -} - -sub report_verbose_wait -{ - my $opt = shift; - my $text = shift; - - if ( $opt->{verbose} or (!$opt->{rpm} and !$opt->{'cross-bootstrap'}) ) - { - print "$text"; - map {print "\n$_"} @_; - } -} - -sub report -{ - my $opt = shift; - my $text = shift; - - print "$text\n"; - map {print "$_\n"} @_; - print "\n"; -} - -sub error -{ - my $opt = shift; - my $text = shift; - - print "FATAL ERROR: $text\n"; - map {print "$_\n"} @_; - exit 1; -} - -sub warning -{ - my $opt = shift; - my $text = shift; - - print "WARNING: $text\n"; - map {print "$_\n"} @_; - print "\n"; -} diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh index 5afeb6a6f47..8d706e4c5ad 100644 --- a/scripts/mysql_install_db.sh +++ b/scripts/mysql_install_db.sh @@ -37,9 +37,9 @@ force=0 in_rpm=0 ip_only=0 cross_bootstrap=0 -install_params="" auth_root_authentication_method=normal auth_root_socket_user='root' +skip_test_db=0 usage() { @@ -80,11 +80,10 @@ Usage: $0 [OPTIONS] --defaults-file=path Read only this configuration file. --rpm For internal use. This option is used by RPM files during the MariaDB installation process. - --skip-auth-anonymous-user - Do not install an unprivileged anonymous user. --skip-name-resolve Use IP addresses rather than hostnames when creating grant table entries. This option can be useful if your DNS does not work. + --skip-test-db Don't install a test database. --srcdir=path The path to the MariaDB source directory. 
This option uses the compiled binaries and support files within the source tree, useful for if you don't want to install @@ -168,9 +167,6 @@ parse_arguments() # # --windows is a deprecated alias cross_bootstrap=1 ;; - --skip-auth-anonymous-user) - install_params="$install_params -SET @skip_auth_anonymous=1;" ;; --auth-root-authentication-method=normal) auth_root_authentication_method=normal ;; --auth-root-authentication-method=socket) @@ -179,6 +175,7 @@ SET @skip_auth_anonymous=1;" ;; usage ;; --auth-root-socket-user=*) auth_root_socket_user="$(parse_arg "$arg")" ;; + --skip-test-db) skip_test_db=1 ;; *) if test -n "$pick_args" @@ -353,8 +350,9 @@ create_system_tables="$srcpkgdatadir/mysql_system_tables.sql" create_system_tables2="$srcpkgdatadir/mysql_performance_tables.sql" fill_system_tables="$srcpkgdatadir/mysql_system_tables_data.sql" maria_add_gis_sp="$buildpkgdatadir/maria_add_gis_sp_bootstrap.sql" +mysql_test_db="$srcpkgdatadir/mysql_test_db.sql" -for f in "$fill_help_tables" "$create_system_tables" "$create_system_tables2" "$fill_system_tables" "$maria_add_gis_sp" +for f in "$fill_help_tables" "$create_system_tables" "$create_system_tables2" "$fill_system_tables" "$maria_add_gis_sp" "$mysql_test_db" do if test ! -f "$f" then @@ -418,7 +416,7 @@ then fi # Create database directories -for dir in "$ldata" "$ldata/mysql" "$ldata/test" +for dir in "$ldata" "$ldata/mysql" do if test ! -d "$dir" then @@ -467,20 +465,31 @@ mysqld_install_cmd_line() --net_buffer_length=16K } +cat_sql() +{ + echo "use mysql;" + + case "$auth_root_authentication_method" in + normal) + echo "SET @skip_auth_root_nopasswd=NULL;" + echo "SET @auth_root_socket=NULL;" + ;; + socket) + echo "SET @skip_auth_root_nopasswd=1;" + echo "SET @auth_root_socket='$auth_root_socket_user';" + ;; + esac + + cat "$create_system_tables" "$create_system_tables2" "$fill_system_tables" "$fill_help_tables" "$maria_add_gis_sp" + if test "$skip_test_db" -eq 0 + then + cat "$mysql_test_db" + fi +} # Create the system and help tables by passing them to "mysqld --bootstrap" s_echo "Installing MariaDB/MySQL system tables in '$ldata' ..." 
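The new cat_sql() helper replaces the old install_params string building: it prints a preamble of SET statements that tell mysql_system_tables_data.sql which root accounts to create, then concatenates mysql_system_tables.sql, mysql_performance_tables.sql, mysql_system_tables_data.sql, fill_help_tables.sql and maria_add_gis_sp_bootstrap.sql, appending the new mysql_test_db.sql only when --skip-test-db was not given. A rough C++ analogue of that assembly step is sketched below so the control flow can be read in one place; only the file names and SET variables come from the script, everything else is illustrative.

```cpp
// Rough analogue of cat_sql(): emit the SET preamble, then the bootstrap
// scripts in order, optionally including the test-database script.
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

void emit_bootstrap_sql(std::ostream &out, bool socket_auth,
                        const std::string &socket_user, bool skip_test_db) {
  out << "use mysql;\n";
  if (socket_auth) {
    out << "SET @skip_auth_root_nopasswd=1;\n"
        << "SET @auth_root_socket='" << socket_user << "';\n";
  } else {
    out << "SET @skip_auth_root_nopasswd=NULL;\n"
        << "SET @auth_root_socket=NULL;\n";
  }

  std::vector<std::string> scripts = {
      "mysql_system_tables.sql",      "mysql_performance_tables.sql",
      "mysql_system_tables_data.sql", "fill_help_tables.sql",
      "maria_add_gis_sp_bootstrap.sql"};
  if (!skip_test_db)
    scripts.push_back("mysql_test_db.sql");   // the new optional script

  for (const auto &path : scripts) {
    std::ifstream f(path);
    if (f)
      out << f.rdbuf();                       // concatenate the file contents
  }
}

int main() {
  emit_bootstrap_sql(std::cout, /*socket_auth=*/true, "root",
                     /*skip_test_db=*/false);
}
```

Driving account creation through the @skip_auth_root_nopasswd / @auth_root_socket user variables keeps the SQL scripts themselves declarative, and the same mechanism is what lets the test database be dropped simply by omitting mysql_test_db.sql from the stream.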
-case "$auth_root_authentication_method" in - normal) - install_params="$install_params -SET @skip_auth_root_nopasswd=NULL; -SET @auth_root_socket=NULL;" ;; - socket) - install_params="$install_params -SET @skip_auth_root_nopasswd=1; -SET @auth_root_socket='$auth_root_socket_user';" ;; -esac -if { echo "use mysql;$install_params"; cat "$create_system_tables" "$create_system_tables2" "$fill_system_tables" "$fill_help_tables" "$maria_add_gis_sp"; } | eval "$filter_cmd_line" | mysqld_install_cmd_line > /dev/null +if cat_sql | eval "$filter_cmd_line" | mysqld_install_cmd_line > /dev/null then s_echo "OK" else diff --git a/scripts/mysql_system_tables_data.sql b/scripts/mysql_system_tables_data.sql index 4821b18bcbf..0fbbfe4701f 100644 --- a/scripts/mysql_system_tables_data.sql +++ b/scripts/mysql_system_tables_data.sql @@ -26,21 +26,10 @@ -- a plain character SELECT LOWER( REPLACE((SELECT REPLACE(@@hostname,'_','\_')),'%','\%') )INTO @current_hostname; - --- Fill "db" table with default grants for anyone to --- access database 'test' and 'test_%' if "db" table didn't exist -CREATE TEMPORARY TABLE tmp_db LIKE db; -INSERT INTO tmp_db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y','Y'); -INSERT INTO tmp_db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y','Y'); -INSERT INTO db SELECT * FROM tmp_db WHERE @had_db_table=0; -DROP TABLE tmp_db; - - -- Fill "user" table with default users allowing root access -- from local machine if "user" table didn't exist before CREATE TEMPORARY TABLE tmp_user_nopasswd LIKE user; CREATE TEMPORARY TABLE tmp_user_socket LIKE user; -CREATE TEMPORARY TABLE tmp_user_anonymous LIKE user; -- Classic passwordless root account. INSERT INTO tmp_user_nopasswd VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','','N', 'N','', 0); REPLACE INTO tmp_user_nopasswd SELECT @current_hostname,'root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','','N','N','',0 FROM dual WHERE @current_hostname != 'localhost'; @@ -48,14 +37,10 @@ REPLACE INTO tmp_user_nopasswd VALUES ('127.0.0.1','root','','Y','Y','Y','Y','Y' REPLACE INTO tmp_user_nopasswd VALUES ('::1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','','N','N', '', 0); -- More secure root account using unix sucket auth. INSERT INTO tmp_user_socket VALUES ('localhost',IFNULL(@auth_root_socket, 'root'),'','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'unix_socket','','N', 'N','', 0); --- Anonymous user with no privileges. 
-INSERT INTO tmp_user_anonymous (host,user) VALUES ('localhost',''); -INSERT INTO tmp_user_anonymous (host,user) SELECT @current_hostname,'' FROM dual WHERE @current_hostname != 'localhost'; INSERT INTO user SELECT * FROM tmp_user_nopasswd WHERE @had_user_table=0 AND @skip_auth_root_nopasswd IS NULL; INSERT INTO user SELECT * FROM tmp_user_socket WHERE @had_user_table=0 AND @auth_root_socket IS NOT NULL; -INSERT INTO user SELECT * FROM tmp_user_anonymous WHERE @had_user_table=0 AND @skip_auth_anonymous IS NULL; -DROP TABLE tmp_user_nopasswd, tmp_user_socket, tmp_user_anonymous; +DROP TABLE tmp_user_nopasswd, tmp_user_socket; CREATE TEMPORARY TABLE tmp_proxies_priv LIKE proxies_priv; INSERT INTO tmp_proxies_priv VALUES ('localhost', 'root', '', '', TRUE, '', now()); diff --git a/scripts/mysql_test_db.sql b/scripts/mysql_test_db.sql new file mode 100644 index 00000000000..c1bb3661ec3 --- /dev/null +++ b/scripts/mysql_test_db.sql @@ -0,0 +1,31 @@ +-- Copyright (c) 2018 MariaDB Foundation +-- +-- This program is free software; you can redistribute it and/or modify +-- it under the terms of the GNU General Public License as published by +-- the Free Software Foundation; version 2 of the License. +-- +-- This program is distributed in the hope that it will be useful, +-- but WITHOUT ANY WARRANTY; without even the implied warranty of +-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +-- GNU General Public License for more details. +-- +-- You should have received a copy of the GNU General Public License +-- along with this program; if not, write to the Free Software +-- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +CREATE DATABASE IF NOT EXISTS test CHARACTER SET latin1 COLLATE latin1_swedish_ci; + +-- Fill "db" table with default grants for anyone to +-- access database 'test' and 'test_%' if "db" table didn't exist +CREATE TEMPORARY TABLE tmp_db LIKE db; +INSERT INTO tmp_db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y','Y'); +INSERT INTO tmp_db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y','Y'); +INSERT INTO db SELECT * FROM tmp_db WHERE @had_db_table=0; +DROP TABLE tmp_db; + +-- Anonymous user with no privileges. +CREATE TEMPORARY TABLE tmp_user_anonymous LIKE user; +INSERT INTO tmp_user_anonymous (host,user) VALUES ('localhost',''); +INSERT INTO tmp_user_anonymous (host,user) SELECT @current_hostname,'' FROM dual WHERE @current_hostname != 'localhost'; +INSERT INTO user SELECT * FROM tmp_user_anonymous WHERE @had_user_table=0; +DROP TABLE tmp_user_anonymous; diff --git a/scripts/mysqld_safe.sh b/scripts/mysqld_safe.sh index 5797bdc68d7..696ac552fad 100644 --- a/scripts/mysqld_safe.sh +++ b/scripts/mysqld_safe.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # Copyright Abandoned 1996 TCX DataKonsult AB & Monty Program KB & Detron HB # This file is public domain and comes with NO WARRANTY of any kind # @@ -36,7 +36,6 @@ skip_err_log=0 syslog_tag_mysqld=mysqld syslog_tag_mysqld_safe=mysqld_safe -trap '' 1 2 3 15 # we shouldn't let anyone kill us # MySQL-specific environment variable. First off, it's not really a umask, # it's the desired mode. Second, it follows umask(2), not umask(3) in that @@ -160,7 +159,7 @@ eval_log_error () { # sed buffers output (only GNU sed supports a -u (unbuffered) option) # which means that messages may not get sent to syslog until the # mysqld process quits. 
- cmd="$cmd 2>&1 | logger -t '$syslog_tag_mysqld' -p daemon.error" + cmd="$cmd 2>&1 | logger -t '$syslog_tag_mysqld' -p daemon.error & wait" ;; *) echo "Internal program error (non-fatal):" \ @@ -877,6 +876,13 @@ then exit 1 fi +# +# From now on, we catch signals to do a proper shutdown of mysqld +# when signalled to do so. +# +trap '/usr/bin/mysqladmin --defaults-extra-file=/etc/mysql/debian.cnf refresh & wait' 1 # HUP +trap '/usr/bin/mysqladmin --defaults-extra-file=/etc/mysql/debian.cnf shutdown' 2 3 15 # INT QUIT and TERM + # # Uncomment the following lines if you want all tables to be automatically # checked and repaired during startup. You should add sensible key_buffer diff --git a/scripts/wsrep_sst_xtrabackup.sh b/scripts/wsrep_sst_xtrabackup.sh index 3a30f5898db..41ed4485de5 100644 --- a/scripts/wsrep_sst_xtrabackup.sh +++ b/scripts/wsrep_sst_xtrabackup.sh @@ -399,7 +399,6 @@ if [[ ! ${WSREP_SST_OPT_ROLE} == 'joiner' && ! ${WSREP_SST_OPT_ROLE} == 'donor' fi read_cnf -setup_ports get_stream get_transfer diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh index 7e83b7365e3..78ad43d6b2d 100644 --- a/sql-bench/server-cfg.sh +++ b/sql-bench/server-cfg.sh @@ -131,6 +131,7 @@ sub new $limits{'alter_add_multi_col'}= 1; #Have ALTER TABLE t add a int,add b int; $limits{'alter_table'} = 1; # Have ALTER TABLE $limits{'alter_table_dropcol'}= 1; # Have ALTER TABLE DROP column + $limits{'alter_table_after'}= 1; # Have ALTER TABLE .. AFTER other_column $limits{'column_alias'} = 1; # Alias for fields in select statement. $limits{'func_extra_%'} = 1; # Has % as alias for mod() $limits{'func_extra_if'} = 1; # Have function if. diff --git a/sql-bench/test-ATIS.sh b/sql-bench/test-ATIS.sh index ca1132435ff..1e645b4c1a9 100644 --- a/sql-bench/test-ATIS.sh +++ b/sql-bench/test-ATIS.sh @@ -369,7 +369,7 @@ sub init_data @class_of_service= $server->create("class_of_service", ["class_code char(2) NOT NULL", - "rank tinyint(2) NOT NULL", + "`rank` tinyint(2) NOT NULL", "class_description char(80) NOT NULL"], ["PRIMARY KEY (class_code)"]); @code_description= diff --git a/sql-bench/test-alter-table.sh b/sql-bench/test-alter-table.sh index 6dc5ce35841..2dc96affdd3 100644 --- a/sql-bench/test-alter-table.sh +++ b/sql-bench/test-alter-table.sh @@ -202,9 +202,10 @@ while ($field_count > $opt_start_field_count) $count++; $end=max($field_count-$add,$opt_start_field_count); $fields=""; - while(--$field_count >= $end) + while ($field_count > $end) { $fields.=",DROP i${field_count}"; + $field_count--; } $dbh->do("ALTER TABLE bench " . substr($fields,1) . $server->{'drop_attr'}) || die $DBI::errstr; @@ -221,6 +222,39 @@ else print " for alter_table_drop ($count): " . timestr(timediff($end_time, $loop_time),"all") . "\n\n"; +#### +#### Add fields in middle of the table +#### + +goto skip_dropcol if (!$limits->{'alter_table_after'}); + +$count=0; +while ($field_count < $opt_field_count) +{ + $count++; + $end=min($field_count+$add,$opt_field_count); + $fields=""; + $tmp="ADD "; + while ($field_count < $end) + { + $field_count++; + $fields.=",$tmp i${field_count} integer after i1"; + $tmp="" if (!$multi_add); # Adabas + } + do_query($dbh,"ALTER TABLE bench " . substr($fields,1)); + $end_time=new Benchmark; + last if ($estimated=predict_query_time($loop_time,$end_time,\$count,$count, + $opt_field_count/$add+1)); +} + +$end_time=new Benchmark; +if ($estimated) +{ print "Estimated time"; } +else +{ print "Time"; } +print " for alter_table_add_in_middle ($count): " . + timestr(timediff($end_time, $loop_time),"all") . 
"\n\n"; + skip_dropcol: ################################ END ################################### diff --git a/sql-bench/test-insert.sh b/sql-bench/test-insert.sh index e264c7d529f..f4b47908d9b 100644 --- a/sql-bench/test-insert.sh +++ b/sql-bench/test-insert.sh @@ -993,11 +993,22 @@ $end_time=new Benchmark; print "Time for update_with_key (" . ($opt_loop_count*3) . "): " . timestr(timediff($end_time, $loop_time),"all") . "\n"; +print "Testing update with key, no changes in data\n"; +$loop_time=new Benchmark; +for ($i=0 ; $i < $opt_loop_count*3 ; $i++) +{ + $sth = $dbh->do("update bench1 set dummy1='updated' where id=$i and id2=$i") or die $DBI::errstr; +} + +$end_time=new Benchmark; +print "Time for update_with_key_record_unchanged (" . ($opt_loop_count*3) . "): " . + timestr(timediff($end_time, $loop_time),"all") . "\n"; + $loop_time=new Benchmark; $count=0; for ($i=1 ; $i < $opt_loop_count*3 ; $i+=3) { - $sth = $dbh->do("update bench1 set dummy1='updated' where id=$i") or die $DBI::errstr; + $sth = $dbh->do("update bench1 set dummy1='really updated' where id=$i") or die $DBI::errstr; $end_time=new Benchmark; last if ($estimated=predict_query_time($loop_time,$end_time,\$i,($i-1)/3, $opt_loop_count)); diff --git a/sql-common/client.c b/sql-common/client.c index 00573de415b..6a44a2db0aa 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -1343,7 +1343,9 @@ unpack_fields(MYSQL *mysql, MYSQL_DATA *data,MEM_ROOT *alloc,uint fields, { uchar *pos; /* fields count may be wrong */ - DBUG_ASSERT((uint) (field - result) < fields); + if (field >= result + fields) + goto err; + cli_fetch_lengths(&lengths[0], row->data, default_value ? 8 : 7); field->catalog= strmake_root(alloc,(char*) row->data[0], lengths[0]); field->db= strmake_root(alloc,(char*) row->data[1], lengths[1]); @@ -1361,12 +1363,7 @@ unpack_fields(MYSQL *mysql, MYSQL_DATA *data,MEM_ROOT *alloc,uint fields, /* Unpack fixed length parts */ if (lengths[6] != 12) - { - /* malformed packet. signal an error. */ - free_rows(data); /* Free old data */ - set_mysql_error(mysql, CR_MALFORMED_PACKET, unknown_sqlstate); - DBUG_RETURN(0); - } + goto err; pos= (uchar*) row->data[6]; field->charsetnr= uint2korr(pos); @@ -1393,6 +1390,8 @@ unpack_fields(MYSQL *mysql, MYSQL_DATA *data,MEM_ROOT *alloc,uint fields, /* old protocol, for backward compatibility */ for (row=data->data; row ; row = row->next,field++) { + if (field >= result + fields) + goto err; cli_fetch_lengths(&lengths[0], row->data, default_value ? 6 : 5); field->org_table= field->table= strdup_root(alloc,(char*) row->data[0]); field->name= strdup_root(alloc,(char*) row->data[1]); @@ -1429,8 +1428,17 @@ unpack_fields(MYSQL *mysql, MYSQL_DATA *data,MEM_ROOT *alloc,uint fields, } } #endif /* DELETE_SUPPORT_OF_4_0_PROTOCOL */ + if (field < result + fields) + goto err; free_rows(data); /* Free old data */ DBUG_RETURN(result); + +err: + /* malformed packet. signal an error. 
*/ + free_rows(data); + free_root(alloc, MYF(0)); + set_mysql_error(mysql, CR_MALFORMED_PACKET, unknown_sqlstate); + DBUG_RETURN(0); } /* Read all rows (fields or data) from server */ @@ -1499,7 +1507,7 @@ MYSQL_DATA *cli_read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields, else { cur->data[field] = to; - if (len > (ulong) (end_to - to)) + if (unlikely(len > (ulong)(end_to-to) || to > end_to)) { free_rows(result); set_mysql_error(mysql, CR_MALFORMED_PACKET, unknown_sqlstate); @@ -1571,7 +1579,7 @@ read_one_row(MYSQL *mysql,uint fields,MYSQL_ROW row, ulong *lengths) } else { - if (len > (ulong) (end_pos - pos)) + if (unlikely(len > (ulong)(end_pos - pos) || pos > end_pos)) { set_mysql_error(mysql, CR_UNKNOWN_ERROR, unknown_sqlstate); return -1; @@ -2732,7 +2740,7 @@ static int client_mpvio_read_packet(struct st_plugin_vio *mpv, uchar **buf) *buf= mysql->net.read_pos; /* was it a request to change plugins ? */ - if (**buf == 254) + if (pkt_len == packet_error || **buf == 254) return (int)packet_error; /* if yes, this plugin shan't continue */ /* @@ -2917,7 +2925,7 @@ int run_plugin_auth(MYSQL *mysql, char *data, uint data_len, compile_time_assert(CR_OK == -1); compile_time_assert(CR_ERROR == 0); - if (res > CR_OK && mysql->net.read_pos[0] != 254) + if (res > CR_OK && (mysql->net.last_errno || mysql->net.read_pos[0] != 254)) { /* the plugin returned an error. write it down in mysql, @@ -3049,6 +3057,7 @@ set_connect_attributes(MYSQL *mysql, char *buff, size_t buf_len) rc+= mysql_options(mysql, MYSQL_OPT_CONNECT_ATTR_DELETE, "_client_name"); rc+= mysql_options(mysql, MYSQL_OPT_CONNECT_ATTR_DELETE, "_os"); rc+= mysql_options(mysql, MYSQL_OPT_CONNECT_ATTR_DELETE, "_platform"); + rc+= mysql_options(mysql, MYSQL_OPT_CONNECT_ATTR_DELETE, "_server_host"); rc+= mysql_options(mysql, MYSQL_OPT_CONNECT_ATTR_DELETE, "_pid"); rc+= mysql_options(mysql, MYSQL_OPT_CONNECT_ATTR_DELETE, "_thread"); rc+= mysql_options(mysql, MYSQL_OPT_CONNECT_ATTR_DELETE, "_client_version"); @@ -3064,6 +3073,8 @@ set_connect_attributes(MYSQL *mysql, char *buff, size_t buf_len) "_os", SYSTEM_TYPE); rc+= mysql_options4(mysql, MYSQL_OPT_CONNECT_ATTR_ADD, "_platform", MACHINE_TYPE); + rc+= mysql_options4(mysql, MYSQL_OPT_CONNECT_ATTR_ADD, + "_server_host", mysql->host); #ifdef __WIN__ snprintf(buff, buf_len, "%lu", (ulong) GetCurrentProcessId()); #else diff --git a/sql-common/client_plugin.c b/sql-common/client_plugin.c index f81660f8986..4c584d17294 100644 --- a/sql-common/client_plugin.c +++ b/sql-common/client_plugin.c @@ -28,7 +28,7 @@ There is no reference counting and no unloading either. */ -#if _MSC_VER +#if defined(_MSC_VER) /* Silence warnings about variable 'unused' being used. 
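The sql-common/client.c hunks above replace a debug-only assertion with run-time checks: the number of column-definition rows received is validated against the field count the server advertised (both "too many" and "too few" now jump to a common err: label), and every length taken from the wire is checked against the end of the receive buffer before it is used. A compact sketch of the same defensive-parsing pattern follows; the fixed-size record layout is invented for the example and is not the MySQL protocol.

```cpp
// Sketch of bounds-checked unpacking of N fixed-size records from an
// untrusted buffer: reject both "more records than advertised" and
// "fewer records than advertised", and never read past the buffer end.
#include <cstdint>
#include <cstring>
#include <optional>
#include <vector>

struct FieldMeta { std::uint16_t charsetnr; std::uint32_t length; };

std::optional<std::vector<FieldMeta>>
unpack_fields(const std::uint8_t *buf, std::size_t buf_len,
              std::size_t advertised_count) {
  std::vector<FieldMeta> out;
  const std::uint8_t *p = buf, *end = buf + buf_len;
  while (p < end) {
    if (out.size() >= advertised_count)        // too many records
      return std::nullopt;                     // -> treat as malformed
    if (static_cast<std::size_t>(end - p) < 6) // record would overrun buffer
      return std::nullopt;
    FieldMeta f;
    std::memcpy(&f.charsetnr, p, 2);           // placeholder for uint2korr()
    std::memcpy(&f.length, p + 2, 4);          // placeholder for uint4korr()
    out.push_back(f);
    p += 6;
  }
  if (out.size() < advertised_count)           // too few records
    return std::nullopt;
  return out;
}

int main() {
  std::uint8_t pkt[12] = {};                   // two 6-byte records
  auto fields = unpack_fields(pkt, sizeof pkt, 2);
  return fields ? 0 : 1;                       // 0: parsed as expected
}
```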
*/ #define FORCE_INIT_OF_VARS 1 #endif diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index ca5c7c87bf5..7253f7ba447 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -469,12 +469,13 @@ IF(WIN32) ADD_CUSTOM_COMMAND(OUTPUT ${my_bootstrap_sql} COMMAND ${CMAKE_COMMAND} -E chdir ${CMAKE_SOURCE_DIR}/scripts - cmd /c copy mysql_system_tables.sql+mysql_system_tables_data.sql+fill_help_tables.sql+mysql_performance_tables.sql ${native_outfile} + cmd /c copy mysql_system_tables.sql+mysql_system_tables_data.sql+fill_help_tables.sql+mysql_performance_tables.sql+mysql_test_db.sql ${native_outfile} DEPENDS ${CMAKE_SOURCE_DIR}/scripts/mysql_system_tables.sql ${CMAKE_SOURCE_DIR}/scripts/mysql_system_tables_data.sql ${CMAKE_SOURCE_DIR}/scripts/fill_help_tables.sql ${CMAKE_SOURCE_DIR}/scripts/mysql_performance_tables.sql + ${CMAKE_SOURCE_DIR}/scripts/mysql_test_db.sql ) ADD_CUSTOM_COMMAND( diff --git a/sql/create_options.h b/sql/create_options.h index c82cb875743..16be5affde0 100644 --- a/sql/create_options.h +++ b/sql/create_options.h @@ -63,7 +63,7 @@ class engine_option_value: public Sql_alloc name(name_arg), next(NULL), parsed(false), quoted_value(false) { char *str; - if ((value.str= str= (char *)alloc_root(root, 22))) + if (likely((value.str= str= (char *)alloc_root(root, 22)))) { value.length= longlong10_to_str(value_arg, str, 10) - str; link(start, end); diff --git a/sql/datadict.cc b/sql/datadict.cc index 4e18fe06cb6..410dbff8778 100644 --- a/sql/datadict.cc +++ b/sql/datadict.cc @@ -89,10 +89,10 @@ Table_type dd_frm_type(THD *thd, char *path, LEX_CSTRING *engine_name, ((char*) (engine_name->str))[0]= 0; } - if ((error= mysql_file_read(file, (uchar*) header, sizeof(header), MYF(MY_NABP)))) + if (unlikely((error= mysql_file_read(file, (uchar*) header, sizeof(header), MYF(MY_NABP))))) goto err; - if (!strncmp((char*) header, "TYPE=VIEW\n", 10)) + if (unlikely((!strncmp((char*) header, "TYPE=VIEW\n", 10)))) { type= TABLE_TYPE_VIEW; goto err; diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc index 58a01a77849..d25c1a75e96 100644 --- a/sql/debug_sync.cc +++ b/sql/debug_sync.cc @@ -790,7 +790,7 @@ static bool debug_sync_set_action(THD *thd, st_debug_sync_action *action) and shall not be reported as a result of SET DEBUG_SYNC. Hence, we check for the first condition above. */ - if (thd->is_error()) + if (unlikely(thd->is_error())) DBUG_RETURN(TRUE); } @@ -1448,7 +1448,7 @@ static void debug_sync_execute(THD *thd, st_debug_sync_action *action) DBUG_PRINT("debug_sync", ("awoke from %s global: %s error: %d", sig_wait, sig_glob, error));}); - if (error == ETIMEDOUT || error == ETIME) + if (unlikely(error == ETIMEDOUT || error == ETIME)) { // We should not make the statement fail, even if in strict mode. 
const bool save_abort_on_warning= thd->abort_on_warning; diff --git a/sql/derror.cc b/sql/derror.cc index 8011f8c4020..011f8e1669c 100644 --- a/sql/derror.cc +++ b/sql/derror.cc @@ -232,7 +232,8 @@ static File open_error_msg_file(const char *file_name, const char *language, ret->errors= uint2korr(head+12); ret->sections= uint2korr(head+14); - if (ret->max_error < error_messages || ret->sections != MAX_ERROR_RANGES) + if (unlikely(ret->max_error < error_messages || + ret->sections != MAX_ERROR_RANGES)) { sql_print_error("\ Error message file '%s' had only %d error messages, but it should contain at least %d error messages.\nCheck that the above file is the right version for this program!", @@ -276,8 +277,8 @@ bool read_texts(const char *file_name, const char *language, struct st_msg_file msg_file; DBUG_ENTER("read_texts"); - if ((file= open_error_msg_file(file_name, language, error_messages, - &msg_file)) == FERR) + if (unlikely((file= open_error_msg_file(file_name, language, error_messages, + &msg_file)) == FERR)) DBUG_RETURN(1); if (!(*data= (const char***) diff --git a/sql/discover.cc b/sql/discover.cc index 7184cde5e03..afebce77bf9 100644 --- a/sql/discover.cc +++ b/sql/discover.cc @@ -127,7 +127,7 @@ int writefrm(const char *path, const char *db, const char *table, File file= mysql_file_create(key_file_frm, file_name, CREATE_MODE, create_flags, MYF(0)); - if ((error= file < 0)) + if (unlikely((error= file < 0))) { if (my_errno == ENOENT) my_error(ER_BAD_DB_ERROR, MYF(0), db); diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index 59aa6bcabc6..28820f05aa3 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -1444,7 +1444,7 @@ Event_job_data::execute(THD *thd, bool drop) } end: - if (drop && !thd->is_fatal_error) + if (drop && likely(!thd->is_fatal_error)) { /* We must do it here since here we're under the right authentication diff --git a/sql/events.cc b/sql/events.cc index 2fbb16861f6..6a8695843a3 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -320,7 +320,7 @@ Events::create_event(THD *thd, Event_parse_data *parse_data) enum_binlog_format save_binlog_format; DBUG_ENTER("Events::create_event"); - if (check_if_system_tables_error()) + if (unlikely(check_if_system_tables_error())) DBUG_RETURN(TRUE); /* @@ -455,7 +455,7 @@ Events::update_event(THD *thd, Event_parse_data *parse_data, DBUG_ENTER("Events::update_event"); - if (check_if_system_tables_error()) + if (unlikely(check_if_system_tables_error())) DBUG_RETURN(TRUE); if (parse_data->check_parse_data(thd) || parse_data->do_not_create) @@ -589,7 +589,7 @@ Events::drop_event(THD *thd, const LEX_CSTRING *dbname, enum_binlog_format save_binlog_format; DBUG_ENTER("Events::drop_event"); - if (check_if_system_tables_error()) + if (unlikely(check_if_system_tables_error())) DBUG_RETURN(TRUE); if (check_access(thd, EVENT_ACL, dbname->str, NULL, NULL, 0, 0)) @@ -761,7 +761,7 @@ Events::show_create_event(THD *thd, const LEX_CSTRING *dbname, DBUG_ENTER("Events::show_create_event"); DBUG_PRINT("enter", ("name: %s@%s", dbname->str, name->str)); - if (check_if_system_tables_error()) + if (unlikely(check_if_system_tables_error())) DBUG_RETURN(TRUE); if (check_access(thd, EVENT_ACL, dbname->str, NULL, NULL, 0, 0)) @@ -817,7 +817,7 @@ Events::fill_schema_events(THD *thd, TABLE_LIST *tables, COND * /* cond */) if (opt_noacl) DBUG_RETURN(0); - if (check_if_system_tables_error()) + if (unlikely(check_if_system_tables_error())) DBUG_RETURN(1); /* diff --git a/sql/field.cc b/sql/field.cc index 4f04fef1a26..a5b8e4a9e51 
100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1348,7 +1348,7 @@ bool Field::sp_prepare_and_store_item(THD *thd, Item **value) expr_item->save_in_field(this, 0); - if (!thd->is_error()) + if (likely(!thd->is_error())) DBUG_RETURN(false); error: @@ -1388,7 +1388,7 @@ void Field_num::prepend_zeros(String *value) const if ((diff= (int) (field_length - value->length())) > 0) { const bool error= value->realloc(field_length); - if (!error) + if (likely(!error)) { bmove_upp((uchar*) value->ptr()+field_length, (uchar*) value->ptr()+value->length(), @@ -1630,7 +1630,7 @@ double Field_real::get_double(const char *str, size_t length, CHARSET_INFO *cs, { char *end; double nr= my_strntod(cs,(char*) str, length, &end, error); - if (*error) + if (unlikely(*error)) { set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1); *error= 1; @@ -2908,7 +2908,7 @@ int Field_decimal::store(double nr) return 1; } - if (!isfinite(nr)) // Handle infinity as special case + if (!std::isfinite(nr)) // Handle infinity as special case { overflow(nr < 0.0); return 1; @@ -3190,7 +3190,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value, *native_error= my_decimal2binary(E_DEC_FATAL_ERROR & ~E_DEC_OVERFLOW, decimal_value, ptr, precision, dec); - if (*native_error == E_DEC_OVERFLOW) + if (unlikely(*native_error == E_DEC_OVERFLOW)) { my_decimal buff; DBUG_PRINT("info", ("overflow")); @@ -3209,7 +3209,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value) { int native_error; bool rc= store_value(decimal_value, &native_error); - if (!rc && native_error == E_DEC_TRUNCATED) + if (unlikely(!rc && native_error == E_DEC_TRUNCATED)) set_note(WARN_DATA_TRUNCATED, 1); return rc; } @@ -4189,7 +4189,7 @@ int Field_long::store(double nr) else res=(int32) (longlong) nr; } - if (error) + if (unlikely(error)) set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1); int4store(ptr,res); @@ -4235,7 +4235,7 @@ int Field_long::store(longlong nr, bool unsigned_val) else res=(int32) nr; } - if (error) + if (unlikely(error)) set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1); int4store(ptr,res); @@ -4319,7 +4319,7 @@ int Field_longlong::store(const char *from,size_t len,CHARSET_INFO *cs) ulonglong tmp; tmp= cs->cset->strntoull10rnd(cs,from,len,unsigned_flag,&end,&error); - if (error == MY_ERRNO_ERANGE) + if (unlikely(error == MY_ERRNO_ERANGE)) { set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; @@ -4339,7 +4339,7 @@ int Field_longlong::store(double nr) ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED; Converter_double_to_longlong conv(nr, unsigned_flag); - if (conv.error()) + if (unlikely(conv.error())) set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1); int8store(ptr, conv.result()); @@ -4352,7 +4352,7 @@ int Field_longlong::store(longlong nr, bool unsigned_val) ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED; int error= 0; - if (nr < 0) // Only possible error + if (unlikely(nr < 0)) // Only possible error { /* if field is unsigned and value is signed (< 0) or @@ -4501,7 +4501,7 @@ int Field_float::store(double nr) int error= truncate_double(&nr, field_length, not_fixed ? NOT_FIXED_DEC : dec, unsigned_flag, FLT_MAX); - if (error) + if (unlikely(error)) { set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1); if (error < 0) // Wrong double value @@ -4680,7 +4680,7 @@ int Field_double::store(double nr) int error= truncate_double(&nr, field_length, not_fixed ? 
NOT_FIXED_DEC : dec, unsigned_flag, DBL_MAX); - if (error) + if (unlikely(error)) { set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1); if (error < 0) // Wrong double value @@ -4718,7 +4718,7 @@ int truncate_double(double *nr, uint field_length, uint dec, int error= 0; double res= *nr; - if (isnan(res)) + if (std::isnan(res)) { *nr= 0; return -1; @@ -4740,7 +4740,7 @@ int truncate_double(double *nr, uint field_length, uint dec, max_value-= 1.0 / log_10[dec]; /* Check for infinity so we don't get NaN in calculations */ - if (!my_isinf(res)) + if (!std::isinf(res)) { double tmp= rint((res - floor(res)) * log_10[dec]) / log_10[dec]; res= floor(res) + tmp; @@ -4847,7 +4847,7 @@ double Field_double::val_real(void) longlong Field_double::val_int_from_real(bool want_unsigned_result) { Converter_double_to_longlong conv(val_real(), want_unsigned_result); - if (!want_unsigned_result && conv.error()) + if (unlikely(!want_unsigned_result && conv.error())) conv.push_warning(get_thd(), Field_double::val_real(), false); return conv.result(); } @@ -5084,7 +5084,7 @@ int Field_timestamp::store_TIME_with_warning(THD *thd, MYSQL_TIME *l_time, timestamp= TIME_to_timestamp(thd, l_time, &conversion_error); if (timestamp == 0 && l_time->second_part == 0) conversion_error= ER_WARN_DATA_OUT_OF_RANGE; - if (conversion_error) + if (unlikely(conversion_error)) { set_datetime_warning(conversion_error, str, MYSQL_TIMESTAMP_DATETIME, !error); @@ -6276,7 +6276,7 @@ int Field_year::store(const char *from, size_t len,CHARSET_INFO *cs) if (get_thd()->count_cuted_fields > CHECK_FIELD_EXPRESSION && (error= check_int(cs, from, len, end, error))) { - if (error == 1) /* empty or incorrect string */ + if (unlikely(error == 1) /* empty or incorrect string */) { *ptr= 0; return 1; @@ -6943,7 +6943,7 @@ Field_longstr::check_string_copy_error(const String_copier *copier, const char *pos; char tmp[32]; - if (!(pos= copier->most_important_error_pos())) + if (likely(!(pos= copier->most_important_error_pos()))) return FALSE; convert_to_printable(tmp, sizeof(tmp), pos, (end - pos), cs, 6); @@ -7003,15 +7003,15 @@ int Field_string::store(const char *from, size_t length,CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED; uint copy_length; - String_copier copier; + int rc; /* See the comment for Field_long::store(long long) */ DBUG_ASSERT(!table || table->in_use == current_thd); - copy_length= copier.well_formed_copy(field_charset, - (char*) ptr, field_length, - cs, from, length, - field_length / field_charset->mbmaxlen); + rc= well_formed_copy_with_check((char*) ptr, field_length, + cs, from, length, + field_length / field_charset->mbmaxlen, + false, ©_length); /* Append spaces if the string was shorter than the field. */ if (copy_length < field_length) @@ -7019,7 +7019,7 @@ int Field_string::store(const char *from, size_t length,CHARSET_INFO *cs) field_length-copy_length, field_charset->pad_char); - return check_conversion_status(&copier, from + length, cs, false); + return rc; } @@ -7055,10 +7055,10 @@ int Field_str::store(double nr) my_bool error= (local_char_length == 0); // my_gcvt() requires width > 0, and we may have a CHAR(0) column. 
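Two mechanical themes run through the field.cc changes: rare error paths are wrapped in unlikely() (and success paths in likely()) so the compiler can keep the hot path straight-line, and the old isfinite()/isnan()/my_isinf() calls become the namespaced std::isfinite/std::isnan/std::isinf from <cmath>. A small sketch showing both follows, assuming the usual __builtin_expect-based macros; store_double() is an invented example, not a Field method.

```cpp
// Sketch: branch-prediction hints around rare error paths, plus
// std::isfinite to reject NaN/Inf before storing a double.
#include <cmath>
#include <cstdio>

#if defined(__GNUC__) || defined(__clang__)
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x)   (x)
#define unlikely(x) (x)
#endif

int store_double(double nr, double max_value, double *slot) {
  if (unlikely(!std::isfinite(nr))) {          // NaN or +/-Inf: rare error path
    *slot = 0;
    return 1;
  }
  int error = 0;
  if (unlikely(nr > max_value))  { nr = max_value;  error = 1; }
  if (unlikely(nr < -max_value)) { nr = -max_value; error = 1; }
  *slot = nr;                                  // hot path: plain store
  if (likely(!error))
    return 0;
  std::fprintf(stderr, "warning: data out of range\n");
  return error;
}

int main() {
  double slot;
  return store_double(1.5, 10.0, &slot);       // in-range value: returns 0
}
```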
- if (!error) + if (likely(!error)) length= my_gcvt(nr, MY_GCVT_ARG_DOUBLE, local_char_length, buff, &error); - if (error) + if (unlikely(error)) { if (get_thd()->abort_on_warning) set_warning(ER_DATA_TOO_LONG, 1); @@ -7553,19 +7553,16 @@ int Field_varstring::store(const char *from,size_t length,CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED; uint copy_length; - String_copier copier; + int rc; - copy_length= copier.well_formed_copy(field_charset, - (char*) ptr + length_bytes, - field_length, - cs, from, length, - field_length / field_charset->mbmaxlen); - if (length_bytes == 1) - *ptr= (uchar) copy_length; - else - int2store(ptr, copy_length); + rc= well_formed_copy_with_check((char*) get_data(), field_length, + cs, from, length, + field_length / field_charset->mbmaxlen, + true, ©_length); - return check_conversion_status(&copier, from + length, cs, true); + store_length(copy_length); + + return rc; } @@ -7959,10 +7956,13 @@ void Field_varstring::hash(ulong *nr, ulong *nr2) Compress field @param[out] to destination buffer for compressed data - @param[in,out] to_length in: size of to, out: compressed data length + @param[in] to_length size of to @param[in] from data to compress @param[in] length from length + @param[in] max_length truncate `from' to this length + @param[out] out_length compessed data length @param[in] cs from character set + @param[in] nchars copy no more than "nchars" characters In worst case (no compression performed) storage requirement is increased by 1 byte to store header. If it exceeds field length, normal data truncation is @@ -7986,52 +7986,57 @@ void Field_varstring::hash(ulong *nr, ulong *nr2) followed by compressed data. */ -int Field_longstr::compress(char *to, uint *to_length, +int Field_longstr::compress(char *to, uint to_length, const char *from, uint length, - CHARSET_INFO *cs) + uint max_length, + uint *out_length, + CHARSET_INFO *cs, size_t nchars) { THD *thd= get_thd(); - char *buf= 0; + char *buf; + uint buf_length; int rc= 0; - if (length == 0) - { - *to_length= 0; - return 0; - } - if (String::needs_conversion_on_storage(length, cs, field_charset) || - *to_length <= length) + max_length < length) { - String_copier copier; - const char *end= from + length; - - if (!(buf= (char*) my_malloc(*to_length - 1, MYF(MY_WME)))) + set_if_smaller(max_length, static_cast(field_charset->mbmaxlen) * length + 1); + if (!(buf= (char*) my_malloc(max_length, MYF(MY_WME)))) { - *to_length= 0; + *out_length= 0; return -1; } - length= copier.well_formed_copy(field_charset, buf, *to_length - 1, - cs, from, length, - (*to_length - 1) / field_charset->mbmaxlen); - rc= check_conversion_status(&copier, end, cs, true); - from= buf; - DBUG_ASSERT(length > 0); + rc= well_formed_copy_with_check(buf, max_length, cs, from, length, + nchars, true, &buf_length); + } + else + { + buf= const_cast(from); + buf_length= length; } - if (length >= thd->variables.column_compression_threshold && - (*to_length= compression_method()->compress(thd, to, from, length))) + if (buf_length == 0) + *out_length= 0; + else if (buf_length >= thd->variables.column_compression_threshold && + (*out_length= compression_method()->compress(thd, to, buf, buf_length))) status_var_increment(thd->status_var.column_compressions); else { /* Store uncompressed */ to[0]= 0; - memcpy(to + 1, from, length); - *to_length= length + 1; + if (buf_length < to_length) + memcpy(to + 1, buf, buf_length); + else + { + /* Storing string at blob capacity, e.g. 255 bytes string to TINYBLOB. 
*/ + rc= well_formed_copy_with_check(to + 1, to_length - 1, cs, from, length, + nchars, true, &buf_length); + } + *out_length= buf_length + 1; } - if (buf) + if (buf != from) my_free(buf); return rc; } @@ -8085,9 +8090,12 @@ int Field_varstring_compressed::store(const char *from, size_t length, CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED; - uint to_length= (uint)MY_MIN(field_length, field_charset->mbmaxlen * length + 1); - int rc= compress((char*) get_data(), &to_length, from, (uint)length, cs); - store_length(to_length); + uint compressed_length; + int rc= compress((char*) get_data(), field_length, from, (uint) length, + Field_varstring_compressed::max_display_length(), + &compressed_length, cs, + Field_varstring_compressed::char_length()); + store_length(compressed_length); return rc; } @@ -8215,10 +8223,11 @@ int Field_blob::store(const char *from,size_t length,CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED; size_t copy_length, new_length; - String_copier copier; + uint copy_len; char *tmp; char buff[STRING_BUFFER_USUAL_SIZE]; String tmpstr(buff,sizeof(buff), &my_charset_bin); + int rc; if (!length) { @@ -8285,13 +8294,13 @@ int Field_blob::store(const char *from,size_t length,CHARSET_INFO *cs) bmove(ptr + packlength, (uchar*) &tmp, sizeof(char*)); return 0; } - copy_length= copier.well_formed_copy(field_charset, - (char*) value.ptr(), (uint)new_length, - cs, from, length); - Field_blob::store_length(copy_length); + rc= well_formed_copy_with_check((char*) value.ptr(), (uint) new_length, + cs, from, length, + length, true, ©_len); + Field_blob::store_length(copy_len); bmove(ptr+packlength,(uchar*) &tmp,sizeof(char*)); - return check_conversion_status(&copier, from + length, cs, true); + return rc; oom_error: /* Fatal OOM error */ @@ -8695,7 +8704,10 @@ int Field_blob_compressed::store(const char *from, size_t length, CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED; - uint to_length= (uint)MY_MIN(max_data_length(), field_charset->mbmaxlen * length + 1); + uint compressed_length; + uint max_length= max_data_length(); + uint to_length= (uint) MY_MIN(max_length, + field_charset->mbmaxlen * length + 1); String tmp(from, length, cs); int rc; @@ -8705,8 +8717,9 @@ int Field_blob_compressed::store(const char *from, size_t length, if (value.alloc(to_length)) goto oom; - rc= compress((char*) value.ptr(), &to_length, tmp.ptr(), (uint) length, cs); - set_ptr(to_length, (uchar*) value.ptr()); + rc= compress((char*) value.ptr(), to_length, tmp.ptr(), (uint) length, + max_length, &compressed_length, cs, (uint) length); + set_ptr(compressed_length, (uchar*) value.ptr()); return rc; oom: @@ -10319,7 +10332,7 @@ bool check_expression(Virtual_column_info *vcol, LEX_CSTRING *name, if (type == VCOL_GENERATED_VIRTUAL) filter|= VCOL_NOT_VIRTUAL; - if (ret || (res.errors & filter)) + if (unlikely(ret || (res.errors & filter))) { my_error(ER_GENERATED_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(0), res.name, vcol_type_name(type), name->str); @@ -10804,6 +10817,12 @@ Column_definition::redefine_stage1_common(const Column_definition *dup_field, */ uint32 Field_blob::char_length() const +{ + return Field_blob::octet_length(); +} + + +uint32 Field_blob::octet_length() const { switch (packlength) { @@ -10879,7 +10898,7 @@ bool Column_definition::set_compressed(const char *method) length */ -uint32 Field_blob::max_display_length() +uint32 Field_blob::max_display_length() const { switch (packlength) { @@ -11061,8 +11080,8 @@ bool Field::save_in_field_default_value(bool 
view_error_processing) { THD *thd= table->in_use; - if (flags & NO_DEFAULT_VALUE_FLAG && - real_type() != MYSQL_TYPE_ENUM) + if (unlikely(flags & NO_DEFAULT_VALUE_FLAG && + real_type() != MYSQL_TYPE_ENUM)) { if (reset()) { diff --git a/sql/field.h b/sql/field.h index eb4be46d3a0..b6f28808e2e 100644 --- a/sql/field.h +++ b/sql/field.h @@ -745,13 +745,15 @@ public: { return DTCollation(charset(), derivation(), repertoire()); } - Type_std_attributes type_std_attributes() const + virtual Type_std_attributes type_std_attributes() const { return Type_std_attributes(field_length, decimals(), MY_TEST(flags & UNSIGNED_FLAG), dtcollation()); } + bool is_unsigned() const { return flags & UNSIGNED_FLAG; } + /** Convenience definition of a copy function returned by Field::get_copy_func() @@ -1090,6 +1092,16 @@ public: memcpy(ptr, val, len); } virtual uint decimals() const { return 0; } + virtual Information_schema_numeric_attributes + information_schema_numeric_attributes() const + { + return Information_schema_numeric_attributes(); + } + virtual Information_schema_character_attributes + information_schema_character_attributes() const + { + return Information_schema_character_attributes(); + } /* Caller beware: sql_type can change str.Ptr, so check ptr() to see if it changed if you are using your own buffer @@ -1382,8 +1394,7 @@ public: new_table->s->db_create_options|= HA_OPTION_PACK_RECORD; } /* maximum possible display length */ - virtual uint32 max_display_length()= 0; - + virtual uint32 max_display_length() const= 0; /** Whether a field being created is compatible with a existing one. @@ -1444,11 +1455,6 @@ public: return flags & VERS_UPDATE_UNVERSIONED_FLAG; } - virtual bool vers_trx_id() const - { - return false; - } - /* Validate a non-null field value stored in the given record according to the current thread settings, e.g. sql_mode. 
@@ -1544,7 +1550,7 @@ public: } virtual bool sp_prepare_and_store_item(THD *thd, Item **value); - friend int cre_myisam(char * name, register TABLE *form, uint options, + friend int cre_myisam(char * name, TABLE *form, uint options, ulonglong auto_increment_value); friend class Copy_field; friend class Item_avg_field; @@ -1718,7 +1724,14 @@ public: CHARSET_INFO *charset(void) const { return field_charset; } enum Derivation derivation(void) const { return field_derivation; } bool binary() const { return field_charset == &my_charset_bin; } - uint32 max_display_length() { return field_length; } + uint32 max_display_length() const { return field_length; } + uint32 char_length() const { return field_length / field_charset->mbmaxlen; } + Information_schema_character_attributes + information_schema_character_attributes() const + { + return Information_schema_character_attributes(max_display_length(), + char_length()); + } friend class Create_field; my_decimal *val_decimal(my_decimal *); bool val_bool() { return val_real() != 0e0; } @@ -1751,13 +1764,29 @@ protected: return report_if_important_data(copier->source_end_pos(), end, count_spaces); } + int well_formed_copy_with_check(char *to, size_t to_length, + CHARSET_INFO *from_cs, + const char *from, size_t from_length, + size_t nchars, bool count_spaces, + uint *copy_length) + { + String_copier copier; + + *copy_length= copier.well_formed_copy(field_charset, to, to_length, + from_cs, from, from_length, + nchars); + + return check_conversion_status(&copier, from + from_length, from_cs, count_spaces); + } bool cmp_to_string_with_same_collation(const Item_bool_func *cond, const Item *item) const; bool cmp_to_string_with_stricter_collation(const Item_bool_func *cond, const Item *item) const; - int compress(char *to, uint *to_length, + int compress(char *to, uint to_length, const char *from, uint length, - CHARSET_INFO *cs); + uint max_length, + uint *out_length, + CHARSET_INFO *cs, size_t nchars); String *uncompress(String *val_buffer, String *val_ptr, const uchar *from, uint from_length); public: @@ -1809,6 +1838,13 @@ public: { return do_field_real; } + Information_schema_numeric_attributes + information_schema_numeric_attributes() const + { + return dec == NOT_FIXED_DEC ? + Information_schema_numeric_attributes(field_length) : + Information_schema_numeric_attributes(field_length, dec); + } int save_in_field(Field *to) { return to->store(val_real()); } bool memcpy_field_possible(const Field *from) const { @@ -1825,7 +1861,7 @@ public: bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate); my_decimal *val_decimal(my_decimal *); bool val_bool() { return val_real() != 0e0; } - uint32 max_display_length() { return field_length; } + uint32 max_display_length() const { return field_length; } uint size_of() const { return sizeof(*this); } Item *get_equal_const_item(THD *thd, const Context &ctx, Item *const_item); }; @@ -1844,6 +1880,12 @@ public: const Type_handler *type_handler() const { return &type_handler_olddecimal; } enum ha_base_keytype key_type() const { return zerofill ? HA_KEYTYPE_BINARY : HA_KEYTYPE_NUM; } + Information_schema_numeric_attributes + information_schema_numeric_attributes() const + { + uint tmp= dec ? 2 : 1; // The sign and the decimal point + return Information_schema_numeric_attributes(field_length - tmp, dec); + } Copy_func *get_copy_func(const Field *from) const { return eq_def(from) ? 
get_identical_copy_func() : do_field_string; @@ -1928,7 +1970,12 @@ public: void sort_string(uchar *buff, uint length); bool zero_pack() const { return 0; } void sql_type(String &str) const; - uint32 max_display_length() { return field_length; } + uint32 max_display_length() const { return field_length; } + Information_schema_numeric_attributes + information_schema_numeric_attributes() const + { + return Information_schema_numeric_attributes(precision, dec); + } uint size_of() const { return sizeof(*this); } uint32 pack_length() const { return (uint32) bin_size; } uint pack_length_from_metadata(uint field_metadata); @@ -1958,6 +2005,35 @@ public: bool val_bool() { return val_int() != 0; } int store_time_dec(const MYSQL_TIME *ltime, uint dec); bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate); + virtual const Type_limits_int *type_limits_int() const= 0; + uint32 max_display_length() const + { + return type_limits_int()->char_length(); + } + Type_std_attributes type_std_attributes() const + { + /* + For integer data types, the user-specified length does not constrain the + supported range, so e.g. a column of the INT(1) data type supports the + full integer range anyway. + Choose the maximum from the user-specified length and the maximum + possible length determined by the data type capacity: + INT(1) -> 11 + INT(10) -> 11 + INT(40) -> 40 + */ + uint32 length1= max_display_length(); + uint32 length2= field_length; + return Type_std_attributes(MY_MAX(length1, length2), decimals(), + MY_TEST(flags & UNSIGNED_FLAG), + dtcollation()); + } + Information_schema_numeric_attributes + information_schema_numeric_attributes() const + { + uint32 prec= type_limits_int()->precision(); + return Information_schema_numeric_attributes(prec, 0); + } }; @@ -1986,7 +2062,10 @@ public: void sort_string(uchar *buff,uint length); uint32 pack_length() const { return 1; } void sql_type(String &str) const; - uint32 max_display_length() { return 4; } + const Type_limits_int *type_limits_int() const + { + return type_handler_tiny.type_limits_int_by_unsigned_flag(is_unsigned()); + } virtual uchar *pack(uchar* to, const uchar *from, uint max_length) { @@ -2036,8 +2115,10 @@ public: void sort_string(uchar *buff,uint length); uint32 pack_length() const { return 2; } void sql_type(String &str) const; - uint32 max_display_length() { return 6; } - + const Type_limits_int *type_limits_int() const + { + return type_handler_short.type_limits_int_by_unsigned_flag(is_unsigned()); + } virtual uchar *pack(uchar* to, const uchar *from, uint max_length) { return pack_int16(to, from); } @@ -2071,8 +2152,10 @@ public: void sort_string(uchar *buff,uint length); uint32 pack_length() const { return 3; } void sql_type(String &str) const; - uint32 max_display_length() { return 8; } - + const Type_limits_int *type_limits_int() const + { + return type_handler_int24.type_limits_int_by_unsigned_flag(is_unsigned()); + } virtual uchar *pack(uchar* to, const uchar *from, uint max_length) { return Field::pack(to, from, max_length); @@ -2111,7 +2194,10 @@ public: void sort_string(uchar *buff,uint length); uint32 pack_length() const { return 4; } void sql_type(String &str) const; - uint32 max_display_length() { return MY_INT32_NUM_DECIMAL_DIGITS; } + const Type_limits_int *type_limits_int() const + { + return type_handler_long.type_limits_int_by_unsigned_flag(is_unsigned()); + } virtual uchar *pack(uchar* to, const uchar *from, uint max_length __attribute__((unused))) { @@ -2161,7 +2247,10 @@ public: void sort_string(uchar *buff,uint length); 
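The comment introduced in the integer type_std_attributes() above deserves a worked example: for integer columns the (N) in INT(N) is only a display width, so the effective width is the larger of the user-specified N and the widest value the type itself can print, which is why INT(1) and INT(10) both report 11 while INT(40) reports 40. The sketch below illustrates that rule; the limits table uses the conventional MySQL default display widths, and the types are illustrative, not the real Type_limits_int classes.

```cpp
// Sketch: the display width of an integer column is the user-specified
// width or the type's conventional maximum width, whichever is larger.
#include <algorithm>
#include <cstdint>
#include <cstdio>

struct IntLimits { std::uint32_t signed_chars, unsigned_chars; };

constexpr IntLimits k_tiny{4, 3};     // TINYINT:  "-128" / "255"
constexpr IntLimits k_small{6, 5};    // SMALLINT: "-32768" / "65535"
constexpr IntLimits k_int{11, 10};    // INT: "-2147483648" / "4294967295"
constexpr IntLimits k_bigint{20, 20}; // BIGINT: 19 digits plus sign

std::uint32_t display_width(const IntLimits &lim, bool is_unsigned,
                            std::uint32_t user_width) {
  std::uint32_t type_width = is_unsigned ? lim.unsigned_chars
                                         : lim.signed_chars;
  return std::max(user_width, type_width);
}

int main() {
  std::printf("INT(1)  -> %u\n", (unsigned) display_width(k_int, false, 1));
  std::printf("INT(10) -> %u\n", (unsigned) display_width(k_int, false, 10));
  std::printf("INT(40) -> %u\n", (unsigned) display_width(k_int, false, 40));
  std::printf("TINYINT UNSIGNED -> %u\n",
              (unsigned) display_width(k_tiny, true, 2));
}
```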
uint32 pack_length() const { return 8; } void sql_type(String &str) const; - uint32 max_display_length() { return 20; } + const Type_limits_int *type_limits_int() const + { + return type_handler_longlong.type_limits_int_by_unsigned_flag(is_unsigned()); + } virtual uchar *pack(uchar* to, const uchar *from, uint max_length __attribute__((unused))) { @@ -2191,8 +2280,7 @@ public: unsigned_arg), cached(0) {} - enum_field_types real_type() const { return MYSQL_TYPE_LONGLONG; } - enum_field_types type() const { return MYSQL_TYPE_LONGLONG;} + const Type_handler *type_handler() const { return &type_handler_vers_trx_id; } uint size_of() const { return sizeof(*this); } bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate, ulonglong trx_id); bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate) @@ -2219,10 +2307,6 @@ public: } /* cmp_type() cannot be TIME_RESULT, because we want to compare this field against integers. But in all other cases we treat it as TIME_RESULT! */ - bool vers_trx_id() const - { - return true; - } }; @@ -2337,6 +2421,11 @@ public: unireg_check_arg, field_name_arg, collation) {} const Type_handler *type_handler() const { return &type_handler_null; } + Information_schema_character_attributes + information_schema_character_attributes() const + { + return Information_schema_character_attributes(); + } Copy_func *get_copy_func(const Field *from) const { return do_field_string; @@ -2358,7 +2447,7 @@ public: uint32 pack_length() const { return 0; } void sql_type(String &str) const; uint size_of() const { return sizeof(*this); } - uint32 max_display_length() { return 4; } + uint32 max_display_length() const { return 4; } void move_field_offset(my_ptrdiff_t ptr_diff) {} bool can_optimize_keypart_ref(const Item_bool_func *cond, const Item *item) const @@ -2397,7 +2486,7 @@ public: return to->store_time_dec(<ime, decimals()); } bool memcpy_field_possible(const Field *from) const; - uint32 max_display_length() { return field_length; } + uint32 max_display_length() const { return field_length; } bool str_needs_quotes() { return TRUE; } enum Derivation derivation(void) const { return DERIVATION_NUMERIC; } uint repertoire(void) const { return MY_REPERTOIRE_NUMERIC; } @@ -2697,7 +2786,12 @@ public: String *val_str(String*,String *); bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate); bool send_binary(Protocol *protocol); - uint32 max_display_length() { return field_length; } + Information_schema_numeric_attributes + information_schema_numeric_attributes() const + { + return Information_schema_numeric_attributes(); + } + uint32 max_display_length() const { return field_length; } void sql_type(String &str) const; }; @@ -3352,7 +3446,11 @@ private: Field_varstring::sql_type(str); str.append(STRING_WITH_LEN(" /*!100301 COMPRESSED*/")); } - uint32 max_display_length() { return field_length - 1; } + uint32 max_display_length() const { return field_length - 1; } + uint32 char_length() const + { + return (field_length - 1) / field_charset->mbmaxlen; + } int cmp_max(const uchar *a_ptr, const uchar *b_ptr, uint max_len); /* @@ -3472,6 +3570,19 @@ public: } enum ha_base_keytype key_type() const { return binary() ? 
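Editor's note on the compressed VARCHAR hunks above: one byte of the declared length is reserved for the compression header, which is why both max_display_length() and char_length() work on field_length - 1. A hedged sketch of that accounting, with a plain mbmaxlen parameter standing in for field_charset->mbmaxlen:

  // Illustration only: how a one-byte compression header shrinks the usable
  // length of a compressed VARCHAR; mbmaxlen plays the role of
  // field_charset->mbmaxlen (e.g. 4 for utf8mb4).
  #include <cstdint>
  #include <cstdio>

  struct CompressedVarcharLengths
  {
    uint32_t max_display_length;   // bytes available for user data
    uint32_t char_length;          // characters, worst case for the charset
  };

  static CompressedVarcharLengths
  compressed_varchar_lengths(uint32_t field_length, uint32_t mbmaxlen)
  {
    CompressedVarcharLengths r;
    r.max_display_length= field_length - 1;        // 1 byte: compression header
    r.char_length= (field_length - 1) / mbmaxlen;
    return r;
  }

  int main()
  {
    CompressedVarcharLengths r= compressed_varchar_lengths(41, 4);
    std::printf("bytes=%u chars=%u\n", r.max_display_length, r.char_length);
    return 0;
  }
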
HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2; } + Type_std_attributes type_std_attributes() const + { + return Type_std_attributes(Field_blob::max_display_length(), decimals(), + MY_TEST(flags & UNSIGNED_FLAG), + dtcollation()); + } + Information_schema_character_attributes + information_schema_character_attributes() const + { + uint32 octets= Field_blob::octet_length(); + uint32 chars= octets / field_charset->mbminlen; + return Information_schema_character_attributes(octets, chars); + } Copy_func *get_copy_func(const Field *from) const { /* @@ -3548,7 +3659,7 @@ public: DBUG_ASSERT(number < UINT_MAX32); store_length(ptr, packlength, (uint32)number); } - inline uint32 get_length(uint row_offset= 0) const + inline uint32 get_length(my_ptrdiff_t row_offset= 0) const { return get_length(ptr+row_offset, this->packlength); } uint32 get_length(const uchar *ptr, uint packlength) const; uint32 get_length(const uchar *ptr_arg) const @@ -3632,8 +3743,9 @@ public: uint size_of() const { return sizeof(*this); } bool has_charset(void) const { return charset() == &my_charset_bin ? FALSE : TRUE; } - uint32 max_display_length(); + uint32 max_display_length() const; uint32 char_length() const; + uint32 octet_length() const; uint is_equal(Create_field *new_field); private: int save_field_metadata(uchar *first_byte); @@ -3716,6 +3828,11 @@ public: { return MYSQL_TYPE_GEOMETRY; } + Information_schema_character_attributes + information_schema_character_attributes() const + { + return Information_schema_character_attributes(); + } bool can_optimize_range(const Item_bool_func *cond, const Item *item, bool is_eq_func) const; @@ -3912,7 +4029,12 @@ public: enum ha_base_keytype key_type() const { return HA_KEYTYPE_BIT; } uint32 key_length() const { return (uint32) (field_length + 7) / 8; } uint32 max_data_length() const { return (field_length + 7) / 8; } - uint32 max_display_length() { return field_length; } + uint32 max_display_length() const { return field_length; } + Information_schema_numeric_attributes + information_schema_numeric_attributes() const + { + return Information_schema_numeric_attributes(field_length); + } uint size_of() const { return sizeof(*this); } int reset(void) { bzero(ptr, bytes_in_rec); diff --git a/sql/field_comp.cc b/sql/field_comp.cc index 9a7b3a7c7e0..eb4ae42aa4d 100644 --- a/sql/field_comp.cc +++ b/sql/field_comp.cc @@ -21,11 +21,30 @@ #include +/** + Compresses string using zlib + + @param[out] to destination buffer for compressed data + @param[in] from data to compress + @param[in] length from length + + Requirement is such that string stored at `to' must not exceed `from' length. + Otherwise 0 is returned and caller stores string uncompressed. + + `to' must be large enough to hold `length' bytes. + + length == 1 is an edge case that may break stream.avail_out calculation: at + least 2 bytes required to store metadata. +*/ + static uint compress_zlib(THD *thd, char *to, const char *from, uint length) { uint level= thd->variables.column_compression_zlib_level; - if (level > 0) + /* Caller takes care of empty strings. */ + DBUG_ASSERT(length); + + if (level > 0 && length > 1) { z_stream stream; int wbits= thd->variables.column_compression_zlib_wrap ? 
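Editor's note: the new compress_zlib() comment above spells out the contract — compression is only attempted when the zlib level is non-zero and the input is longer than one byte, and the compressed form plus its metadata byte must not exceed the original length, otherwise 0 is returned and the caller stores the value uncompressed. A rough standalone illustration of that decision using zlib's convenience API compress2(); the one-byte marker and buffer layout here are invented for the example and are not the engine's actual deflate-stream setup or header format:

  // Sketch of the "compress only if it actually saves space" rule described
  // in the commit; guards mirror the comment, layout is an assumption.
  #include <zlib.h>
  #include <cstddef>
  #include <cstdio>
  #include <vector>

  // Returns bytes written to `to` (header byte + payload),
  // or 0 if the caller should store the string uncompressed.
  static size_t maybe_compress(char *to, const char *from, size_t length,
                               int level)
  {
    if (level <= 0 || length <= 1)          // nothing to gain / edge case
      return 0;

    uLongf out_len= (uLongf) (length - 1);  // payload capacity: original minus header
    int rc= compress2((Bytef *) (to + 1), &out_len,
                      (const Bytef *) from, (uLong) length, level);
    if (rc != Z_OK)                         // Z_BUF_ERROR: did not fit -> store raw
      return 0;

    to[0]= 1;                               // invented marker: "payload is zlib"
    return (size_t) out_len + 1;
  }

  int main()
  {
    const char from[]= "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
    std::vector<char> to(sizeof(from));
    size_t n= maybe_compress(to.data(), from, sizeof(from) - 1, 6);
    if (n)
      std::printf("stored compressed, %zu bytes\n", n);
    else
      std::printf("stored uncompressed\n");
    return 0;
  }
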
MAX_WBITS : @@ -40,6 +59,7 @@ static uint compress_zlib(THD *thd, char *to, const char *from, uint length) stream.avail_in= length; stream.next_in= (Bytef*) from; + DBUG_ASSERT(length >= static_cast(original_pack_length) + 1); stream.avail_out= length - original_pack_length - 1; stream.next_out= (Bytef*) to + original_pack_length + 1; diff --git a/sql/field_conv.cc b/sql/field_conv.cc index d648c90e114..64d0bc6c452 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -487,10 +487,11 @@ static void do_cut_string_complex(Copy_field *copy) memcpy(copy->to_ptr, copy->from_ptr, copy_length); /* Check if we lost any important characters */ - if (prefix.well_formed_error_pos() || - cs->cset->scan(cs, (char*) copy->from_ptr + copy_length, - (char*) from_end, - MY_SEQ_SPACES) < (copy->from_length - copy_length)) + if (unlikely(prefix.well_formed_error_pos() || + cs->cset->scan(cs, (char*) copy->from_ptr + copy_length, + (char*) from_end, + MY_SEQ_SPACES) < + (copy->from_length - copy_length))) { copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); diff --git a/sql/filesort.cc b/sql/filesort.cc index 00dfa08bba8..b2c88cda7b3 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -376,7 +376,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, } } tracker->report_merge_passes_at_end(thd->query_plan_fsort_passes); - if (error) + if (unlikely(error)) { int kill_errno= thd->killed_errno(); DBUG_ASSERT(thd->is_error() || kill_errno || thd->killed == ABORT_QUERY); @@ -414,7 +414,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, (longlong) sort->found_rows)); MYSQL_FILESORT_DONE(error, num_rows); - if (error) + if (unlikely(error)) { delete sort; sort= 0; @@ -705,10 +705,9 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, Bounded_queue *pq, ha_rows *found_rows) { - int error,flag,quick_select; - uint idx,indexpos,ref_length; - uchar *ref_pos,*next_pos,ref_buff[MAX_REFLENGTH]; - my_off_t record; + int error, quick_select; + uint idx, indexpos; + uchar *ref_pos, *next_pos, ref_buff[MAX_REFLENGTH]; TABLE *sort_form; handler *file; MY_BITMAP *save_read_set, *save_write_set, *save_vcol_set; @@ -723,14 +722,10 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, error=quick_select=0; sort_form=param->sort_form; file=sort_form->file; - ref_length=param->ref_length; ref_pos= ref_buff; quick_select=select && select->quick; - record=0; *found_rows= 0; - flag= ((file->ha_table_flags() & HA_REC_NOT_IN_SEQ) || quick_select); - if (flag) - ref_pos= &file->ref[0]; + ref_pos= &file->ref[0]; next_pos=ref_pos; DBUG_EXECUTE_IF("show_explain_in_find_all_keys", @@ -742,7 +737,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, next_pos=(uchar*) 0; /* Find records in sequence */ DBUG_EXECUTE_IF("bug14365043_1", DBUG_SET("+d,ha_rnd_init_fail");); - if (file->ha_rnd_init_with_error(1)) + if (unlikely(file->ha_rnd_init_with_error(1))) DBUG_RETURN(HA_POS_ERROR); file->extra_opt(HA_EXTRA_CACHE, thd->variables.read_buff_size); } @@ -778,29 +773,15 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, for (;;) { if (quick_select) - { - if ((error= select->quick->get_next())) - break; - file->position(sort_form->record[0]); - DBUG_EXECUTE_IF("debug_filesort", dbug_print_record(sort_form, TRUE);); - } + error= select->quick->get_next(); else /* Not quick-select */ - { - { - error= file->ha_rnd_next(sort_form->record[0]); - if (!flag) - { - 
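Editor's note: much of the field_conv.cc and filesort.cc churn above simply wraps error paths in unlikely() (and hot success paths in likely()). On GCC/Clang such hints conventionally expand to __builtin_expect so the compiler lays the unlikely branch out of line; a minimal sketch of the usual shape of these macros, not MariaDB's exact definitions from its compiler headers:

  // Typical branch-prediction hint macros and their use on an error path.
  #include <cstdio>

  #if defined(__GNUC__) || defined(__clang__)
  #  define likely(x)   __builtin_expect(!!(x), 1)
  #  define unlikely(x) __builtin_expect(!!(x), 0)
  #else
  #  define likely(x)   (x)
  #  define unlikely(x) (x)
  #endif

  static int read_row(int i)          // pretend row reader: fails on row 3
  {
    return i == 3 ? -1 : 0;
  }

  int main()
  {
    for (int i= 0; i < 5; i++)
    {
      int error= read_row(i);
      if (unlikely(error))            // error path kept off the hot path
      {
        std::printf("row %d failed\n", i);
        break;
      }
    }
    return 0;
  }
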
my_store_ptr(ref_pos,ref_length,record); // Position to row - record+= sort_form->s->db_record_offset; - } - else if (!error) - file->position(sort_form->record[0]); - } - if (error && error != HA_ERR_RECORD_DELETED) - break; - } + error= file->ha_rnd_next(sort_form->record[0]); + if (unlikely(error)) + break; + file->position(sort_form->record[0]); + DBUG_EXECUTE_IF("debug_filesort", dbug_print_record(sort_form, TRUE);); - if (thd->check_killed()) + if (unlikely(thd->check_killed())) { DBUG_PRINT("info",("Sort killed by user")); if (!quick_select) @@ -812,7 +793,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, } bool write_record= false; - if (error == 0) + if (likely(error == 0)) { param->examined_rows++; if (select && select->cond) @@ -865,7 +846,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, } /* It does not make sense to read more keys in case of a fatal error */ - if (thd->is_error()) + if (unlikely(thd->is_error())) break; /* @@ -885,11 +866,11 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, /* Signal we should use orignal column read and write maps */ sort_form->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set); - if (thd->is_error()) + if (unlikely(thd->is_error())) DBUG_RETURN(HA_POS_ERROR); DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos)); - if (error != HA_ERR_END_OF_FILE) + if (unlikely(error != HA_ERR_END_OF_FILE)) { file->print_error(error,MYF(ME_ERROR | ME_WAITTANG)); // purecov: inspected DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */ @@ -1166,12 +1147,11 @@ Type_handler_real_result::make_sort_key(uchar *to, Item *item, /** Make a sort-key from record. */ -static void make_sortkey(register Sort_param *param, - register uchar *to, uchar *ref_pos) +static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos) { - reg3 Field *field; - reg1 SORT_FIELD *sort_field; - reg5 uint length; + Field *field; + SORT_FIELD *sort_field; + uint length; for (sort_field=param->local_sortorder ; sort_field != param->end ; @@ -1261,7 +1241,7 @@ static void make_sortkey(register Sort_param *param, static void register_used_fields(Sort_param *param) { - reg1 SORT_FIELD *sort_field; + SORT_FIELD *sort_field; TABLE *table=param->sort_form; for (sort_field= param->local_sortorder ; @@ -1460,7 +1440,7 @@ static bool check_if_pq_applicable(Sort_param *param, int merge_many_buff(Sort_param *param, uchar *sort_buffer, BUFFPEK *buffpek, uint *maxbuffer, IO_CACHE *t_file) { - register uint i; + uint i; IO_CACHE t_file2,*from_file,*to_file,*temp; BUFFPEK *lastbuff; DBUG_ENTER("merge_many_buff"); @@ -1508,27 +1488,28 @@ cleanup: /** Read data to buffer. 
- @retval - (uint)-1 if something goes wrong + @retval Number of bytes read + (ulong)-1 if something goes wrong */ -uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, - uint rec_length) +ulong read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, + uint rec_length) { - register uint count; - uint length; + ulong count; + ulong length= 0; - if ((count=(uint) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count))) + if ((count= (ulong) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count))) { - if (my_b_pread(fromfile, (uchar*) buffpek->base, - (length= rec_length*count), buffpek->file_pos)) - return ((uint) -1); + length= rec_length*count; + if (unlikely(my_b_pread(fromfile, (uchar*) buffpek->base, length, + buffpek->file_pos))) + return ((ulong) -1); buffpek->key=buffpek->base; buffpek->file_pos+= length; /* New filepos */ buffpek->count-= count; buffpek->mem_count= count; } - return (count*rec_length); + return (length); } /* read_to_buffer */ @@ -1582,18 +1563,18 @@ void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length) @retval 0 OK @retval - other error + 1 ERROR */ -int merge_buffers(Sort_param *param, IO_CACHE *from_file, - IO_CACHE *to_file, uchar *sort_buffer, - BUFFPEK *lastbuff, BUFFPEK *Fb, BUFFPEK *Tb, - int flag) +bool merge_buffers(Sort_param *param, IO_CACHE *from_file, + IO_CACHE *to_file, uchar *sort_buffer, + BUFFPEK *lastbuff, BUFFPEK *Fb, BUFFPEK *Tb, + int flag) { - int error; + bool error= 0; uint rec_length,res_length,offset; size_t sort_length; - ulong maxcount; + ulong maxcount, bytes_read; ha_rows max_rows,org_max_rows; my_off_t to_start_filepos; uchar *strpos; @@ -1611,7 +1592,6 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file, thd->inc_status_sort_merge_passes(); thd->query_plan_fsort_passes++; - error=0; rec_length= param->rec_length; res_length= param->res_length; sort_length= param->sort_length; @@ -1639,18 +1619,18 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file, cmp= get_ptr_compare(sort_length); first_cmp_arg= (void*) &sort_length; } - if (init_queue(&queue, (uint) (Tb-Fb)+1, offsetof(BUFFPEK,key), 0, - (queue_compare) cmp, first_cmp_arg, 0, 0)) + if (unlikely(init_queue(&queue, (uint) (Tb-Fb)+1, offsetof(BUFFPEK,key), 0, + (queue_compare) cmp, first_cmp_arg, 0, 0))) DBUG_RETURN(1); /* purecov: inspected */ for (buffpek= Fb ; buffpek <= Tb ; buffpek++) { buffpek->base= strpos; buffpek->max_keys= maxcount; - strpos+= - (uint) (error= (int) read_to_buffer(from_file, buffpek, rec_length)); - - if (error == -1) + bytes_read= read_to_buffer(from_file, buffpek, rec_length); + if (unlikely(bytes_read == (ulong) -1)) goto err; /* purecov: inspected */ + + strpos+= bytes_read; buffpek->max_keys= buffpek->mem_count; // If less data in buffers than expected queue_insert(&queue, (uchar*) buffpek); } @@ -1670,13 +1650,13 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file, buffpek->key+= rec_length; if (! 
--buffpek->mem_count) { - if (!(error= (int) read_to_buffer(from_file, buffpek, - rec_length))) + if (unlikely(!(bytes_read= read_to_buffer(from_file, buffpek, + rec_length)))) { (void) queue_remove_top(&queue); reuse_freed_buff(&queue, buffpek, rec_length); } - else if (error == -1) + else if (unlikely(bytes_read == (ulong) -1)) goto err; /* purecov: inspected */ } queue_replace_top(&queue); // Top element has been used @@ -1686,10 +1666,9 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file, while (queue.elements > 1) { - if (killable && thd->check_killed()) - { - error= 1; goto err; /* purecov: inspected */ - } + if (killable && unlikely(thd->check_killed())) + goto err; /* purecov: inspected */ + for (;;) { buffpek= (BUFFPEK*) queue_top(&queue); @@ -1726,9 +1705,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file, if (!check_dupl_count || dupl_count >= min_dupl_count) { if (my_b_write(to_file, src+wr_offset, wr_len)) - { - error=1; goto err; /* purecov: inspected */ - } + goto err; /* purecov: inspected */ } if (cmp) { @@ -1739,7 +1716,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file, } if (!--max_rows) { - error= 0; /* purecov: inspected */ + /* Nothing more to do */ goto end; /* purecov: inspected */ } @@ -1747,14 +1724,14 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file, buffpek->key+= rec_length; if (! --buffpek->mem_count) { - if (!(error= (int) read_to_buffer(from_file, buffpek, - rec_length))) + if (unlikely(!(bytes_read= read_to_buffer(from_file, buffpek, + rec_length)))) { (void) queue_remove_top(&queue); reuse_freed_buff(&queue, buffpek, rec_length); break; /* One buffer have been removed */ } - else if (error == -1) + else if (unlikely(bytes_read == (ulong) -1)) goto err; /* purecov: inspected */ } queue_replace_top(&queue); /* Top element has been replaced */ @@ -1790,14 +1767,9 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file, { src= unique_buff; if (my_b_write(to_file, src+wr_offset, wr_len)) - { - error=1; goto err; /* purecov: inspected */ - } + goto err; /* purecov: inspected */ if (!--max_rows) - { - error= 0; goto end; - } } } @@ -1813,13 +1785,11 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file, { if (my_b_write(to_file, (uchar*) buffpek->key, (size_t)(rec_length*buffpek->mem_count))) - { - error= 1; goto err; /* purecov: inspected */ - } + goto err; /* purecov: inspected */ } else { - register uchar *end; + uchar *end; src= buffpek->key+offset; for (end= src+buffpek->mem_count*rec_length ; src != end ; @@ -1832,21 +1802,26 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file, continue; } if (my_b_write(to_file, src, wr_len)) - { - error=1; goto err; - } + goto err; } } } - while ((error=(int) read_to_buffer(from_file, buffpek, rec_length)) - != -1 && error != 0); + while (likely(!(error= + (bytes_read= read_to_buffer(from_file, buffpek, + rec_length)) == (ulong) -1)) && + bytes_read != 0); end: lastbuff->count= MY_MIN(org_max_rows-max_rows, param->max_rows); lastbuff->file_pos= to_start_filepos; -err: +cleanup: delete_queue(&queue); DBUG_RETURN(error); + +err: + error= 1; + goto cleanup; + } /* merge_buffers */ diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc index b2bc1aee091..9241dd79113 100644 --- a/sql/gen_lex_hash.cc +++ b/sql/gen_lex_hash.cc @@ -78,6 +78,7 @@ So, we can read full search-structure as 32-bit word */ #define NO_YACC_SYMBOLS +#undef CHECK_UNLIKELY #include "mariadb.h" #include "mysql_version.h" #include "lex.h" @@ -140,7 +141,8 @@ void insert_into_hash(hash_lex_struct 
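Editor's note: with read_to_buffer() now returning the byte count as ulong and (ulong)-1 on failure, the callers in the hunks above have to distinguish three cases — I/O error, nothing left to read, and data read. A compact sketch of that calling convention, with a dummy reader in place of the real IO_CACHE/BUFFPEK machinery:

  // The (ulong)-1 sentinel convention used by the reworked read_to_buffer();
  // fake_read is a stand-in for the real function.
  #include <cstdio>
  typedef unsigned long ulong;

  static ulong fake_read(int step)       // 2 chunks of data, then EOF
  {
    if (step < 0) return (ulong) -1;     // error
    return step < 2 ? 512 : 0;           // bytes read, 0 = nothing left
  }

  int main()
  {
    for (int step= 0; ; step++)
    {
      ulong bytes_read= fake_read(step);
      if (bytes_read == (ulong) -1)      // I/O error: abort the merge pass
      {
        std::printf("read error\n");
        return 1;
      }
      if (bytes_read == 0)               // buffer exhausted: stop reading
        break;
      std::printf("got %lu bytes\n", bytes_read);
    }
    return 0;
  }
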
*root, const char *name, if (root->first_char>(*name)) { size_t new_size= root->last_char-(*name)+1; - if (new_sizechar_tails; tails= (hash_lex_struct*)realloc((char*)tails, sizeof(hash_lex_struct)*new_size); @@ -155,7 +157,8 @@ void insert_into_hash(hash_lex_struct *root, const char *name, if (root->last_char<(*name)) { size_t new_size= (*name)-root->first_char+1; - if (new_sizechar_tails; tails= (hash_lex_struct*)realloc((char*)tails, sizeof(hash_lex_struct)*new_size); @@ -401,8 +404,8 @@ int main(int argc,char **argv) static SYMBOL *get_hash_symbol(const char *s,\n\ unsigned int len,bool function)\n\ {\n\ - register uchar *hash_map;\n\ - register const char *cur_str= s;\n\ + uchar *hash_map;\n\ + const char *cur_str= s;\n\ \n\ if (len == 0) {\n\ DBUG_PRINT(\"warning\", (\"get_hash_symbol() received a request for a zero-length symbol, which is probably a mistake.\"));\ @@ -414,25 +417,25 @@ static SYMBOL *get_hash_symbol(const char *s,\n\ if (function){\n\ if (len>sql_functions_max_len) return 0;\n\ hash_map= sql_functions_map;\n\ - register uint32 cur_struct= uint4korr(hash_map+((len-1)*4));\n\ + uint32 cur_struct= uint4korr(hash_map+((len-1)*4));\n\ \n\ for (;;){\n\ - register uchar first_char= (uchar)cur_struct;\n\ + uchar first_char= (uchar)cur_struct;\n\ \n\ if (first_char == 0)\n\ {\n\ - register int16 ires= (int16)(cur_struct>>16);\n\ + int16 ires= (int16)(cur_struct>>16);\n\ if (ires==array_elements(symbols)) return 0;\n\ - register SYMBOL *res;\n\ + SYMBOL *res;\n\ if (ires>=0) \n\ res= symbols+ires;\n\ else\n\ res= sql_functions-ires-1;\n\ - register uint count= (uint) (cur_str - s);\n\ + uint count= (uint) (cur_str - s);\n\ return lex_casecmp(cur_str,res->name+count,len-count) ? 0 : res;\n\ }\n\ \n\ - register uchar cur_char= (uchar)to_upper_lex[(uchar)*cur_str];\n\ + uchar cur_char= (uchar)to_upper_lex[(uchar)*cur_str];\n\ if (cur_char>=8;\n\ if (cur_char>(uchar)cur_struct) return 0;\n\ @@ -448,20 +451,20 @@ static SYMBOL *get_hash_symbol(const char *s,\n\ }else{\n\ if (len>symbols_max_len) return 0;\n\ hash_map= symbols_map;\n\ - register uint32 cur_struct= uint4korr(hash_map+((len-1)*4));\n\ + uint32 cur_struct= uint4korr(hash_map+((len-1)*4));\n\ \n\ for (;;){\n\ - register uchar first_char= (uchar)cur_struct;\n\ + uchar first_char= (uchar)cur_struct;\n\ \n\ - if (first_char==0){\n\ - register int16 ires= (int16)(cur_struct>>16);\n\ + if (first_char==0) {\n\ + int16 ires= (int16)(cur_struct>>16);\n\ if (ires==array_elements(symbols)) return 0;\n\ - register SYMBOL *res= symbols+ires;\n\ - register uint count= (uint) (cur_str - s);\n\ + SYMBOL *res= symbols+ires;\n\ + uint count= (uint) (cur_str - s);\n\ return lex_casecmp(cur_str,res->name+count,len-count)!=0 ? 
0 : res;\n\ }\n\ \n\ - register uchar cur_char= (uchar)to_upper_lex[(uchar)*cur_str];\n\ + uchar cur_char= (uchar)to_upper_lex[(uchar)*cur_str];\n\ if (cur_char>=8;\n\ if (cur_char>(uchar)cur_struct) return 0;\n\ diff --git a/sql/group_by_handler.cc b/sql/group_by_handler.cc index e75800d8986..f18758a2d94 100644 --- a/sql/group_by_handler.cc +++ b/sql/group_by_handler.cc @@ -63,9 +63,8 @@ int Pushdown_query::execute(JOIN *join) while (!(err= handler->next_row())) { - if (thd->check_killed()) + if (unlikely(thd->check_killed())) { - thd->send_kill_message(); handler->end_scan(); DBUG_RETURN(-1); } @@ -78,7 +77,7 @@ int Pushdown_query::execute(JOIN *join) if ((err= table->file->ha_write_tmp_row(table->record[0]))) { bool is_duplicate; - if (!table->file->is_fatal_error(err, HA_CHECK_DUP)) + if (likely(!table->file->is_fatal_error(err, HA_CHECK_DUP))) continue; // Distinct elimination if (create_internal_tmp_table_from_heap(thd, table, @@ -98,7 +97,7 @@ int Pushdown_query::execute(JOIN *join) { int error; /* result < 0 if row was not accepted and should not be counted */ - if ((error= join->result->send_data(*join->fields))) + if (unlikely((error= join->result->send_data(*join->fields)))) { handler->end_scan(); DBUG_RETURN(error < 0 ? 0 : -1); diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 30d4b338147..0775d67a592 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -738,12 +738,16 @@ int ha_partition::create(const char *name, TABLE *table_arg, for (j= 0; j < m_part_info->num_subparts; j++) { part_elem= sub_it++; - if ((error= create_partition_name(name_buff, sizeof(name_buff), path, - name_buffer_ptr, NORMAL_PART_NAME, FALSE))) + if (unlikely((error= create_partition_name(name_buff, + sizeof(name_buff), path, + name_buffer_ptr, + NORMAL_PART_NAME, FALSE)))) goto create_error; - if ((error= set_up_table_before_create(table_arg, name_buff, - create_info, part_elem)) || - ((error= (*file)->ha_create(name_buff, table_arg, create_info)))) + if (unlikely((error= set_up_table_before_create(table_arg, name_buff, + create_info, + part_elem)) || + ((error= (*file)->ha_create(name_buff, table_arg, + create_info))))) goto create_error; name_buffer_ptr= strend(name_buffer_ptr) + 1; @@ -752,12 +756,15 @@ int ha_partition::create(const char *name, TABLE *table_arg, } else { - if ((error= create_partition_name(name_buff, sizeof(name_buff), path, - name_buffer_ptr, NORMAL_PART_NAME, FALSE))) + if (unlikely((error= create_partition_name(name_buff, sizeof(name_buff), + path, name_buffer_ptr, + NORMAL_PART_NAME, FALSE)))) goto create_error; - if ((error= set_up_table_before_create(table_arg, name_buff, - create_info, part_elem)) || - ((error= (*file)->ha_create(name_buff, table_arg, create_info)))) + if (unlikely((error= set_up_table_before_create(table_arg, name_buff, + create_info, + part_elem)) || + ((error= (*file)->ha_create(name_buff, table_arg, + create_info))))) goto create_error; name_buffer_ptr= strend(name_buffer_ptr) + 1; @@ -832,16 +839,19 @@ int ha_partition::drop_partitions(const char *path) { partition_element *sub_elem= sub_it++; part= i * num_subparts + j; - if ((ret_error= create_subpartition_name(part_name_buff, - sizeof(part_name_buff), path, - part_elem->partition_name, - sub_elem->partition_name, name_variant))) + if (unlikely((ret_error= + create_subpartition_name(part_name_buff, + sizeof(part_name_buff), path, + part_elem->partition_name, + sub_elem->partition_name, + name_variant)))) error= ret_error; file= m_file[part]; DBUG_PRINT("info", ("Drop subpartition 
%s", part_name_buff)); - if ((ret_error= file->ha_delete_table(part_name_buff))) + if (unlikely((ret_error= file->ha_delete_table(part_name_buff)))) error= ret_error; - if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos)) + if (unlikely(deactivate_ddl_log_entry(sub_elem->log_entry-> + entry_pos))) error= 1; } while (++j < num_subparts); } @@ -855,9 +865,10 @@ int ha_partition::drop_partitions(const char *path) { file= m_file[i]; DBUG_PRINT("info", ("Drop partition %s", part_name_buff)); - if ((ret_error= file->ha_delete_table(part_name_buff))) + if (unlikely((ret_error= file->ha_delete_table(part_name_buff)))) error= ret_error; - if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos)) + if (unlikely(deactivate_ddl_log_entry(part_elem->log_entry-> + entry_pos))) error= 1; } } @@ -938,15 +949,18 @@ int ha_partition::rename_partitions(const char *path) { sub_elem= sub_it++; file= m_reorged_file[part_count++]; - if ((ret_error= create_subpartition_name(norm_name_buff, - sizeof(norm_name_buff), path, - part_elem->partition_name, - sub_elem->partition_name, NORMAL_PART_NAME))) + if (unlikely((ret_error= + create_subpartition_name(norm_name_buff, + sizeof(norm_name_buff), path, + part_elem->partition_name, + sub_elem->partition_name, + NORMAL_PART_NAME)))) error= ret_error; DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff)); - if ((ret_error= file->ha_delete_table(norm_name_buff))) + if (unlikely((ret_error= file->ha_delete_table(norm_name_buff)))) error= ret_error; - else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos)) + else if (unlikely(deactivate_ddl_log_entry(sub_elem->log_entry-> + entry_pos))) error= 1; else sub_elem->log_entry= NULL; /* Indicate success */ @@ -955,16 +969,19 @@ int ha_partition::rename_partitions(const char *path) else { file= m_reorged_file[part_count++]; - if ((ret_error= create_partition_name(norm_name_buff, - sizeof(norm_name_buff), path, - part_elem->partition_name, NORMAL_PART_NAME, TRUE))) + if (unlikely((ret_error= + create_partition_name(norm_name_buff, + sizeof(norm_name_buff), path, + part_elem->partition_name, + NORMAL_PART_NAME, TRUE)))) error= ret_error; else { DBUG_PRINT("info", ("Delete partition %s", norm_name_buff)); - if ((ret_error= file->ha_delete_table(norm_name_buff))) + if (unlikely((ret_error= file->ha_delete_table(norm_name_buff)))) error= ret_error; - else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos)) + else if (unlikely(deactivate_ddl_log_entry(part_elem->log_entry-> + entry_pos))) error= 1; else part_elem->log_entry= NULL; /* Indicate success */ @@ -1011,33 +1028,39 @@ int ha_partition::rename_partitions(const char *path) { sub_elem= sub_it++; part= i * num_subparts + j; - if ((ret_error= create_subpartition_name(norm_name_buff, - sizeof(norm_name_buff), path, - part_elem->partition_name, - sub_elem->partition_name, NORMAL_PART_NAME))) + if (unlikely((ret_error= + create_subpartition_name(norm_name_buff, + sizeof(norm_name_buff), path, + part_elem->partition_name, + sub_elem->partition_name, + NORMAL_PART_NAME)))) error= ret_error; if (part_elem->part_state == PART_IS_CHANGED) { file= m_reorged_file[part_count++]; DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff)); - if ((ret_error= file->ha_delete_table(norm_name_buff))) + if (unlikely((ret_error= file->ha_delete_table(norm_name_buff)))) error= ret_error; - else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos)) + else if (unlikely(deactivate_ddl_log_entry(sub_elem->log_entry-> + entry_pos))) error= 1; (void) 
sync_ddl_log(); } file= m_new_file[part]; - if ((ret_error= create_subpartition_name(part_name_buff, - sizeof(part_name_buff), path, - part_elem->partition_name, - sub_elem->partition_name, TEMP_PART_NAME))) + if (unlikely((ret_error= + create_subpartition_name(part_name_buff, + sizeof(part_name_buff), path, + part_elem->partition_name, + sub_elem->partition_name, + TEMP_PART_NAME)))) error= ret_error; DBUG_PRINT("info", ("Rename subpartition from %s to %s", part_name_buff, norm_name_buff)); - if ((ret_error= file->ha_rename_table(part_name_buff, - norm_name_buff))) + if (unlikely((ret_error= file->ha_rename_table(part_name_buff, + norm_name_buff)))) error= ret_error; - else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos)) + else if (unlikely(deactivate_ddl_log_entry(sub_elem->log_entry-> + entry_pos))) error= 1; else sub_elem->log_entry= NULL; @@ -1045,12 +1068,17 @@ int ha_partition::rename_partitions(const char *path) } else { - if ((ret_error= create_partition_name(norm_name_buff, - sizeof(norm_name_buff), path, - part_elem->partition_name, NORMAL_PART_NAME, TRUE)) || - (ret_error= create_partition_name(part_name_buff, - sizeof(part_name_buff), path, - part_elem->partition_name, TEMP_PART_NAME, TRUE))) + if (unlikely((ret_error= + create_partition_name(norm_name_buff, + sizeof(norm_name_buff), path, + part_elem->partition_name, + NORMAL_PART_NAME, TRUE)) || + (ret_error= create_partition_name(part_name_buff, + sizeof(part_name_buff), + path, + part_elem-> + partition_name, + TEMP_PART_NAME, TRUE)))) error= ret_error; else { @@ -1058,19 +1086,21 @@ int ha_partition::rename_partitions(const char *path) { file= m_reorged_file[part_count++]; DBUG_PRINT("info", ("Delete partition %s", norm_name_buff)); - if ((ret_error= file->ha_delete_table(norm_name_buff))) + if (unlikely((ret_error= file->ha_delete_table(norm_name_buff)))) error= ret_error; - else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos)) + else if (unlikely(deactivate_ddl_log_entry(part_elem->log_entry-> + entry_pos))) error= 1; (void) sync_ddl_log(); } file= m_new_file[i]; DBUG_PRINT("info", ("Rename partition from %s to %s", part_name_buff, norm_name_buff)); - if ((ret_error= file->ha_rename_table(part_name_buff, - norm_name_buff))) + if (unlikely((ret_error= file->ha_rename_table(part_name_buff, + norm_name_buff)))) error= ret_error; - else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos)) + else if (unlikely(deactivate_ddl_log_entry(part_elem->log_entry-> + entry_pos))) error= 1; else part_elem->log_entry= NULL; @@ -1394,7 +1424,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, part= i * num_subparts + j; DBUG_PRINT("info", ("Optimize subpartition %u (%s)", part, sub_elem->partition_name)); - if ((error= handle_opt_part(thd, check_opt, part, flag))) + if (unlikely((error= handle_opt_part(thd, check_opt, part, flag)))) { /* print a line which partition the error belongs to */ if (error != HA_ADMIN_NOT_IMPLEMENTED && @@ -1421,7 +1451,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, { DBUG_PRINT("info", ("Optimize partition %u (%s)", i, part_elem->partition_name)); - if ((error= handle_opt_part(thd, check_opt, i, flag))) + if (unlikely((error= handle_opt_part(thd, check_opt, i, flag)))) { /* print a line which partition the error belongs to */ if (error != HA_ADMIN_NOT_IMPLEMENTED && @@ -1554,7 +1584,8 @@ int ha_partition::prepare_new_partition(TABLE *tbl, truncate_partition_filename((char*) p_elem->data_file_name); 
truncate_partition_filename((char*) p_elem->index_file_name); - if ((error= set_up_table_before_create(tbl, part_name, create_info, p_elem))) + if (unlikely((error= set_up_table_before_create(tbl, part_name, create_info, + p_elem)))) goto error_create; if (!(file->ht->flags & HTON_CAN_READ_CONNECT_STRING_IN_PARTITION)) @@ -1573,8 +1604,8 @@ int ha_partition::prepare_new_partition(TABLE *tbl, goto error_create; } DBUG_PRINT("info", ("partition %s created", part_name)); - if ((error= file->ha_open(tbl, part_name, m_mode, - m_open_test_lock | HA_OPEN_NO_PSI_CALL))) + if (unlikely((error= file->ha_open(tbl, part_name, m_mode, + m_open_test_lock | HA_OPEN_NO_PSI_CALL)))) goto error_open; DBUG_PRINT("info", ("partition %s opened", part_name)); @@ -1584,7 +1615,7 @@ int ha_partition::prepare_new_partition(TABLE *tbl, assumes that external_lock() is last call that may fail here. Otherwise see description for cleanup_new_partition(). */ - if ((error= file->ha_external_lock(ha_thd(), F_WRLCK))) + if (unlikely((error= file->ha_external_lock(ha_thd(), F_WRLCK)))) goto error_external_lock; DBUG_PRINT("info", ("partition %s external locked", part_name)); @@ -1738,7 +1769,6 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, !(m_reorged_file= (handler**) thd->calloc(sizeof(handler*)* (m_reorged_parts + 1)))) { - mem_alloc_error(sizeof(handler*)*(m_reorged_parts+1)); DBUG_RETURN(HA_ERR_OUT_OF_MEM); } @@ -1771,7 +1801,6 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, thd->calloc(sizeof(handler*)* (2*(num_remain_partitions + 1)))))) { - mem_alloc_error(sizeof(handler*)*2*(num_remain_partitions+1)); DBUG_RETURN(HA_ERR_OUT_OF_MEM); } m_added_file= &new_file_array[num_remain_partitions + 1]; @@ -1863,7 +1892,6 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, thd->mem_root, part_elem->engine_type))) { - mem_alloc_error(sizeof(handler)); DBUG_RETURN(HA_ERR_OUT_OF_MEM); } if ((*new_file)->set_ha_share_ref(&p_share_refs->ha_shares[j])) @@ -1920,21 +1948,24 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, do { partition_element *sub_elem= sub_it++; - if ((error= create_subpartition_name(part_name_buff, - sizeof(part_name_buff), path, - part_elem->partition_name, sub_elem->partition_name, - name_variant))) + if (unlikely((error= + create_subpartition_name(part_name_buff, + sizeof(part_name_buff), path, + part_elem->partition_name, + sub_elem->partition_name, + name_variant)))) { cleanup_new_partition(part_count); DBUG_RETURN(error); } part= i * num_subparts + j; DBUG_PRINT("info", ("Add subpartition %s", part_name_buff)); - if ((error= prepare_new_partition(table, create_info, - new_file_array[part], - (const char *)part_name_buff, - sub_elem, - disable_non_uniq_indexes))) + if (unlikely((error= + prepare_new_partition(table, create_info, + new_file_array[part], + (const char *)part_name_buff, + sub_elem, + disable_non_uniq_indexes)))) { cleanup_new_partition(part_count); DBUG_RETURN(error); @@ -1945,20 +1976,23 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, } else { - if ((error= create_partition_name(part_name_buff, - sizeof(part_name_buff), path, part_elem->partition_name, - name_variant, TRUE))) + if (unlikely((error= + create_partition_name(part_name_buff, + sizeof(part_name_buff), path, + part_elem->partition_name, + name_variant, TRUE)))) { cleanup_new_partition(part_count); DBUG_RETURN(error); } DBUG_PRINT("info", ("Add partition %s", part_name_buff)); - if ((error= prepare_new_partition(table, create_info, - 
new_file_array[i], - (const char *)part_name_buff, - part_elem, - disable_non_uniq_indexes))) + if (unlikely((error= + prepare_new_partition(table, create_info, + new_file_array[i], + (const char *)part_name_buff, + part_elem, + disable_non_uniq_indexes)))) { cleanup_new_partition(part_count); DBUG_RETURN(error); @@ -1992,7 +2026,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, part_elem->part_state= PART_TO_BE_DROPPED; } m_new_file= new_file_array; - if ((error= copy_partitions(copied, deleted))) + if (unlikely((error= copy_partitions(copied, deleted)))) { /* Close and unlock the new temporary partitions. @@ -2049,14 +2083,12 @@ int ha_partition::copy_partitions(ulonglong * const copied, uint32 new_part; late_extra_cache(reorg_part); - if ((result= file->ha_rnd_init_with_error(1))) + if (unlikely((result= file->ha_rnd_init_with_error(1)))) goto init_error; while (TRUE) { if ((result= file->ha_rnd_next(m_rec0))) { - if (result == HA_ERR_RECORD_DELETED) - continue; //Probably MyISAM if (result != HA_ERR_END_OF_FILE) goto error; /* @@ -2331,7 +2363,7 @@ uint ha_partition::del_ren_table(const char *from, const char *to) Delete table, start by delete the .par file. If error, break, otherwise delete as much as possible. */ - if ((error= handler::delete_table(from))) + if (unlikely((error= handler::delete_table(from)))) DBUG_RETURN(error); } /* @@ -2347,17 +2379,19 @@ uint ha_partition::del_ren_table(const char *from, const char *to) i= 0; do { - if ((error= create_partition_name(from_buff, sizeof(from_buff), from_path, - name_buffer_ptr, NORMAL_PART_NAME, FALSE))) + if (unlikely((error= create_partition_name(from_buff, sizeof(from_buff), + from_path, name_buffer_ptr, + NORMAL_PART_NAME, FALSE)))) goto rename_error; if (to != NULL) { // Rename branch - if ((error= create_partition_name(to_buff, sizeof(to_buff), to_path, - name_buffer_ptr, NORMAL_PART_NAME, FALSE))) + if (unlikely((error= create_partition_name(to_buff, sizeof(to_buff), + to_path, name_buffer_ptr, + NORMAL_PART_NAME, FALSE)))) goto rename_error; error= (*file)->ha_rename_table(from_buff, to_buff); - if (error) + if (unlikely(error)) goto rename_error; } else // delete branch @@ -2365,13 +2399,13 @@ uint ha_partition::del_ren_table(const char *from, const char *to) error= (*file)->ha_delete_table(from_buff); } name_buffer_ptr= strend(name_buffer_ptr) + 1; - if (error) + if (unlikely(error)) save_error= error; i++; } while (*(++file)); if (to != NULL) { - if ((error= handler::rename_table(from, to))) + if (unlikely((error= handler::rename_table(from, to)))) { /* Try to revert everything, ignore errors */ (void) handler::rename_table(to, from); @@ -2883,10 +2917,8 @@ bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root) DBUG_ENTER("ha_partition::new_handlers_from_part_info"); if (!(m_file= (handler **) alloc_root(mem_root, alloc_len))) - { - mem_alloc_error(alloc_len); - goto error_end; - } + goto error; + m_file_tot_parts= m_tot_parts; bzero((char*) m_file, alloc_len); DBUG_ASSERT(m_part_info->num_parts > 0); @@ -2927,8 +2959,6 @@ bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root) } DBUG_RETURN(FALSE); error: - mem_alloc_error(sizeof(handler)); -error_end: DBUG_RETURN(TRUE); } @@ -3486,7 +3516,8 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) if (init_partition_bitmaps()) goto err_alloc; - if ((error= m_part_info->set_partition_bitmaps(m_partitions_to_open))) + if (unlikely((error= + m_part_info->set_partition_bitmaps(m_partitions_to_open)))) goto 
err_alloc; /* Allocate memory used with MMR */ @@ -3535,8 +3566,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) if (!bitmap_is_set(&m_is_clone_of->m_opened_partitions, i)) continue; - if ((error= create_partition_name(name_buff, sizeof(name_buff), name, - name_buffer_ptr, NORMAL_PART_NAME, FALSE))) + if (unlikely((error= create_partition_name(name_buff, sizeof(name_buff), + name, name_buffer_ptr, + NORMAL_PART_NAME, FALSE)))) goto err_handler; /* ::clone() will also set ha_share from the original. */ if (!(m_file[i]= file[i]->clone(name_buff, m_clone_mem_root))) @@ -3553,7 +3585,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) } else { - if ((error= open_read_partitions(name_buff, sizeof(name_buff)))) + if (unlikely((error= open_read_partitions(name_buff, sizeof(name_buff))))) goto err_handler; m_num_locks= m_file_sample->lock_count(); } @@ -3886,7 +3918,7 @@ int ha_partition::external_lock(THD *thd, int lock_type) i= bitmap_get_next_set(used_partitions, i)) { DBUG_PRINT("info", ("external_lock(thd, %d) part %u", lock_type, i)); - if ((error= m_file[i]->ha_external_lock(thd, lock_type))) + if (unlikely((error= m_file[i]->ha_external_lock(thd, lock_type)))) { if (lock_type != F_UNLCK) goto err_handler; @@ -4050,7 +4082,7 @@ int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type) i < m_tot_parts; i= bitmap_get_next_set(&m_part_info->lock_partitions, i)) { - if ((error= m_file[i]->start_stmt(thd, lock_type))) + if (unlikely((error= m_file[i]->start_stmt(thd, lock_type)))) break; /* Add partition to be called in reset(). */ bitmap_set_bit(&m_partitions_to_reset, i); @@ -4235,7 +4267,7 @@ int ha_partition::write_row(uchar * buf) it is highly likely that we will not be able to insert it into the correct partition. We must check and fail if neccessary. */ - if (error) + if (unlikely(error)) goto exit; /* @@ -4346,9 +4378,10 @@ int ha_partition::update_row(const uchar *old_data, const uchar *new_data) DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), old_part_id)); #endif - if ((error= get_part_for_buf(new_data, m_rec0, m_part_info, &new_part_id))) + if (unlikely((error= get_part_for_buf(new_data, m_rec0, m_part_info, + &new_part_id)))) goto exit; - if (!bitmap_is_set(&(m_part_info->lock_partitions), new_part_id)) + if (unlikely(!bitmap_is_set(&(m_part_info->lock_partitions), new_part_id))) { error= HA_ERR_NOT_IN_LOCK_PARTITIONS; goto exit; @@ -4385,13 +4418,13 @@ int ha_partition::update_row(const uchar *old_data, const uchar *new_data) error= m_file[new_part_id]->ha_write_row((uchar*) new_data); reenable_binlog(thd); table->next_number_field= saved_next_number_field; - if (error) + if (unlikely(error)) goto exit; tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */ error= m_file[old_part_id]->ha_delete_row(old_data); reenable_binlog(thd); - if (error) + if (unlikely(error)) goto exit; } @@ -4481,7 +4514,7 @@ int ha_partition::delete_row(const uchar *buf) or last historical partition, but DELETE HISTORY can delete from any historical partition. So, skip the check in this case. 
*/ - if (!thd->lex->vers_conditions) // if not DELETE HISTORY + if (!thd->lex->vers_conditions.is_set()) // if not DELETE HISTORY { uint32 part_id; error= get_part_for_buf(buf, m_rec0, m_part_info, &part_id); @@ -4535,7 +4568,7 @@ int ha_partition::delete_all_rows() i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { /* Can be pruned, like DELETE FROM t PARTITION (pX) */ - if ((error= m_file[i]->ha_delete_all_rows())) + if (unlikely((error= m_file[i]->ha_delete_all_rows()))) DBUG_RETURN(error); } DBUG_RETURN(0); @@ -4567,7 +4600,7 @@ int ha_partition::truncate() file= m_file; do { - if ((error= (*file)->ha_truncate())) + if (unlikely((error= (*file)->ha_truncate()))) DBUG_RETURN(error); } while (*(++file)); DBUG_RETURN(0); @@ -4625,7 +4658,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt) part= i * num_subparts + j; DBUG_PRINT("info", ("truncate subpartition %u (%s)", part, sub_elem->partition_name)); - if ((error= m_file[part]->ha_truncate())) + if (unlikely((error= m_file[part]->ha_truncate()))) break; sub_elem->part_state= PART_NORMAL; } while (++j < num_subparts); @@ -4901,7 +4934,7 @@ int ha_partition::rnd_init(bool scan) i < m_tot_parts; i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { - if ((error= m_file[i]->ha_rnd_init(scan))) + if (unlikely((error= m_file[i]->ha_rnd_init(scan)))) goto err; } @@ -5035,9 +5068,6 @@ int ha_partition::rnd_next(uchar *buf) /* if we get here, then the current partition ha_rnd_next returned failure */ - if (result == HA_ERR_RECORD_DELETED) - continue; // Probably MyISAM - if (result != HA_ERR_END_OF_FILE) goto end_dont_reset_start_part; // Return error @@ -5365,7 +5395,7 @@ int ha_partition::index_init(uint inx, bool sorted) i < m_tot_parts; i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { - if ((error= m_file[i]->ha_index_init(inx, sorted))) + if (unlikely((error= m_file[i]->ha_index_init(inx, sorted)))) goto err; DBUG_EXECUTE_IF("ha_partition_fail_index_init", { @@ -5375,7 +5405,7 @@ int ha_partition::index_init(uint inx, bool sorted) }); } err: - if (error) + if (unlikely(error)) { /* End the previously initialized indexes. 
*/ uint j; @@ -5570,7 +5600,7 @@ int ha_partition::common_index_read(uchar *buf, bool have_start_key) m_start_key.keypart_map, m_start_key.flag, key_len)); DBUG_ASSERT(key_len); } - if ((error= partition_scan_set_up(buf, have_start_key))) + if (unlikely((error= partition_scan_set_up(buf, have_start_key)))) { DBUG_RETURN(error); } @@ -5596,7 +5626,7 @@ int ha_partition::common_index_read(uchar *buf, bool have_start_key) */ DBUG_PRINT("info", ("doing unordered scan")); error= handle_pre_scan(FALSE, FALSE); - if (!error) + if (likely(!error)) error= handle_unordered_scan_next_partition(buf); } else @@ -5686,12 +5716,12 @@ int ha_partition::common_first_last(uchar *buf) { int error; - if ((error= partition_scan_set_up(buf, FALSE))) + if (unlikely((error= partition_scan_set_up(buf, FALSE)))) return error; if (!m_ordered_scan_ongoing && m_index_scan_type != partition_index_last) { - if ((error= handle_pre_scan(FALSE, check_parallel_search()))) + if (unlikely((error= handle_pre_scan(FALSE, check_parallel_search())))) return error; return handle_unordered_scan_next_partition(buf); } @@ -5739,8 +5769,8 @@ int ha_partition::index_read_idx_map(uchar *buf, uint index, { error= m_file[part]->ha_index_read_idx_map(buf, index, key, keypart_map, find_flag); - if (error != HA_ERR_KEY_NOT_FOUND && - error != HA_ERR_END_OF_FILE) + if (likely(error != HA_ERR_KEY_NOT_FOUND && + error != HA_ERR_END_OF_FILE)) break; } if (part <= m_part_spec.end_part) @@ -6236,9 +6266,9 @@ ha_rows ha_partition::multi_range_read_info_const(uint keyno, save_part_spec= m_part_spec; seq_it= seq->init(seq_init_param, n_ranges, *mrr_mode); - if ((error= multi_range_key_create_key(seq, seq_it))) + if (unlikely((error= multi_range_key_create_key(seq, seq_it)))) { - if (error == HA_ERR_END_OF_FILE) // No keys in range + if (likely(error == HA_ERR_END_OF_FILE)) // No keys in range { rows= 0; goto calc_cost; @@ -6355,7 +6385,7 @@ int ha_partition::multi_range_read_init(RANGE_SEQ_IF *seq, m_seq_if= seq; m_seq= seq->init(seq_init_param, n_ranges, mrr_mode); - if ((error= multi_range_key_create_key(seq, m_seq))) + if (unlikely((error= multi_range_key_create_key(seq, m_seq)))) DBUG_RETURN(0); m_part_seq_if.get_key_info= (seq->get_key_info ? 
@@ -6406,12 +6436,12 @@ int ha_partition::multi_range_read_init(RANGE_SEQ_IF *seq, else m_mrr_buffer[i]= *buf; - if ((error= (*file)-> - multi_range_read_init(&m_part_seq_if, - &m_partition_part_key_multi_range_hld[i], - m_part_mrr_range_length[i], - mrr_mode, - &m_mrr_buffer[i]))) + if (unlikely((error= (*file)-> + multi_range_read_init(&m_part_seq_if, + &m_partition_part_key_multi_range_hld[i], + m_part_mrr_range_length[i], + mrr_mode, + &m_mrr_buffer[i])))) goto error; m_stock_range_seq[i]= 0; } @@ -6440,25 +6470,28 @@ int ha_partition::multi_range_read_next(range_id_t *range_info) { if (m_multi_range_read_first) { - if ((error= handle_ordered_index_scan(table->record[0], FALSE))) + if (unlikely((error= handle_ordered_index_scan(table->record[0], + FALSE)))) DBUG_RETURN(error); if (!m_pre_calling) m_multi_range_read_first= FALSE; } - else if ((error= handle_ordered_next(table->record[0], eq_range))) + else if (unlikely((error= handle_ordered_next(table->record[0], + eq_range)))) DBUG_RETURN(error); *range_info= m_mrr_range_current->ptr; } else { - if (m_multi_range_read_first) + if (unlikely(m_multi_range_read_first)) { - if ((error= handle_unordered_scan_next_partition(table->record[0]))) + if (unlikely((error= + handle_unordered_scan_next_partition(table->record[0])))) DBUG_RETURN(error); if (!m_pre_calling) m_multi_range_read_first= FALSE; } - else if ((error= handle_unordered_next(table->record[0], FALSE))) + else if (unlikely((error= handle_unordered_next(table->record[0], FALSE)))) DBUG_RETURN(error); *range_info= @@ -6631,7 +6664,7 @@ int ha_partition::ft_init() */ if (m_pre_calling) { - if ((error= pre_ft_end())) + if (unlikely((error= pre_ft_end()))) goto err1; } else @@ -6642,7 +6675,7 @@ int ha_partition::ft_init() if (bitmap_is_set(&(m_part_info->read_partitions), i)) { error= m_pre_calling ? 
m_file[i]->pre_ft_init() : m_file[i]->ft_init(); - if (error) + if (unlikely(error)) goto err2; } } @@ -6889,9 +6922,6 @@ int ha_partition::ft_read(uchar *buf) /* if we get here, then the current partition ft_next returned failure */ - if (result == HA_ERR_RECORD_DELETED) - continue; // Probably MyISAM - if (result != HA_ERR_END_OF_FILE) goto end_dont_reset_start_part; // Return error @@ -7195,7 +7225,7 @@ int ha_partition::handle_pre_scan(bool reverse_order, bool use_parallel) } if (error == HA_ERR_END_OF_FILE) error= 0; - if (error) + if (unlikely(error)) DBUG_RETURN(error); } table->status= 0; @@ -7250,8 +7280,8 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same) if (m_index_scan_type == partition_read_multi_range) { - if (!(error= file-> - multi_range_read_next(&m_range_info[m_part_spec.start_part]))) + if (likely(!(error= file-> + multi_range_read_next(&m_range_info[m_part_spec.start_part])))) { m_last_part= m_part_spec.start_part; DBUG_RETURN(0); @@ -7259,7 +7289,7 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same) } else if (m_index_scan_type == partition_read_range) { - if (!(error= file->read_range_next())) + if (likely(!(error= file->read_range_next()))) { m_last_part= m_part_spec.start_part; DBUG_RETURN(0); @@ -7267,8 +7297,8 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same) } else if (is_next_same) { - if (!(error= file->ha_index_next_same(buf, m_start_key.key, - m_start_key.length))) + if (likely(!(error= file->ha_index_next_same(buf, m_start_key.key, + m_start_key.length)))) { m_last_part= m_part_spec.start_part; DBUG_RETURN(0); @@ -7276,14 +7306,14 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same) } else { - if (!(error= file->ha_index_next(buf))) + if (likely(!(error= file->ha_index_next(buf)))) { m_last_part= m_part_spec.start_part; DBUG_RETURN(0); // Row was in range } } - if (error == HA_ERR_END_OF_FILE) + if (unlikely(error == HA_ERR_END_OF_FILE)) { m_part_spec.start_part++; // Start using next part error= handle_unordered_scan_next_partition(buf); @@ -7355,12 +7385,13 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf) DBUG_ASSERT(FALSE); DBUG_RETURN(1); } - if (!error) + if (likely(!error)) { m_last_part= i; DBUG_RETURN(0); } - if ((error != HA_ERR_END_OF_FILE) && (error != HA_ERR_KEY_NOT_FOUND)) + if (likely((error != HA_ERR_END_OF_FILE) && + (error != HA_ERR_KEY_NOT_FOUND))) DBUG_RETURN(error); /* @@ -7420,7 +7451,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) error= handle_pre_scan(reverse_order, m_pre_call_use_parallel); else error= handle_pre_scan(reverse_order, check_parallel_search()); - if (error) + if (unlikely(error)) DBUG_RETURN(error); if (m_key_not_found) @@ -7484,7 +7515,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) */ error= file->read_range_first(m_start_key.key? 
&m_start_key: NULL, end_range, eq_range, TRUE); - if (!error) + if (likely(!error)) memcpy(rec_buf_ptr, table->record[0], m_rec_length); reverse_order= FALSE; break; @@ -7501,7 +7532,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) bitmap_clear_bit(&m_mrr_used_partitions, i); continue; } - if (!error) + if (likely(!error)) { memcpy(rec_buf_ptr, table->record[0], m_rec_length); reverse_order= FALSE; @@ -7525,7 +7556,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) DBUG_ASSERT(FALSE); DBUG_RETURN(HA_ERR_END_OF_FILE); } - if (!error) + if (likely(!error)) { found= TRUE; if (!m_using_extended_keys) @@ -7687,7 +7718,7 @@ int ha_partition::handle_ordered_index_scan_key_not_found() error= m_file[i]->ha_index_next(curr_rec_buf); /* HA_ERR_KEY_NOT_FOUND is not allowed from index_next! */ DBUG_ASSERT(error != HA_ERR_KEY_NOT_FOUND); - if (!error) + if (likely(!error)) { DBUG_PRINT("info", ("partition queue_insert(1)")); queue_insert(&m_queue, part_buf); @@ -7749,7 +7780,7 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same) { /* There are partitions not included in the index record queue. */ uint old_elements= m_queue.elements; - if ((error= handle_ordered_index_scan_key_not_found())) + if (unlikely((error= handle_ordered_index_scan_key_not_found()))) DBUG_RETURN(error); /* If the queue top changed, i.e. one of the partitions that gave @@ -7785,9 +7816,9 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same) bool get_next= FALSE; error= file->multi_range_read_next(&m_range_info[part_id]); DBUG_PRINT("info", ("error: %d", error)); - if (error == HA_ERR_KEY_NOT_FOUND) + if (unlikely(error == HA_ERR_KEY_NOT_FOUND)) error= HA_ERR_END_OF_FILE; - if (error == HA_ERR_END_OF_FILE) + if (unlikely(error == HA_ERR_END_OF_FILE)) { bitmap_clear_bit(&m_mrr_used_partitions, part_id); DBUG_PRINT("info", ("partition m_queue.elements: %u", m_queue.elements)); @@ -7805,7 +7836,7 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same) } get_next= TRUE; } - else if (!error) + else if (likely(!error)) { DBUG_PRINT("info", ("m_range_info[%u])->id: %u", part_id, ((PARTITION_KEY_MULTI_RANGE *) @@ -7900,7 +7931,7 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same) error= file->ha_index_next_same(rec_buf, m_start_key.key, m_start_key.length); - if (error) + if (unlikely(error)) { if (error == HA_ERR_END_OF_FILE && m_queue.elements) { @@ -7957,7 +7988,7 @@ int ha_partition::handle_ordered_prev(uchar *buf) uchar *rec_buf= queue_top(&m_queue) + PARTITION_BYTES_IN_POS; handler *file= m_file[part_id]; - if ((error= file->ha_index_prev(rec_buf))) + if (unlikely((error= file->ha_index_prev(rec_buf)))) { if (error == HA_ERR_END_OF_FILE && m_queue.elements) { @@ -8351,7 +8382,7 @@ int ha_partition::open_read_partitions(char *name_buff, size_t name_buff_size) { handler **file; char *name_buffer_ptr; - int error; + int error= 0; name_buffer_ptr= m_name_buffer_ptr; file= m_file; @@ -8362,18 +8393,27 @@ int ha_partition::open_read_partitions(char *name_buff, size_t name_buff_size) int is_open= bitmap_is_set(&m_opened_partitions, n_file); int should_be_open= bitmap_is_set(&m_part_info->read_partitions, n_file); - if (is_open && !should_be_open) - { - if ((error= (*file)->ha_close())) - goto err_handler; - bitmap_clear_bit(&m_opened_partitions, n_file); - } - else if (!is_open && should_be_open) + /* + TODO: we can close some opened partitions if they're not + used in the query. 
It probably should be syncronized with the + table_open_cache value. + + if (is_open && !should_be_open) + { + if (unlikely((error= (*file)->ha_close()))) + goto err_handler; + bitmap_clear_bit(&m_opened_partitions, n_file); + } + else + */ + if (!is_open && should_be_open) { LEX_CSTRING save_connect_string= table->s->connect_string; - if ((error= create_partition_name(name_buff, name_buff_size, - table->s->normalized_path.str, - name_buffer_ptr, NORMAL_PART_NAME, FALSE))) + if (unlikely((error= + create_partition_name(name_buff, name_buff_size, + table->s->normalized_path.str, + name_buffer_ptr, NORMAL_PART_NAME, + FALSE)))) goto err_handler; if (!((*file)->ht->flags & HTON_CAN_READ_CONNECT_STRING_IN_PARTITION)) table->s->connect_string= m_connect_string[(uint)(file-m_file)]; @@ -8404,7 +8444,7 @@ int ha_partition::change_partitions_to_open(List *partition_names) return 0; m_partitions_to_open= partition_names; - if ((error= m_part_info->set_partition_bitmaps(partition_names))) + if (unlikely((error= m_part_info->set_partition_bitmaps(partition_names)))) goto err_handler; if (m_lock_type != F_UNLCK) @@ -8419,8 +8459,8 @@ int ha_partition::change_partitions_to_open(List *partition_names) if (bitmap_cmp(&m_opened_partitions, &m_part_info->read_partitions) != 0) return 0; - if ((error= read_par_file(table->s->normalized_path.str)) || - (error= open_read_partitions(name_buff, sizeof(name_buff)))) + if (unlikely((error= read_par_file(table->s->normalized_path.str)) || + (error= open_read_partitions(name_buff, sizeof(name_buff))))) goto err_handler; clear_handler_file(); @@ -8647,6 +8687,16 @@ err_handler: HA_EXTRA_NO_READCHECK=5 No readcheck on update HA_EXTRA_READCHECK=6 Use readcheck (def) + HA_EXTRA_REMEMBER_POS: + HA_EXTRA_RESTORE_POS: + System versioning needs this for MyISAM and Aria tables. + On DELETE using PRIMARY KEY: + 1) handler::ha_index_read_map() saves rowid used for row delete/update + 2) handler::ha_update_row() can rewrite saved rowid + 3) handler::ha_delete_row()/handler::ha_update_row() expects saved but got + different rowid and operation fails + Using those flags prevents harmful side effect of 2) + 4) Operations only used by temporary tables for query processing ---------------------------------------------------------------- HA_EXTRA_RESET_STATE: @@ -8706,8 +8756,6 @@ err_handler: Only used MyISAM, only used internally in MyISAM handler, never called from server level. 
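Editor's note: the REMEMBER_POS/RESTORE_POS explanation above describes why ha_partition must forward these two flags — with system versioning, ha_update_row() in step 2 can overwrite the rowid saved in step 1, so the later delete/update would otherwise act on a stale position. The extra() hunk further below routes both flags through loop_extra(); a schematic stand-alone sketch of that dispatch, with invented enum values and a stubbed per-partition loop in place of the real handler array:

  // Schematic version of ha_partition::extra() forwarding the two new flags
  // to every underlying partition; the enum and loop are simplified stand-ins.
  #include <cstdio>

  enum ha_extra_function
  {
    HA_EXTRA_REMEMBER_POS,
    HA_EXTRA_RESTORE_POS,
    HA_EXTRA_FLUSH_CACHE
  };

  struct FakePartition
  {
    int extra(ha_extra_function op)
    {
      std::printf("partition got extra(%d)\n", (int) op);
      return 0;
    }
  };

  static FakePartition parts[3];

  static int loop_extra(ha_extra_function op)   // call extra() on each partition
  {
    for (FakePartition &p : parts)
      if (int err= p.extra(op))
        return err;
    return 0;
  }

  static int partition_extra(ha_extra_function op)
  {
    switch (op) {
    case HA_EXTRA_REMEMBER_POS:   // save the current rowid in each engine
    case HA_EXTRA_RESTORE_POS:    // restore it before the pending delete/update
    case HA_EXTRA_FLUSH_CACHE:
      return loop_extra(op);
    default:
      return 0;
    }
  }

  int main()
  {
    return partition_extra(HA_EXTRA_REMEMBER_POS);
  }
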
HA_EXTRA_KEYREAD_CHANGE_POS: - HA_EXTRA_REMEMBER_POS: - HA_EXTRA_RESTORE_POS: HA_EXTRA_PRELOAD_BUFFER_SIZE: HA_EXTRA_CHANGE_KEY_TO_DUP: HA_EXTRA_CHANGE_KEY_TO_UNIQUE: @@ -8790,6 +8838,8 @@ int ha_partition::extra(enum ha_extra_function operation) case HA_EXTRA_PREPARE_FOR_DROP: case HA_EXTRA_FLUSH_CACHE: case HA_EXTRA_PREPARE_FOR_ALTER_TABLE: + case HA_EXTRA_REMEMBER_POS: + case HA_EXTRA_RESTORE_POS: { DBUG_RETURN(loop_extra(operation)); } @@ -9387,8 +9437,8 @@ ha_rows ha_partition::records() i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { ha_rows rows; - if ((error= m_file[i]->pre_records()) || - (rows= m_file[i]->records()) == HA_POS_ERROR) + if (unlikely((error= m_file[i]->pre_records()) || + (rows= m_file[i]->records()) == HA_POS_ERROR)) DBUG_RETURN(HA_POS_ERROR); tot_rows+= rows; } @@ -10036,7 +10086,7 @@ bool ha_partition::commit_inplace_alter_table(TABLE *altered_table, ha_alter_info->handler_ctx= part_inplace_ctx->handler_ctx_array[0]; error= m_file[0]->ha_commit_inplace_alter_table(altered_table, ha_alter_info, commit); - if (error) + if (unlikely(error)) goto end; if (ha_alter_info->group_commit_ctx) { @@ -10347,7 +10397,7 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment, /* Only nb_desired_values = 1 makes sense */ (*file)->get_auto_increment(offset, increment, 1, &first_value_part, &nb_reserved_values_part); - if (first_value_part == ULONGLONG_MAX) // error in one partition + if (unlikely(first_value_part == ULONGLONG_MAX)) // error in one partition { *first_value= first_value_part; /* log that the error was between table/partition handler */ @@ -10508,7 +10558,7 @@ int ha_partition::disable_indexes(uint mode) DBUG_ASSERT(bitmap_is_set_all(&(m_part_info->lock_partitions))); for (file= m_file; *file; file++) { - if ((error= (*file)->ha_disable_indexes(mode))) + if (unlikely((error= (*file)->ha_disable_indexes(mode)))) break; } return error; @@ -10533,7 +10583,7 @@ int ha_partition::enable_indexes(uint mode) DBUG_ASSERT(bitmap_is_set_all(&(m_part_info->lock_partitions))); for (file= m_file; *file; file++) { - if ((error= (*file)->ha_enable_indexes(mode))) + if (unlikely((error= (*file)->ha_enable_indexes(mode)))) break; } return error; @@ -10558,7 +10608,7 @@ int ha_partition::indexes_are_disabled(void) DBUG_ASSERT(bitmap_is_set_all(&(m_part_info->lock_partitions))); for (file= m_file; *file; file++) { - if ((error= (*file)->indexes_are_disabled())) + if (unlikely((error= (*file)->indexes_are_disabled()))) break; } return error; @@ -10597,6 +10647,8 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair) { /* Only need to read the partitioning fields. 
*/ bitmap_union(table->read_set, &m_part_info->full_part_field_set); + if (table->vcol_set) + bitmap_union(table->vcol_set, &m_part_info->full_part_field_set); } if ((result= m_file[read_part_id]->ha_rnd_init(1))) @@ -10606,8 +10658,6 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair) { if ((result= m_file[read_part_id]->ha_rnd_next(m_rec0))) { - if (result == HA_ERR_RECORD_DELETED) - continue; if (result != HA_ERR_END_OF_FILE) break; @@ -10958,7 +11008,7 @@ int ha_partition::exec_bulk_update(ha_rows *dup_key_found) do { - if ((error= (*file)->exec_bulk_update(dup_key_found))) + if (unlikely((error= (*file)->exec_bulk_update(dup_key_found)))) DBUG_RETURN(error); } while (*(++file)); DBUG_RETURN(0); @@ -11123,9 +11173,9 @@ int ha_partition::direct_update_rows_init() bitmap_is_set(&(m_part_info->lock_partitions), i)) { file= m_file[i]; - if ((error= (m_pre_calling ? - file->pre_direct_update_rows_init() : - file->direct_update_rows_init()))) + if (unlikely((error= (m_pre_calling ? + file->pre_direct_update_rows_init() : + file->direct_update_rows_init())))) { DBUG_PRINT("info", ("partition FALSE by storage engine")); DBUG_RETURN(error); @@ -11221,14 +11271,14 @@ int ha_partition::direct_update_rows(ha_rows *update_rows_result) { if (rnd_seq && (m_pre_calling ? file->pre_inited : file->inited) == NONE) { - if ((error= (m_pre_calling ? - file->ha_pre_rnd_init(TRUE) : - file->ha_rnd_init(TRUE)))) + if (unlikely((error= (m_pre_calling ? + file->ha_pre_rnd_init(TRUE) : + file->ha_rnd_init(TRUE))))) DBUG_RETURN(error); } - if ((error= (m_pre_calling ? - (file)->pre_direct_update_rows() : - (file)->ha_direct_update_rows(&update_rows)))) + if (unlikely((error= (m_pre_calling ? + (file)->pre_direct_update_rows() : + (file)->ha_direct_update_rows(&update_rows))))) { if (rnd_seq) { @@ -11243,9 +11293,9 @@ int ha_partition::direct_update_rows(ha_rows *update_rows_result) } if (rnd_seq) { - if ((error= (m_pre_calling ? - file->ha_pre_index_or_rnd_end() : - file->ha_index_or_rnd_end()))) + if (unlikely((error= (m_pre_calling ? + file->ha_pre_index_or_rnd_end() : + file->ha_index_or_rnd_end())))) DBUG_RETURN(error); } } @@ -11309,9 +11359,9 @@ int ha_partition::direct_delete_rows_init() bitmap_is_set(&(m_part_info->lock_partitions), i)) { handler *file= m_file[i]; - if ((error= (m_pre_calling ? - file->pre_direct_delete_rows_init() : - file->direct_delete_rows_init()))) + if (unlikely((error= (m_pre_calling ? + file->pre_direct_delete_rows_init() : + file->direct_delete_rows_init())))) { DBUG_PRINT("exit", ("error in direct_delete_rows_init")); DBUG_RETURN(error); @@ -11408,9 +11458,9 @@ int ha_partition::direct_delete_rows(ha_rows *delete_rows_result) { if (rnd_seq && (m_pre_calling ? file->pre_inited : file->inited) == NONE) { - if ((error= (m_pre_calling ? - file->ha_pre_rnd_init(TRUE) : - file->ha_rnd_init(TRUE)))) + if (unlikely((error= (m_pre_calling ? + file->ha_pre_rnd_init(TRUE) : + file->ha_rnd_init(TRUE))))) DBUG_RETURN(error); } if ((error= (m_pre_calling ? @@ -11427,9 +11477,9 @@ int ha_partition::direct_delete_rows(ha_rows *delete_rows_result) } if (rnd_seq) { - if ((error= (m_pre_calling ? - file->ha_pre_index_or_rnd_end() : - file->ha_index_or_rnd_end()))) + if (unlikely((error= (m_pre_calling ? 
+ file->ha_pre_index_or_rnd_end() : + file->ha_index_or_rnd_end())))) DBUG_RETURN(error); } } diff --git a/sql/ha_sequence.cc b/sql/ha_sequence.cc index b500ce3c1f6..d5064af16c3 100644 --- a/sql/ha_sequence.cc +++ b/sql/ha_sequence.cc @@ -81,17 +81,21 @@ int ha_sequence::open(const char *name, int mode, uint flags) DBUG_ASSERT(table->s == table_share && file); file->table= table; - if (!(error= file->open(name, mode, flags))) + if (likely(!(error= file->open(name, mode, flags)))) { /* - Copy values set by handler::open() in the underlying handler - Reuse original storage engine data for duplicate key reference - It would be easier to do this if we would have another handler - call: fixup_after_open()... - */ - ref= file->ref; + Allocate ref in table's mem_root. We can't use table's ref + as it's allocated by ha_ caller that allocates this. + */ ref_length= file->ref_length; - dup_ref= file->dup_ref; + if (!(ref= (uchar*) alloc_root(&table->mem_root,ALIGN_SIZE(ref_length)*2))) + { + file->ha_close(); + error=HA_ERR_OUT_OF_MEM; + DBUG_RETURN(error); + } + file->ref= ref; + file->dup_ref= dup_ref= ref+ALIGN_SIZE(file->ref_length); /* ha_open() sets the following for us. We have to set this for the @@ -107,7 +111,7 @@ int ha_sequence::open(const char *name, int mode, uint flags) /* Don't try to read the inital row the call is part of create code */ if (!(flags & (HA_OPEN_FOR_CREATE | HA_OPEN_FOR_REPAIR))) { - if ((error= table->s->sequence->read_initial_values(table))) + if (unlikely((error= table->s->sequence->read_initial_values(table)))) file->ha_close(); } else @@ -212,7 +216,7 @@ int ha_sequence::write_row(uchar *buf) if (tmp_seq.check_and_adjust(0)) DBUG_RETURN(HA_ERR_SEQUENCE_INVALID_DATA); sequence->copy(&tmp_seq); - if (!(error= file->write_row(buf))) + if (likely(!(error= file->write_row(buf)))) sequence->initialized= SEQUENCE::SEQ_READY_TO_USE; DBUG_RETURN(error); } @@ -229,14 +233,10 @@ int ha_sequence::write_row(uchar *buf) - Get an exclusive lock for the table. This is needed to ensure that we excute all full inserts (same as ALTER SEQUENCE) in same order on master and slaves - - Check that we are only using one table. - This is to avoid deadlock problems when upgrading lock to exlusive. - Check that the new row is an accurate SEQUENCE object */ THD *thd= table->in_use; - if (thd->lock->table_count != 1) - DBUG_RETURN(ER_WRONG_INSERT_INTO_SEQUENCE); if (table->s->tmp_table == NO_TMP_TABLE && thd->mdl_context.upgrade_shared_lock(table->mdl_ticket, MDL_EXCLUSIVE, @@ -255,7 +255,7 @@ int ha_sequence::write_row(uchar *buf) sequence->write_lock(table); } - if (!(error= file->update_first_row(buf))) + if (likely(!(error= file->update_first_row(buf)))) { Log_func *log_func= Write_rows_log_event::binlog_row_logging_function; if (!sequence_locked) @@ -322,7 +322,8 @@ int ha_sequence::external_lock(THD *thd, int lock_type) Copy lock flag to satisfy DBUG_ASSERT checks in ha_* functions in handler.cc when we later call it with file->ha_..() */ - file->m_lock_type= lock_type; + if (!error) + file->m_lock_type= lock_type; return error; } diff --git a/sql/handler.cc b/sql/handler.cc index 2c93ffeaa73..07459d4cd8c 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -698,7 +698,7 @@ int ha_end() So if flag is equal to HA_PANIC_CLOSE, the deallocate the errors. 
*/ - if (ha_finish_errors()) + if (unlikely(ha_finish_errors())) error= 1; DBUG_RETURN(error); @@ -1197,7 +1197,7 @@ int ha_prepare(THD *thd) handlerton *ht= ha_info->ht(); if (ht->prepare) { - if (prepare_or_error(ht, thd, all)) + if (unlikely(prepare_or_error(ht, thd, all))) { ha_rollback_trans(thd, all); error=1; @@ -1475,7 +1475,7 @@ int ha_commit_trans(THD *thd, bool all) Sic: we know that prepare() is not NULL since otherwise trans->no_2pc would have been set. */ - if (prepare_or_error(ht, thd, all)) + if (unlikely(prepare_or_error(ht, thd, all))) goto err; need_prepare_ordered|= (ht->prepare_ordered != NULL); @@ -2525,7 +2525,7 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path, dummy_table.s= &dummy_share; path= get_canonical_filename(file, path, tmp_path); - if ((error= file->ha_delete_table(path))) + if (unlikely((error= file->ha_delete_table(path)))) { /* it's not an error if the table doesn't exist in the engine. @@ -2681,7 +2681,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode, set_partitions_to_open(partitions_to_open); - if ((error=open(name,mode,test_if_locked))) + if (unlikely((error=open(name,mode,test_if_locked)))) { if ((error == EACCES || error == EROFS) && mode == O_RDWR && (table->db_stat & HA_TRY_READ_ONLY)) @@ -2690,7 +2690,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode, error=open(name,O_RDONLY,test_if_locked); } } - if (error) + if (unlikely(error)) { my_errno= error; /* Safeguard */ DBUG_PRINT("error",("error: %d errno: %d",error,errno)); @@ -2759,19 +2759,27 @@ int handler::ha_rnd_next(uchar *buf) m_lock_type != F_UNLCK); DBUG_ASSERT(inited == RND); - TABLE_IO_WAIT(tracker, m_psi, PSI_TABLE_FETCH_ROW, MAX_KEY, 0, - { result= rnd_next(buf); }) - if (!result) + do { - update_rows_read(); - if (table->vfield && buf == table->record[0]) - table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ); + TABLE_IO_WAIT(tracker, m_psi, PSI_TABLE_FETCH_ROW, MAX_KEY, 0, + { result= rnd_next(buf); }) + if (result != HA_ERR_RECORD_DELETED) + break; + status_var_increment(table->in_use->status_var.ha_read_rnd_deleted_count); + } while (!table->in_use->check_killed(1)); + + if (result == HA_ERR_RECORD_DELETED) + result= HA_ERR_ABORTED_BY_USER; + else + { + if (!result) + { + update_rows_read(); + if (table->vfield && buf == table->record[0]) + table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ); + } increment_statistics(&SSV::ha_read_rnd_next_count); } - else if (result == HA_ERR_RECORD_DELETED) - increment_statistics(&SSV::ha_read_rnd_deleted_count); - else - increment_statistics(&SSV::ha_read_rnd_next_count); table->status=result ? 
STATUS_NOT_FOUND: 0; DBUG_RETURN(result); @@ -2789,7 +2797,9 @@ int handler::ha_rnd_pos(uchar *buf, uchar *pos) TABLE_IO_WAIT(tracker, m_psi, PSI_TABLE_FETCH_ROW, MAX_KEY, 0, { result= rnd_pos(buf, pos); }) increment_statistics(&SSV::ha_read_rnd_count); - if (!result) + if (result == HA_ERR_RECORD_DELETED) + result= HA_ERR_KEY_NOT_FOUND; + else if (!result) { update_rows_read(); if (table->vfield && buf == table->record[0]) @@ -2967,7 +2977,7 @@ bool handler::ha_was_semi_consistent_read() int handler::ha_rnd_init_with_error(bool scan) { int error; - if (!(error= ha_rnd_init(scan))) + if (likely(!(error= ha_rnd_init(scan)))) return 0; table->file->print_error(error, MYF(0)); return error; @@ -2983,7 +2993,7 @@ int handler::ha_rnd_init_with_error(bool scan) */ int handler::read_first_row(uchar * buf, uint primary_key) { - register int error; + int error; DBUG_ENTER("handler::read_first_row"); /* @@ -2994,23 +3004,22 @@ int handler::read_first_row(uchar * buf, uint primary_key) if (stats.deleted < 10 || primary_key >= MAX_KEY || !(index_flags(primary_key, 0, 0) & HA_READ_ORDER)) { - if (!(error= ha_rnd_init(1))) + if (likely(!(error= ha_rnd_init(1)))) { - while ((error= ha_rnd_next(buf)) == HA_ERR_RECORD_DELETED) - /* skip deleted row */; + error= ha_rnd_next(buf); const int end_error= ha_rnd_end(); - if (!error) + if (likely(!error)) error= end_error; } } else { /* Find the first row through the primary key */ - if (!(error= ha_index_init(primary_key, 0))) + if (likely(!(error= ha_index_init(primary_key, 0)))) { error= ha_index_first(buf); const int end_error= ha_index_end(); - if (!error) + if (likely(!error)) error= end_error; } } @@ -3430,7 +3439,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment, *nb_reserved_values= 1; } - if (error) + if (unlikely(error)) { if (error == HA_ERR_END_OF_FILE || error == HA_ERR_KEY_NOT_FOUND) /* No entry found, that's fine */; @@ -3818,7 +3827,7 @@ void handler::print_error(int error, myf errflag) } } DBUG_ASSERT(textno > 0); - if (fatal_error) + if (unlikely(fatal_error)) { /* Ensure this becomes a true error */ errflag&= ~(ME_JUST_WARNING | ME_JUST_INFO); @@ -3945,7 +3954,7 @@ int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt) if (table->s->frm_version < FRM_VER_TRUE_VARCHAR) return HA_ADMIN_NEEDS_ALTER; - if ((error= check_collation_compatibility())) + if (unlikely((error= check_collation_compatibility()))) return error; return check_for_upgrade(check_opt); @@ -4023,7 +4032,8 @@ uint handler::get_dup_key(int error) m_lock_type != F_UNLCK); DBUG_ENTER("handler::get_dup_key"); table->file->errkey = (uint) -1; - if (error == HA_ERR_FOUND_DUPP_KEY || error == HA_ERR_FOREIGN_DUPLICATE_KEY || + if (error == HA_ERR_FOUND_DUPP_KEY || + error == HA_ERR_FOREIGN_DUPLICATE_KEY || error == HA_ERR_FOUND_DUPP_UNIQUE || error == HA_ERR_NULL_IN_SPATIAL || error == HA_ERR_DROP_INDEX_FK) table->file->info(HA_STATUS_ERRKEY | HA_STATUS_NO_LOCK); @@ -4087,14 +4097,14 @@ int handler::rename_table(const char * from, const char * to) start_ext= bas_ext(); for (ext= start_ext; *ext ; ext++) { - if (rename_file_ext(from, to, *ext)) + if (unlikely(rename_file_ext(from, to, *ext))) { if ((error=my_errno) != ENOENT) break; error= 0; } } - if (error) + if (unlikely(error)) { /* Try to revert the rename. Ignore errors. 
*/ for (; ext >= start_ext; ext--) @@ -4138,15 +4148,15 @@ int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt) if (table->s->mysql_version < MYSQL_VERSION_ID) { - if ((error= check_old_types())) + if (unlikely((error= check_old_types()))) return error; error= ha_check_for_upgrade(check_opt); - if (error && (error != HA_ADMIN_NEEDS_CHECK)) + if (unlikely(error && (error != HA_ADMIN_NEEDS_CHECK))) return error; - if (!error && (check_opt->sql_flags & TT_FOR_UPGRADE)) + if (unlikely(!error && (check_opt->sql_flags & TT_FOR_UPGRADE))) return 0; } - if ((error= check(thd, check_opt))) + if (unlikely((error= check(thd, check_opt)))) return error; /* Skip updating frm version if not main handler. */ if (table->file != this) @@ -4469,20 +4479,8 @@ handler::check_if_supported_inplace_alter(TABLE *altered_table, DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } - -/* - Default implementation to support in-place alter table - and old online add/drop index API -*/ - -void handler::notify_table_changed() -{ - ha_create_partitioning_metadata(table->s->path.str, NULL, CHF_INDEX_FLAG); -} - - void Alter_inplace_info::report_unsupported_error(const char *not_supported, - const char *try_instead) + const char *try_instead) const { if (unsupported_reason == NULL) my_error(ER_ALTER_OPERATION_NOT_SUPPORTED, MYF(0), @@ -4579,7 +4577,6 @@ handler::ha_create_partitioning_metadata(const char *name, */ DBUG_ASSERT(m_lock_type == F_UNLCK || (!old_name && strcmp(name, table_share->path.str))); - mark_trx_read_write(); return create_partitioning_metadata(name, old_name, action_flag); } @@ -4669,7 +4666,7 @@ int ha_enable_transaction(THD *thd, bool on) is an optimization hint that storage engine is free to ignore. So, let's commit an open transaction (if any) now. */ - if (!(error= ha_commit_trans(thd, 0))) + if (likely(!(error= ha_commit_trans(thd, 0)))) error= trans_commit_implicit(thd); } DBUG_RETURN(error); @@ -4770,7 +4767,8 @@ void handler::update_global_table_stats() if (rows_read + rows_changed == 0) return; // Nothing to update. - DBUG_ASSERT(table->s && table->s->table_cache_key.str); + DBUG_ASSERT(table->s); + DBUG_ASSERT(table->s->table_cache_key.str); mysql_mutex_lock(&LOCK_global_table_stats); /* Gets the global table stats, creating one if necessary. */ @@ -4927,7 +4925,7 @@ int ha_create_table(THD *thd, const char *path, error= table.file->ha_create(name, &table, create_info); - if (error) + if (unlikely(error)) { if (!thd->is_error()) my_error(ER_CANT_CREATE_TABLE, MYF(0), db, table_name, error); @@ -5082,7 +5080,7 @@ static my_bool discover_handlerton(THD *thd, plugin_ref plugin, int error= hton->discover_table(hton, thd, share); if (error != HA_ERR_NO_SUCH_TABLE) { - if (error) + if (unlikely(error)) { if (!share->error) { @@ -5719,12 +5717,12 @@ int handler::index_read_idx_map(uchar * buf, uint index, const uchar * key, int error, UNINIT_VAR(error1); error= ha_index_init(index, 0); - if (!error) + if (likely(!error)) { error= index_read_map(buf, key, keypart_map, find_flag); error1= ha_index_end(); } - return error ? error : error1; + return error ? error : error1; } @@ -5861,7 +5859,7 @@ bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat) We also check thd->is_error() as Innodb may return 0 even if there was an error. 
*/ - if (!result && !thd->is_error()) + if (likely(!result && !thd->is_error())) my_eof(thd); else if (!thd->is_error()) my_error(ER_GET_ERRNO, MYF(0), errno, hton_name(db_type)->str); @@ -6124,7 +6122,7 @@ int handler::ha_external_lock(THD *thd, int lock_type) DBUG_EXECUTE_IF("external_lock_failure", error= HA_ERR_GENERIC;); - if (error == 0 || lock_type == F_UNLCK) + if (likely(error == 0 || lock_type == F_UNLCK)) { m_lock_type= lock_type; cached_table_flags= table_flags(); @@ -6245,10 +6243,10 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data) int handler::update_first_row(uchar *new_data) { int error; - if (!(error= ha_rnd_init(1))) + if (likely(!(error= ha_rnd_init(1)))) { int end_error; - if (!(error= ha_rnd_next(table->record[1]))) + if (likely(!(error= ha_rnd_next(table->record[1])))) { /* We have to do the memcmp as otherwise we may get error 169 from InnoDB @@ -6257,7 +6255,7 @@ int handler::update_first_row(uchar *new_data) error= update_row(table->record[1], new_data); } end_error= ha_rnd_end(); - if (!error) + if (likely(!error)) error= end_error; /* Logging would be wrong if update_row works but ha_rnd_end fails */ DBUG_ASSERT(!end_error || error != 0); @@ -6374,7 +6372,8 @@ void handler::use_hidden_primary_key() Handler_share *handler::get_ha_share_ptr() { DBUG_ENTER("handler::get_ha_share_ptr"); - DBUG_ASSERT(ha_share && table_share); + DBUG_ASSERT(ha_share); + DBUG_ASSERT(table_share); #ifndef DBUG_OFF if (table_share->tmp_table == NO_TMP_TABLE) @@ -6854,7 +6853,8 @@ static Create_field *vers_init_sys_field(THD *thd, const char *field_name, int f f->flags= flags | NOT_NULL_FLAG; if (integer) { - f->set_handler(&type_handler_longlong); + DBUG_ASSERT(0); // Not implemented yet + f->set_handler(&type_handler_vers_trx_id); f->length= MY_INT64_NUM_DECIMAL_DIGITS - 1; f->flags|= UNSIGNED_FLAG; } @@ -6884,8 +6884,8 @@ static bool vers_create_sys_field(THD *thd, const char *field_name, return false; } -const LString_i Vers_parse_info::default_start= "row_start"; -const LString_i Vers_parse_info::default_end= "row_end"; +const Lex_ident Vers_parse_info::default_start= "row_start"; +const Lex_ident Vers_parse_info::default_end= "row_end"; bool Vers_parse_info::fix_implicit(THD *thd, Alter_info *alter_info) { @@ -7055,7 +7055,7 @@ bool Vers_parse_info::fix_alter_info(THD *thd, Alter_info *alter_info, { if (f->change.length && f->flags & VERS_SYSTEM_FIELD) { - my_error(ER_UNSUPPORTED_ACTION_ON_GENERATED_COLUMN, MYF(0)); + my_error(ER_VERS_ALTER_SYSTEM_FIELD, MYF(0), f->field_name.str); return true; } } @@ -7079,10 +7079,12 @@ bool Vers_parse_info::fix_alter_info(THD *thd, Alter_info *alter_info, // copy info from existing table create_info->options|= HA_VERSIONED_TABLE; - DBUG_ASSERT(share->vers_start_field() && share->vers_end_field()); - LString_i start(share->vers_start_field()->field_name); - LString_i end(share->vers_end_field()->field_name); - DBUG_ASSERT(start.ptr() && end.ptr()); + DBUG_ASSERT(share->vers_start_field()); + DBUG_ASSERT(share->vers_end_field()); + Lex_ident start(share->vers_start_field()->field_name); + Lex_ident end(share->vers_end_field()->field_name); + DBUG_ASSERT(start.str); + DBUG_ASSERT(end.str); as_row= start_end_t(start, end); system_time= as_row; @@ -7186,8 +7188,8 @@ bool Vers_parse_info::need_check(const Alter_info *alter_info) const alter_info->flags & ALTER_DROP_SYSTEM_VERSIONING || *this; } -bool Vers_parse_info::check_conditions(const LString &table_name, - const LString &db) const +bool 
Vers_parse_info::check_conditions(const Lex_table_name &table_name, + const Lex_table_name &db) const { if (!as_row.start || !as_row.end) { @@ -7217,7 +7219,8 @@ bool Vers_parse_info::check_conditions(const LString &table_name, return false; } -bool Vers_parse_info::check_sys_fields(const LString &table_name, const LString &db, +bool Vers_parse_info::check_sys_fields(const Lex_table_name &table_name, + const Lex_table_name &db, Alter_info *alter_info, bool native) { if (check_conditions(table_name, db)) diff --git a/sql/handler.h b/sql/handler.h index c5d92cbbaab..3d1b764bd14 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -73,11 +73,14 @@ class sequence_definition; */ enum enum_alter_inplace_result { HA_ALTER_ERROR, + HA_ALTER_INPLACE_COPY_NO_LOCK, + HA_ALTER_INPLACE_COPY_LOCK, + HA_ALTER_INPLACE_NOCOPY_LOCK, + HA_ALTER_INPLACE_NOCOPY_NO_LOCK, + HA_ALTER_INPLACE_INSTANT, HA_ALTER_INPLACE_NOT_SUPPORTED, HA_ALTER_INPLACE_EXCLUSIVE_LOCK, - HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE, HA_ALTER_INPLACE_SHARED_LOCK, - HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE, HA_ALTER_INPLACE_NO_LOCK }; @@ -628,43 +631,41 @@ typedef ulonglong alter_table_operations; ALTER_DROP_STORED_COLUMN) #define ALTER_COLUMN_DEFAULT ALTER_CHANGE_COLUMN_DEFAULT -#define ALTER_DROP_HISTORICAL (1ULL << 35) - // Add non-unique, non-primary index -#define ALTER_ADD_NON_UNIQUE_NON_PRIM_INDEX (1ULL << 36) +#define ALTER_ADD_NON_UNIQUE_NON_PRIM_INDEX (1ULL << 35) // Drop non-unique, non-primary index -#define ALTER_DROP_NON_UNIQUE_NON_PRIM_INDEX (1ULL << 37) +#define ALTER_DROP_NON_UNIQUE_NON_PRIM_INDEX (1ULL << 36) // Add unique, non-primary index -#define ALTER_ADD_UNIQUE_INDEX (1ULL << 38) +#define ALTER_ADD_UNIQUE_INDEX (1ULL << 37) // Drop unique, non-primary index -#define ALTER_DROP_UNIQUE_INDEX (1ULL << 39) +#define ALTER_DROP_UNIQUE_INDEX (1ULL << 38) // Add primary index -#define ALTER_ADD_PK_INDEX (1ULL << 40) +#define ALTER_ADD_PK_INDEX (1ULL << 39) // Drop primary index -#define ALTER_DROP_PK_INDEX (1ULL << 41) +#define ALTER_DROP_PK_INDEX (1ULL << 40) // Virtual generated column -#define ALTER_ADD_VIRTUAL_COLUMN (1ULL << 42) +#define ALTER_ADD_VIRTUAL_COLUMN (1ULL << 41) // Stored base (non-generated) column -#define ALTER_ADD_STORED_BASE_COLUMN (1ULL << 43) +#define ALTER_ADD_STORED_BASE_COLUMN (1ULL << 42) // Stored generated column -#define ALTER_ADD_STORED_GENERATED_COLUMN (1ULL << 44) +#define ALTER_ADD_STORED_GENERATED_COLUMN (1ULL << 43) // Drop column -#define ALTER_DROP_VIRTUAL_COLUMN (1ULL << 45) -#define ALTER_DROP_STORED_COLUMN (1ULL << 46) +#define ALTER_DROP_VIRTUAL_COLUMN (1ULL << 44) +#define ALTER_DROP_STORED_COLUMN (1ULL << 45) // Rename column (verified; ALTER_RENAME_COLUMN may use original name) -#define ALTER_COLUMN_NAME (1ULL << 47) +#define ALTER_COLUMN_NAME (1ULL << 46) // Change column datatype -#define ALTER_VIRTUAL_COLUMN_TYPE (1ULL << 48) -#define ALTER_STORED_COLUMN_TYPE (1ULL << 49) +#define ALTER_VIRTUAL_COLUMN_TYPE (1ULL << 47) +#define ALTER_STORED_COLUMN_TYPE (1ULL << 48) /** Change column datatype in such way that new type has compatible @@ -672,45 +673,45 @@ typedef ulonglong alter_table_operations; possible to perform change by only updating data dictionary without changing table rows. 
*/ -#define ALTER_COLUMN_EQUAL_PACK_LENGTH (1ULL << 50) +#define ALTER_COLUMN_EQUAL_PACK_LENGTH (1ULL << 49) // Reorder column -#define ALTER_STORED_COLUMN_ORDER (1ULL << 51) +#define ALTER_STORED_COLUMN_ORDER (1ULL << 50) // Reorder column -#define ALTER_VIRTUAL_COLUMN_ORDER (1ULL << 52) +#define ALTER_VIRTUAL_COLUMN_ORDER (1ULL << 51) // Change column from NOT NULL to NULL -#define ALTER_COLUMN_NULLABLE (1ULL << 53) +#define ALTER_COLUMN_NULLABLE (1ULL << 52) // Change column from NULL to NOT NULL -#define ALTER_COLUMN_NOT_NULLABLE (1ULL << 54) +#define ALTER_COLUMN_NOT_NULLABLE (1ULL << 53) // Change column generation expression -#define ALTER_VIRTUAL_GCOL_EXPR (1ULL << 55) -#define ALTER_STORED_GCOL_EXPR (1ULL << 56) +#define ALTER_VIRTUAL_GCOL_EXPR (1ULL << 54) +#define ALTER_STORED_GCOL_EXPR (1ULL << 55) // column's engine options changed, something in field->option_struct -#define ALTER_COLUMN_OPTION (1ULL << 57) +#define ALTER_COLUMN_OPTION (1ULL << 56) // MySQL alias for the same thing: -#define ALTER_COLUMN_STORAGE_TYPE (1ULL << 58) +#define ALTER_COLUMN_STORAGE_TYPE ALTER_COLUMN_OPTION // Change the column format of column -#define ALTER_COLUMN_COLUMN_FORMAT (1ULL << 59) +#define ALTER_COLUMN_COLUMN_FORMAT (1ULL << 57) /** Changes in generated columns that affect storage, for example, when a vcol type or expression changes and this vcol is indexed or used in a partitioning expression */ -#define ALTER_COLUMN_VCOL (1ULL << 60) +#define ALTER_COLUMN_VCOL (1ULL << 58) /** ALTER TABLE for a partitioned table. The engine needs to commit online alter of all partitions atomically (using group_commit_ctx) */ -#define ALTER_PARTITIONED (1ULL << 61) +#define ALTER_PARTITIONED (1ULL << 59) /* Flags set in partition_flags when altering partitions @@ -1919,15 +1920,15 @@ struct Vers_parse_info start_end_t(LEX_CSTRING _start, LEX_CSTRING _end) : start(_start), end(_end) {} - LString_i start; - LString_i end; + Lex_ident start; + Lex_ident end; }; start_end_t system_time; start_end_t as_row; vers_sys_type_t check_unit; - void set_system_time(LString start, LString end) + void set_system_time(Lex_ident start, Lex_ident end) { system_time.start= start; system_time.end= end; @@ -1955,16 +1956,18 @@ protected: return as_row.start || as_row.end || system_time.start || system_time.end; } bool need_check(const Alter_info *alter_info) const; - bool check_conditions(const LString &table_name, const LString &db) const; + bool check_conditions(const Lex_table_name &table_name, + const Lex_table_name &db) const; public: - static const LString_i default_start; - static const LString_i default_end; + static const Lex_ident default_start; + static const Lex_ident default_end; bool fix_alter_info(THD *thd, Alter_info *alter_info, HA_CREATE_INFO *create_info, TABLE *table); bool fix_create_like(Alter_info &alter_info, HA_CREATE_INFO &create_info, TABLE_LIST &src_table, TABLE_LIST &table); - bool check_sys_fields(const LString &table_name, const LString &db, + bool check_sys_fields(const Lex_table_name &table_name, + const Lex_table_name &db, Alter_info *alter_info, bool native); /** @@ -2304,8 +2307,7 @@ public: /** Can be set by handler to describe why a given operation cannot be done in-place (HA_ALTER_INPLACE_NOT_SUPPORTED) or why it cannot be done - online (HA_ALTER_INPLACE_NO_LOCK or - HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE) + online (HA_ALTER_INPLACE_NO_LOCK or HA_ALTER_INPLACE_COPY_NO_LOCK) If set, it will be used with ER_ALTER_OPERATION_NOT_SUPPORTED_REASON if results from 
handler::check_if_supported_inplace_alter() doesn't match requirements set by user. If not set, the more generic @@ -2353,7 +2355,7 @@ public: replace not_supported with. */ void report_unsupported_error(const char *not_supported, - const char *try_instead); + const char *try_instead) const; }; @@ -4060,8 +4062,8 @@ public: *) As the first step, we acquire a lock corresponding to the concurrency level which was returned by handler::check_if_supported_inplace_alter() and requested by the user. This lock is held for most of the - duration of in-place ALTER (if HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE - or HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE were returned we acquire an + duration of in-place ALTER (if HA_ALTER_INPLACE_COPY_LOCK + or HA_ALTER_INPLACE_COPY_NO_LOCK were returned we acquire an exclusive lock for duration of the next step only). *) After that we call handler::ha_prepare_inplace_alter_table() to give the storage engine a chance to update its internal structures with a higher @@ -4105,12 +4107,12 @@ public: @retval HA_ALTER_ERROR Unexpected error. @retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported, must use copy. @retval HA_ALTER_INPLACE_EXCLUSIVE_LOCK Supported, but requires X lock. - @retval HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE + @retval HA_ALTER_INPLACE_COPY_LOCK Supported, but requires SNW lock during main phase. Prepare phase requires X lock. @retval HA_ALTER_INPLACE_SHARED_LOCK Supported, but requires SNW lock. - @retval HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE + @retval HA_ALTER_INPLACE_COPY_NO_LOCK Supported, concurrent reads/writes allowed. However, prepare phase requires X lock. @@ -4170,10 +4172,9 @@ protected: /** Allows the storage engine to update internal structures with concurrent writes blocked. If check_if_supported_inplace_alter() returns - HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE or - HA_ALTER_INPLACE_SHARED_AFTER_PREPARE, this function is called with - exclusive lock otherwise the same level of locking as for - inplace_alter_table() will be used. + HA_ALTER_INPLACE_COPY_NO_LOCK or HA_ALTER_INPLACE_COPY_LOCK, + this function is called with exclusive lock otherwise the same level + of locking as for inplace_alter_table() will be used. @note Storage engines are responsible for reporting any errors by calling my_error()/print_error() @@ -4271,7 +4272,7 @@ protected: @note No errors are allowed during notify_table_changed(). */ - virtual void notify_table_changed(); + virtual void notify_table_changed() { } public: /* End of On-line/in-place ALTER TABLE interface. 
*/ diff --git a/sql/hostname.cc b/sql/hostname.cc index 2d39a8bb03d..00c780d9f35 100644 --- a/sql/hostname.cc +++ b/sql/hostname.cc @@ -460,7 +460,7 @@ int ip_to_hostname(struct sockaddr_storage *ip_storage, entry->m_last_seen= now; *connect_errors= entry->m_errors.m_connect; - if (entry->m_errors.m_connect >= max_connect_errors) + if (unlikely(entry->m_errors.m_connect >= max_connect_errors)) { entry->m_errors.m_host_blocked++; entry->set_error_timestamps(now); diff --git a/sql/item.cc b/sql/item.cc index c19ad32f6ce..b07d959375b 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -295,7 +295,7 @@ longlong Item::val_int_signed_typecast_from_str() { int error; longlong value= val_int_from_str(&error); - if (!null_value && value < 0 && error == 0) + if (unlikely(!null_value && value < 0 && error == 0)) push_note_converted_to_negative_complement(current_thd); return value; } @@ -305,7 +305,7 @@ longlong Item::val_int_unsigned_typecast_from_str() { int error; longlong value= val_int_from_str(&error); - if (!null_value && error < 0) + if (unlikely(!null_value && error < 0)) push_note_converted_to_positive_complement(current_thd); return value; } @@ -508,6 +508,8 @@ Item::Item(THD *thd): marker= 0; maybe_null=null_value=with_sum_func=with_window_func=with_field=0; in_rollup= 0; + with_param= 0; + /* Initially this item is not attached to any JOIN_TAB. */ join_tab_idx= MAX_TABLES; @@ -549,6 +551,7 @@ Item::Item(THD *thd, Item *item): in_rollup(item->in_rollup), null_value(item->null_value), with_sum_func(item->with_sum_func), + with_param(item->with_param), with_window_func(item->with_window_func), with_field(item->with_field), fixed(item->fixed), @@ -703,12 +706,11 @@ Item* Item::set_expr_cache(THD *thd) { DBUG_ENTER("Item::set_expr_cache"); Item_cache_wrapper *wrapper; - if ((wrapper= new (thd->mem_root) Item_cache_wrapper(thd, this)) && - !wrapper->fix_fields(thd, (Item**)&wrapper)) + if (likely((wrapper= new (thd->mem_root) Item_cache_wrapper(thd, this))) && + likely(!wrapper->fix_fields(thd, (Item**)&wrapper))) { - if (wrapper->set_cache(thd)) - DBUG_RETURN(NULL); - DBUG_RETURN(wrapper); + if (likely(!wrapper->set_cache(thd))) + DBUG_RETURN(wrapper); } DBUG_RETURN(NULL); } @@ -1307,7 +1309,7 @@ Item *Item_cache::safe_charset_converter(THD *thd, CHARSET_INFO *tocs) return this; Item_cache *cache; if (!conv || conv->fix_fields(thd, (Item **) NULL) || - !(cache= new (thd->mem_root) Item_cache_str(thd, conv))) + unlikely(!(cache= new (thd->mem_root) Item_cache_str(thd, conv)))) return NULL; // Safe conversion is not possible, or OEM cache->setup(thd, conv); cache->fixed= false; // Make Item::fix_fields() happy @@ -1389,7 +1391,7 @@ Item *Item::const_charset_converter(THD *thd, CHARSET_INFO *tocs, collation.derivation, collation.repertoire)); - if (!conv || (conv_errors && lossless)) + if (unlikely(!conv || (conv_errors && lossless))) { /* Safe conversion is not possible (or EOM). 
@@ -1688,6 +1690,9 @@ bool Item_sp_variable::fix_fields_from_item(THD *thd, Item **, const Item *it) max_length= it->max_length; decimals= it->decimals; unsigned_flag= it->unsigned_flag; + with_param= 1; + if (thd->lex->current_select && thd->lex->current_select->master_unit()->item) + thd->lex->current_select->master_unit()->item->with_param= 1; fixed= 1; collation.set(it->collation.collation, it->collation.derivation); @@ -2760,13 +2765,13 @@ bool Type_std_attributes::agg_item_set_converter(const DTCollation &coll, Item* Item_func_or_sum::build_clone(THD *thd) { Item_func_or_sum *copy= (Item_func_or_sum *) get_copy(thd); - if (!copy) + if (unlikely(!copy)) return 0; if (arg_count > 2) { copy->args= (Item**) alloc_root(thd->mem_root, sizeof(Item*) * arg_count); - if (!copy->args) + if (unlikely(!copy->args)) return 0; } else if (arg_count > 0) @@ -2797,6 +2802,17 @@ Item_sp::Item_sp(THD *thd, Name_resolution_context *context_arg, memset(&sp_mem_root, 0, sizeof(sp_mem_root)); } +Item_sp::Item_sp(THD *thd, Item_sp *item): + context(item->context), m_name(item->m_name), + m_sp(item->m_sp), func_ctx(NULL), sp_result_field(NULL) +{ + dummy_table= (TABLE*) thd->calloc(sizeof(TABLE)+ sizeof(TABLE_SHARE) + + sizeof(Query_arena)); + dummy_table->s= (TABLE_SHARE*) (dummy_table+1); + sp_query_arena= (Query_arena *) (dummy_table->s + 1); + memset(&sp_mem_root, 0, sizeof(sp_mem_root)); +} + const char * Item_sp::func_name(THD *thd) const { @@ -2863,7 +2879,7 @@ Item_sp::sp_check_access(THD *thd) */ bool Item_sp::execute(THD *thd, bool *null_value, Item **args, uint arg_count) { - if (execute_impl(thd, args, arg_count)) + if (unlikely(execute_impl(thd, args, arg_count))) { *null_value= 1; context->process_error(thd); @@ -2905,7 +2921,7 @@ Item_sp::execute_impl(THD *thd, Item **args, uint arg_count) thd->security_ctx= context->security_ctx; } - if (sp_check_access(thd)) + if (unlikely(sp_check_access(thd))) { thd->security_ctx= save_security_ctx; DBUG_RETURN(TRUE); @@ -2916,10 +2932,10 @@ Item_sp::execute_impl(THD *thd, Item **args, uint arg_count) statement-based replication (SBR) is active. */ - if (!m_sp->detistic() && !trust_function_creators && - (access == SP_CONTAINS_SQL || access == SP_MODIFIES_SQL_DATA) && - (mysql_bin_log.is_open() && - thd->variables.binlog_format == BINLOG_FORMAT_STMT)) + if (unlikely(!m_sp->detistic() && !trust_function_creators && + (access == SP_CONTAINS_SQL || access == SP_MODIFIES_SQL_DATA) && + (mysql_bin_log.is_open() && + thd->variables.binlog_format == BINLOG_FORMAT_STMT))) { my_error(ER_BINLOG_UNSAFE_ROUTINE, MYF(0)); thd->security_ctx= save_security_ctx; @@ -3049,9 +3065,10 @@ Item_sp::init_result_field(THD *thd, uint max_length, uint maybe_null, Item* Item_ref::build_clone(THD *thd) { Item_ref *copy= (Item_ref *) get_copy(thd); - if (!copy || - !(copy->ref= (Item**) alloc_root(thd->mem_root, sizeof(Item*))) || - !(*copy->ref= (* ref)->build_clone(thd))) + if (unlikely(!copy) || + unlikely(!(copy->ref= (Item**) alloc_root(thd->mem_root, + sizeof(Item*)))) || + unlikely(!(*copy->ref= (* ref)->build_clone(thd)))) return 0; return copy; } @@ -3165,72 +3182,16 @@ Item_field::Item_field(THD *thd, Item_field *item) } -/** - Calculate the max column length not taking into account the - limitations over integer types. - - When storing data into fields the server currently just ignores the - limits specified on integer types, e.g. 1234 can safely be stored in - an int(2) and will not cause an error. 
- Thus when creating temporary tables and doing transformations - we must adjust the maximum field length to reflect this fact. - We take the un-restricted maximum length and adjust it similarly to - how the declared length is adjusted wrt unsignedness etc. - TODO: this all needs to go when we disable storing 1234 in int(2). - - @param field_par Original field the use to calculate the lengths - @param max_length Item's calculated explicit max length - @return The adjusted max length -*/ - -inline static uint32 -adjust_max_effective_column_length(Field *field_par, uint32 max_length) -{ - uint32 new_max_length= field_par->max_display_length(); - uint32 sign_length= (field_par->flags & UNSIGNED_FLAG) ? 0 : 1; - - switch (field_par->type()) - { - case MYSQL_TYPE_INT24: - /* - Compensate for MAX_MEDIUMINT_WIDTH being 1 too long (8) - compared to the actual number of digits that can fit into - the column. - */ - new_max_length+= 1; - /* fall through */ - case MYSQL_TYPE_LONG: - case MYSQL_TYPE_TINY: - case MYSQL_TYPE_SHORT: - - /* Take out the sign and add a conditional sign */ - new_max_length= new_max_length - 1 + sign_length; - break; - - /* BINGINT is always 20 no matter the sign */ - case MYSQL_TYPE_LONGLONG: - /* make gcc happy */ - default: - break; - } - - /* Adjust only if the actual precision based one is bigger than specified */ - return new_max_length > max_length ? new_max_length : max_length; -} - - void Item_field::set_field(Field *field_par) { field=result_field=field_par; // for easy coding with fields maybe_null=field->maybe_null(); - Type_std_attributes::set(field_par); + Type_std_attributes::set(field_par->type_std_attributes()); table_name= *field_par->table_name; field_name= field_par->field_name; db_name= field_par->table->s->db.str; alias_name_used= field_par->table->alias_name_used; - max_length= adjust_max_effective_column_length(field_par, max_length); - fixed= 1; if (field->table->s->tmp_table == SYSTEM_TMP_TABLE) any_privileges= 0; @@ -4269,7 +4230,7 @@ bool Item_param::set_str(const char *str, ulong length, been written to the binary log. */ uint dummy_errors; - if (value.m_string.copy(str, length, fromcs, tocs, &dummy_errors)) + if (unlikely(value.m_string.copy(str, length, fromcs, tocs, &dummy_errors))) DBUG_RETURN(TRUE); /* Set str_value_ptr to make sure it's in sync with str_value. @@ -6208,7 +6169,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference) Field *new_field= (*((Item_field**)res))->field; - if (new_field == NULL) + if (unlikely(new_field == NULL)) { /* The column to which we link isn't valid. */ my_error(ER_BAD_FIELD_ERROR, MYF(0), (*res)->name.str, @@ -6253,7 +6214,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference) } } - if (!select) + if (unlikely(!select)) { my_error(ER_BAD_FIELD_ERROR, MYF(0), full_name(), thd->where); goto error; @@ -6407,19 +6368,15 @@ error: return TRUE; } - -/* - @brief - Mark virtual columns as used in a partitioning expression -*/ - -bool Item_field::vcol_in_partition_func_processor(void *int_arg) +bool Item_field::post_fix_fields_part_expr_processor(void *int_arg) { DBUG_ASSERT(fixed); if (field->vcol_info) - { field->vcol_info->mark_as_in_partitioning_expr(); - } + /* + Update table_name to be real table name, not the alias. 
Because alias is + reallocated for every statement, and this item has a long life time */ + table_name= field->table->s->table_name.str; return FALSE; } @@ -6661,7 +6618,7 @@ String *Item::check_well_formed_result(String *str, bool send_error) CHARSET_INFO *cs= str->charset(); uint wlen= str->well_formed_length(); null_value= false; - if (wlen < str->length()) + if (unlikely(wlen < str->length())) { THD *thd= current_thd; char hexbuf[7]; @@ -6700,9 +6657,10 @@ String_copier_for_item::copy_with_warn(CHARSET_INFO *dstcs, String *dst, CHARSET_INFO *srccs, const char *src, uint32 src_length, uint32 nchars) { - if ((dst->copy(dstcs, srccs, src, src_length, nchars, this))) + if (unlikely((dst->copy(dstcs, srccs, src, src_length, nchars, this)))) return true; // EOM - if (const char *pos= well_formed_error_pos()) + const char *pos; + if (unlikely(pos= well_formed_error_pos())) { ErrConvString err(pos, src_length - (pos - src), &my_charset_bin); push_warning_printf(m_thd, Sql_condition::WARN_LEVEL_WARN, @@ -6713,7 +6671,7 @@ String_copier_for_item::copy_with_warn(CHARSET_INFO *dstcs, String *dst, err.ptr()); return false; } - if (const char *pos= cannot_convert_error_pos()) + if (unlikely(pos= cannot_convert_error_pos())) { char buf[16]; int mblen= my_charlen(srccs, pos, src + src_length); @@ -6846,6 +6804,11 @@ fast_field_copier Item_field::setup_fast_field_copier(Field *to) return to->get_fast_field_copier(field); } +void Item_field::save_in_result_field(bool no_conversions) +{ + bool unused; + save_field_in_field(field, &unused, result_field, no_conversions); +} /** Set a field's value from a item. @@ -7236,7 +7199,7 @@ Item_float::Item_float(THD *thd, const char *str_arg, size_t length): char *end_not_used; value= my_strntod(&my_charset_bin, (char*) str_arg, length, &end_not_used, &error); - if (error) + if (unlikely(error)) { char tmp[NAME_LEN + 1]; my_snprintf(tmp, sizeof(tmp), "%.*s", (int)length, str_arg); @@ -8209,7 +8172,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) Field *from_field; ref= 0; - if (!outer_context) + if (unlikely(!outer_context)) { /* The current reference cannot be resolved in this query. */ my_error(ER_BAD_FIELD_ERROR,MYF(0), @@ -8357,7 +8320,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) last_checked_context->select_lex->nest_level); return FALSE; } - if (ref == 0) + if (unlikely(ref == 0)) { /* The item was not a table field and not a reference */ my_error(ER_BAD_FIELD_ERROR, MYF(0), @@ -8423,6 +8386,7 @@ void Item_ref::set_properties() split_sum_func() doesn't try to change the reference. 
*/ with_sum_func= (*ref)->with_sum_func; + with_param= (*ref)->with_param; with_window_func= (*ref)->with_window_func; with_field= (*ref)->with_field; fixed= 1; @@ -8848,6 +8812,7 @@ Item_cache_wrapper::Item_cache_wrapper(THD *thd, Item *item_arg): Type_std_attributes::set(orig_item); maybe_null= orig_item->maybe_null; with_sum_func= orig_item->with_sum_func; + with_param= orig_item->with_param; with_field= orig_item->with_field; name= item_arg->name; m_with_subquery= orig_item->with_subquery(); @@ -9781,7 +9746,7 @@ bool Item_insert_value::fix_fields(THD *thd, Item **items) if (arg->type() == REF_ITEM) arg= static_cast(arg)->ref[0]; - if (arg->type() != FIELD_ITEM) + if (unlikely(arg->type() != FIELD_ITEM)) { my_error(ER_BAD_FIELD_ERROR, MYF(0), "", "VALUES() function"); return TRUE; @@ -9803,10 +9768,10 @@ bool Item_insert_value::fix_fields(THD *thd, Item **items) } else { - Field *tmp_field= field_arg->field; - /* charset doesn't matter here, it's to avoid sigsegv only */ - tmp_field= new Field_null(0, 0, Field::NONE, &field_arg->field->field_name, - &my_charset_bin); + static uchar null_bit=1; + /* charset doesn't matter here */ + Field *tmp_field= new Field_string(0, 0, &null_bit, 1, Field::NONE, + &field_arg->field->field_name, &my_charset_bin); if (tmp_field) { tmp_field->init(field_arg->field->table); @@ -9930,7 +9895,7 @@ bool Item_trigger_field::fix_fields(THD *thd, Item **items) /* Set field. */ - if (field_idx != (uint)-1) + if (likely(field_idx != (uint)-1)) { #ifndef NO_EMBEDDED_ACCESS_CHECKS /* @@ -10980,11 +10945,6 @@ Item_field::excl_dep_on_grouping_fields(st_select_lex *sel) return find_matching_grouping_field(this, sel) != NULL; } -bool Item_field::vers_trx_id() const -{ - DBUG_ASSERT(field); - return field->vers_trx_id(); -} void Item::register_in(THD *thd) { diff --git a/sql/item.h b/sql/item.h index 117c6b5c6d5..c8ec16ab836 100644 --- a/sql/item.h +++ b/sql/item.h @@ -370,11 +370,37 @@ typedef enum monotonicity_info class sp_rcontext; +/** + A helper class to collect different behavior of various kinds of SP variables: + - local SP variables and SP parameters + - PACKAGE BODY routine variables + - (there will be more kinds in the future) +*/ + class Sp_rcontext_handler { public: virtual ~Sp_rcontext_handler() {} + /** + A prefix used for SP variable names in queries: + - EXPLAIN EXTENDED + - SHOW PROCEDURE CODE + Local variables and SP parameters have empty prefixes. + Package body variables are marked with a special prefix. + This improves readability of the output of these queries, + especially when a local variable or a parameter has the same + name with a package body variable. + */ virtual const LEX_CSTRING *get_name_prefix() const= 0; + /** + At execution time THD->spcont points to the run-time context (sp_rcontext) + of the currently executed routine. + Local variables store their data in the sp_rcontext pointed by thd->spcont. + Package body variables store data in separate sp_rcontext that belongs + to the package. + This method provides access to the proper sp_rcontext structure, + depending on the SP variable kind. + */ virtual sp_rcontext *get_rcontext(sp_rcontext *ctx) const= 0; }; @@ -804,6 +830,12 @@ protected: value= NULL; return value; } + bool get_date_from_item(Item *item, MYSQL_TIME *ltime, ulonglong fuzzydate) + { + bool rc= item->get_date(ltime, fuzzydate); + null_value= MY_TEST(rc || item->null_value); + return rc; + } /* This method is used if the item was not null but convertion to TIME/DATE/DATETIME failed. 
We return a zero date if allowed, @@ -837,6 +869,7 @@ public: of a query with ROLLUP */ bool null_value; /* if item is null */ bool with_sum_func; /* True if item contains a sum func */ + bool with_param; /* True if contains an SP parameter */ bool with_window_func; /* True if item contains a window func */ /** True if any item except Item_sum contains a field. Set during parsing. @@ -949,6 +982,10 @@ public: { return type_handler(); } + virtual const Type_handler *type_handler_for_system_time() const + { + return real_type_handler(); + } /* result_type() of an item specifies how the value should be returned */ Item_result result_type() const { @@ -1647,6 +1684,7 @@ public: set field of temporary table for Item which can be switched on temporary table during query processing (grouping and so on) */ + virtual void set_result_field(Field *field) {} virtual bool is_result_field() { return 0; } virtual bool is_json_type() { return false; } virtual bool is_bool_literal() const { return false; } @@ -1819,7 +1857,7 @@ public: fields. */ virtual bool check_partition_func_processor(void *arg) { return 1;} - virtual bool vcol_in_partition_func_processor(void *arg) { return 0; } + virtual bool post_fix_fields_part_expr_processor(void *arg) { return 0; } virtual bool rename_fields_processor(void *arg) { return 0; } /** Processor used to check acceptability of an item in the defining expression for a virtual column @@ -1936,8 +1974,6 @@ public: const Tmp_field_param *param)= 0; virtual Item_field *field_for_view_update() { return 0; } - virtual bool vers_trx_id() const - { return false; } virtual Item *neg_transformer(THD *thd) { return NULL; } virtual Item *update_value_transformer(THD *thd, uchar *select_arg) { return this; } @@ -2184,7 +2220,7 @@ template inline Item* get_item_copy (THD *thd, T* item) { Item *copy= new (get_thd_memroot(thd)) T(*item); - if (copy) + if (likely(copy)) copy->register_in(thd); return copy; } @@ -2332,7 +2368,7 @@ public: Item_args(THD *thd, Item *a, Item *b, Item *c) { arg_count= 0; - if ((args= (Item**) thd_alloc(thd, sizeof(Item*) * 3))) + if (likely((args= (Item**) thd_alloc(thd, sizeof(Item*) * 3)))) { arg_count= 3; args[0]= a; args[1]= b; args[2]= c; @@ -2341,7 +2377,7 @@ public: Item_args(THD *thd, Item *a, Item *b, Item *c, Item *d) { arg_count= 0; - if ((args= (Item**) thd_alloc(thd, sizeof(Item*) * 4))) + if (likely((args= (Item**) thd_alloc(thd, sizeof(Item*) * 4)))) { arg_count= 4; args[0]= a; args[1]= b; args[2]= c; args[3]= d; @@ -2350,7 +2386,7 @@ public: Item_args(THD *thd, Item *a, Item *b, Item *c, Item *d, Item* e) { arg_count= 5; - if ((args= (Item**) thd_alloc(thd, sizeof(Item*) * 5))) + if (likely((args= (Item**) thd_alloc(thd, sizeof(Item*) * 5)))) { arg_count= 5; args[0]= a; args[1]= b; args[2]= c; args[3]= d; args[4]= e; @@ -2676,7 +2712,7 @@ public: based on result_type(), which is less exact. */ Field *create_field_for_create_select(TABLE *table) - { return tmp_table_field_from_field_type(table); } + { return create_table_field_from_handler(table); } bool is_valid_limit_clause_variable_with_error() const { @@ -2930,6 +2966,7 @@ public: return table map of the temporary table. 
*/ table_map used_tables() const { return 1; } + void set_result_field(Field *field) { result_field= field; } bool is_result_field() { return true; } void save_in_result_field(bool no_conversions) { @@ -3019,7 +3056,7 @@ public: const char *table_name_arg): Item(thd), field(par_field), db_name(db_arg), table_name(table_name_arg) { - Type_std_attributes::set(par_field); + Type_std_attributes::set(par_field->type_std_attributes()); } enum Type type() const { return FIELD_ITEM; } Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src, @@ -3149,10 +3186,6 @@ public: Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src, const Tmp_field_param *param); TYPELIB *get_typelib() const { return field->get_typelib(); } - uint32 field_flags() const - { - return field->flags; - } enum_monotonicity_info get_monotonicity_info() const { return MONOTONIC_STRICT_INCREASING; @@ -3204,7 +3237,7 @@ public: cond_equal_ref); } bool is_result_field() { return false; } - void save_in_result_field(bool no_conversions) { } + void save_in_result_field(bool no_conversions); Item *get_tmp_table_item(THD *thd); bool collect_item_field_processor(void * arg); bool add_field_to_set_processor(void * arg); @@ -3213,7 +3246,7 @@ public: bool register_field_in_write_map(void *arg); bool register_field_in_bitmap(void *arg); bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool vcol_in_partition_func_processor(void *bool_arg); + bool post_fix_fields_part_expr_processor(void *bool_arg); bool check_valid_arguments_processor(void *bool_arg); bool check_field_expression_processor(void *arg); bool enumerate_field_refs_processor(void *arg); @@ -3249,7 +3282,6 @@ public: uint32 max_display_length() const { return field->max_display_length(); } Item_field *field_for_view_update() { return this; } int fix_outer_field(THD *thd, Field **field, Item **reference); - virtual bool vers_trx_id() const; virtual Item *update_value_transformer(THD *thd, uchar *select_arg); Item *derived_field_transformer_for_having(THD *thd, uchar *arg); Item *derived_field_transformer_for_where(THD *thd, uchar *arg); @@ -4444,7 +4476,7 @@ public: { // following assert is redundant, because fixed=1 assigned in constructor DBUG_ASSERT(fixed == 1); - ulonglong value= (ulonglong) Item_hex_hybrid::val_int(); + longlong value= Item_hex_hybrid::val_int(); int2my_decimal(E_DEC_FATAL_ERROR, value, TRUE, decimal_value); return decimal_value; } @@ -4457,6 +4489,10 @@ public: { return &type_handler_longlong; } + const Type_handler *type_handler_for_system_time() const + { + return &type_handler_longlong; + } void print(String *str, enum_query_type query_type); Item *get_copy(THD *thd) { return get_item_copy(thd, this); } @@ -4889,6 +4925,7 @@ public: */ Field *sp_result_field; Item_sp(THD *thd, Name_resolution_context *context_arg, sp_name *name_arg); + Item_sp(THD *thd, Item_sp *item); const char *func_name(THD *thd) const; void cleanup(); bool sp_check_access(THD *thd); @@ -5121,12 +5158,6 @@ public: return 0; return cleanup_processor(arg); } - virtual bool vers_trx_id() const - { - DBUG_ASSERT(ref); - DBUG_ASSERT(*ref); - return (*ref)->vers_trx_id(); - } }; @@ -6635,29 +6666,14 @@ class Item_type_holder: public Item, { protected: TYPELIB *enum_set_typelib; -private: - void init_flags(Item *item) - { - if (item->real_type() == Item::FIELD_ITEM) - { - Item_field *item_field= (Item_field *)item->real_item(); - m_flags|= (item_field->field->flags & - (VERS_SYS_START_FLAG | VERS_SYS_END_FLAG)); - // TODO: additional field flag? 
- m_vers_trx_id= item_field->field->vers_trx_id(); - } - } public: Item_type_holder(THD *thd, Item *item) :Item(thd, item), Type_handler_hybrid_field_type(item->real_type_handler()), - enum_set_typelib(0), - m_flags(0), - m_vers_trx_id(false) + enum_set_typelib(0) { DBUG_ASSERT(item->fixed); maybe_null= item->maybe_null; - init_flags(item); } Item_type_holder(THD *thd, Item *item, @@ -6667,27 +6683,20 @@ public: :Item(thd), Type_handler_hybrid_field_type(handler), Type_geometry_attributes(handler, attr), - enum_set_typelib(attr->get_typelib()), - m_flags(0), - m_vers_trx_id(false) + enum_set_typelib(attr->get_typelib()) { name= item->name; Type_std_attributes::set(*attr); maybe_null= maybe_null_arg; - init_flags(item); } const Type_handler *type_handler() const { - const Type_handler *handler= m_vers_trx_id ? - &type_handler_vers_trx_id : - Type_handler_hybrid_field_type::type_handler(); - return handler->type_handler_for_item_field(); + return Type_handler_hybrid_field_type::type_handler()-> + type_handler_for_item_field(); } const Type_handler *real_type_handler() const { - if (m_vers_trx_id) - return &type_handler_vers_trx_id; return Type_handler_hybrid_field_type::type_handler(); } @@ -6715,18 +6724,6 @@ public: } Item* get_copy(THD *thd) { return 0; } -private: - uint m_flags; - bool m_vers_trx_id; -public: - uint32 field_flags() const - { - return m_flags; - } - virtual bool vers_trx_id() const - { - return m_vers_trx_id; - } }; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index ba503f18855..76f4788c1cf 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -126,12 +126,9 @@ Type_handler_hybrid_field_type::aggregate_for_comparison(const char *funcname, many cases. */ set_handler(items[0]->type_handler()->type_handler_for_comparison()); - m_vers_trx_id= items[0]->vers_trx_id(); for (uint i= 1 ; i < nitems ; i++) { unsigned_count+= items[i]->unsigned_flag; - if (!m_vers_trx_id) - m_vers_trx_id= items[i]->vers_trx_id(); if (aggregate_for_comparison(items[i]->type_handler()-> type_handler_for_comparison())) { @@ -423,7 +420,8 @@ void Item_func::convert_const_compared_to_int_field(THD *thd) args[field= 1]->real_item()->type() == FIELD_ITEM) { Item_field *field_item= (Item_field*) (args[field]->real_item()); - if (((field_item->field_type() == MYSQL_TYPE_LONGLONG && !field_item->vers_trx_id()) || + if (((field_item->field_type() == MYSQL_TYPE_LONGLONG && + field_item->type_handler() != &type_handler_vers_trx_id) || field_item->field_type() == MYSQL_TYPE_YEAR)) convert_const_to_int(thd, field_item, &args[!field]); } @@ -1294,6 +1292,7 @@ bool Item_in_optimizer::fix_left(THD *thd) } eval_not_null_tables(NULL); with_sum_func= args[0]->with_sum_func; + with_param= args[0]->with_param || args[1]->with_param; with_field= args[0]->with_field; if ((const_item_cache= args[0]->const_item())) { @@ -1342,6 +1341,7 @@ bool Item_in_optimizer::fix_fields(THD *thd, Item **ref) m_with_subquery= true; with_sum_func= with_sum_func || args[1]->with_sum_func; with_field= with_field || args[1]->with_field; + with_param= args[0]->with_param || args[1]->with_param; used_tables_and_const_cache_join(args[1]); fixed= 1; return FALSE; @@ -1891,6 +1891,7 @@ void Item_func_interval::fix_length_and_dec() used_tables_and_const_cache_join(row); not_null_tables_cache= row->not_null_tables(); with_sum_func= with_sum_func || row->with_sum_func; + with_param= with_param || row->with_param; with_field= with_field || row->with_field; } @@ -2831,7 +2832,7 @@ Item_func_nullif::time_op(MYSQL_TIME *ltime) bool 
Item_func_nullif::is_null() { - return (null_value= (!compare() ? 1 : args[2]->null_value)); + return (null_value= (!compare() ? 1 : args[2]->is_null())); } void Item_func_case::reorder_args(uint start) @@ -4304,7 +4305,7 @@ bool cmp_item_row::prepare_comparators(THD *thd, Item **args, uint arg_count) bool Item_func_in::fix_for_row_comparison_using_bisection(THD *thd) { uint cols= args[0]->cols(); - if (!(array= new (thd->mem_root) in_row(thd, arg_count-1, 0))) + if (unlikely(!(array= new (thd->mem_root) in_row(thd, arg_count-1, 0)))) return true; cmp_item_row *cmp= &((in_row*)array)->tmp; if (cmp->alloc_comparators(thd, cols) || @@ -4315,7 +4316,7 @@ bool Item_func_in::fix_for_row_comparison_using_bisection(THD *thd) Call store_value() to setup others. */ cmp->store_value(args[0]); - if (thd->is_fatal_error) // OOM + if (unlikely(thd->is_fatal_error)) // OOM return true; fix_in_vector(); return false; @@ -4528,6 +4529,7 @@ Item_cond::fix_fields(THD *thd, Item **ref) List_iterator li(list); Item *item; uchar buff[sizeof(char*)]; // Max local vars in function + longlong is_and_cond= functype() == Item_func::COND_AND_FUNC; not_null_tables_cache= 0; used_tables_and_const_cache_init(); @@ -4590,26 +4592,33 @@ Item_cond::fix_fields(THD *thd, Item **ref) (item= *li.ref())->check_cols(1)) return TRUE; /* purecov: inspected */ used_tables_cache|= item->used_tables(); - if (item->const_item()) + if (item->const_item() && !item->with_param && + !item->is_expensive() && !cond_has_datetime_is_null(item)) { - if (!item->is_expensive() && !cond_has_datetime_is_null(item) && - item->val_int() == 0) + if (item->val_int() == is_and_cond && top_level()) { /* - This is "... OR false_cond OR ..." + a. This is "... AND true_cond AND ..." + In this case, true_cond has no effect on cond_and->not_null_tables() + b. This is "... OR false_cond/null cond OR ..." In this case, false_cond has no effect on cond_or->not_null_tables() */ } else { /* - This is "... OR const_cond OR ..." + a. This is "... AND false_cond/null_cond AND ..." + The whole condition is FALSE/UNKNOWN. + b. This is "... OR const_cond OR ..." In this case, cond_or->not_null_tables()=0, because the condition const_cond might evaluate to true (regardless of whether some tables were NULL-complemented). */ + not_null_tables_cache= (table_map) 0; and_tables_cache= (table_map) 0; } + if (thd->is_error()) + return TRUE; } else { @@ -4621,6 +4630,7 @@ Item_cond::fix_fields(THD *thd, Item **ref) } with_sum_func|= item->with_sum_func; + with_param|= item->with_param; with_field|= item->with_field; m_with_subquery|= item->with_subquery(); with_window_func|= item->with_window_func; @@ -4636,30 +4646,36 @@ bool Item_cond::eval_not_null_tables(void *opt_arg) { Item *item; + longlong is_and_cond= functype() == Item_func::COND_AND_FUNC; List_iterator li(list); not_null_tables_cache= (table_map) 0; and_tables_cache= ~(table_map) 0; while ((item=li++)) { table_map tmp_table_map; - if (item->const_item()) + if (item->const_item() && !item->with_param && + !item->is_expensive() && !cond_has_datetime_is_null(item)) { - if (!item->is_expensive() && !cond_has_datetime_is_null(item) && - item->val_int() == 0) + if (item->val_int() == is_and_cond && top_level()) { /* - This is "... OR false_cond OR ..." + a. This is "... AND true_cond AND ..." + In this case, true_cond has no effect on cond_and->not_null_tables() + b. This is "... OR false_cond/null cond OR ..." In this case, false_cond has no effect on cond_or->not_null_tables() */ } else { /* - This is "... 
OR const_cond OR ..." + a. This is "... AND false_cond/null_cond AND ..." + The whole condition is FALSE/UNKNOWN. + b. This is "... OR const_cond OR ..." In this case, cond_or->not_null_tables()=0, because the condition - some_cond_or might be true regardless of what tables are - NULL-complemented. + const_cond might evaluate to true (regardless of whether some tables + were NULL-complemented). */ + not_null_tables_cache= (table_map) 0; and_tables_cache= (table_map) 0; } } @@ -5439,7 +5455,7 @@ bool Regexp_processor_pcre::compile(String *pattern, bool send_error) m_pcre= pcre_compile(pattern->c_ptr_safe(), m_library_flags, &pcreErrorStr, &pcreErrorOffset, NULL); - if (m_pcre == NULL) + if (unlikely(m_pcre == NULL)) { if (send_error) { @@ -5458,7 +5474,7 @@ bool Regexp_processor_pcre::compile(Item *item, bool send_error) char buff[MAX_FIELD_WIDTH]; String tmp(buff, sizeof(buff), &my_charset_bin); String *pattern= item->val_str(&tmp); - if (item->null_value || compile(pattern, send_error)) + if (unlikely(item->null_value) || (unlikely(compile(pattern, send_error)))) return true; return false; } @@ -5577,7 +5593,7 @@ int Regexp_processor_pcre::pcre_exec_with_warn(const pcre *code, int rc= pcre_exec(code, extra, subject, length, startoffset, options, ovector, ovecsize); DBUG_EXECUTE_IF("pcre_exec_error_123", rc= -123;); - if (rc < PCRE_ERROR_NOMATCH) + if (unlikely(rc < PCRE_ERROR_NOMATCH)) pcre_exec_warn(rc); return rc; } @@ -5857,8 +5873,8 @@ void Item_func_like::turboBM_compute_bad_character_shifts() bool Item_func_like::turboBM_matches(const char* text, int text_len) const { - register int bcShift; - register int turboShift; + int bcShift; + int turboShift; int shift = pattern_len; int j = 0; int u = 0; @@ -5872,7 +5888,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const { while (j <= tlmpl) { - register int i= plm1; + int i= plm1; while (i >= 0 && pattern[i] == text[i + j]) { i--; @@ -5882,7 +5898,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const if (i < 0) return 1; - register const int v = plm1 - i; + const int v= plm1 - i; turboShift = u - v; bcShift = bmBc[(uint) (uchar) text[i + j]] - plm1 + i; shift = MY_MAX(turboShift, bcShift); @@ -5903,7 +5919,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const { while (j <= tlmpl) { - register int i = plm1; + int i= plm1; while (i >= 0 && likeconv(cs,pattern[i]) == likeconv(cs,text[i + j])) { i--; @@ -5913,7 +5929,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const if (i < 0) return 1; - register const int v = plm1 - i; + const int v= plm1 - i; turboShift = u - v; bcShift = bmBc[(uint) likeconv(cs, text[i + j])] - plm1 + i; shift = MY_MAX(turboShift, bcShift); diff --git a/sql/item_create.cc b/sql/item_create.cc index 901dfa06f40..ab91e378be3 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -3313,7 +3313,7 @@ Create_qfunc::create_func(THD *thd, LEX_CSTRING *name, List *item_list) { LEX_CSTRING db; - if (! thd->db.str && ! thd->lex->sphead) + if (unlikely(! thd->db.str && ! thd->lex->sphead)) { /* The proper error message should be in the lines of: @@ -3462,7 +3462,7 @@ Create_sp_func::create_with_db(THD *thd, LEX_CSTRING *db, LEX_CSTRING *name, const Sp_handler *sph= &sp_handler_function; Database_qualified_name pkgname(&null_clex_str, &null_clex_str); - if (has_named_parameters(item_list)) + if (unlikely(has_named_parameters(item_list))) { /* The syntax "db.foo(expr AS p1, expr AS p2, ...) 
is invalid, @@ -3481,8 +3481,8 @@ Create_sp_func::create_with_db(THD *thd, LEX_CSTRING *db, LEX_CSTRING *name, arg_count= item_list->elements; qname= new (thd->mem_root) sp_name(db, name, use_explicit_name); - if (sph->sp_resolve_package_routine(thd, thd->lex->sphead, - qname, &sph, &pkgname)) + if (unlikely(sph->sp_resolve_package_routine(thd, thd->lex->sphead, + qname, &sph, &pkgname))) return NULL; sph->add_used_routine(lex, thd, qname); if (pkgname.m_name.length) @@ -3502,7 +3502,7 @@ Create_sp_func::create_with_db(THD *thd, LEX_CSTRING *db, LEX_CSTRING *name, Item* Create_native_func::create_func(THD *thd, LEX_CSTRING *name, List *item_list) { - if (has_named_parameters(item_list)) + if (unlikely(has_named_parameters(item_list))) { my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -3520,7 +3520,7 @@ Create_func_arg0::create_func(THD *thd, LEX_CSTRING *name, List *item_list if (item_list != NULL) arg_count= item_list->elements; - if (arg_count != 0) + if (unlikely(arg_count != 0)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -3538,7 +3538,7 @@ Create_func_arg1::create_func(THD *thd, LEX_CSTRING *name, List *item_list if (item_list) arg_count= item_list->elements; - if (arg_count != 1) + if (unlikely(arg_count != 1)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -3546,7 +3546,7 @@ Create_func_arg1::create_func(THD *thd, LEX_CSTRING *name, List *item_list Item *param_1= item_list->pop(); - if (! param_1->is_autogenerated_name) + if (unlikely(! param_1->is_autogenerated_name)) { my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -3564,7 +3564,7 @@ Create_func_arg2::create_func(THD *thd, LEX_CSTRING *name, List *item_list if (item_list) arg_count= item_list->elements; - if (arg_count != 2) + if (unlikely(arg_count != 2)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -3573,8 +3573,8 @@ Create_func_arg2::create_func(THD *thd, LEX_CSTRING *name, List *item_list Item *param_1= item_list->pop(); Item *param_2= item_list->pop(); - if ( (! param_1->is_autogenerated_name) - || (! param_2->is_autogenerated_name)) + if (unlikely(!param_1->is_autogenerated_name || + !param_2->is_autogenerated_name)) { my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -3592,7 +3592,7 @@ Create_func_arg3::create_func(THD *thd, LEX_CSTRING *name, List *item_list if (item_list) arg_count= item_list->elements; - if (arg_count != 3) + if (unlikely(arg_count != 3)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -3602,9 +3602,9 @@ Create_func_arg3::create_func(THD *thd, LEX_CSTRING *name, List *item_list Item *param_2= item_list->pop(); Item *param_3= item_list->pop(); - if ( (! param_1->is_autogenerated_name) - || (! param_2->is_autogenerated_name) - || (! 
param_3->is_autogenerated_name)) + if (unlikely(!param_1->is_autogenerated_name || + !param_2->is_autogenerated_name || + !param_3->is_autogenerated_name)) { my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -3765,7 +3765,7 @@ Item* Create_func_binlog_gtid_pos::create_2_arg(THD *thd, Item *arg1, Item *arg2) { #ifdef HAVE_REPLICATION - if (!mysql_bin_log.is_open()) + if (unlikely(!mysql_bin_log.is_open())) #endif { my_error(ER_NO_BINARY_LOGGING, MYF(0)); @@ -3903,7 +3903,7 @@ Create_func_concat::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 1) + if (unlikely(arg_count < 1)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -3926,7 +3926,7 @@ Create_func_concat_operator_oracle::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 1) + if (unlikely(arg_count < 1)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -3950,7 +3950,7 @@ Create_func_decode_oracle::create_native(THD *thd, LEX_CSTRING *name, List *item_list) { uint arg_count= item_list ? item_list->elements : 0; - if (arg_count < 3) + if (unlikely(arg_count < 3)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -3970,7 +3970,7 @@ Create_func_concat_ws::create_native(THD *thd, LEX_CSTRING *name, arg_count= item_list->elements; /* "WS" stands for "With Separator": this function takes 2+ arguments */ - if (arg_count < 2) + if (unlikely(arg_count < 2)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -4272,7 +4272,7 @@ Create_func_elt::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 2) + if (unlikely(arg_count < 2)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -4468,7 +4468,7 @@ Create_func_field::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 2) + if (unlikely(arg_count < 2)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -4841,7 +4841,7 @@ Create_func_greatest::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 2) + if (unlikely(arg_count < 2)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -5118,6 +5118,7 @@ Create_func_json_exists Create_func_json_exists::s_singleton; Item* Create_func_json_exists::create_2_arg(THD *thd, Item *arg1, Item *arg2) { + status_var_increment(current_thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_exists(thd, arg1, arg2); } @@ -5134,7 +5135,7 @@ Create_func_json_detailed::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 1 || arg_count > 2 /* json_doc, [path]...*/) + if (unlikely(arg_count < 1 || arg_count > 2 /* json_doc, [path]...*/)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); } @@ -5143,6 +5144,7 @@ Create_func_json_detailed::create_native(THD *thd, LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_format(thd, *item_list); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5152,6 +5154,7 @@ Create_func_json_loose Create_func_json_loose::s_singleton; Item* Create_func_json_loose::create_1_arg(THD *thd, Item *arg1) { + 
status_var_increment(current_thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_format(thd, arg1, Item_func_json_format::LOOSE); } @@ -5162,6 +5165,7 @@ Create_func_json_compact Create_func_json_compact::s_singleton; Item* Create_func_json_compact::create_1_arg(THD *thd, Item *arg1) { + status_var_increment(current_thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_format(thd, arg1, Item_func_json_format::COMPACT); } @@ -5172,6 +5176,7 @@ Create_func_json_valid Create_func_json_valid::s_singleton; Item* Create_func_json_valid::create_1_arg(THD *thd, Item *arg1) { + status_var_increment(current_thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_valid(thd, arg1); } @@ -5181,6 +5186,7 @@ Create_func_json_type Create_func_json_type::s_singleton; Item* Create_func_json_type::create_1_arg(THD *thd, Item *arg1) { + status_var_increment(current_thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_type(thd, arg1); } @@ -5190,6 +5196,7 @@ Create_func_json_depth Create_func_json_depth::s_singleton; Item* Create_func_json_depth::create_1_arg(THD *thd, Item *arg1) { + status_var_increment(current_thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_depth(thd, arg1); } @@ -5199,6 +5206,7 @@ Create_func_json_value Create_func_json_value::s_singleton; Item* Create_func_json_value::create_2_arg(THD *thd, Item *arg1, Item *arg2) { + status_var_increment(current_thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_value(thd, arg1, arg2); } @@ -5208,6 +5216,7 @@ Create_func_json_query Create_func_json_query::s_singleton; Item* Create_func_json_query::create_2_arg(THD *thd, Item *arg1, Item *arg2) { + status_var_increment(current_thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_query(thd, arg1, arg2); } @@ -5217,6 +5226,7 @@ Create_func_json_quote Create_func_json_quote::s_singleton; Item* Create_func_json_quote::create_1_arg(THD *thd, Item *arg1) { + status_var_increment(current_thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_quote(thd, arg1); } @@ -5226,6 +5236,7 @@ Create_func_json_unquote Create_func_json_unquote::s_singleton; Item* Create_func_json_unquote::create_1_arg(THD *thd, Item *arg1) { + status_var_increment(current_thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_unquote(thd, arg1); } @@ -5256,6 +5267,7 @@ Create_func_json_array::create_native(THD *thd, LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_array(thd); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5272,7 +5284,7 @@ Create_func_json_array_append::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/) + if (unlikely(arg_count < 3 || (arg_count & 1) == 0 /*is even*/)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); } @@ -5281,6 +5293,7 @@ Create_func_json_array_append::create_native(THD *thd, LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_array_append(thd, *item_list); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5297,7 +5310,7 @@ Create_func_json_array_insert::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/) + if (unlikely(arg_count < 3 || (arg_count & 1) == 0 /*is even*/)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, 
MYF(0), name->str); } @@ -5306,6 +5319,7 @@ Create_func_json_array_insert::create_native(THD *thd, LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_array_insert(thd, *item_list); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5322,7 +5336,7 @@ Create_func_json_insert::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/) + if (unlikely(arg_count < 3 || (arg_count & 1) == 0 /*is even*/)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); } @@ -5332,6 +5346,7 @@ Create_func_json_insert::create_native(THD *thd, LEX_CSTRING *name, thd, *item_list); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5348,7 +5363,7 @@ Create_func_json_set::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/) + if (unlikely(arg_count < 3 || (arg_count & 1) == 0 /*is even*/)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); } @@ -5358,6 +5373,7 @@ Create_func_json_set::create_native(THD *thd, LEX_CSTRING *name, thd, *item_list); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5374,7 +5390,7 @@ Create_func_json_replace::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/) + if (unlikely(arg_count < 3 || (arg_count & 1) == 0 /*is even*/)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); } @@ -5384,6 +5400,7 @@ Create_func_json_replace::create_native(THD *thd, LEX_CSTRING *name, thd, *item_list); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5400,7 +5417,7 @@ Create_func_json_remove::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 2 /*json_doc, path [,path]*/) + if (unlikely(arg_count < 2 /*json_doc, path [,path]*/)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); } @@ -5409,6 +5426,7 @@ Create_func_json_remove::create_native(THD *thd, LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_remove(thd, *item_list); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5425,7 +5443,7 @@ Create_func_json_object::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) { arg_count= item_list->elements; - if ((arg_count & 1) != 0 /*is odd*/) + if (unlikely((arg_count & 1) != 0 /*is odd*/)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); func= NULL; @@ -5441,6 +5459,7 @@ Create_func_json_object::create_native(THD *thd, LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_object(thd); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5454,8 +5473,8 @@ Create_func_json_length::create_native(THD *thd, LEX_CSTRING *name, Item *func; int arg_count; - if (item_list == NULL || - (arg_count= item_list->elements) == 0) + if (unlikely(item_list == NULL || + (arg_count= item_list->elements) == 0)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); func= NULL; @@ -5465,6 +5484,7 @@ Create_func_json_length::create_native(THD *thd, LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_length(thd, *item_list); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5478,8 +5498,8 @@ 
Create_func_json_merge::create_native(THD *thd, LEX_CSTRING *name, Item *func; int arg_count; - if (item_list == NULL || - (arg_count= item_list->elements) < 2) // json, json + if (unlikely(item_list == NULL || + (arg_count= item_list->elements) < 2)) // json, json { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); func= NULL; @@ -5489,6 +5509,7 @@ Create_func_json_merge::create_native(THD *thd, LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_merge(thd, *item_list); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5505,7 +5526,7 @@ Create_func_json_contains::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count == 2 || arg_count == 3/* json_doc, val, [path] */) + if (unlikely(arg_count == 2 || arg_count == 3/* json_doc, val, [path] */)) { func= new (thd->mem_root) Item_func_json_contains(thd, *item_list); } @@ -5514,6 +5535,7 @@ Create_func_json_contains::create_native(THD *thd, LEX_CSTRING *name, my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5530,7 +5552,7 @@ Create_func_json_keys::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 1 || arg_count > 2 /* json_doc, [path]...*/) + if (unlikely(arg_count < 1 || arg_count > 2 /* json_doc, [path]...*/)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); } @@ -5539,6 +5561,7 @@ Create_func_json_keys::create_native(THD *thd, LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_keys(thd, *item_list); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5555,7 +5578,7 @@ Create_func_json_contains_path::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 3 /* json_doc, one_or_all, path, [path]...*/) + if (unlikely(arg_count < 3 /* json_doc, one_or_all, path, [path]...*/)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); } @@ -5564,6 +5587,7 @@ Create_func_json_contains_path::create_native(THD *thd, LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_contains_path(thd, *item_list); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5580,7 +5604,7 @@ Create_func_json_extract::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 2 /* json_doc, path, [path]...*/) + if (unlikely(arg_count < 2 /* json_doc, path, [path]...*/)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); } @@ -5589,6 +5613,7 @@ Create_func_json_extract::create_native(THD *thd, LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_extract(thd, *item_list); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ -5605,7 +5630,7 @@ Create_func_json_search::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 3 /* json_doc, one_or_all, search_str, [escape_char[, path]...*/) + if (unlikely(arg_count < 3 /* json_doc, one_or_all, search_str, [escape_char[, path]...*/)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); } @@ -5614,6 +5639,7 @@ Create_func_json_search::create_native(THD *thd, LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_search(thd, *item_list); } + status_var_increment(current_thd->status_var.feature_json); return func; } @@ 
-5675,7 +5701,7 @@ Create_func_least::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 2) + if (unlikely(arg_count < 2)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -5953,7 +5979,7 @@ Create_func_make_set::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 2) + if (unlikely(arg_count < 2)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -5978,7 +6004,7 @@ Create_func_master_pos_wait::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 2 || arg_count > 4) + if (unlikely(arg_count < 2 || arg_count > 4)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return func; @@ -6028,7 +6054,7 @@ Create_func_master_gtid_wait::create_native(THD *thd, LEX_CSTRING *name, if (item_list != NULL) arg_count= item_list->elements; - if (arg_count < 1 || arg_count > 2) + if (unlikely(arg_count < 1 || arg_count > 2)) { my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); return func; @@ -7461,7 +7487,7 @@ Item *create_temporal_literal(THD *thd, DBUG_ASSERT(0); } - if (item) + if (likely(item)) { if (status.warnings) // e.g. a note on nanosecond truncation { @@ -7570,7 +7596,7 @@ Item *create_func_dyncol_get(THD *thd, Item *str, Item *num, { Item *res; - if (!(res= new (thd->mem_root) Item_dyncol_get(thd, str, num))) + if (likely(!(res= new (thd->mem_root) Item_dyncol_get(thd, str, num)))) return res; // Return NULL return handler->create_typecast_item(thd, res, Type_cast_attributes(c_len, c_dec, cs)); diff --git a/sql/item_func.cc b/sql/item_func.cc index e2740272385..c291e897957 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -62,9 +62,9 @@ bool check_reserved_words(const LEX_CSTRING *name) { - if (!my_strcasecmp(system_charset_info, name->str, "GLOBAL") || - !my_strcasecmp(system_charset_info, name->str, "LOCAL") || - !my_strcasecmp(system_charset_info, name->str, "SESSION")) + if (lex_string_eq(name, STRING_WITH_LEN("GLOBAL")) || + lex_string_eq(name, STRING_WITH_LEN("LOCAL")) || + lex_string_eq(name, STRING_WITH_LEN("SESSION"))) return TRUE; return FALSE; } @@ -148,6 +148,7 @@ void Item_func::sync_with_sum_func_and_with_field(List &list) with_sum_func|= item->with_sum_func; with_window_func|= item->with_window_func; with_field|= item->with_field; + with_param|= item->with_param; } } @@ -367,6 +368,7 @@ Item_func::fix_fields(THD *thd, Item **ref) maybe_null=1; with_sum_func= with_sum_func || item->with_sum_func; + with_param= with_param || item->with_param; with_window_func= with_window_func || item->with_window_func; with_field= with_field || item->with_field; used_tables_and_const_cache_join(item); @@ -376,7 +378,7 @@ Item_func::fix_fields(THD *thd, Item **ref) if (check_arguments()) return true; fix_length_and_dec(); - if (thd->is_error()) // An error inside fix_length_and_dec occurred + if (unlikely(thd->is_error())) // An error inside fix_length_and_dec occurred return TRUE; fixed= 1; return FALSE; @@ -1149,7 +1151,8 @@ double Item_double_typecast::val_real() if ((null_value= args[0]->null_value)) return 0.0; - if ((error= truncate_double(&tmp, max_length, decimals, 0, DBL_MAX))) + if (unlikely((error= truncate_double(&tmp, max_length, decimals, 0, + DBL_MAX)))) { THD *thd= current_thd; push_warning_printf(thd, @@ -1935,6 +1938,9 @@ void Item_func_neg::fix_length_and_dec_double() 
set_handler(&type_handler_double); decimals= args[0]->decimals; // Preserve NOT_FIXED_DEC max_length= args[0]->max_length + 1; + // Limit length with something reasonable + uint32 mlen= type_handler()->max_display_length(this); + set_if_smaller(max_length, mlen); unsigned_flag= false; } @@ -2503,12 +2509,12 @@ double my_double_round(double value, longlong dec, bool dec_unsigned, volatile double value_div_tmp= value / tmp; volatile double value_mul_tmp= value * tmp; - if (!dec_negative && my_isinf(tmp)) // "dec" is too large positive number + if (!dec_negative && std::isinf(tmp)) // "dec" is too large positive number return value; - if (dec_negative && my_isinf(tmp)) + if (dec_negative && std::isinf(tmp)) tmp2= 0.0; - else if (!dec_negative && my_isinf(value_mul_tmp)) + else if (!dec_negative && std::isinf(value_mul_tmp)) tmp2= value; else if (truncate) { @@ -2738,7 +2744,7 @@ bool Item_func_min_max::get_date_native(MYSQL_TIME *ltime, ulonglong fuzzy_date) longlong res= args[i]->val_datetime_packed(); /* Check if we need to stop (because of error or KILL) and stop the loop */ - if (args[i]->null_value) + if (unlikely(args[i]->null_value)) return (null_value= 1); if (i == 0 || (res < min_max ? cmp_sign : -cmp_sign) > 0) @@ -2747,7 +2753,7 @@ bool Item_func_min_max::get_date_native(MYSQL_TIME *ltime, ulonglong fuzzy_date) unpack_time(min_max, ltime, mysql_timestamp_type()); if (!(fuzzy_date & TIME_TIME_ONLY) && - ((null_value= check_date_with_warn(ltime, fuzzy_date, + unlikely((null_value= check_date_with_warn(ltime, fuzzy_date, MYSQL_TIMESTAMP_ERROR)))) return true; @@ -3074,8 +3080,8 @@ longlong Item_func_ord::val_int() #ifdef USE_MB if (use_mb(res->charset())) { - register const char *str=res->ptr(); - register uint32 n=0, l=my_ismbchar(res->charset(),str,str+res->length()); + const char *str=res->ptr(); + uint32 n=0, l=my_ismbchar(res->charset(),str,str+res->length()); if (!l) return (longlong)((uchar) *str); while (l--) @@ -3297,6 +3303,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func, func->maybe_null=1; func->with_sum_func= func->with_sum_func || item->with_sum_func; func->with_field= func->with_field || item->with_field; + func->with_param= func->with_param || item->with_param; func->With_subquery_cache::join(item); func->used_tables_and_const_cache_join(item); f_args.arg_type[i]=item->result_type(); @@ -3376,7 +3383,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func, } } Udf_func_init init= u_d->func_init; - if ((error=(uchar) init(&initid, &f_args, init_msg_buff))) + if (unlikely((error=(uchar) init(&initid, &f_args, init_msg_buff)))) { my_error(ER_CANT_INITIALIZE_UDF, MYF(0), u_d->name.str, init_msg_buff); @@ -3394,7 +3401,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func, func->decimals=MY_MIN(initid.decimals,NOT_FIXED_DEC); } initialized=1; - if (error) + if (unlikely(error)) { my_error(ER_CANT_INITIALIZE_UDF, MYF(0), u_d->name.str, ER_THD(thd, ER_UNKNOWN_ERROR)); @@ -3406,7 +3413,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func, bool udf_handler::get_arguments() { - if (error) + if (unlikely(error)) return 1; // Got an error earlier char *to= num_buffer; uint str_count=0; @@ -3481,7 +3488,7 @@ String *udf_handler::val_str(String *str,String *save_str) char *res=func(&initid, &f_args, (char*) str->ptr(), &res_length, &is_null_tmp, &error); DBUG_PRINT("info", ("udf func returned, res_length: %lu", res_length)); - if (is_null_tmp || !res || error) // The !res is for safety + if (is_null_tmp || !res || unlikely(error)) // The !res is 
for safety { DBUG_PRINT("info", ("Null or error")); DBUG_RETURN(0); @@ -3517,7 +3524,7 @@ my_decimal *udf_handler::val_decimal(my_bool *null_value, my_decimal *dec_buf) u_d->func; char *res= func(&initid, &f_args, buf, &res_length, &is_null, &error); - if (is_null || error) + if (is_null || unlikely(error)) { *null_value= 1; return 0; @@ -4078,7 +4085,7 @@ longlong Item_func_get_lock::val_int() thd->push_internal_handler(&lock_wait_timeout_handler); bool error= thd->mdl_context.acquire_lock(&ull_request, timeout); (void) thd->pop_internal_handler(); - if (error) + if (unlikely(error)) { if (lock_wait_timeout_handler.m_lock_wait_timeout) null_value= 0; @@ -5351,13 +5358,14 @@ get_var_with_binlog(THD *thd, enum_sql_command sql_command, new (thd->mem_root) Item_null(thd))), thd->mem_root); /* Create the variable if the above allocations succeeded */ - if (thd->is_fatal_error || sql_set_variables(thd, &tmp_var_list, false)) + if (unlikely(thd->is_fatal_error) || + unlikely(sql_set_variables(thd, &tmp_var_list, false))) { thd->lex= sav_lex; goto err; } thd->lex= sav_lex; - if (!(var_entry= get_variable(&thd->user_vars, name, 0))) + if (unlikely(!(var_entry= get_variable(&thd->user_vars, name, 0)))) goto err; } else if (var_entry->used_query_id == thd->query_id || @@ -5386,8 +5394,8 @@ get_var_with_binlog(THD *thd, enum_sql_command sql_command, destroyed. */ size= ALIGN_SIZE(sizeof(BINLOG_USER_VAR_EVENT)) + var_entry->length; - if (!(user_var_event= (BINLOG_USER_VAR_EVENT *) - alloc_root(thd->user_var_events_alloc, size))) + if (unlikely(!(user_var_event= (BINLOG_USER_VAR_EVENT *) + alloc_root(thd->user_var_events_alloc, size)))) goto err; user_var_event->value= (char*) user_var_event + @@ -5436,7 +5444,7 @@ void Item_func_get_user_var::fix_length_and_dec() 'm_var_entry' is NULL only if there occurred an error during the call to get_var_with_binlog. 
*/ - if (!error && m_var_entry) + if (likely(!error && m_var_entry)) { unsigned_flag= m_var_entry->unsigned_flag; max_length= (uint32)m_var_entry->length; @@ -5927,8 +5935,8 @@ bool Item_func_match::init_search(THD *thd, bool no_order) for (uint i= 1; i < arg_count; i++) fields.push_back(args[i]); concat_ws= new (thd->mem_root) Item_func_concat_ws(thd, fields); - if (thd->is_fatal_error) - DBUG_RETURN(1); // OOM + if (unlikely(thd->is_fatal_error)) + DBUG_RETURN(1); // OOM in new or push_back /* Above function used only to get value and do not need fix_fields for it: Item_string - basic constant @@ -6803,7 +6811,7 @@ longlong Item_func_nextval::val_int() entry->value= value; entry->set_version(table); - if (error) // Warning already printed + if (unlikely(error)) // Warning already printed entry->null_value= null_value= 1; // For not strict mode DBUG_RETURN(value); } @@ -6915,7 +6923,7 @@ longlong Item_func_setval::val_int() DBUG_ASSERT(table && table->s->sequence); thd= table->in_use; - if (thd->count_cuted_fields == CHECK_FIELD_EXPRESSION) + if (unlikely(thd->count_cuted_fields == CHECK_FIELD_EXPRESSION)) { /* Alter table checking if function works */ null_value= 0; @@ -6924,7 +6932,7 @@ longlong Item_func_setval::val_int() value= nextval; error= table->s->sequence->set_value(table, nextval, round, is_used); - if (error) + if (unlikely(error)) { null_value= 1; value= 0; diff --git a/sql/item_func.h b/sql/item_func.h index 59fc49ead39..a6fcf8f4870 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -32,6 +32,7 @@ extern "C" /* Bug in BSDI include file */ #include "sql_udf.h" // udf_handler #include "my_decimal.h" // string2my_decimal +#include class Item_func :public Item_func_or_sum @@ -70,7 +71,7 @@ public: SP_CONTAINS_FUNC,SP_OVERLAPS_FUNC, SP_STARTPOINT,SP_ENDPOINT,SP_EXTERIORRING, SP_POINTN,SP_GEOMETRYN,SP_INTERIORRINGN, SP_RELATE_FUNC, - NOT_FUNC, NOT_ALL_FUNC, + NOT_FUNC, NOT_ALL_FUNC, TEMPTABLE_ROWID, NOW_FUNC, NOW_UTC_FUNC, SYSDATE_FUNC, TRIG_COND_FUNC, SUSERVAR_FUNC, GUSERVAR_FUNC, COLLATE_FUNC, EXTRACT_FUNC, CHAR_TYPECAST_FUNC, FUNC_SP, UDF_FUNC, @@ -82,16 +83,19 @@ public: { with_sum_func= 0; with_field= 0; + with_param= 0; } Item_func(THD *thd, Item *a): Item_func_or_sum(thd, a) { with_sum_func= a->with_sum_func; + with_param= a->with_param; with_field= a->with_field; } Item_func(THD *thd, Item *a, Item *b): Item_func_or_sum(thd, a, b) { with_sum_func= a->with_sum_func || b->with_sum_func; + with_param= a->with_param || b->with_param; with_field= a->with_field || b->with_field; } Item_func(THD *thd, Item *a, Item *b, Item *c): @@ -99,6 +103,7 @@ public: { with_sum_func= a->with_sum_func || b->with_sum_func || c->with_sum_func; with_field= a->with_field || b->with_field || c->with_field; + with_param= a->with_param || b->with_param || c->with_param; } Item_func(THD *thd, Item *a, Item *b, Item *c, Item *d): Item_func_or_sum(thd, a, b, c, d) @@ -107,6 +112,8 @@ public: c->with_sum_func || d->with_sum_func; with_field= a->with_field || b->with_field || c->with_field || d->with_field; + with_param= a->with_param || b->with_param || + c->with_param || d->with_param; } Item_func(THD *thd, Item *a, Item *b, Item *c, Item *d, Item* e): Item_func_or_sum(thd, a, b, c, d, e) @@ -115,6 +122,8 @@ public: c->with_sum_func || d->with_sum_func || e->with_sum_func; with_field= a->with_field || b->with_field || c->with_field || d->with_field || e->with_field; + with_param= a->with_param || b->with_param || + c->with_param || d->with_param || e->with_param; } Item_func(THD *thd, List &list): 
Item_func_or_sum(thd, list) @@ -229,7 +238,7 @@ public: */ inline double check_float_overflow(double value) { - return isfinite(value) ? value : raise_float_overflow(); + return std::isfinite(value) ? value : raise_float_overflow(); } /** Throw an error if the input BIGINT value represented by the @@ -1612,14 +1621,13 @@ public: { name= a->name; } - double val_real() { return args[0]->val_real(); } - longlong val_int() { return args[0]->val_int(); } - String *val_str(String *str) { return args[0]->val_str(str); } - my_decimal *val_decimal(my_decimal *dec) { return args[0]->val_decimal(dec); } + double val_real() { return val_real_from_item(args[0]); } + longlong val_int() { return val_int_from_item(args[0]); } + String *val_str(String *str) { return val_str_from_item(args[0], str); } + my_decimal *val_decimal(my_decimal *dec) + { return val_decimal_from_item(args[0], dec); } bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate) - { - return args[0]->get_date(ltime, fuzzydate); - } + { return get_date_from_item(args[0], ltime, fuzzydate); } const char *func_name() const { return "rollup_const"; } bool const_item() const { return 0; } const Type_handler *type_handler() const { return args[0]->type_handler(); } @@ -1627,9 +1635,7 @@ public: { collation= args[0]->collation; max_length= args[0]->max_length; - decimals=args[0]->decimals; - /* The item could be a NULL constant. */ - null_value= args[0]->is_null(); + decimals=args[0]->decimals; } Item *get_copy(THD *thd) { return get_item_copy(thd, this); } @@ -2851,7 +2857,7 @@ public: { return result_type() != STRING_RESULT ? sp_result_field : - tmp_table_field_from_field_type(table); + create_table_field_from_handler(table); } void make_send_field(THD *thd, Send_field *tmp_field); @@ -3014,6 +3020,7 @@ public: Item_func_uuid_short(THD *thd): Item_longlong_func(thd) {} const char *func_name() const { return "uuid_short"; } longlong val_int(); + bool const_item() const { return false; } void fix_length_and_dec() { max_length= 21; unsigned_flag=1; } table_map used_tables() const { return RAND_TABLE_BIT; } diff --git a/sql/item_inetfunc.cc b/sql/item_inetfunc.cc index d4788a39d5e..8a3345ecc81 100644 --- a/sql/item_inetfunc.cc +++ b/sql/item_inetfunc.cc @@ -149,13 +149,14 @@ longlong Item_func_inet_bool_base::val_int() { DBUG_ASSERT(fixed); - if (args[0]->result_type() != STRING_RESULT) // String argument expected + // String argument expected + if (unlikely(args[0]->result_type() != STRING_RESULT)) return 0; String buffer; String *arg_str= args[0]->val_str(&buffer); - if (!arg_str) // Out-of memory happened. The error has been reported. + if (unlikely(!arg_str)) // Out-of memory happened. error has been reported. return 0; // Or: the underlying field is NULL return calc_value(arg_str) ? 1 : 0; @@ -175,7 +176,8 @@ String *Item_func_inet_str_base::val_str_ascii(String *buffer) { DBUG_ASSERT(fixed); - if (args[0]->result_type() != STRING_RESULT) // String argument expected + // String argument expected + if (unlikely(args[0]->result_type() != STRING_RESULT)) { null_value= true; return NULL; @@ -183,15 +185,17 @@ String *Item_func_inet_str_base::val_str_ascii(String *buffer) StringBuffer tmp; String *arg_str= args[0]->val_str(&tmp); - if (!arg_str) // Out-of memory happened. The error has been reported. - { // Or: the underlying field is NULL + if (unlikely(!arg_str)) + { + // Out-of memory happened. error has been reported. 
+ // Or: the underlying field is NULL null_value= true; return NULL; } null_value= !calc_value(arg_str, buffer); - return null_value ? NULL : buffer; + return unlikely(null_value) ? NULL : buffer; } /////////////////////////////////////////////////////////////////////////// diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc index be1d84982de..a45baec8e92 100644 --- a/sql/item_jsonfunc.cc +++ b/sql/item_jsonfunc.cc @@ -173,7 +173,7 @@ static int json_nice(json_engine_t *je, String *nice_js, key_end= je->s.c_str; } while (json_read_keyname_chr(je) == 0); - if (je->s.error) + if (unlikely(je->s.error)) goto error; if (!first_value) @@ -492,7 +492,7 @@ continue_search: if (json_read_value(&je)) goto err_return; - if (check_and_get_value(&je, str, &error)) + if (unlikely(check_and_get_value(&je, str, &error))) { if (error) goto err_return; @@ -623,7 +623,7 @@ String *Item_func_json_unquote::val_str(String *str) if (!(js= read_json(&je))) return NULL; - if (je.s.error || je.value_type != JSON_VALUE_STRING) + if (unlikely(je.s.error) || je.value_type != JSON_VALUE_STRING) return js; str->length(0); @@ -835,7 +835,7 @@ String *Item_func_json_extract::read_json(String *str, } } - if (je.s.error) + if (unlikely(je.s.error)) goto error; if (!not_first_value) @@ -994,7 +994,7 @@ static int check_contains(json_engine_t *js, json_engine_t *value) k_end= value->s.c_str; } while (json_read_keyname_chr(value) == 0); - if (value->s.error || json_read_value(value)) + if (unlikely(value->s.error) || json_read_value(value)) return FALSE; if (set_js) @@ -1037,7 +1037,7 @@ static int check_contains(json_engine_t *js, json_engine_t *value) return FALSE; return TRUE; } - if (value->s.error || js->s.error || + if (unlikely(value->s.error) || unlikely(js->s.error) || (!v_scalar && json_skip_to_level(js, c_level))) return FALSE; } @@ -1165,7 +1165,7 @@ longlong Item_func_json_contains::val_int() goto error; result= check_contains(&je, &ve); - if (je.s.error || ve.s.error) + if (unlikely(je.s.error || ve.s.error)) goto error; return result; @@ -1385,7 +1385,7 @@ longlong Item_func_json_contains_path::val_int() } } - if (je.s.error == 0) + if (likely(je.s.error == 0)) return result; report_json_error(js, &je, 0); @@ -1749,7 +1749,7 @@ String *Item_func_json_array_insert::val_str(String *str) goto js_error; } - if (je.s.error) + if (unlikely(je.s.error)) goto js_error; str->length(0); @@ -1881,7 +1881,7 @@ static int do_merge(String *str, json_engine_t *je1, json_engine_t *je2) key_end= je1->s.c_str; } while (json_read_keyname_chr(je1) == 0); - if (je1->s.error) + if (unlikely(je1->s.error)) return 1; if (first_key) @@ -1916,7 +1916,7 @@ static int do_merge(String *str, json_engine_t *je1, json_engine_t *je2) return ires; goto merged_j1; } - if (je2->s.error) + if (unlikely(je2->s.error)) return 2; key_start= je1->s.c_str; @@ -1946,7 +1946,7 @@ merged_j1: key_end= je2->s.c_str; } while (json_read_keyname_chr(je2) == 0); - if (je2->s.error) + if (unlikely(je2->s.error)) return 1; *je1= sav_je1; @@ -1957,7 +1957,7 @@ merged_j1: json_string_set_str(&key_name, key_start, key_end); if (!json_key_matches(je1, &key_name)) { - if (je1->s.error || json_skip_key(je1)) + if (unlikely(je1->s.error || json_skip_key(je1))) return 2; continue; } @@ -1967,7 +1967,7 @@ merged_j1: goto continue_j2; } - if (je1->s.error) + if (unlikely(je1->s.error)) return 2; if (first_key) @@ -2008,7 +2008,7 @@ continue_j2: empty_array= 0; } - if (je1->s.error) + if (unlikely(je1->s.error)) return 1; end1= je1->s.c_str - je1->sav_c_len; @@ -2206,7 
+2206,7 @@ longlong Item_func_json_length::val_int() while (json_scan_next(&je) == 0) {} } - if (!je.s.error) + if (likely(!je.s.error)) return length; err_return: @@ -2260,7 +2260,7 @@ longlong Item_func_json_depth::val_int() } } while (json_scan_next(&je) == 0); - if (!je.s.error) + if (likely(!je.s.error)) return depth; report_json_error(js, &je, 0); @@ -2475,7 +2475,7 @@ String *Item_func_json_insert::val_str(String *str) } } - if (je.s.error) + if (unlikely(je.s.error)) goto js_error; if (!mode_insert) @@ -2513,7 +2513,7 @@ String *Item_func_json_insert::val_str(String *str) } } - if (je.s.error) + if (unlikely(je.s.error)) goto js_error; if (!mode_insert) @@ -2615,7 +2615,7 @@ String *Item_func_json_remove::val_str(String *str) { uint array_counters[JSON_DEPTH_LIMIT]; json_path_with_flags *c_path= paths + n_path; - const char *rem_start, *rem_end; + const char *rem_start= 0, *rem_end; const json_path_step_t *lp; uint n_item= 0; @@ -2686,7 +2686,7 @@ String *Item_func_json_remove::val_str(String *str) } } - if (je.s.error) + if (unlikely(je.s.error)) goto js_error; continue; @@ -2718,7 +2718,7 @@ String *Item_func_json_remove::val_str(String *str) } } - if (je.s.error) + if (unlikely(je.s.error)) goto js_error; continue; @@ -2883,7 +2883,7 @@ skip_search: { key_end= je.s.c_str; } while (json_read_keyname_chr(&je) == 0); - if (je.s.error) + if (unlikely(je.s.error)) goto err_return; key_len= (int)(key_end - key_start); @@ -2907,7 +2907,7 @@ skip_search: } } - if (je.s.error || str->append("]", 1)) + if (unlikely(je.s.error || str->append("]", 1))) goto err_return; null_value= 0; @@ -3091,7 +3091,7 @@ String *Item_func_json_search::val_str(String *str) } } - if (je.s.error) + if (unlikely(je.s.error)) goto js_error; end: diff --git a/sql/item_row.cc b/sql/item_row.cc index 64794093bec..74ea9ecc0c2 100644 --- a/sql/item_row.cc +++ b/sql/item_row.cc @@ -65,6 +65,7 @@ bool Item_row::fix_fields(THD *thd, Item **ref) with_window_func = with_window_func || item->with_window_func; with_field= with_field || item->with_field; m_with_subquery|= item->with_subquery(); + with_param|= item->with_param; } fixed= 1; return FALSE; @@ -178,4 +179,3 @@ Item* Item_row::build_clone(THD *thd) } return copy; } - diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 85e578f4595..6af49d494d4 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -1078,7 +1078,7 @@ String *Item_func_reverse::val_str(String *str) #ifdef USE_MB if (use_mb(res->charset())) { - register uint32 l; + uint32 l; while (ptr < end) { if ((l= my_ismbchar(res->charset(),ptr,end))) @@ -1128,7 +1128,7 @@ String *Item_func_replace::val_str_internal(String *str, bool alloced=0; #ifdef USE_MB const char *ptr,*end,*strend,*search,*search_end; - register uint32 l; + uint32 l; bool binary_cmp; #endif THD *thd= 0; @@ -1188,7 +1188,7 @@ redo: { if (*ptr == *search) { - register char *i,*j; + char *i,*j; i=(char*) ptr+1; j=(char*) search+1; while (j != search_end) if (*i++ != *j++) goto skip; @@ -1799,14 +1799,14 @@ String *Item_func_substr_index::val_str(String *str) const char *search= delimiter->ptr(); const char *search_end= search+delimiter_length; int32 n=0,c=count,pass; - register uint32 l; + uint32 l; for (pass=(count>0);pass<2;++pass) { while (ptr < end) { if (*ptr == *search) { - register char *i,*j; + char *i,*j; i=(char*) ptr+1; j=(char*) search+1; while (j != search_end) if (*i++ != *j++) goto skip; @@ -1974,7 +1974,7 @@ String *Item_func_rtrim::val_str(String *str) end= ptr+res->length(); #ifdef USE_MB char *p=ptr; - 
register uint32 l; + uint32 l; #endif if (remove_length == 1) { @@ -2059,7 +2059,7 @@ String *Item_func_trim::val_str(String *str) if (use_mb(collation.collation)) { char *p=ptr; - register uint32 l; + uint32 l; loop: while (ptr + remove_length < end) { @@ -2641,7 +2641,7 @@ String *Item_func_format::val_str_ascii(String *str) return 0; /* purecov: inspected */ nr= my_double_round(nr, (longlong) dec, FALSE, FALSE); str->set_real(nr, dec, &my_charset_numeric); - if (!isfinite(nr)) + if (!std::isfinite(nr)) return str; str_length=str->length(); } @@ -4918,7 +4918,7 @@ longlong Item_dyncol_get::val_int() char *end= val.x.string.value.str + val.x.string.value.length, *org_end= end; num= my_strtoll10(val.x.string.value.str, &end, &error); - if (end != org_end || error > 0) + if (unlikely(end != org_end || error > 0)) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_BAD_DATA, @@ -5204,3 +5204,23 @@ null: my_free(names); return NULL; } + +Item_temptable_rowid::Item_temptable_rowid(TABLE *table_arg) + : Item_str_func(table_arg->in_use), table(table_arg) +{ + max_length= table->file->ref_length; +} + +void Item_temptable_rowid::fix_length_and_dec() +{ + used_tables_cache= table->map; + const_item_cache= false; +} + +String *Item_temptable_rowid::val_str(String *str) +{ + if (!((null_value= table->null_row))) + table->file->position(table->record[0]); + str_value.set((char*)(table->file->ref), max_length, &my_charset_bin); + return &str_value; +} diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index cff9fdee072..eb084c3f58d 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -1630,6 +1630,7 @@ public: DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII); fix_char_length(MY_UUID_STRING_LENGTH); } + bool const_item() const { return false; } table_map used_tables() const { return RAND_TABLE_BIT; } const char *func_name() const{ return "uuid"; } String *val_str(String *); @@ -1750,5 +1751,25 @@ public: { return get_item_copy(thd, this); } }; -#endif /* ITEM_STRFUNC_INCLUDED */ +/* + this is used by JOIN_TAB::keep_current_rowid + and stores handler::position(). + It has nothing to do with _rowid pseudo-column, that the parser supports. +*/ +class Item_temptable_rowid :public Item_str_func +{ +public: + TABLE *table; + Item_temptable_rowid(TABLE *table_arg); + const Type_handler *type_handler() const { return &type_handler_string; } + Field *create_tmp_field(bool group, TABLE *table) + { return create_table_field_from_handler(table); } + String *val_str(String *str); + enum Functype functype() const { return TEMPTABLE_ROWID; } + const char *func_name() const { return ""; } + void fix_length_and_dec(); + Item *get_copy(THD *thd) + { return get_item_copy(thd, this); } +}; +#endif /* ITEM_STRFUNC_INCLUDED */ diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 6378e9b76bf..143894ad949 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -156,6 +156,7 @@ void Item_subselect::cleanup() reset(); filesort_buffer.free_sort_buffer(); my_free(sortbuffer.str); + sortbuffer.str= 0; value_assigned= 0; expr_cache= 0; @@ -710,7 +711,7 @@ bool Item_subselect::exec() Do not execute subselect in case of a fatal error or if the query has been killed. 
*/ - if (thd->is_error() || thd->killed) + if (unlikely(thd->is_error() || thd->killed)) DBUG_RETURN(true); DBUG_ASSERT(!thd->lex->context_analysis_only); @@ -1417,14 +1418,14 @@ void Item_exists_subselect::print(String *str, enum_query_type query_type) bool Item_in_subselect::test_limit(st_select_lex_unit *unit_arg) { - if (unit_arg->fake_select_lex && - unit_arg->fake_select_lex->test_limit()) + if (unlikely(unit_arg->fake_select_lex && + unit_arg->fake_select_lex->test_limit())) return(1); SELECT_LEX *sl= unit_arg->first_select(); for (; sl; sl= sl->next_select()) { - if (sl->test_limit()) + if (unlikely(sl->test_limit())) return(1); } return(0); @@ -3674,7 +3675,7 @@ int subselect_single_select_engine::prepare(THD *thd) int subselect_union_engine::prepare(THD *thd_arg) { set_thd(thd_arg); - return unit->prepare(thd, result, SELECT_NO_UNLOCK); + return unit->prepare(unit->derived, result, SELECT_NO_UNLOCK); } int subselect_uniquesubquery_engine::prepare(THD *) @@ -3935,12 +3936,8 @@ int subselect_uniquesubquery_engine::scan_table() for (;;) { error=table->file->ha_rnd_next(table->record[0]); - if (error) { - if (error == HA_ERR_RECORD_DELETED) - { - error= 0; - continue; - } + if (unlikely(error)) + { if (error == HA_ERR_END_OF_FILE) { error= 0; @@ -4076,8 +4073,8 @@ int subselect_uniquesubquery_engine::exec() make_prev_keypart_map(tab-> ref.key_parts), HA_READ_KEY_EXACT); - if (error && - error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) + if (unlikely(error && + error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)) error= report_error(table, error); else { @@ -4115,7 +4112,8 @@ int subselect_uniquesubquery_engine::index_lookup() HA_READ_KEY_EXACT); DBUG_PRINT("info", ("lookup result: %i", error)); - if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) + if (unlikely(error && error != HA_ERR_KEY_NOT_FOUND && + error != HA_ERR_END_OF_FILE)) { /* TIMOUR: I don't understand at all when do we need to call report_error. @@ -4246,8 +4244,8 @@ int subselect_indexsubquery_engine::exec() make_prev_keypart_map(tab-> ref.key_parts), HA_READ_KEY_EXACT); - if (error && - error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) + if (unlikely(error && + error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)) error= report_error(table, error); else { @@ -4269,7 +4267,7 @@ int subselect_indexsubquery_engine::exec() error= table->file->ha_index_next_same(table->record[0], tab->ref.key_buff, tab->ref.key_length); - if (error && error != HA_ERR_END_OF_FILE) + if (unlikely(error && error != HA_ERR_END_OF_FILE)) { error= report_error(table, error); break; @@ -4282,7 +4280,7 @@ int subselect_indexsubquery_engine::exec() *tab->ref.null_ref_key= 1; null_finding= 1; /* Check if there exists a row with a null value in the index */ - if ((error= (safe_index_read(tab) == 1))) + if (unlikely((error= (safe_index_read(tab) == 1)))) break; } } @@ -5425,8 +5423,8 @@ int subselect_hash_sj_engine::exec() DBUG_ASSERT(materialize_join->optimization_state == JOIN::OPTIMIZATION_DONE && !is_materialized); materialize_join->exec(); - if ((res= MY_TEST(materialize_join->error || thd->is_fatal_error || - thd->is_error()))) + if (unlikely((res= MY_TEST(materialize_join->error || thd->is_fatal_error || + thd->is_error())))) goto err; /* @@ -5784,14 +5782,14 @@ Ordered_key::cmp_keys_by_row_data(ha_rows a, ha_rows b) rowid_a= row_num_to_rowid + a * rowid_length; rowid_b= row_num_to_rowid + b * rowid_length; /* Fetch the rows for comparison. 
*/ - if ((error= tbl->file->ha_rnd_pos(tbl->record[0], rowid_a))) + if (unlikely((error= tbl->file->ha_rnd_pos(tbl->record[0], rowid_a)))) { /* purecov: begin inspected */ tbl->file->print_error(error, MYF(ME_FATALERROR)); // Sets fatal_error return 0; /* purecov: end */ } - if ((error= tbl->file->ha_rnd_pos(tbl->record[1], rowid_b))) + if (unlikely((error= tbl->file->ha_rnd_pos(tbl->record[1], rowid_b)))) { /* purecov: begin inspected */ tbl->file->print_error(error, MYF(ME_FATALERROR)); // Sets fatal_error @@ -5873,7 +5871,7 @@ int Ordered_key::cmp_key_with_search_key(rownum_t row_num) int __attribute__((unused)) error; int cmp_res; - if ((error= tbl->file->ha_rnd_pos(tbl->record[0], cur_rowid))) + if (unlikely((error= tbl->file->ha_rnd_pos(tbl->record[0], cur_rowid)))) { /* purecov: begin inspected */ tbl->file->print_error(error, MYF(ME_FATALERROR)); // Sets fatal_error @@ -6222,7 +6220,7 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts, DBUG_ASSERT(cur_keyid == merge_keys_count); /* Populate the indexes with data from the temporary table. */ - if (tmp_table->file->ha_rnd_init_with_error(1)) + if (unlikely(tmp_table->file->ha_rnd_init_with_error(1))) return TRUE; tmp_table->file->extra_opt(HA_EXTRA_CACHE, current_thd->variables.read_buff_size); @@ -6230,17 +6228,12 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts, while (TRUE) { error= tmp_table->file->ha_rnd_next(tmp_table->record[0]); - if (error == HA_ERR_RECORD_DELETED) - { - /* We get this for duplicate records that should not be in tmp_table. */ - continue; - } /* This is a temp table that we fully own, there should be no other cause to stop the iteration than EOF. */ DBUG_ASSERT(!error || error == HA_ERR_END_OF_FILE); - if (error == HA_ERR_END_OF_FILE) + if (unlikely(error == HA_ERR_END_OF_FILE)) { DBUG_ASSERT(cur_rownum == tmp_table->file->stats.records); break; @@ -6460,7 +6453,7 @@ bool subselect_rowid_merge_engine::partial_match() DBUG_ASSERT(!pq.elements); /* All data accesses during execution are via handler::ha_rnd_pos() */ - if (tmp_table->file->ha_rnd_init_with_error(0)) + if (unlikely(tmp_table->file->ha_rnd_init_with_error(0))) { res= FALSE; goto end; @@ -6666,7 +6659,7 @@ bool subselect_table_scan_engine::partial_match() int error; bool res; - if (tmp_table->file->ha_rnd_init_with_error(1)) + if (unlikely(tmp_table->file->ha_rnd_init_with_error(1))) { res= FALSE; goto end; @@ -6677,12 +6670,8 @@ bool subselect_table_scan_engine::partial_match() for (;;) { error= tmp_table->file->ha_rnd_next(tmp_table->record[0]); - if (error) { - if (error == HA_ERR_RECORD_DELETED) - { - error= 0; - continue; - } + if (unlikely(error)) + { if (error == HA_ERR_END_OF_FILE) { error= 0; diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 4a951896135..19e94be7882 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -999,7 +999,7 @@ bool Aggregator_distinct::add() */ return tree->unique_add(table->record[0] + table->s->null_bytes); } - if ((error= table->file->ha_write_tmp_row(table->record[0])) && + if (unlikely((error= table->file->ha_write_tmp_row(table->record[0]))) && table->file->is_fatal_error(error, HA_CHECK_DUP)) return TRUE; return FALSE; @@ -1133,6 +1133,7 @@ Item_sum_num::fix_fields(THD *thd, Item **ref) return TRUE; set_if_bigger(decimals, args[i]->decimals); m_with_subquery|= args[i]->with_subquery(); + with_param|= args[i]->with_param; with_window_func|= args[i]->with_window_func; } result_field=0; @@ -1166,6 +1167,7 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref) 
DBUG_RETURN(TRUE); m_with_subquery= args[0]->with_subquery(); + with_param= args[0]->with_param; with_window_func|= args[0]->with_window_func; fix_length_and_dec(); @@ -1269,6 +1271,12 @@ Item_sum_sp::Item_sum_sp(THD *thd, Name_resolution_context *context_arg, m_sp= sp; } +Item_sum_sp::Item_sum_sp(THD *thd, Item_sum_sp *item): + Item_sum(thd, item), Item_sp(thd, item) +{ + maybe_null= item->maybe_null; + quick_group= item->quick_group; +} bool Item_sum_sp::fix_fields(THD *thd, Item **ref) @@ -1388,7 +1396,7 @@ Item_sum_sp::fix_length_and_dec() { DBUG_ENTER("Item_sum_sp::fix_length_and_dec"); DBUG_ASSERT(sp_result_field); - Type_std_attributes::set(sp_result_field); + Type_std_attributes::set(sp_result_field->type_std_attributes()); Item_sum::fix_length_and_dec(); DBUG_VOID_RETURN; } @@ -1400,6 +1408,14 @@ Item_sum_sp::func_name() const return Item_sp::func_name(thd); } +Item* Item_sum_sp::copy_or_same(THD *thd) +{ + Item_sum_sp *copy_item= new (thd->mem_root) Item_sum_sp(thd, this); + copy_item->init_result_field(thd, max_length, maybe_null, + ©_item->null_value, ©_item->name); + return copy_item; +} + /*********************************************************************** ** reset and add of sum_func ***********************************************************************/ @@ -2027,7 +2043,7 @@ double Item_sum_std::val_real() { DBUG_ASSERT(fixed == 1); double nr= Item_sum_variance::val_real(); - if (my_isinf(nr)) + if (std::isinf(nr)) return DBL_MAX; DBUG_ASSERT(nr >= 0.0); return sqrt(nr); @@ -2458,7 +2474,7 @@ Item *Item_sum_max::copy_or_same(THD* thd) bool Item_sum_max::add() { - Item *tmp_item; + Item * UNINIT_VAR(tmp_item); DBUG_ENTER("Item_sum_max::add"); DBUG_PRINT("enter", ("this: %p", this)); @@ -2663,7 +2679,7 @@ void Item_sum_num::reset_field() void Item_sum_hybrid::reset_field() { - Item *tmp_item, *arg0; + Item *UNINIT_VAR(tmp_item), *arg0; DBUG_ENTER("Item_sum_hybrid::reset_field"); arg0= args[0]; @@ -3020,7 +3036,7 @@ Item *Item_sum_avg::result_item(THD *thd, Field *field) void Item_sum_hybrid::update_field() { DBUG_ENTER("Item_sum_hybrid::update_field"); - Item *tmp_item; + Item *UNINIT_VAR(tmp_item); if (unlikely(direct_added)) { tmp_item= args[0]; @@ -3900,6 +3916,7 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref) args[i]->check_cols(1)) return TRUE; m_with_subquery|= args[i]->with_subquery(); + with_param|= args[i]->with_param; with_window_func|= args[i]->with_window_func; } diff --git a/sql/item_sum.h b/sql/item_sum.h index 96f115357f9..b0dea818f01 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -1297,11 +1297,16 @@ public: Item_sum_sp(THD *thd, Name_resolution_context *context_arg, sp_name *name, sp_head *sp, List &list); + Item_sum_sp(THD *thd, Item_sum_sp *item); enum Sumfunctype sum_func () const { return SP_AGGREGATE_FUNC; } + Field *create_field_for_create_select(TABLE *table) + { + return create_table_field_from_handler(table); + } void fix_length_and_dec(); bool fix_fields(THD *thd, Item **ref); const char *func_name() const; @@ -1362,6 +1367,7 @@ public: } Item *get_copy(THD *thd) { return get_item_copy(thd, this); } + Item *copy_or_same(THD *thd); }; /* Items to get the value of a stored sum function */ diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 5f4a489fca1..e01114fb0ad 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -257,8 +257,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, break; case 'w': tmp= (char*) val + 1; - if ((weekday= (int) my_strtoll10(val, &tmp, &error)) < 0 || - weekday >= 7) + if 
(unlikely((weekday= (int) my_strtoll10(val, &tmp, &error)) < 0 || + weekday >= 7)) goto err; /* We should use the same 1 - 7 scale for %w as for %W */ if (!weekday) @@ -279,9 +279,10 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, sunday_first_n_first_week_non_iso= (*ptr=='U' || *ptr== 'V'); strict_week_number= (*ptr=='V' || *ptr=='v'); tmp= (char*) val + MY_MIN(val_len, 2); - if ((week_number= (int) my_strtoll10(val, &tmp, &error)) < 0 || - (strict_week_number && !week_number) || - week_number > 53) + if (unlikely((week_number= + (int) my_strtoll10(val, &tmp, &error)) < 0 || + (strict_week_number && !week_number) || + week_number > 53)) goto err; val= tmp; break; @@ -331,7 +332,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, default: goto err; } - if (error) // Error from my_strtoll10 + if (unlikely(error)) // Error from my_strtoll10 goto err; } else if (!my_isspace(cs, *ptr)) @@ -3232,13 +3233,7 @@ void Item_func_str_to_date::fix_length_and_dec() if (agg_arg_charsets(collation, args, 2, MY_COLL_ALLOW_CONV, 1)) return; if (collation.collation->mbminlen > 1) - { -#if MYSQL_VERSION_ID > 50500 internal_charset= &my_charset_utf8mb4_general_ci; -#else - internal_charset= &my_charset_utf8_general_ci; -#endif - } maybe_null= true; set_handler(&type_handler_datetime2); diff --git a/sql/item_vers.cc b/sql/item_vers.cc index 32a6c67f2fe..d7361f687f9 100644 --- a/sql/item_vers.cc +++ b/sql/item_vers.cc @@ -26,9 +26,9 @@ #include "tztime.h" #include "item.h" -Item_func_vtq_ts::Item_func_vtq_ts(THD *thd, Item* a, TR_table::field_id_t _vtq_field) : +Item_func_trt_ts::Item_func_trt_ts(THD *thd, Item* a, TR_table::field_id_t _trt_field) : Item_datetimefunc(thd, a), - vtq_field(_vtq_field) + trt_field(_trt_field) { decimals= 6; null_value= true; @@ -37,7 +37,7 @@ Item_func_vtq_ts::Item_func_vtq_ts(THD *thd, Item* a, TR_table::field_id_t _vtq_ bool -Item_func_vtq_ts::get_date(MYSQL_TIME *res, ulonglong fuzzy_date) +Item_func_trt_ts::get_date(MYSQL_TIME *res, ulonglong fuzzy_date) { THD *thd= current_thd; // can it differ from constructor's? 
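The extract_date_time() hunks above validate every my_strtoll10() result against both the conversion error flag and the documented range before using it. Below is a small stand-alone sketch of the same parse-then-range-check idiom, using plain strtol() and a hypothetical parse_weekday() helper rather than the server's API.

// Sketch of the validation style used for %w above: parse with a
// strtol-family call, then reject both conversion errors and values
// outside the documented range. Names here are illustrative.
#include <cerrno>
#include <cstdio>
#include <cstdlib>

static bool parse_weekday(const char *val, int *weekday)
{
  char *end= nullptr;
  errno= 0;
  long parsed= std::strtol(val, &end, 10);
  if (errno != 0 || end == val)        // conversion error
    return false;
  if (parsed < 0 || parsed >= 7)       // same 0..6 range as %w
    return false;
  *weekday= (int) parsed;
  return true;
}

int main()
{
  int wd;
  std::printf("'5'  -> %d\n", parse_weekday("5", &wd) ? wd : -1);
  std::printf("'9'  -> %d\n", parse_weekday("9", &wd) ? wd : -1);
  std::printf("'xx' -> %d\n", parse_weekday("xx", &wd) ? wd : -1);
  return 0;
}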
DBUG_ASSERT(thd); @@ -67,14 +67,14 @@ Item_func_vtq_ts::get_date(MYSQL_TIME *res, ulonglong fuzzy_date) return true; } - return trt[vtq_field]->get_date(res, fuzzy_date); + return trt[trt_field]->get_date(res, fuzzy_date); } -Item_func_vtq_id::Item_func_vtq_id(THD *thd, Item* a, TR_table::field_id_t _vtq_field, +Item_func_trt_id::Item_func_trt_id(THD *thd, Item* a, TR_table::field_id_t _trt_field, bool _backwards) : Item_longlong_func(thd, a), - vtq_field(_vtq_field), + trt_field(_trt_field), backwards(_backwards) { decimals= 0; @@ -83,9 +83,9 @@ Item_func_vtq_id::Item_func_vtq_id(THD *thd, Item* a, TR_table::field_id_t _vtq_ DBUG_ASSERT(arg_count == 1 && args[0]); } -Item_func_vtq_id::Item_func_vtq_id(THD *thd, Item* a, Item* b, TR_table::field_id_t _vtq_field) : +Item_func_trt_id::Item_func_trt_id(THD *thd, Item* a, Item* b, TR_table::field_id_t _trt_field) : Item_longlong_func(thd, a, b), - vtq_field(_vtq_field), + trt_field(_trt_field), backwards(false) { decimals= 0; @@ -95,7 +95,7 @@ Item_func_vtq_id::Item_func_vtq_id(THD *thd, Item* a, Item* b, TR_table::field_i } longlong -Item_func_vtq_id::get_by_trx_id(ulonglong trx_id) +Item_func_trt_id::get_by_trx_id(ulonglong trx_id) { THD *thd= current_thd; DBUG_ASSERT(thd); @@ -111,11 +111,11 @@ Item_func_vtq_id::get_by_trx_id(ulonglong trx_id) if (null_value) return 0; - return trt[vtq_field]->val_int(); + return trt[trt_field]->val_int(); } longlong -Item_func_vtq_id::get_by_commit_ts(MYSQL_TIME &commit_ts, bool backwards) +Item_func_trt_id::get_by_commit_ts(MYSQL_TIME &commit_ts, bool backwards) { THD *thd= current_thd; DBUG_ASSERT(thd); @@ -123,17 +123,17 @@ Item_func_vtq_id::get_by_commit_ts(MYSQL_TIME &commit_ts, bool backwards) TR_table trt(thd); null_value= !trt.query(commit_ts, backwards); if (null_value) - return 0; + return backwards ? 
ULONGLONG_MAX : 0; - return trt[vtq_field]->val_int(); + return trt[trt_field]->val_int(); } longlong -Item_func_vtq_id::val_int() +Item_func_trt_id::val_int() { if (args[0]->is_null()) { - if (arg_count < 2 || vtq_field == TR_table::FLD_TRX_ID) + if (arg_count < 2 || trt_field == TR_table::FLD_TRX_ID) { null_value= true; return 0; @@ -157,7 +157,7 @@ Item_func_vtq_id::val_int() } } -Item_func_vtq_trx_sees::Item_func_vtq_trx_sees(THD *thd, Item* a, Item* b) : +Item_func_trt_trx_sees::Item_func_trt_trx_sees(THD *thd, Item* a, Item* b) : Item_bool_func(thd, a, b), accept_eq(false) { @@ -166,7 +166,7 @@ Item_func_vtq_trx_sees::Item_func_vtq_trx_sees(THD *thd, Item* a, Item* b) : } longlong -Item_func_vtq_trx_sees::val_int() +Item_func_trt_trx_sees::val_int() { THD *thd= current_thd; DBUG_ASSERT(thd); diff --git a/sql/item_vers.h b/sql/item_vers.h index 39ed4ecda1f..17ad3daa73c 100644 --- a/sql/item_vers.h +++ b/sql/item_vers.h @@ -22,47 +22,47 @@ #pragma interface /* gcc class implementation */ #endif -class Item_func_vtq_ts: public Item_datetimefunc +class Item_func_trt_ts: public Item_datetimefunc { - TR_table::field_id_t vtq_field; + TR_table::field_id_t trt_field; public: - Item_func_vtq_ts(THD *thd, Item* a, TR_table::field_id_t _vtq_field); + Item_func_trt_ts(THD *thd, Item* a, TR_table::field_id_t _trt_field); const char *func_name() const { - if (vtq_field == TR_table::FLD_BEGIN_TS) + if (trt_field == TR_table::FLD_BEGIN_TS) { - return "vtq_begin_ts"; + return "trt_begin_ts"; } - return "vtq_commit_ts"; + return "trt_commit_ts"; } bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date); Item *get_copy(THD *thd) - { return get_item_copy(thd, this); } + { return get_item_copy(thd, this); } void fix_length_and_dec() { fix_attributes_datetime(decimals); } }; -class Item_func_vtq_id : public Item_longlong_func +class Item_func_trt_id : public Item_longlong_func { - TR_table::field_id_t vtq_field; + TR_table::field_id_t trt_field; bool backwards; longlong get_by_trx_id(ulonglong trx_id); longlong get_by_commit_ts(MYSQL_TIME &commit_ts, bool backwards); public: - Item_func_vtq_id(THD *thd, Item* a, TR_table::field_id_t _vtq_field, bool _backwards= false); - Item_func_vtq_id(THD *thd, Item* a, Item* b, TR_table::field_id_t _vtq_field); + Item_func_trt_id(THD *thd, Item* a, TR_table::field_id_t _trt_field, bool _backwards= false); + Item_func_trt_id(THD *thd, Item* a, Item* b, TR_table::field_id_t _trt_field); const char *func_name() const { - switch (vtq_field) + switch (trt_field) { case TR_table::FLD_TRX_ID: - return "vtq_trx_id"; + return "trt_trx_id"; case TR_table::FLD_COMMIT_ID: - return "vtq_commit_id"; + return "trt_commit_id"; case TR_table::FLD_ISO_LEVEL: - return "vtq_iso_level"; + return "trt_iso_level"; default: DBUG_ASSERT(0); } @@ -77,37 +77,37 @@ public: longlong val_int(); Item *get_copy(THD *thd) - { return get_item_copy(thd, this); } + { return get_item_copy(thd, this); } }; -class Item_func_vtq_trx_sees : public Item_bool_func +class Item_func_trt_trx_sees : public Item_bool_func { protected: bool accept_eq; public: - Item_func_vtq_trx_sees(THD *thd, Item* a, Item* b); + Item_func_trt_trx_sees(THD *thd, Item* a, Item* b); const char *func_name() const { - return "vtq_trx_sees"; + return "trt_trx_sees"; } longlong val_int(); Item *get_copy(THD *thd) - { return get_item_copy(thd, this); } + { return get_item_copy(thd, this); } }; -class Item_func_vtq_trx_sees_eq : - public Item_func_vtq_trx_sees +class Item_func_trt_trx_sees_eq : + public Item_func_trt_trx_sees { public: - 
Item_func_vtq_trx_sees_eq(THD *thd, Item* a, Item* b) : - Item_func_vtq_trx_sees(thd, a, b) + Item_func_trt_trx_sees_eq(THD *thd, Item* a, Item* b) : + Item_func_trt_trx_sees(thd, a, b) { accept_eq= true; } const char *func_name() const { - return "vtq_trx_sees_eq"; + return "trt_trx_sees_eq"; } }; diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index a4221661ae4..b38e06b3778 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -2646,6 +2646,10 @@ my_xpath_parse_VariableReference(MY_XPATH *xpath) sp_variable *spv; const Sp_rcontext_handler *rh; LEX *lex; + /* + We call lex->find_variable() rather than thd->lex->spcont->find_variable() + to make sure package body variables are properly supported. + */ if ((lex= thd->lex) && (spv= lex->find_variable(&name, &rh))) { diff --git a/sql/key.cc b/sql/key.cc index 8642820ff3f..34196a973c5 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -52,8 +52,8 @@ int find_ref_key(KEY *key, uint key_count, uchar *record, Field *field, uint *key_length, uint *keypart) { - reg2 int i; - reg3 KEY *key_info; + int i; + KEY *key_info; uint fieldpos; fieldpos= field->offset(record); @@ -499,7 +499,7 @@ int key_cmp(KEY_PART_INFO *key_part, const uchar *key, uint key_length) if (key_part->null_bit) { /* This key part allows null values; NULL is lower than everything */ - register bool field_is_null= key_part->field->is_null(); + bool field_is_null= key_part->field->is_null(); if (*key) // If range key is null { /* the range is expecting a null value */ diff --git a/sql/lex.h b/sql/lex.h index 7be3e4c9251..d336c273a18 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -722,7 +722,7 @@ static SYMBOL symbols[] = { { "YEAR", SYM(YEAR_SYM)}, { "YEAR_MONTH", SYM(YEAR_MONTH_SYM)}, { "ZEROFILL", SYM(ZEROFILL)}, - { "||", SYM(OR_OR_SYM)} + { "||", SYM(OR2_SYM)} }; diff --git a/sql/lex_string.h b/sql/lex_string.h index 25f2c83a372..a5209165be0 100644 --- a/sql/lex_string.h +++ b/sql/lex_string.h @@ -21,6 +21,7 @@ typedef struct st_mysql_const_lex_string LEX_CSTRING; /* Functions to compare if two lex strings are equal */ + static inline bool lex_string_cmp(CHARSET_INFO *charset, const LEX_CSTRING *a, const LEX_CSTRING *b) { @@ -30,6 +31,7 @@ static inline bool lex_string_cmp(CHARSET_INFO *charset, const LEX_CSTRING *a, /* Compare to LEX_CSTRING's and return 0 if equal */ + static inline bool cmp(const LEX_CSTRING *a, const LEX_CSTRING *b) { return (a->length != b->length || @@ -40,6 +42,7 @@ static inline bool cmp(const LEX_CSTRING *a, const LEX_CSTRING *b) Compare if two LEX_CSTRING are equal. 
Assumption is that character set is ASCII (like for plugin names) */ + static inline bool lex_string_eq(const LEX_CSTRING *a, const LEX_CSTRING *b) { if (a->length != b->length) @@ -48,12 +51,15 @@ static inline bool lex_string_eq(const LEX_CSTRING *a, const LEX_CSTRING *b) } /* - Compare if two LEX_CSTRING are equal in system character set - (field names, user variables, etc - but *not* table names) + To be used when calling lex_string_eq with STRING_WITH_LEN() as second + argument */ -static inline bool lex_string_syseq(const LEX_CSTRING *a, const LEX_CSTRING *b) + +static inline bool lex_string_eq(const LEX_CSTRING *a, const char *b, size_t b_length) { - return lex_string_cmp(system_charset_info, a, b) == 0; + if (a->length != b_length) + return 0; /* Different */ + return strcasecmp(a->str, b) == 0; } #endif /* LEX_STRING_INCLUDED */ diff --git a/sql/lock.cc b/sql/lock.cc index 2d9409fc7c3..17629f17291 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -379,7 +379,7 @@ end: static int lock_external(THD *thd, TABLE **tables, uint count) { - reg1 uint i; + uint i; int lock_type,error; DBUG_ENTER("lock_external"); @@ -393,7 +393,7 @@ static int lock_external(THD *thd, TABLE **tables, uint count) (*tables)->reginfo.lock_type <= TL_READ_NO_INSERT)) lock_type=F_RDLCK; - if ((error=(*tables)->file->ha_external_lock(thd,lock_type))) + if (unlikely((error=(*tables)->file->ha_external_lock(thd,lock_type)))) { (*tables)->file->print_error(error, MYF(0)); while (--i) @@ -439,7 +439,7 @@ void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock, bool free_lock) DBUG_ASSERT(!(sql_lock->flags & GET_LOCK_ON_THD)); my_free(sql_lock); } - if (!errors) + if (likely(!errors)) thd->clear_error(); THD_STAGE_INFO(thd, org_stage); DBUG_VOID_RETURN; @@ -539,7 +539,7 @@ void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table) { if (locked) { - reg1 uint i; + uint i; for (i=0; i < locked->table_count; i++) { if (locked->table[i] == table) @@ -726,7 +726,7 @@ static int unlock_external(THD *thd, TABLE **table,uint count) if ((*table)->current_lock != F_UNLCK) { (*table)->current_lock = F_UNLCK; - if ((error=(*table)->file->ha_external_lock(thd, F_UNLCK))) + if (unlikely((error=(*table)->file->ha_external_lock(thd, F_UNLCK)))) { error_code= error; (*table)->file->print_error(error, MYF(0)); diff --git a/sql/log.cc b/sql/log.cc index 8ed53a60195..ff4b0366b43 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2044,7 +2044,7 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all) - We are in a transaction and a full transaction is committed. Otherwise, we accumulate the changes. 
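The lex_string.h hunk above replaces lex_string_syseq() with a lex_string_eq() overload taking an explicit (pointer, length) pair, so callers can pass STRING_WITH_LEN() literals and reject mismatched lengths before paying for a character compare. A minimal sketch of that length-first idiom with simplified stand-in types; lex_cstr, WITH_LEN and str_eq are illustrative, and strcasecmp is assumed to be available via POSIX <strings.h>.

// Length-first, case-insensitive comparison against a (ptr, len) literal.
#include <cstddef>
#include <cstdio>
#include <strings.h>   // strcasecmp (POSIX)

struct lex_cstr { const char *str; size_t length; };

#define WITH_LEN(s) (s), (sizeof(s) - 1)

static bool str_eq(const lex_cstr *a, const char *b, size_t b_length)
{
  if (a->length != b_length)
    return false;                      // cheap reject on length
  return strcasecmp(a->str, b) == 0;   // ASCII, case-insensitive
}

int main()
{
  lex_cstr name= { "InnoDB", 6 };
  std::printf("%d\n", str_eq(&name, WITH_LEN("innodb")));   // 1
  std::printf("%d\n", str_eq(&name, WITH_LEN("innodb2")));  // 0
  return 0;
}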
*/ - if (!error && ending_trans(thd, all)) + if (likely(!error) && ending_trans(thd, all)) error= binlog_commit_flush_trx_cache(thd, all, cache_mngr); /* @@ -2122,7 +2122,7 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all) */ error |= binlog_truncate_trx_cache(thd, cache_mngr, all); } - else if (!error) + else if (likely(!error)) { if (ending_trans(thd, all) && trans_cannot_safely_rollback(thd, all)) error= binlog_rollback_flush_trx_cache(thd, all, cache_mngr); @@ -2174,7 +2174,7 @@ void MYSQL_BIN_LOG::set_write_error(THD *thd, bool is_transactional) write_error= 1; - if (check_write_error(thd)) + if (unlikely(check_write_error(thd))) DBUG_VOID_RETURN; if (my_errno == EFBIG) @@ -2202,7 +2202,7 @@ bool MYSQL_BIN_LOG::check_write_error(THD *thd) bool checked= FALSE; - if (!thd->is_error()) + if (likely(!thd->is_error())) DBUG_RETURN(checked); switch (thd->get_stmt_da()->sql_errno()) @@ -2273,7 +2273,7 @@ static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv) or "RELEASE S" without the preceding "SAVEPOINT S" in the binary log. */ - if (!(error= mysql_bin_log.write(&qinfo))) + if (likely(!(error= mysql_bin_log.write(&qinfo)))) binlog_trans_log_savepos(thd, (my_off_t*) sv); DBUG_RETURN(error); @@ -2459,7 +2459,7 @@ static int find_uniq_filename(char *name, ulong next_log_number) uint i; char buff[FN_REFLEN], ext_buf[FN_REFLEN]; struct st_my_dir *dir_info; - reg1 struct fileinfo *file_info; + struct fileinfo *file_info; ulong max_found, next, UNINIT_VAR(number); size_t buf_length, length; char *start, *end; @@ -2474,7 +2474,8 @@ static int find_uniq_filename(char *name, ulong next_log_number) length= (size_t) (end - start + 1); if ((DBUG_EVALUATE_IF("error_unique_log_filename", 1, - !(dir_info= my_dir(buff,MYF(MY_DONT_SORT)))))) + unlikely(!(dir_info= my_dir(buff, + MYF(MY_DONT_SORT))))))) { // This shouldn't happen strmov(end,".1"); // use name+1 DBUG_RETURN(1); @@ -2666,7 +2667,7 @@ bool MYSQL_LOG::open( #ifdef EMBEDDED_LIBRARY "embedded library\n", my_progname, server_version, MYSQL_COMPILATION_COMMENT -#elif _WIN32 +#elif defined(_WIN32) "started with:\nTCP Port: %d, Named Pipe: %s\n", my_progname, server_version, MYSQL_COMPILATION_COMMENT, mysqld_port, mysqld_unix_port @@ -2784,10 +2785,10 @@ int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name, if (!fn_ext(log_name)[0]) { if (DBUG_EVALUATE_IF("binlog_inject_new_name_error", TRUE, FALSE) || - find_uniq_filename(new_name, next_log_number)) + unlikely(find_uniq_filename(new_name, next_log_number))) { THD *thd= current_thd; - if (thd) + if (unlikely(thd)) my_error(ER_NO_UNIQUE_LOGFILE, MYF(ME_FATALERROR), log_name); sql_print_error(ER_DEFAULT(ER_NO_UNIQUE_LOGFILE), log_name); return 1; @@ -3136,7 +3137,7 @@ end: err: error= 1; - if (! 
write_error) + if (!write_error) { write_error= 1; sql_print_error(ER_THD(thd, ER_ERROR_ON_WRITE), name, errno); @@ -4192,7 +4193,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD *thd, bool create_new_log, for (;;) { - if ((error= my_delete(linfo.log_file_name, MYF(0))) != 0) + if (unlikely((error= my_delete(linfo.log_file_name, MYF(0))))) { if (my_errno == ENOENT) { @@ -4235,7 +4236,8 @@ bool MYSQL_BIN_LOG::reset_logs(THD *thd, bool create_new_log, /* Start logging with a new file */ close(LOG_CLOSE_INDEX | LOG_CLOSE_TO_BE_OPENED); - if ((error= my_delete(index_file_name, MYF(0)))) // Reset (open will update) + // Reset (open will update) + if (unlikely((error= my_delete(index_file_name, MYF(0))))) { if (my_errno == ENOENT) { @@ -4264,8 +4266,8 @@ bool MYSQL_BIN_LOG::reset_logs(THD *thd, bool create_new_log, } } if (create_new_log && !open_index_file(index_file_name, 0, FALSE)) - if ((error= open(save_name, log_type, 0, next_log_number, - io_cache_type, max_size, 0, FALSE))) + if (unlikely((error= open(save_name, log_type, 0, next_log_number, + io_cache_type, max_size, 0, FALSE)))) goto err; my_free((void *) save_name); @@ -4413,8 +4415,9 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included) Read the next log file name from the index file and pass it back to the caller. */ - if((error=find_log_pos(&rli->linfo, rli->event_relay_log_name, 0)) || - (error=find_next_log(&rli->linfo, 0))) + if (unlikely((error=find_log_pos(&rli->linfo, rli->event_relay_log_name, + 0))) || + unlikely((error=find_next_log(&rli->linfo, 0)))) { sql_print_error("next log error: %d offset: %llu log: %s included: %d", error, rli->linfo.index_file_offset, @@ -4529,14 +4532,14 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log, if (need_mutex) mysql_mutex_lock(&LOCK_index); - if ((error=find_log_pos(&log_info, to_log, 0 /*no mutex*/))) + if (unlikely((error=find_log_pos(&log_info, to_log, 0 /*no mutex*/))) ) { sql_print_error("MYSQL_BIN_LOG::purge_logs was called with file %s not " "listed in the index.", to_log); goto err; } - if ((error= open_purge_index_file(TRUE))) + if (unlikely((error= open_purge_index_file(TRUE)))) { sql_print_error("MYSQL_BIN_LOG::purge_logs failed to sync the index file."); goto err; @@ -4546,12 +4549,12 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log, File name exists in index file; delete until we find this file or a file that is used. */ - if ((error=find_log_pos(&log_info, NullS, 0 /*no mutex*/))) + if (unlikely((error=find_log_pos(&log_info, NullS, 0 /*no mutex*/)))) goto err; while ((strcmp(to_log,log_info.log_file_name) || (exit_loop=included)) && can_purge_log(log_info.log_file_name)) { - if ((error= register_purge_index_entry(log_info.log_file_name))) + if (unlikely((error= register_purge_index_entry(log_info.log_file_name)))) { sql_print_error("MYSQL_BIN_LOG::purge_logs failed to copy %s to register file.", log_info.log_file_name); @@ -4564,14 +4567,14 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log, DBUG_EXECUTE_IF("crash_purge_before_update_index", DBUG_SUICIDE();); - if ((error= sync_purge_index_file())) + if (unlikely((error= sync_purge_index_file()))) { sql_print_error("MYSQL_BIN_LOG::purge_logs failed to flush register file."); goto err; } /* We know how many files to delete. Update index file. 
*/ - if ((error=update_log_index(&log_info, need_update_threads))) + if (unlikely((error=update_log_index(&log_info, need_update_threads)))) { sql_print_error("MYSQL_BIN_LOG::purge_logs failed to update the index file"); goto err; @@ -4662,8 +4665,9 @@ int MYSQL_BIN_LOG::sync_purge_index_file() int error= 0; DBUG_ENTER("MYSQL_BIN_LOG::sync_purge_index_file"); - if ((error= flush_io_cache(&purge_index_file)) || - (error= my_sync(purge_index_file.file, MYF(MY_WME|MY_SYNC_FILESIZE)))) + if (unlikely((error= flush_io_cache(&purge_index_file))) || + unlikely((error= my_sync(purge_index_file.file, + MYF(MY_WME | MY_SYNC_FILESIZE))))) DBUG_RETURN(error); DBUG_RETURN(error); @@ -4674,8 +4678,9 @@ int MYSQL_BIN_LOG::register_purge_index_entry(const char *entry) int error= 0; DBUG_ENTER("MYSQL_BIN_LOG::register_purge_index_entry"); - if ((error=my_b_write(&purge_index_file, (const uchar*)entry, strlen(entry))) || - (error=my_b_write(&purge_index_file, (const uchar*)"\n", 1))) + if (unlikely((error=my_b_write(&purge_index_file, (const uchar*)entry, + strlen(entry)))) || + unlikely((error=my_b_write(&purge_index_file, (const uchar*)"\n", 1)))) DBUG_RETURN (error); DBUG_RETURN(error); @@ -4698,7 +4703,8 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *reclaimed_space, DBUG_ASSERT(my_b_inited(&purge_index_file)); - if ((error=reinit_io_cache(&purge_index_file, READ_CACHE, 0, 0, 0))) + if (unlikely((error= reinit_io_cache(&purge_index_file, READ_CACHE, 0, 0, + 0)))) { sql_print_error("MYSQL_BIN_LOG::purge_index_entry failed to reinit register file " "for read"); @@ -4727,7 +4733,8 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *reclaimed_space, /* Get rid of the trailing '\n' */ log_info.log_file_name[length-1]= 0; - if (!mysql_file_stat(m_key_file_log, log_info.log_file_name, &s, MYF(0))) + if (unlikely(!mysql_file_stat(m_key_file_log, log_info.log_file_name, &s, + MYF(0)))) { if (my_errno == ENOENT) { @@ -4774,7 +4781,8 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *reclaimed_space, } else { - if ((error= find_log_pos(&check_log_info, log_info.log_file_name, need_mutex))) + if (unlikely((error= find_log_pos(&check_log_info, + log_info.log_file_name, need_mutex)))) { if (error != LOG_INFO_EOF) { @@ -4887,7 +4895,7 @@ int MYSQL_BIN_LOG::purge_logs_before_date(time_t purge_time) mysql_mutex_lock(&LOCK_index); to_log[0]= 0; - if ((error=find_log_pos(&log_info, NullS, 0 /*no mutex*/))) + if (unlikely((error=find_log_pos(&log_info, NullS, 0 /*no mutex*/)))) goto err; while (strcmp(log_file_name, log_info.log_file_name) && @@ -5154,7 +5162,7 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock) We have to do this here and not in open as we want to store the new file name in the current binary log file. */ - if ((error= generate_new_name(new_name, name, 0))) + if (unlikely((error= generate_new_name(new_name, name, 0)))) { #ifdef ENABLE_AND_FIX_HANG close_on_error= TRUE; @@ -5198,7 +5206,7 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock) log rotation should give the waiting thread a signal to discover EOF and move on to the next log. */ - if ((error= flush_io_cache(&log_file))) + if (unlikely((error= flush_io_cache(&log_file)))) { close_on_error= TRUE; goto end; @@ -5244,7 +5252,7 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock) /* reopen index binlog file, BUG#34582 */ file_to_open= index_file_name; error= open_index_file(index_file_name, 0, FALSE); - if (!error) + if (likely(!error)) { /* reopen the binary log file. 
*/ file_to_open= new_name_ptr; @@ -5253,7 +5261,7 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock) } /* handle reopening errors */ - if (error) + if (unlikely(error)) { my_error(ER_CANT_OPEN_FILE, MYF(ME_FATALERROR), file_to_open, error); close_on_error= TRUE; @@ -5269,7 +5277,7 @@ end: mysql_file_close(old_file, MYF(MY_WME)); } - if (error && close_on_error /* rotate or reopen failed */) + if (unlikely(error && close_on_error)) /* rotate or reopen failed */ { /* Close whatever was left opened. @@ -5395,7 +5403,7 @@ bool MYSQL_BIN_LOG::write_event_buffer(uchar* buf, uint len) error= new_file_without_locking(); err: my_safe_afree(ebuf, len); - if (!error) + if (likely(!error)) update_binlog_end_pos(); DBUG_RETURN(error); } @@ -5778,14 +5786,14 @@ int THD::binlog_write_table_map(TABLE *table, bool is_transactional, Annotate_rows_log_event anno(table->in_use, is_transactional, false); /* Annotate event should be written not more than once */ *with_annotate= 0; - if ((error= writer.write(&anno))) + if (unlikely((error= writer.write(&anno)))) { if (my_errno == EFBIG) cache_data->set_incident(); DBUG_RETURN(error); } } - if ((error= writer.write(&the_event))) + if (unlikely((error= writer.write(&the_event)))) DBUG_RETURN(error); binlog_table_maps++; @@ -6433,7 +6441,7 @@ err: bool check_purge= false; DBUG_ASSERT(!is_relay_log); - if (!error) + if (likely(!error)) { bool synced; @@ -6463,7 +6471,7 @@ err: it's list before dump-thread tries to send it */ update_binlog_end_pos(offset); - if ((error= rotate(false, &check_purge))) + if (unlikely((error= rotate(false, &check_purge)))) check_purge= false; } } @@ -6501,7 +6509,7 @@ err: checkpoint_and_purge(prev_binlog_id); } - if (error) + if (unlikely(error)) { set_write_error(thd, is_trans_cache); if (check_write_error(thd) && cache_data && @@ -6697,7 +6705,7 @@ int MYSQL_BIN_LOG::rotate(bool force_rotate, bool* check_purge) */ mark_xids_active(binlog_id, 1); - if ((error= new_file_without_locking())) + if (unlikely((error= new_file_without_locking()))) { /** Be conservative... There are possible lost events (eg, @@ -6893,7 +6901,7 @@ int MYSQL_BIN_LOG::rotate_and_purge(bool force_rotate, if (err_gtid < 0) error= 1; // otherwise error is propagated the user } - else if ((error= rotate(force_rotate, &check_purge))) + else if (unlikely((error= rotate(force_rotate, &check_purge)))) check_purge= false; /* NOTE: Run purge_logs wo/ holding LOCK_log because it does not need @@ -7134,6 +7142,8 @@ int query_error_code(THD *thd, bool not_killed) if (not_killed || (killed_mask_hard(thd->killed) == KILL_BAD_DATA)) { error= thd->is_error() ? 
thd->get_stmt_da()->sql_errno() : 0; + if (!error) + return error; /* thd->get_get_stmt_da()->sql_errno() might be ER_SERVER_SHUTDOWN or ER_QUERY_INTERRUPTED, So here we need to make sure that error @@ -7184,11 +7194,11 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd) if (likely(is_open())) { prev_binlog_id= current_binlog_id; - if (!(error= write_incident_already_locked(thd)) && - !(error= flush_and_sync(0))) + if (likely(!(error= write_incident_already_locked(thd))) && + likely(!(error= flush_and_sync(0)))) { update_binlog_end_pos(); - if ((error= rotate(false, &check_purge))) + if (unlikely((error= rotate(false, &check_purge)))) check_purge= false; } @@ -7431,7 +7441,7 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry) &wfc->LOCK_wait_commit, &stage_waiting_for_prior_transaction_to_commit, &old_stage); - while ((loc_waitee= wfc->waitee) && !orig_entry->thd->check_killed()) + while ((loc_waitee= wfc->waitee) && !orig_entry->thd->check_killed(1)) mysql_cond_wait(&wfc->COND_wait_commit, &wfc->LOCK_wait_commit); wfc->opaque_pointer= NULL; DBUG_PRINT("info", ("After waiting for prior commit, queued_by_other=%d", @@ -7864,7 +7874,8 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader) */ DBUG_ASSERT(!cache_mngr->stmt_cache.empty() || !cache_mngr->trx_cache.empty()); - if ((current->error= write_transaction_or_stmt(current, commit_id))) + if (unlikely((current->error= write_transaction_or_stmt(current, + commit_id)))) current->commit_errno= errno; strmake_buf(cache_mngr->last_commit_pos_file, log_file_name); @@ -7892,7 +7903,7 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader) } bool synced= 0; - if (flush_and_sync(&synced)) + if (unlikely(flush_and_sync(&synced))) { for (current= queue; current != NULL; current= current->next) { @@ -7916,12 +7927,13 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader) for (current= queue; current != NULL; current= current->next) { #ifdef HAVE_REPLICATION - if (!current->error && - repl_semisync_master. - report_binlog_update(current->thd, - current->cache_mngr->last_commit_pos_file, - current->cache_mngr-> - last_commit_pos_offset)) + if (likely(!current->error) && + unlikely(repl_semisync_master. 
+ report_binlog_update(current->thd, + current->cache_mngr-> + last_commit_pos_file, + current->cache_mngr-> + last_commit_pos_offset))) { current->error= ER_ERROR_ON_WRITE; current->commit_errno= -1; @@ -7939,7 +7951,7 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader) */ update_binlog_end_pos(commit_offset); - if (any_error) + if (unlikely(any_error)) sql_print_error("Failed to run 'after_flush' hooks"); } @@ -8004,7 +8016,7 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader) { last= current->next == NULL; #ifdef HAVE_REPLICATION - if (!current->error) + if (likely(!current->error)) current->error= repl_semisync_master.wait_after_sync(current->cache_mngr-> last_commit_pos_file, @@ -8066,7 +8078,7 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader) DEBUG_SYNC(leader->thd, "commit_loop_entry_commit_ordered"); ++num_commits; - if (current->cache_mngr->using_xa && !current->error && + if (current->cache_mngr->using_xa && likely(!current->error) && DBUG_EVALUATE_IF("skip_commit_ordered", 0, 1)) run_commit_ordered(current->thd, current->all); current->thd->wakeup_subsequent_commits(current->error); @@ -8158,12 +8170,12 @@ MYSQL_BIN_LOG::write_transaction_or_stmt(group_commit_entry *entry, } } - if (mngr->get_binlog_cache_log(FALSE)->error) // Error on read + if (unlikely(mngr->get_binlog_cache_log(FALSE)->error)) { entry->error_cache= &mngr->stmt_cache.cache_log; DBUG_RETURN(ER_ERROR_ON_WRITE); } - if (mngr->get_binlog_cache_log(TRUE)->error) // Error on read + if (unlikely(mngr->get_binlog_cache_log(TRUE)->error)) // Error on read { entry->error_cache= &mngr->trx_cache.cache_log; DBUG_RETURN(ER_ERROR_ON_WRITE); @@ -8455,7 +8467,8 @@ void MYSQL_BIN_LOG::close(uint exiting) if ((exiting & LOG_CLOSE_INDEX) && my_b_inited(&index_file)) { end_io_cache(&index_file); - if (mysql_file_close(index_file.file, MYF(0)) < 0 && ! write_error) + if (unlikely(mysql_file_close(index_file.file, MYF(0)) < 0) && + ! write_error) { write_error= 1; sql_print_error(ER_THD_OR_DEFAULT(current_thd, ER_ERROR_ON_WRITE), @@ -8516,10 +8529,9 @@ void MYSQL_BIN_LOG::set_max_size(ulong max_size_arg) 0 String is not a number */ -static bool test_if_number(register const char *str, - ulong *res, bool allow_wildcards) +static bool test_if_number(const char *str, ulong *res, bool allow_wildcards) { - reg2 int flag; + int flag; const char *start; DBUG_ENTER("test_if_number"); @@ -8775,16 +8787,20 @@ void sql_print_information(const char *format, ...) 
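The log.cc hunk starting here splits sql_print_information() into a thin variadic wrapper plus a va_list variant, sql_print_information_v(), that other code can forward its own argument list to; the disable_log_notes check moves into the va_list variant. A sketch of the same refactoring under assumed names (print_information, print_information_v, notes_disabled):

// Variadic wrapper forwarding to a va_list worker.
#include <cstdarg>
#include <cstdio>

static bool notes_disabled= false;

static void print_information_v(const char *format, va_list ap)
{
  if (notes_disabled)
    return;                            // skip notes during start/shutdown
  std::vfprintf(stderr, format, ap);
  std::fputc('\n', stderr);
}

static void print_information(const char *format, ...)
{
  va_list args;
  va_start(args, format);
  print_information_v(format, args);   // forward, don't re-implement
  va_end(args);
}

int main()
{
  print_information("server ready on port %d", 3306);
  return 0;
}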
va_list args; DBUG_ENTER("sql_print_information"); - if (disable_log_notes) - DBUG_VOID_RETURN; // Skip notes during start/shutdown - va_start(args, format); - error_log_print(INFORMATION_LEVEL, format, args); + sql_print_information_v(format, args); va_end(args); DBUG_VOID_RETURN; } +void sql_print_information_v(const char *format, va_list ap) +{ + if (disable_log_notes) + return; // Skip notes during start/shutdown + + error_log_print(INFORMATION_LEVEL, format, ap); +} void TC_LOG::run_prepare_ordered(THD *thd, bool all) @@ -10215,7 +10231,7 @@ MYSQL_BIN_LOG::do_binlog_recovery(const char *opt_name, bool do_xa_recovery) char log_name[FN_REFLEN]; int error; - if ((error= find_log_pos(&log_info, NullS, 1))) + if (unlikely((error= find_log_pos(&log_info, NullS, 1)))) { /* If there are no binlog files (LOG_INFO_EOF), then we still try to read @@ -10273,7 +10289,7 @@ MYSQL_BIN_LOG::do_binlog_recovery(const char *opt_name, bool do_xa_recovery) else { error= read_state_from_file(); - if (error == 2) + if (unlikely(error == 2)) { /* The binlog exists, but the .state file is missing. This is normal if diff --git a/sql/log.h b/sql/log.h index 098824d9ec8..7dfdb36c442 100644 --- a/sql/log.h +++ b/sql/log.h @@ -1111,6 +1111,7 @@ int vprint_msg_to_log(enum loglevel level, const char *format, va_list args); void sql_print_error(const char *format, ...); void sql_print_warning(const char *format, ...); void sql_print_information(const char *format, ...); +void sql_print_information_v(const char *format, va_list ap); typedef void (*sql_print_message_func)(const char *format, ...); extern sql_print_message_func sql_print_message_handlers[]; diff --git a/sql/log_event.cc b/sql/log_event.cc index cd47cbba9bd..7a6d0a1821b 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1,6 +1,6 @@ /* - Copyright (c) 2000, 2016, Oracle and/or its affiliates. - Copyright (c) 2009, 2016, MariaDB + Copyright (c) 2000, 2018, Oracle and/or its affiliates. 
+ Copyright (c) 2009, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -53,6 +53,7 @@ #include "rpl_constants.h" #include "sql_digest.h" #include "zlib.h" +#include "my_atomic.h" #define my_b_write_string(A, B) my_b_write((A), (uchar*)(B), (uint) (sizeof(B) - 1)) @@ -394,7 +395,7 @@ static bool pretty_print_str(IO_CACHE* cache, const char* str, int len) error= my_b_write_byte(cache, c); break; } - if (error) + if (unlikely(error)) goto err; } return my_b_write_byte(cache, '\''); @@ -1924,7 +1925,7 @@ Log_event* Log_event::read_log_event(IO_CACHE* file, res->register_temp_buf(event.release(), true); err: - if (error) + if (unlikely(error)) { DBUG_ASSERT(!res); #ifdef MYSQL_CLIENT @@ -3120,7 +3121,7 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td, typestr, sizeof(typestr)); error= copy_event_cache_to_string_and_reinit(&tmp_cache, &review_str); close_cached_file(&tmp_cache); - if (error) + if (unlikely(error)) return 0; switch (td->type(i)) // Converting a string to HEX format @@ -3740,15 +3741,15 @@ bool Log_event::print_base64(IO_CACHE* file, } if (my_b_tell(file) == 0) - if (my_b_write_string(file, "\nBINLOG '\n")) + if (unlikely(my_b_write_string(file, "\nBINLOG '\n"))) error= 1; - if (!error && my_b_printf(file, "%s\n", tmp_str)) + if (likely(!error) && unlikely(my_b_printf(file, "%s\n", tmp_str))) error= 1; - if (!more && !error) - if (my_b_printf(file, "'%s\n", print_event_info->delimiter)) + if (!more && likely(!error)) + if (unlikely(my_b_printf(file, "'%s\n", print_event_info->delimiter))) error= 1; my_free(tmp_str); - if (error) + if (unlikely(error)) goto err; } @@ -3855,7 +3856,7 @@ bool Log_event::print_base64(IO_CACHE* file, error= ev->print_verbose(&tmp_cache, print_event_info); close_cached_file(&tmp_cache); - if (error) + if (unlikely(error)) { delete ev; goto err; @@ -3868,7 +3869,7 @@ bool Log_event::print_base64(IO_CACHE* file, ev->count_row_events(print_event_info); #endif delete ev; - if (error) + if (unlikely(error)) goto err; } } @@ -4830,6 +4831,24 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, db= (char *)start; query= (char *)(start + db_len + 1); q_len= data_len - db_len -1; + + if (data_len && (data_len < db_len || + data_len < q_len || + data_len != (db_len + q_len + 1))) + { + q_len= 0; + query= NULL; + DBUG_VOID_RETURN; + } + + uint32 max_length= uint32(event_len - ((const char*)(end + db_len + 1) - + (buf - common_header_len))); + if (q_len != max_length) + { + q_len= 0; + query= NULL; + DBUG_VOID_RETURN; + } /** Append the db length at the end of the buffer. This will be used by Query_cache::send_result_to_client() in case the query cache is On. @@ -5376,6 +5395,19 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, you. */ thd->catalog= catalog_len ? 
(char *) catalog : (char *)""; + + size_t valid_len= Well_formed_prefix(system_charset_info, + db, db_len, NAME_LEN).length(); + + if (valid_len != db_len) + { + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + ER_THD(thd, ER_SLAVE_FATAL_ERROR), + "Invalid database name in Query event."); + thd->is_slave_error= true; + goto end; + } + new_db.length= db_len; new_db.str= (char *) rpl_filter->get_rewrite_db(db, &new_db.length); thd->set_db(&new_db); /* allocates a copy of 'db' */ @@ -5415,7 +5447,7 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, thd->variables.pseudo_thread_id= thread_id; // for temp tables DBUG_PRINT("query",("%s", thd->query())); - if (!(expected_error= error_code) || + if (unlikely(!(expected_error= error_code)) || ignored_error_code(expected_error) || !unexpected_error_code(expected_error)) { @@ -5517,7 +5549,23 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, } else thd->variables.collation_database= thd->db_charset; - + + { + const CHARSET_INFO *cs= thd->charset(); + /* + We cannot ask for parsing a statement using a character set + without state_maps (parser internal data). + */ + if (!cs->state_map) + { + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + ER_THD(thd, ER_SLAVE_FATAL_ERROR), + "character_set cannot be parsed"); + thd->is_slave_error= true; + goto end; + } + } + /* Record any GTID in the same transaction, so slave state is transactionally consistent. @@ -5531,8 +5579,10 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, rgi->gtid_pending= false; gtid= rgi->current_gtid; - if (rpl_global_gtid_slave_state->record_gtid(thd, >id, sub_id, - true, false, &hton)) + if (unlikely(rpl_global_gtid_slave_state->record_gtid(thd, >id, + sub_id, + true, false, + &hton))) { int errcode= thd->get_stmt_da()->sql_errno(); if (!is_parallel_retry_error(rgi, errcode)) @@ -5559,7 +5609,7 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, it is a concurrency issue or ignorable issue, effects of the statement should be rolled back. */ - if (expected_error && + if (unlikely(expected_error) && (ignored_error_code(expected_error) || concurrency_error_code(expected_error))) { @@ -5628,7 +5678,7 @@ START SLAVE; . Query: '%s'", expected_error, thd->query()); } /* If the query was not ignored, it is printed to the general log */ - if (!thd->is_error() || + if (likely(!thd->is_error()) || thd->get_stmt_da()->sql_errno() != ER_SLAVE_IGNORED_TABLE) general_log_write(thd, COM_QUERY, thd->query(), thd->query_length()); else @@ -5667,7 +5717,7 @@ compare_errors: DBUG_PRINT("info",("expected_error: %d sql_errno: %d", expected_error, actual_error)); - if ((expected_error && + if ((unlikely(expected_error) && !test_if_equal_repl_errors(expected_error, actual_error) && !concurrency_error_code(expected_error)) && !ignored_error_code(actual_error) && @@ -5702,7 +5752,7 @@ compare_errors: /* Other cases: mostly we expected no error and get one. */ - else if (thd->is_slave_error || thd->is_fatal_error) + else if (unlikely(thd->is_slave_error || thd->is_fatal_error)) { if (!is_parallel_retry_error(rgi, actual_error)) rli->report(ERROR_LEVEL, actual_error, rgi->gtid_info(), @@ -5752,7 +5802,7 @@ compare_errors: } end: - if (sub_id && !thd->is_slave_error) + if (unlikely(sub_id && !thd->is_slave_error)) rpl_global_gtid_slave_state->update_state_hash(sub_id, >id, hton, rgi); /* @@ -6073,7 +6123,13 @@ int Start_log_event_v3::do_apply_event(rpl_group_info *rgi) */ break; default: - /* this case is impossible */ + /* + This case is not expected. 
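A later hunk in this file (Gtid_log_event::do_apply_event) classifies each replicated group as transactional, DDL, or other and bumps a per-type Master_info counter with my_atomic_add64_explicit() under relaxed memory order. A sketch of that counting scheme using std::atomic; the master_stats struct and count_group() helper are illustrative stand-ins, not the server's types.

// Per-type group counters updated with relaxed atomics (monitoring only).
#include <atomic>
#include <cstdint>
#include <cstdio>

enum group_flags : uint32_t { FL_TRANSACTIONAL= 1, FL_DDL= 2 };

struct master_stats
{
  std::atomic<int64_t> total_trans_groups{0};
  std::atomic<int64_t> total_ddl_groups{0};
  std::atomic<int64_t> total_non_trans_groups{0};
};

static void count_group(master_stats *mi, uint32_t flags2)
{
  switch (flags2 & (FL_DDL | FL_TRANSACTIONAL))
  {
  case FL_TRANSACTIONAL:
    mi->total_trans_groups.fetch_add(1, std::memory_order_relaxed);
    break;
  case FL_DDL:
    mi->total_ddl_groups.fetch_add(1, std::memory_order_relaxed);
    break;
  default:
    mi->total_non_trans_groups.fetch_add(1, std::memory_order_relaxed);
  }
}

int main()
{
  master_stats mi;
  count_group(&mi, FL_TRANSACTIONAL);
  count_group(&mi, FL_DDL);
  count_group(&mi, 0);
  std::printf("trans=%lld ddl=%lld other=%lld\n",
              (long long) mi.total_trans_groups.load(),
              (long long) mi.total_ddl_groups.load(),
              (long long) mi.total_non_trans_groups.load());
  return 0;
}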
It can be either an event corruption or an + unsupported binary log version. + */ + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + ER_THD(thd, ER_SLAVE_FATAL_ERROR), + "Binlog version not supported"); DBUG_RETURN(1); } DBUG_RETURN(error); @@ -6992,6 +7048,9 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len, fields = (char*)field_lens + num_fields; table_name = fields + field_block_len; + if (strlen(table_name) > NAME_LEN) + goto err; + db = table_name + table_name_len + 1; DBUG_EXECUTE_IF ("simulate_invalid_address", db_len = data_len;); @@ -7419,7 +7478,7 @@ error: DBUG_EXECUTE_IF("LOAD_DATA_INFILE_has_fatal_error", thd->is_slave_error= 0; thd->is_fatal_error= 1;); - if (thd->is_slave_error) + if (unlikely(thd->is_slave_error)) { /* this err/sql_errno code is copy-paste from net_send_error() */ const char *err; @@ -7442,7 +7501,7 @@ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'", } free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) { char buf[256]; my_snprintf(buf, sizeof(buf), @@ -7973,6 +8032,23 @@ Gtid_log_event::do_apply_event(rpl_group_info *rgi) } DBUG_ASSERT((bits & OPTION_GTID_BEGIN) == 0); + + Master_info *mi=rgi->rli->mi; + switch (flags2 & (FL_DDL | FL_TRANSACTIONAL)) + { + case FL_TRANSACTIONAL: + my_atomic_add64_explicit((volatile int64 *)&mi->total_trans_groups, 1, + MY_MEMORY_ORDER_RELAXED); + break; + case FL_DDL: + my_atomic_add64_explicit((volatile int64 *)&mi->total_ddl_groups, 1, + MY_MEMORY_ORDER_RELAXED); + break; + default: + my_atomic_add64_explicit((volatile int64 *)&mi->total_non_trans_groups, 1, + MY_MEMORY_ORDER_RELAXED); + } + if (flags2 & FL_STANDALONE) return 0; @@ -8841,7 +8917,7 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi) gtid= rgi->current_gtid; err= rpl_global_gtid_slave_state->record_gtid(thd, >id, sub_id, true, false, &hton); - if (err) + if (unlikely(err)) { int ec= thd->get_stmt_da()->sql_errno(); /* @@ -8872,7 +8948,7 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi) res= trans_commit(thd); /* Automatically rolls back on error. */ thd->mdl_context.release_transactional_locks(); - if (!res && sub_id) + if (likely(!res) && sub_id) rpl_global_gtid_slave_state->update_state_hash(sub_id, >id, hton, rgi); /* @@ -9048,6 +9124,13 @@ User_var_log_event(const char* buf, uint event_len, buf+= description_event->common_header_len + description_event->post_header_len[USER_VAR_EVENT-1]; name_len= uint4korr(buf); + /* Avoid reading out of buffer */ + if ((buf - buf_start) + UV_NAME_LEN_SIZE + name_len > event_len) + { + error= true; + goto err; + } + name= (char *) buf + UV_NAME_LEN_SIZE; /* @@ -9104,7 +9187,12 @@ User_var_log_event(const char* buf, uint event_len, Old events will not have this extra byte, thence, we keep the flags set to UNDEF_F. 
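The Query_log_event, Load_log_event and User_var_log_event hunks around here all validate lengths read out of the event buffer (db_len, q_len, table_name, name_len, val_len) against the real event length before dereferencing anything. A self-contained sketch of that defensive pattern; the parsed_event layout below is invented for illustration and is not the binlog format.

// Validate a length prefix read from an untrusted buffer before using it.
#include <cstdint>
#include <cstdio>
#include <cstring>

struct parsed_event { const char *name; uint32_t name_len; bool valid; };

static parsed_event parse_event(const char *buf, size_t event_len)
{
  parsed_event ev= { nullptr, 0, false };
  if (event_len < 4)                       // need at least the length field
    return ev;
  uint32_t name_len;
  std::memcpy(&name_len, buf, 4);          // length prefix, host byte order here
  if (4 + (size_t) name_len > event_len)   // would read past the buffer
    return ev;
  ev.name= buf + 4;
  ev.name_len= name_len;
  ev.valid= true;
  return ev;
}

int main()
{
  char buf[16]= {0};
  uint32_t len= 200;                       // claims more data than we have
  std::memcpy(buf, &len, 4);
  parsed_event ev= parse_event(buf, sizeof(buf));
  std::printf("valid=%d\n", ev.valid);     // prints valid=0
  return 0;
}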
*/ - size_t bytes_read= ((val + val_len) - buf_start); + size_t bytes_read= (val + val_len) - buf_start; + if (bytes_read > size_t(event_len)) + { + error= true; + goto err; + } if ((data_written - bytes_read) > 0) { flags= (uint) *(buf + UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + @@ -9114,7 +9202,7 @@ User_var_log_event(const char* buf, uint event_len, } err: - if (error) + if (unlikely(error)) name= 0; } @@ -9294,7 +9382,7 @@ bool User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) cs->csname, hex_str, cs->name, print_event_info->delimiter); my_free(hex_str); - if (error) + if (unlikely(error)) goto err; break; } @@ -9336,7 +9424,12 @@ int User_var_log_event::do_apply_event(rpl_group_info *rgi) } if (!(charset= get_charset(charset_number, MYF(MY_WME)))) + { + rgi->rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + ER_THD(thd, ER_SLAVE_FATAL_ERROR), + "Invalid character set for User var event"); DBUG_RETURN(1); + } LEX_CSTRING user_var_name; user_var_name.str= name; user_var_name.length= name_len; @@ -9351,12 +9444,26 @@ int User_var_log_event::do_apply_event(rpl_group_info *rgi) { switch (type) { case REAL_RESULT: + if (val_len != 8) + { + rgi->rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + ER_THD(thd, ER_SLAVE_FATAL_ERROR), + "Invalid variable length at User var event"); + return 1; + } float8get(real_val, val); it= new (thd->mem_root) Item_float(thd, real_val, 0); val= (char*) &real_val; // Pointer to value in native format val_len= 8; break; case INT_RESULT: + if (val_len != 8) + { + rgi->rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + ER_THD(thd, ER_SLAVE_FATAL_ERROR), + "Invalid variable length at User var event"); + return 1; + } int_val= (longlong) uint8korr(val); it= new (thd->mem_root) Item_int(thd, int_val); val= (char*) &int_val; // Pointer to value in native format @@ -9364,6 +9471,13 @@ int User_var_log_event::do_apply_event(rpl_group_info *rgi) break; case DECIMAL_RESULT: { + if (val_len < 3) + { + rgi->rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + ER_THD(thd, ER_SLAVE_FATAL_ERROR), + "Invalid variable length at User var event"); + return 1; + } Item_decimal *dec= new (thd->mem_root) Item_decimal(thd, (uchar*) val+2, val[0], val[1]); it= dec; val= (char *)dec->val_decimal(NULL); @@ -9806,9 +9920,9 @@ int Create_file_log_event::do_apply_event(rpl_group_info *rgi) error=0; // Everything is ok err: - if (error) + if (unlikely(error)) end_io_cache(&file); - if (fd >= 0) + if (likely(fd >= 0)) mysql_file_close(fd, MYF(0)); return error != 0; } @@ -10519,7 +10633,7 @@ Execute_load_query_log_event::do_apply_event(rpl_group_info *rgi) If there was an error the slave is going to stop, leave the file so that we can re-execute this event at START SLAVE. */ - if (!error) + if (unlikely(!error)) mysql_file_delete(key_file_log_event_data, fname, MYF(MY_WME)); my_free(buf); @@ -10779,6 +10893,14 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); m_width = net_field_length(&ptr_after_width); DBUG_PRINT("debug", ("m_width=%lu", m_width)); + + /* Avoid reading out of buffer */ + if (ptr_after_width + (m_width + 7) / 8 > (uchar*)buf + event_len) + { + m_cols.bitmap= NULL; + DBUG_VOID_RETURN; + } + /* if my_bitmap_init fails, catched in is_valid() */ if (likely(!my_bitmap_init(&m_cols, m_width <= sizeof(m_bitbuf)*8 ? 
m_bitbuf : NULL, @@ -10827,7 +10949,12 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, const uchar* const ptr_rows_data= (const uchar*) ptr_after_width; - size_t const data_size= event_len - (ptr_rows_data - (const uchar *) buf); + size_t const read_size= ptr_rows_data - (const unsigned char *) buf; + if (read_size > event_len) + { + DBUG_VOID_RETURN; + } + size_t const data_size= event_len - read_size; DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %lu", m_table_id, m_flags, m_width, (ulong) data_size)); @@ -11146,7 +11273,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) lex->query_tables_last= &tables->next_global; } } - if (open_and_lock_tables(thd, rgi->tables_to_lock, FALSE, 0)) + if (unlikely(open_and_lock_tables(thd, rgi->tables_to_lock, FALSE, 0))) { uint actual_error= thd->get_stmt_da()->sql_errno(); #ifdef WITH_WSREP @@ -11399,13 +11526,13 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) error= do_exec_row(rgi); - if (error) + if (unlikely(error)) DBUG_PRINT("info", ("error: %s", HA_ERR(error))); DBUG_ASSERT(error != HA_ERR_RECORD_DELETED); table->in_use = old_thd; - if (error) + if (unlikely(error)) { int actual_error= convert_handler_error(error, thd, table); bool idempotent_error= (idempotent_error_code(error) && @@ -11436,12 +11563,12 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) DBUG_PRINT("info", ("curr_row: %p; curr_row_end: %p; rows_end:%p", m_curr_row, m_curr_row_end, m_rows_end)); - if (!m_curr_row_end && !error) + if (!m_curr_row_end && likely(!error)) error= unpack_current_row(rgi); m_curr_row= m_curr_row_end; - if (error == 0 && !transactional_table) + if (likely(error == 0) && !transactional_table) thd->transaction.all.modified_non_trans_table= thd->transaction.stmt.modified_non_trans_table= TRUE; } // row processing loop @@ -11463,7 +11590,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) const_cast(rli)->abort_slave= 1;); } - if ((error= do_after_row_operations(rli, error)) && + if (unlikely(error= do_after_row_operations(rli, error)) && ignored_error_code(convert_handler_error(error, thd, table))) { @@ -11477,7 +11604,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) } // if (table) - if (error) + if (unlikely(error)) { slave_rows_error_report(ERROR_LEVEL, error, rgi, thd, table, get_type_str(), @@ -11506,7 +11633,8 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) } #endif /* WITH_WSREP && HAVE_QUERY_CACHE */ - if (get_flags(STMT_END_F) && (error= rows_event_stmt_cleanup(rgi, thd))) + if (unlikely(get_flags(STMT_END_F) && + (error= rows_event_stmt_cleanup(rgi, thd)))) slave_rows_error_report(ERROR_LEVEL, thd->is_error() ? 0 : error, rgi, thd, table, @@ -12863,7 +12991,7 @@ Write_rows_log_event::do_after_row_operations(const Slave_reporting_capability * ultimately. 
Still todo: fix */ } - if ((local_error= m_table->file->ha_end_bulk_insert())) + if (unlikely((local_error= m_table->file->ha_end_bulk_insert()))) { m_table->file->print_error(local_error, MYF(0)); } @@ -12981,7 +13109,7 @@ Rows_log_event::write_row(rpl_group_info *rgi, prepare_record(table, m_width, true); /* unpack row into table->record[0] */ - if ((error= unpack_current_row(rgi))) + if (unlikely((error= unpack_current_row(rgi)))) { table->file->print_error(error, MYF(0)); DBUG_RETURN(error); @@ -13021,9 +13149,9 @@ Rows_log_event::write_row(rpl_group_info *rgi, DBUG_PRINT_BITSET("debug", "read_set: %s", table->read_set); if (invoke_triggers && - process_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE, TRUE)) + unlikely(process_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE, TRUE))) { - DBUG_RETURN(HA_ERR_GENERIC); // in case if error is not set yet + DBUG_RETURN(HA_ERR_GENERIC); // in case if error is not set yet } // Handle INSERT. @@ -13047,7 +13175,7 @@ Rows_log_event::write_row(rpl_group_info *rgi, if (table->s->sequence) error= update_sequence(); - else while ((error= table->file->ha_write_row(table->record[0]))) + else while (unlikely(error= table->file->ha_write_row(table->record[0]))) { if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT || @@ -13079,11 +13207,9 @@ Rows_log_event::write_row(rpl_group_info *rgi, { DBUG_PRINT("info",("Locating offending record using rnd_pos()")); error= table->file->ha_rnd_pos(table->record[1], table->file->dup_ref); - if (error) + if (unlikely(error)) { DBUG_PRINT("info",("rnd_pos() returns error %d",error)); - if (error == HA_ERR_RECORD_DELETED) - error= HA_ERR_KEY_NOT_FOUND; table->file->print_error(error, MYF(0)); DBUG_RETURN(error); } @@ -13114,11 +13240,9 @@ Rows_log_event::write_row(rpl_group_info *rgi, (const uchar*)key.get(), HA_WHOLE_KEY, HA_READ_KEY_EXACT); - if (error) + if (unlikely(error)) { DBUG_PRINT("info",("index_read_idx() returns %s", HA_ERR(error))); - if (error == HA_ERR_RECORD_DELETED) - error= HA_ERR_KEY_NOT_FOUND; table->file->print_error(error, MYF(0)); DBUG_RETURN(error); } @@ -13189,18 +13313,20 @@ Rows_log_event::write_row(rpl_group_info *rgi, { DBUG_PRINT("info",("Deleting offending row and trying to write new one again")); if (invoke_triggers && - process_triggers(TRG_EVENT_DELETE, TRG_ACTION_BEFORE, TRUE)) + unlikely(process_triggers(TRG_EVENT_DELETE, TRG_ACTION_BEFORE, + TRUE))) error= HA_ERR_GENERIC; // in case if error is not set yet else { - if ((error= table->file->ha_delete_row(table->record[1]))) + if (unlikely((error= table->file->ha_delete_row(table->record[1])))) { DBUG_PRINT("info",("ha_delete_row() returns error %d",error)); table->file->print_error(error, MYF(0)); DBUG_RETURN(error); } if (invoke_triggers && - process_triggers(TRG_EVENT_DELETE, TRG_ACTION_AFTER, TRUE)) + unlikely(process_triggers(TRG_EVENT_DELETE, TRG_ACTION_AFTER, + TRUE))) DBUG_RETURN(HA_ERR_GENERIC); // in case if error is not set yet } /* Will retry ha_write_row() with the offending row removed. 
*/ @@ -13208,7 +13334,7 @@ Rows_log_event::write_row(rpl_group_info *rgi, } if (invoke_triggers && - process_triggers(TRG_EVENT_INSERT, TRG_ACTION_AFTER, TRUE)) + unlikely(process_triggers(TRG_EVENT_INSERT, TRG_ACTION_AFTER, TRUE))) error= HA_ERR_GENERIC; // in case if error is not set yet DBUG_RETURN(error); @@ -13262,7 +13388,7 @@ Write_rows_log_event::do_exec_row(rpl_group_info *rgi) error= write_row(rgi, slave_exec_mode == SLAVE_EXEC_MODE_IDEMPOTENT); thd_proc_info(thd, tmp); - if (error && !thd->is_error()) + if (unlikely(error) && unlikely(!thd->is_error())) { DBUG_ASSERT(0); my_error(ER_UNKNOWN_ERROR, MYF(0)); @@ -13613,11 +13739,9 @@ int Rows_log_event::find_row(rpl_group_info *rgi) DBUG_RETURN(error); error= table->file->ha_rnd_pos_by_record(table->record[0]); - if (error) + if (unlikely(error)) { DBUG_PRINT("info",("rnd_pos returns error %d",error)); - if (error == HA_ERR_RECORD_DELETED) - error= HA_ERR_KEY_NOT_FOUND; table->file->print_error(error, MYF(0)); } DBUG_RETURN(error); @@ -13677,13 +13801,12 @@ int Rows_log_event::find_row(rpl_group_info *rgi) table->record[0][table->s->null_bytes - 1]|= 256U - (1U << table->s->last_null_bit_pos); - if ((error= table->file->ha_index_read_map(table->record[0], m_key, - HA_WHOLE_KEY, - HA_READ_KEY_EXACT))) + if (unlikely((error= table->file->ha_index_read_map(table->record[0], + m_key, + HA_WHOLE_KEY, + HA_READ_KEY_EXACT)))) { DBUG_PRINT("info",("no record matching the key found in the table")); - if (error == HA_ERR_RECORD_DELETED) - error= HA_ERR_KEY_NOT_FOUND; table->file->print_error(error, MYF(0)); table->file->ha_index_end(); goto end; @@ -13759,9 +13882,6 @@ int Rows_log_event::find_row(rpl_group_info *rgi) { while ((error= table->file->ha_index_next(table->record[0]))) { - /* We just skip records that has already been deleted */ - if (error == HA_ERR_RECORD_DELETED) - continue; DBUG_PRINT("info",("no record matching the given row found")); table->file->print_error(error, MYF(0)); table->file->ha_index_end(); @@ -13776,7 +13896,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi) DBUG_EXECUTE_IF("slave_crash_if_table_scan", abort();); /* We don't have a key: search the table using rnd_next() */ - if ((error= table->file->ha_rnd_init_with_error(1))) + if (unlikely((error= table->file->ha_rnd_init_with_error(1)))) { DBUG_PRINT("info",("error initializing table scan" " (ha_rnd_init returns %d)",error)); @@ -13788,10 +13908,9 @@ int Rows_log_event::find_row(rpl_group_info *rgi) /* Continue until we find the right record or have made a full loop */ do { - restart_rnd_next: error= table->file->ha_rnd_next(table->record[0]); - if (error) + if (unlikely(error)) DBUG_PRINT("info", ("error: %s", HA_ERR(error))); switch (error) { @@ -13804,13 +13923,6 @@ int Rows_log_event::find_row(rpl_group_info *rgi) table->file->ha_rnd_end(); goto end; - /* - If the record was deleted, we pick the next one without doing - any comparisons. 
- */ - case HA_ERR_RECORD_DELETED: - goto restart_rnd_next; - default: DBUG_PRINT("info", ("Failed to get next record" " (rnd_next returns %d)",error)); @@ -13941,7 +14053,7 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi) #endif /* WSREP_PROC_INFO */ thd_proc_info(thd, message); - if (!(error= find_row(rgi))) + if (likely(!(error= find_row(rgi)))) { /* Delete the record found, located in record[0] @@ -13956,9 +14068,9 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi) thd_proc_info(thd, message); if (invoke_triggers && - process_triggers(TRG_EVENT_DELETE, TRG_ACTION_BEFORE, FALSE)) + unlikely(process_triggers(TRG_EVENT_DELETE, TRG_ACTION_BEFORE, FALSE))) error= HA_ERR_GENERIC; // in case if error is not set yet - if (!error) + if (likely(!error)) { m_table->mark_columns_per_binlog_row_image(); if (m_vers_from_plain && m_table->versioned(VERS_TIMESTAMP)) @@ -13976,8 +14088,8 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi) } m_table->default_column_bitmaps(); } - if (invoke_triggers && !error && - process_triggers(TRG_EVENT_DELETE, TRG_ACTION_AFTER, FALSE)) + if (invoke_triggers && likely(!error) && + unlikely(process_triggers(TRG_EVENT_DELETE, TRG_ACTION_AFTER, FALSE))) error= HA_ERR_GENERIC; // in case if error is not set yet m_table->file->ha_index_or_rnd_end(); } @@ -14162,7 +14274,7 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) thd_proc_info(thd, message); int error= find_row(rgi); - if (error) + if (unlikely(error)) { /* We need to read the second image in the event of error to be @@ -14198,7 +14310,7 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) /* this also updates m_curr_row_end */ thd_proc_info(thd, message); - if ((error= unpack_current_row(rgi, &m_cols_ai))) + if (unlikely((error= unpack_current_row(rgi, &m_cols_ai)))) goto err; /* @@ -14225,7 +14337,7 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) thd_proc_info(thd, message); if (invoke_triggers && - process_triggers(TRG_EVENT_UPDATE, TRG_ACTION_BEFORE, TRUE)) + unlikely(process_triggers(TRG_EVENT_UPDATE, TRG_ACTION_BEFORE, TRUE))) { error= HA_ERR_GENERIC; // in case if error is not set yet goto err; @@ -14239,7 +14351,7 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) if (m_vers_from_plain && m_table->versioned(VERS_TIMESTAMP)) m_table->vers_update_fields(); error= m_table->file->ha_update_row(m_table->record[1], m_table->record[0]); - if (error == HA_ERR_RECORD_IS_THE_SAME) + if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME)) error= 0; if (m_vers_from_plain && m_table->versioned(VERS_TIMESTAMP)) { @@ -14249,8 +14361,8 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) } m_table->default_column_bitmaps(); - if (invoke_triggers && !error && - process_triggers(TRG_EVENT_UPDATE, TRG_ACTION_AFTER, TRUE)) + if (invoke_triggers && likely(!error) && + unlikely(process_triggers(TRG_EVENT_UPDATE, TRG_ACTION_AFTER, TRUE))) error= HA_ERR_GENERIC; // in case if error is not set yet thd_proc_info(thd, tmp); @@ -14391,7 +14503,7 @@ void Incident_log_event::pack_info(Protocol *protocol) #endif /* MYSQL_CLIENT */ -#if WITH_WSREP && !defined(MYSQL_CLIENT) +#if defined(WITH_WSREP) && !defined(MYSQL_CLIENT) /* read the first event from (*buf). The size of the (*buf) is (*buf_len). 
At the end (*buf) is shitfed to point to the following event or NULL and diff --git a/sql/log_event.h b/sql/log_event.h index a2b58b1e023..84025554ee9 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -5034,7 +5034,8 @@ public: DBUG_ENTER("Incident_log_event::Incident_log_event"); DBUG_PRINT("enter", ("m_incident: %d", m_incident)); m_message.length= 0; - if (!(m_message.str= (char*) my_malloc(msg->length+1, MYF(MY_WME)))) + if (unlikely(!(m_message.str= (char*) my_malloc(msg->length+1, + MYF(MY_WME))))) { /* Mark this event invalid */ m_incident= INCIDENT_NONE; diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index 20986050203..66c4c2bef42 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -1,4 +1,5 @@ -/* Copyright (c) 2007, 2016, Oracle and/or its affiliates. +/* Copyright (c) 2007, 2018, Oracle and/or its affiliates. + Copyright (c) 2009, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -98,7 +99,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi) */ ev_thd->lex->set_stmt_row_injection(); - if (open_and_lock_tables(ev_thd, rgi->tables_to_lock, FALSE, 0)) + if (unlikely(open_and_lock_tables(ev_thd, rgi->tables_to_lock, FALSE, 0))) { uint actual_error= ev_thd->get_stmt_da()->sql_errno(); if (ev_thd->is_slave_error || ev_thd->is_fatal_error) @@ -227,7 +228,8 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi) while (error == 0 && row_start < ev->m_rows_end) { uchar const *row_end= NULL; - if ((error= do_prepare_row(ev_thd, rgi, table, row_start, &row_end))) + if (unlikely((error= do_prepare_row(ev_thd, rgi, table, row_start, + &row_end)))) break; // We should perform the after-row operation even in // the case of error @@ -266,7 +268,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi) error= do_after_row_operations(table, error); } - if (error) + if (unlikely(error)) { /* error has occurred during the transaction */ rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL, "Error in %s event: error during transaction execution " @@ -477,14 +479,14 @@ replace_record(THD *thd, TABLE *table, DBUG_PRINT_BITSET("debug", "read_set = %s", table->read_set); #endif - while ((error= table->file->ha_write_row(table->record[0]))) + while (unlikely(error= table->file->ha_write_row(table->record[0]))) { if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT) { table->file->print_error(error, MYF(0)); /* to check at exec_relay_log_event */ DBUG_RETURN(error); } - if ((keynum= table->file->get_dup_key(error)) < 0) + if (unlikely((keynum= table->file->get_dup_key(error)) < 0)) { table->file->print_error(error, MYF(0)); /* @@ -508,18 +510,16 @@ replace_record(THD *thd, TABLE *table, if (table->file->ha_table_flags() & HA_DUPLICATE_POS) { error= table->file->ha_rnd_pos(table->record[1], table->file->dup_ref); - if (error) + if (unlikely(error)) { DBUG_PRINT("info",("rnd_pos() returns error %d",error)); - if (error == HA_ERR_RECORD_DELETED) - error= HA_ERR_KEY_NOT_FOUND; table->file->print_error(error, MYF(0)); DBUG_RETURN(error); } } else { - if (table->file->extra(HA_EXTRA_FLUSH_CACHE)) + if (unlikely(table->file->extra(HA_EXTRA_FLUSH_CACHE))) { DBUG_RETURN(my_errno); } @@ -527,7 +527,7 @@ replace_record(THD *thd, TABLE *table, if (key.get() == NULL) { key.assign(static_cast(my_alloca(table->s->max_unique_length))); - if (key.get() == NULL) + if 
(unlikely(key.get() == NULL)) DBUG_RETURN(ENOMEM); } @@ -537,11 +537,9 @@ replace_record(THD *thd, TABLE *table, (const uchar*)key.get(), HA_WHOLE_KEY, HA_READ_KEY_EXACT); - if (error) + if (unlikely(error)) { DBUG_PRINT("info", ("index_read_idx() returns error %d", error)); - if (error == HA_ERR_RECORD_DELETED) - error= HA_ERR_KEY_NOT_FOUND; table->file->print_error(error, MYF(0)); DBUG_RETURN(error); } @@ -577,7 +575,7 @@ replace_record(THD *thd, TABLE *table, { error=table->file->ha_update_row(table->record[1], table->record[0]); - if (error && error != HA_ERR_RECORD_IS_THE_SAME) + if (unlikely(error) && error != HA_ERR_RECORD_IS_THE_SAME) table->file->print_error(error, MYF(0)); else error= 0; @@ -585,7 +583,7 @@ replace_record(THD *thd, TABLE *table, } else { - if ((error= table->file->ha_delete_row(table->record[1]))) + if (unlikely((error= table->file->ha_delete_row(table->record[1])))) { table->file->print_error(error, MYF(0)); DBUG_RETURN(error); @@ -671,7 +669,8 @@ static int find_and_fetch_row(TABLE *table, uchar *key) { int error; /* We have a key: search the table using the index */ - if (!table->file->inited && (error= table->file->ha_index_init(0, FALSE))) + if (!table->file->inited && + unlikely(error= table->file->ha_index_init(0, FALSE))) { table->file->print_error(error, MYF(0)); DBUG_RETURN(error); @@ -695,9 +694,9 @@ static int find_and_fetch_row(TABLE *table, uchar *key) my_ptrdiff_t const pos= table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0; table->record[1][pos]= 0xFF; - if ((error= table->file->ha_index_read_map(table->record[1], key, - HA_WHOLE_KEY, - HA_READ_KEY_EXACT))) + if (unlikely((error= table->file->ha_index_read_map(table->record[1], key, + HA_WHOLE_KEY, + HA_READ_KEY_EXACT)))) { table->file->print_error(error, MYF(0)); table->file->ha_index_end(); @@ -738,9 +737,6 @@ static int find_and_fetch_row(TABLE *table, uchar *key) while ((error= table->file->ha_index_next(table->record[1]))) { - /* We just skip records that has already been deleted */ - if (error == HA_ERR_RECORD_DELETED) - continue; table->file->print_error(error, MYF(0)); table->file->ha_index_end(); DBUG_RETURN(error); @@ -758,13 +754,12 @@ static int find_and_fetch_row(TABLE *table, uchar *key) int error; /* We don't have a key: search the table using rnd_next() */ - if ((error= table->file->ha_rnd_init_with_error(1))) + if (unlikely((error= table->file->ha_rnd_init_with_error(1)))) return error; /* Continue until we find the right record or have made a full loop */ do { - restart_rnd_next: error= table->file->ha_rnd_next(table->record[1]); DBUG_DUMP("record[0]", table->record[0], table->s->reclength); @@ -774,18 +769,11 @@ static int find_and_fetch_row(TABLE *table, uchar *key) case 0: break; - /* - If the record was deleted, we pick the next one without doing - any comparisons. 
- */ - case HA_ERR_RECORD_DELETED: - goto restart_rnd_next; - case HA_ERR_END_OF_FILE: if (++restart_count < 2) { int error2; - if ((error2= table->file->ha_rnd_init_with_error(1))) + if (unlikely((error2= table->file->ha_rnd_init_with_error(1)))) DBUG_RETURN(error2); } break; @@ -853,7 +841,7 @@ int Write_rows_log_event_old::do_after_row_operations(TABLE *table, int error) fires bug#27077 todo: explain or fix */ - if ((local_error= table->file->ha_end_bulk_insert())) + if (unlikely((local_error= table->file->ha_end_bulk_insert()))) { table->file->print_error(local_error, MYF(0)); } @@ -985,7 +973,7 @@ int Delete_rows_log_event_old::do_exec_row(TABLE *table) int error; DBUG_ASSERT(table != NULL); - if (!(error= ::find_and_fetch_row(table, m_key))) + if (likely(!(error= ::find_and_fetch_row(table, m_key)))) { /* Now we should have the right row to delete. We are using @@ -1094,7 +1082,7 @@ int Update_rows_log_event_old::do_exec_row(TABLE *table) DBUG_ASSERT(table != NULL); int error= ::find_and_fetch_row(table, m_key); - if (error) + if (unlikely(error)) return error; /* @@ -1120,7 +1108,7 @@ int Update_rows_log_event_old::do_exec_row(TABLE *table) database into the after image delivered from the master. */ error= table->file->ha_update_row(table->record[1], table->record[0]); - if (error == HA_ERR_RECORD_IS_THE_SAME) + if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME)) error= 0; return error; @@ -1233,6 +1221,13 @@ Old_rows_log_event::Old_rows_log_event(const char *buf, uint event_len, DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); m_width = net_field_length(&ptr_after_width); DBUG_PRINT("debug", ("m_width=%lu", m_width)); + /* Avoid reading out of buffer */ + if (ptr_after_width + m_width > (uchar *)buf + event_len) + { + m_cols.bitmap= NULL; + DBUG_VOID_RETURN; + } + /* if my_bitmap_init fails, catched in is_valid() */ if (likely(!my_bitmap_init(&m_cols, m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL, @@ -1409,8 +1404,8 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) */ lex_start(thd); - if ((error= lock_tables(thd, rgi->tables_to_lock, - rgi->tables_to_lock_count, 0))) + if (unlikely((error= lock_tables(thd, rgi->tables_to_lock, + rgi->tables_to_lock_count, 0)))) { if (thd->is_slave_error || thd->is_fatal_error) { @@ -1602,7 +1597,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) DBUG_PRINT("info", ("curr_row: %p; curr_row_end:%p; rows_end: %p", m_curr_row, m_curr_row_end, m_rows_end)); - if (!m_curr_row_end && !error) + if (!m_curr_row_end && likely(!error)) unpack_current_row(rgi); // at this moment m_curr_row_end should be set @@ -1619,7 +1614,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) error= do_after_row_operations(rli, error); } // if (table) - if (error) + if (unlikely(error)) { /* error has occurred during the transaction */ rli->report(ERROR_LEVEL, thd->net.last_errno, NULL, "Error in %s event: error during transaction execution " @@ -1703,7 +1698,9 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) already. So there should be no need to rollback the transaction. */ DBUG_ASSERT(! thd->transaction_rollback_request); - if ((error= (binlog_error ? trans_rollback_stmt(thd) : trans_commit_stmt(thd)))) + if (unlikely((error= (binlog_error ? 
+ trans_rollback_stmt(thd) : + trans_commit_stmt(thd))))) rli->report(ERROR_LEVEL, error, NULL, "Error in %s event: commit of row events failed, " "table `%s`.`%s`", @@ -1924,8 +1921,9 @@ Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite) /* fill table->record[0] with default values */ - if ((error= prepare_record(table, m_width, - TRUE /* check if columns have def. values */))) + if (unlikely((error= + prepare_record(table, m_width, + TRUE /* check if columns have def. values */)))) DBUG_RETURN(error); /* unpack row into table->record[0] */ @@ -1946,14 +1944,14 @@ Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite) TODO: Add safety measures against infinite looping. */ - while ((error= table->file->ha_write_row(table->record[0]))) + while (unlikely(error= table->file->ha_write_row(table->record[0]))) { if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT) { table->file->print_error(error, MYF(0)); /* to check at exec_relay_log_event */ DBUG_RETURN(error); } - if ((keynum= table->file->get_dup_key(error)) < 0) + if (unlikely((keynum= table->file->get_dup_key(error)) < 0)) { DBUG_PRINT("info",("Can't locate duplicate key (get_dup_key returns %d)",keynum)); table->file->print_error(error, MYF(0)); @@ -1979,11 +1977,9 @@ Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite) { DBUG_PRINT("info",("Locating offending record using rnd_pos()")); error= table->file->ha_rnd_pos(table->record[1], table->file->dup_ref); - if (error) + if (unlikely(error)) { DBUG_PRINT("info",("rnd_pos() returns error %d",error)); - if (error == HA_ERR_RECORD_DELETED) - error= HA_ERR_KEY_NOT_FOUND; table->file->print_error(error, MYF(0)); DBUG_RETURN(error); } @@ -2001,7 +1997,7 @@ Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite) if (key.get() == NULL) { key.assign(static_cast(my_alloca(table->s->max_unique_length))); - if (key.get() == NULL) + if (unlikely(key.get() == NULL)) { DBUG_PRINT("info",("Can't allocate key buffer")); DBUG_RETURN(ENOMEM); @@ -2014,11 +2010,9 @@ Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite) (const uchar*)key.get(), HA_WHOLE_KEY, HA_READ_KEY_EXACT); - if (error) + if (unlikely(error)) { DBUG_PRINT("info",("index_read_idx() returns error %d", error)); - if (error == HA_ERR_RECORD_DELETED) - error= HA_ERR_KEY_NOT_FOUND; table->file->print_error(error, MYF(0)); DBUG_RETURN(error); } @@ -2087,7 +2081,7 @@ Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite) else { DBUG_PRINT("info",("Deleting offending row and trying to write new one again")); - if ((error= table->file->ha_delete_row(table->record[1]))) + if (unlikely((error= table->file->ha_delete_row(table->record[1])))) { DBUG_PRINT("info",("ha_delete_row() returns error %d",error)); table->file->print_error(error, MYF(0)); @@ -2175,11 +2169,9 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi) */ DBUG_PRINT("info",("locating record using primary key (position)")); int error= table->file->ha_rnd_pos_by_record(table->record[0]); - if (error) + if (unlikely(error)) { DBUG_PRINT("info",("rnd_pos returns error %d",error)); - if (error == HA_ERR_RECORD_DELETED) - error= HA_ERR_KEY_NOT_FOUND; table->file->print_error(error, MYF(0)); } DBUG_RETURN(error); @@ -2204,7 +2196,8 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi) DBUG_PRINT("info",("locating record using primary key (index_read)")); /* We have a key: search the table using the index */ - if (!table->file->inited && (error= 
table->file->ha_index_init(0, FALSE))) + if (!table->file->inited && + unlikely(error= table->file->ha_index_init(0, FALSE))) { DBUG_PRINT("info",("ha_index_init returns error %d",error)); table->file->print_error(error, MYF(0)); @@ -2234,13 +2227,12 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi) table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0; table->record[0][pos]= 0xFF; - if ((error= table->file->ha_index_read_map(table->record[0], m_key, - HA_WHOLE_KEY, - HA_READ_KEY_EXACT))) + if (unlikely((error= table->file->ha_index_read_map(table->record[0], + m_key, + HA_WHOLE_KEY, + HA_READ_KEY_EXACT)))) { DBUG_PRINT("info",("no record matching the key found in the table")); - if (error == HA_ERR_RECORD_DELETED) - error= HA_ERR_KEY_NOT_FOUND; table->file->print_error(error, MYF(0)); table->file->ha_index_end(); DBUG_RETURN(error); @@ -2308,11 +2300,8 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi) while (record_compare(table)) { - while ((error= table->file->ha_index_next(table->record[0]))) + while (unlikely(error= table->file->ha_index_next(table->record[0]))) { - /* We just skip records that has already been deleted */ - if (error == HA_ERR_RECORD_DELETED) - continue; DBUG_PRINT("info",("no record matching the given row found")); table->file->print_error(error, MYF(0)); (void) table->file->ha_index_end(); @@ -2327,7 +2316,7 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi) int restart_count= 0; // Number of times scanning has restarted from top /* We don't have a key: search the table using rnd_next() */ - if ((error= table->file->ha_rnd_init_with_error(1))) + if (unlikely((error= table->file->ha_rnd_init_with_error(1)))) { DBUG_PRINT("info",("error initializing table scan" " (ha_rnd_init returns %d)",error)); @@ -2345,15 +2334,12 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi) case 0: break; - case HA_ERR_RECORD_DELETED: - goto restart_rnd_next; - case HA_ERR_END_OF_FILE: if (++restart_count < 2) { int error2; table->file->ha_rnd_end(); - if ((error2= table->file->ha_rnd_init_with_error(1))) + if (unlikely((error2= table->file->ha_rnd_init_with_error(1)))) DBUG_RETURN(error2); goto restart_rnd_next; } @@ -2472,7 +2458,7 @@ Write_rows_log_event_old::do_after_row_operations(const Slave_reporting_capabili fires bug#27077 todo: explain or fix */ - if ((local_error= m_table->file->ha_end_bulk_insert())) + if (unlikely((local_error= m_table->file->ha_end_bulk_insert()))) { m_table->file->print_error(local_error, MYF(0)); } @@ -2486,7 +2472,7 @@ Write_rows_log_event_old::do_exec_row(rpl_group_info *rgi) DBUG_ASSERT(m_table != NULL); int error= write_row(rgi, TRUE /* overwrite */); - if (error && !thd->net.last_errno) + if (unlikely(error) && !thd->net.last_errno) thd->net.last_errno= error; return error; @@ -2589,7 +2575,7 @@ int Delete_rows_log_event_old::do_exec_row(rpl_group_info *rgi) int error; DBUG_ASSERT(m_table != NULL); - if (!(error= find_row(rgi))) + if (likely(!(error= find_row(rgi))) ) { /* Delete the record found, located in record[0] @@ -2689,7 +2675,7 @@ Update_rows_log_event_old::do_exec_row(rpl_group_info *rgi) DBUG_ASSERT(m_table != NULL); int error= find_row(rgi); - if (error) + if (unlikely(error)) { /* We need to read the second image in the event of error to be @@ -2733,7 +2719,7 @@ Update_rows_log_event_old::do_exec_row(rpl_group_info *rgi) error= m_table->file->ha_update_row(m_table->record[1], m_table->record[0]); m_table->file->ha_index_or_rnd_end(); - if (error == HA_ERR_RECORD_IS_THE_SAME) + if (unlikely(error == 
HA_ERR_RECORD_IS_THE_SAME)) error= 0; return error; diff --git a/sql/mdl.h b/sql/mdl.h index a537022484f..952d97d301c 100644 --- a/sql/mdl.h +++ b/sql/mdl.h @@ -456,7 +456,7 @@ public: static void *operator new(size_t size, MEM_ROOT *mem_root) throw () { return alloc_root(mem_root, size); } - static void operator delete(void *ptr, MEM_ROOT *mem_root) {} + static void operator delete(void *, MEM_ROOT *) {} void init(MDL_key::enum_mdl_namespace namespace_arg, const char *db_arg, const char *name_arg, @@ -497,7 +497,7 @@ public: is mandatory. Can only be used before the request has been granted. */ - MDL_request& operator=(const MDL_request &rhs) + MDL_request& operator=(const MDL_request &) { ticket= NULL; /* Do nothing, in particular, don't try to copy the key. */ diff --git a/sql/mf_iocache.cc b/sql/mf_iocache.cc index 3d3b4da11db..495d78804b0 100644 --- a/sql/mf_iocache.cc +++ b/sql/mf_iocache.cc @@ -49,8 +49,7 @@ extern "C" { */ -int _my_b_net_read(register IO_CACHE *info, uchar *Buffer, - size_t Count __attribute__((unused))) +int _my_b_net_read(IO_CACHE *info, uchar *Buffer, size_t) { ulong read_length; NET *net= &(current_thd)->net; @@ -59,12 +58,12 @@ int _my_b_net_read(register IO_CACHE *info, uchar *Buffer, if (!info->end_of_file) DBUG_RETURN(1); /* because my_b_get (no _) takes 1 byte at a time */ read_length= my_net_read_packet(net, 0); - if (read_length == packet_error) + if (unlikely(read_length == packet_error)) { info->error= -1; DBUG_RETURN(1); } - if (read_length == 0) + if (unlikely(read_length == 0)) { info->end_of_file= 0; /* End of file from client */ DBUG_RETURN(1); diff --git a/sql/mf_iocache_encr.cc b/sql/mf_iocache_encr.cc index 9724ca4e19e..d2e6d554ba7 100644 --- a/sql/mf_iocache_encr.cc +++ b/sql/mf_iocache_encr.cc @@ -49,8 +49,8 @@ static int my_b_encr_read(IO_CACHE *info, uchar *Buffer, size_t Count) if (pos_in_file == info->end_of_file) { - info->read_pos= info->read_end= info->buffer; - info->pos_in_file= pos_in_file; + /* reading past EOF should not empty the cache */ + info->read_pos= info->read_end; info->error= 0; DBUG_RETURN(MY_TEST(Count)); } diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc index ec3c85b34c4..cf587ef4acd 100644 --- a/sql/multi_range_read.cc +++ b/sql/multi_range_read.cc @@ -757,12 +757,6 @@ int Mrr_ordered_rndpos_reader::get_next(range_id_t *range_info) res= file->ha_rnd_pos(file->get_table()->record[0], rowid_buffer->read_ptr1); - if (res == HA_ERR_RECORD_DELETED) - { - /* not likely to get this code with current storage engines, but still */ - continue; - } - if (res) return res; /* Some fatal error */ diff --git a/sql/mysql_install_db.cc b/sql/mysql_install_db.cc index c76b279048b..0483c7f5fbf 100644 --- a/sql/mysql_install_db.cc +++ b/sql/mysql_install_db.cc @@ -545,7 +545,6 @@ static int create_db_instance() } CreateDirectory("mysql",NULL); - CreateDirectory("test", NULL); /* Set data directory permissions for both current user and diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 4309748fe06..2205538f2d3 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -304,7 +304,6 @@ static TYPELIB tc_heuristic_recover_typelib= const char *first_keyword= "first"; const char *my_localhost= "localhost", *delayed_user= "DELAYED"; -const char *quoted_string= "%`s"; bool opt_large_files= sizeof(my_off_t) > 4; static my_bool opt_autocommit; ///< for --autocommit command-line option @@ -535,6 +534,7 @@ ulonglong slave_skipped_errors; ulong feature_files_opened_with_delayed_keys= 0, feature_check_constraint= 0; ulonglong denied_connections; 
my_decimal decimal_zero; +long opt_secure_timestamp; /* Maximum length of parameter value which can be set through @@ -1957,10 +1957,11 @@ void kill_mysql(THD *thd) pthread_t tmp; int error; abort_loop=1; - if ((error= mysql_thread_create(0, /* Not instrumented */ - &tmp, &connection_attrib, - kill_server_thread, (void*) 0))) - sql_print_error("Can't create thread to kill server (errno= %d).", error); + if (unlikely((error= mysql_thread_create(0, /* Not instrumented */ + &tmp, &connection_attrib, + kill_server_thread, (void*) 0)))) + sql_print_error("Can't create thread to kill server (errno= %d).", + error); } #endif DBUG_VOID_RETURN; @@ -2191,7 +2192,8 @@ static void mysqld_exit(int exit_code) if (opt_endinfo && global_status_var.global_memory_used) fprintf(stderr, "Warning: Memory not freed: %ld\n", (long) global_status_var.global_memory_used); - if (!opt_debugging && !my_disable_leak_check && exit_code == 0) + if (!opt_debugging && !my_disable_leak_check && exit_code == 0 && + debug_assert_on_not_freed_memory) { #ifdef SAFEMALLOC sf_report_leaked_memory(0); @@ -2240,14 +2242,14 @@ void clean_up(bool print_message) lex_free(); /* Free some memory */ item_create_cleanup(); tdc_start_shutdown(); +#ifdef HAVE_REPLICATION + semi_sync_master_deinit(); +#endif plugin_shutdown(); udf_free(); ha_end(); if (tc_log) tc_log->close(); -#ifdef HAVE_REPLICATION - semi_sync_master_deinit(); -#endif xid_cache_free(); tdc_deinit(); mdl_destroy(); @@ -2569,7 +2571,7 @@ static MYSQL_SOCKET activate_tcp_port(uint port) my_snprintf(port_buf, NI_MAXSERV, "%d", port); error= getaddrinfo(real_bind_addr_str, port_buf, &hints, &ai); - if (error != 0) + if (unlikely(error != 0)) { DBUG_PRINT("error",("Got error: %d from getaddrinfo()", error)); @@ -3485,8 +3487,9 @@ static void start_signal_handler(void) (void) my_setstacksize(&thr_attr,my_thread_stack_size); mysql_mutex_lock(&LOCK_start_thread); - if ((error= mysql_thread_create(key_thread_signal_hand, - &signal_thread, &thr_attr, signal_hand, 0))) + if (unlikely((error= mysql_thread_create(key_thread_signal_hand, + &signal_thread, &thr_attr, + signal_hand, 0)))) { sql_print_error("Can't create interrupt-thread (error %d, errno: %d)", error,errno); @@ -3588,10 +3591,10 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused))) PSI_CALL_delete_current_thread(); #ifdef USE_ONE_SIGNAL_HAND pthread_t tmp; - if ((error= mysql_thread_create(0, /* Not instrumented */ - &tmp, &connection_attrib, - kill_server_thread, - (void*) &sig))) + if (unlikely((error= mysql_thread_create(0, /* Not instrumented */ + &tmp, &connection_attrib, + kill_server_thread, + (void*) &sig)))) sql_print_error("Can't create thread to kill server (errno= %d)", error); #else @@ -3683,9 +3686,9 @@ void my_message_sql(uint error, const char *str, myf MyFlags) func= sql_print_error; } - if (thd) + if (likely(thd)) { - if (MyFlags & ME_FATALERROR) + if (unlikely(MyFlags & ME_FATALERROR)) thd->is_fatal_error= 1; (void) thd->raise_condition(error, NULL, level, str); } @@ -3695,7 +3698,7 @@ void my_message_sql(uint error, const char *str, myf MyFlags) /* When simulating OOM, skip writing to error log to avoid mtr errors */ DBUG_EXECUTE_IF("simulate_out_of_memory", DBUG_VOID_RETURN;); - if (!thd || thd->log_all_errors || (MyFlags & ME_NOREFRESH)) + if (unlikely(!thd) || thd->log_all_errors || (MyFlags & ME_NOREFRESH)) (*func)("%s: %s", my_progname_short, str); /* purecov: inspected */ DBUG_VOID_RETURN; } @@ -4101,7 +4104,7 @@ static void my_malloc_size_cb_func(long long size, my_bool 
is_thread_specific) thd->status_var.local_memory_used); if (size > 0 && thd->status_var.local_memory_used > (int64)thd->variables.max_mem_used && - !thd->killed && !thd->get_stmt_da()->is_set()) + likely(!thd->killed) && !thd->get_stmt_da()->is_set()) { /* Ensure we don't get called here again */ char buf[50], *buf2; @@ -5377,7 +5380,7 @@ static int init_server_components() init_global_index_stats(); /* Allow storage engine to give real error messages */ - if (ha_init_errors()) + if (unlikely(ha_init_errors())) DBUG_RETURN(1); tc_log= 0; // ha_initialize_handlerton() needs that @@ -5564,7 +5567,7 @@ static int init_server_components() error= mysql_bin_log.open(opt_bin_logname, LOG_BIN, 0, 0, WRITE_CACHE, max_binlog_size, 0, TRUE); mysql_mutex_unlock(log_lock); - if (error) + if (unlikely(error)) unireg_abort(1); } @@ -5598,7 +5601,7 @@ static int init_server_components() else error= mlockall(MCL_CURRENT); - if (error) + if (unlikely(error)) { if (global_system_variables.log_warnings) sql_print_warning("Failed to lock memory. Errno: %d\n",errno); @@ -5630,9 +5633,9 @@ static void create_shutdown_thread() hEventShutdown=CreateEvent(0, FALSE, FALSE, shutdown_event_name); pthread_t hThread; int error; - if ((error= mysql_thread_create(key_thread_handle_shutdown, - &hThread, &connection_attrib, - handle_shutdown, 0))) + if (unlikely((error= mysql_thread_create(key_thread_handle_shutdown, + &hThread, &connection_attrib, + handle_shutdown, 0)))) sql_print_warning("Can't create thread to handle shutdown requests" " (errno= %d)", error); @@ -8566,8 +8569,10 @@ SHOW_VAR status_vars[]= { {"Feature_fulltext", (char*) offsetof(STATUS_VAR, feature_fulltext), SHOW_LONG_STATUS}, {"Feature_gis", (char*) offsetof(STATUS_VAR, feature_gis), SHOW_LONG_STATUS}, {"Feature_invisible_columns", (char*) offsetof(STATUS_VAR, feature_invisible_columns), SHOW_LONG_STATUS}, + {"Feature_json", (char*) offsetof(STATUS_VAR, feature_json), SHOW_LONG_STATUS}, {"Feature_locale", (char*) offsetof(STATUS_VAR, feature_locale), SHOW_LONG_STATUS}, {"Feature_subquery", (char*) offsetof(STATUS_VAR, feature_subquery), SHOW_LONG_STATUS}, + {"Feature_system_versioning", (char*) offsetof(STATUS_VAR, feature_system_versioning), SHOW_LONG_STATUS}, {"Feature_timezone", (char*) offsetof(STATUS_VAR, feature_timezone), SHOW_LONG_STATUS}, {"Feature_trigger", (char*) offsetof(STATUS_VAR, feature_trigger), SHOW_LONG_STATUS}, {"Feature_window_functions", (char*) offsetof(STATUS_VAR, feature_window_functions), SHOW_LONG_STATUS}, @@ -9011,7 +9016,7 @@ static int mysql_init_variables(void) #if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY) have_ssl=SHOW_OPTION_YES; -#if HAVE_YASSL +#if defined(HAVE_YASSL) have_openssl= SHOW_OPTION_NO; #else have_openssl= SHOW_OPTION_YES; @@ -9563,7 +9568,7 @@ mysql_getopt_value(const char *name, uint length, case OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE: { KEY_CACHE *key_cache; - if (!(key_cache= get_or_create_key_cache(name, length))) + if (unlikely(!(key_cache= get_or_create_key_cache(name, length)))) { if (error) *error= EXIT_OUT_OF_MEMORY; @@ -9693,17 +9698,6 @@ static int get_options(int *argc_ptr, char ***argv_ptr) global_system_variables.max_allowed_packet); } -#if MYSQL_VERSION_ID > 101001 - /* - TIMESTAMP columns get implicit DEFAULT values when - --explicit_defaults_for_timestamp is not set. - */ - if (!opt_help && !opt_explicit_defaults_for_timestamp) - sql_print_warning("TIMESTAMP with implicit DEFAULT value is deprecated. 
" - "Please use --explicit_defaults_for_timestamp server " - "option (see documentation for more details)."); -#endif - if (log_error_file_ptr != disabled_my_option) opt_error_log= 1; else diff --git a/sql/mysqld.h b/sql/mysqld.h index 7a616097338..4a392eaf196 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -241,7 +241,7 @@ extern int max_user_connections; extern volatile ulong cached_thread_count; extern ulong what_to_log,flush_time; extern uint max_prepared_stmt_count, prepared_stmt_count; -extern ulong open_files_limit; +extern MYSQL_PLUGIN_IMPORT ulong open_files_limit; extern ulonglong binlog_cache_size, binlog_stmt_cache_size, binlog_file_cache_size; extern ulonglong max_binlog_cache_size, max_binlog_stmt_cache_size; extern ulong max_binlog_size; @@ -302,7 +302,9 @@ extern my_bool encrypt_binlog; extern my_bool encrypt_tmp_disk_tables, encrypt_tmp_files; extern ulong encryption_algorithm; extern const char *encryption_algorithm_names[]; -extern const char *quoted_string; +extern long opt_secure_timestamp; + +enum secure_timestamp { SECTIME_NO, SECTIME_SUPER, SECTIME_REPL, SECTIME_YES }; #ifdef HAVE_PSI_INTERFACE #ifdef HAVE_MMAP diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 4c3771bc710..0d3aa12465d 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -112,12 +112,12 @@ extern void query_cache_insert(void *thd, const char *packet, size_t length, unsigned pkt_nr); #endif // HAVE_QUERY_CACHE #define update_statistics(A) A -extern my_bool thd_net_is_killed(); +extern my_bool thd_net_is_killed(THD *thd); /* Additional instrumentation hooks for the server */ #include "mysql_com_server.h" #else #define update_statistics(A) -#define thd_net_is_killed() 0 +#define thd_net_is_killed(A) 0 #endif @@ -620,7 +620,7 @@ net_real_write(NET *net,const uchar *packet, size_t len) query_cache_insert(net->thd, (char*) packet, len, net->pkt_nr); #endif - if (net->error == 2) + if (unlikely(net->error == 2)) DBUG_RETURN(-1); /* socket can't be used */ net->reading_or_writing=2; @@ -960,7 +960,7 @@ retry: DBUG_PRINT("info",("vio_read returned %ld errno: %d", (long) length, vio_errno(net->vio))); - if (i== 0 && thd_net_is_killed()) + if (i== 0 && unlikely(thd_net_is_killed((THD*) net->thd))) { DBUG_PRINT("info", ("thd is killed")); len= packet_error; @@ -1246,13 +1246,13 @@ my_net_read_packet_reallen(NET *net, my_bool read_from_server, ulong* reallen) total_length += len; len = my_real_read(net,&complen, 0); } while (len == MAX_PACKET_LENGTH); - if (len != packet_error) + if (likely(len != packet_error)) len+= total_length; net->where_b = save_pos; } net->read_pos = net->buff + net->where_b; - if (len != packet_error) + if (likely(len != packet_error)) { net->read_pos[len]=0; /* Safeguard for mysql_use_result */ *reallen = (ulong)len; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 9ccffd13bfc..64d3b08b1b1 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1595,7 +1595,7 @@ int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler, selects. 
*/ int error= quick->init_ror_merged_scan(TRUE, local_alloc); - if (error) + if (unlikely(error)) DBUG_RETURN(error); quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS); } @@ -1619,7 +1619,8 @@ int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler, quick->record= head->record[0]; } - if (need_to_fetch_row && head->file->ha_rnd_init_with_error(false)) + if (need_to_fetch_row && + unlikely(head->file->ha_rnd_init_with_error(false))) { DBUG_PRINT("error", ("ROR index_merge rnd_init call failed")); DBUG_RETURN(1); @@ -1793,9 +1794,9 @@ int QUICK_ROR_UNION_SELECT::reset() List_iterator_fast it(quick_selects); while ((quick= it++)) { - if ((error= quick->reset())) + if (unlikely((error= quick->reset()))) DBUG_RETURN(error); - if ((error= quick->get_next())) + if (unlikely((error= quick->get_next()))) { if (error == HA_ERR_END_OF_FILE) continue; @@ -1805,12 +1806,12 @@ int QUICK_ROR_UNION_SELECT::reset() queue_insert(&queue, (uchar*)quick); } /* Prepare for ha_rnd_pos calls. */ - if (head->file->inited && (error= head->file->ha_rnd_end())) + if (head->file->inited && unlikely((error= head->file->ha_rnd_end()))) { DBUG_PRINT("error", ("ROR index_merge rnd_end call failed")); DBUG_RETURN(error); } - if ((error= head->file->ha_rnd_init(false))) + if (unlikely((error= head->file->ha_rnd_init(false)))) { DBUG_PRINT("error", ("ROR index_merge rnd_init call failed")); DBUG_RETURN(error); @@ -10835,8 +10836,9 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, goto err; quick->records= records; - if ((cp_buffer_from_ref(thd, table, ref) && thd->is_fatal_error) || - !(range= new(alloc) QUICK_RANGE())) + if ((cp_buffer_from_ref(thd, table, ref) && + unlikely(thd->is_fatal_error)) || + unlikely(!(range= new(alloc) QUICK_RANGE()))) goto err; // out of memory range->min_key= range->max_key= ref->key_buff; @@ -10845,8 +10847,8 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, make_prev_keypart_map(ref->key_parts); range->flag= EQ_RANGE; - if (!(quick->key_parts=key_part=(KEY_PART *) - alloc_root(&quick->alloc,sizeof(KEY_PART)*ref->key_parts))) + if (unlikely(!(quick->key_parts=key_part=(KEY_PART *) + alloc_root(&quick->alloc,sizeof(KEY_PART)*ref->key_parts)))) goto err; max_used_key_len=0; @@ -11164,103 +11166,100 @@ int QUICK_ROR_INTERSECT_SELECT::get_next() uint last_rowid_count=0; DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::get_next"); - do + /* Get a rowid for first quick and save it as a 'candidate' */ + qr= quick_it++; + quick= qr->quick; + error= quick->get_next(); + if (cpk_quick) { - /* Get a rowid for first quick and save it as a 'candidate' */ - qr= quick_it++; - quick= qr->quick; - error= quick->get_next(); - if (cpk_quick) + while (!error && !cpk_quick->row_in_ranges()) { - while (!error && !cpk_quick->row_in_ranges()) - { - quick->file->unlock_row(); /* row not in range; unlock */ - error= quick->get_next(); - } + quick->file->unlock_row(); /* row not in range; unlock */ + error= quick->get_next(); } - if (error) - DBUG_RETURN(error); + } + if (unlikely(error)) + DBUG_RETURN(error); + + /* Save the read key tuple */ + key_copy(qr->key_tuple, record, head->key_info + quick->index, + quick->max_used_key_length); + + quick->file->position(quick->record); + memcpy(last_rowid, quick->file->ref, head->file->ref_length); + last_rowid_count= 1; + quick_with_last_rowid= quick; + + while (last_rowid_count < quick_selects.elements) + { + if (!(qr= quick_it++)) + { + quick_it.rewind(); + qr= quick_it++; + } + quick= qr->quick; + + do + { + 
DBUG_EXECUTE_IF("innodb_quick_report_deadlock", + DBUG_SET("+d,innodb_report_deadlock");); + if (unlikely((error= quick->get_next()))) + { + /* On certain errors like deadlock, trx might be rolled back.*/ + if (!thd->transaction_rollback_request) + quick_with_last_rowid->file->unlock_row(); + DBUG_RETURN(error); + } + quick->file->position(quick->record); + cmp= head->file->cmp_ref(quick->file->ref, last_rowid); + if (cmp < 0) + { + /* This row is being skipped. Release lock on it. */ + quick->file->unlock_row(); + } + } while (cmp < 0); - /* Save the read key tuple */ key_copy(qr->key_tuple, record, head->key_info + quick->index, quick->max_used_key_length); - quick->file->position(quick->record); - memcpy(last_rowid, quick->file->ref, head->file->ref_length); - last_rowid_count= 1; - quick_with_last_rowid= quick; - - while (last_rowid_count < quick_selects.elements) + /* Ok, current select 'caught up' and returned ref >= cur_ref */ + if (cmp > 0) { - if (!(qr= quick_it++)) + /* Found a row with ref > cur_ref. Make it a new 'candidate' */ + if (cpk_quick) { - quick_it.rewind(); - qr= quick_it++; - } - quick= qr->quick; - - do - { - DBUG_EXECUTE_IF("innodb_quick_report_deadlock", - DBUG_SET("+d,innodb_report_deadlock");); - if ((error= quick->get_next())) + while (!cpk_quick->row_in_ranges()) { - /* On certain errors like deadlock, trx might be rolled back.*/ - if (!thd->transaction_rollback_request) - quick_with_last_rowid->file->unlock_row(); - DBUG_RETURN(error); + quick->file->unlock_row(); /* row not in range; unlock */ + if (unlikely((error= quick->get_next()))) + { + /* On certain errors like deadlock, trx might be rolled back.*/ + if (!thd->transaction_rollback_request) + quick_with_last_rowid->file->unlock_row(); + DBUG_RETURN(error); + } } quick->file->position(quick->record); - cmp= head->file->cmp_ref(quick->file->ref, last_rowid); - if (cmp < 0) - { - /* This row is being skipped. Release lock on it. */ - quick->file->unlock_row(); - } - } while (cmp < 0); + } + memcpy(last_rowid, quick->file->ref, head->file->ref_length); + quick_with_last_rowid->file->unlock_row(); + last_rowid_count= 1; + quick_with_last_rowid= quick; + //save the fields here key_copy(qr->key_tuple, record, head->key_info + quick->index, quick->max_used_key_length); - - /* Ok, current select 'caught up' and returned ref >= cur_ref */ - if (cmp > 0) - { - /* Found a row with ref > cur_ref. Make it a new 'candidate' */ - if (cpk_quick) - { - while (!cpk_quick->row_in_ranges()) - { - quick->file->unlock_row(); /* row not in range; unlock */ - if ((error= quick->get_next())) - { - /* On certain errors like deadlock, trx might be rolled back.*/ - if (!thd->transaction_rollback_request) - quick_with_last_rowid->file->unlock_row(); - DBUG_RETURN(error); - } - } - quick->file->position(quick->record); - } - memcpy(last_rowid, quick->file->ref, head->file->ref_length); - quick_with_last_rowid->file->unlock_row(); - last_rowid_count= 1; - quick_with_last_rowid= quick; - - //save the fields here - key_copy(qr->key_tuple, record, head->key_info + quick->index, - quick->max_used_key_length); - } - else - { - /* current 'candidate' row confirmed by this select */ - last_rowid_count++; - } } + else + { + /* current 'candidate' row confirmed by this select */ + last_rowid_count++; + } + } - /* We get here if we got the same row ref in all scans. 
*/ - if (need_to_fetch_row) - error= head->file->ha_rnd_pos(head->record[0], last_rowid); - } while (error == HA_ERR_RECORD_DELETED); + /* We get here if we got the same row ref in all scans. */ + if (need_to_fetch_row) + error= head->file->ha_rnd_pos(head->record[0], last_rowid); if (!need_to_fetch_row) { @@ -11304,44 +11303,41 @@ int QUICK_ROR_UNION_SELECT::get_next() do { - do + if (!queue.elements) + DBUG_RETURN(HA_ERR_END_OF_FILE); + /* Ok, we have a queue with >= 1 scans */ + + quick= (QUICK_SELECT_I*)queue_top(&queue); + memcpy(cur_rowid, quick->last_rowid, rowid_length); + + /* put into queue rowid from the same stream as top element */ + if ((error= quick->get_next())) { - if (!queue.elements) - DBUG_RETURN(HA_ERR_END_OF_FILE); - /* Ok, we have a queue with >= 1 scans */ + if (error != HA_ERR_END_OF_FILE) + DBUG_RETURN(error); + queue_remove_top(&queue); + } + else + { + quick->save_last_pos(); + queue_replace_top(&queue); + } - quick= (QUICK_SELECT_I*)queue_top(&queue); - memcpy(cur_rowid, quick->last_rowid, rowid_length); + if (!have_prev_rowid) + { + /* No rows have been returned yet */ + dup_row= FALSE; + have_prev_rowid= TRUE; + } + else + dup_row= !head->file->cmp_ref(cur_rowid, prev_rowid); + } while (dup_row); - /* put into queue rowid from the same stream as top element */ - if ((error= quick->get_next())) - { - if (error != HA_ERR_END_OF_FILE) - DBUG_RETURN(error); - queue_remove_top(&queue); - } - else - { - quick->save_last_pos(); - queue_replace_top(&queue); - } + tmp= cur_rowid; + cur_rowid= prev_rowid; + prev_rowid= tmp; - if (!have_prev_rowid) - { - /* No rows have been returned yet */ - dup_row= FALSE; - have_prev_rowid= TRUE; - } - else - dup_row= !head->file->cmp_ref(cur_rowid, prev_rowid); - } while (dup_row); - - tmp= cur_rowid; - cur_rowid= prev_rowid; - prev_rowid= tmp; - - error= head->file->ha_rnd_pos(quick->record, prev_rowid); - } while (error == HA_ERR_RECORD_DELETED); + error= head->file->ha_rnd_pos(quick->record, prev_rowid); DBUG_RETURN(error); } @@ -11363,7 +11359,7 @@ int QUICK_RANGE_SELECT::reset() if (file->inited == handler::RND) { /* Handler could be left in this state by MRR */ - if ((error= file->ha_rnd_end())) + if (unlikely((error= file->ha_rnd_end()))) DBUG_RETURN(error); } @@ -11375,7 +11371,7 @@ int QUICK_RANGE_SELECT::reset() { DBUG_EXECUTE_IF("bug14365043_2", DBUG_SET("+d,ha_index_init_fail");); - if ((error= file->ha_index_init(index,1))) + if (unlikely((error= file->ha_index_init(index,1)))) { file->print_error(error, MYF(0)); goto err; @@ -11718,7 +11714,7 @@ int QUICK_SELECT_DESC::get_next() if (last_range->flag & NO_MAX_RANGE) // Read last record { int local_error; - if ((local_error= file->ha_index_last(record))) + if (unlikely((local_error= file->ha_index_last(record)))) DBUG_RETURN(local_error); // Empty table if (cmp_prev(last_range) == 0) DBUG_RETURN(0); diff --git a/sql/opt_range_mrr.cc b/sql/opt_range_mrr.cc index ace6208fd77..515d94e8748 100644 --- a/sql/opt_range_mrr.cc +++ b/sql/opt_range_mrr.cc @@ -135,7 +135,7 @@ static void step_down_to(SEL_ARG_RANGE_SEQ *arg, SEL_ARG *key_tree) TRUE No more ranges in the sequence */ -#if (_MSC_FULL_VER == 160030319) +#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER == 160030319) /* Workaround Visual Studio 2010 RTM compiler backend bug, the function enters infinite loop. 
@@ -315,7 +315,7 @@ walk_up_n_right: return 0; } -#if (_MSC_FULL_VER == 160030319) +#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER == 160030319) /* VS2010 compiler bug workaround */ #pragma optimize("g", on) #endif diff --git a/sql/opt_split.cc b/sql/opt_split.cc index 6467706bc0b..37853bdbbe9 100644 --- a/sql/opt_split.cc +++ b/sql/opt_split.cc @@ -352,8 +352,9 @@ bool JOIN::check_for_splittable_materialized() Field *ord_field= ((Item_field *) (ord_item->real_item()))->field; - JOIN_TAB *tab= ord_field->table->reginfo.join_tab; - if (tab->is_inner_table_of_outer_join()) + /* Ignore fields from inner tables of outer joins */ + TABLE_LIST *tbl= ord_field->table->pos_in_table_list; + if (tbl->is_inner_table_of_outer_join()) continue; List_iterator<Item> li(fields_list); @@ -543,7 +544,14 @@ void TABLE::add_splitting_info_for_key_field(KEY_FIELD *key_field) added_key_field->level= 0; added_key_field->optimize= KEY_OPTIMIZE_EQ; added_key_field->eq_func= true; - added_key_field->null_rejecting= true; + + Item *real= key_field->val->real_item(); + if ((real->type() == Item::FIELD_ITEM) && + ((Item_field*)real)->field->maybe_null()) + added_key_field->null_rejecting= true; + else + added_key_field->null_rejecting= false; + added_key_field->cond_guard= NULL; added_key_field->sj_pred_no= UINT_MAX; return; @@ -862,7 +870,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count, table_map tables_usable_for_splitting= spl_opt_info->tables_usable_for_splitting; KEYUSE_EXT *keyuse_ext= &join->ext_keyuses_for_splitting->at(0); - KEYUSE_EXT *best_key_keyuse_ext_start; + KEYUSE_EXT *UNINIT_VAR(best_key_keyuse_ext_start); TABLE *best_table= 0; double best_rec_per_key= DBL_MAX; SplM_plan_info *spl_plan= 0; diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc index a55ce2d163c..800ee65fe26 100644 --- a/sql/opt_subselect.cc +++ b/sql/opt_subselect.cc @@ -1063,8 +1063,6 @@ bool convert_join_subqueries_to_semijoins(JOIN *join) while ((in_subq= li++)) { SELECT_LEX *subq_sel= in_subq->get_select_lex(); - if (subq_sel->handle_derived(thd->lex, DT_OPTIMIZE)) - DBUG_RETURN(1); if (subq_sel->handle_derived(thd->lex, DT_MERGE)) DBUG_RETURN(TRUE); if (subq_sel->join->transform_in_predicates_into_in_subq(thd)) @@ -3708,8 +3706,7 @@ bool setup_sj_materialization_part1(JOIN_TAB *sjm_tab) sjm= emb_sj_nest->sj_mat_info; thd= tab->join->thd; /* First the calls come to the materialization function */ - //List<Item> &item_list= emb_sj_nest->sj_subq_pred->unit->first_select()->item_list; - + DBUG_ASSERT(sjm->is_used); /* Set up the table to write to, do as select_union::create_result_table does @@ -3718,10 +3715,22 @@ bool setup_sj_materialization_part1(JOIN_TAB *sjm_tab) sjm->sjm_table_param.bit_fields_as_long= TRUE; SELECT_LEX *subq_select= emb_sj_nest->sj_subq_pred->unit->first_select(); const LEX_CSTRING sj_materialize_name= { STRING_WITH_LEN("sj-materialize") }; - Ref_ptr_array p_items= subq_select->ref_pointer_array; - for (uint i= 0; i < subq_select->item_list.elements; i++) - sjm->sjm_table_cols.push_back(p_items[i], thd->mem_root); - + List_iterator<Item> it(subq_select->item_list); + Item *item; + while((item= it++)) + { + /* + This semi-join replaced the subquery (subq_select), so on + re-executing it will not be prepared.
To use the Items from its + select list we have to prepare (fix_fields) them + */ + if (!item->fixed && item->fix_fields(thd, it.ref())) + DBUG_RETURN(TRUE); + item= *(it.ref()); // it can be changed by fix_fields + DBUG_ASSERT(!item->name.length || item->name.length == strlen(item->name.str)); + sjm->sjm_table_cols.push_back(item, thd->mem_root); + } + sjm->sjm_table_param.field_count= subq_select->item_list.elements; sjm->sjm_table_param.force_not_null_cols= TRUE; @@ -4352,7 +4361,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd) } } - if (thd->is_fatal_error) // If end of memory + if (unlikely(thd->is_fatal_error)) // If end of memory goto err; share->db_record_offset= 1; table->no_rows= 1; // We don't need the data @@ -4361,10 +4370,11 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd) recinfo++; if (share->db_type() == TMP_ENGINE_HTON) { - if (create_internal_tmp_table(table, keyinfo, start_recinfo, &recinfo, 0)) + if (unlikely(create_internal_tmp_table(table, keyinfo, start_recinfo, + &recinfo, 0))) goto err; } - if (open_tmp_table(table)) + if (unlikely(open_tmp_table(table))) goto err; thd->mem_root= mem_root_save; @@ -4476,7 +4486,7 @@ int SJ_TMP_TABLE::sj_weedout_check_row(THD *thd) } error= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]); - if (error) + if (unlikely(error)) { /* create_internal_tmp_table_from_heap will generate error if needed */ if (!tmp_table->file->is_fatal_error(error, HA_CHECK_DUP)) @@ -5297,7 +5307,8 @@ enum_nested_loop_state join_tab_execution_startup(JOIN_TAB *tab) hash_sj_engine->materialize_join->exec(); hash_sj_engine->is_materialized= TRUE; - if (hash_sj_engine->materialize_join->error || tab->join->thd->is_fatal_error) + if (unlikely(hash_sj_engine->materialize_join->error) || + unlikely(tab->join->thd->is_fatal_error)) DBUG_RETURN(NESTED_LOOP_ERROR); } } @@ -6316,6 +6327,7 @@ bool JOIN::choose_tableless_subquery_plan() tmp_having= having; } } + exec_const_cond= conds; return FALSE; } diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 43d1c2de7ad..82946709166 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -316,7 +316,7 @@ int opt_sum_query(THD *thd, else { error= tl->table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); - if(error) + if (unlikely(error)) { tl->table->file->print_error(error, MYF(ME_FATALERROR)); DBUG_RETURN(error); @@ -400,15 +400,16 @@ int opt_sum_query(THD *thd, } longlong info_limit= 1; table->file->info_push(INFO_KIND_FORCE_LIMIT_BEGIN, &info_limit); - if (!(error= table->file->ha_index_init((uint) ref.key, 1))) + if (likely(!(error= table->file->ha_index_init((uint) ref.key, 1)))) error= (is_max ? 
get_index_max_value(table, &ref, range_fl) : get_index_min_value(table, &ref, item_field, range_fl, prefix_len)); /* Verify that the read tuple indeed matches the search key */ - if (!error && reckey_in_range(is_max, &ref, item_field->field, - conds, range_fl, prefix_len)) + if (!error && + reckey_in_range(is_max, &ref, item_field->field, + conds, range_fl, prefix_len)) error= HA_ERR_KEY_NOT_FOUND; table->file->ha_end_keyread(); table->file->ha_index_end(); @@ -478,7 +479,7 @@ int opt_sum_query(THD *thd, } } - if (thd->is_error()) + if (unlikely(thd->is_error())) DBUG_RETURN(thd->get_stmt_da()->sql_errno()); /* diff --git a/sql/parse_file.cc b/sql/parse_file.cc index 90d766f15d2..751ca180f97 100644 --- a/sql/parse_file.cc +++ b/sql/parse_file.cc @@ -483,8 +483,7 @@ frm_error: my_error(ER_FPARSER_BAD_HEADER, MYF(0), file_name->str); DBUG_RETURN(0); } - else - DBUG_RETURN(parser); // upper level have to check parser->ok() + DBUG_RETURN(parser); // upper level have to check parser->ok() } diff --git a/sql/partition_info.cc b/sql/partition_info.cc index 43ee92f6020..f09bde6a965 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -48,11 +48,9 @@ partition_info *partition_info::get_clone(THD *thd) List_iterator part_it(partitions); partition_element *part; partition_info *clone= new (mem_root) partition_info(*this); - if (!clone) - { - mem_alloc_error(sizeof(partition_info)); + if (unlikely(!clone)) DBUG_RETURN(NULL); - } + memset(&(clone->read_partitions), 0, sizeof(clone->read_partitions)); memset(&(clone->lock_partitions), 0, sizeof(clone->lock_partitions)); clone->bitmaps_are_initialized= FALSE; @@ -64,20 +62,16 @@ partition_info *partition_info::get_clone(THD *thd) partition_element *subpart; partition_element *part_clone= new (mem_root) partition_element(); if (!part_clone) - { - mem_alloc_error(sizeof(partition_element)); DBUG_RETURN(NULL); - } + memcpy(part_clone, part, sizeof(partition_element)); part_clone->subpartitions.empty(); while ((subpart= (subpart_it++))) { partition_element *subpart_clone= new (mem_root) partition_element(); if (!subpart_clone) - { - mem_alloc_error(sizeof(partition_element)); DBUG_RETURN(NULL); - } + memcpy(subpart_clone, subpart, sizeof(partition_element)); part_clone->subpartitions.push_back(subpart_clone, mem_root); } @@ -88,20 +82,15 @@ partition_info *partition_info::get_clone(THD *thd) (part_elem_value *)alloc_root(mem_root, sizeof(part_elem_value) * part->list_val_list.elements); if (!new_val_arr) - { - mem_alloc_error(sizeof(part_elem_value) * part->list_val_list.elements); DBUG_RETURN(NULL); - } + p_column_list_val *new_colval_arr= (p_column_list_val*)alloc_root(mem_root, sizeof(p_column_list_val) * num_columns * part->list_val_list.elements); if (!new_colval_arr) - { - mem_alloc_error(sizeof(p_column_list_val) * num_columns * - part->list_val_list.elements); DBUG_RETURN(NULL); - } + part_elem_value *val; while ((val= list_val_it++)) { @@ -394,10 +383,6 @@ char *partition_info::create_default_partition_names(THD *thd, uint part_no, move_ptr+= MAX_PART_NAME_SIZE; } while (++i < num_parts_arg); } - else - { - mem_alloc_error(num_parts_arg*MAX_PART_NAME_SIZE); - } DBUG_RETURN(ptr); } @@ -422,13 +407,8 @@ char *partition_info::create_default_subpartition_name(THD *thd, uint subpart_no DBUG_ENTER("create_default_subpartition_name"); if (likely(ptr != NULL)) - { my_snprintf(ptr, size_alloc, "%ssp%u", part_name, subpart_no); - } - else - { - mem_alloc_error(size_alloc); - } + DBUG_RETURN(ptr); } @@ -505,10 +485,7 @@ bool 
partition_info::set_up_default_partitions(THD *thd, handler *file, default_name+=MAX_PART_NAME_SIZE; } else - { - mem_alloc_error(sizeof(partition_element)); goto end; - } } while (++i < num_parts); result= FALSE; end: @@ -574,10 +551,7 @@ bool partition_info::set_up_default_subpartitions(THD *thd, handler *file, subpart_elem->partition_name= ptr; } else - { - mem_alloc_error(sizeof(partition_element)); goto end; - } } while (++j < num_subparts); } while (++i < num_parts); result= FALSE; @@ -885,7 +859,7 @@ void partition_info::vers_set_hist_part(THD *thd) if (vers_info->interval.is_set()) { - if (vers_info->hist_part->range_value > thd->systime()) + if (vers_info->hist_part->range_value > thd->query_start()) return; partition_element *next= NULL; @@ -896,7 +870,7 @@ void partition_info::vers_set_hist_part(THD *thd) while ((next= it++) != vers_info->now_part) { vers_info->hist_part= next; - if (next->range_value > thd->systime()) + if (next->range_value > thd->query_start()) return; } goto warn; @@ -1506,11 +1480,9 @@ bool partition_info::set_part_expr(THD *thd, char *start_token, Item *item_ptr, size_t expr_len= end_token - start_token; char *func_string= (char*) thd->memdup(start_token, expr_len); - if (!func_string) - { - mem_alloc_error(expr_len); + if (unlikely(!func_string)) return TRUE; - } + if (is_subpart) { list_of_subpart_fields= FALSE; @@ -1665,7 +1637,6 @@ bool partition_info::set_up_charset_field_preps(THD *thd) } DBUG_RETURN(FALSE); error: - mem_alloc_error(size); DBUG_RETURN(TRUE); } @@ -1698,17 +1669,19 @@ bool check_partition_dirs(partition_info *part_info) partition_element *subpart_elem; while ((subpart_elem= sub_it++)) { - if (error_if_data_home_dir(subpart_elem->data_file_name, - "DATA DIRECTORY") || - error_if_data_home_dir(subpart_elem->index_file_name, - "INDEX DIRECTORY")) + if (unlikely(error_if_data_home_dir(subpart_elem->data_file_name, + "DATA DIRECTORY")) || + unlikely(error_if_data_home_dir(subpart_elem->index_file_name, + "INDEX DIRECTORY"))) return 1; } } else { - if (error_if_data_home_dir(part_elem->data_file_name, "DATA DIRECTORY") || - error_if_data_home_dir(part_elem->index_file_name, "INDEX DIRECTORY")) + if (unlikely(error_if_data_home_dir(part_elem->data_file_name, + "DATA DIRECTORY")) || + unlikely(error_if_data_home_dir(part_elem->index_file_name, + "INDEX DIRECTORY"))) return 1; } } @@ -1827,9 +1800,11 @@ part_column_list_val *partition_info::add_column_value(THD *thd) into the structure used for 1 column. After this we call ourselves recursively which should always succeed. 
*/ + num_columns= curr_list_object; if (!reorganize_into_single_field_col_val(thd)) { - DBUG_RETURN(add_column_value(thd)); + if (!init_column_part(thd)) + DBUG_RETURN(add_column_value(thd)); } DBUG_RETURN(NULL); } @@ -1970,10 +1945,8 @@ bool partition_info::init_column_part(THD *thd) if (!(list_val= (part_elem_value*) thd->calloc(sizeof(part_elem_value))) || p_elem->list_val_list.push_back(list_val, thd->mem_root)) - { - mem_alloc_error(sizeof(part_elem_value)); DBUG_RETURN(TRUE); - } + if (num_columns) loc_num_columns= num_columns; else @@ -1981,10 +1954,8 @@ bool partition_info::init_column_part(THD *thd) if (!(col_val_array= (part_column_list_val*) thd->calloc(loc_num_columns * sizeof(part_column_list_val)))) - { - mem_alloc_error(loc_num_columns * sizeof(part_elem_value)); DBUG_RETURN(TRUE); - } + list_val->col_val_array= col_val_array; list_val->added_items= 0; curr_list_val= list_val; @@ -2200,7 +2171,6 @@ bool partition_info::fix_column_value_functions(THD *thd, thd->variables.sql_mode= save_sql_mode; if (!(val_ptr= (uchar*) thd->memdup(field->ptr, len))) { - mem_alloc_error(len); result= TRUE; goto end; } @@ -2304,7 +2274,7 @@ bool partition_info::fix_parser_data(THD *thd) part_elem= it++; List_iterator list_val_it(part_elem->list_val_list); num_elements= part_elem->list_val_list.elements; - if (!num_elements && error_if_requires_values()) + if (unlikely(!num_elements && error_if_requires_values())) DBUG_RETURN(true); DBUG_ASSERT(part_type == RANGE_PARTITION ? num_elements == 1U : TRUE); @@ -2715,11 +2685,9 @@ bool partition_info::vers_init_info(THD * thd) column_list= TRUE; num_columns= 1; vers_info= new (thd->mem_root) Vers_part_info; - if (!vers_info) - { - mem_alloc_error(sizeof(Vers_part_info)); + if (unlikely(!vers_info)) return true; - } + return false; } diff --git a/sql/password.c b/sql/password.c index 5e9684acb25..75ac210acd3 100644 --- a/sql/password.c +++ b/sql/password.c @@ -90,7 +90,7 @@ void hash_password(ulong *result, const char *password, uint password_len) { - register ulong nr=1345345333L, add=7, nr2=0x12345671L; + ulong nr=1345345333L, add=7, nr2=0x12345671L; ulong tmp; const char *password_end= password + password_len; for (; password < password_end; password++) @@ -325,7 +325,7 @@ hex2octet(uint8 *to, const char *str, uint len) const char *str_end= str + len; while (str < str_end) { - register char tmp= char_val(*str++); + char tmp= char_val(*str++); *to++= (tmp << 4) | char_val(*str++); } } diff --git a/sql/plistsort.c b/sql/plistsort.c index 99657410fe0..e66bd7c7276 100644 --- a/sql/plistsort.c +++ b/sql/plistsort.c @@ -91,7 +91,7 @@ recursion_point: } { - register struct LS_STRUCT_NAME *sp0= sp++; + struct LS_STRUCT_NAME *sp0= sp++; sp->list_len= sp0->list_len >> 1; sp0->list_len-= sp->list_len; sp->return_point= 0; @@ -100,7 +100,7 @@ recursion_point: return_point0: sp->list1= sorted_list; { - register struct LS_STRUCT_NAME *sp0= sp++; + struct LS_STRUCT_NAME *sp0= sp++; list= list_end; sp->list_len= sp0->list_len; sp->return_point= 1; @@ -108,9 +108,9 @@ return_point0: goto recursion_point; return_point1: { - register LS_LIST_ITEM **hook= &sorted_list; - register LS_LIST_ITEM *list1= sp->list1; - register LS_LIST_ITEM *list2= sorted_list; + LS_LIST_ITEM **hook= &sorted_list; + LS_LIST_ITEM *list1= sp->list1; + LS_LIST_ITEM *list2= sorted_list; if (LS_COMPARE_FUNC_CALL(list1, list2)) { diff --git a/sql/protocol.cc b/sql/protocol.cc index 771fade489b..d29d2fe853d 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -285,7 +285,7 @@ net_send_ok(THD *thd, 
DBUG_ASSERT(store.length() <= MAX_PACKET_LENGTH); error= my_net_write(net, (const unsigned char*)store.ptr(), store.length()); - if (!error && (!skip_flush || is_eof)) + if (likely(!error) && (!skip_flush || is_eof)) error= net_flush(net); thd->server_status&= ~SERVER_SESSION_STATE_CHANGED; @@ -349,7 +349,7 @@ net_send_eof(THD *thd, uint server_status, uint statement_warn_count) { thd->get_stmt_da()->set_overwrite_status(true); error= write_eof_packet(thd, net, server_status, statement_warn_count); - if (!error) + if (likely(!error)) error= net_flush(net); thd->get_stmt_da()->set_overwrite_status(false); DBUG_PRINT("info", ("EOF sent, so no more error sending allowed")); @@ -393,7 +393,7 @@ static bool write_eof_packet(THD *thd, NET *net, because if 'is_fatal_error' is set the server is not going to execute other queries (see the if test in dispatch_command / COM_QUERY) */ - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) server_status&= ~SERVER_MORE_RESULTS_EXISTS; int2store(buff + 3, server_status); error= my_net_write(net, buff, 5); @@ -590,7 +590,7 @@ void Protocol::end_statement() thd->get_stmt_da()->skip_flush()); break; } - if (!error) + if (likely(!error)) thd->get_stmt_da()->set_is_sent(true); DBUG_VOID_RETURN; } @@ -990,7 +990,7 @@ bool Protocol::send_result_set_row(List *row_items) DBUG_RETURN(TRUE); } /* Item::send() may generate an error. If so, abort the loop. */ - if (thd->is_error()) + if (unlikely(thd->is_error())) DBUG_RETURN(TRUE); } diff --git a/sql/records.cc b/sql/records.cc index ac84ca84ab6..59601ae99f3 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -83,7 +83,7 @@ bool init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table, table->status=0; /* And it's always found */ if (!table->file->inited && - (error= table->file->ha_index_init(idx, 1))) + unlikely(error= table->file->ha_index_init(idx, 1))) { if (print_error) table->file->print_error(error, MYF(0)); @@ -217,7 +217,6 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table, info->select=select; info->print_error=print_error; info->unlock_row= rr_unlock_row; - info->ignore_not_found_rows= 0; table->status= 0; /* Rows are always found */ tempfile= 0; @@ -235,7 +234,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table, reinit_io_cache(info->io_cache,READ_CACHE,0L,0,0); info->ref_pos=table->file->ref; if (!table->file->inited) - if (table->file->ha_rnd_init_with_error(0)) + if (unlikely(table->file->ha_rnd_init_with_error(0))) DBUG_RETURN(1); /* @@ -272,7 +271,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table, else if (filesort && filesort->record_pointers) { DBUG_PRINT("info",("using record_pointers")); - if (table->file->ha_rnd_init_with_error(0)) + if (unlikely(table->file->ha_rnd_init_with_error(0))) DBUG_RETURN(1); info->cache_pos= filesort->record_pointers; info->cache_end= (info->cache_pos+ @@ -285,7 +284,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table, int error; info->read_record_func= rr_index_first; if (!table->file->inited && - (error= table->file->ha_index_init(table->file->keyread, 1))) + unlikely((error= table->file->ha_index_init(table->file->keyread, 1)))) { if (print_error) table->file->print_error(error, MYF(0)); @@ -296,7 +295,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table, { DBUG_PRINT("info",("using rr_sequential")); info->read_record_func= rr_sequential; - if (table->file->ha_rnd_init_with_error(1)) + if (unlikely(table->file->ha_rnd_init_with_error(1))) DBUG_RETURN(1); /* We can use 
record cache if we don't update dynamic length tables */ if (!table->no_cache && @@ -365,11 +364,8 @@ static int rr_quick(READ_RECORD *info) int tmp; while ((tmp= info->select->quick->get_next())) { - if (info->thd->killed || (tmp != HA_ERR_RECORD_DELETED)) - { - tmp= rr_handle_error(info, tmp); - break; - } + tmp= rr_handle_error(info, tmp); + break; } return tmp; } @@ -484,15 +480,8 @@ int rr_sequential(READ_RECORD *info) int tmp; while ((tmp= info->table->file->ha_rnd_next(info->record))) { - /* - rnd_next can return RECORD_DELETED for MyISAM when one thread is - reading and another deleting without locks. - */ - if (info->thd->killed || (tmp != HA_ERR_RECORD_DELETED)) - { - tmp= rr_handle_error(info, tmp); - break; - } + tmp= rr_handle_error(info, tmp); + break; } return tmp; } @@ -508,8 +497,7 @@ static int rr_from_tempfile(READ_RECORD *info) if (!(tmp= info->table->file->ha_rnd_pos(info->record,info->ref_pos))) break; /* The following is extremely unlikely to happen */ - if (tmp == HA_ERR_RECORD_DELETED || - (tmp == HA_ERR_KEY_NOT_FOUND && info->ignore_not_found_rows)) + if (tmp == HA_ERR_KEY_NOT_FOUND) continue; tmp= rr_handle_error(info, tmp); break; @@ -560,8 +548,7 @@ int rr_from_pointers(READ_RECORD *info) break; /* The following is extremely unlikely to happen */ - if (tmp == HA_ERR_RECORD_DELETED || - (tmp == HA_ERR_KEY_NOT_FOUND && info->ignore_not_found_rows)) + if (tmp == HA_ERR_KEY_NOT_FOUND) continue; tmp= rr_handle_error(info, tmp); break; @@ -631,7 +618,7 @@ static int init_rr_cache(THD *thd, READ_RECORD *info) static int rr_from_cache(READ_RECORD *info) { - reg1 uint i; + uint i; ulong length; my_off_t rest_of_file; int16 error; @@ -642,7 +629,7 @@ static int rr_from_cache(READ_RECORD *info) { if (info->cache_pos != info->cache_end) { - if (info->cache_pos[info->error_offset]) + if (unlikely(info->cache_pos[info->error_offset])) { shortget(error,info->cache_pos); if (info->print_error) @@ -688,7 +675,8 @@ static int rr_from_cache(READ_RECORD *info) record=uint3korr(position); position+=3; record_pos=info->cache+record*info->reclength; - if ((error=(int16) info->table->file->ha_rnd_pos(record_pos,info->ref_pos))) + if (unlikely((error= (int16) info->table->file-> + ha_rnd_pos(record_pos,info->ref_pos)))) { record_pos[info->error_offset]=1; shortstore(record_pos,error); diff --git a/sql/records.h b/sql/records.h index 940c88ca0c7..f6a5069840d 100644 --- a/sql/records.h +++ b/sql/records.h @@ -67,7 +67,7 @@ struct READ_RECORD uchar *cache,*cache_pos,*cache_end,*read_positions; struct st_sort_addon_field *addon_field; /* Pointer to the fields info */ struct st_io_cache *io_cache; - bool print_error, ignore_not_found_rows; + bool print_error; void (*unpack)(struct st_sort_addon_field *, uchar *, uchar *); int read_record() { return read_record_func(this); } diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc index d7e43a8fb39..ad885c925d9 100644 --- a/sql/rpl_gtid.cc +++ b/sql/rpl_gtid.cc @@ -167,9 +167,8 @@ rpl_slave_state::check_duplicate_gtid(rpl_gtid *gtid, rpl_group_info *rgi) break; } thd= rgi->thd; - if (thd->check_killed()) + if (unlikely(thd->check_killed())) { - thd->send_kill_message(); res= -1; break; } @@ -2602,7 +2601,7 @@ gtid_waiting::wait_for_gtid(THD *thd, rpl_gtid *wait_gtid, &stage_master_gtid_wait_primary, &old_stage); do { - if (thd->check_killed()) + if (unlikely(thd->check_killed(1))) break; else if (wait_until) { @@ -2654,7 +2653,7 @@ gtid_waiting::wait_for_gtid(THD *thd, rpl_gtid *wait_gtid, &stage_master_gtid_wait, &old_stage); did_enter_cond= true; } 
- while (!elem.done && !thd->check_killed()) + while (!elem.done && likely(!thd->check_killed(1))) { thd_wait_begin(thd, THD_WAIT_BINLOG); if (wait_until) diff --git a/sql/rpl_injector.cc b/sql/rpl_injector.cc index b855dec35f9..3c0ec34f352 100644 --- a/sql/rpl_injector.cc +++ b/sql/rpl_injector.cc @@ -105,7 +105,7 @@ int injector::transaction::use_table(server_id_type sid, table tbl) int error; - if ((error= check_state(TABLE_STATE))) + if (unlikely((error= check_state(TABLE_STATE)))) DBUG_RETURN(error); server_id_type save_id= m_thd->variables.server_id; @@ -180,7 +180,8 @@ void injector::new_trans(THD *thd, injector::transaction *ptr) int injector::record_incident(THD *thd, Incident incident) { Incident_log_event ev(thd, incident); - if (int error= mysql_bin_log.write(&ev)) + int error; + if (unlikely((error= mysql_bin_log.write(&ev)))) return error; return mysql_bin_log.rotate_and_purge(true); } @@ -189,7 +190,8 @@ int injector::record_incident(THD *thd, Incident incident, const LEX_CSTRING *message) { Incident_log_event ev(thd, incident, message); - if (int error= mysql_bin_log.write(&ev)) + int error; + if (unlikely((error= mysql_bin_log.write(&ev)))) return error; return mysql_bin_log.rotate_and_purge(true); } diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc index 55a66719e56..6f659aa12ad 100644 --- a/sql/rpl_mi.cc +++ b/sql/rpl_mi.cc @@ -42,7 +42,8 @@ Master_info::Master_info(LEX_CSTRING *connection_name_arg, using_gtid(USE_GTID_NO), events_queued_since_last_gtid(0), gtid_reconnect_event_skip_count(0), gtid_event_seen(false), in_start_all_slaves(0), in_stop_all_slaves(0), in_flush_all_relay_logs(0), - users(0), killed(0) + users(0), killed(0), + total_ddl_groups(0), total_non_trans_groups(0), total_trans_groups(0) { char *tmp; host[0] = 0; user[0] = 0; password[0] = 0; @@ -677,7 +678,7 @@ file '%s')", fname); mi->rli.is_relay_log_recovery= FALSE; // now change cache READ -> WRITE - must do this before flush_master_info reinit_io_cache(&mi->file, WRITE_CACHE, 0L, 0, 1); - if ((error= MY_TEST(flush_master_info(mi, TRUE, TRUE)))) + if (unlikely((error= MY_TEST(flush_master_info(mi, TRUE, TRUE))))) sql_print_error("Failed to flush master info file"); mysql_mutex_unlock(&mi->data_lock); DBUG_RETURN(error); @@ -1648,7 +1649,7 @@ bool Master_info_index::start_all_slaves(THD *thd) error= start_slave(thd, mi, 1); mi->release(); mysql_mutex_lock(&LOCK_active_mi); - if (error) + if (unlikely(error)) { my_error(ER_CANT_START_STOP_SLAVE, MYF(0), "START", @@ -1721,7 +1722,7 @@ bool Master_info_index::stop_all_slaves(THD *thd) error= stop_slave(thd, mi, 1); mi->release(); mysql_mutex_lock(&LOCK_active_mi); - if (error) + if (unlikely(error)) { my_error(ER_CANT_START_STOP_SLAVE, MYF(0), "STOP", @@ -2020,7 +2021,7 @@ bool Master_info_index::flush_all_relay_logs() mi->release(); mysql_mutex_lock(&LOCK_active_mi); - if (error) + if (unlikely(error)) { result= true; break; diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h index 260c35e8c04..54d6b5be592 100644 --- a/sql/rpl_mi.h +++ b/sql/rpl_mi.h @@ -327,6 +327,16 @@ class Master_info : public Slave_reporting_capability uint users; /* Active user for object */ uint killed; + + /* No of DDL event group */ + volatile uint64 total_ddl_groups; + + /* No of non-transactional event group*/ + volatile uint64 total_non_trans_groups; + + /* No of transactional event group*/ + volatile uint64 total_trans_groups; + /* domain-id based filter */ Domain_id_filter domain_id_filter; diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index 9f8a3450716..a12bbba5a1f 100644 --- 
a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -337,7 +337,7 @@ do_gco_wait(rpl_group_info *rgi, group_commit_orderer *gco, thd->set_time_for_next_stage(); do { - if (thd->check_killed() && !rgi->worker_error) + if (!rgi->worker_error && unlikely(thd->check_killed(1))) { DEBUG_SYNC(thd, "rpl_parallel_start_waiting_for_prior_killed"); thd->clear_error(); @@ -402,9 +402,8 @@ do_ftwrl_wait(rpl_group_info *rgi, { if (entry->force_abort || rgi->worker_error) break; - if (thd->check_killed()) + if (unlikely(thd->check_killed())) { - thd->send_kill_message(); slave_output_error_info(rgi, thd); signal_error_to_sql_driver_thread(thd, rgi, 1); break; @@ -453,9 +452,8 @@ pool_mark_busy(rpl_parallel_thread_pool *pool, THD *thd) } while (pool->busy) { - if (thd && thd->check_killed()) + if (thd && unlikely(thd->check_killed())) { - thd->send_kill_message(); res= 1; break; } @@ -571,9 +569,8 @@ rpl_pause_for_ftwrl(THD *thd) e->last_committed_sub_id < e->pause_sub_id && !err) { - if (thd->check_killed()) + if (unlikely(thd->check_killed())) { - thd->send_kill_message(); err= 1; break; } @@ -838,8 +835,8 @@ do_retry: } DBUG_EXECUTE_IF("inject_mdev8031", { /* Simulate pending KILL caught in read_relay_log_description_event(). */ - if (thd->check_killed()) { - thd->send_kill_message(); + if (unlikely(thd->check_killed())) + { err= 1; goto err; } @@ -862,13 +859,13 @@ do_retry: if (ev) break; - if (rlog.error < 0) + if (unlikely(rlog.error < 0)) { errmsg= "slave SQL thread aborted because of I/O error"; err= 1; goto check_retry; } - if (rlog.error > 0) + if (unlikely(rlog.error > 0)) { sql_print_error("Slave SQL thread: I/O error reading " "event(errno: %d cur_log->error: %d)", @@ -1036,6 +1033,8 @@ handle_rpl_parallel_thread(void *arg) thd->system_thread= SYSTEM_THREAD_SLAVE_SQL; thd->security_ctx->skip_grants(); thd->variables.max_allowed_packet= slave_max_allowed_packet; + /* Ensure that the slave can execute any alter table it gets from master */ + thd->variables.alter_algorithm= (ulong) Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT; thd->slave_thread= 1; set_slave_thread_options(thd); @@ -1288,7 +1287,7 @@ handle_rpl_parallel_thread(void *arg) if (!err) #endif { - if (thd->check_killed()) + if (unlikely(thd->check_killed())) { thd->clear_error(); thd->get_stmt_da()->reset_diagnostics_area(); @@ -1301,7 +1300,7 @@ handle_rpl_parallel_thread(void *arg) delete_or_keep_event_post_apply(rgi, event_type, qev->ev); DBUG_EXECUTE_IF("rpl_parallel_simulate_temp_err_gtid_0_x_100", err= dbug_simulate_tmp_error(rgi, thd);); - if (err) + if (unlikely(err)) { convert_kill_to_deadlock_error(rgi); if (has_temporary_error(thd) && slave_trans_retries > 0) @@ -2075,7 +2074,7 @@ rpl_parallel_entry::choose_thread(rpl_group_info *rgi, bool *did_enter_cond, /* The thread is ready to queue into.
*/ break; } - else if (rli->sql_driver_thd->check_killed()) + else if (unlikely(rli->sql_driver_thd->check_killed(1))) { unlock_or_exit_cond(rli->sql_driver_thd, &thr->LOCK_rpl_thread, did_enter_cond, old_stage); @@ -2401,9 +2400,8 @@ rpl_parallel::wait_for_workers_idle(THD *thd) &stage_waiting_for_workers_idle, &old_stage); while (e->current_sub_id > e->last_committed_sub_id) { - if (thd->check_killed()) + if (unlikely(thd->check_killed())) { - thd->send_kill_message(); err= 1; break; } diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc index 6da3f15cfb9..db579a63ce0 100644 --- a/sql/rpl_record.cc +++ b/sql/rpl_record.cc @@ -196,6 +196,7 @@ unpack_row(rpl_group_info *rgi, uchar const **const current_row_end, ulong *const master_reclength, uchar const *const row_end) { + int error; DBUG_ENTER("unpack_row"); DBUG_ASSERT(row_data); DBUG_ASSERT(table); @@ -419,7 +420,7 @@ unpack_row(rpl_group_info *rgi, /* Add Extra slave persistent columns */ - if (int error= fill_extra_persistent_columns(table, cols->n_bits)) + if (unlikely(error= fill_extra_persistent_columns(table, cols->n_bits))) DBUG_RETURN(error); /* diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 04109ddadb4..9e09a5cf067 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -142,7 +142,7 @@ int Relay_log_info::init(const char* info_fname) log_space_limit= relay_log_space_limit; log_space_total= 0; - if (error_on_rli_init_info) + if (unlikely(error_on_rli_init_info)) goto err; char pattern[FN_REFLEN]; @@ -306,7 +306,7 @@ Failed to open the existing relay log info file '%s' (errno %d)", fname); error= 1; } - if (error) + if (unlikely(error)) { if (info_fd >= 0) mysql_file_close(info_fd, MYF(0)); @@ -415,7 +415,7 @@ Failed to open the existing relay log info file '%s' (errno %d)", before Relay_log_info::flush() */ reinit_io_cache(&info_file, WRITE_CACHE,0L,0,1); - if ((error= flush())) + if (unlikely((error= flush()))) { msg= "Failed to flush relay log info file"; goto err; @@ -1520,7 +1520,7 @@ scan_one_gtid_slave_pos_table(THD *thd, HASH *hash, DYNAMIC_ARRAY *array, LEX_CSTRING *tablename, void **out_hton) { TABLE_LIST tlist; - TABLE *table; + TABLE *UNINIT_VAR(table); bool table_opened= false; bool table_scanned= false; struct gtid_pos_element tmp_entry, *entry; @@ -1537,11 +1537,9 @@ scan_one_gtid_slave_pos_table(THD *thd, HASH *hash, DYNAMIC_ARRAY *array, goto end; bitmap_set_all(table->read_set); - if ((err= table->file->ha_rnd_init_with_error(1))) - { - table->file->print_error(err, MYF(0)); + if (unlikely(err= table->file->ha_rnd_init_with_error(1))) goto end; - } + table_scanned= true; for (;;) { @@ -1551,9 +1549,7 @@ scan_one_gtid_slave_pos_table(THD *thd, HASH *hash, DYNAMIC_ARRAY *array, if ((err= table->file->ha_rnd_next(table->record[0]))) { - if (err == HA_ERR_RECORD_DELETED) - continue; - else if (err == HA_ERR_END_OF_FILE) + if (err == HA_ERR_END_OF_FILE) break; else { @@ -1784,6 +1780,8 @@ gtid_pos_auto_create_tables(rpl_slave_state::gtid_pos_table **list_ptr) p= strmake(p, plugin_name(*auto_engines)->str, FN_REFLEN - (p - buf)); table_name.str= buf; table_name.length= p - buf; + table_case_convert(const_cast(table_name.str), + static_cast(table_name.length)); entry= rpl_global_gtid_slave_state->alloc_gtid_pos_table (&table_name, hton, rpl_slave_state::GTID_POS_AUTO_CREATE); if (!entry) @@ -2208,7 +2206,7 @@ void rpl_group_info::cleanup_context(THD *thd, bool error) to rollback before continuing with the next events. 4) so we need this "context cleanup" function. 
*/ - if (error) + if (unlikely(error)) { trans_rollback_stmt(thd); // if a "statement transaction" /* trans_rollback() also resets OPTION_GTID_BEGIN */ @@ -2222,7 +2220,7 @@ void rpl_group_info::cleanup_context(THD *thd, bool error) m_table_map.clear_tables(); slave_close_thread_tables(thd); - if (error) + if (unlikely(error)) { thd->mdl_context.release_transactional_locks(); diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc index 58711078db6..31035fb5dcc 100644 --- a/sql/rpl_utility.cc +++ b/sql/rpl_utility.cc @@ -42,6 +42,12 @@ max_display_length_for_temporal2_field(uint32 int_display_length, @param sql_type Type of the field @param metadata The metadata from the master for the field. @return Maximum length of the field in bytes. + + The precise values calculated by field->max_display_length() and + calculated by max_display_length_for_field() can differ (by +1 or -1) + for integer data types (TINYINT, SMALLINT, MEDIUMINT, INT, BIGINT). + This slight difference is not important here, because we call + this function only for two *different* integer data types. */ static uint32 max_display_length_for_field(enum_field_types sql_type, unsigned int metadata) @@ -737,6 +743,16 @@ can_convert_field_to(Field *field, case MYSQL_TYPE_INT24: case MYSQL_TYPE_LONG: case MYSQL_TYPE_LONGLONG: + /* + max_display_length_for_field() is not fully precise for the integer + data types. So its result cannot be compared to the result of + field->max_display_length() when the table field and the binlog field + are of the same type. + This code should eventually be rewritten not to use + compare_lengths(), to detect subtype/supertype relations + just using the type codes. + */ + DBUG_ASSERT(source_type != field->real_type()); *order_var= compare_lengths(field, source_type, metadata); DBUG_ASSERT(*order_var != 0); DBUG_RETURN(is_conversion_ok(*order_var, rli)); diff --git a/sql/semisync_master.cc b/sql/semisync_master.cc index 3c88bdddad4..8a82fd9085c 100644 --- a/sql/semisync_master.cc +++ b/sql/semisync_master.cc @@ -779,7 +779,6 @@ void Repl_semi_sync_master::dump_end(THD* thd) int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, my_off_t trx_wait_binlog_pos) { - DBUG_ENTER("Repl_semi_sync_master::commit_trx"); if (get_master_enabled() && trx_wait_binlog_name) @@ -788,15 +787,16 @@ int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, struct timespec abstime; int wait_result; PSI_stage_info old_stage; + THD *thd= current_thd; set_timespec(start_ts, 0); - DEBUG_SYNC(current_thd, "rpl_semisync_master_commit_trx_before_lock"); + DEBUG_SYNC(thd, "rpl_semisync_master_commit_trx_before_lock"); /* Acquire the mutex.
*/ lock(); /* This must be called after acquired the lock */ - THD_ENTER_COND(NULL, &COND_binlog_send, &LOCK_binlog, + THD_ENTER_COND(thd, &COND_binlog_send, &LOCK_binlog, & stage_waiting_for_semi_sync_ack_from_slave, & old_stage); @@ -809,7 +809,7 @@ int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, trx_wait_binlog_name, (ulong)trx_wait_binlog_pos, (int)is_on())); - while (is_on() && !thd_killed(current_thd)) + while (is_on() && !thd_killed(thd)) { if (m_reply_file_name_inited) { @@ -924,7 +924,7 @@ int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, m_active_tranxs may be NULL if someone disabled semi sync during cond_timewait() */ - assert(thd_killed(current_thd) || !m_active_tranxs || + assert(thd_killed(thd) || !m_active_tranxs || !m_active_tranxs->is_tranx_end_pos(trx_wait_binlog_name, trx_wait_binlog_pos)); @@ -937,7 +937,7 @@ int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, /* The lock held will be released by thd_exit_cond, so no need to call unlock() here */ - THD_EXIT_COND(NULL, & old_stage); + THD_EXIT_COND(thd, &old_stage); } DBUG_RETURN(0); diff --git a/sql/set_var.cc b/sql/set_var.cc index bf373dde905..7bf6b9f928d 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -245,10 +245,10 @@ uchar *sys_var::global_value_ptr(THD *thd, const LEX_CSTRING *base) bool sys_var::check(THD *thd, set_var *var) { - if ((var->value && do_check(thd, var)) - || (on_check && on_check(this, thd, var))) + if (unlikely((var->value && do_check(thd, var)) || + (on_check && on_check(this, thd, var)))) { - if (!thd->is_error()) + if (likely(!thd->is_error())) { char buff[STRING_BUFFER_USUAL_SIZE]; String str(buff, sizeof(buff), system_charset_info), *res; @@ -718,10 +718,10 @@ int sql_set_variables(THD *thd, List *var_list, bool free) set_var_base *var; while ((var=it++)) { - if ((error= var->check(thd))) + if (unlikely((error= var->check(thd)))) goto err; } - if (was_error || !(error= MY_TEST(thd->is_error()))) + if (unlikely(was_error) || likely(!(error= MY_TEST(thd->is_error())))) { it.rewind(); while ((var= it++)) diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 17192663381..77347472521 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -7089,7 +7089,7 @@ ER_IDENT_CAUSES_TOO_LONG_PATH eng "Long database name and identifier for object resulted in path length exceeding %d characters. Path: '%s'" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL - eng "cannot silently convert NULL values, as required in this SQL_MODE" + eng "cannot convert NULL to non-constant DEFAULT" ER_MUST_CHANGE_PASSWORD_LOGIN eng "Your password has expired. 
To log in you must change it using a client that supports expired passwords" @@ -7859,8 +7859,8 @@ ER_VERS_ALTER_ENGINE_PROHIBITED ER_VERS_RANGE_PROHIBITED eng "SYSTEM_TIME range selector is not allowed" -ER_UNUSED_26 - eng "You should never see it" +ER_CONFLICTING_FOR_SYSTEM_TIME + eng "Conflicting FOR SYSTEM_TIME clauses in WITH RECURSIVE" ER_VERS_TABLE_MUST_HAVE_COLUMNS eng "Table %`s must have at least one versioned column" @@ -7913,3 +7913,9 @@ ER_INDEX_FILE_FULL eng "The index file for table '%-.192s' is full" ER_UPDATED_COLUMN_ONLY_ONCE eng "The column %`s.%`s cannot be changed more than once in a single UPDATE statement" +ER_EMPTY_ROW_IN_TVC + eng "Row with no elements is not allowed in table value constructor in this context" +ER_VERS_QUERY_IN_PARTITION + eng "SYSTEM_TIME partitions in table %`s does not support historical query" +ER_KEY_DOESNT_SUPPORT + eng "%s index %`s does not support this operation" diff --git a/sql/slave.cc b/sql/slave.cc index bf70db66f35..275db6165e9 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -223,7 +223,7 @@ static void set_slave_max_allowed_packet(THD *thd, MYSQL *mysql) void init_thread_mask(int* mask,Master_info* mi,bool inverse) { bool set_io = mi->slave_running, set_sql = mi->rli.slave_running; - register int tmp_mask=0; + int tmp_mask=0; DBUG_ENTER("init_thread_mask"); if (set_io) @@ -344,8 +344,15 @@ gtid_pos_table_creation(THD *thd, plugin_ref engine, LEX_CSTRING *table_name) goto end; mysql_parse(thd, thd->query(), thd->query_length(), &parser_state, FALSE, FALSE); - if (thd->is_error()) + if (unlikely(thd->is_error())) err= 1; + /* The warning is relevant to 10.3 and earlier. */ + sql_print_warning("The automatically created table '%s' name may not be " + "entirely in lowercase. The table name will be converted " + "to lowercase to any future upgrade to 10.4.0 and later " + "version where it will be auto-created at once " + "in lowercase.", + table_name->str); end: thd->variables.option_bits= thd_saved_option; thd->reset_query(); @@ -356,7 +363,7 @@ end: static void handle_gtid_pos_auto_create_request(THD *thd, void *hton) { - int err; + int UNINIT_VAR(err); plugin_ref engine= NULL, *auto_engines; rpl_slave_state::gtid_pos_table *entry; StringBuffer loc_table_name; @@ -739,7 +746,7 @@ int init_slave() thd->reset_globals(); delete thd; - if (error) + if (unlikely(error)) { sql_print_error("Failed to create slave threads"); goto err; @@ -885,7 +892,7 @@ bool init_slave_skip_errors(const char* arg) if (!arg || !*arg) // No errors defined goto end; - if (my_bitmap_init(&slave_error_mask,0,MAX_SLAVE_ERROR,0)) + if (unlikely(my_bitmap_init(&slave_error_mask,0,MAX_SLAVE_ERROR,0))) DBUG_RETURN(1); use_slave_mask= 1; @@ -978,10 +985,10 @@ bool init_slave_transaction_retry_errors(const char* arg) p++; } - if (!(slave_transaction_retry_errors= - (uint *) my_once_alloc(sizeof(int) * - slave_transaction_retry_error_length, - MYF(MY_WME)))) + if (unlikely(!(slave_transaction_retry_errors= + (uint *) my_once_alloc(sizeof(int) * + slave_transaction_retry_error_length, + MYF(MY_WME))))) DBUG_RETURN(1); /* @@ -1030,11 +1037,12 @@ int terminate_slave_threads(Master_info* mi,int thread_mask,bool skip_lock) } else mi->rli.abort_slave=1; - if ((error=terminate_slave_thread(mi->rli.sql_driver_thd, sql_lock, - &mi->rli.stop_cond, - &mi->rli.slave_running, - skip_lock)) && - !force_all) + if (unlikely((error= terminate_slave_thread(mi->rli.sql_driver_thd, + sql_lock, + &mi->rli.stop_cond, + &mi->rli.slave_running, + skip_lock))) && + !force_all) DBUG_RETURN(error); 
retval= error; @@ -1052,11 +1060,11 @@ int terminate_slave_threads(Master_info* mi,int thread_mask,bool skip_lock) { DBUG_PRINT("info",("Terminating IO thread")); mi->abort_slave=1; - if ((error=terminate_slave_thread(mi->io_thd, io_lock, - &mi->stop_cond, - &mi->slave_running, - skip_lock)) && - !force_all) + if (unlikely((error= terminate_slave_thread(mi->io_thd, io_lock, + &mi->stop_cond, + &mi->slave_running, + skip_lock))) && + !force_all) DBUG_RETURN(error); if (!retval) retval= error; @@ -1232,8 +1240,9 @@ int start_slave_thread( } start_id= *slave_run_id; DBUG_PRINT("info",("Creating new slave thread")); - if ((error = mysql_thread_create(thread_key, - &th, &connection_attrib, h_func, (void*)mi))) + if (unlikely((error= mysql_thread_create(thread_key, + &th, &connection_attrib, h_func, + (void*)mi)))) { sql_print_error("Can't create slave thread (errno= %d).", error); if (start_lock) @@ -1346,7 +1355,7 @@ int start_slave_threads(THD *thd, mi->rli.restart_gtid_pos.reset(); } - if (!error && (thread_mask & SLAVE_IO)) + if (likely(!error) && likely((thread_mask & SLAVE_IO))) error= start_slave_thread( #ifdef HAVE_PSI_INTERFACE key_thread_slave_io, @@ -1355,7 +1364,7 @@ int start_slave_threads(THD *thd, cond_io, &mi->slave_running, &mi->slave_run_id, mi); - if (!error && (thread_mask & SLAVE_SQL)) + if (likely(!error) && likely(thread_mask & SLAVE_SQL)) { error= start_slave_thread( #ifdef HAVE_PSI_INTERFACE @@ -1365,7 +1374,7 @@ int start_slave_threads(THD *thd, cond_sql, &mi->rli.slave_running, &mi->rli.slave_run_id, mi); - if (error) + if (unlikely(error)) terminate_slave_threads(mi, thread_mask & SLAVE_IO, !need_slave_mutex); } DBUG_RETURN(error); @@ -2337,7 +2346,8 @@ past_checksum: */ if (opt_replicate_events_marked_for_skip == RPL_SKIP_FILTER_ON_MASTER) { - if (mysql_real_query(mysql, STRING_WITH_LEN("SET skip_replication=1"))) + if (unlikely(mysql_real_query(mysql, + STRING_WITH_LEN("SET skip_replication=1")))) { err_code= mysql_errno(mysql); if (is_network_error(err_code)) @@ -2381,7 +2391,7 @@ past_checksum: STRINGIFY_ARG(MARIA_SLAVE_CAPABILITY_ANNOTATE))), mysql_real_query(mysql, STRING_WITH_LEN("SET @mariadb_slave_capability=" STRINGIFY_ARG(MARIA_SLAVE_CAPABILITY_MINE)))); - if (rc) + if (unlikely(rc)) { err_code= mysql_errno(mysql); if (is_network_error(err_code)) @@ -2457,7 +2467,7 @@ after_set_capability: query_str.append(STRING_WITH_LEN("'"), system_charset_info); rc= mysql_real_query(mysql, query_str.ptr(), query_str.length()); - if (rc) + if (unlikely(rc)) { err_code= mysql_errno(mysql); if (is_network_error(err_code)) @@ -2490,7 +2500,7 @@ after_set_capability: } rc= mysql_real_query(mysql, query_str.ptr(), query_str.length()); - if (rc) + if (unlikely(rc)) { err_code= mysql_errno(mysql); if (is_network_error(err_code)) @@ -2523,7 +2533,7 @@ after_set_capability: } rc= mysql_real_query(mysql, query_str.ptr(), query_str.length()); - if (rc) + if (unlikely(rc)) { err_code= mysql_errno(mysql); if (is_network_error(err_code)) @@ -2559,7 +2569,7 @@ after_set_capability: query_str.append(STRING_WITH_LEN("'"), system_charset_info); rc= mysql_real_query(mysql, query_str.ptr(), query_str.length()); - if (rc) + if (unlikely(rc)) { err_code= mysql_errno(mysql); if (is_network_error(err_code)) @@ -3121,6 +3131,19 @@ void show_master_info_get_fields(THD *thd, List *field_list, field_list->push_back(new (mem_root) Item_empty_string(thd, "Slave_SQL_Running_State", 20)); + field_list->push_back(new (mem_root) + Item_return_int(thd, "Slave_DDL_Groups", 20, + MYSQL_TYPE_LONGLONG), + 
mem_root); + field_list->push_back(new (mem_root) + Item_return_int(thd, "Slave_Non_Transactional_Groups", 20, + MYSQL_TYPE_LONGLONG), + mem_root); + field_list->push_back(new (mem_root) + Item_return_int(thd, "Slave_Transactional_Groups", 20, + MYSQL_TYPE_LONGLONG), + mem_root); + if (full) { field_list->push_back(new (mem_root) @@ -3351,6 +3374,17 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full, // Slave_SQL_Running_State protocol->store(slave_sql_running_state, &my_charset_bin); + uint64 events; + events= (uint64)my_atomic_load64_explicit((volatile int64 *) + &mi->total_ddl_groups, MY_MEMORY_ORDER_RELAXED); + protocol->store(events); + events= (uint64)my_atomic_load64_explicit((volatile int64 *) + &mi->total_non_trans_groups, MY_MEMORY_ORDER_RELAXED); + protocol->store(events); + events= (uint64)my_atomic_load64_explicit((volatile int64 *) + &mi->total_trans_groups, MY_MEMORY_ORDER_RELAXED); + protocol->store(events); + if (full) { protocol->store((uint32) mi->rli.retried_trans); @@ -3650,7 +3684,7 @@ static ulong read_event(MYSQL* mysql, Master_info *mi, bool* suppress_warnings, #endif len = cli_safe_read_reallen(mysql, network_read_len); - if (len == packet_error || (long) len < 1) + if (unlikely(len == packet_error || (long) len < 1)) { if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED) { @@ -3715,7 +3749,7 @@ has_temporary_error(THD *thd) error or not. This is currently the case for Incident_log_event, which sets no message. Return FALSE. */ - if (!thd->is_error()) + if (!likely(thd->is_error())) DBUG_RETURN(0); current_errno= thd->get_stmt_da()->sql_errno(); @@ -3936,7 +3970,7 @@ apply_event_and_update_pos_apply(Log_event* ev, THD* thd, rpl_group_info *rgi, TODO: Replace this with a decent error message when merged with BUG#24954 (which adds several new error message). */ - if (error) + if (unlikely(error)) { rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, rgi->gtid_info(), "It was not possible to update the positions" @@ -4327,19 +4361,19 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli, update_log_pos failed: this should not happen, so we don't retry. 
*/ - if (exec_res == 2) + if (unlikely(exec_res == 2)) DBUG_RETURN(1); #ifdef WITH_WSREP - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); if (thd->wsrep_conflict_state == NO_CONFLICT) { - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); #endif /* WITH_WSREP */ if (slave_trans_retries) { int UNINIT_VAR(temp_err); - if (exec_res && (temp_err= has_temporary_error(thd))) + if (unlikely(exec_res) && (temp_err= has_temporary_error(thd))) { const char *errmsg; rli->clear_error(); @@ -4412,7 +4446,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli, #ifdef WITH_WSREP } else - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); #endif /* WITH_WSREP */ thread_safe_increment64(&rli->executed_entries); @@ -4776,7 +4810,7 @@ connected: if (check_io_slave_killed(mi, NullS)) goto err; - if (event_len == packet_error) + if (unlikely(event_len == packet_error)) { uint mysql_error_number= mysql_errno(mysql); switch (mysql_error_number) { @@ -5079,7 +5113,7 @@ slave_output_error_info(rpl_group_info *rgi, THD *thd) Relay_log_info *rli= rgi->rli; uint32 const last_errno= rli->last_error().number; - if (thd->is_error()) + if (unlikely(thd->is_error())) { char const *const errmsg= thd->get_stmt_da()->message(); @@ -5123,7 +5157,7 @@ slave_output_error_info(rpl_group_info *rgi, THD *thd) udf_error = true; sql_print_warning("Slave: %s Error_code: %d", err->get_message_text(), err->get_sql_errno()); } - if (udf_error) + if (unlikely(udf_error)) { StringBuffer<100> tmp; if (rli->mi->using_gtid != Master_info::USE_GTID_NO) @@ -5251,6 +5285,10 @@ pthread_handler_t handle_slave_sql(void *arg) applied. In all other cases it must be FALSE. */ thd->variables.binlog_annotate_row_events= 0; + + /* Ensure that the slave can execute any alter table it gets from master */ + thd->variables.alter_algorithm= (ulong) Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT; + add_to_active_threads(thd); /* We are going to set slave_running to 1. Assuming slave I/O thread is @@ -5403,7 +5441,7 @@ pthread_handler_t handle_slave_sql(void *arg) if (opt_init_slave.length) { execute_init_command(thd, &opt_init_slave, &LOCK_sys_init_slave); - if (thd->is_slave_error) + if (unlikely(thd->is_slave_error)) { rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL, "Slave SQL thread aborted. Can't execute init_slave query"); @@ -6069,7 +6107,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) { int error= 0; StringBuffer<1024> error_msg; - ulonglong inc_pos; + ulonglong inc_pos= 0; ulonglong event_pos; Relay_log_info *rli= &mi->rli; mysql_mutex_t *log_lock= rli->relay_log.get_log_lock(); @@ -6908,7 +6946,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) } mysql_mutex_unlock(log_lock); - if (!error && + if (likely(!error) && mi->using_gtid != Master_info::USE_GTID_NO && mi->events_queued_since_last_gtid > 0 && ( (mi->last_queued_gtid_standalone && @@ -6957,11 +6995,11 @@ err: Do not print ER_SLAVE_RELAY_LOG_WRITE_FAILURE error here, as the caller handle_slave_io() prints it on return.
*/ - if (error && error != ER_SLAVE_RELAY_LOG_WRITE_FAILURE) + if (unlikely(error) && error != ER_SLAVE_RELAY_LOG_WRITE_FAILURE) mi->report(ERROR_LEVEL, error, NULL, ER_DEFAULT(error), error_msg.ptr()); - if(is_malloc) + if (unlikely(is_malloc)) my_free((void *)new_buf); DBUG_RETURN(error); @@ -7428,7 +7466,7 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size) } if (opt_reckless_slave) // For mysql-test cur_log->error = 0; - if (cur_log->error < 0) + if (unlikely(cur_log->error < 0)) { errmsg = "slave SQL thread aborted because of I/O error"; if (hot_log) diff --git a/sql/sp.cc b/sql/sp.cc index cb05108a5bc..af86737ebb9 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -1,6 +1,6 @@ /* - Copyright (c) 2002, 2016, Oracle and/or its affiliates. - Copyright (c) 2009, 2017, MariaDB + Copyright (c) 2002, 2018, Oracle and/or its affiliates. + Copyright (c) 2009, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -423,7 +423,7 @@ private: bool m_print_once; public: - Proc_table_intact() : m_print_once(TRUE) {} + Proc_table_intact() : m_print_once(TRUE) { has_keys= TRUE; } protected: void report_error(uint code, const char *fmt, ...); @@ -780,6 +780,7 @@ Sp_handler::db_find_and_cache_routine(THD *thd, Silence DEPRECATED SYNTAX warnings when loading a stored procedure into the cache. */ + struct Silence_deprecated_warning : public Internal_error_handler { public: @@ -1002,7 +1003,7 @@ Sp_handler::db_load_routine(THD *thd, const Database_qualified_name *name, if (type() == TYPE_ENUM_PACKAGE_BODY) { - sp_package *package= sphp[0]->get_package(); + sp_package *package= (*sphp)->get_package(); List_iterator it(package->m_routine_implementations); for (LEX *lex; (lex= it++); ) { @@ -1078,6 +1079,7 @@ sp_returns_type(THD *thd, String &result, const sp_head *sp) @return SP_OK on success, or SP_DELETE_ROW_FAILED on error. used to indicate about errors. */ + int Sp_handler::sp_drop_routine_internal(THD *thd, const Database_qualified_name *name, @@ -1111,7 +1113,7 @@ Sp_handler::sp_find_and_drop_routine(THD *thd, TABLE *table, const Database_qualified_name *name) const { int ret; - if (SP_OK != (ret= db_find_routine_aux(thd, name, table))) + if ((ret= db_find_routine_aux(thd, name, table)) != SP_OK) return ret; return sp_drop_routine_internal(thd, name, table); } @@ -1123,11 +1125,23 @@ Sp_handler_package_spec:: const Database_qualified_name *name) const { int ret; - if (SP_OK != (ret= db_find_routine_aux(thd, name, table))) + if ((ret= db_find_routine_aux(thd, name, table)) != SP_OK) return ret; + /* + When we do "DROP PACKAGE pkg", we should also perform + "DROP PACKAGE BODY pkg" automatically. + */ ret= sp_handler_package_body.sp_find_and_drop_routine(thd, table, name); if (ret != SP_KEY_NOT_FOUND && ret != SP_OK) - return ret; + { + /* + - SP_KEY_NOT_FOUND means that "CREATE PACKAGE pkg" did not + have a corresponding "CREATE PACKAGE BODY pkg" yet. + - SP_OK means that "CREATE PACKAGE pkg" had a corresponding + "CREATE PACKAGE BODY pkg", which was successfully dropped.
+ */ + return ret; // Other codes mean an unexpected error + } return Sp_handler::sp_find_and_drop_routine(thd, table, name); } @@ -1231,7 +1245,7 @@ Sp_handler::sp_create_routine(THD *thd, const sp_head *sp) const DBUG_ASSERT(0); ret= SP_OK; } - if (ret) + if (ret != SP_OK) goto done; } else if (lex->create_info.if_not_exists()) @@ -1565,7 +1579,7 @@ Sp_handler::sp_drop_routine(THD *thd, if (!(table= open_proc_table_for_update(thd))) DBUG_RETURN(SP_OPEN_TABLE_FAILED); - if (SP_OK == (ret= sp_find_and_drop_routine(thd, table, name)) && + if ((ret= sp_find_and_drop_routine(thd, table, name)) == SP_OK && write_bin_log(thd, TRUE, thd->query(), thd->query_length())) ret= SP_INTERNAL_ERROR; /* @@ -1916,6 +1930,7 @@ Sp_handler::sp_show_create_routine(THD *thd, and return it as a 0-terminated string 'pkg.name' -> 'pkg\0' */ + class Prefix_name_buf: public LEX_CSTRING { char m_buf[SAFE_NAME_LEN + 1]; @@ -1948,6 +1963,7 @@ public: - either returns the original SP, - or makes and returns a new clone of SP */ + sp_head * Sp_handler::sp_clone_and_link_routine(THD *thd, const Database_qualified_name *name, @@ -2015,7 +2031,7 @@ Sp_handler::sp_clone_and_link_routine(THD *thd, 1. Cut the package name prefix from the routine name: 'pkg1.p1' -> 'p1', to have db_load_routine() generate and parse a query like this: CREATE PROCEDURE p1 ...; - rether than: + rather than: CREATE PROCEDURE pkg1.p1 ...; The latter would be misinterpreted by the parser as a standalone routine 'p1' in the database 'pkg1', which is not what we need. @@ -2126,6 +2142,7 @@ Sp_handler::sp_find_routine(THD *thd, const Database_qualified_name *name, @retval non-NULL - a pointer to an sp_head object @retval NULL - an error happened. */ + sp_head * Sp_handler::sp_find_package_routine(THD *thd, const LEX_CSTRING pkgname_str, @@ -2168,6 +2185,7 @@ Sp_handler::sp_find_package_routine(THD *thd, @retval non-NULL - a pointer to an sp_head object @retval NULL - an error happened */ + sp_head * Sp_handler::sp_find_package_routine(THD *thd, const Database_qualified_name *name, @@ -2275,7 +2293,7 @@ bool sp_add_used_routine(Query_tables_list *prelocking_ctx, Query_arena *arena, { Sroutine_hash_entry *rn= (Sroutine_hash_entry *)arena->alloc(sizeof(Sroutine_hash_entry)); - if (!rn) // OOM. Error will be reported using fatal_error(). + if (unlikely(!rn)) // OOM. Error will be reported using fatal_error(). return FALSE; rn->mdl_request.init(key, MDL_SHARED, MDL_TRANSACTION); if (my_hash_insert(&prelocking_ctx->sroutines, (uchar *)rn)) @@ -2301,6 +2319,7 @@ bool sp_add_used_routine(Query_tables_list *prelocking_ctx, Query_arena *arena, It's used during parsing of CREATE PACKAGE BODY, to load the corresponding CREATE PACKAGE. */ + int Sp_handler::sp_cache_routine_reentrant(THD *thd, const Database_qualified_name *name, @@ -2315,7 +2334,7 @@ Sp_handler::sp_cache_routine_reentrant(THD *thd, } -/* +/** Check if a routine has a declaration in the CREATE PACKAGE statement, by looking up in thd->sp_package_spec_cache, and by loading from mysql.proc if needed. @@ -2340,6 +2359,7 @@ Sp_handler::sp_cache_routine_reentrant(THD *thd, After the call of this function, the package specification is always cached, unless a fatal error happens. */ + static bool is_package_public_routine(THD *thd, const LEX_CSTRING &db, @@ -2356,7 +2376,7 @@ is_package_public_routine, } -/* +/** Check if a routine has a declaration in the CREATE PACKAGE statement by looking up in sp_package_spec_cache.
@@ -2373,6 +2393,7 @@ is_package_public_routine(THD *thd, The package specification (i.e. the CREATE PACKAGE statement) for the current package body must already be loaded and cached at this point. */ + static bool is_package_public_routine_quick(THD *thd, const LEX_CSTRING &db, @@ -2388,10 +2409,11 @@ is_package_public_routine_quick(THD *thd, } -/* +/** Check if a qualified name, e.g. "CALL name1.name2", refers to a known routine in the package body "pkg". */ + static bool is_package_body_routine(THD *thd, sp_package *pkg, const LEX_CSTRING &name1, @@ -2404,11 +2426,12 @@ is_package_body_routine(THD *thd, sp_package *pkg, } -/* +/** Resolve a qualified routine reference xxx.yyy(), between: - A standalone routine: xxx.yyy - A package routine: current_database.xxx.yyy */ + bool Sp_handler:: sp_resolve_package_routine_explicit(THD *thd, sp_head *caller, @@ -2444,11 +2467,12 @@ bool Sp_handler:: } -/* +/** Resolve a non-qualified routine reference yyy(), between: - A standalone routine: current_database.yyy - A package routine: current_database.current_package.yyy */ + bool Sp_handler:: sp_resolve_package_routine_implicit(THD *thd, sp_head *caller, @@ -2534,6 +2558,7 @@ bool Sp_handler:: @retval false on success @retval true on error (e.g. EOM, could not read CREATE PACKAGE) */ + bool Sp_handler::sp_resolve_package_routine(THD *thd, sp_head *caller, @@ -2787,7 +2812,7 @@ int Sp_handler::sp_cache_routine(THD *thd, an error with it's return value without calling my_error(), we set the generic "mysql.proc table corrupt" error here. */ - if (! thd->is_error()) + if (!thd->is_error()) { my_error(ER_SP_PROC_TABLE_CORRUPT, MYF(0), ErrConvDQName(name).ptr(), ret); @@ -2813,6 +2838,7 @@ int Sp_handler::sp_cache_routine(THD *thd, @retval false - loaded or does not exists @retval true - error while loading mysql.proc */ + int Sp_handler::sp_cache_package_routine(THD *thd, const LEX_CSTRING &pkgname_cstr, @@ -2831,7 +2857,7 @@ Sp_handler::sp_cache_package_routine(THD *thd, sp_package *pkg= ph ? ph->get_package() : NULL; LEX_CSTRING tmp= name->m_name; const char *dot= strrchr(tmp.str, '.'); - size_t prefix_length= dot ? dot - tmp.str + 1 : NULL; + size_t prefix_length= dot ? dot - tmp.str + 1 : 0; tmp.str+= prefix_length; tmp.length-= prefix_length; LEX *rlex= pkg ? pkg->m_routine_implementations.find(tmp, type()) : NULL; @@ -2856,6 +2882,7 @@ Sp_handler::sp_cache_package_routine(THD *thd, @retval false - loaded or does not exists @retval true - error while loading mysql.proc */ + int Sp_handler::sp_cache_package_routine(THD *thd, const Database_qualified_name *name, bool lookup_only, sp_head **sp) const @@ -2873,6 +2900,7 @@ int Sp_handler::sp_cache_package_routine(THD *thd, @return Returns false on success, true on (alloc) failure. */ + bool Sp_handler::show_create_sp(THD *thd, String *buf, const LEX_CSTRING &db, @@ -3015,15 +3043,15 @@ Sp_handler::sp_load_for_information_schema(THD *thd, TABLE *proc_table, LEX_CSTRING Sp_handler_procedure::empty_body_lex_cstring(sql_mode_t mode) const { - static LEX_CSTRING m_empty_body_std= {C_STRING_WITH_LEN("BEGIN END")}; - static LEX_CSTRING m_empty_body_ora= {C_STRING_WITH_LEN("AS BEGIN NULL; END")}; + static LEX_CSTRING m_empty_body_std= {STRING_WITH_LEN("BEGIN END")}; + static LEX_CSTRING m_empty_body_ora= {STRING_WITH_LEN("AS BEGIN NULL; END")}; return mode & MODE_ORACLE ? 
m_empty_body_ora : m_empty_body_std; } LEX_CSTRING Sp_handler_function::empty_body_lex_cstring(sql_mode_t mode) const { - static LEX_CSTRING m_empty_body_std= {C_STRING_WITH_LEN("RETURN NULL")}; - static LEX_CSTRING m_empty_body_ora= {C_STRING_WITH_LEN("AS BEGIN RETURN NULL; END")}; + static LEX_CSTRING m_empty_body_std= {STRING_WITH_LEN("RETURN NULL")}; + static LEX_CSTRING m_empty_body_ora= {STRING_WITH_LEN("AS BEGIN RETURN NULL; END")}; return mode & MODE_ORACLE ? m_empty_body_ora : m_empty_body_std; } diff --git a/sql/sp.h b/sql/sp.h index 3fb20b1c8e8..380dd69d3a1 100644 --- a/sql/sp.h +++ b/sql/sp.h @@ -371,12 +371,12 @@ public: stored_procedure_type type() const { return TYPE_ENUM_PACKAGE; } LEX_CSTRING type_lex_cstring() const { - static LEX_CSTRING m_type_str= {C_STRING_WITH_LEN("PACKAGE")}; + static LEX_CSTRING m_type_str= {STRING_WITH_LEN("PACKAGE")}; return m_type_str; } LEX_CSTRING empty_body_lex_cstring(sql_mode_t mode) const { - static LEX_CSTRING m_empty_body= {C_STRING_WITH_LEN("BEGIN END")}; + static LEX_CSTRING m_empty_body= {STRING_WITH_LEN("BEGIN END")}; return m_empty_body; } const char *show_create_routine_col1_caption() const @@ -404,12 +404,12 @@ public: stored_procedure_type type() const { return TYPE_ENUM_PACKAGE_BODY; } LEX_CSTRING type_lex_cstring() const { - static LEX_CSTRING m_type_str= {C_STRING_WITH_LEN("PACKAGE BODY")}; + static LEX_CSTRING m_type_str= {STRING_WITH_LEN("PACKAGE BODY")}; return m_type_str; } LEX_CSTRING empty_body_lex_cstring(sql_mode_t mode) const { - static LEX_CSTRING m_empty_body= {C_STRING_WITH_LEN("BEGIN END")}; + static LEX_CSTRING m_empty_body= {STRING_WITH_LEN("BEGIN END")}; return m_empty_body; } const char *show_create_routine_col1_caption() const diff --git a/sql/sp_head.cc b/sql/sp_head.cc index a9056553080..d86a1f38953 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -368,6 +368,7 @@ Item *THD::sp_prepare_func_item(Item **it_addr, uint cols) /** Fix an Item for evaluation for SP. */ + Item *THD::sp_fix_func_item(Item **it_addr) { DBUG_ENTER("THD::sp_fix_func_item"); @@ -593,6 +594,7 @@ sp_package::~sp_package() /* Test if two routines have equal specifications */ + bool sp_head::eq_routine_spec(const sp_head *sp) const { // TODO: Add tests for equal return data types (in case of FUNCTION) @@ -835,7 +837,7 @@ sp_head::~sp_head() thd->lex->sphead= NULL; lex_end(thd->lex); delete thd->lex; - thd->lex= thd->stmt_lex= lex; + thd->lex= lex; } my_hash_free(&m_sptabs); @@ -904,8 +906,13 @@ sp_head::create_result_field(uint field_max_length, const LEX_CSTRING *field_nam Perhaps we should refactor prepare_create_field() to set Create_field::length to maximum octet length for BLOBs, instead of packed length). + + Note, for integer data types, field_max_length can be bigger + than the user specified length, e.g. a field of the INT(1) data type + is translated to the item with max_length=11. 
*/ DBUG_ASSERT(field_max_length <= m_return_field_def.length || + m_return_field_def.type_handler()->cmp_type() == INT_RESULT || (current_thd->stmt_arena->is_stmt_execute() && m_return_field_def.length == 8 && (m_return_field_def.pack_flag & @@ -1147,7 +1154,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success) backup_arena; query_id_t old_query_id; TABLE *old_derived_tables; - LEX *old_lex, *old_stmt_lex; + LEX *old_lex; Item_change_list old_change_list; String old_packet; uint old_server_status; @@ -1254,7 +1261,6 @@ sp_head::execute(THD *thd, bool merge_da_on_success) do it in each instruction */ old_lex= thd->lex; - old_stmt_lex= thd->stmt_lex; /* We should also save Item tree change list to avoid rollback something too early in the calling query. @@ -1371,7 +1377,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success) errors are not catchable by SQL handlers) or the connection has been killed during execution. */ - if (!thd->is_fatal_error && !thd->killed_errno() && + if (likely(!thd->is_fatal_error) && likely(!thd->killed_errno()) && ctx->handle_sql_condition(thd, &ip, i)) { err_status= FALSE; @@ -1380,7 +1386,8 @@ sp_head::execute(THD *thd, bool merge_da_on_success) /* Reset sp_rcontext::end_partial_result_set flag. */ ctx->end_partial_result_set= FALSE; - } while (!err_status && !thd->killed && !thd->is_fatal_error && + } while (!err_status && likely(!thd->killed) && + likely(!thd->is_fatal_error) && !thd->spcont->pause_state); #if defined(ENABLED_PROFILING) @@ -1410,7 +1417,6 @@ sp_head::execute(THD *thd, bool merge_da_on_success) DBUG_ASSERT(thd->Item_change_list::is_empty()); old_change_list.move_elements_to(thd); thd->lex= old_lex; - thd->stmt_lex= old_stmt_lex; thd->set_query_id(old_query_id); DBUG_ASSERT(!thd->derived_tables); thd->derived_tables= old_derived_tables; @@ -1600,6 +1606,7 @@ bool sp_head::check_execute_access(THD *thd) const @retval NULL - error (access denided or EOM) @retval !NULL - success (the invoker has rights to all %TYPE tables) */ + sp_rcontext *sp_head::rcontext_create(THD *thd, Field *ret_value, Row_definition_list *defs, bool switch_security_ctx) @@ -1777,6 +1784,7 @@ err_with_cleanup: /* Execute the package initialization section. */ + bool sp_package::instantiate_if_needed(THD *thd) { List args; @@ -2452,6 +2460,7 @@ sp_head::merge_lex(THD *thd, LEX *oldlex, LEX *sublex) /** Put the instruction on the backpatch list, associated with the label. */ + int sp_head::push_backpatch(THD *thd, sp_instr *i, sp_label *lab, List *list, backpatch_instr_type itype) @@ -2507,6 +2516,7 @@ sp_head::push_backpatch_goto(THD *thd, sp_pcontext *ctx, sp_label *lab) Update all instruction with this label in the backpatch list to the current position. */ + void sp_head::backpatch(sp_label *lab) { @@ -3034,6 +3044,7 @@ bool sp_head::add_instr_preturn(THD *thd, sp_pcontext *spcont) QQ: Perhaps we need a dedicated sp_instr_nop for this purpose. */ + bool sp_head::replace_instr_to_nop(THD *thd, uint ip) { sp_instr *instr= get_instr(ip); @@ -3158,6 +3169,7 @@ sp_head::opt_mark() @return 0 if ok, !=0 on error. */ + int sp_head::show_routine_code(THD *thd) { @@ -3265,7 +3277,7 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, We should not save old value since it is saved/restored in sp_head::execute() when we are entering/leaving routine. 
*/ - thd->lex= thd->stmt_lex= m_lex; + thd->lex= m_lex; thd->set_query_id(next_query_id()); @@ -3305,7 +3317,7 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, if (open_tables) res= instr->exec_open_and_lock_tables(thd, m_lex->query_tables); - if (!res) + if (likely(!res)) { res= instr->exec_core(thd, nextp); DBUG_PRINT("info",("exec_core returned: %d", res)); @@ -3365,7 +3377,7 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, Update the state of the active arena if no errors on open_tables stage. */ - if (!res || !thd->is_error() || + if (likely(!res) || likely(!thd->is_error()) || (thd->get_stmt_da()->sql_errno() != ER_CANT_REOPEN_TABLE && thd->get_stmt_da()->sql_errno() != ER_NO_SUCH_TABLE && thd->get_stmt_da()->sql_errno() != ER_NO_SUCH_TABLE_IN_ENGINE && @@ -3460,7 +3472,6 @@ sp_instr_stmt::execute(THD *thd, uint *nextp) int res; bool save_enable_slow_log; const CSET_STRING query_backup= thd->query_string; - QUERY_START_TIME_INFO time_info; Sub_statement_state backup_state; DBUG_ENTER("sp_instr_stmt::execute"); DBUG_PRINT("info", ("command: %d", m_lex_keeper.sql_command())); @@ -3470,15 +3481,7 @@ sp_instr_stmt::execute(THD *thd, uint *nextp) thd->profiling.set_query_source(m_query.str, m_query.length); #endif - if ((save_enable_slow_log= thd->enable_slow_log)) - { - /* - Save start time info for the CALL statement and overwrite it with the - current time for log_slow_statement() to log the individual query timing. - */ - thd->backup_query_start_time(&time_info); - thd->set_time(); - } + save_enable_slow_log= thd->enable_slow_log; thd->store_slow_query_state(&backup_state); if (!(res= alloc_query(thd, m_query.str, m_query.length)) && @@ -3536,15 +3539,12 @@ sp_instr_stmt::execute(THD *thd, uint *nextp) thd->set_query(query_backup); thd->query_name_consts= 0; - if (!thd->is_error()) + if (likely(!thd->is_error())) { res= 0; thd->get_stmt_da()->reset_diagnostics_area(); } } - /* Restore the original query start time */ - if (thd->enable_slow_log) - thd->restore_query_start_time(&time_info); DBUG_RETURN(res || thd->is_error()); } @@ -4402,7 +4402,7 @@ sp_instr_cfetch::print(String *str) int sp_instr_agg_cfetch::execute(THD *thd, uint *nextp) { - DBUG_ENTER("sp_instr_cfetch::execute"); + DBUG_ENTER("sp_instr_agg_cfetch::execute"); int res= 0; if (!thd->spcont->instr_ptr) { @@ -4427,7 +4427,16 @@ sp_instr_agg_cfetch::execute(THD *thd, uint *nextp) DBUG_RETURN(res); } +void +sp_instr_agg_cfetch::print(String *str) +{ + uint rsrv= SP_INSTR_UINT_MAXLEN+11; + + if (str->reserve(rsrv)) + return; + str->qs_append(STRING_WITH_LEN("agg_cfetch")); +} /* sp_instr_cursor_copy_struct class functions @@ -4441,6 +4450,7 @@ sp_instr_agg_cfetch::execute(THD *thd, uint *nextp) - opens the cursor without copying data (materialization). - copies the cursor structure to the associated %ROWTYPE variable. */ + int sp_instr_cursor_copy_struct::exec_core(THD *thd, uint *nextp) { @@ -4917,6 +4927,7 @@ sp_head::set_local_variable(THD *thd, sp_pcontext *spcont, /** Similar to set_local_variable(), but for ROW variable fields. */ + bool sp_head::set_local_variable_row_field(THD *thd, sp_pcontext *spcont, const Sp_rcontext_handler *rh, @@ -5111,6 +5122,7 @@ bool sp_head::spvar_fill_table_rowtype_reference(THD *thd, END p1; Check that the first p1 and the last p1 match. */ + bool sp_head::check_package_routine_end_name(const LEX_CSTRING &end_name) const { LEX_CSTRING non_qualified_name= m_name; @@ -5137,7 +5149,5 @@ err: ulong sp_head::sp_cache_version() const { - return m_parent ? 
m_parent->sp_cache_version() : - m_sp_cache_version; - + return m_parent ? m_parent->sp_cache_version() : m_sp_cache_version; } diff --git a/sql/sp_head.h b/sql/sp_head.h index f588f79b599..c0c0c83b77e 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -1957,7 +1957,7 @@ public: virtual int execute(THD *thd, uint *nextp); - virtual void print(String *str){}; + virtual void print(String *str); }; // class sp_instr_agg_cfetch : public sp_instr diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc index 2e9ae23d7f9..6166d1d9615 100644 --- a/sql/sp_rcontext.cc +++ b/sql/sp_rcontext.cc @@ -52,7 +52,7 @@ const LEX_CSTRING *Sp_rcontext_handler_local::get_name_prefix() const const LEX_CSTRING *Sp_rcontext_handler_package_body::get_name_prefix() const { static const LEX_CSTRING sp_package_body_variable_prefix_clex_str= - {C_STRING_WITH_LEN("PACKAGE_BODY.")}; + {STRING_WITH_LEN("PACKAGE_BODY.")}; return &sp_package_body_variable_prefix_clex_str; } @@ -196,11 +196,12 @@ bool sp_rcontext::init_var_table(THD *thd, */ static inline bool check_column_grant_for_type_ref(THD *thd, TABLE_LIST *table_list, - const char *str, size_t length) + const char *str, size_t length, + Field *fld) { #ifndef NO_EMBEDDED_ACCESS_CHECKS table_list->table->grant.want_privilege= SELECT_ACL; - return check_column_grant_in_table_ref(thd, table_list, str, length); + return check_column_grant_in_table_ref(thd, table_list, str, length, fld); #else return false; #endif @@ -234,11 +235,11 @@ bool Qualified_column_ident::resolve_type_ref(THD *thd, Column_definition *def) !open_tables_only_view_structure(thd, table_list, thd->mdl_context.has_locks())) { - if ((src= lex.query_tables->table->find_field_by_name(&m_column))) + if (likely((src= lex.query_tables->table->find_field_by_name(&m_column)))) { if (!(rc= check_column_grant_for_type_ref(thd, table_list, m_column.str, - m_column.length))) + m_column.length, src))) { *def= Column_definition(thd, src, NULL/*No defaults,no constraints*/); def->flags&= (uint) ~NOT_NULL_FLAG; @@ -302,7 +303,7 @@ bool Table_ident::resolve_table_rowtype_ref(THD *thd, LEX_CSTRING tmp= src[0]->field_name; Spvar_definition *def; if ((rc= check_column_grant_for_type_ref(thd, table_list, - tmp.str, tmp.length)) || + tmp.str, tmp.length,src[0])) || (rc= !(src[0]->field_name.str= thd->strmake(tmp.str, tmp.length))) || (rc= !(def= new (thd->mem_root) Spvar_definition(thd, *src)))) break; @@ -486,14 +487,14 @@ bool sp_rcontext::handle_sql_condition(THD *thd, handlers from this context are applicable: try to locate one in the outer scope. */ - if (thd->is_fatal_sub_stmt_error && m_in_sub_stmt) + if (unlikely(thd->is_fatal_sub_stmt_error) && m_in_sub_stmt) DBUG_RETURN(false); Diagnostics_area *da= thd->get_stmt_da(); const sp_handler *found_handler= NULL; const Sql_condition *found_condition= NULL; - if (thd->is_error()) + if (unlikely(thd->is_error())) { found_handler= cur_spi->m_ctx->find_handler(da->get_error_condition_identity()); diff --git a/sql/spatial.cc b/sql/spatial.cc index 255ba3f0647..a8a70d0763b 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -406,7 +406,7 @@ Geometry *Geometry::create_from_json(Geometry_buffer *buffer, key_buf[key_len++]= (uchar)je->s.c_next | 0x20; /* make it lowercase. 
*/ } - if (je->s.error) + if (unlikely(je->s.error)) goto err_return; if (key_len == type_keyname_len && @@ -1956,6 +1956,7 @@ bool Gis_multi_point::init_from_json(json_engine_t *je, bool er_on_3D, if (je->s.error) return TRUE; + if (n_points == 0) { je->s.error= Geometry::GEOJ_EMPTY_COORDINATES; @@ -2231,6 +2232,7 @@ bool Gis_multi_line_string::init_from_json(json_engine_t *je, bool er_on_3D, n_line_strings++; } + if (je->s.error) return TRUE; @@ -2629,8 +2631,10 @@ bool Gis_multi_polygon::init_from_json(json_engine_t *je, bool er_on_3D, n_polygons++; } + if (je->s.error) return TRUE; + if (n_polygons == 0) { je->s.error= Geometry::GEOJ_EMPTY_COORDINATES; diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index ec195f82069..aec15d38847 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2016, Oracle and/or its affiliates. - Copyright (c) 2009, 2016, MariaDB + Copyright (c) 2009, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -2224,7 +2224,7 @@ bool acl_reload(THD *thd) To avoid deadlocks we should obtain table locks before obtaining acl_cache->lock mutex. */ - if ((result= tables.open_and_lock(thd))) + if (unlikely((result= tables.open_and_lock(thd)))) { DBUG_ASSERT(result <= 0); /* @@ -3470,7 +3470,8 @@ int acl_set_default_role(THD *thd, const char *host, const char *user, user_table.default_role()->store(acl_user->default_rolename.str, acl_user->default_rolename.length, system_charset_info); - if ((error=table->file->ha_update_row(table->record[1],table->record[0])) && + if (unlikely(error= table->file->ha_update_row(table->record[1], + table->record[0])) && error != HA_ERR_RECORD_IS_THE_SAME) { mysql_mutex_unlock(&acl_cache->lock); @@ -3829,7 +3830,8 @@ static bool update_user_table(THD *thd, const User_table& user_table, new_password_len); - if ((error=table->file->ha_update_row(table->record[1],table->record[0])) && + if (unlikely(error= table->file->ha_update_row(table->record[1], + table->record[0])) && error != HA_ERR_RECORD_IS_THE_SAME) { table->file->print_error(error,MYF(0)); /* purecov: deadcode */ @@ -4121,8 +4123,8 @@ static int replace_user_table(THD *thd, const User_table &user_table, */ if (cmp_record(table, record[1])) { - if ((error= - table->file->ha_update_row(table->record[1],table->record[0])) && + if (unlikely(error= table->file->ha_update_row(table->record[1], + table->record[0])) && error != HA_ERR_RECORD_IS_THE_SAME) { // This should never happen table->file->print_error(error,MYF(0)); /* purecov: deadcode */ @@ -4133,8 +4135,9 @@ static int replace_user_table(THD *thd, const User_table &user_table, error= 0; } } - else if ((error=table->file->ha_write_row(table->record[0]))) // insert - { // This should never happen + else if (unlikely(error=table->file->ha_write_row(table->record[0]))) + { + // This should never happen if (table->file->is_fatal_error(error, HA_CHECK_DUP)) { table->file->print_error(error,MYF(0)); /* purecov: deadcode */ @@ -4145,7 +4148,7 @@ static int replace_user_table(THD *thd, const User_table &user_table, error=0; // Privileges granted / revoked end: - if (!error) + if (likely(!error)) { acl_cache->clear(1); // Clear privilege cache if (old_row_exists) @@ -4259,18 +4262,19 @@ static int replace_db_table(TABLE *table, const char *db, /* update old existing row */ if (rights) { - if ((error= table->file->ha_update_row(table->record[1], - table->record[0])) && + if (unlikely((error= 
table->file->ha_update_row(table->record[1], + table->record[0]))) && error != HA_ERR_RECORD_IS_THE_SAME) goto table_error; /* purecov: deadcode */ } else /* must have been a revoke of all privileges */ { - if ((error= table->file->ha_delete_row(table->record[1]))) + if (unlikely((error= table->file->ha_delete_row(table->record[1])))) goto table_error; /* purecov: deadcode */ } } - else if (rights && (error= table->file->ha_write_row(table->record[0]))) + else if (rights && + (unlikely(error= table->file->ha_write_row(table->record[0])))) { if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY)) goto table_error; /* purecov: deadcode */ @@ -4347,7 +4351,7 @@ replace_roles_mapping_table(TABLE *table, LEX_CSTRING *user, LEX_CSTRING *host, } if (revoke_grant && !with_admin) { - if ((error= table->file->ha_delete_row(table->record[1]))) + if (unlikely((error= table->file->ha_delete_row(table->record[1])))) { DBUG_PRINT("info", ("error deleting row '%s' '%s' '%s'", host->str, user->str, role->str)); @@ -4358,7 +4362,8 @@ replace_roles_mapping_table(TABLE *table, LEX_CSTRING *user, LEX_CSTRING *host, { table->field[3]->store(!revoke_grant + 1); - if ((error= table->file->ha_update_row(table->record[1], table->record[0]))) + if (unlikely((error= table->file->ha_update_row(table->record[1], + table->record[0])))) { DBUG_PRINT("info", ("error updating row '%s' '%s' '%s'", host->str, user->str, role->str)); @@ -4370,7 +4375,7 @@ replace_roles_mapping_table(TABLE *table, LEX_CSTRING *user, LEX_CSTRING *host, table->field[3]->store(with_admin + 1); - if ((error= table->file->ha_write_row(table->record[0]))) + if (unlikely((error= table->file->ha_write_row(table->record[0])))) { DBUG_PRINT("info", ("error inserting row '%s' '%s' '%s'", host->str, user->str, role->str)); @@ -4501,7 +4506,7 @@ replace_proxies_priv_table(THD *thd, TABLE *table, const LEX_USER *user, get_grantor(thd, grantor); - if ((error= table->file->ha_index_init(0, 1))) + if (unlikely((error= table->file->ha_index_init(0, 1)))) { table->file->print_error(error, MYF(0)); DBUG_PRINT("info", ("ha_index_init error")); @@ -4538,18 +4543,18 @@ replace_proxies_priv_table(THD *thd, TABLE *table, const LEX_USER *user, /* update old existing row */ if (!revoke_grant) { - if ((error= table->file->ha_update_row(table->record[1], - table->record[0])) && + if (unlikely(error= table->file->ha_update_row(table->record[1], + table->record[0])) && error != HA_ERR_RECORD_IS_THE_SAME) goto table_error; /* purecov: inspected */ } else { - if ((error= table->file->ha_delete_row(table->record[1]))) + if (unlikely((error= table->file->ha_delete_row(table->record[1])))) goto table_error; /* purecov: inspected */ } } - else if ((error= table->file->ha_write_row(table->record[0]))) + else if (unlikely((error= table->file->ha_write_row(table->record[0])))) { DBUG_PRINT("info", ("error inserting the row")); if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY)) @@ -4952,7 +4957,7 @@ static int replace_column_table(GRANT_TABLE *g_t, List_iterator iter(columns); class LEX_COLUMN *column; int error= table->file->ha_index_init(0, 1); - if (error) + if (unlikely(error)) { table->file->print_error(error, MYF(0)); DBUG_RETURN(-1); @@ -5012,7 +5017,7 @@ static int replace_column_table(GRANT_TABLE *g_t, error=table->file->ha_update_row(table->record[1],table->record[0]); else error=table->file->ha_delete_row(table->record[1]); - if (error && error != HA_ERR_RECORD_IS_THE_SAME) + if (unlikely(error) && error != HA_ERR_RECORD_IS_THE_SAME) { 
table->file->print_error(error,MYF(0)); /* purecov: inspected */ result= -1; /* purecov: inspected */ @@ -5028,7 +5033,7 @@ static int replace_column_table(GRANT_TABLE *g_t, else // new grant { GRANT_COLUMN *grant_column; - if ((error=table->file->ha_write_row(table->record[0]))) + if (unlikely((error=table->file->ha_write_row(table->record[0])))) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ result= -1; /* purecov: inspected */ @@ -5083,8 +5088,9 @@ static int replace_column_table(GRANT_TABLE *g_t, if (privileges) { int tmp_error; - if ((tmp_error=table->file->ha_update_row(table->record[1], - table->record[0])) && + if (unlikely(tmp_error= + table->file->ha_update_row(table->record[1], + table->record[0])) && tmp_error != HA_ERR_RECORD_IS_THE_SAME) { /* purecov: deadcode */ table->file->print_error(tmp_error,MYF(0)); /* purecov: deadcode */ @@ -5100,7 +5106,8 @@ static int replace_column_table(GRANT_TABLE *g_t, else { int tmp_error; - if ((tmp_error = table->file->ha_delete_row(table->record[1]))) + if (unlikely((tmp_error= + table->file->ha_delete_row(table->record[1])))) { /* purecov: deadcode */ table->file->print_error(tmp_error,MYF(0)); /* purecov: deadcode */ result= -1; /* purecov: deadcode */ @@ -5226,18 +5233,18 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, { if (store_table_rights || store_col_rights) { - if ((error=table->file->ha_update_row(table->record[1], - table->record[0])) && + if (unlikely(error=table->file->ha_update_row(table->record[1], + table->record[0])) && error != HA_ERR_RECORD_IS_THE_SAME) goto table_error; /* purecov: deadcode */ } - else if ((error = table->file->ha_delete_row(table->record[1]))) + else if (unlikely((error = table->file->ha_delete_row(table->record[1])))) goto table_error; /* purecov: deadcode */ } else { error=table->file->ha_write_row(table->record[0]); - if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY)) + if (unlikely(table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))) goto table_error; /* purecov: deadcode */ } @@ -5352,18 +5359,18 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name, { if (store_proc_rights) { - if ((error=table->file->ha_update_row(table->record[1], - table->record[0])) && - error != HA_ERR_RECORD_IS_THE_SAME) + if (unlikely(error=table->file->ha_update_row(table->record[1], + table->record[0])) && + error != HA_ERR_RECORD_IS_THE_SAME) goto table_error; } - else if ((error= table->file->ha_delete_row(table->record[1]))) + else if (unlikely((error= table->file->ha_delete_row(table->record[1])))) goto table_error; } else { error=table->file->ha_write_row(table->record[0]); - if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY)) + if (unlikely(table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))) goto table_error; } @@ -6375,13 +6382,13 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, column->column.ptr(), NULL, NULL, NULL, TRUE, FALSE, &unused_field_idx, FALSE, &dummy); - if (f == (Field*)0) + if (unlikely(f == (Field*)0)) { my_error(ER_BAD_FIELD_ERROR, MYF(0), column->column.c_ptr(), table_list->alias.str); DBUG_RETURN(TRUE); } - if (f == (Field *)-1) + if (unlikely(f == (Field *)-1)) DBUG_RETURN(TRUE); column_priv|= column->rights; } @@ -6464,7 +6471,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, 0, revoke_grant, create_new_users, MY_TEST(thd->variables.sql_mode & MODE_NO_AUTO_CREATE_USER)); - if (error) + if (unlikely(error)) { result= TRUE; // Remember error continue; // Add next user @@ -7775,6 +7782,8 @@ err: 
table_ref table reference where to check the field name name of field to check length length of name + fld use fld object to check invisibility when it is + not 0, not_found_field, view_ref_found DESCRIPTION Check the access rights to a column depending on the type of table @@ -7789,13 +7798,17 @@ err: */ bool check_column_grant_in_table_ref(THD *thd, TABLE_LIST * table_ref, - const char *name, size_t length) + const char *name, size_t length, + Field *fld) { GRANT_INFO *grant; const char *db_name; const char *table_name; Security_context *sctx= table_ref->security_ctx ? table_ref->security_ctx : thd->security_ctx; + if (fld && fld != not_found_field && fld != view_ref_found + && fld->invisible >= INVISIBLE_SYSTEM) + return false; if (table_ref->view || table_ref->field_translation) { @@ -7871,6 +7884,9 @@ bool check_grant_all_columns(THD *thd, ulong want_access_arg, for (; !fields->end_of_fields(); fields->next()) { + if (fields->field() && + fields->field()->invisible >= INVISIBLE_SYSTEM) + continue; LEX_CSTRING *field_name= fields->name(); if (table_name != fields->get_table_name()) @@ -9244,8 +9260,8 @@ static int modify_grant_table(TABLE *table, Field *host_field, system_charset_info); user_field->store(user_to->user.str, user_to->user.length, system_charset_info); - if ((error= table->file->ha_update_row(table->record[1], - table->record[0])) && + if (unlikely(error= table->file->ha_update_row(table->record[1], + table->record[0])) && error != HA_ERR_RECORD_IS_THE_SAME) table->file->print_error(error, MYF(0)); else @@ -9254,7 +9270,7 @@ static int modify_grant_table(TABLE *table, Field *host_field, else { /* delete */ - if ((error=table->file->ha_delete_row(table->record[0]))) + if (unlikely((error=table->file->ha_delete_row(table->record[0])))) table->file->print_error(error, MYF(0)); } @@ -9286,11 +9302,9 @@ static int handle_roles_mappings_table(TABLE *table, bool drop, DBUG_PRINT("info", ("Rewriting entry in roles_mapping table: %s@%s", user_from->user.str, user_from->host.str)); table->use_all_columns(); - if ((error= table->file->ha_rnd_init(1))) - { - table->file->print_error(error, MYF(0)); + + if (unlikely(table->file->ha_rnd_init_with_error(1))) result= -1; - } else { while((error= table->file->ha_rnd_next(table->record[0])) != @@ -9321,7 +9335,7 @@ static int handle_roles_mappings_table(TABLE *table, bool drop, if (drop) /* drop if requested */ { - if ((error= table->file->ha_delete_row(table->record[0]))) + if (unlikely((error= table->file->ha_delete_row(table->record[0])))) table->file->print_error(error, MYF(0)); } else if (user_to) @@ -9329,8 +9343,8 @@ static int handle_roles_mappings_table(TABLE *table, bool drop, store_record(table, record[1]); role_field->store(user_to->user.str, user_to->user.length, system_charset_info); - if ((error= table->file->ha_update_row(table->record[1], - table->record[0])) && + if (unlikely(error= table->file->ha_update_row(table->record[1], + table->record[0])) && error != HA_ERR_RECORD_IS_THE_SAME) table->file->print_error(error, MYF(0)); } @@ -9421,13 +9435,14 @@ static int handle_grant_table(THD *thd, const Grant_table_base& grant_table, error= table->file->ha_index_read_idx_map(table->record[0], 0, user_key, (key_part_map)3, HA_READ_KEY_EXACT); - if (!error && !*host_str) - { // verify that we got a role or a user, as needed + if (!unlikely(error) && !*host_str) + { + // verify that we got a role or a user, as needed if (static_cast(grant_table).check_is_role() != user_from->is_role()) error= HA_ERR_KEY_NOT_FOUND; } - if (error) + 
if (unlikely(error)) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) { @@ -9451,11 +9466,8 @@ static int handle_grant_table(THD *thd, const Grant_table_base& grant_table, And their host- and user fields are not consecutive. Thus, we need to do a table scan to find all matching records. */ - if ((error= table->file->ha_rnd_init(1))) - { - table->file->print_error(error, MYF(0)); + if (unlikely(table->file->ha_rnd_init_with_error(1))) result= -1; - } else { #ifdef EXTRA_DEBUG @@ -11385,7 +11397,7 @@ int fill_schema_applicable_roles(THD *thd, TABLE_LIST *tables, COND *cond) int wild_case_compare(CHARSET_INFO *cs, const char *str,const char *wildstr) { - reg3 int flag; + int flag; DBUG_ENTER("wild_case_compare"); DBUG_PRINT("enter",("str: '%s' wildstr: '%s'",str,wildstr)); while (*wildstr) @@ -12273,6 +12285,7 @@ static bool send_plugin_request_packet(MPVIO_EXT *mpvio, const char *client_auth_plugin= ((st_mysql_auth *) (plugin_decl(mpvio->plugin)->info))->client_auth_plugin; + DBUG_EXECUTE_IF("auth_disconnect", { vio_close(net->vio); DBUG_RETURN(1); }); DBUG_ASSERT(client_auth_plugin); /* @@ -12645,7 +12658,7 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio, DBUG_PRINT("info", ("Reading user information over SSL layer")); pkt_len= my_net_read(net); - if (pkt_len == packet_error || pkt_len < NORMAL_HANDSHAKE_SIZE) + if (unlikely(pkt_len == packet_error || pkt_len < NORMAL_HANDSHAKE_SIZE)) { DBUG_PRINT("error", ("Failed to read user information (pkt_len= %lu)", pkt_len)); @@ -12734,8 +12747,9 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio, Since 4.1 all database names are stored in utf8 The cast is ok as copy_with_error will create a new area for db */ - if (thd->copy_with_error(system_charset_info, (LEX_STRING*) &mpvio->db, - thd->charset(), db, db_len)) + if (unlikely(thd->copy_with_error(system_charset_info, + (LEX_STRING*) &mpvio->db, + thd->charset(), db, db_len))) return packet_error; user_len= copy_and_convert(user_buff, sizeof(user_buff) - 1, @@ -12972,7 +12986,7 @@ static int server_mpvio_read_packet(MYSQL_PLUGIN_VIO *param, uchar **buf) else pkt_len= my_net_read(&mpvio->auth_info.thd->net); - if (pkt_len == packet_error) + if (unlikely(pkt_len == packet_error)) goto err; mpvio->packets_read++; @@ -12984,7 +12998,7 @@ static int server_mpvio_read_packet(MYSQL_PLUGIN_VIO *param, uchar **buf) if (mpvio->packets_read == 1) { pkt_len= parse_client_handshake_packet(mpvio, buf, pkt_len); - if (pkt_len == packet_error) + if (unlikely(pkt_len == packet_error)) goto err; } else diff --git a/sql/sql_acl.h b/sql/sql_acl.h index a608ef0dd77..6da7d4d5db4 100644 --- a/sql/sql_acl.h +++ b/sql/sql_acl.h @@ -239,7 +239,7 @@ bool check_grant_column (THD *thd, GRANT_INFO *grant, const char *db_name, const char *table_name, const char *name, size_t length, Security_context *sctx); bool check_column_grant_in_table_ref(THD *thd, TABLE_LIST * table_ref, - const char *name, size_t length); + const char *name, size_t length, Field *fld); bool check_grant_all_columns(THD *thd, ulong want_access, Field_iterator_table_ref *fields); bool check_grant_routine(THD *thd, ulong want_access, diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc index 82fc1cbfff7..21bb086f013 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2010, 2015, Oracle and/or its affiliates. 
- Copyright (c) 2011, 2016, MariaDB + Copyright (c) 2011, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -240,7 +240,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, if (thd->locked_tables_list.locked_tables()) { - if (thd->locked_tables_list.reopen_tables(thd)) + if (thd->locked_tables_list.reopen_tables(thd, false)) goto end; /* Restore the table in the table list with the new opened table */ table_list->table= pos_in_locked_tables->table; @@ -267,7 +267,7 @@ end: tdc_release_share(table->s); } /* In case of a temporary table there will be no metadata lock. */ - if (error && has_mdl_lock) + if (unlikely(error) && has_mdl_lock) thd->mdl_context.release_transactional_locks(); DBUG_RETURN(error); @@ -430,7 +430,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt, const char *operator_name, thr_lock_type lock_type, - bool open_for_modify, + bool org_open_for_modify, bool repair_table_use_frm, uint extra_open_options, int (*prepare_func)(THD *, TABLE_LIST *, @@ -497,6 +497,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, bool fatal_error=0; bool open_error; bool collect_eis= FALSE; + bool open_for_modify= org_open_for_modify; DBUG_PRINT("admin", ("table: '%s'.'%s'", db, table->table_name.str)); strxmov(table_name, db, ".", table->table_name.str, NullS); @@ -525,7 +526,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, If open_and_lock_tables() failed, close_thread_tables() will close the table and table->table can therefore be invalid. */ - if (open_error) + if (unlikely(open_error)) table->table= NULL; /* @@ -533,7 +534,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, so any errors opening the table are logical errors. In these cases it does not make sense to try to repair. */ - if (open_error && thd->locked_tables_mode) + if (unlikely(open_error) && thd->locked_tables_mode) { result_code= HA_ADMIN_FAILED; goto send_result; @@ -828,7 +829,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, repair_table_use_frm, FALSE); thd->open_options&= ~extra_open_options; - if (!open_error) + if (unlikely(!open_error)) { TABLE *tab= table->table; Field **field_ptr= tab->field; @@ -1164,7 +1165,7 @@ send_result_message: } } /* Error path, a admin command failed. */ - if (thd->transaction_rollback_request) + if (thd->transaction_rollback_request || fatal_error) { /* Unlikely, but transaction rollback was requested by one of storage @@ -1175,7 +1176,9 @@ send_result_message: } else { - if (trans_commit_stmt(thd) || trans_commit_implicit(thd)) + if (trans_commit_stmt(thd) || + (stmt_causes_implicit_commit(thd, CF_IMPLICIT_COMMIT_END) && + trans_commit_implicit(thd))) goto err; } close_thread_tables(thd); @@ -1209,7 +1212,8 @@ send_result_message: err: /* Make sure this table instance is not reused after the failure. 
*/ trans_rollback_stmt(thd); - trans_rollback(thd); + if (stmt_causes_implicit_commit(thd, CF_IMPLICIT_COMMIT_END)) + trans_rollback(thd); if (table && table->table) { table->table->m_needs_reopen= true; @@ -1217,7 +1221,9 @@ err: } close_thread_tables(thd); // Shouldn't be needed thd->mdl_context.release_transactional_locks(); +#ifdef WITH_PARTITION_STORAGE_ENGINE err2: +#endif thd->resume_subsequent_commits(suspended_wfc); DBUG_RETURN(TRUE); } diff --git a/sql/sql_alloc.h b/sql/sql_alloc.h index c3bee260817..153b0401e29 100644 --- a/sql/sql_alloc.h +++ b/sql/sql_alloc.h @@ -39,7 +39,7 @@ public: { return alloc_root(mem_root, size); } static void operator delete(void *ptr, size_t size) { TRASH_FREE(ptr, size); } static void operator delete(void *, MEM_ROOT *){} - static void operator delete[](void *ptr, MEM_ROOT *mem_root) + static void operator delete[](void *, MEM_ROOT *) { /* never called */ } static void operator delete[](void *ptr, size_t size) { TRASH_FREE(ptr, size); } #ifdef HAVE_valgrind diff --git a/sql/sql_alter.cc b/sql/sql_alter.cc index 77e0c9d5298..bf78bd12192 100644 --- a/sql/sql_alter.cc +++ b/sql/sql_alter.cc @@ -54,12 +54,16 @@ Alter_info::Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root) bool Alter_info::set_requested_algorithm(const LEX_CSTRING *str) { // To avoid adding new keywords to the grammar, we match strings here. - if (!my_strcasecmp(system_charset_info, str->str, "INPLACE")) + if (lex_string_eq(str, STRING_WITH_LEN("INPLACE"))) requested_algorithm= ALTER_TABLE_ALGORITHM_INPLACE; - else if (!my_strcasecmp(system_charset_info, str->str, "COPY")) + else if (lex_string_eq(str, STRING_WITH_LEN("COPY"))) requested_algorithm= ALTER_TABLE_ALGORITHM_COPY; - else if (!my_strcasecmp(system_charset_info, str->str, "DEFAULT")) + else if (lex_string_eq(str, STRING_WITH_LEN("DEFAULT"))) requested_algorithm= ALTER_TABLE_ALGORITHM_DEFAULT; + else if (lex_string_eq(str, STRING_WITH_LEN("NOCOPY"))) + requested_algorithm= ALTER_TABLE_ALGORITHM_NOCOPY; + else if (lex_string_eq(str, STRING_WITH_LEN("INSTANT"))) + requested_algorithm= ALTER_TABLE_ALGORITHM_INSTANT; else return true; return false; @@ -69,19 +73,141 @@ bool Alter_info::set_requested_algorithm(const LEX_CSTRING *str) bool Alter_info::set_requested_lock(const LEX_CSTRING *str) { // To avoid adding new keywords to the grammar, we match strings here. 
- if (!my_strcasecmp(system_charset_info, str->str, "NONE")) + if (lex_string_eq(str, STRING_WITH_LEN("NONE"))) requested_lock= ALTER_TABLE_LOCK_NONE; - else if (!my_strcasecmp(system_charset_info, str->str, "SHARED")) + else if (lex_string_eq(str, STRING_WITH_LEN("SHARED"))) requested_lock= ALTER_TABLE_LOCK_SHARED; - else if (!my_strcasecmp(system_charset_info, str->str, "EXCLUSIVE")) + else if (lex_string_eq(str, STRING_WITH_LEN("EXCLUSIVE"))) requested_lock= ALTER_TABLE_LOCK_EXCLUSIVE; - else if (!my_strcasecmp(system_charset_info, str->str, "DEFAULT")) + else if (lex_string_eq(str, STRING_WITH_LEN("DEFAULT"))) requested_lock= ALTER_TABLE_LOCK_DEFAULT; else return true; return false; } +const char* Alter_info::algorithm() const +{ + switch (requested_algorithm) { + case ALTER_TABLE_ALGORITHM_INPLACE: + return "ALGORITHM=INPLACE"; + case ALTER_TABLE_ALGORITHM_COPY: + return "ALGORITHM=COPY"; + case ALTER_TABLE_ALGORITHM_DEFAULT: + return "ALGORITHM=DEFAULT"; + case ALTER_TABLE_ALGORITHM_NOCOPY: + return "ALGORITHM=NOCOPY"; + case ALTER_TABLE_ALGORITHM_INSTANT: + return "ALGORITHM=INSTANT"; + } + + return NULL; /* purecov: begin deadcode */ +} + +const char* Alter_info::lock() const +{ + switch (requested_lock) { + case ALTER_TABLE_LOCK_SHARED: + return "LOCK=SHARED"; + case ALTER_TABLE_LOCK_NONE: + return "LOCK=NONE"; + case ALTER_TABLE_LOCK_DEFAULT: + return "LOCK=DEFAULT"; + case ALTER_TABLE_LOCK_EXCLUSIVE: + return "LOCK=EXCLUSIVE"; + } + return NULL; /* purecov: begin deadcode */ +} + + +bool Alter_info::supports_algorithm(THD *thd, enum_alter_inplace_result result, + const Alter_inplace_info *ha_alter_info) +{ + if (requested_algorithm == Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT) + requested_algorithm = (Alter_info::enum_alter_table_algorithm) thd->variables.alter_algorithm; + + switch (result) { + case HA_ALTER_INPLACE_EXCLUSIVE_LOCK: + case HA_ALTER_INPLACE_SHARED_LOCK: + case HA_ALTER_INPLACE_NO_LOCK: + case HA_ALTER_INPLACE_INSTANT: + return false; + case HA_ALTER_INPLACE_COPY_NO_LOCK: + case HA_ALTER_INPLACE_COPY_LOCK: + if (requested_algorithm >= Alter_info::ALTER_TABLE_ALGORITHM_NOCOPY) + { + ha_alter_info->report_unsupported_error(algorithm(), + "ALGORITHM=INPLACE"); + return true; + } + return false; + case HA_ALTER_INPLACE_NOCOPY_NO_LOCK: + case HA_ALTER_INPLACE_NOCOPY_LOCK: + if (requested_algorithm == Alter_info::ALTER_TABLE_ALGORITHM_INSTANT) + { + ha_alter_info->report_unsupported_error("ALGORITHM=INSTANT", + "ALGORITHM=NOCOPY"); + return true; + } + return false; + case HA_ALTER_INPLACE_NOT_SUPPORTED: + if (requested_algorithm >= Alter_info::ALTER_TABLE_ALGORITHM_INPLACE) + { + ha_alter_info->report_unsupported_error(algorithm(), + "ALGORITHM=COPY"); + return true; + } + return false; + case HA_ALTER_ERROR: + return true; + } + /* purecov: begin deadcode */ + DBUG_ASSERT(0); + return false; +} + + +bool Alter_info::supports_lock(THD *thd, enum_alter_inplace_result result, + const Alter_inplace_info *ha_alter_info) +{ + switch (result) { + case HA_ALTER_INPLACE_EXCLUSIVE_LOCK: + // If SHARED lock and no particular algorithm was requested, use COPY. 
+ if (requested_lock == Alter_info::ALTER_TABLE_LOCK_SHARED && + requested_algorithm == Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT && + thd->variables.alter_algorithm == + Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT) + return false; + + if (requested_lock == Alter_info::ALTER_TABLE_LOCK_SHARED || + requested_lock == Alter_info::ALTER_TABLE_LOCK_NONE) + { + ha_alter_info->report_unsupported_error(lock(), "LOCK=EXCLUSIVE"); + return true; + } + return false; + case HA_ALTER_INPLACE_NO_LOCK: + case HA_ALTER_INPLACE_INSTANT: + case HA_ALTER_INPLACE_COPY_NO_LOCK: + case HA_ALTER_INPLACE_NOCOPY_NO_LOCK: + return false; + case HA_ALTER_INPLACE_COPY_LOCK: + case HA_ALTER_INPLACE_NOCOPY_LOCK: + case HA_ALTER_INPLACE_NOT_SUPPORTED: + case HA_ALTER_INPLACE_SHARED_LOCK: + if (requested_lock == Alter_info::ALTER_TABLE_LOCK_NONE) + { + ha_alter_info->report_unsupported_error("LOCK=NONE", "LOCK=SHARED"); + return true; + } + return false; + case HA_ALTER_ERROR: + return true; + } + /* purecov: begin deadcode */ + DBUG_ASSERT(0); + return false; +} Alter_table_ctx::Alter_table_ctx() : datetime_field(NULL), error_if_not_empty(false), @@ -219,8 +345,11 @@ bool Sql_cmd_alter_table::execute(THD *thd) DBUG_ENTER("Sql_cmd_alter_table::execute"); - if (thd->is_fatal_error) /* out of memory creating a copy of alter_info */ + if (unlikely(thd->is_fatal_error)) + { + /* out of memory creating a copy of alter_info */ DBUG_RETURN(TRUE); + } /* We also require DROP priv for ALTER TABLE ... DROP PARTITION, as well as for RENAME TO, as being done by SQLCOM_RENAME_TABLE @@ -273,8 +402,8 @@ bool Sql_cmd_alter_table::execute(THD *thd) - For temporary MERGE tables we do not track if their child tables are base or temporary. As result we can't guarantee that privilege check - which was done in presence of temporary child will stay relevant later - as this temporary table might be removed. + which was done in presence of temporary child will stay relevant + later as this temporary table might be removed. If SELECT_ACL | UPDATE_ACL | DELETE_ACL privileges were not checked for the underlying *base* tables, it would create a security breach as in @@ -314,6 +443,9 @@ bool Sql_cmd_alter_table::execute(THD *thd) create_info.data_file_name= create_info.index_file_name= NULL; thd->prepare_logs_for_admin_command(); +#ifdef WITH_PARTITION_STORAGE_ENGINE + thd->work_part_info= 0; +#endif #ifdef WITH_WSREP if ((!thd->is_current_stmt_binlog_format_row() || diff --git a/sql/sql_alter.h b/sql/sql_alter.h index 85a7b993e12..268dbc43abd 100644 --- a/sql/sql_alter.h +++ b/sql/sql_alter.h @@ -47,14 +47,24 @@ public: */ enum enum_alter_table_algorithm { - // In-place if supported, copy otherwise. +/* + Use thd->variables.alter_algorithm for alter method. If this is also + default then use the fastest possible ALTER TABLE method + (INSTANT, NOCOPY, INPLACE, COPY) +*/ ALTER_TABLE_ALGORITHM_DEFAULT, + // Copy if supported, error otherwise. + ALTER_TABLE_ALGORITHM_COPY, + // In-place if supported, error otherwise. ALTER_TABLE_ALGORITHM_INPLACE, - // Copy if supported, error otherwise. - ALTER_TABLE_ALGORITHM_COPY + // No Copy will refuse any operation which does rebuild. + ALTER_TABLE_ALGORITHM_NOCOPY, + + // Instant should allow any operation that changes metadata only. + ALTER_TABLE_ALGORITHM_INSTANT }; @@ -67,7 +77,7 @@ public: // Maximum supported level of concurency for the given operation. ALTER_TABLE_LOCK_DEFAULT, - // Allow concurrent reads & writes. If not supported, give erorr. + // Allow concurrent reads & writes. 
If not supported, give error. ALTER_TABLE_LOCK_NONE, // Allow concurrent reads only. If not supported, give error. @@ -174,6 +184,45 @@ public: bool set_requested_lock(const LEX_CSTRING *str); + /** + Returns the algorithm value in the format "algorithm=value" + */ + const char* algorithm() const; + + /** + Returns the lock value in the format "lock=value" + */ + const char* lock() const; + + /** + Check whether the given result can be supported + with the specified user alter algorithm. + + @param thd Thread handle + @param result Operation supported for inplace alter + @param ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data during + in-place alter + @retval false Supported operation + @retval true Not supported value + */ + bool supports_algorithm(THD *thd, enum_alter_inplace_result result, + const Alter_inplace_info *ha_alter_info); + + /** + Check whether the given result can be supported + with the specified user lock type. + + @param result Operation supported for inplace alter + @param ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data during + in-place alter + @retval false Supported lock type + @retval true Not supported value + */ + bool supports_lock(THD *thd, enum_alter_inplace_result result, + const Alter_inplace_info *ha_alter_info); + private: Alter_info &operator=(const Alter_info &rhs); // not implemented Alter_info(const Alter_info &rhs); // not implemented diff --git a/sql/sql_array.h b/sql/sql_array.h index cad7b0e1c48..0e5246b7e2a 100644 --- a/sql/sql_array.h +++ b/sql/sql_array.h @@ -221,7 +221,7 @@ public: bool resize(size_t new_size, Elem default_val) { size_t old_size= elements(); - if (allocate_dynamic(&array, (uint)new_size)) + if (unlikely(allocate_dynamic(&array, (uint)new_size))) return true; if (new_size > old_size) diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 5ce6a6b001c..64427af6319 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -199,8 +199,8 @@ uint get_table_def_key(const TABLE_LIST *table_list, const char **key) from key used by MDL subsystem. */ DBUG_ASSERT(!strcmp(table_list->get_db_name(), - table_list->mdl_request.key.db_name()) && - !strcmp(table_list->get_table_name(), + table_list->mdl_request.key.db_name())); + DBUG_ASSERT(!strcmp(table_list->get_table_name(), table_list->mdl_request.key.name())); *key= (const char*)table_list->mdl_request.key.ptr() + 1; @@ -484,7 +484,7 @@ err_with_reopen: old locks. 
This should always succeed (unless some external process has removed the tables) */ - if (thd->locked_tables_list.reopen_tables(thd)) + if (thd->locked_tables_list.reopen_tables(thd, false)) result= true; /* Since downgrade_lock() won't do anything with shared @@ -951,7 +951,8 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table, @param thd thread handle @param table table which should be checked @param table_list list of tables - @param check_alias whether to check tables' aliases + @param check_flag whether to check tables' aliases + Currently this is only used by INSERT NOTE: to exclude derived tables from check we use following mechanism: a) during derived table processing set THD::derived_tables_processing @@ -980,9 +981,9 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table, static TABLE_LIST* find_dup_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list, - bool check_alias) + uint check_flag) { - TABLE_LIST *res; + TABLE_LIST *res= 0; LEX_CSTRING *d_name, *t_name, *t_alias; DBUG_ENTER("find_dup_table"); DBUG_PRINT("enter", ("table alias: %s", table->alias.str)); @@ -1015,17 +1016,15 @@ TABLE_LIST* find_dup_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list, retry: DBUG_PRINT("info", ("real table: %s.%s", d_name->str, t_name->str)); - for (TABLE_LIST *tl= table_list;;) + for (TABLE_LIST *tl= table_list; tl ; tl= tl->next_global, res= 0) { - if (tl && - tl->select_lex && tl->select_lex->master_unit() && + if (tl->select_lex && tl->select_lex->master_unit() && tl->select_lex->master_unit()->executed) { /* There is no sense to check tables of already executed parts of the query */ - tl= tl->next_global; continue; } /* @@ -1034,21 +1033,29 @@ retry: */ if (! (res= find_table_in_global_list(tl, d_name, t_name))) break; + tl= res; // We can continue search after this table /* Skip if same underlying table. */ if (res->table && (res->table == table->table)) - goto next; + continue; + + if (check_flag & CHECK_DUP_FOR_CREATE) + DBUG_RETURN(res); /* Skip if table alias does not match. */ - if (check_alias) + if (check_flag & CHECK_DUP_ALLOW_DIFFERENT_ALIAS) { if (my_strcasecmp(table_alias_charset, t_alias->str, res->alias.str)) - goto next; + continue; } /* - Skip if marked to be excluded (could be a derived table) or if - entry is a prelocking placeholder. + If table is not excluded (could be a derived table) and table is not + a prelocking placeholder then we found either a duplicate entry + or a table that is part of a derived table (handled below). + Examples are: + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 SELECT * FROM view_containing_t1; */ if (res->select_lex && !res->select_lex->exclude_from_table_unique_test && @@ -1060,14 +1067,17 @@ retry: processed in derived table or top select of multi-update/multi-delete (exclude_from_table_unique_test) or prelocking placeholder. 
*/ -next: - tl= res->next_global; DBUG_PRINT("info", ("found same copy of table or table which we should skip")); } if (res && res->belong_to_derived) { - /* Try to fix */ + /* + We come here for queries of type: + INSERT INTO t1 (SELECT tmp.a FROM (select * FROM t1) as tmp); + + Try to fix by materializing the derived table + */ TABLE_LIST *derived= res->belong_to_derived; if (derived->is_merged_derived() && !derived->derived->is_excluded()) { @@ -1099,7 +1109,7 @@ next: TABLE_LIST* unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list, - bool check_alias) + uint check_flag) { TABLE_LIST *dup; @@ -1131,12 +1141,12 @@ unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list, if (!tmp_parent) break; - if ((dup= find_dup_table(thd, child, child->next_global, check_alias))) + if ((dup= find_dup_table(thd, child, child->next_global, check_flag))) break; } } else - dup= find_dup_table(thd, table, table_list, check_alias); + dup= find_dup_table(thd, table, table_list, check_flag); return dup; } @@ -1471,15 +1481,15 @@ open_table_get_mdl_lock(THD *thd, Open_table_context *ot_ctx, return FALSE; } +#ifdef WITH_PARTITION_STORAGE_ENGINE /* Set all [named] partitions as used. */ static int set_partitions_as_used(TABLE_LIST *tl, TABLE *t) { -#ifdef WITH_PARTITION_STORAGE_ENGINE if (t->part_info) return t->file->change_partitions_to_open(tl->partition_names); -#endif return 0; } +#endif /** @@ -1525,7 +1535,9 @@ bool open_table(THD *thd, TABLE_LIST *table_list, Open_table_context *ot_ctx) MDL_ticket *mdl_ticket; TABLE_SHARE *share; uint gts_flags; +#ifdef WITH_PARTITION_STORAGE_ENGINE int part_names_error=0; +#endif DBUG_ENTER("open_table"); /* @@ -1623,7 +1635,9 @@ bool open_table(THD *thd, TABLE_LIST *table_list, Open_table_context *ot_ctx) table= best_table; table->query_id= thd->query_id; DBUG_PRINT("info",("Using locked table")); +#ifdef WITH_PARTITION_STORAGE_ENGINE part_names_error= set_partitions_as_used(table_list, table); +#endif goto reset; } /* @@ -1782,7 +1796,7 @@ retry_share: share= tdc_acquire_share(thd, table_list, gts_flags, &table); - if (!share) + if (unlikely(!share)) { /* Hide "Table doesn't exist" errors if the table belongs to a view. @@ -1908,7 +1922,9 @@ retry_share: { DBUG_ASSERT(table->file != NULL); MYSQL_REBIND_TABLE(table->file); +#ifdef WITH_PARTITION_STORAGE_ENGINE part_names_error= set_partitions_as_used(table_list, table); +#endif } else { @@ -1924,7 +1940,7 @@ retry_share: thd->open_options, table, FALSE, IF_PARTITIONING(table_list->partition_names,0)); - if (error) + if (unlikely(error)) { my_free(table); @@ -1969,7 +1985,7 @@ retry_share: table_list->table= table; #ifdef WITH_PARTITION_STORAGE_ENGINE - if (table->part_info) + if (unlikely(table->part_info)) { /* Partitions specified were incorrect.*/ if (part_names_error) @@ -2054,7 +2070,7 @@ TABLE *find_table_for_mdl_upgrade(THD *thd, const char *db, { TABLE *tab= find_locked_table(thd->open_tables, db, table_name); - if (!tab) + if (unlikely(!tab)) { if (!no_error) my_error(ER_TABLE_NOT_LOCKED, MYF(0), table_name); @@ -2067,8 +2083,8 @@ TABLE *find_table_for_mdl_upgrade(THD *thd, const char *db, cases don't take a global IX lock in order to be compatible with global read lock. 
*/ - if (!thd->mdl_context.is_lock_owner(MDL_key::GLOBAL, "", "", - MDL_INTENTION_EXCLUSIVE)) + if (unlikely(!thd->mdl_context.is_lock_owner(MDL_key::GLOBAL, "", "", + MDL_INTENTION_EXCLUSIVE))) { if (!no_error) my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0), table_name); @@ -2080,7 +2096,7 @@ TABLE *find_table_for_mdl_upgrade(THD *thd, const char *db, (tab= find_locked_table(tab->next, db, table_name))) continue; - if (!tab && !no_error) + if (unlikely(!tab && !no_error)) my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0), table_name); return tab; @@ -2298,7 +2314,8 @@ void Locked_tables_list::unlink_from_list(THD *thd, If mode is not LTM_LOCK_TABLES, we needn't do anything. Moreover, outside this mode pos_in_locked_tables value is not trustworthy. */ - if (thd->locked_tables_mode != LTM_LOCK_TABLES) + if (thd->locked_tables_mode != LTM_LOCK_TABLES && + thd->locked_tables_mode != LTM_PRELOCKED_UNDER_LOCK_TABLES) return; /* @@ -2402,7 +2419,7 @@ unlink_all_closed_tables(THD *thd, MYSQL_LOCK *lock, size_t reopen_count) */ bool -Locked_tables_list::reopen_tables(THD *thd) +Locked_tables_list::reopen_tables(THD *thd, bool need_reopen) { Open_table_context ot_ctx(thd, MYSQL_OPEN_REOPEN); uint reopen_count= 0; @@ -2413,8 +2430,20 @@ Locked_tables_list::reopen_tables(THD *thd) for (TABLE_LIST *table_list= m_locked_tables; table_list; table_list= table_list->next_global) { - if (table_list->table) /* The table was not closed */ - continue; + if (need_reopen) + { + if (!table_list->table || !table_list->table->needs_reopen()) + continue; + /* no need to remove the table from the TDC here, thus (TABLE*)1 */ + close_all_tables_for_name(thd, table_list->table->s, + HA_EXTRA_NOT_USED, (TABLE*)1); + DBUG_ASSERT(table_list->table == NULL); + } + else + { + if (table_list->table) /* The table was not closed */ + continue; + } /* Links into thd->open_tables upon success */ if (open_table(thd, table_list, &ot_ctx)) @@ -3513,7 +3542,7 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables, error= open_table(thd, tables, ot_ctx); } - if (error) + if (unlikely(error)) { if (! ot_ctx->can_recover_from_failed_open() && safe_to_ignore_table) { @@ -3593,7 +3622,7 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables, if (need_prelocking && ! lex->requires_prelocking()) lex->mark_as_requiring_prelocking(save_query_tables_last); - if (error) + if (unlikely(error)) goto end; } @@ -3603,7 +3632,7 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables, /* Check and update metadata version of a base table. */ error= check_and_update_table_version(thd, tables, tables->table->s); - if (error) + if (unlikely(error)) goto end; /* After opening a MERGE table add the children to the query list of @@ -3663,7 +3692,7 @@ process_view_routines: if (need_prelocking && ! lex->requires_prelocking()) lex->mark_as_requiring_prelocking(save_query_tables_last); - if (error) + if (unlikely(error)) goto end; } @@ -4032,7 +4061,7 @@ restart: flags, prelocking_strategy, has_prelocking_list, &ot_ctx); - if (error) + if (unlikely(error)) { if (ot_ctx.can_recover_from_failed_open()) { @@ -4114,7 +4143,7 @@ restart: if (need_prelocking && ! 
*start) *start= thd->lex->query_tables; - if (error) + if (unlikely(error)) { if (ot_ctx.can_recover_from_failed_open()) { @@ -4210,7 +4239,7 @@ error: THD_STAGE_INFO(thd, stage_after_opening_tables); thd_proc_info(thd, 0); - if (error && *table_to_open) + if (unlikely(error) && *table_to_open) { (*table_to_open)->table= NULL; } @@ -4388,7 +4417,7 @@ handle_table(THD *thd, Query_tables_list *prelocking_ctx, arena= thd->activate_stmt_arena_if_needed(&backup); table->file->get_parent_foreign_key_list(thd, &fk_list); - if (thd->is_error()) + if (unlikely(thd->is_error())) { if (arena) thd->restore_active_arena(arena, &backup); @@ -4439,7 +4468,7 @@ handle_table(THD *thd, Query_tables_list *prelocking_ctx, table->internal_tables); if (arena) thd->restore_active_arena(arena, &backup); - if (error) + if (unlikely(error)) { *need_prelocking= TRUE; return TRUE; @@ -4680,7 +4709,7 @@ static bool check_lock_and_start_stmt(THD *thd, table_list->table->alias.c_ptr()); DBUG_RETURN(1); } - if ((error= table_list->table->file->start_stmt(thd, lock_type))) + if (unlikely((error= table_list->table->file->start_stmt(thd, lock_type)))) { table_list->table->file->print_error(error, MYF(0)); DBUG_RETURN(1); @@ -4820,7 +4849,7 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type, break; } - if (!error) + if (likely(!error)) { /* We can't have a view or some special "open_strategy" in this function @@ -5864,7 +5893,7 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list, #ifndef NO_EMBEDDED_ACCESS_CHECKS /* Check if there are sufficient access rights to the found field. */ if (check_privileges && - check_column_grant_in_table_ref(thd, *actual_table, name, length)) + check_column_grant_in_table_ref(thd, *actual_table, name, length, fld)) fld= WRONG_GRANT; else #endif @@ -6041,7 +6070,7 @@ find_field_in_tables(THD *thd, Item_ident *item, #ifndef NO_EMBEDDED_ACCESS_CHECKS /* Check if there are sufficient access rights to the found field. 
*/ if (found && check_privileges && - check_column_grant_in_table_ref(thd, table_ref, name, length)) + check_column_grant_in_table_ref(thd, table_ref, name, length, found)) found= WRONG_GRANT; #endif } @@ -6162,7 +6191,7 @@ find_field_in_tables(THD *thd, Item_ident *item, if (db) return cur_field; - if (found) + if (unlikely(found)) { if (report_error == REPORT_ALL_ERRORS || report_error == IGNORE_EXCEPT_NON_UNIQUE) @@ -6174,7 +6203,7 @@ find_field_in_tables(THD *thd, Item_ident *item, } } - if (found) + if (likely(found)) return found; /* @@ -6293,7 +6322,7 @@ find_item_in_list(Item *find, List &items, uint *counter, (if this field created from expression argument of group_concat()), => we have to check presence of name before compare */ - if (!item_field->name.str) + if (unlikely(!item_field->name.str)) continue; if (table_name) @@ -6411,24 +6440,27 @@ find_item_in_list(Item *find, List &items, uint *counter, } } } - if (!found) + + if (likely(found)) + return found; + + if (unlikely(found_unaliased_non_uniq)) { - if (found_unaliased_non_uniq) - { - if (report_error != IGNORE_ERRORS) - my_error(ER_NON_UNIQ_ERROR, MYF(0), - find->full_name(), current_thd->where); - return (Item **) 0; - } - if (found_unaliased) - { - found= found_unaliased; - *counter= unaliased_counter; - *resolution= RESOLVED_BEHIND_ALIAS; - } + if (report_error != IGNORE_ERRORS) + my_error(ER_NON_UNIQ_ERROR, MYF(0), + find->full_name(), current_thd->where); + return (Item **) 0; } + if (found_unaliased) + { + found= found_unaliased; + *counter= unaliased_counter; + *resolution= RESOLVED_BEHIND_ALIAS; + } + if (found) return found; + if (report_error != REPORT_EXCEPT_NOT_FOUND) { if (report_error == REPORT_ALL_ERRORS) @@ -6548,7 +6580,7 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2, Query_arena *arena, backup; bool result= TRUE; bool first_outer_loop= TRUE; - Field *field_1, *field_2; + Field *field_1; field_visibility_t field_1_invisible, field_2_invisible; /* Leaf table references to which new natural join columns are added @@ -6572,6 +6604,8 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2, { bool found= FALSE; const LEX_CSTRING *field_name_1; + Field *field_2= 0; + /* true if field_name_1 is a member of using_fields */ bool is_using_column_1; if (!(nj_col_1= it_1.get_or_create_column_ref(thd, leaf_1))) @@ -7160,7 +7194,7 @@ static bool setup_natural_join_row_types(THD *thd, int setup_wild(THD *thd, TABLE_LIST *tables, List &fields, List *sum_func_list, - uint wild_num) + uint wild_num, uint *hidden_bit_fields) { if (!wild_num) return(0); @@ -7200,7 +7234,7 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List &fields, else if (insert_fields(thd, ((Item_field*) item)->context, ((Item_field*) item)->db_name, ((Item_field*) item)->table_name, &it, - any_privileges)) + any_privileges, hidden_bit_fields)) { if (arena) thd->restore_active_arena(arena, &backup); @@ -7251,7 +7285,7 @@ bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array, List *sum_func_list, List *pre_fix, bool allow_sum_func) { - reg2 Item *item; + Item *item; enum_column_usage saved_column_usage= thd->column_usage; nesting_map save_allow_sum_func= thd->lex->allow_sum_func; List_iterator it(fields); @@ -7665,7 +7699,7 @@ bool get_key_map_from_key_list(key_map *map, TABLE *table, bool insert_fields(THD *thd, Name_resolution_context *context, const char *db_name, const char *table_name, List_iterator *it, - bool any_privileges) + bool any_privileges, uint *hidden_bit_fields) { 
Field_iterator_table_ref field_iterator; bool found; @@ -7793,6 +7827,9 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name, else it->after(item); /* Add 'item' to the SELECT list. */ + if (item->type() == Item::FIELD_ITEM && item->field_type() == MYSQL_TYPE_BIT) + (*hidden_bit_fields)++; + #ifndef NO_EMBEDDED_ACCESS_CHECKS /* Set privilege information for the fields of newly created views. @@ -8169,7 +8206,7 @@ fill_record(THD *thd, TABLE *table_arg, List &fields, List &values, if (rfield->stored_in_db()) { - if (value->save_in_field(rfield, 0) < 0 && !ignore_errors) + if (unlikely(value->save_in_field(rfield, 0) < 0) && !ignore_errors) { my_message(ER_UNKNOWN_ERROR, ER_THD(thd, ER_UNKNOWN_ERROR), MYF(0)); goto err; @@ -8424,7 +8461,7 @@ fill_record(THD *thd, TABLE *table, Field **ptr, List &values, /* Ensure that all fields are from the same table */ DBUG_ASSERT(field->table == table); - if (field->invisible) + if (unlikely(field->invisible)) { all_fields_have_values= false; continue; @@ -8436,7 +8473,7 @@ fill_record(THD *thd, TABLE *table, Field **ptr, List &values, if (field->field_index == autoinc_index) table->auto_increment_field_not_null= TRUE; - if (field->vcol_info || (vers_sys_field && !ignore_errors)) + if (unlikely(field->vcol_info) || (vers_sys_field && !ignore_errors)) { Item::Type type= value->type(); if (type != Item::DEFAULT_VALUE_ITEM && @@ -8621,6 +8658,19 @@ int setup_ftfuncs(SELECT_LEX *select_lex) } +void cleanup_ftfuncs(SELECT_LEX *select_lex) +{ + List_iterator li(*(select_lex->ftfunc_list)), + lj(*(select_lex->ftfunc_list)); + Item_func_match *ftf; + + while ((ftf=li++)) + { + ftf->cleanup(); + } +} + + int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order) { if (select_lex->ftfunc_list->elements) diff --git a/sql/sql_base.h b/sql/sql_base.h index a6a85d47dc9..01892c0b938 100644 --- a/sql/sql_base.h +++ b/sql/sql_base.h @@ -1,4 +1,6 @@ /* Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. 
+ Copyright (c) 2011, 2018, MariaDB + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -59,6 +61,10 @@ enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND, IGNORE_ERRORS, REPORT_EXCEPT_NON_UNIQUE, IGNORE_EXCEPT_NON_UNIQUE}; +/* Flag bits for unique_table() */ +#define CHECK_DUP_ALLOW_DIFFERENT_ALIAS 1 +#define CHECK_DUP_FOR_CREATE 2 + uint get_table_def_key(const TABLE_LIST *table_list, const char **key); TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update, uint lock_flags); @@ -153,11 +159,12 @@ bool fill_record_n_invoke_before_triggers(THD *thd, TABLE *table, enum trg_event_type event); bool insert_fields(THD *thd, Name_resolution_context *context, const char *db_name, const char *table_name, - List_iterator *it, bool any_privileges); + List_iterator *it, bool any_privileges, + uint *hidden_bit_fields); void make_leaves_list(THD *thd, List &list, TABLE_LIST *tables, bool full_table_list, TABLE_LIST *boundary); int setup_wild(THD *thd, TABLE_LIST *tables, List &fields, - List *sum_func_list, uint wild_num); + List *sum_func_list, uint wild_num, uint * hidden_bit_fields); bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array, List &item, enum_column_usage column_usage, List *sum_func_list, List *pre_fix, @@ -214,6 +221,7 @@ int setup_conds(THD *thd, TABLE_LIST *tables, List &leaves, COND **conds); void wrap_ident(THD *thd, Item **conds); int setup_ftfuncs(SELECT_LEX* select); +void cleanup_ftfuncs(SELECT_LEX *select_lex); int init_ftfuncs(THD *thd, SELECT_LEX* select, bool no_order); bool lock_table_names(THD *thd, const DDL_options_st &options, TABLE_LIST *table_list, @@ -263,7 +271,7 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint counter, uint flags); int decide_logging_format(THD *thd, TABLE_LIST *tables); void close_thread_table(THD *thd, TABLE **table_ptr); TABLE_LIST *unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list, - bool check_alias); + uint check_flag); bool is_equal(const LEX_CSTRING *a, const LEX_CSTRING *b); class Open_tables_backup; diff --git a/sql/sql_bitmap.h b/sql/sql_bitmap.h index 5a2caf89fe2..705a8d169e1 100644 --- a/sql/sql_bitmap.h +++ b/sql/sql_bitmap.h @@ -141,16 +141,16 @@ public: }; }; -/* An iterator to quickly walk over bits in unlonglong bitmap. */ +/* An iterator to quickly walk over bits in ulonglong bitmap. 
*/ class Table_map_iterator { ulonglong bmp; uint no; public: Table_map_iterator(ulonglong t) : bmp(t), no(0) {} - int next_bit() + uint next_bit() { - static const char last_bit[16]= {32, 0, 1, 0, + static const uchar last_bit[16]= {32, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0}; @@ -162,10 +162,10 @@ public: if (!bmp) return BITMAP_END; } - bmp &= ~(1LL << bit); + bmp &= ~(1ULL << bit); return no + bit; } - int operator++(int) { return next_bit(); } + uint operator++(int) { return next_bit(); } enum { BITMAP_END= 64 }; }; @@ -201,7 +201,10 @@ public: bool is_subset(const Bitmap<64>& map2) const { return !(map & ~map2.map); } bool is_overlapping(const Bitmap<64>& map2) const { return (map & map2.map)!= 0; } bool operator==(const Bitmap<64>& map2) const { return map == map2.map; } - char *print(char *buf) const { longlong2str(map,buf,16); return buf; } + char *print(char *buf) const { + longlong2str(longlong(map), buf, 16); + return buf; + } ulonglong to_ulonglong() const { return map; } class Iterator : public Table_map_iterator { diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index c8f18556d50..aa4c77d0939 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -329,6 +329,9 @@ TODO list: */ #include "mariadb.h" /* NO_EMBEDDED_ACCESS_CHECKS */ +#if defined(DBUG_OFF) && defined(HAVE_MADVISE) +#include <sys/mman.h> +#endif #include "sql_priv.h" #include "sql_basic_types.h" #include "sql_cache.h" @@ -1049,7 +1052,7 @@ void query_cache_insert(void *thd_arg, const char *packet, size_t length, called for this thread. */ - if (!thd) + if (unlikely(!thd)) return; query_cache.insert(thd, &thd->query_cache_tls, @@ -2591,7 +2594,7 @@ size_t Query_cache::init_cache() { size_t mem_bin_count, num, step; size_t mem_bin_size, prev_size, inc; - size_t additional_data_size, max_mem_bin_size, approx_additional_data_size; + size_t max_mem_bin_size, approx_additional_data_size; int align; DBUG_ENTER("Query_cache::init_cache"); @@ -2656,6 +2659,13 @@ size_t Query_cache::init_cache() if (!(cache= (uchar *) my_malloc_lock(query_cache_size+additional_data_size, MYF(0)))) goto err; +#if defined(DBUG_OFF) && defined(HAVE_MADVISE) && defined(MADV_DONTDUMP) + if (madvise(cache, query_cache_size+additional_data_size, MADV_DONTDUMP)) + { + DBUG_PRINT("warning", ("couldn't mark query cache memory as MADV_DONTDUMP: %s", + strerror(errno))); + } +#endif DBUG_PRINT("qcache", ("cache length %zu, min unit %zu, %zu bins", query_cache_size, min_allocation_unit, mem_bin_num)); @@ -2818,6 +2828,13 @@ void Query_cache::free_cache() } while (block != queries_blocks); } +#if defined(DBUG_OFF) && defined(HAVE_MADVISE) && defined(MADV_DODUMP) + if (madvise(cache, query_cache_size+additional_data_size, MADV_DODUMP)) + { + DBUG_PRINT("warning", ("couldn't mark query cache memory as MADV_DODUMP: %s", + strerror(errno))); + } +#endif my_free(cache); make_disabled(); my_hash_free(&queries); diff --git a/sql/sql_cache.h b/sql/sql_cache.h index eb046b4d167..0ed45a9ed81 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -337,6 +337,7 @@ protected: till the end of a flush operation.
*/ mysql_mutex_t structure_guard_mutex; + size_t additional_data_size; uchar *cache; // cache memory Query_cache_block *first_block; // physical location block list Query_cache_block *queries_blocks; // query list (LIFO) diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 7b7d34c6bc0..d2e4f66dd59 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -336,24 +336,9 @@ ulong get_max_connections(void) extern "C" int mysql_tmpfile(const char *prefix) { char filename[FN_REFLEN]; - File fd = create_temp_file(filename, mysql_tmpdir, prefix, -#ifdef __WIN__ - O_BINARY | O_TRUNC | O_SEQUENTIAL | - O_SHORT_LIVED | -#endif /* __WIN__ */ - O_CREAT | O_EXCL | O_RDWR | O_TEMPORARY, - MYF(MY_WME)); - if (fd >= 0) { -#ifndef __WIN__ - /* - This can be removed once the following bug is fixed: - Bug #28903 create_temp_file() doesn't honor O_TEMPORARY option - (file not removed) (Unix) - */ - unlink(filename); -#endif /* !__WIN__ */ - } - + File fd= create_temp_file(filename, mysql_tmpdir, prefix, + O_BINARY | O_SEQUENTIAL, + MYF(MY_WME | MY_TEMPORARY)); return fd; } @@ -770,6 +755,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier, bool skip_global_sys_var_lock) THD *old_THR_THD= current_thd; set_current_thd(this); status_var.local_memory_used= sizeof(THD); + status_var.max_local_memory_used= status_var.local_memory_used; status_var.global_memory_used= 0; variables.pseudo_thread_id= thread_id; variables.max_mem_used= global_system_variables.max_mem_used; @@ -819,7 +805,6 @@ THD::THD(my_thread_id id, bool is_wsrep_applier, bool skip_global_sys_var_lock) statement_id_counter= 0UL; // Must be reset to handle error with THD's created for init of mysqld lex->current_select= 0; - stmt_lex= 0; start_utime= utime_after_query= 0; system_time.start.val= system_time.sec= system_time.sec_part= 0; utime_after_lock= 0L; @@ -880,7 +865,6 @@ THD::THD(my_thread_id id, bool is_wsrep_applier, bool skip_global_sys_var_lock) *scramble= '\0'; #ifdef WITH_WSREP - mysql_mutex_init(key_LOCK_wsrep_thd, &LOCK_wsrep_thd, MY_MUTEX_INIT_FAST); wsrep_ws_handle.trx_id = WSREP_UNDEFINED_TRX_ID; wsrep_ws_handle.opaque = NULL; wsrep_retry_counter = 0; @@ -1173,8 +1157,8 @@ Sql_condition* THD::raise_condition(uint sql_errno, require memory allocation and therefore might fail. Non fatal out of memory errors can occur if raised by SIGNAL/RESIGNAL statement. 
*/ - if (!(is_fatal_error && (sql_errno == EE_OUTOFMEMORY || - sql_errno == ER_OUTOFMEMORY))) + if (likely(!(is_fatal_error && (sql_errno == EE_OUTOFMEMORY || + sql_errno == ER_OUTOFMEMORY)))) { cond= da->push_warning(this, sql_errno, sqlstate, level, ucid, msg); } @@ -1317,6 +1301,7 @@ void THD::init(bool skip_lock) reset_current_stmt_binlog_format_row(); reset_binlog_local_stmt_filter(); set_status_var_init(); + status_var.max_local_memory_used= status_var.local_memory_used; bzero((char *) &org_status_var, sizeof(org_status_var)); status_in_global= 0; start_bytes_received= 0; @@ -1716,9 +1701,6 @@ THD::~THD() mysql_mutex_unlock(&LOCK_thd_kill); #ifdef WITH_WSREP - mysql_mutex_lock(&LOCK_wsrep_thd); - mysql_mutex_unlock(&LOCK_wsrep_thd); - mysql_mutex_destroy(&LOCK_wsrep_thd); delete wsrep_rgi; #endif if (!free_connection_done) @@ -2376,12 +2358,12 @@ bool THD::convert_string(LEX_STRING *to, CHARSET_INFO *to_cs, DBUG_ENTER("THD::convert_string"); size_t new_length= to_cs->mbmaxlen * from_length; uint errors; - if (alloc_lex_string(to, new_length + 1)) + if (unlikely(alloc_lex_string(to, new_length + 1))) DBUG_RETURN(true); // EOM to->length= copy_and_convert((char*) to->str, new_length, to_cs, from, from_length, from_cs, &errors); to->str[to->length]= 0; // Safety - if (errors && lex->parse_vcol_expr) + if (unlikely(errors) && lex->parse_vcol_expr) { my_error(ER_BAD_DATA, MYF(0), ErrConvString(from, from_length, from_cs).ptr(), @@ -2483,7 +2465,8 @@ bool THD::copy_with_error(CHARSET_INFO *dstcs, LEX_STRING *dst, bool THD::convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs) { uint dummy_errors; - if (convert_buffer.copy(s->ptr(), s->length(), from_cs, to_cs, &dummy_errors)) + if (unlikely(convert_buffer.copy(s->ptr(), s->length(), from_cs, to_cs, + &dummy_errors))) return TRUE; /* If convert_buffer >> s copying is more efficient long term */ if (convert_buffer.alloced_length() >= convert_buffer.length() * 2 || @@ -2496,6 +2479,39 @@ bool THD::convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs) } +bool THD::check_string_for_wellformedness(const char *str, + size_t length, + CHARSET_INFO *cs) const +{ + DBUG_ASSERT(charset_is_system_charset); + size_t wlen= Well_formed_prefix(cs, str, length).length(); + if (wlen < length) + { + ErrConvString err(str, length, &my_charset_bin); + my_error(ER_INVALID_CHARACTER_STRING, MYF(0), cs->csname, err.ptr()); + return true; + } + return false; +} + + +bool THD::to_ident_sys_alloc(Lex_ident_sys_st *to, const Lex_ident_cli_st *ident) +{ + if (ident->is_quoted()) + { + LEX_CSTRING unquoted; + if (quote_unescape(&unquoted, ident, ident->quote())) + return true; + return charset_is_system_charset ? + to->copy_sys(this, &unquoted) : + to->convert(this, &unquoted, charset()); + } + return charset_is_system_charset ? 
+ to->copy_sys(this, ident) : + to->copy_or_convert(this, ident, charset()); +} + + Item_basic_constant * THD::make_string_literal(const char *str, size_t length, uint repertoire) { @@ -2657,18 +2673,31 @@ CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, size_t key_length) } -int THD::send_explain_fields(select_result *result, uint8 explain_flags, bool is_analyze) +void THD::prepare_explain_fields(select_result *result, + List<Item> *field_list, + uint8 explain_flags, + bool is_analyze) +{ + if (lex->explain_json) + make_explain_json_field_list(*field_list, is_analyze); + else + make_explain_field_list(*field_list, explain_flags, is_analyze); + + result->prepare(*field_list, NULL); +} + + +int THD::send_explain_fields(select_result *result, + uint8 explain_flags, + bool is_analyze) { List<Item> field_list; - if (lex->explain_json) - make_explain_json_field_list(field_list, is_analyze); - else - make_explain_field_list(field_list, explain_flags, is_analyze); - - result->prepare(field_list, NULL); - return (result->send_result_set_metadata(field_list, - Protocol::SEND_NUM_ROWS | - Protocol::SEND_EOF)); + int rc; + prepare_explain_fields(result, &field_list, explain_flags, is_analyze); + rc= result->send_result_set_metadata(field_list, + Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF); + return(rc); } @@ -3015,7 +3044,7 @@ bool select_send::send_eof() Don't send EOF if we're in error condition (which implies we've already sent or are sending an error) */ - if (thd->is_error()) + if (unlikely(thd->is_error())) return TRUE; ::my_eof(thd); is_result_set_started= 0; @@ -3030,10 +3059,11 @@ bool select_send::send_eof() bool select_to_file::send_eof() { int error= MY_TEST(end_io_cache(&cache)); - if (mysql_file_close(file, MYF(MY_WME)) || thd->is_error()) + if (unlikely(mysql_file_close(file, MYF(MY_WME))) || + unlikely(thd->is_error())) error= true; - if (!error && !suppress_my_ok) + if (likely(!error) && !suppress_my_ok) { ::my_ok(thd,row_count); } @@ -3295,7 +3325,7 @@ int select_export::send_data(List<Item> &items) res->charset(), res->ptr(), res->length()); error_pos= copier.most_important_error_pos(); - if (error_pos) + if (unlikely(error_pos)) { char printable_buff[32]; convert_to_printable(printable_buff, sizeof(printable_buff), @@ -3814,7 +3844,7 @@ void Statement::set_statement(Statement *stmt) { id= stmt->id; column_usage= stmt->column_usage; - stmt_lex= lex= stmt->lex; + lex= stmt->lex; query_string= stmt->query_string; } @@ -4131,7 +4161,7 @@ bool select_dumpvar::send_eof() Don't send EOF if we're in error condition (which implies we've already sent or are sending an error) */ - if (thd->is_error()) + if (unlikely(thd->is_error())) return true; if (!suppress_my_ok) @@ -4267,9 +4297,8 @@ void thd_increment_bytes_sent(void *thd, size_t length) } } -my_bool thd_net_is_killed() +my_bool thd_net_is_killed(THD *thd) { - THD *thd= current_thd; return thd && thd->killed ? 1 : 0; } @@ -4737,7 +4766,7 @@ TABLE *open_purge_table(THD *thd, const char *db, size_t dblen, /* we don't recover here */ DBUG_ASSERT(!error || !ot_ctx.can_recover_from_failed_open()); - if (error) + if (unlikely(error)) close_thread_tables(thd); DBUG_RETURN(error ?
              NULL : tl->table);
@@ -6359,7 +6388,8 @@ int THD::decide_logging_format(TABLE_LIST *tables)
       clear_binlog_local_stmt_filter();
     }
 
-    if (error) {
+    if (unlikely(error))
+    {
       DBUG_PRINT("info", ("decision: no logging since an error was generated"));
       DBUG_RETURN(-1);
     }
@@ -6656,7 +6686,8 @@ int THD::binlog_write_row(TABLE* table, bool is_trans,
     Pack records into format for transfer. We are allocating more
     memory than needed, but that doesn't matter.
   */
-  Row_data_memory memory(table, max_row_length(table, record));
+  Row_data_memory memory(table, max_row_length(table, table->rpl_write_set,
+                                               record));
   if (!memory.has_memory())
     return HA_ERR_OUT_OF_MEM;
 
@@ -6693,8 +6724,10 @@ int THD::binlog_update_row(TABLE* table, bool is_trans,
   DBUG_ASSERT(is_current_stmt_binlog_format_row() &&
               ((WSREP(this) && wsrep_emulate_bin_log) || mysql_bin_log.is_open()));
 
-  size_t const before_maxlen = max_row_length(table, before_record);
-  size_t const after_maxlen = max_row_length(table, after_record);
+  size_t const before_maxlen= max_row_length(table, table->read_set,
+                                             before_record);
+  size_t const after_maxlen= max_row_length(table, table->rpl_write_set,
+                                            after_record);
 
   Row_data_memory row_data(table, before_maxlen, after_maxlen);
   if (!row_data.has_memory())
@@ -6770,7 +6803,8 @@ int THD::binlog_delete_row(TABLE* table, bool is_trans,
     Pack records into format for transfer. We are allocating more
     memory than needed, but that doesn't matter.
   */
-  Row_data_memory memory(table, max_row_length(table, record));
+  Row_data_memory memory(table, max_row_length(table, table->read_set,
+                                               record));
   if (unlikely(!memory.has_memory()))
     return HA_ERR_OUT_OF_MEM;
 
@@ -6809,15 +6843,17 @@ int THD::binlog_delete_row(TABLE* table, bool is_trans,
 }
 
+
+/**
+  Remove from read_set spurious columns. The write_set has been
+  handled before in table->mark_columns_needed_for_update.
+*/
+
 void THD::binlog_prepare_row_images(TABLE *table)
 {
   DBUG_ENTER("THD::binlog_prepare_row_images");
 
-  /**
-    Remove from read_set spurious columns. The write_set has been
-    handled before in table->mark_columns_needed_for_update.
-  */
-
-  DBUG_PRINT_BITSET("debug", "table->read_set (before preparing): %s", table->read_set);
+  DBUG_PRINT_BITSET("debug", "table->read_set (before preparing): %s",
+                    table->read_set);
 
   THD *thd= table->in_use;
 
   /**
@@ -6835,7 +6871,7 @@ void THD::binlog_prepare_row_images(TABLE *table)
   */
   DBUG_ASSERT(table->read_set != &table->tmp_set);
 
-  switch(thd->variables.binlog_row_image)
+  switch (thd->variables.binlog_row_image)
   {
     case BINLOG_ROW_IMAGE_MINIMAL:
       /* MINIMAL: Mark only PK */
@@ -6865,7 +6901,8 @@ void THD::binlog_prepare_row_images(TABLE *table)
                                            table->write_set);
   }
 
-  DBUG_PRINT_BITSET("debug", "table->read_set (after preparing): %s", table->read_set);
+  DBUG_PRINT_BITSET("debug", "table->read_set (after preparing): %s",
+                    table->read_set);
   DBUG_VOID_RETURN;
 }
 
@@ -7201,8 +7238,11 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query_arg,
     top-most close_thread_tables().
*/ if (this->locked_tables_mode <= LTM_LOCK_TABLES) - if (int error= binlog_flush_pending_rows_event(TRUE, is_trans)) + { + int error; + if (unlikely(error= binlog_flush_pending_rows_event(TRUE, is_trans))) DBUG_RETURN(error); + } /* Warnings for unsafe statements logged in statement format are @@ -7470,7 +7510,7 @@ wait_for_commit::wait_for_prior_commit2(THD *thd) thd->ENTER_COND(&COND_wait_commit, &LOCK_wait_commit, &stage_waiting_for_prior_transaction_to_commit, &old_stage); - while ((loc_waitee= this->waitee) && !thd->check_killed()) + while ((loc_waitee= this->waitee) && likely(!thd->check_killed(1))) mysql_cond_wait(&COND_wait_commit, &LOCK_wait_commit); if (!loc_waitee) { diff --git a/sql/sql_class.h b/sql/sql_class.h index a7c33cbc504..64b75dbe7be 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -632,6 +632,7 @@ typedef struct system_variables ulong query_cache_type; ulong tx_isolation; ulong updatable_views_with_limit; + ulong alter_algorithm; int max_user_connections; ulong server_id; /** @@ -655,7 +656,6 @@ typedef struct system_variables my_bool keep_files_on_create; my_bool old_mode; - my_bool old_alter_table; my_bool old_passwords; my_bool big_tables; my_bool only_standard_compliant_cte; @@ -820,8 +820,10 @@ typedef struct system_status_var ulong feature_fulltext; /* +1 when MATCH is used */ ulong feature_gis; /* +1 opening a table with GIS features */ ulong feature_invisible_columns; /* +1 opening a table with invisible column */ + ulong feature_json; /* +1 when JSON function appears in the statement */ ulong feature_locale; /* +1 when LOCALE is set */ ulong feature_subquery; /* +1 when subqueries are used */ + ulong feature_system_versioning; /* +1 opening a table WITH SYSTEM VERSIONING */ ulong feature_timezone; /* +1 when XPATH is used */ ulong feature_trigger; /* +1 opening a table with triggers */ ulong feature_xml; /* +1 when XPATH is used */ @@ -1012,7 +1014,7 @@ public: inline void* calloc(size_t size) { void *ptr; - if ((ptr=alloc_root(mem_root,size))) + if (likely((ptr=alloc_root(mem_root,size)))) bzero(ptr, size); return ptr; } @@ -1025,7 +1027,7 @@ public: inline void *memdup_w_gap(const void *str, size_t size, size_t gap) { void *ptr; - if ((ptr= alloc_root(mem_root,size+gap))) + if (likely((ptr= alloc_root(mem_root,size+gap)))) memcpy(ptr,str,size); return ptr; } @@ -1100,21 +1102,6 @@ public: LEX_CSTRING name; /* name for named prepared statements */ LEX *lex; // parse tree descriptor - /* - LEX which represents current statement (conventional, SP or PS) - - For example during view parsing THD::lex will point to the views LEX and - THD::stmt_lex will point to LEX of the statement where the view will be - included - - Currently it is used to have always correct select numbering inside - statement (LEX::current_select_number) without storing and restoring a - global counter which was THD::select_number. - - TODO: make some unified statement representation (now SP has different) - to store such data like LEX::current_select_number. - */ - LEX *stmt_lex; /* Points to the query associated with this statement. 
It's const, but we need to declare it char * because all table handlers are written @@ -1910,7 +1897,7 @@ public: void unlink_all_closed_tables(THD *thd, MYSQL_LOCK *lock, size_t reopen_count); - bool reopen_tables(THD *thd); + bool reopen_tables(THD *thd, bool need_reopen); bool restore_lock(THD *thd, TABLE_LIST *dst_table_list, TABLE *table, MYSQL_LOCK *lock); void add_back_last_deleted_lock(TABLE_LIST *dst_table_list); @@ -2152,26 +2139,6 @@ struct wait_for_commit void reinit(); }; -/* - Structure to store the start time for a query -*/ - -struct QUERY_START_TIME_INFO -{ - my_time_t start_time; - ulong start_time_sec_part; - ulonglong start_utime, utime_after_lock; - - void backup_query_start_time(QUERY_START_TIME_INFO *backup) - { - *backup= *this; - } - void restore_query_start_time(QUERY_START_TIME_INFO *backup) - { - *this= *backup; - } -}; - extern "C" void my_message_sql(uint error, const char *str, myf MyFlags); /** @@ -2190,8 +2157,7 @@ class THD :public Statement, */ public Item_change_list, public MDL_context_owner, - public Open_tables_state, - public QUERY_START_TIME_INFO + public Open_tables_state { private: inline bool is_stmt_prepare() const @@ -2416,10 +2382,12 @@ public: uint32 file_id; // for LOAD DATA INFILE /* remote (peer) port */ uint16 peer_port; + my_time_t start_time; // start_time and its sec_part + ulong start_time_sec_part; // are almost always used separately my_hrtime_t user_time; // track down slow pthread_create ulonglong prior_thr_create_utime, thr_create_utime; - ulonglong utime_after_query; + ulonglong start_utime, utime_after_lock, utime_after_query; // Process indicator struct { @@ -2625,6 +2593,15 @@ public: WT_THD wt; ///< for deadlock detection Rows_log_event *m_pending_rows_event; + struct st_trans_time : public timeval + { + void reset(THD *thd) + { + tv_sec= thd->query_start(); + tv_usec= (long) thd->query_start_sec_part(); + } + } start_time; + /* Tables changed in transaction (that must be invalidated in query cache). List contain only transactional tables, that not invalidated in query @@ -3066,10 +3043,14 @@ public: } *killed_err; /* See also thd_killed() */ - inline bool check_killed() + inline bool check_killed(bool dont_send_error_message= 0) { - if (killed) + if (unlikely(killed)) + { + if (!dont_send_error_message) + send_kill_message(); return TRUE; + } if (apc_target.have_apc_requests()) apc_target.process_apc_requests(); return FALSE; @@ -3129,8 +3110,10 @@ public: is set if a statement accesses a temporary table created through CREATE TEMPORARY TABLE. 
*/ - bool charset_is_system_charset, charset_is_collation_connection; +private: + bool charset_is_system_charset, charset_is_collation_connection; bool charset_is_character_set_filesystem; +public: bool enable_slow_log; /* Enable slow log for current statement */ bool abort_on_warning; bool got_warning; /* Set on call to push_warning() */ @@ -3434,16 +3417,13 @@ public: { query_start_sec_part_used=1; return start_time_sec_part; } MYSQL_TIME query_start_TIME(); +private: struct { my_hrtime_t start; my_time_t sec; ulong sec_part; } system_time; - ulong systime_sec_part() { query_start_sec_part_used=1; return system_time.sec_part; } - my_time_t systime() { return system_time.sec; } - -private: void set_system_time() { my_hrtime_t hrtime= my_hrtime(); @@ -3468,29 +3448,16 @@ private: } } - void set_system_time_from_user_time(bool with_sec_part) +public: + timeval transaction_time() { - if (with_sec_part) - { - system_time.sec= start_time; - system_time.sec_part= start_time_sec_part; - } - else - { - if (system_time.sec == start_time) - system_time.sec_part++; - else - { - system_time.sec= start_time; - system_time.sec_part= 0; - } - } + if (!in_multi_stmt_transaction_mode()) + transaction.start_time.reset(this); + return transaction.start_time; } -public: inline void set_start_time() { - set_system_time(); if (user_time.val) { start_time= hrtime_to_my_time(user_time); @@ -3498,6 +3465,7 @@ public: } else { + set_system_time(); start_time= system_time.sec; start_time_sec_part= system_time.sec_part; } @@ -3508,6 +3476,7 @@ public: set_start_time(); start_utime= utime_after_lock= microsecond_interval_timer(); } + /* only used in SET @@timestamp=... */ inline void set_time(my_hrtime_t t) { user_time= t; @@ -3519,15 +3488,29 @@ public: */ inline void set_time(my_time_t t, ulong sec_part) { - start_time= t; - start_time_sec_part= sec_part > TIME_MAX_SECOND_PART ? 0 : sec_part; - user_time.val= hrtime_from_time(start_time) + start_time_sec_part; - if (slave_thread) - set_system_time_from_user_time(sec_part <= TIME_MAX_SECOND_PART); - else // BINLOG command - set_system_time(); - PSI_CALL_set_thread_start_time(start_time); - start_utime= utime_after_lock= microsecond_interval_timer(); + if (opt_secure_timestamp > (slave_thread ? 
SECTIME_REPL : SECTIME_SUPER)) + set_time(); // note that BINLOG itself requires SUPER + else + { + if (sec_part <= TIME_MAX_SECOND_PART) + { + start_time= system_time.sec= t; + start_time_sec_part= system_time.sec_part= sec_part; + } + else if (t != system_time.sec) + { + start_time= system_time.sec= t; + start_time_sec_part= system_time.sec_part= 0; + } + else + { + start_time= t; + start_time_sec_part= ++system_time.sec_part; + } + user_time.val= hrtime_from_time(start_time) + start_time_sec_part; + PSI_CALL_set_thread_start_time(start_time); + start_utime= utime_after_lock= microsecond_interval_timer(); + } } void set_time_after_lock() { @@ -3657,13 +3640,34 @@ public: lex_str->length= length; return lex_str; } + // Remove double quotes: aaa""bbb -> aaa"bbb + bool quote_unescape(LEX_CSTRING *dst, const LEX_CSTRING *src, char quote) + { + const char *tmp= src->str; + const char *tmpend= src->str + src->length; + char *to; + if (!(dst->str= to= (char *) alloc(src->length + 1))) + { + dst->length= 0; // Safety + return true; + } + for ( ; tmp < tmpend; ) + { + if ((*to++= *tmp++) == quote) + tmp++; // Skip double quotes + } + *to= 0; // End null for safety + dst->length= to - dst->str; + return false; + } LEX_CSTRING *make_clex_string(const char* str, size_t length) { LEX_CSTRING *lex_str; char *tmp; - if (!(lex_str= (LEX_CSTRING *)alloc_root(mem_root, sizeof(LEX_CSTRING) + - length+1))) + if (unlikely(!(lex_str= (LEX_CSTRING *)alloc_root(mem_root, + sizeof(LEX_CSTRING) + + length+1)))) return 0; tmp= (char*) (lex_str+1); lex_str->str= tmp; @@ -3676,7 +3680,7 @@ public: // Allocate LEX_STRING for character set conversion bool alloc_lex_string(LEX_STRING *dst, size_t length) { - if ((dst->str= (char*) alloc(length))) + if (likely((dst->str= (char*) alloc(length)))) return false; dst->length= 0; // Safety return true; // EOM @@ -3684,6 +3688,25 @@ public: bool convert_string(LEX_STRING *to, CHARSET_INFO *to_cs, const char *from, size_t from_length, CHARSET_INFO *from_cs); + bool convert_string(LEX_CSTRING *to, CHARSET_INFO *to_cs, + const char *from, size_t from_length, + CHARSET_INFO *from_cs) + { + LEX_STRING tmp; + bool rc= convert_string(&tmp, to_cs, from, from_length, from_cs); + to->str= tmp.str; + to->length= tmp.length; + return rc; + } + bool convert_string(LEX_CSTRING *to, CHARSET_INFO *tocs, + const LEX_CSTRING *from, CHARSET_INFO *fromcs, + bool simple_copy_is_possible) + { + if (!simple_copy_is_possible) + return unlikely(convert_string(to, tocs, from->str, from->length, fromcs)); + *to= *from; + return false; + } /* Convert a strings between character sets. Uses my_convert_fix(), which uses an mb_wc .. mc_mb loop internally. @@ -3700,7 +3723,6 @@ public: bool convert_with_error(CHARSET_INFO *dstcs, LEX_STRING *dst, CHARSET_INFO *srccs, const char *src, size_t src_length); - /* If either "dstcs" or "srccs" is &my_charset_bin, then performs native copying using cs->cset->copy_fix(). @@ -3719,6 +3741,17 @@ public: bool convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs); + /* + Check if the string is wellformed, raise an error if not wellformed. + @param str - The string to check. + @param length - the string length. + */ + bool check_string_for_wellformedness(const char *str, + size_t length, + CHARSET_INFO *cs) const; + + bool to_ident_sys_alloc(Lex_ident_sys_st *to, const Lex_ident_cli_st *from); + /* Create a string literal with optional client->connection conversion. 
@param str - the string in the client character set @@ -3735,9 +3768,29 @@ public: Item_basic_constant *make_string_literal_nchar(const Lex_string_with_metadata_st &str); Item_basic_constant *make_string_literal_charset(const Lex_string_with_metadata_st &str, CHARSET_INFO *cs); + bool make_text_string_sys(LEX_CSTRING *to, + const Lex_string_with_metadata_st *from) + { + return convert_string(to, system_charset_info, + from, charset(), charset_is_system_charset); + } + bool make_text_string_connection(LEX_CSTRING *to, + const Lex_string_with_metadata_st *from) + { + return convert_string(to, variables.collation_connection, + from, charset(), charset_is_collation_connection); + } + bool make_text_string_filesystem(LEX_CSTRING *to, + const Lex_string_with_metadata_st *from) + { + return convert_string(to, variables.character_set_filesystem, + from, charset(), charset_is_character_set_filesystem); + } void add_changed_table(TABLE *table); void add_changed_table(const char *key, size_t key_length); CHANGED_TABLE_LIST * changed_table_dup(const char *key, size_t key_length); + void prepare_explain_fields(select_result *result, List *field_list, + uint8 explain_flags, bool is_analyze); int send_explain_fields(select_result *result, uint8 explain_flags, bool is_analyze); void make_explain_field_list(List &field_list, uint8 explain_flags, @@ -3824,7 +3877,7 @@ public: void set_stmt_da(Diagnostics_area *da) { m_stmt_da= da; } - inline CHARSET_INFO *charset() { return variables.character_set_client; } + inline CHARSET_INFO *charset() const { return variables.character_set_client; } void update_charset(); void update_charset(CHARSET_INFO *character_set_client, CHARSET_INFO *collation_connection) @@ -3932,7 +3985,7 @@ public: The worst things that can happen is that we get a suboptimal error message. */ - if ((killed_err= (err_info*) alloc(sizeof(*killed_err)))) + if (likely((killed_err= (err_info*) alloc(sizeof(*killed_err))))) { killed_err->no= killed_errno_arg; ::strmake((char*) killed_err->msg, killed_err_msg_arg, @@ -4029,6 +4082,16 @@ public: DBUG_VOID_RETURN; } + inline enum_binlog_format get_current_stmt_binlog_format() + { + return current_stmt_binlog_format; + } + + inline void set_current_stmt_binlog_format(enum_binlog_format format) + { + current_stmt_binlog_format= format; + } + inline void set_current_stmt_binlog_format_row() { DBUG_ENTER("set_current_stmt_binlog_format_row"); @@ -4214,16 +4277,8 @@ public: void parse_error(const char *err_text, const char *yytext) { Lex_input_stream *lip= &m_parser_state->m_lip; - if (!yytext) - { - if (lip->lookahead_token >= 0) - yytext= lip->get_tok_start_prev(); - else - yytext= lip->get_tok_start(); - - if (!yytext) + if (!yytext && !(yytext= lip->get_tok_start())) yytext= ""; - } /* Push an error into the error stack */ ErrConvString err(yytext, strlen(yytext), variables.character_set_client); my_printf_error(ER_PARSE_ERROR, ER_THD(this, ER_PARSE_ERROR), MYF(0), @@ -4513,6 +4568,12 @@ public: /* Members related to temporary tables. */ public: + /* Opened table states. 
*/ + enum Temporary_table_state { + TMP_TABLE_IN_USE, + TMP_TABLE_NOT_IN_USE, + TMP_TABLE_ANY + }; bool has_thd_temporary_tables(); TABLE *create_and_open_tmp_table(handlerton *hton, @@ -4523,8 +4584,10 @@ public: bool open_in_engine, bool open_internal_tables); - TABLE *find_temporary_table(const char *db, const char *table_name); - TABLE *find_temporary_table(const TABLE_LIST *tl); + TABLE *find_temporary_table(const char *db, const char *table_name, + Temporary_table_state state= TMP_TABLE_IN_USE); + TABLE *find_temporary_table(const TABLE_LIST *tl, + Temporary_table_state state= TMP_TABLE_IN_USE); TMP_TABLE_SHARE *find_tmp_table_share_w_base_key(const char *key, uint key_length); @@ -4551,13 +4614,6 @@ private: /* Whether a lock has been acquired? */ bool m_tmp_tables_locked; - /* Opened table states. */ - enum Temporary_table_state { - TMP_TABLE_IN_USE, - TMP_TABLE_NOT_IN_USE, - TMP_TABLE_ANY - }; - bool has_temporary_tables(); uint create_tmp_table_def_key(char *key, const char *db, const char *table_name); @@ -4606,7 +4662,6 @@ public: query_id_t wsrep_last_query_id; enum wsrep_query_state wsrep_query_state; enum wsrep_conflict_state wsrep_conflict_state; - mysql_mutex_t LOCK_wsrep_thd; wsrep_trx_meta_t wsrep_trx_meta; uint32 wsrep_rand; Relay_log_info *wsrep_rli; @@ -4727,7 +4782,7 @@ public: void set_local_lex(sp_lex_local *sublex) { DBUG_ASSERT(lex->sphead); - lex= stmt_lex= sublex; + lex= sublex; /* Reset part of parser state which needs this. */ m_parser_state->m_yacc.reset_before_substatement(); } @@ -4907,7 +4962,7 @@ public: unit= u; return 0; } - virtual int prepare2(void) { return 0; } + virtual int prepare2(JOIN *join) { return 0; } /* Because of peculiarities of prepared statements protocol we need to know number of columns in the result set (if @@ -4916,7 +4971,7 @@ public: virtual uint field_count(List &fields) const { return fields.elements; } virtual bool send_result_set_metadata(List &list, uint flags)=0; - virtual bool initialize_tables (JOIN *join=0) { return 0; } + virtual bool initialize_tables (JOIN *join) { return 0; } virtual bool send_eof()=0; /** Check if this query returns a result set and therefore is allowed in @@ -5151,7 +5206,7 @@ class select_insert :public select_result_interceptor { enum_duplicates duplic, bool ignore); ~select_insert(); int prepare(List &list, SELECT_LEX_UNIT *u); - virtual int prepare2(void); + virtual int prepare2(JOIN *join); virtual int send_data(List &items); virtual void store_values(List &values); virtual bool can_rollback_data() { return 0; } @@ -5203,7 +5258,7 @@ public: // Needed for access from local class MY_HOOKS in prepare(), since thd is proteted. 
const THD *get_thd(void) { return thd; } const HA_CREATE_INFO *get_create_info() { return create_info; }; - int prepare2(void) { return 0; } + int prepare2(JOIN *join) { return 0; } private: TABLE *create_table_from_items(THD *thd, @@ -5468,7 +5523,7 @@ public: bool postponed_prepare(List &types); bool send_result_set_metadata(List &list, uint flags); int send_data(List &items); - bool initialize_tables (JOIN *join= NULL); + bool initialize_tables (JOIN *join); bool send_eof(); bool flush() { return false; } bool check_simple_select() const @@ -5887,6 +5942,7 @@ public: int prepare(List &list, SELECT_LEX_UNIT *u); int send_data(List &items); bool initialize_tables (JOIN *join); + int prepare2(JOIN *join); int do_updates(); bool send_eof(); inline ha_rows num_found() const { return found; } @@ -6364,7 +6420,8 @@ public: char *tmp; /* format: [database + dot] + name + '\0' */ dst->length= m_db.length + dot + m_name.length; - if (!(dst->str= tmp= (char*) alloc_root(mem_root, dst->length + 1))) + if (unlikely(!(dst->str= tmp= (char*) alloc_root(mem_root, + dst->length + 1)))) return true; sprintf(tmp, "%.*s%.*s%.*s", (int) m_db.length, (m_db.length ? m_db.str : ""), @@ -6380,7 +6437,7 @@ public: { char *tmp; size_t length= package.length + 1 + routine.length + 1; - if (!(tmp= (char *) alloc_root(mem_root, length))) + if (unlikely(!(tmp= (char *) alloc_root(mem_root, length)))) return true; m_name.length= my_snprintf(tmp, length, "%.*s.%.*s", (int) package.length, package.str, @@ -6394,9 +6451,9 @@ public: const LEX_CSTRING &package, const LEX_CSTRING &routine) { - if (make_package_routine_name(mem_root, package, routine)) + if (unlikely(make_package_routine_name(mem_root, package, routine))) return true; - if (!(m_db.str= strmake_root(mem_root, db.str, db.length))) + if (unlikely(!(m_db.str= strmake_root(mem_root, db.str, db.length)))) return true; m_db.length= db.length; return false; diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index 558d11eca8c..b48070b9c8f 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -167,7 +167,7 @@ int check_for_max_user_connections(THD *thd, USER_CONN *uc) error= 0; end: - if (error) + if (unlikely(error)) { uc->connections--; // no need for decrease_user_connections() here /* @@ -178,7 +178,7 @@ end: thd->user_connect= NULL; } mysql_mutex_unlock(&LOCK_user_conn); - if (error) + if (unlikely(error)) { inc_host_errors(thd->main_security_ctx.ip, &errors); } @@ -1049,7 +1049,7 @@ static int check_connection(THD *thd) vio_keepalive(net->vio, TRUE); vio_set_keepalive_options(net->vio, &opt_vio_keepalive); - if (thd->packet.alloc(thd->variables.net_buffer_length)) + if (unlikely(thd->packet.alloc(thd->variables.net_buffer_length))) { /* Important note: @@ -1139,7 +1139,7 @@ bool login_connection(THD *thd) error= check_connection(thd); thd->protocol->end_statement(); - if (error) + if (unlikely(error)) { // Wrong permissions #ifdef _WIN32 if (vio_type(net->vio) == VIO_TYPE_NAMEDPIPE) @@ -1206,13 +1206,13 @@ void end_connection(THD *thd) thd->user_connect= NULL; } - if (thd->killed || (net->error && net->vio != 0)) + if (unlikely(thd->killed) || (net->error && net->vio != 0)) { statistic_increment(aborted_threads,&LOCK_status); status_var_increment(thd->status_var.lost_connections); } - if (!thd->killed && (net->error && net->vio != 0)) + if (likely(!thd->killed) && (net->error && net->vio != 0)) thd->print_aborted_warning(1, thd->get_stmt_da()->is_error() ? 
thd->get_stmt_da()->message() : ER_THD(thd, ER_UNKNOWN_ERROR)); } @@ -1241,7 +1241,7 @@ void prepare_new_connection_state(THD* thd) if (opt_init_connect.length && !(sctx->master_access & SUPER_ACL)) { execute_init_command(thd, &opt_init_connect, &LOCK_sys_init_connect); - if (thd->is_error()) + if (unlikely(thd->is_error())) { Host_errors errors; thd->set_killed(KILL_CONNECTION); @@ -1330,9 +1330,9 @@ bool thd_prepare_connection(THD *thd) bool thd_is_connection_alive(THD *thd) { NET *net= &thd->net; - if (!net->error && - net->vio != 0 && - thd->killed < KILL_CONNECTION) + if (likely(!net->error && + net->vio != 0 && + thd->killed < KILL_CONNECTION)) return TRUE; return FALSE; } @@ -1407,9 +1407,9 @@ void do_handle_one_connection(CONNECT *connect) #ifdef WITH_WSREP if (WSREP(thd)) { - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); thd->wsrep_query_state= QUERY_EXITING; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); } #endif end_thread: @@ -1510,7 +1510,7 @@ THD *CONNECT::create_thd(THD *thd) res= my_net_init(&thd->net, vio, thd, MYF(MY_THREAD_SPECIFIC)); vio= 0; // Vio now handled by thd - if (res || thd->is_error()) + if (unlikely(res || thd->is_error())) { if (!thd_reused) delete thd; diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index a58a9254a82..97bbf8f73bd 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -682,7 +682,6 @@ void With_element::move_anchors_ahead() { st_select_lex *next_sl; st_select_lex *new_pos= spec->first_select(); - st_select_lex *UNINIT_VAR(last_sl); new_pos->linkage= UNION_TYPE; for (st_select_lex *sl= new_pos; sl; sl= next_sl) { @@ -690,6 +689,14 @@ void With_element::move_anchors_ahead() if (is_anchor(sl)) { sl->move_node(new_pos); + if (new_pos == spec->first_select()) + { + enum sub_select_type type= new_pos->linkage; + new_pos->linkage= sl->linkage; + sl->linkage= type; + new_pos->with_all_modifier= sl->with_all_modifier; + sl->with_all_modifier= false; + } new_pos= sl->next_select(); } else if (!sq_rec_ref && no_rec_ref_on_top_level()) @@ -697,10 +704,7 @@ void With_element::move_anchors_ahead() sq_rec_ref= find_first_sq_rec_ref_in_select(sl); DBUG_ASSERT(sq_rec_ref != NULL); } - last_sl= sl; } - if (spec->union_distinct) - spec->union_distinct= last_sl; first_recursive= new_pos; } @@ -829,8 +833,9 @@ st_select_lex_unit *With_element::clone_parsed_spec(THD *thd, if (parser_state.init(thd, (char*) unparsed_spec.str, (unsigned int)unparsed_spec.length)) goto err; lex_start(thd); + lex->stmt_lex= old_lex; with_select= &lex->select_lex; - with_select->select_number= ++thd->stmt_lex->current_select_number; + with_select->select_number= ++thd->lex->stmt_lex->current_select_number; parse_status= parse_sql(thd, &parser_state, 0); if (parse_status) goto err; @@ -978,7 +983,7 @@ bool With_element::prepare_unreferenced(THD *thd) thd->lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_DERIVED; if (!spec->prepared && - (spec->prepare(thd, 0, 0) || + (spec->prepare(spec->derived, 0, 0) || rename_columns_of_derived_unit(thd, spec) || check_duplicate_names(thd, first_sl->item_list, 1))) rc= true; @@ -1189,7 +1194,7 @@ bool st_select_lex::check_unrestricted_recursive(bool only_standard_compliant) /* Check conditions 3-4 for restricted specification*/ - if (with_sum_func || + if ((with_sum_func && !with_elem->is_anchor(this)) || (with_elem->contains_sq_with_recursive_reference())) with_elem->get_owner()->add_unrestricted( with_elem->get_mutually_recursive()); @@ -1414,7 +1419,7 @@ bool 
With_element::instantiate_tmp_tables() { if (!rec_table->is_created() && instantiate_tmp_table(rec_table, - rec_result->tmp_table_param.keyinfo, + rec_table->s->key_info, rec_result->tmp_table_param.start_recinfo, &rec_result->tmp_table_param.recinfo, 0)) diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 67910a3b618..5f826e37a76 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -293,7 +293,7 @@ static my_bool put_dbopt(const char *dbname, Schema_specification_st *create) strmov(opt->name, dbname); opt->name_length= length; - if ((error= my_hash_insert(&dboptions, (uchar*) opt))) + if (unlikely((error= my_hash_insert(&dboptions, (uchar*) opt)))) { my_free(opt); goto end; @@ -338,7 +338,7 @@ static void del_dbopt(const char *path) static bool write_db_opt(THD *thd, const char *path, Schema_specification_st *create) { - register File file; + File file; char buf[256]; // Should be enough for one option bool error=1; @@ -724,7 +724,7 @@ mysql_alter_db_internal(THD *thd, const LEX_CSTRING *db, "table name to file name" encoding. */ build_table_filename(path, sizeof(path) - 1, db->str, "", MY_DB_OPT_FILE, 0); - if ((error=write_db_opt(thd, path, create_info))) + if (unlikely((error=write_db_opt(thd, path, create_info)))) goto exit; /* Change options if current database is being altered. */ @@ -754,7 +754,7 @@ mysql_alter_db_internal(THD *thd, const LEX_CSTRING *db, These DDL methods and logging are protected with the exclusive metadata lock on the schema. */ - if ((error= mysql_bin_log.write(&qinfo))) + if (unlikely((error= mysql_bin_log.write(&qinfo)))) goto exit; } my_ok(thd, result); @@ -938,7 +938,7 @@ mysql_rm_db_internal(THD *thd, const LEX_CSTRING *db, bool if_exists, bool silen thd->pop_internal_handler(); update_binlog: - if (!silent && !error) + if (!silent && likely(!error)) { const char *query; ulong query_length; @@ -993,8 +993,7 @@ update_binlog: if (ha_table_exists(thd, &tbl->db, &tbl->table_name)) continue; - tbl_name_len= my_snprintf(quoted_name, sizeof(quoted_name), - quoted_string, + tbl_name_len= my_snprintf(quoted_name, sizeof(quoted_name), "%`s", tbl->table_name.str); tbl_name_len++; /* +1 for the comma */ if (query_pos + tbl_name_len + 1 >= query_end) @@ -1036,7 +1035,7 @@ exit: SELECT DATABASE() in the future). For this we free() thd->db and set it to 0. */ - if (thd->db.str && cmp_db_names(&thd->db, db) && !error) + if (unlikely(thd->db.str && cmp_db_names(&thd->db, db) && !error)) { mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server); SESSION_TRACKER_CHANGED(thd, CURRENT_SCHEMA_TRACKER, NULL); @@ -1179,9 +1178,9 @@ static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error) if (pos > path && pos[-1] == FN_LIBCHAR) *--pos=0; - if ((error= my_readlink(tmp2_path, path, MYF(MY_WME))) < 0) + if (unlikely((error= my_readlink(tmp2_path, path, MYF(MY_WME))) < 0)) DBUG_RETURN(1); - if (!error) + if (likely(!error)) { if (mysql_file_delete(key_file_misc, path, MYF(send_error ? 
MY_WME : 0))) { @@ -1196,7 +1195,7 @@ static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error) if (pos > path && pos[-1] == FN_LIBCHAR) *--pos=0; - if (rmdir(path) < 0 && send_error) + if (unlikely(rmdir(path) < 0 && send_error)) { my_error(ER_DB_DROP_RMDIR, MYF(0), path, errno); DBUG_RETURN(1); @@ -1697,15 +1696,16 @@ bool mysql_upgrade_db(THD *thd, const LEX_CSTRING *old_db) length= build_table_filename(path, sizeof(path)-1, old_db->str, "", "", 0); if (length && path[length-1] == FN_LIBCHAR) path[length-1]=0; // remove ending '\' - if ((error= my_access(path,F_OK))) + if (unlikely((error= my_access(path,F_OK)))) { my_error(ER_BAD_DB_ERROR, MYF(0), old_db->str); goto exit; } /* Step1: Create the new database */ - if ((error= mysql_create_db_internal(thd, &new_db, - DDL_options(), &create_info, 1))) + if (unlikely((error= mysql_create_db_internal(thd, &new_db, + DDL_options(), &create_info, + 1)))) goto exit; /* Step2: Move tables to the new database */ diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 0a12d01596b..b6ca2e956cb 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -254,7 +254,12 @@ int TABLE::delete_row() store_record(this, record[1]); vers_update_end(); - return file->ha_update_row(record[1], record[0]); + int res; + if ((res= file->extra(HA_EXTRA_REMEMBER_POS))) + return res; + if ((res= file->ha_update_row(record[1], record[0]))) + return res; + return file->extra(HA_EXTRA_RESTORE_POS); } @@ -305,7 +310,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, THD_STAGE_INFO(thd, stage_init_update); - bool truncate_history= table_list->vers_conditions; + bool truncate_history= table_list->vers_conditions.is_set(); if (truncate_history) { if (table_list->is_view_or_derived()) @@ -324,13 +329,6 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, DBUG_ASSERT(!conds); conds= table_list->on_expr; table_list->on_expr= NULL; - - // trx_sees() in InnoDB reads row_start - if (!table->versioned(VERS_TIMESTAMP)) - { - DBUG_ASSERT(table_list->vers_conditions.type == SYSTEM_TIME_BEFORE); - bitmap_set_bit(table->read_set, table->vers_end_field()->field_index); - } } if (mysql_handle_list_of_derived(thd->lex, table_list, DT_MERGE_FOR_INSERT)) @@ -401,7 +399,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, } const_cond_result= const_cond && (!conds || conds->val_int()); - if (thd->is_error()) + if (unlikely(thd->is_error())) { /* Error evaluating val_int(). */ DBUG_RETURN(TRUE); @@ -439,7 +437,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, if (thd->lex->describe) goto produce_explain_and_leave; - if (!(error=table->file->ha_delete_all_rows())) + if (likely(!(error=table->file->ha_delete_all_rows()))) { /* If delete_all_rows() is used, it is not possible to log the @@ -495,7 +493,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, table->quick_keys.clear_all(); // Can't use 'only index' select=make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error); - if (error) + if (unlikely(error)) DBUG_RETURN(TRUE); if ((select && select->check_quick(thd, safe_update, limit)) || !limit) { @@ -511,7 +509,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, Currently they rely on the user checking DA for errors when unwinding the stack after calling Item::val_xxx(). 
*/ - if (thd->is_error()) + if (unlikely(thd->is_error())) DBUG_RETURN(TRUE); my_ok(thd, 0); DBUG_RETURN(0); // Nothing to delete @@ -662,10 +660,10 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, else error= init_read_record_idx(&info, thd, table, 1, query_plan.index, reverse); - if (error) + if (unlikely(error)) goto got_error; - if (init_ftfuncs(thd, select_lex, 1)) + if (unlikely(init_ftfuncs(thd, select_lex, 1))) goto got_error; table->mark_columns_needed_for_delete(); @@ -676,9 +674,9 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, if (with_select) { - if (result->send_result_set_metadata(select_lex->item_list, - Protocol::SEND_NUM_ROWS | - Protocol::SEND_EOF)) + if (unlikely(result->send_result_set_metadata(select_lex->item_list, + Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF))) goto cleanup; } @@ -703,7 +701,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, if (record_should_be_deleted(thd, table, select, explain, truncate_history)) { table->file->position(table->record[0]); - if ((error= deltempfile->unique_add((char*) table->file->ref))) + if (unlikely((error= + deltempfile->unique_add((char*) table->file->ref)))) { error= 1; goto terminate_delete; @@ -713,8 +712,10 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, } } end_read_record(&info); - if (deltempfile->get(table) || table->file->ha_index_or_rnd_end() || - init_read_record(&info, thd, table, 0, &deltempfile->sort, 0, 1, false)) + if (unlikely(deltempfile->get(table)) || + unlikely(table->file->ha_index_or_rnd_end()) || + unlikely(init_read_record(&info, thd, table, 0, &deltempfile->sort, 0, + 1, false))) { error= 1; goto terminate_delete; @@ -723,8 +724,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, } THD_STAGE_INFO(thd, stage_updating); - while (!(error=info.read_record()) && !thd->killed && - ! thd->is_error()) + while (likely(!(error=info.read_record())) && likely(!thd->killed) && + likely(!thd->is_error())) { if (delete_while_scanning) delete_record= record_should_be_deleted(thd, table, select, explain, @@ -746,7 +747,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, } error= table->delete_row(); - if (!error) + if (likely(!error)) { deleted++; if (!truncate_history && table->triggers && @@ -777,7 +778,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, Don't try unlocking the row if skip_record reported an error since in this case the transaction might have been rolled back already. 
*/ - else if (!thd->is_error()) + else if (likely(!thd->is_error())) table->file->unlock_row(); // Row failed selection, release lock on it else break; @@ -785,9 +786,9 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, terminate_delete: killed_status= thd->killed; - if (killed_status != NOT_KILLED || thd->is_error()) + if (unlikely(killed_status != NOT_KILLED || thd->is_error())) error= 1; // Aborted - if (will_batch && (loc_error= table->file->end_bulk_delete())) + if (will_batch && unlikely((loc_error= table->file->end_bulk_delete()))) { if (error != 1) table->file->print_error(loc_error,MYF(0)); @@ -826,7 +827,7 @@ cleanup: thd->transaction.all.modified_non_trans_table= TRUE; /* See similar binlogging code in sql_update.cc, for comments */ - if ((error < 0) || thd->transaction.stmt.modified_non_trans_table) + if (likely((error < 0) || thd->transaction.stmt.modified_non_trans_table)) { if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open()) { @@ -857,7 +858,7 @@ cleanup: } DBUG_ASSERT(transactional_table || !deleted || thd->transaction.stmt.modified_non_trans_table); - if (error < 0 || + if (likely(error < 0) || (thd->lex->ignore && !thd->is_error() && !thd->is_fatal_error)) { if (thd->lex->analyze_stmt) @@ -938,7 +939,7 @@ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, select_lex->leaf_tables, FALSE, DELETE_ACL, SELECT_ACL, TRUE)) DBUG_RETURN(TRUE); - if (table_list->vers_conditions) + if (table_list->vers_conditions.is_set()) { if (table_list->is_view()) { @@ -948,7 +949,8 @@ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, if (select_lex->vers_setup_conds(thd, table_list)) DBUG_RETURN(true); } - if ((wild_num && setup_wild(thd, table_list, field_list, NULL, wild_num)) || + if ((wild_num && setup_wild(thd, table_list, field_list, NULL, wild_num, + &select_lex->hidden_bit_fields)) || setup_fields(thd, Ref_ptr_array(), field_list, MARK_COLUMNS_READ, NULL, NULL, 0) || setup_conds(thd, table_list, select_lex->leaf_tables, conds) || @@ -1122,7 +1124,8 @@ multi_delete::initialize_tables(JOIN *join) Unique **tempfiles_ptr; DBUG_ENTER("initialize_tables"); - if ((thd->variables.option_bits & OPTION_SAFE_UPDATES) && error_if_full_join(join)) + if (unlikely((thd->variables.option_bits & OPTION_SAFE_UPDATES) && + error_if_full_join(join))) DBUG_RETURN(1); table_map tables_to_delete_from=0; @@ -1132,7 +1135,7 @@ multi_delete::initialize_tables(JOIN *join) TABLE_LIST *tbl= walk->correspondent_table->find_table_for_update(); tables_to_delete_from|= tbl->table->map; if (delete_while_scanning && - unique_table(thd, tbl, join->tables_list, false)) + unique_table(thd, tbl, join->tables_list, 0)) { /* If the table we are going to delete from appears @@ -1252,7 +1255,7 @@ int multi_delete::send_data(List &values) table->status|= STATUS_DELETED; error= table->delete_row(); - if (!error) + if (likely(!error)) { deleted++; if (!table->file->has_transactions()) @@ -1275,7 +1278,7 @@ int multi_delete::send_data(List &values) else { error=tempfiles[secure_counter]->unique_add((char*) table->file->ref); - if (error) + if (unlikely(error)) { error= 1; // Fatal error DBUG_RETURN(1); @@ -1371,19 +1374,19 @@ int multi_delete::do_deletes() { TABLE *table = table_being_deleted->table; int local_error; - if (tempfiles[counter]->get(table)) + if (unlikely(tempfiles[counter]->get(table))) DBUG_RETURN(1); local_error= do_table_deletes(table, &tempfiles[counter]->sort, thd->lex->ignore); - if (thd->killed && !local_error) + if (unlikely(thd->killed) && likely(!local_error)) 
DBUG_RETURN(1); - if (local_error == -1) // End of file - local_error = 0; + if (unlikely(local_error == -1)) // End of file + local_error= 0; - if (local_error) + if (unlikely(local_error)) DBUG_RETURN(local_error); } DBUG_RETURN(0); @@ -1413,27 +1416,23 @@ int multi_delete::do_table_deletes(TABLE *table, SORT_INFO *sort_info, ha_rows last_deleted= deleted; DBUG_ENTER("do_deletes_for_table"); - if (init_read_record(&info, thd, table, NULL, sort_info, 0, 1, FALSE)) + if (unlikely(init_read_record(&info, thd, table, NULL, sort_info, 0, 1, + FALSE))) DBUG_RETURN(1); - /* - Ignore any rows not found in reference tables as they may already have - been deleted by foreign key handling - */ - info.ignore_not_found_rows= 1; bool will_batch= !table->file->start_bulk_delete(); - while (!(local_error= info.read_record()) && !thd->killed) + while (likely(!(local_error= info.read_record())) && likely(!thd->killed)) { if (table->triggers && - table->triggers->process_triggers(thd, TRG_EVENT_DELETE, - TRG_ACTION_BEFORE, FALSE)) + unlikely(table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_BEFORE, FALSE))) { local_error= 1; break; } local_error= table->delete_row(); - if (local_error && !ignore) + if (unlikely(local_error) && !ignore) { table->file->print_error(local_error, MYF(0)); break; @@ -1444,7 +1443,7 @@ int multi_delete::do_table_deletes(TABLE *table, SORT_INFO *sort_info, during ha_delete_row. Also, don't execute the AFTER trigger if the row operation failed. */ - if (!local_error) + if (unlikely(!local_error)) { deleted++; if (table->triggers && @@ -1459,7 +1458,7 @@ int multi_delete::do_table_deletes(TABLE *table, SORT_INFO *sort_info, if (will_batch) { int tmp_error= table->file->end_bulk_delete(); - if (tmp_error && !local_error) + if (unlikely(tmp_error) && !local_error) { local_error= tmp_error; table->file->print_error(local_error, MYF(0)); @@ -1507,28 +1506,30 @@ bool multi_delete::send_eof() { query_cache_invalidate3(thd, delete_tables, 1); } - if ((local_error == 0) || thd->transaction.stmt.modified_non_trans_table) + if (likely((local_error == 0) || + thd->transaction.stmt.modified_non_trans_table)) { if(WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open()) { int errcode= 0; - if (local_error == 0) + if (likely(local_error == 0)) thd->clear_error(); else errcode= query_error_code(thd, killed_status == NOT_KILLED); - if (thd->binlog_query(THD::ROW_QUERY_TYPE, - thd->query(), thd->query_length(), - transactional_tables, FALSE, FALSE, errcode) && + if (unlikely(thd->binlog_query(THD::ROW_QUERY_TYPE, + thd->query(), thd->query_length(), + transactional_tables, FALSE, FALSE, + errcode)) && !normal_tables) { local_error=1; // Log write failed: roll back the SQL statement } } } - if (local_error != 0) + if (unlikely(local_error != 0)) error_handled= TRUE; // to force early leave from ::abort_result_set() - if (!local_error && !thd->lex->analyze_stmt) + if (likely(!local_error && !thd->lex->analyze_stmt)) { ::my_ok(thd, deleted); } diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 2df740b6811..944fc837572 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -674,7 +674,8 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) table reference from a subquery for this. 
*/ DBUG_ASSERT(derived->with->get_sq_rec_ref()); - if (mysql_derived_prepare(lex->thd, lex, derived->with->get_sq_rec_ref())) + if (unlikely(mysql_derived_prepare(lex->thd, lex, + derived->with->get_sq_rec_ref()))) DBUG_RETURN(TRUE); } @@ -698,7 +699,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) &derived->alias, FALSE, FALSE, FALSE, 0); thd->create_tmp_table_for_derived= FALSE; - if (!res && !derived->table) + if (likely(!res) && !derived->table) { derived->derived_result->set_unit(unit); derived->table= derived->derived_result->table; @@ -747,8 +748,6 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) } } - unit->derived= derived; - /* Above cascade call of prepare is important for PS protocol, but after it is called we can check if we really need prepare for this derived @@ -766,7 +765,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_DERIVED; // st_select_lex_unit::prepare correctly work for single select - if ((res= unit->prepare(thd, derived->derived_result, 0))) + if ((res= unit->prepare(derived, derived->derived_result, 0))) goto exit; if (derived->with && (res= derived->with->rename_columns_of_derived_unit(thd, unit))) diff --git a/sql/sql_do.cc b/sql/sql_do.cc index 20a7aa75590..2a4e43ab78a 100644 --- a/sql/sql_do.cc +++ b/sql/sql_do.cc @@ -35,7 +35,7 @@ bool mysql_do(THD *thd, List &values) (void) value->is_null(); free_underlaid_joins(thd, &thd->lex->select_lex); - if (thd->is_error()) + if (unlikely(thd->is_error())) { /* Rollback the effect of the statement, since next instruction diff --git a/sql/sql_error.cc b/sql/sql_error.cc index 67440aeed33..d6f5b99eef6 100644 --- a/sql/sql_error.cc +++ b/sql/sql_error.cc @@ -337,7 +337,7 @@ Diagnostics_area::set_ok_status(ulonglong affected_rows, In production, refuse to overwrite an error or a custom response with an OK packet. */ - if (is_error() || is_disabled()) + if (unlikely(is_error() || is_disabled())) return; /* When running a bulk operation, m_status will be DA_OK for the first @@ -377,7 +377,7 @@ Diagnostics_area::set_eof_status(THD *thd) In production, refuse to overwrite an error or a custom response with an EOF packet. 
*/ - if (is_error() || is_disabled()) + if (unlikely(is_error() || is_disabled())) return; /* diff --git a/sql/sql_expression_cache.cc b/sql/sql_expression_cache.cc index 15c6cc57efb..3b6b5993073 100644 --- a/sql/sql_expression_cache.cc +++ b/sql/sql_expression_cache.cc @@ -270,10 +270,11 @@ my_bool Expression_cache_tmptable::put_value(Item *value) *(items.head_ref())= value; fill_record(table_thd, cache_table, cache_table->field, items, TRUE, TRUE); - if (table_thd->is_error()) + if (unlikely(table_thd->is_error())) goto err;; - if ((error= cache_table->file->ha_write_tmp_row(cache_table->record[0]))) + if (unlikely((error= + cache_table->file->ha_write_tmp_row(cache_table->record[0])))) { /* create_myisam_from_heap will generate error if needed */ if (cache_table->file->is_fatal_error(error, HA_CHECK_DUP)) diff --git a/sql/sql_get_diagnostics.cc b/sql/sql_get_diagnostics.cc index e7ab6cc3c75..6a3cec79160 100644 --- a/sql/sql_get_diagnostics.cc +++ b/sql/sql_get_diagnostics.cc @@ -69,7 +69,7 @@ Sql_cmd_get_diagnostics::execute(THD *thd) const char *sqlstate= new_stmt_da.get_sqlstate(); /* In case of a fatal error, set it into the original DA.*/ - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) { save_stmt_da->set_error_status(sql_errno, message, sqlstate, NULL); DBUG_RETURN(true); @@ -81,7 +81,7 @@ Sql_cmd_get_diagnostics::execute(THD *thd) message); /* Appending might have failed. */ - if (! (rv= thd->is_error())) + if (unlikely(!(rv= thd->is_error()))) thd->get_stmt_da()->set_ok_status(0, 0, NULL); DBUG_RETURN(rv); diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index ddc9c4a99d7..faf9fcbe906 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -195,13 +195,14 @@ static void mysql_ha_close_childs(THD *thd, TABLE_LIST *current_table_list, static void mysql_ha_close_table(SQL_HANDLER *handler) { + DBUG_ENTER("mysql_ha_close_table"); THD *thd= handler->thd; TABLE *table= handler->table; TABLE_LIST *current_table_list= NULL, *next_global; /* check if table was already closed */ if (!table) - return; + DBUG_VOID_RETURN; if ((next_global= table->file->get_next_global_for_child())) current_table_list= next_global->parent_l; @@ -232,6 +233,7 @@ static void mysql_ha_close_table(SQL_HANDLER *handler) } my_free(handler->lock); handler->init(); + DBUG_VOID_RETURN; } /* @@ -345,7 +347,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen) error= (thd->open_temporary_tables(tables) || open_tables(thd, &tables, &counter, 0)); - if (error) + if (unlikely(error)) goto err; table= tables->table; @@ -371,7 +373,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen) /* The ticket returned is within a savepoint. Make a copy. 
*/ error= thd->mdl_context.clone_ticket(&table_list->mdl_request); table_list->table->mdl_ticket= table_list->mdl_request.ticket; - if (error) + if (unlikely(error)) goto err; } } @@ -426,8 +428,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen) thd->set_n_backup_active_arena(&sql_handler->arena, &backup_arena); error= table->fill_item_list(&sql_handler->fields); thd->restore_active_arena(&sql_handler->arena, &backup_arena); - - if (error) + if (unlikely(error)) goto err; /* Always read all columns */ @@ -619,7 +620,7 @@ static SQL_HANDLER *mysql_ha_find_handler(THD *thd, const LEX_CSTRING *name) static bool mysql_ha_fix_cond_and_key(SQL_HANDLER *handler, enum enum_ha_read_modes mode, const char *keyname, - List *key_expr, + List *key_expr, enum ha_rkey_function ha_rkey_mode, Item *cond, bool in_prepare) { THD *thd= handler->thd; @@ -661,6 +662,18 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler, Item *item; key_part_map keypart_map; uint key_len; + const KEY *c_key= table->s->key_info + handler->keyno; + + if ((c_key->flags & HA_SPATIAL) || + c_key->algorithm == HA_KEY_ALG_FULLTEXT || + (ha_rkey_mode != HA_READ_KEY_EXACT && + (table->file->index_flags(handler->keyno, 0, TRUE) & + (HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE)) == 0)) + { + my_error(ER_KEY_DOESNT_SUPPORT, MYF(0), + table->file->index_type(handler->keyno), keyinfo->name); + return 1; + } if (key_expr->elements > keyinfo->user_defined_key_parts) { @@ -668,6 +681,16 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler, keyinfo->user_defined_key_parts); return 1; } + + if (key_expr->elements < keyinfo->user_defined_key_parts && + (table->file->index_flags(handler->keyno, 0, TRUE) & + HA_ONLY_WHOLE_INDEX)) + { + my_error(ER_KEY_DOESNT_SUPPORT, MYF(0), + table->file->index_type(handler->keyno), keyinfo->name); + return 1; + } + for (keypart_map= key_len=0 ; (item=it_ke++) ; key_part++) { my_bitmap_map *old_map; @@ -838,11 +861,12 @@ retry: goto retry; } - if (lock_error) + if (unlikely(lock_error)) goto err0; // mysql_lock_tables() printed error message already } - if (mysql_ha_fix_cond_and_key(handler, mode, keyname, key_expr, cond, 0)) + if (mysql_ha_fix_cond_and_key(handler, mode, keyname, key_expr, + ha_rkey_mode, cond, 0)) goto err; mode= handler->mode; keyno= handler->keyno; @@ -880,14 +904,14 @@ retry: case RFIRST: if (keyname) { - if (!(error= table->file->ha_index_or_rnd_end()) && - !(error= table->file->ha_index_init(keyno, 1))) + if (likely(!(error= table->file->ha_index_or_rnd_end())) && + likely(!(error= table->file->ha_index_init(keyno, 1)))) error= table->file->ha_index_first(table->record[0]); } else { - if (!(error= table->file->ha_index_or_rnd_end()) && - !(error= table->file->ha_rnd_init(1))) + if (likely(!(error= table->file->ha_index_or_rnd_end())) && + likely(!(error= table->file->ha_rnd_init(1)))) error= table->file->ha_rnd_next(table->record[0]); } mode= RNEXT; @@ -906,8 +930,8 @@ retry: /* else fall through */ case RLAST: DBUG_ASSERT(keyname != 0); - if (!(error= table->file->ha_index_or_rnd_end()) && - !(error= table->file->ha_index_init(keyno, 1))) + if (likely(!(error= table->file->ha_index_or_rnd_end())) && + likely(!(error= table->file->ha_index_init(keyno, 1)))) error= table->file->ha_index_last(table->record[0]); mode=RPREV; break; @@ -921,13 +945,13 @@ retry: { DBUG_ASSERT(keyname != 0); - if (!(key= (uchar*) thd->calloc(ALIGN_SIZE(handler->key_len)))) + if (unlikely(!(key= (uchar*) thd->calloc(ALIGN_SIZE(handler->key_len))))) goto err; - if ((error= 
table->file->ha_index_or_rnd_end())) + if (unlikely((error= table->file->ha_index_or_rnd_end()))) break; key_copy(key, table->record[0], table->key_info + keyno, handler->key_len); - if (!(error= table->file->ha_index_init(keyno, 1))) + if (unlikely(!(error= table->file->ha_index_init(keyno, 1)))) error= table->file->ha_index_read_map(table->record[0], key, handler->keypart_map, ha_rkey_mode); @@ -940,10 +964,8 @@ retry: goto err; } - if (error) + if (unlikely(error)) { - if (error == HA_ERR_RECORD_DELETED) - continue; if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) { /* Don't give error in the log file for some expected problems */ @@ -1003,14 +1025,16 @@ err0: SQL_HANDLER *mysql_ha_read_prepare(THD *thd, TABLE_LIST *tables, enum enum_ha_read_modes mode, const char *keyname, - List *key_expr, Item *cond) + List *key_expr, enum ha_rkey_function ha_rkey_mode, + Item *cond) { SQL_HANDLER *handler; DBUG_ENTER("mysql_ha_read_prepare"); if (!(handler= mysql_ha_find_handler(thd, &tables->alias))) DBUG_RETURN(0); tables->table= handler->table; // This is used by fix_fields - if (mysql_ha_fix_cond_and_key(handler, mode, keyname, key_expr, cond, 1)) + if (mysql_ha_fix_cond_and_key(handler, mode, keyname, key_expr, + ha_rkey_mode, cond, 1)) DBUG_RETURN(0); DBUG_RETURN(handler); } diff --git a/sql/sql_handler.h b/sql/sql_handler.h index ffefec91fad..4c16f7e5c57 100644 --- a/sql/sql_handler.h +++ b/sql/sql_handler.h @@ -80,5 +80,6 @@ void mysql_ha_rm_temporary_tables(THD *thd); SQL_HANDLER *mysql_ha_read_prepare(THD *thd, TABLE_LIST *tables, enum enum_ha_read_modes mode, const char *keyname, - List *key_expr, Item *cond); + List *key_expr, enum ha_rkey_function ha_rkey_mode, + Item *cond); #endif diff --git a/sql/sql_help.cc b/sql/sql_help.cc index da38a2caf94..085e54dbc90 100644 --- a/sql/sql_help.cc +++ b/sql/sql_help.cc @@ -618,8 +618,9 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, table->covering_keys.clear_all(); SQL_SELECT *res= make_select(table, 0, 0, cond, 0, 0, error); - if (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR)) || - (res && res->quick && res->quick->reset())) + if (unlikely(*error) || + (likely(res) && unlikely(res->check_quick(thd, 0, HA_POS_ERROR))) || + (likely(res) && res->quick && unlikely(res->quick->reset()))) { delete res; res=0; @@ -658,7 +659,7 @@ SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, size_t mlen, pfname->charset()), new (mem_root) Item_string_ascii(thd, "\\"), FALSE); - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) return 0; // OOM return prepare_simple_select(thd, cond, table, error); } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 109f4124ce3..a1d7f0f907c 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -89,7 +89,7 @@ static int write_delayed(THD *thd, TABLE *table, enum_duplicates duplic, LEX_STRING query, bool ignore, bool log_on); static void end_delayed_insert(THD *thd); pthread_handler_t handle_delayed_insert(void *arg); -static void unlink_blobs(register TABLE *table); +static void unlink_blobs(TABLE *table); #endif static bool check_view_insertability(THD *thd, TABLE_LIST *view); @@ -749,12 +749,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, table_list->table_name.str); DBUG_RETURN(TRUE); } - /* - mark the table_list as a target for insert, to skip the DT/view prepare phase - for correct access rights checks - TODO: remove this hack - */ - table_list->skip_prepare_derived= TRUE; if (table_list->lock_type == TL_WRITE_DELAYED) { @@ -963,8 +957,9 @@ bool 
mysql_insert(THD *thd,TABLE_LIST *table_list, */ restore_record(table,s->default_values); // Get empty record table->reset_default_fields(); - if (fill_record_n_invoke_before_triggers(thd, table, fields, *values, 0, - TRG_EVENT_INSERT)) + if (unlikely(fill_record_n_invoke_before_triggers(thd, table, fields, + *values, 0, + TRG_EVENT_INSERT))) { if (values_list.elements != 1 && ! thd->is_error()) { @@ -987,7 +982,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, INSERT INTO t1 VALUES (values) */ if (thd->lex->used_tables || // Column used in values() - table->s->visible_fields != table->s->fields) + table->s->visible_fields != table->s->fields) restore_record(table,s->default_values); // Get empty record else { @@ -1008,9 +1003,11 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, } } table->reset_default_fields(); - if (fill_record_n_invoke_before_triggers(thd, table, - table->field_to_fill(), - *values, 0, TRG_EVENT_INSERT)) + if (unlikely(fill_record_n_invoke_before_triggers(thd, table, + table-> + field_to_fill(), + *values, 0, + TRG_EVENT_INSERT))) { if (values_list.elements != 1 && ! thd->is_error()) { @@ -1023,16 +1020,16 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, } /* - with triggers a field can get a value *conditionally*, so we have to repeat - has_no_default_value() check for every row + with triggers a field can get a value *conditionally*, so we have to + repeat has_no_default_value() check for every row */ if (table->triggers && table->triggers->has_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE)) { for (Field **f=table->field ; *f ; f++) { - if (!(*f)->has_explicit_value() && - has_no_default_value(thd, *f, table_list)) + if (unlikely(!(*f)->has_explicit_value() && + has_no_default_value(thd, *f, table_list))) { error= 1; goto values_loop_end; @@ -1064,7 +1061,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, else #endif error=write_record(thd, table ,&info); - if (error) + if (unlikely(error)) break; thd->get_stmt_da()->inc_current_row_for_warning(); } @@ -1081,9 +1078,9 @@ values_loop_end: user */ #ifndef EMBEDDED_LIBRARY - if (lock_type == TL_WRITE_DELAYED) + if (unlikely(lock_type == TL_WRITE_DELAYED)) { - if (!error) + if (likely(!error)) { info.copied=values_list.elements; end_delayed_insert(thd); @@ -1097,7 +1094,8 @@ values_loop_end: auto_inc values from the delayed_insert thread as they share TABLE. */ table->file->ha_release_auto_increment(); - if (using_bulk_insert && table->file->ha_end_bulk_insert() && !error) + if (using_bulk_insert && unlikely(table->file->ha_end_bulk_insert()) && + !error) { table->file->print_error(my_errno,MYF(0)); error=1; @@ -1107,7 +1105,7 @@ values_loop_end: transactional_table= table->file->has_transactions(); - if ((changed= (info.copied || info.deleted || info.updated))) + if (likely(changed= (info.copied || info.deleted || info.updated))) { /* Invalidate the table in the query cache if something changed. 
@@ -1212,7 +1210,7 @@ values_loop_end: (!table->triggers || !table->triggers->has_delete_triggers())) table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); - if (error) + if (unlikely(error)) goto abort; if (thd->lex->analyze_stmt) { @@ -1579,7 +1577,8 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, { Item *fake_conds= 0; TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, table_list, table_list->next_global, 1))) + if ((duplicate= unique_table(thd, table_list, table_list->next_global, + CHECK_DUP_ALLOW_DIFFERENT_ALIAS))) { update_non_unique_table_error(table_list, "INSERT", duplicate); DBUG_RETURN(TRUE); @@ -1692,7 +1691,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) if (info->handle_duplicates == DUP_REPLACE || info->handle_duplicates == DUP_UPDATE) { - while ((error=table->file->ha_write_row(table->record[0]))) + while (unlikely(error=table->file->ha_write_row(table->record[0]))) { uint key_nr; /* @@ -1725,7 +1724,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) } goto err; } - if ((int) (key_nr = table->file->get_dup_key(error)) < 0) + if (unlikely((int) (key_nr = table->file->get_dup_key(error)) < 0)) { error= HA_ERR_FOUND_DUPP_KEY; /* Database can't find key */ goto err; @@ -1836,8 +1835,8 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) info->touched++; if (different_records) { - if ((error=table->file->ha_update_row(table->record[1], - table->record[0])) && + if (unlikely(error=table->file->ha_update_row(table->record[1], + table->record[0])) && error != HA_ERR_RECORD_IS_THE_SAME) { if (info->ignore && @@ -1935,11 +1934,11 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) bitmap_set_bit(table->write_set, table->vers_start_field()->field_index); table->vers_start_field()->store(0, false); } - if ((error=table->file->ha_update_row(table->record[1], - table->record[0])) && + if (unlikely(error= table->file->ha_update_row(table->record[1], + table->record[0])) && error != HA_ERR_RECORD_IS_THE_SAME) goto err; - if (error != HA_ERR_RECORD_IS_THE_SAME) + if (likely(!error)) { info->deleted++; if (table->versioned(VERS_TIMESTAMP)) @@ -1947,12 +1946,12 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) store_record(table, record[2]); error= vers_insert_history_row(table); restore_record(table, record[2]); - if (error) + if (unlikely(error)) goto err; } } else - error= 0; + error= 0; // error was HA_ERR_RECORD_IS_THE_SAME thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row); /* Since we pretend that we have done insert we should call @@ -1979,7 +1978,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) table->record[0]); restore_record(table,insert_values); } - if (error) + if (unlikely(error)) goto err; if (!table->versioned(VERS_TIMESTAMP)) info->deleted++; @@ -2019,7 +2018,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) table->write_set != save_write_set) table->column_bitmaps_set(save_read_set, save_write_set); } - else if ((error=table->file->ha_write_row(table->record[0]))) + else if (unlikely((error=table->file->ha_write_row(table->record[0])))) { DEBUG_SYNC(thd, "write_row_noreplace"); if (!info->ignore || @@ -2606,11 +2605,12 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd) share->default_fields) { bool error_reported= FALSE; - if (!(copy->def_vcol_set= (MY_BITMAP*) alloc_root(client_thd->mem_root, - sizeof(MY_BITMAP)))) + if (unlikely(!(copy->def_vcol_set= + (MY_BITMAP*) alloc_root(client_thd->mem_root, + sizeof(MY_BITMAP))))) goto error; - 
- if (parse_vcol_defs(client_thd, client_thd->mem_root, copy, &error_reported)) + if (unlikely(parse_vcol_defs(client_thd, client_thd->mem_root, copy, + &error_reported))) goto error; } @@ -3198,7 +3198,7 @@ pthread_handler_t handle_delayed_insert(void *arg) /* Remove all pointers to data for blob fields so that original table doesn't try to free them */ -static void unlink_blobs(register TABLE *table) +static void unlink_blobs(TABLE *table) { for (Field **ptr=table->field ; *ptr ; ptr++) { @@ -3209,7 +3209,7 @@ static void unlink_blobs(register TABLE *table) /* Free blobs stored in current row */ -static void free_delayed_insert_blobs(register TABLE *table) +static void free_delayed_insert_blobs(TABLE *table) { for (Field **ptr=table->field ; *ptr ; ptr++) { @@ -3221,7 +3221,7 @@ static void free_delayed_insert_blobs(register TABLE *table) /* set value field for blobs to point to data in record */ -static void set_delayed_insert_blobs(register TABLE *table) +static void set_delayed_insert_blobs(TABLE *table) { for (Field **ptr=table->field ; *ptr ; ptr++) { @@ -3362,7 +3362,7 @@ bool Delayed_insert::handle_inserts(void) thd.clear_error(); // reset error for binlog tmp_error= 0; - if (table->vfield) + if (unlikely(table->vfield)) { /* Virtual fields where not calculated by caller as the temporary @@ -3373,7 +3373,7 @@ bool Delayed_insert::handle_inserts(void) VCOL_UPDATE_FOR_WRITE); } - if (tmp_error || write_record(&thd, table, &info)) + if (unlikely(tmp_error) || unlikely(write_record(&thd, table, &info))) { info.error_count++; // Ignore errors thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status); @@ -3420,7 +3420,7 @@ bool Delayed_insert::handle_inserts(void) mysql_cond_broadcast(&cond_client); // If waiting clients THD_STAGE_INFO(&thd, stage_reschedule); mysql_mutex_unlock(&mutex); - if ((error=table->file->extra(HA_EXTRA_NO_CACHE))) + if (unlikely((error=table->file->extra(HA_EXTRA_NO_CACHE)))) { /* This should never happen */ table->file->print_error(error,MYF(0)); @@ -3472,7 +3472,7 @@ bool Delayed_insert::handle_inserts(void) thd.binlog_flush_pending_rows_event(TRUE, has_trans)) goto err; - if ((error=table->file->extra(HA_EXTRA_NO_CACHE))) + if (unlikely((error=table->file->extra(HA_EXTRA_NO_CACHE)))) { // This shouldn't happen table->file->print_error(error,MYF(0)); sql_print_error("%s", thd.get_stmt_da()->message()); @@ -3781,7 +3781,7 @@ select_insert::prepare(List &values, SELECT_LEX_UNIT *u) 0 OK */ -int select_insert::prepare2(void) +int select_insert::prepare2(JOIN *) { DBUG_ENTER("select_insert::prepare2"); if (thd->lex->current_select->options & OPTION_BUFFER_RESULT && @@ -3825,15 +3825,16 @@ int select_insert::send_data(List &values) unit->offset_limit_cnt--; DBUG_RETURN(0); } - if (thd->killed == ABORT_QUERY) + if (unlikely(thd->killed == ABORT_QUERY)) DBUG_RETURN(0); thd->count_cuted_fields= CHECK_FIELD_WARN; // Calculate cuted fields store_values(values); - if (table->default_field && table->update_default_fields(0, info.ignore)) + if (table->default_field && + unlikely(table->update_default_fields(0, info.ignore))) DBUG_RETURN(1); thd->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL; - if (thd->is_error()) + if (unlikely(thd->is_error())) { table->auto_increment_field_not_null= FALSE; DBUG_RETURN(1); @@ -3854,7 +3855,7 @@ int select_insert::send_data(List &values) table->vers_write= table->versioned(); table->auto_increment_field_not_null= FALSE; - if (!error) + if (likely(!error)) { if (table->triggers || info.handle_duplicates == DUP_UPDATE) { @@ 
-3913,18 +3914,18 @@ bool select_insert::prepare_eof() DBUG_PRINT("enter", ("trans_table=%d, table_type='%s'", trans_table, table->file->table_type())); - error = IF_WSREP((thd->wsrep_conflict_state == MUST_ABORT || - thd->wsrep_conflict_state == CERT_FAILURE) ? -1 :, ) - (thd->locked_tables_mode <= LTM_LOCK_TABLES ? - table->file->ha_end_bulk_insert() : 0); + error= (IF_WSREP((thd->wsrep_conflict_state == MUST_ABORT || + thd->wsrep_conflict_state == CERT_FAILURE) ? -1 :, ) + (thd->locked_tables_mode <= LTM_LOCK_TABLES ? + table->file->ha_end_bulk_insert() : 0)); - if (!error && thd->is_error()) + if (likely(!error) && unlikely(thd->is_error())) error= thd->get_stmt_da()->sql_errno(); table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); - if ((changed= (info.copied || info.deleted || info.updated))) + if (likely((changed= (info.copied || info.deleted || info.updated)))) { /* We must invalidate the table in the query cache before binlog writing @@ -3948,10 +3949,10 @@ bool select_insert::prepare_eof() ha_autocommit_or_rollback() is issued below. */ if ((WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open()) && - (!error || thd->transaction.stmt.modified_non_trans_table)) + (likely(!error) || thd->transaction.stmt.modified_non_trans_table)) { int errcode= 0; - if (!error) + if (likely(!error)) thd->clear_error(); else errcode= query_error_code(thd, killed_status == NOT_KILLED); @@ -3965,7 +3966,7 @@ bool select_insert::prepare_eof() } table->file->ha_release_auto_increment(); - if (error) + if (unlikely(error)) { table->file->print_error(error,MYF(0)); DBUG_RETURN(true); @@ -4277,9 +4278,9 @@ TABLE *select_create::create_table_from_items(THD *thd, else create_table->table= 0; // Create failed - if (!(table= create_table->table)) + if (unlikely(!(table= create_table->table))) { - if (!thd->is_error()) // CREATE ... IF NOT EXISTS + if (likely(!thd->is_error())) // CREATE ... IF NOT EXISTS my_ok(thd); // succeed, but did nothing DBUG_RETURN(NULL); } @@ -4293,8 +4294,8 @@ TABLE *select_create::create_table_from_items(THD *thd, since it won't wait for the table lock (we have exclusive metadata lock on the table) and thus can't get aborted. */ - if (! 
((*lock)= mysql_lock_tables(thd, &table, 1, 0)) || - hooks->postlock(&table, 1)) + if (unlikely(!((*lock)= mysql_lock_tables(thd, &table, 1, 0)) || + hooks->postlock(&table, 1))) { /* purecov: begin tested */ /* @@ -4364,14 +4365,15 @@ select_create::prepare(List &_values, SELECT_LEX_UNIT *u) create_table->next_global= save_next_global; - if (error) + if (unlikely(error)) return error; TABLE const *const table = *tables; if (thd->is_current_stmt_binlog_format_row() && !table->s->tmp_table) { - if (int error= ptr->binlog_show_create_table(tables, count)) + int error; + if (unlikely((error= ptr->binlog_show_create_table(tables, count)))) return error; } return 0; @@ -4579,17 +4581,17 @@ bool select_create::send_eof() #ifdef WITH_WSREP if (WSREP_ON) { - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); if (thd->wsrep_conflict_state != NO_CONFLICT) { WSREP_DEBUG("select_create commit failed, thd: %lld err: %d %s", (longlong) thd->thread_id, thd->wsrep_conflict_state, thd->query()); - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); abort_result_set(); DBUG_RETURN(true); } - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); } #endif /* WITH_WSREP */ } diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc index fb8660aa79d..53c5e992ca9 100644 --- a/sql/sql_join_cache.cc +++ b/sql/sql_join_cache.cc @@ -2248,7 +2248,7 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last) goto finish2; /* Prepare to retrieve all records of the joined table */ - if ((error= join_tab_scan->open())) + if (unlikely((error= join_tab_scan->open()))) { /* TODO: if we get here, we will assert in net_send_statement(). Add test @@ -2259,10 +2259,9 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last) while (!(error= join_tab_scan->next())) { - if (join->thd->check_killed()) + if (unlikely(join->thd->check_killed())) { /* The user has aborted the execution of the query */ - join->thd->send_kill_message(); rc= NESTED_LOOP_KILLED; goto finish; } @@ -2411,7 +2410,7 @@ enum_nested_loop_state JOIN_CACHE::generate_full_extensions(uchar *rec_ptr) DBUG_RETURN(rc); } } - else if (join->thd->is_error()) + else if (unlikely(join->thd->is_error())) rc= NESTED_LOOP_ERROR; DBUG_RETURN(rc); } @@ -2533,10 +2532,9 @@ enum_nested_loop_state JOIN_CACHE::join_null_complements(bool skip_last) for ( ; cnt; cnt--) { - if (join->thd->check_killed()) + if (unlikely(join->thd->check_killed())) { /* The user has aborted the execution of the query */ - join->thd->send_kill_message(); rc= NESTED_LOOP_KILLED; goto finish; } @@ -3392,7 +3390,7 @@ int JOIN_TAB_SCAN::next() while (!err && select && (skip_rc= select->skip_record(thd)) <= 0) { - if (thd->check_killed() || skip_rc < 0) + if (unlikely(thd->check_killed()) || skip_rc < 0) return 1; /* Move to the next record if the last retrieved record does not diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 93810d2041c..779b6d9c3c1 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -31,6 +31,7 @@ #include "sql_select.h" #include "sql_cte.h" #include "sql_signal.h" +#include "sql_partition.h" void LEX::parse_error(uint err_number) @@ -39,9 +40,6 @@ void LEX::parse_error(uint err_number) } -static int lex_one_token(YYSTYPE *yylval, THD *thd); - - /** LEX_STRING constant for null-string to be used in parser and other places. 
*/ @@ -125,7 +123,7 @@ const char * index_hint_type_name[] = inline int lex_casecmp(const char *s, const char *t, uint len) { while (len-- != 0 && - to_upper_lex[(uchar) *s++] == to_upper_lex[(uchar) *t++]) ; + to_upper_lex[(uchar) *s++] == to_upper_lex[(uchar) *t++]) ; return (int) len+1; } @@ -146,7 +144,7 @@ void lex_init(void) void lex_free(void) -{ // Call this when daemon ends +{ // Call this when daemon ends DBUG_ENTER("lex_free"); DBUG_VOID_RETURN; } @@ -189,14 +187,14 @@ init_lex_with_single_table(THD *thd, TABLE *table, LEX *lex) thd->lex= lex; lex_start(thd); context->init(); - if ((!(table_ident= new Table_ident(thd, - &table->s->db, - &table->s->table_name, - TRUE))) || - (!(table_list= select_lex->add_table_to_list(thd, - table_ident, - NULL, - 0)))) + if (unlikely((!(table_ident= new Table_ident(thd, + &table->s->db, + &table->s->table_name, + TRUE)))) || + (unlikely(!(table_list= select_lex->add_table_to_list(thd, + table_ident, + NULL, + 0))))) return TRUE; context->resolve_in_table_list_only(table_list); lex->use_only_table_context= TRUE; @@ -251,8 +249,8 @@ st_parsing_options::reset() */ bool Lex_input_stream::init(THD *thd, - char* buff, - size_t length) + char* buff, + size_t length) { DBUG_EXECUTE_IF("bug42064_simulate_oom", DBUG_SET("+d,simulate_out_of_memory");); @@ -284,7 +282,6 @@ void Lex_input_stream::reset(char *buffer, size_t length) { yylineno= 1; - yylval= NULL; lookahead_token= -1; lookahead_yylval= NULL; m_ptr= buffer; @@ -420,32 +417,18 @@ void Lex_input_stream::body_utf8_append(const char *ptr) operation. */ -void Lex_input_stream::body_utf8_append_ident(THD *thd, - const LEX_CSTRING *txt, - const char *end_ptr) +void +Lex_input_stream::body_utf8_append_ident(THD *thd, + const Lex_string_with_metadata_st *txt, + const char *end_ptr) { if (!m_cpp_utf8_processed_ptr) return; LEX_CSTRING utf_txt; - CHARSET_INFO *txt_cs= thd->charset(); - - if (!my_charset_same(txt_cs, &my_charset_utf8_general_ci)) - { - LEX_STRING to; - thd->convert_string(&to, - &my_charset_utf8_general_ci, - txt->str, (uint) txt->length, - txt_cs); - utf_txt.str= to.str; - utf_txt.length= to.length; - - } - else - utf_txt= *txt; + thd->make_text_string_sys(&utf_txt, txt); // QQ: check return value? /* NOTE: utf_txt.length is in bytes, not in symbols. 
*/ - memcpy(m_body_utf8_ptr, utf_txt.str, utf_txt.length); m_body_utf8_ptr += utf_txt.length; *m_body_utf8_ptr= 0; @@ -669,11 +652,11 @@ void lex_start(THD *thd) void LEX::start(THD *thd_arg) { DBUG_ENTER("LEX::start"); - DBUG_PRINT("info", ("This: %p thd_arg->lex: %p thd_arg->stmt_lex: %p", - this, thd_arg->lex, thd_arg->stmt_lex)); + DBUG_PRINT("info", ("This: %p thd_arg->lex: %p", this, thd_arg->lex)); thd= unit.thd= thd_arg; - + stmt_lex= this; // default, should be rewritten for VIEWs And CTEs + DBUG_ASSERT(!explain); context_stack.empty(); @@ -842,23 +825,27 @@ Yacc_state::~Yacc_state() } } -static int find_keyword(Lex_input_stream *lip, uint len, bool function) +int Lex_input_stream::find_keyword(Lex_ident_cli_st *kwd, + uint len, bool function) { - const char *tok= lip->get_tok_start(); + const char *tok= m_tok_start; SYMBOL *symbol= get_hash_symbol(tok, len, function); if (symbol) { - lip->yylval->symbol.symbol=symbol; - lip->yylval->symbol.str= (char*) tok; - lip->yylval->symbol.length=len; + kwd->set_keyword(tok, len); + DBUG_ASSERT(tok >= get_buf()); + DBUG_ASSERT(tok < get_end_of_query()); if ((symbol->tok == NOT_SYM) && - (lip->m_thd->variables.sql_mode & MODE_HIGH_NOT_PRECEDENCE)) + (m_thd->variables.sql_mode & MODE_HIGH_NOT_PRECEDENCE)) return NOT2_SYM; - if ((symbol->tok == OR_OR_SYM) && - !(lip->m_thd->variables.sql_mode & MODE_PIPES_AS_CONCAT)) - return OR2_SYM; + if ((symbol->tok == OR2_SYM) && + (m_thd->variables.sql_mode & MODE_PIPES_AS_CONCAT)) + { + return (m_thd->variables.sql_mode & MODE_ORACLE) ? + ORACLE_CONCAT_SYM : MYSQL_CONCAT_SYM; + } return symbol->tok; } @@ -955,54 +942,19 @@ bool is_native_function_with_warn(THD *thd, const LEX_CSTRING *name) /* make a copy of token before ptr and set yytoklen */ -static LEX_CSTRING get_token(Lex_input_stream *lip, uint skip, uint length) +LEX_CSTRING Lex_input_stream::get_token(uint skip, uint length) { LEX_CSTRING tmp; - lip->yyUnget(); // ptr points now after last token char + yyUnget(); // ptr points now after last token char tmp.length= length; - tmp.str= lip->m_thd->strmake(lip->get_tok_start() + skip, tmp.length); + tmp.str= m_thd->strmake(m_tok_start + skip, tmp.length); - lip->m_cpp_text_start= lip->get_cpp_tok_start() + skip; - lip->m_cpp_text_end= lip->m_cpp_text_start + tmp.length; + m_cpp_text_start= m_cpp_tok_start + skip; + m_cpp_text_end= m_cpp_text_start + tmp.length; return tmp; } -/* - todo: - There are no dangerous charsets in mysql for function - get_quoted_token yet. 
But it should be fixed in the - future to operate multichar strings (like ucs2) -*/ - -static LEX_CSTRING get_quoted_token(Lex_input_stream *lip, - uint skip, - uint length, char quote) -{ - LEX_CSTRING tmp; - const char *from, *end; - char *to; - lip->yyUnget(); // ptr points now after last token char - tmp.length= length; - tmp.str= to= (char*) lip->m_thd->alloc(tmp.length+1); - from= lip->get_tok_start() + skip; - end= to+length; - - lip->m_cpp_text_start= lip->get_cpp_tok_start() + skip; - lip->m_cpp_text_end= lip->m_cpp_text_start + length; - - for ( ; to != end; ) - { - if ((*to++= *from++) == quote) - { - from++; // Skip double quotes - lip->m_cpp_text_start++; - } - } - *to= 0; // End null for safety - return tmp; -} - static size_t my_unescape(CHARSET_INFO *cs, char *to, const char *str, const char *end, @@ -1078,16 +1030,16 @@ Lex_input_stream::unescape(CHARSET_INFO *cs, char *to, bool Lex_input_stream::get_text(Lex_string_with_metadata_st *dst, uint sep, int pre_skip, int post_skip) { - reg1 uchar c; + uchar c; uint found_escape=0; CHARSET_INFO *cs= m_thd->charset(); + bool is_8bit= false; - dst->set_8bit(false); while (! eof()) { c= yyGet(); if (c & 0x80) - dst->set_8bit(true); + is_8bit= true; #ifdef USE_MB { int l; @@ -1102,10 +1054,10 @@ bool Lex_input_stream::get_text(Lex_string_with_metadata_st *dst, uint sep, #endif if (c == '\\' && !(m_thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)) - { // Escaped character + { // Escaped character found_escape=1; if (eof()) - return true; + return true; yySkip(); } else if (c == sep) @@ -1113,7 +1065,7 @@ bool Lex_input_stream::get_text(Lex_string_with_metadata_st *dst, uint sep, if (c == yyGet()) // Check if two separators in a row { found_escape=1; // duplicate. Remember for delete - continue; + continue; } else yyUnget(); @@ -1122,7 +1074,7 @@ bool Lex_input_stream::get_text(Lex_string_with_metadata_st *dst, uint sep, const char *str, *end; char *to; - str= get_tok_start(); + str= m_tok_start; end= get_ptr(); /* Extract the text from the token */ str += pre_skip; @@ -1131,23 +1083,24 @@ bool Lex_input_stream::get_text(Lex_string_with_metadata_st *dst, uint sep, if (!(to= (char*) m_thd->alloc((uint) (end - str) + 1))) { - dst->str= ""; // Sql_alloc has set error flag - dst->length= 0; - return true; + dst->set(&empty_clex_str, 0, '\0'); + return true; // Sql_alloc has set error flag } - dst->str= to; - m_cpp_text_start= get_cpp_tok_start() + pre_skip; + m_cpp_text_start= m_cpp_tok_start + pre_skip; m_cpp_text_end= get_cpp_ptr() - post_skip; if (!found_escape) { - memcpy(to, str, dst->length= (end - str)); - to[dst->length]= 0; + size_t len= (end - str); + memcpy(to, str, len); + to[len]= '\0'; + dst->set(to, len, is_8bit, '\0'); } else { - dst->length= unescape(cs, to, str, end, sep); + size_t len= unescape(cs, to, str, end, sep); + dst->set(to, len, is_8bit, '\0'); } return false; } @@ -1176,11 +1129,11 @@ static const uint unsigned_longlong_len=20; static inline uint int_token(const char *str,uint length) { - if (length < long_len) // quick normal case + if (length < long_len) // quick normal case return NUM; bool neg=0; - if (*str == '+') // Remove sign and pre-zeros + if (*str == '+') // Remove sign and pre-zeros { str++; length--; } @@ -1202,9 +1155,9 @@ static inline uint int_token(const char *str,uint length) { if (length == long_len) { - cmp= signed_long_str+1; - smaller=NUM; // If <= signed_long_str - bigger=LONG_NUM; // If >= signed_long_str + cmp= signed_long_str + 1; + smaller= NUM; // If <= signed_long_str + bigger= 
LONG_NUM; // If >= signed_long_str } else if (length < signed_longlong_len) return LONG_NUM; @@ -1212,8 +1165,8 @@ static inline uint int_token(const char *str,uint length) return DECIMAL_NUM; else { - cmp=signed_longlong_str+1; - smaller=LONG_NUM; // If <= signed_longlong_str + cmp= signed_longlong_str + 1; + smaller= LONG_NUM; // If <= signed_longlong_str bigger=DECIMAL_NUM; } } @@ -1259,34 +1212,34 @@ static inline uint int_token(const char *str,uint length) @retval Whether EOF reached before comment is closed. */ -bool consume_comment(Lex_input_stream *lip, int remaining_recursions_permitted) +bool Lex_input_stream::consume_comment(int remaining_recursions_permitted) { - reg1 uchar c; - while (! lip->eof()) + uchar c; + while (!eof()) { - c= lip->yyGet(); + c= yyGet(); if (remaining_recursions_permitted > 0) { - if ((c == '/') && (lip->yyPeek() == '*')) + if ((c == '/') && (yyPeek() == '*')) { - lip->yySkip(); /* Eat asterisk */ - consume_comment(lip, remaining_recursions_permitted-1); + yySkip(); // Eat asterisk + consume_comment(remaining_recursions_permitted - 1); continue; } } if (c == '*') { - if (lip->yyPeek() == '/') + if (yyPeek() == '/') { - lip->yySkip(); /* Eat slash */ + yySkip(); // Eat slash return FALSE; } } if (c == '\n') - lip->yylineno++; + yylineno++; } return TRUE; @@ -1299,31 +1252,42 @@ bool consume_comment(Lex_input_stream *lip, int remaining_recursions_permitted) @param yylval [out] semantic value of the token being parsed (yylval) @param thd THD - - MY_LEX_EOQ Found end of query - - MY_LEX_OPERATOR_OR_IDENT Last state was an ident, text or number - (which can't be followed by a signed number) + - MY_LEX_EOQ Found end of query + - MY_LEX_OPERATOR_OR_IDENT Last state was an ident, text or number + (which can't be followed by a signed number) */ int MYSQLlex(YYSTYPE *yylval, THD *thd) { - Lex_input_stream *lip= & thd->m_parser_state->m_lip; + return thd->m_parser_state->m_lip.lex_token(yylval, thd); +} + + +int ORAlex(YYSTYPE *yylval, THD *thd) +{ + return thd->m_parser_state->m_lip.lex_token(yylval, thd); +} + + +int Lex_input_stream::lex_token(YYSTYPE *yylval, THD *thd) +{ int token; - if (lip->lookahead_token >= 0) + if (lookahead_token >= 0) { /* The next token was already parsed in advance, return it. */ - token= lip->lookahead_token; - lip->lookahead_token= -1; - *yylval= *(lip->lookahead_yylval); - lip->lookahead_yylval= NULL; + token= lookahead_token; + lookahead_token= -1; + *yylval= *(lookahead_yylval); + lookahead_yylval= NULL; return token; } token= lex_one_token(yylval, thd); - lip->add_digest_token(token, yylval); + add_digest_token(token, yylval); switch(token) { case WITH: @@ -1335,7 +1299,7 @@ int MYSQLlex(YYSTYPE *yylval, THD *thd) which sql_yacc.yy can process. */ token= lex_one_token(yylval, thd); - lip->add_digest_token(token, yylval); + add_digest_token(token, yylval); switch(token) { case CUBE_SYM: return WITH_CUBE_SYM; @@ -1347,9 +1311,8 @@ int MYSQLlex(YYSTYPE *yylval, THD *thd) /* Save the token following 'WITH' */ - lip->lookahead_yylval= lip->yylval; - lip->yylval= NULL; - lip->lookahead_token= token; + lookahead_yylval= yylval; + lookahead_token= token; return WITH; } break; @@ -1360,7 +1323,7 @@ int MYSQLlex(YYSTYPE *yylval, THD *thd) * SELECT ... FOR SYSTEM_TIME ... . 
*/ token= lex_one_token(yylval, thd); - lip->add_digest_token(token, yylval); + add_digest_token(token, yylval); switch(token) { case SYSTEM_TIME_SYM: return FOR_SYSTEM_TIME_SYM; @@ -1368,9 +1331,8 @@ int MYSQLlex(YYSTYPE *yylval, THD *thd) /* Save the token following 'FOR_SYM' */ - lip->lookahead_yylval= lip->yylval; - lip->yylval= NULL; - lip->lookahead_token= token; + lookahead_yylval= yylval; + lookahead_token= token; return FOR_SYM; } break; @@ -1379,16 +1341,15 @@ int MYSQLlex(YYSTYPE *yylval, THD *thd) thd->lex->current_select->parsing_place == IN_PART_FUNC) return VALUE_SYM; token= lex_one_token(yylval, thd); - lip->add_digest_token(token, yylval); + add_digest_token(token, yylval); switch(token) { case LESS_SYM: return VALUES_LESS_SYM; case IN_SYM: return VALUES_IN_SYM; default: - lip->lookahead_yylval= lip->yylval; - lip->yylval= NULL; - lip->lookahead_token= token; + lookahead_yylval= yylval; + lookahead_token= token; return VALUES; } break; @@ -1398,71 +1359,64 @@ int MYSQLlex(YYSTYPE *yylval, THD *thd) return token; } -int ORAlex(YYSTYPE *yylval, THD *thd) -{ - return MYSQLlex(yylval, thd); -} -static int lex_one_token(YYSTYPE *yylval, THD *thd) +int Lex_input_stream::lex_one_token(YYSTYPE *yylval, THD *thd) { - reg1 uchar UNINIT_VAR(c); + uchar UNINIT_VAR(c); bool comment_closed; - int tokval, result_state; + int tokval; uint length; enum my_lex_states state; - Lex_input_stream *lip= & thd->m_parser_state->m_lip; LEX *lex= thd->lex; CHARSET_INFO *const cs= thd->charset(); const uchar *const state_map= cs->state_map; const uchar *const ident_map= cs->ident_map; - lip->yylval=yylval; // The global state - - lip->start_token(); - state=lip->next_state; - lip->next_state=MY_LEX_OPERATOR_OR_IDENT; + start_token(); + state= next_state; + next_state= MY_LEX_OPERATOR_OR_IDENT; for (;;) { switch (state) { - case MY_LEX_OPERATOR_OR_IDENT: // Next is operator or keyword - case MY_LEX_START: // Start of token + case MY_LEX_OPERATOR_OR_IDENT: // Next is operator or keyword + case MY_LEX_START: // Start of token // Skip starting whitespace - while(state_map[c= lip->yyPeek()] == MY_LEX_SKIP) + while(state_map[c= yyPeek()] == MY_LEX_SKIP) { - if (c == '\n') - lip->yylineno++; + if (c == '\n') + yylineno++; - lip->yySkip(); + yySkip(); } /* Start of real token */ - lip->restart_token(); - c= lip->yyGet(); + restart_token(); + c= yyGet(); state= (enum my_lex_states) state_map[c]; break; case MY_LEX_ESCAPE: - if (!lip->eof() && lip->yyGet() == 'N') - { // Allow \N as shortcut for NULL - yylval->lex_str.str=(char*) "\\N"; - yylval->lex_str.length=2; - return NULL_SYM; + if (!eof() && yyGet() == 'N') + { // Allow \N as shortcut for NULL + yylval->lex_str.str= (char*) "\\N"; + yylval->lex_str.length= 2; + return NULL_SYM; } /* Fall through */ - case MY_LEX_CHAR: // Unknown or single char token - case MY_LEX_SKIP: // This should not happen + case MY_LEX_CHAR: // Unknown or single char token + case MY_LEX_SKIP: // This should not happen if (c != ')') - lip->next_state= MY_LEX_START; // Allow signed numbers + next_state= MY_LEX_START; // Allow signed numbers return((int) c); case MY_LEX_MINUS_OR_COMMENT: - if (lip->yyPeek() == '-' && - (my_isspace(cs,lip->yyPeekn(1)) || - my_iscntrl(cs,lip->yyPeekn(1)))) + if (yyPeek() == '-' && + (my_isspace(cs,yyPeekn(1)) || + my_iscntrl(cs,yyPeekn(1)))) { state=MY_LEX_COMMENT; break; } - lip->next_state= MY_LEX_START; // Allow signed numbers + next_state= MY_LEX_START; // Allow signed numbers return((int) c); case MY_LEX_PLACEHOLDER: @@ -1472,13 +1426,13 @@ static 
int lex_one_token(YYSTYPE *yylval, THD *thd) its value in a query for the binlog, the query must stay grammatically correct. */ - lip->next_state= MY_LEX_START; // Allow signed numbers - if (lip->stmt_prepare_mode && !ident_map[(uchar) lip->yyPeek()]) + next_state= MY_LEX_START; // Allow signed numbers + if (stmt_prepare_mode && !ident_map[(uchar) yyPeek()]) return(PARAM_MARKER); return((int) c); case MY_LEX_COMMA: - lip->next_state= MY_LEX_START; // Allow signed numbers + next_state= MY_LEX_START; // Allow signed numbers /* Warning: This is a work around, to make the "remember_name" rule in @@ -1488,431 +1442,277 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd) remember_name (see select_item) *before* actually parsing the first token of expr2. */ - lip->restart_token(); + restart_token(); return((int) c); case MY_LEX_IDENT_OR_NCHAR: { uint sep; - if (lip->yyPeek() != '\'') + if (yyPeek() != '\'') { - state= MY_LEX_IDENT; - break; + state= MY_LEX_IDENT; + break; } /* Found N'string' */ - lip->yySkip(); // Skip ' - if (lip->get_text(&yylval->lex_string_with_metadata, - (sep= lip->yyGetLast()), 2, 1)) + yySkip(); // Skip ' + if (get_text(&yylval->lex_string_with_metadata, (sep= yyGetLast()), 2, 1)) { - state= MY_LEX_CHAR; // Read char by char - break; + state= MY_LEX_CHAR; // Read char by char + break; } - lip->body_utf8_append(lip->m_cpp_text_start); - lip->body_utf8_append_escape(thd, &yylval->lex_string_with_metadata, + body_utf8_append(m_cpp_text_start); + body_utf8_append_escape(thd, &yylval->lex_string_with_metadata, national_charset_info, - lip->m_cpp_text_end, sep); + m_cpp_text_end, sep); return(NCHAR_STRING); } case MY_LEX_IDENT_OR_HEX: - if (lip->yyPeek() == '\'') - { // Found x'hex-number' - state= MY_LEX_HEX_NUMBER; - break; + if (yyPeek() == '\'') + { // Found x'hex-number' + state= MY_LEX_HEX_NUMBER; + break; } /* fall through */ case MY_LEX_IDENT_OR_BIN: - if (lip->yyPeek() == '\'') + if (yyPeek() == '\'') { // Found b'bin-number' state= MY_LEX_BIN_NUMBER; break; } /* fall through */ case MY_LEX_IDENT: - const char *start; -#if defined(USE_MB) && defined(USE_MB_IDENT) - if (use_mb(cs)) - { - result_state= IDENT_QUOTED; - int char_length= my_charlen(cs, lip->get_ptr() - 1, - lip->get_end_of_query()); - if (char_length <= 0) - { - state= MY_LEX_CHAR; - continue; - } - lip->skip_binary(char_length - 1); - - while (ident_map[c=lip->yyGet()]) - { - char_length= my_charlen(cs, lip->get_ptr() - 1, - lip->get_end_of_query()); - if (char_length <= 0) - break; - lip->skip_binary(char_length - 1); - } - } - else -#endif - { - for (result_state= c; - ident_map[(uchar) (c= lip->yyGet())]; - result_state|= c) - ; - /* If there were non-ASCII characters, mark that we must convert */ - result_state= result_state & 0x80 ? IDENT_QUOTED : IDENT; - } - length= lip->yyLength(); - start= lip->get_ptr(); - if (lip->ignore_space) - { - /* - If we find a space then this can't be an identifier. We notice this - below by checking start != lex->ptr. - */ - for (; state_map[(uchar) c] == MY_LEX_SKIP ; c= lip->yyGet()) - { - if (c == '\n') - lip->yylineno++; - } - } - if (start == lip->get_ptr() && c == '.' 
&& - ident_map[(uchar) lip->yyPeek()]) - lip->next_state=MY_LEX_IDENT_SEP; - else - { // '(' must follow directly if function - lip->yyUnget(); - if ((tokval = find_keyword(lip, length, c == '('))) - { - lip->next_state= MY_LEX_START; // Allow signed numbers - return(tokval); // Was keyword - } - lip->yySkip(); // next state does a unget - } - yylval->lex_str=get_token(lip, 0, length); - - /* - Note: "SELECT _bla AS 'alias'" - _bla should be considered as a IDENT if charset haven't been found. - So we don't use MYF(MY_WME) with get_charset_by_csname to avoid - producing an error. - */ - - if (yylval->lex_str.str[0] == '_') - { - CHARSET_INFO *cs= get_charset_by_csname(yylval->lex_str.str + 1, - MY_CS_PRIMARY, MYF(0)); - if (cs) - { - yylval->charset= cs; - lip->m_underscore_cs= cs; - - lip->body_utf8_append(lip->m_cpp_text_start, - lip->get_cpp_tok_start() + length); - return(UNDERSCORE_CHARSET); - } - } - - lip->body_utf8_append(lip->m_cpp_text_start); - - lip->body_utf8_append_ident(thd, &yylval->lex_str, lip->m_cpp_text_end); - - return(result_state); // IDENT or IDENT_QUOTED + { + tokval= scan_ident_middle(thd, &yylval->ident_cli, + &yylval->charset, &state); + if (!tokval) + continue; + if (tokval == UNDERSCORE_CHARSET) + m_underscore_cs= yylval->charset; + return tokval; + } case MY_LEX_IDENT_SEP: // Found ident and now '.' - yylval->lex_str.str= (char*) lip->get_ptr(); + yylval->lex_str.str= (char*) get_ptr(); yylval->lex_str.length= 1; - c= lip->yyGet(); // should be '.' - lip->next_state= MY_LEX_IDENT_START; // Next is ident (not keyword) - if (!ident_map[(uchar) lip->yyPeek()]) // Probably ` or " - lip->next_state= MY_LEX_START; + c= yyGet(); // should be '.' + next_state= MY_LEX_IDENT_START; // Next is ident (not keyword) + if (!ident_map[(uchar) yyPeek()]) // Probably ` or " + next_state= MY_LEX_START; return((int) c); - case MY_LEX_NUMBER_IDENT: // number or ident which num-start - if (lip->yyGetLast() == '0') + case MY_LEX_NUMBER_IDENT: // number or ident which num-start + if (yyGetLast() == '0') { - c= lip->yyGet(); + c= yyGet(); if (c == 'x') { - while (my_isxdigit(cs,(c = lip->yyGet()))) ; - if ((lip->yyLength() >= 3) && !ident_map[c]) + while (my_isxdigit(cs, (c = yyGet()))) ; + if ((yyLength() >= 3) && !ident_map[c]) { /* skip '0x' */ - yylval->lex_str=get_token(lip, 2, lip->yyLength()-2); + yylval->lex_str= get_token(2, yyLength() - 2); return (HEX_NUM); } - lip->yyUnget(); + yyUnget(); state= MY_LEX_IDENT_START; break; } else if (c == 'b') { - while ((c= lip->yyGet()) == '0' || c == '1') + while ((c= yyGet()) == '0' || c == '1') ; - if ((lip->yyLength() >= 3) && !ident_map[c]) + if ((yyLength() >= 3) && !ident_map[c]) { /* Skip '0b' */ - yylval->lex_str= get_token(lip, 2, lip->yyLength()-2); + yylval->lex_str= get_token(2, yyLength() - 2); return (BIN_NUM); } - lip->yyUnget(); + yyUnget(); state= MY_LEX_IDENT_START; break; } - lip->yyUnget(); + yyUnget(); } - while (my_isdigit(cs, (c = lip->yyGet()))) ; + while (my_isdigit(cs, (c= yyGet()))) ; if (!ident_map[c]) - { // Can't be identifier - state=MY_LEX_INT_OR_REAL; - break; + { // Can't be identifier + state=MY_LEX_INT_OR_REAL; + break; } if (c == 'e' || c == 'E') { - // The following test is written this way to allow numbers of type 1e1 - if (my_isdigit(cs,lip->yyPeek()) || - (c=(lip->yyGet())) == '+' || c == '-') - { // Allow 1E+10 - if (my_isdigit(cs,lip->yyPeek())) // Number must have digit after sign - { - lip->yySkip(); - while (my_isdigit(cs,lip->yyGet())) ; - yylval->lex_str=get_token(lip, 0, lip->yyLength()); 
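Reviewer note: the MY_LEX_NUMBER_IDENT logic being re-indented here also encodes the exponent rule mentioned in the comments above ("allow numbers of type 1e1", "number must have digit after sign"). Restated outside the state machine as a standalone sketch (token names and the helper are ad hoc, not server code):

    /* Hypothetical restatement of the rule applied after a run of digits. */
    enum num_tok { TOK_NUM, TOK_FLOAT_NUM, TOK_IDENT_START };

    static int is_dec_digit(char c) { return c >= '0' && c <= '9'; }

    static enum num_tok classify_after_digits(const char *p)
    {
      /* p points just past the digits, e.g. at "e+10" in "1e+10" */
      if (*p == 'e' || *p == 'E')
      {
        const char *q= p + 1;
        if (*q == '+' || *q == '-')
          q++;                       /* accept one sign character */
        if (is_dec_digit(*q))
          return TOK_FLOAT_NUM;      /* 1e1, 1E+10: floating point literal  */
        return TOK_IDENT_START;      /* 1e, 1ex: scan on as an identifier   */
      }
      return TOK_NUM;                /* plain integer literal */
    }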
- return(FLOAT_NUM); - } - } - lip->yyUnget(); + // The following test is written this way to allow numbers of type 1e1 + if (my_isdigit(cs, yyPeek()) || + (c=(yyGet())) == '+' || c == '-') + { // Allow 1E+10 + if (my_isdigit(cs, yyPeek())) // Number must have digit after sign + { + yySkip(); + while (my_isdigit(cs, yyGet())) ; + yylval->lex_str= get_token(0, yyLength()); + return(FLOAT_NUM); + } + } + yyUnget(); } // fall through - case MY_LEX_IDENT_START: // We come here after '.' - result_state= IDENT; -#if defined(USE_MB) && defined(USE_MB_IDENT) - if (use_mb(cs)) - { - result_state= IDENT_QUOTED; - while (ident_map[c=lip->yyGet()]) - { - int char_length= my_charlen(cs, lip->get_ptr() - 1, - lip->get_end_of_query()); - if (char_length <= 0) - break; - lip->skip_binary(char_length - 1); - } - } - else -#endif - { - for (result_state=0; ident_map[c= lip->yyGet()]; result_state|= c) - ; - /* If there were non-ASCII characters, mark that we must convert */ - result_state= result_state & 0x80 ? IDENT_QUOTED : IDENT; - } - if (c == '.' && ident_map[(uchar) lip->yyPeek()]) - lip->next_state=MY_LEX_IDENT_SEP;// Next is '.' + case MY_LEX_IDENT_START: // We come here after '.' + return scan_ident_start(thd, &yylval->ident_cli); - yylval->lex_str= get_token(lip, 0, lip->yyLength()); + case MY_LEX_USER_VARIABLE_DELIMITER: // Found quote char + return scan_ident_delimited(thd, &yylval->ident_cli); - lip->body_utf8_append(lip->m_cpp_text_start); - - lip->body_utf8_append_ident(thd, &yylval->lex_str, lip->m_cpp_text_end); - - return(result_state); - - case MY_LEX_USER_VARIABLE_DELIMITER: // Found quote char - { - uint double_quotes= 0; - char quote_char= c; // Used char - while ((c=lip->yyGet())) - { - int var_length= my_charlen(cs, lip->get_ptr() - 1, - lip->get_end_of_query()); - if (var_length == 1) - { - if (c == quote_char) - { - if (lip->yyPeek() != quote_char) - break; - c=lip->yyGet(); - double_quotes++; - continue; - } - } -#ifdef USE_MB - else if (var_length > 1) - { - lip->skip_binary(var_length - 1); - } -#endif - } - if (double_quotes) - yylval->lex_str=get_quoted_token(lip, 1, - lip->yyLength() - double_quotes -1, - quote_char); - else - yylval->lex_str=get_token(lip, 1, lip->yyLength() -1); - if (c == quote_char) - lip->yySkip(); // Skip end ` - lip->next_state= MY_LEX_START; - - lip->body_utf8_append(lip->m_cpp_text_start); - - lip->body_utf8_append_ident(thd, &yylval->lex_str, lip->m_cpp_text_end); - - return(IDENT_QUOTED); - } - case MY_LEX_INT_OR_REAL: // Complete int or incomplete real - if (c != '.' || lip->yyPeek() == '.') + case MY_LEX_INT_OR_REAL: // Complete int or incomplete real + if (c != '.' 
|| yyPeek() == '.') { /* Found a complete integer number: - the number is either not followed by a dot at all, or - the number is followed by a double dot as in: FOR i IN 1..10 */ - yylval->lex_str=get_token(lip, 0, lip->yyLength()); - return int_token(yylval->lex_str.str, (uint) yylval->lex_str.length); + yylval->lex_str= get_token(0, yyLength()); + return int_token(yylval->lex_str.str, (uint) yylval->lex_str.length); } // fall through - case MY_LEX_REAL: // Incomplete real number - while (my_isdigit(cs,c = lip->yyGet())) ; + case MY_LEX_REAL: // Incomplete real number + while (my_isdigit(cs, c= yyGet())) ; if (c == 'e' || c == 'E') { - c = lip->yyGet(); - if (c == '-' || c == '+') - c = lip->yyGet(); // Skip sign - if (!my_isdigit(cs,c)) - { // No digit after sign - state= MY_LEX_CHAR; - break; - } - while (my_isdigit(cs,lip->yyGet())) ; - yylval->lex_str=get_token(lip, 0, lip->yyLength()); - return(FLOAT_NUM); + c= yyGet(); + if (c == '-' || c == '+') + c= yyGet(); // Skip sign + if (!my_isdigit(cs, c)) + { // No digit after sign + state= MY_LEX_CHAR; + break; + } + while (my_isdigit(cs, yyGet())) ; + yylval->lex_str= get_token(0, yyLength()); + return(FLOAT_NUM); } - yylval->lex_str=get_token(lip, 0, lip->yyLength()); + yylval->lex_str= get_token(0, yyLength()); return(DECIMAL_NUM); - case MY_LEX_HEX_NUMBER: // Found x'hexstring' - lip->yySkip(); // Accept opening ' - while (my_isxdigit(cs, (c= lip->yyGet()))) ; + case MY_LEX_HEX_NUMBER: // Found x'hexstring' + yySkip(); // Accept opening ' + while (my_isxdigit(cs, (c= yyGet()))) ; if (c != '\'') return(ABORT_SYM); // Illegal hex constant - lip->yySkip(); // Accept closing ' - length= lip->yyLength(); // Length of hexnum+3 + yySkip(); // Accept closing ' + length= yyLength(); // Length of hexnum+3 if ((length % 2) == 0) return(ABORT_SYM); // odd number of hex digits - yylval->lex_str=get_token(lip, - 2, // skip x' - length-3); // don't count x' and last ' + yylval->lex_str= get_token(2, // skip x' + length - 3); // don't count x' and last ' return HEX_STRING; case MY_LEX_BIN_NUMBER: // Found b'bin-string' - lip->yySkip(); // Accept opening ' - while ((c= lip->yyGet()) == '0' || c == '1') + yySkip(); // Accept opening ' + while ((c= yyGet()) == '0' || c == '1') ; if (c != '\'') return(ABORT_SYM); // Illegal hex constant - lip->yySkip(); // Accept closing ' - length= lip->yyLength(); // Length of bin-num + 3 - yylval->lex_str= get_token(lip, - 2, // skip b' - length-3); // don't count b' and last ' + yySkip(); // Accept closing ' + length= yyLength(); // Length of bin-num + 3 + yylval->lex_str= get_token(2, // skip b' + length - 3); // don't count b' and last ' return (BIN_NUM); - case MY_LEX_CMP_OP: // Incomplete comparison operator - lip->next_state= MY_LEX_START; // Allow signed numbers - if (state_map[(uchar) lip->yyPeek()] == MY_LEX_CMP_OP || - state_map[(uchar) lip->yyPeek()] == MY_LEX_LONG_CMP_OP) + case MY_LEX_CMP_OP: // Incomplete comparison operator + next_state= MY_LEX_START; // Allow signed numbers + if (state_map[(uchar) yyPeek()] == MY_LEX_CMP_OP || + state_map[(uchar) yyPeek()] == MY_LEX_LONG_CMP_OP) { - lip->yySkip(); - if ((tokval= find_keyword(lip, 2, 0))) + yySkip(); + if ((tokval= find_keyword(&yylval->kwd, 2, 0))) return(tokval); - lip->yyUnget(); + yyUnget(); } return(c); - case MY_LEX_LONG_CMP_OP: // Incomplete comparison operator - lip->next_state= MY_LEX_START; - if (state_map[(uchar) lip->yyPeek()] == MY_LEX_CMP_OP || - state_map[(uchar) lip->yyPeek()] == MY_LEX_LONG_CMP_OP) + case MY_LEX_LONG_CMP_OP: // 
Incomplete comparison operator + next_state= MY_LEX_START; + if (state_map[(uchar) yyPeek()] == MY_LEX_CMP_OP || + state_map[(uchar) yyPeek()] == MY_LEX_LONG_CMP_OP) { - lip->yySkip(); - if (state_map[(uchar) lip->yyPeek()] == MY_LEX_CMP_OP) + yySkip(); + if (state_map[(uchar) yyPeek()] == MY_LEX_CMP_OP) { - lip->yySkip(); - if ((tokval= find_keyword(lip, 3, 0))) + yySkip(); + if ((tokval= find_keyword(&yylval->kwd, 3, 0))) return(tokval); - lip->yyUnget(); + yyUnget(); } - if ((tokval= find_keyword(lip, 2, 0))) + if ((tokval= find_keyword(&yylval->kwd, 2, 0))) return(tokval); - lip->yyUnget(); + yyUnget(); } return(c); case MY_LEX_BOOL: - if (c != lip->yyPeek()) + if (c != yyPeek()) { - state=MY_LEX_CHAR; - break; + state= MY_LEX_CHAR; + break; } - lip->yySkip(); - tokval = find_keyword(lip,2,0); // Is a bool operator - lip->next_state= MY_LEX_START; // Allow signed numbers + yySkip(); + tokval= find_keyword(&yylval->kwd, 2, 0); // Is a bool operator + next_state= MY_LEX_START; // Allow signed numbers return(tokval); case MY_LEX_STRING_OR_DELIMITER: if (thd->variables.sql_mode & MODE_ANSI_QUOTES) { - state= MY_LEX_USER_VARIABLE_DELIMITER; - break; + state= MY_LEX_USER_VARIABLE_DELIMITER; + break; } /* " used for strings */ /* fall through */ - case MY_LEX_STRING: // Incomplete text string + case MY_LEX_STRING: // Incomplete text string { uint sep; - if (lip->get_text(&yylval->lex_string_with_metadata, - (sep= lip->yyGetLast()), 1, 1)) + if (get_text(&yylval->lex_string_with_metadata, (sep= yyGetLast()), 1, 1)) { - state= MY_LEX_CHAR; // Read char by char - break; + state= MY_LEX_CHAR; // Read char by char + break; } - CHARSET_INFO *strcs= lip->m_underscore_cs ? lip->m_underscore_cs : cs; - lip->body_utf8_append(lip->m_cpp_text_start); + CHARSET_INFO *strcs= m_underscore_cs ? m_underscore_cs : cs; + body_utf8_append(m_cpp_text_start); - lip->body_utf8_append_escape(thd, &yylval->lex_string_with_metadata, - strcs, lip->m_cpp_text_end, sep); - lip->m_underscore_cs= NULL; + body_utf8_append_escape(thd, &yylval->lex_string_with_metadata, + strcs, m_cpp_text_end, sep); + m_underscore_cs= NULL; return(TEXT_STRING); } - case MY_LEX_COMMENT: // Comment + case MY_LEX_COMMENT: // Comment lex->select_lex.options|= OPTION_FOUND_COMMENT; - while ((c = lip->yyGet()) != '\n' && c) ; - lip->yyUnget(); // Safety against eof - state = MY_LEX_START; // Try again + while ((c= yyGet()) != '\n' && c) ; + yyUnget(); // Safety against eof + state= MY_LEX_START; // Try again break; - case MY_LEX_LONG_COMMENT: /* Long C comment? */ - if (lip->yyPeek() != '*') + case MY_LEX_LONG_COMMENT: // Long C comment? + if (yyPeek() != '*') { - state=MY_LEX_CHAR; // Probable division - break; + state= MY_LEX_CHAR; // Probable division + break; } lex->select_lex.options|= OPTION_FOUND_COMMENT; /* Reject '/' '*', since we might need to turn off the echo */ - lip->yyUnget(); + yyUnget(); - lip->save_in_comment_state(); + save_in_comment_state(); - if (lip->yyPeekn(2) == '!' || - (lip->yyPeekn(2) == 'M' && lip->yyPeekn(3) == '!')) + if (yyPeekn(2) == '!' || + (yyPeekn(2) == 'M' && yyPeekn(3) == '!')) { - bool maria_comment_syntax= lip->yyPeekn(2) == 'M'; - lip->in_comment= DISCARD_COMMENT; + bool maria_comment_syntax= yyPeekn(2) == 'M'; + in_comment= DISCARD_COMMENT; /* Accept '/' '*' '!', but do not keep this marker. */ - lip->set_echo(FALSE); - lip->yySkipn(maria_comment_syntax ? 4 : 3); + set_echo(FALSE); + yySkipn(maria_comment_syntax ? 
4 : 3); /* The special comment format is very strict: @@ -1923,24 +1723,24 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd) 50114 -> 5.1.14 100000 -> 10.0.0 */ - if ( my_isdigit(cs, lip->yyPeekn(0)) - && my_isdigit(cs, lip->yyPeekn(1)) - && my_isdigit(cs, lip->yyPeekn(2)) - && my_isdigit(cs, lip->yyPeekn(3)) - && my_isdigit(cs, lip->yyPeekn(4)) + if ( my_isdigit(cs, yyPeekn(0)) + && my_isdigit(cs, yyPeekn(1)) + && my_isdigit(cs, yyPeekn(2)) + && my_isdigit(cs, yyPeekn(3)) + && my_isdigit(cs, yyPeekn(4)) ) { ulong version; uint length= 5; - char *end_ptr= (char*) lip->get_ptr()+length; + char *end_ptr= (char*) get_ptr() + length; int error; - if (my_isdigit(cs, lip->yyPeekn(5))) + if (my_isdigit(cs, yyPeekn(5))) { end_ptr++; // 6 digit number length++; } - version= (ulong) my_strtoll10(lip->get_ptr(), &end_ptr, &error); + version= (ulong) my_strtoll10(get_ptr(), &end_ptr, &error); /* MySQL-5.7 has new features and might have new SQL syntax that @@ -1952,31 +1752,31 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd) (version < 50700 || version > 99999 || maria_comment_syntax)) { /* Accept 'M' 'm' 'm' 'd' 'd' */ - lip->yySkipn(length); + yySkipn(length); /* Expand the content of the special comment as real code */ - lip->set_echo(TRUE); + set_echo(TRUE); state=MY_LEX_START; break; /* Do not treat contents as a comment. */ } else { #ifdef WITH_WSREP - if (WSREP(thd) && version == 99997 && thd->wsrep_exec_mode == LOCAL_STATE) - { - WSREP_DEBUG("consistency check: %s", thd->query()); - thd->wsrep_consistency_check= CONSISTENCY_CHECK_DECLARED; - lip->yySkipn(5); - lip->set_echo(TRUE); - state=MY_LEX_START; - break; /* Do not treat contents as a comment. */ - } + if (WSREP(thd) && version == 99997 && thd->wsrep_exec_mode == LOCAL_STATE) + { + WSREP_DEBUG("consistency check: %s", thd->query()); + thd->wsrep_consistency_check= CONSISTENCY_CHECK_DECLARED; + yySkipn(5); + set_echo(TRUE); + state= MY_LEX_START; + break; /* Do not treat contents as a comment. */ + } #endif /* WITH_WSREP */ /* Patch and skip the conditional comment to avoid it being propagated infinitely (eg. to a slave). */ - char *pcom= lip->yyUnput(' '); - comment_closed= ! consume_comment(lip, 1); + char *pcom= yyUnput(' '); + comment_closed= ! consume_comment(1); if (! comment_closed) { *pcom= '!'; @@ -1988,16 +1788,16 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd) { /* Not a version comment. */ state=MY_LEX_START; - lip->set_echo(TRUE); + set_echo(TRUE); break; } } else { - lip->in_comment= PRESERVE_COMMENT; - lip->yySkip(); // Accept / - lip->yySkip(); // Accept * - comment_closed= ! consume_comment(lip, 0); + in_comment= PRESERVE_COMMENT; + yySkip(); // Accept / + yySkip(); // Accept * + comment_closed= ! consume_comment(0); /* regular comments can have zero comments inside. */ } /* @@ -2011,143 +1811,367 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd) /#!VERSI oned containing /# regular #/ is allowed #/ - Inside one versioned comment, another versioned comment - is treated as a regular discardable comment. It gets - no special parsing. + Inside one versioned comment, another versioned comment + is treated as a regular discardable comment. It gets + no special parsing. */ /* Unbalanced comments with a missing '*' '/' are a syntax error */ if (! 
comment_closed) return (ABORT_SYM); state = MY_LEX_START; // Try again - lip->restore_in_comment_state(); + restore_in_comment_state(); break; case MY_LEX_END_LONG_COMMENT: - if ((lip->in_comment != NO_COMMENT) && lip->yyPeek() == '/') + if ((in_comment != NO_COMMENT) && yyPeek() == '/') { /* Reject '*' '/' */ - lip->yyUnget(); + yyUnget(); /* Accept '*' '/', with the proper echo */ - lip->set_echo(lip->in_comment == PRESERVE_COMMENT); - lip->yySkipn(2); + set_echo(in_comment == PRESERVE_COMMENT); + yySkipn(2); /* And start recording the tokens again */ - lip->set_echo(TRUE); - lip->in_comment=NO_COMMENT; + set_echo(TRUE); + in_comment= NO_COMMENT; state=MY_LEX_START; } else - state=MY_LEX_CHAR; // Return '*' + state= MY_LEX_CHAR; // Return '*' break; - case MY_LEX_SET_VAR: // Check if ':=' - if (lip->yyPeek() != '=') + case MY_LEX_SET_VAR: // Check if ':=' + if (yyPeek() != '=') { - state=MY_LEX_CHAR; // Return ':' - break; + state= MY_LEX_CHAR; // Return ':' + break; } - lip->yySkip(); + yySkip(); return (SET_VAR); - case MY_LEX_SEMICOLON: // optional line terminator + case MY_LEX_SEMICOLON: // optional line terminator state= MY_LEX_CHAR; // Return ';' break; case MY_LEX_EOL: - if (lip->eof()) + if (eof()) { - lip->yyUnget(); // Reject the last '\0' - lip->set_echo(FALSE); - lip->yySkip(); - lip->set_echo(TRUE); + yyUnget(); // Reject the last '\0' + set_echo(FALSE); + yySkip(); + set_echo(TRUE); /* Unbalanced comments with a missing '*' '/' are a syntax error */ - if (lip->in_comment != NO_COMMENT) + if (in_comment != NO_COMMENT) return (ABORT_SYM); - lip->next_state=MY_LEX_END; // Mark for next loop + next_state= MY_LEX_END; // Mark for next loop return(END_OF_INPUT); } state=MY_LEX_CHAR; break; case MY_LEX_END: - lip->next_state=MY_LEX_END; - return(0); // We found end of input last time + next_state= MY_LEX_END; + return(0); // We found end of input last time /* Actually real shouldn't start with . but allow them anyhow */ case MY_LEX_REAL_OR_POINT: - if (my_isdigit(cs,(c= lip->yyPeek()))) - state = MY_LEX_REAL; // Real + if (my_isdigit(cs, (c= yyPeek()))) + state = MY_LEX_REAL; // Real else if (c == '.') { - lip->yySkip(); + yySkip(); return DOT_DOT_SYM; } else { - state= MY_LEX_IDENT_SEP; // return '.' - lip->yyUnget(); // Put back '.' + state= MY_LEX_IDENT_SEP; // return '.' + yyUnget(); // Put back '.' } break; - case MY_LEX_USER_END: // end '@' of user@hostname - switch (state_map[(uchar) lip->yyPeek()]) { + case MY_LEX_USER_END: // end '@' of user@hostname + switch (state_map[(uchar) yyPeek()]) { case MY_LEX_STRING: case MY_LEX_USER_VARIABLE_DELIMITER: case MY_LEX_STRING_OR_DELIMITER: - break; + break; case MY_LEX_USER_END: - lip->next_state=MY_LEX_SYSTEM_VAR; - break; + next_state= MY_LEX_SYSTEM_VAR; + break; default: - lip->next_state=MY_LEX_HOSTNAME; - break; + next_state= MY_LEX_HOSTNAME; + break; } - yylval->lex_str.str=(char*) lip->get_ptr(); - yylval->lex_str.length=1; + yylval->lex_str.str= (char*) get_ptr(); + yylval->lex_str.length= 1; return((int) '@'); - case MY_LEX_HOSTNAME: // end '@' of user@hostname - for (c=lip->yyGet() ; - my_isalnum(cs,c) || c == '.' || c == '_' || c == '$'; - c= lip->yyGet()) ; - yylval->lex_str=get_token(lip, 0, lip->yyLength()); + case MY_LEX_HOSTNAME: // end '@' of user@hostname + for (c= yyGet() ; + my_isalnum(cs, c) || c == '.' 
|| c == '_' || c == '$'; + c= yyGet()) ; + yylval->lex_str= get_token(0, yyLength()); return(LEX_HOSTNAME); case MY_LEX_SYSTEM_VAR: - yylval->lex_str.str=(char*) lip->get_ptr(); - yylval->lex_str.length=1; - lip->yySkip(); // Skip '@' - lip->next_state= (state_map[(uchar) lip->yyPeek()] == - MY_LEX_USER_VARIABLE_DELIMITER ? - MY_LEX_OPERATOR_OR_IDENT : - MY_LEX_IDENT_OR_KEYWORD); + yylval->lex_str.str= (char*) get_ptr(); + yylval->lex_str.length= 1; + yySkip(); // Skip '@' + next_state= (state_map[(uchar) yyPeek()] == + MY_LEX_USER_VARIABLE_DELIMITER ? + MY_LEX_OPERATOR_OR_IDENT : + MY_LEX_IDENT_OR_KEYWORD); return((int) '@'); case MY_LEX_IDENT_OR_KEYWORD: /* - We come here when we have found two '@' in a row. - We should now be able to handle: - [(global | local | session) .]variable_name + We come here when we have found two '@' in a row. + We should now be able to handle: + [(global | local | session) .]variable_name */ - - for (result_state= 0; ident_map[c= lip->yyGet()]; result_state|= c) - ; - /* If there were non-ASCII characters, mark that we must convert */ - result_state= result_state & 0x80 ? IDENT_QUOTED : IDENT; - - if (c == '.') - lip->next_state=MY_LEX_IDENT_SEP; - length= lip->yyLength(); - if (length == 0) - return(ABORT_SYM); // Names must be nonempty. - if ((tokval= find_keyword(lip, length,0))) - { - lip->yyUnget(); // Put back 'c' - return(tokval); // Was keyword - } - yylval->lex_str=get_token(lip, 0, length); - - lip->body_utf8_append(lip->m_cpp_text_start); - - lip->body_utf8_append_ident(thd, &yylval->lex_str, lip->m_cpp_text_end); - - return(result_state); + return scan_ident_sysvar(thd, &yylval->ident_cli); } } } +bool Lex_input_stream::get_7bit_or_8bit_ident(THD *thd, uchar *last_char) +{ + uchar c; + CHARSET_INFO *const cs= thd->charset(); + const uchar *const ident_map= cs->ident_map; + bool is_8bit= false; + for ( ; ident_map[c= yyGet()]; ) + { + if (c & 0x80) + is_8bit= true; // will convert + } + *last_char= c; + return is_8bit; +} + + +int Lex_input_stream::scan_ident_sysvar(THD *thd, Lex_ident_cli_st *str) +{ + uchar last_char; + uint length; + int tokval; + bool is_8bit; + DBUG_ASSERT(m_tok_start == m_ptr); + + is_8bit= get_7bit_or_8bit_ident(thd, &last_char); + + if (last_char == '.') + next_state= MY_LEX_IDENT_SEP; + if (!(length= yyLength())) + return ABORT_SYM; // Names must be nonempty. + if ((tokval= find_keyword(str, length, 0))) + { + yyUnget(); // Put back 'c' + return tokval; // Was keyword + } + + yyUnget(); // ptr points now after last token char + str->set_ident(m_tok_start, length, is_8bit); + + m_cpp_text_start= m_cpp_tok_start; + m_cpp_text_end= m_cpp_text_start + length; + body_utf8_append(m_cpp_text_start); + body_utf8_append_ident(thd, str, m_cpp_text_end); + + return is_8bit ? IDENT_QUOTED : IDENT; +} + + +/* + We can come here if different parsing stages: + - In an identifier chain: + SELECT t1.cccc FROM t1; + (when the "cccc" part starts) + In this case both m_tok_start and m_ptr point to "cccc". + - When a sequence of digits has changed to something else, + therefore the token becomes an identifier rather than a number: + SELECT 12345_6 FROM t1; + In this case m_tok_start points to the entire "12345_678", + while m_ptr points to "678". 
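Reviewer note on the new identifier scanners above: besides advancing the scan, get_7bit_or_8bit_ident() computes a single flag -- whether any byte of the identifier has the high bit set -- and that flag decides whether the token is reported as plain IDENT or as IDENT_QUOTED (i.e. marked for charset conversion). The test in isolation, with an illustrative name (not a server function):

    #include <stdbool.h>
    #include <stddef.h>

    /* True when the identifier contains a byte >= 0x80 and may need
       conversion to the connection character set before further use. */
    static bool ident_has_8bit(const unsigned char *s, size_t len)
    {
      bool is_8bit= false;
      for (size_t i= 0; i < len; i++)
        if (s[i] & 0x80)
          is_8bit= true;      /* will convert */
      return is_8bit;
    }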
+*/ +int Lex_input_stream::scan_ident_start(THD *thd, Lex_ident_cli_st *str) +{ + uchar c; + bool is_8bit; + CHARSET_INFO *const cs= thd->charset(); + const uchar *const ident_map= cs->ident_map; + DBUG_ASSERT(m_tok_start <= m_ptr); + + if (use_mb(cs)) + { + is_8bit= true; + while (ident_map[c= yyGet()]) + { + int char_length= my_charlen(cs, get_ptr() - 1, get_end_of_query()); + if (char_length <= 0) + break; + skip_binary(char_length - 1); + } + } + else + { + is_8bit= get_7bit_or_8bit_ident(thd, &c); + } + if (c == '.' && ident_map[(uchar) yyPeek()]) + next_state= MY_LEX_IDENT_SEP;// Next is '.' + + uint length= yyLength(); + yyUnget(); // ptr points now after last token char + str->set_ident(m_tok_start, length, is_8bit); + m_cpp_text_start= m_cpp_tok_start; + m_cpp_text_end= m_cpp_text_start + length; + body_utf8_append(m_cpp_text_start); + body_utf8_append_ident(thd, str, m_cpp_text_end); + return is_8bit ? IDENT_QUOTED : IDENT; +} + + +int Lex_input_stream::scan_ident_middle(THD *thd, Lex_ident_cli_st *str, + CHARSET_INFO **introducer, + my_lex_states *st) +{ + CHARSET_INFO *const cs= thd->charset(); + const uchar *const ident_map= cs->ident_map; + const uchar *const state_map= cs->state_map; + const char *start; + uint length; + uchar c; + bool is_8bit; + bool resolve_introducer= true; + DBUG_ASSERT(m_ptr == m_tok_start + 1); // m_ptr points to the second byte + + if (use_mb(cs)) + { + is_8bit= true; + int char_length= my_charlen(cs, get_ptr() - 1, get_end_of_query()); + if (char_length <= 0) + { + *st= MY_LEX_CHAR; + return 0; + } + skip_binary(char_length - 1); + + while (ident_map[c= yyGet()]) + { + char_length= my_charlen(cs, get_ptr() - 1, get_end_of_query()); + if (char_length <= 0) + break; + if (char_length > 1 || (c & 0x80)) + resolve_introducer= false; + skip_binary(char_length - 1); + } + } + else + { + is_8bit= get_7bit_or_8bit_ident(thd, &c) || (m_tok_start[0] & 0x80); + resolve_introducer= !is_8bit; + } + length= yyLength(); + start= get_ptr(); + if (ignore_space) + { + /* + If we find a space then this can't be an identifier. We notice this + below by checking start != lex->ptr. + */ + for (; state_map[(uchar) c] == MY_LEX_SKIP ; c= yyGet()) + { + if (c == '\n') + yylineno++; + } + } + if (start == get_ptr() && c == '.' && ident_map[(uchar) yyPeek()]) + next_state= MY_LEX_IDENT_SEP; + else + { // '(' must follow directly if function + int tokval; + yyUnget(); + if ((tokval= find_keyword(str, length, c == '('))) + { + next_state= MY_LEX_START; // Allow signed numbers + return(tokval); // Was keyword + } + yySkip(); // next state does a unget + } + + /* + Note: "SELECT _bla AS 'alias'" + _bla should be considered as a IDENT if charset haven't been found. + So we don't use MYF(MY_WME) with get_charset_by_csname to avoid + producing an error. 
+ */ + DBUG_ASSERT(length > 0); + if (resolve_introducer && m_tok_start[0] == '_') + { + + yyUnget(); // ptr points now after last token char + str->set_ident(m_tok_start, length, false); + + m_cpp_text_start= m_cpp_tok_start; + m_cpp_text_end= m_cpp_text_start + length; + body_utf8_append(m_cpp_text_start, m_cpp_tok_start + length); + ErrConvString csname(str->str + 1, str->length - 1, &my_charset_bin); + CHARSET_INFO *cs= get_charset_by_csname(csname.ptr(), + MY_CS_PRIMARY, MYF(0)); + if (cs) + { + *introducer= cs; + return UNDERSCORE_CHARSET; + } + return IDENT; + } + + yyUnget(); // ptr points now after last token char + str->set_ident(m_tok_start, length, is_8bit); + m_cpp_text_start= m_cpp_tok_start; + m_cpp_text_end= m_cpp_text_start + length; + body_utf8_append(m_cpp_text_start); + body_utf8_append_ident(thd, str, m_cpp_text_end); + return is_8bit ? IDENT_QUOTED : IDENT; +} + + +int Lex_input_stream::scan_ident_delimited(THD *thd, + Lex_ident_cli_st *str) +{ + CHARSET_INFO *const cs= thd->charset(); + uint double_quotes= 0; + uchar c, quote_char= m_tok_start[0]; + DBUG_ASSERT(m_ptr == m_tok_start + 1); + + while ((c= yyGet())) + { + int var_length= my_charlen(cs, get_ptr() - 1, get_end_of_query()); + if (var_length == 1) + { + if (c == quote_char) + { + if (yyPeek() != quote_char) + break; + c= yyGet(); + double_quotes++; + continue; + } + } + else if (var_length > 1) + { + skip_binary(var_length - 1); + } + } + + str->set_ident_quoted(m_tok_start + 1, yyLength() - 1, true, quote_char); + yyUnget(); // ptr points now after last token char + + m_cpp_text_start= m_cpp_tok_start + 1; + m_cpp_text_end= m_cpp_text_start + str->length; + + if (c == quote_char) + yySkip(); // Skip end ` + next_state= MY_LEX_START; + body_utf8_append(m_cpp_text_start); + // QQQ: shouldn't it add unescaped version ???? + body_utf8_append_ident(thd, str, m_cpp_text_end); + return IDENT_QUOTED; +} + + void trim_whitespace(CHARSET_INFO *cs, LEX_CSTRING *str, size_t * prefix_length) { /* @@ -2251,6 +2275,7 @@ void st_select_lex::init_query() select_n_having_items= 0; n_sum_items= 0; n_child_sum_items= 0; + hidden_bit_fields= 0; subquery_in_having= explicit_limit= 0; is_item_list_lookup= 0; first_execution= 1; @@ -2298,6 +2323,7 @@ void st_select_lex::init_select() select_limit= 0; /* denotes the default limit = HA_POS_ERROR */ offset_limit= 0; /* denotes the default offset = 0 */ with_sum_func= 0; + with_all_modifier= 0; is_correlated= 0; cur_pos_in_select_list= UNDEF_POS; cond_value= having_value= Item::COND_UNDEF; @@ -2367,7 +2393,7 @@ void st_select_lex_node::add_slave(st_select_lex_node *slave_arg) ref - references on reference on this node */ void st_select_lex_node::include_standalone(st_select_lex_node *upper, - st_select_lex_node **ref) + st_select_lex_node **ref) { next= 0; prev= ref; @@ -2432,7 +2458,7 @@ void st_select_lex_node::fast_exclude() */ st_select_lex_node *st_select_lex_node:: insert_chain_before( - st_select_lex_node **ptr_pos_to_insert, + st_select_lex_node **ptr_pos_to_insert, st_select_lex_node *end_chain_node) { end_chain_node->link_next= *ptr_pos_to_insert; @@ -2787,6 +2813,10 @@ ulong st_select_lex::get_table_join_options() bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) { + + if (!((options & SELECT_DISTINCT) && !group_list.elements)) + hidden_bit_fields= 0; + // find_order_in_list() may need some extra space, so multiply by two. 
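A note on the quoted-identifier path added above: scan_ident_delimited() treats a doubled quote character inside a delimited identifier as an escaped quote. It only counts the pairs in double_quotes and keeps the token pointing at the raw query text; the pairs are collapsed later, when the identifier is copied out of the client buffer into the system character set. A minimal sketch of that collapsing rule, assuming a single-byte quote character (unescape_delimited_ident is a hypothetical helper, not code from this patch):

#include <string>

// Collapse doubled quote characters the way a delimited identifier
// such as `a``b` (or "a""b" under ANSI_QUOTES) is read back.
// The real scanner also steps over multi-byte characters with
// my_charlen()/skip_binary(); this sketch assumes single-byte text.
static std::string unescape_delimited_ident(const std::string &body, char quote)
{
  std::string out;
  for (size_t i= 0; i < body.size(); i++)
  {
    out+= body[i];
    if (body[i] == quote && i + 1 < body.size() && body[i + 1] == quote)
      i++;                               // skip the second quote of the pair
  }
  return out;
}
// unescape_delimited_ident("a``b", '`') yields "a`b".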
order_group_num*= 2; @@ -2801,7 +2831,8 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) select_n_reserved + select_n_having_items + select_n_where_fields + - order_group_num) * 5; + order_group_num + + hidden_bit_fields) * 5; if (!ref_pointer_array.is_null()) { /* @@ -2815,7 +2846,7 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) return false; } Item **array= static_cast(arena->alloc(sizeof(Item*) * n_elems)); - if (array != NULL) + if (likely(array != NULL)) ref_pointer_array= Ref_ptr_array(array, n_elems); return array == NULL; @@ -3130,11 +3161,11 @@ bool LEX::can_be_merged() } return (selects_allow_merge && - select_lex.group_list.elements == 0 && - select_lex.having == 0 && + select_lex.group_list.elements == 0 && + select_lex.having == 0 && select_lex.with_sum_func == 0 && - select_lex.table_list.elements >= 1 && - !(select_lex.options & SELECT_DISTINCT) && + select_lex.table_list.elements >= 1 && + !(select_lex.options & SELECT_DISTINCT) && select_lex.select_limit == 0); } @@ -3515,14 +3546,14 @@ void LEX::set_trg_event_type_for_tables() SYNOPSIS unlink_first_table() - link_to_local Set to 1 if caller should link this table to local list + link_to_local Set to 1 if caller should link this table to local list NOTES We assume that first tables in both lists is the same table or the local list is empty. RETURN - 0 If 'query_tables' == 0 + 0 If 'query_tables' == 0 unlinked table In this case link_to_local is set. @@ -3549,7 +3580,7 @@ TABLE_LIST *LEX::unlink_first_table(bool *link_to_local) select_lex.context.table_list= select_lex.context.first_name_resolution_table= first->next_local; select_lex.table_list.first= first->next_local; - select_lex.table_list.elements--; //safety + select_lex.table_list.elements--; //safety first->next_local= 0; /* Ensure that the global list has the same first table as the local @@ -3611,14 +3642,14 @@ void LEX::first_lists_tables_same() SYNOPSIS link_first_table_back() - link_to_local do we need link this table to local + link_to_local do we need link this table to local RETURN global list */ void LEX::link_first_table_back(TABLE_LIST *first, - bool link_to_local) + bool link_to_local) { if (first) { @@ -3633,7 +3664,7 @@ void LEX::link_first_table_back(TABLE_LIST *first, first->next_local= select_lex.table_list.first; select_lex.context.table_list= first; select_lex.table_list.first= first; - select_lex.table_list.elements++; //safety + select_lex.table_list.elements++; //safety } } } @@ -3928,15 +3959,15 @@ bool st_select_lex::optimize_unflattened_subqueries(bool const_only) { if (!subquery_predicate->fixed) { - /* - This subquery was excluded as part of some expression so it is - invisible from all prepared expression. + /* + This subquery was excluded as part of some expression so it is + invisible from all prepared expression. 
*/ - next_unit= un->next_unit(); - un->exclude_level(); - if (next_unit) - continue; - break; + next_unit= un->next_unit(); + un->exclude_level(); + if (next_unit) + continue; + break; } if (subquery_predicate->substype() == Item_subselect::IN_SUBS) { @@ -4257,7 +4288,7 @@ bool SELECT_LEX::merge_subquery(THD *thd, TABLE_LIST *derived, for (uint i= 0; i < cnt; i++) { if (subq_select->expr_cache_may_be_used[i]) - expr_cache_may_be_used[i]= true; + expr_cache_may_be_used[i]= true; } List_iterator_fast it(subq_select->in_funcs); @@ -4323,7 +4354,7 @@ void SELECT_LEX::update_used_tables() for (embedding= tl->embedding; embedding; embedding=embedding->embedding) { if (embedding->is_view_or_derived()) - { + { DBUG_ASSERT(embedding->is_merged_derived()); TABLE *tab= tl->table; tab->covering_keys= tab->s->keys_for_keyread; @@ -4355,7 +4386,7 @@ void SELECT_LEX::update_used_tables() bool maybe_null; if ((maybe_null= MY_TEST(embedding->outer_join))) { - tl->table->maybe_null= maybe_null; + tl->table->maybe_null= maybe_null; break; } } @@ -4867,14 +4898,14 @@ bool LEX::set_arena_for_set_stmt(Query_arena *backup) if (!mem_root_for_set_stmt) { mem_root_for_set_stmt= new MEM_ROOT(); - if (!(mem_root_for_set_stmt)) + if (unlikely(!(mem_root_for_set_stmt))) DBUG_RETURN(1); init_sql_alloc(mem_root_for_set_stmt, "set_stmt", ALLOC_ROOT_SET, ALLOC_ROOT_SET, MYF(MY_THREAD_SPECIFIC)); } - if (!(arena_for_set_stmt= new(mem_root_for_set_stmt) - Query_arena_memroot(mem_root_for_set_stmt, - Query_arena::STMT_INITIALIZED))) + if (unlikely(!(arena_for_set_stmt= new(mem_root_for_set_stmt) + Query_arena_memroot(mem_root_for_set_stmt, + Query_arena::STMT_INITIALIZED)))) DBUG_RETURN(1); DBUG_PRINT("info", ("mem_root: %p arena: %p", mem_root_for_set_stmt, @@ -4987,6 +5018,8 @@ int st_select_lex_unit::save_union_explain(Explain_query *output) Explain_union *eu= new (output->mem_root) Explain_union(output->mem_root, thd->lex->analyze_stmt); + if (unlikely(!eu)) + return 0; if (with_element && with_element->is_recursive) eu->is_recursive_cte= true; @@ -5121,9 +5154,9 @@ bool LEX::add_unit_in_brackets(SELECT_LEX *nselect) /* add SELECT list*/ Item *item= new (thd->mem_root) Item_field(thd, context, NULL, NULL, &star_clex_str); - if (item == NULL) + if (unlikely(item == NULL)) DBUG_RETURN(TRUE); - if (add_item_to_list(thd, item)) + if (unlikely(add_item_to_list(thd, item))) DBUG_RETURN(TRUE); (dummy_select->with_wild)++; @@ -5136,20 +5169,21 @@ bool LEX::add_unit_in_brackets(SELECT_LEX *nselect) SELECT_LEX_UNIT *unit= nselect->master_unit(); Table_ident *ti= new (thd->mem_root) Table_ident(unit); - if (ti == NULL) + if (unlikely(ti == NULL)) DBUG_RETURN(TRUE); char buff[10]; LEX_CSTRING alias; alias.length= my_snprintf(buff, sizeof(buff), "__%u", dummy_select->select_number); alias.str= thd->strmake(buff, alias.length); - if (!alias.str) + if (unlikely(!alias.str)) DBUG_RETURN(TRUE); TABLE_LIST *table_list; - if (!(table_list= dummy_select->add_table_to_list(thd, ti, &alias, - 0, TL_READ, - MDL_SHARED_READ))) + if (unlikely(!(table_list= + dummy_select->add_table_to_list(thd, ti, &alias, + 0, TL_READ, + MDL_SHARED_READ)))) DBUG_RETURN(TRUE); context->resolve_in_table_list_only(table_list); dummy_select->add_joined_table(table_list); @@ -5248,12 +5282,26 @@ LEX::find_variable(const LEX_CSTRING *name, } +static bool is_new(const char *str) +{ + return (str[0] == 'n' || str[0] == 'N') && + (str[1] == 'e' || str[1] == 'E') && + (str[2] == 'w' || str[2] == 'W'); +} + +static bool is_old(const char *str) +{ + return (str[0] == 'o' || 
str[0] == 'O') && + (str[1] == 'l' || str[1] == 'L') && + (str[2] == 'd' || str[2] == 'D'); +} + + bool LEX::is_trigger_new_or_old_reference(const LEX_CSTRING *name) const { + // "name" is not necessarily NULL-terminated! return sphead && sphead->m_handler->type() == TYPE_ENUM_TRIGGER && - name->length == 3 && - (!my_strcasecmp(system_charset_info, name->str, "NEW") || - !my_strcasecmp(system_charset_info, name->str, "OLD")); + name->length == 3 && (is_new(name->str) || is_old(name->str)); } @@ -5272,7 +5320,7 @@ bool LEX::sp_variable_declarations_set_default(THD *thd, int nvars, Item *dflt_value_item) { if (!dflt_value_item && - !(dflt_value_item= new (thd->mem_root) Item_null(thd))) + unlikely(!(dflt_value_item= new (thd->mem_root) Item_null(thd)))) return true; for (uint i= 0 ; i < (uint) nvars ; i++) @@ -5286,7 +5334,7 @@ bool LEX::sp_variable_declarations_set_default(THD *thd, int nvars, spcont, &sp_rcontext_handler_local, spvar->offset, dflt_value_item, this, last); - if (is == NULL || sphead->add_instr(is)) + if (unlikely(is == NULL || sphead->add_instr(is))) return true; } return false; @@ -5310,7 +5358,8 @@ LEX::sp_variable_declarations_copy_type_finalize(THD *thd, int nvars, } spvar->field_def.field_name= spvar->name; } - if (sp_variable_declarations_set_default(thd, nvars, default_value)) + if (unlikely(sp_variable_declarations_set_default(thd, nvars, + default_value))) return true; spcont->declare_var_boundary(0); return sphead->restore_lex(thd); @@ -5411,7 +5460,8 @@ LEX::sp_variable_declarations_table_rowtype_finalize(THD *thd, int nvars, Item *def) { Table_ident *table_ref; - if (!(table_ref= new (thd->mem_root) Table_ident(thd, &db, &table, false))) + if (unlikely(!(table_ref= + new (thd->mem_root) Table_ident(thd, &db, &table, false)))) return true; // Loop through all variables in the same declaration for (uint i= 0 ; i < (uint) nvars; i++) @@ -5451,7 +5501,7 @@ LEX::sp_variable_declarations_cursor_rowtype_finalize(THD *thd, int nvars, sphead->fill_spvar_definition(thd, &spvar->field_def, &spvar->name); } - if (sp_variable_declarations_set_default(thd, nvars, def)) + if (unlikely(sp_variable_declarations_set_default(thd, nvars, def))) return true; // Make sure sp_rcontext is created using the invoker security context: sphead->m_flags|= sp_head::HAS_COLUMN_TYPE_REFS; @@ -5574,7 +5624,7 @@ sp_variable *LEX::sp_add_for_loop_variable(THD *thd, const LEX_CSTRING *name, spvar->field_def.set_handler(&type_handler_longlong); type_handler_longlong.Column_definition_prepare_stage2(&spvar->field_def, NULL, HA_CAN_GEOMETRY); - if (!value && !(value= new (thd->mem_root) Item_null(thd))) + if (!value && unlikely(!(value= new (thd->mem_root) Item_null(thd)))) return NULL; spvar->default_value= value; @@ -5583,7 +5633,7 @@ sp_variable *LEX::sp_add_for_loop_variable(THD *thd, const LEX_CSTRING *name, spcont, &sp_rcontext_handler_local, spvar->offset, value, this, true); - if (is == NULL || sphead->add_instr(is)) + if (unlikely(is == NULL || sphead->add_instr(is))) return NULL; spcont->declare_var_boundary(0); return spvar; @@ -5600,14 +5650,16 @@ bool LEX::sp_for_loop_implicit_cursor_statement(THD *thd, if (sp_declare_cursor(thd, &name, cur, NULL, true)) return true; DBUG_ASSERT(thd->lex == this); - if (!(bounds->m_index= new (thd->mem_root) sp_assignment_lex(thd, this))) + if (unlikely(!(bounds->m_index= + new (thd->mem_root) sp_assignment_lex(thd, this)))) return true; bounds->m_index->sp_lex_in_use= true; sphead->reset_lex(thd, bounds->m_index); DBUG_ASSERT(thd->lex != this); - if 
(!(item= new (thd->mem_root) Item_field(thd, - thd->lex->current_context(), - NullS, NullS, &name))) + if (unlikely(!(item= + new (thd->mem_root) Item_field(thd, + thd->lex->current_context(), + NullS, NullS, &name)))) return true; bounds->m_index->set_item_and_free_list(item, NULL); if (thd->lex->sphead->restore_lex(thd)) @@ -5632,13 +5684,14 @@ LEX::sp_add_for_loop_cursor_variable(THD *thd, return NULL; spcont->declare_var_boundary(1); sphead->fill_spvar_definition(thd, &spvar->field_def, &spvar->name); - if (!(spvar->default_value= new (thd->mem_root) Item_null(thd))) + if (unlikely(!(spvar->default_value= new (thd->mem_root) Item_null(thd)))) return NULL; spvar->field_def.set_cursor_rowtype_ref(coffset); - if (sphead->add_for_loop_open_cursor(thd, spcont, spvar, pcursor, coffset, - param_lex, parameters)) + if (unlikely(sphead->add_for_loop_open_cursor(thd, spcont, spvar, pcursor, + coffset, + param_lex, parameters))) return NULL; spcont->declare_var_boundary(0); @@ -5661,7 +5714,7 @@ bool LEX::sp_for_loop_condition(THD *thd, const Lex_for_loop_st &loop) args[i]= new (thd->mem_root) Item_splocal(thd, &sp_rcontext_handler_local, &src->name, src->offset, src->type_handler()); - if (args[i] == NULL) + if (unlikely(args[i] == NULL)) return true; #ifdef DBUG_ASSERT_EXISTS args[i]->m_sp= sphead; @@ -5671,7 +5724,7 @@ bool LEX::sp_for_loop_condition(THD *thd, const Lex_for_loop_st &loop) Item *expr= loop.m_direction > 0 ? (Item *) new (thd->mem_root) Item_func_le(thd, args[0], args[1]) : (Item *) new (thd->mem_root) Item_func_ge(thd, args[0], args[1]); - return !expr || sp_while_loop_expression(thd, expr); + return unlikely(!expr) || unlikely(sp_while_loop_expression(thd, expr)); } @@ -5683,7 +5736,7 @@ bool LEX::sp_for_loop_intrange_condition_test(THD *thd, { spcont->set_for_loop(loop); sphead->reset_lex(thd); - if (thd->lex->sp_for_loop_condition(thd, loop)) + if (unlikely(thd->lex->sp_for_loop_condition(thd, loop))) return true; return thd->lex->sphead->restore_lex(thd); } @@ -5698,8 +5751,10 @@ bool LEX::sp_for_loop_cursor_condition_test(THD *thd, sphead->reset_lex(thd); cursor_name= spcont->find_cursor(loop.m_cursor_offset); DBUG_ASSERT(cursor_name); - if (!(expr= new (thd->mem_root) Item_func_cursor_found(thd, cursor_name, - loop.m_cursor_offset))) + if (unlikely(!(expr= + new (thd->mem_root) + Item_func_cursor_found(thd, cursor_name, + loop.m_cursor_offset)))) return true; if (thd->lex->sp_while_loop_expression(thd, expr)) return true; @@ -5711,13 +5766,16 @@ bool LEX::sp_for_loop_intrange_declarations(THD *thd, Lex_for_loop_st *loop, const LEX_CSTRING *index, const Lex_for_loop_bounds_st &bounds) { - if (!(loop->m_index= - bounds.m_index->sp_add_for_loop_variable(thd, index, - bounds.m_index->get_item()))) + if (unlikely(!(loop->m_index= + bounds.m_index-> + sp_add_for_loop_variable(thd, index, + bounds.m_index->get_item())))) return true; - if (!(loop->m_upper_bound= - bounds.m_upper_bound->sp_add_for_loop_upper_bound(thd, - bounds.m_upper_bound->get_item()))) + if (unlikely(!(loop->m_upper_bound= + bounds.m_upper_bound-> + sp_add_for_loop_upper_bound(thd, + bounds. 
+ m_upper_bound->get_item())))) return true; loop->m_direction= bounds.m_direction; loop->m_implicit_cursor= 0; @@ -5770,8 +5828,9 @@ bool LEX::sp_for_loop_cursor_declarations(THD *thd, thd->parse_error(); return true; } - if (!(pcursor= spcont->find_cursor_with_error(&name, &coffs, false)) || - pcursor->check_param_count_with_error(param_count)) + if (unlikely(!(pcursor= spcont->find_cursor_with_error(&name, &coffs, + false)) || + pcursor->check_param_count_with_error(param_count))) return true; if (!(loop->m_index= sp_add_for_loop_cursor_variable(thd, index, @@ -5796,18 +5855,19 @@ bool LEX::sp_for_loop_increment(THD *thd, const Lex_for_loop_st &loop) Item_splocal(thd, &sp_rcontext_handler_local, &loop.m_index->name, loop.m_index->offset, loop.m_index->type_handler()); - if (splocal == NULL) + if (unlikely(splocal == NULL)) return true; #ifdef DBUG_ASSERT_EXISTS splocal->m_sp= sphead; #endif Item_int *inc= new (thd->mem_root) Item_int(thd, loop.m_direction); - if (!inc) + if (unlikely(!inc)) return true; Item *expr= new (thd->mem_root) Item_func_plus(thd, splocal, inc); - if (!expr || - sphead->set_local_variable(thd, spcont, &sp_rcontext_handler_local, - loop.m_index, expr, this, true)) + if (unlikely(!expr) || + unlikely(sphead->set_local_variable(thd, spcont, + &sp_rcontext_handler_local, + loop.m_index, expr, this, true))) return true; return false; } @@ -5819,8 +5879,8 @@ bool LEX::sp_for_loop_intrange_finalize(THD *thd, const Lex_for_loop_st &loop) // Generate FOR LOOP index increment in its own lex DBUG_ASSERT(this != thd->lex); - if (thd->lex->sp_for_loop_increment(thd, loop) || - thd->lex->sphead->restore_lex(thd)) + if (unlikely(thd->lex->sp_for_loop_increment(thd, loop) || + thd->lex->sphead->restore_lex(thd))) return true; // Generate a jump to the beginning of the loop @@ -5834,7 +5894,7 @@ bool LEX::sp_for_loop_cursor_finalize(THD *thd, const Lex_for_loop_st &loop) sp_instr_cfetch *instr= new (thd->mem_root) sp_instr_cfetch(sphead->instructions(), spcont, loop.m_cursor_offset, false); - if (instr == NULL || sphead->add_instr(instr)) + if (unlikely(instr == NULL) || unlikely(sphead->add_instr(instr))) return true; instr->add_to_varlist(loop.m_index); // Generate a jump to the beginning of the loop @@ -5857,7 +5917,7 @@ bool LEX::sp_declare_cursor(THD *thd, const LEX_CSTRING *name, } cursor_stmt->set_cursor_name(name); - if (spcont->add_cursor(name, param_ctx, cursor_stmt)) + if (unlikely(spcont->add_cursor(name, param_ctx, cursor_stmt))) return true; if (add_cpush_instr) @@ -5865,7 +5925,7 @@ bool LEX::sp_declare_cursor(THD *thd, const LEX_CSTRING *name, i= new (thd->mem_root) sp_instr_cpush(sphead->instructions(), spcont, cursor_stmt, spcont->current_cursor_count() - 1); - return i == NULL || sphead->add_instr(i); + return unlikely(i == NULL) || unlikely(sphead->add_instr(i)); } return false; } @@ -5900,15 +5960,17 @@ bool LEX::sp_handler_declaration_init(THD *thd, int type) sp_instr_hpush_jump *i= new (thd->mem_root) sp_instr_hpush_jump(sphead->instructions(), spcont, h); - if (i == NULL || sphead->add_instr(i)) + if (unlikely(i == NULL) || unlikely(sphead->add_instr(i))) return true; /* For continue handlers, mark end of handler scope. 
*/ if (type == sp_handler::CONTINUE && - sphead->push_backpatch(thd, i, spcont->last_label())) + unlikely(sphead->push_backpatch(thd, i, spcont->last_label()))) return true; - if (sphead->push_backpatch(thd, i, spcont->push_label(thd, &empty_clex_str, 0))) + if (unlikely(sphead->push_backpatch(thd, i, + spcont->push_label(thd, &empty_clex_str, + 0)))) return true; return false; @@ -5923,16 +5985,16 @@ bool LEX::sp_handler_declaration_finalize(THD *thd, int type) if (type == sp_handler::CONTINUE) { i= new (thd->mem_root) sp_instr_hreturn(sphead->instructions(), spcont); - if (i == NULL || - sphead->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(sphead->add_instr(i))) return true; } else { /* EXIT or UNDO handler, just jump to the end of the block */ i= new (thd->mem_root) sp_instr_hreturn(sphead->instructions(), spcont); - if (i == NULL || - sphead->add_instr(i) || - sphead->push_backpatch(thd, i, spcont->last_label())) /* Block end */ + if (unlikely(i == NULL) || + unlikely(sphead->add_instr(i)) || + unlikely(sphead->push_backpatch(thd, i, spcont->last_label()))) /* Block end */ return true; } sphead->backpatch(hlab); @@ -5960,16 +6022,16 @@ bool LEX::sp_block_finalize(THD *thd, const Lex_spblock_st spblock, { i= new (thd->mem_root) sp_instr_hpop(sp->instructions(), ctx, spblock.hndlrs); - if (i == NULL || - sp->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(sp->add_instr(i))) return true; } if (spblock.curs) { i= new (thd->mem_root) sp_instr_cpop(sp->instructions(), ctx, spblock.curs); - if (i == NULL || - sp->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(sp->add_instr(i))) return true; } spcont= ctx->pop_context(); @@ -5982,11 +6044,11 @@ bool LEX::sp_block_finalize(THD *thd, const Lex_spblock_st spblock, const LEX_CSTRING *end_label) { sp_label *splabel; - if (sp_block_finalize(thd, spblock, &splabel)) + if (unlikely(sp_block_finalize(thd, spblock, &splabel))) return true; - if (end_label->str && - lex_string_cmp(system_charset_info, - end_label, &splabel->name) != 0) + if (unlikely(end_label->str && + lex_string_cmp(system_charset_info, + end_label, &splabel->name) != 0)) { my_error(ER_SP_LABEL_MISMATCH, MYF(0), end_label->str); return true; @@ -5999,9 +6061,9 @@ sp_name *LEX::make_sp_name(THD *thd, const LEX_CSTRING *name) { sp_name *res; LEX_CSTRING db; - if (check_routine_name(name) || - copy_db_to(&db) || - (!(res= new (thd->mem_root) sp_name(&db, name, false)))) + if (unlikely(check_routine_name(name)) || + unlikely(copy_db_to(&db)) || + unlikely((!(res= new (thd->mem_root) sp_name(&db, name, false))))) return NULL; return res; } @@ -6023,7 +6085,7 @@ sp_name *LEX::make_sp_name(THD *thd, const LEX_CSTRING *name) sp_name *LEX::make_sp_name_package_routine(THD *thd, const LEX_CSTRING *name) { sp_name *res= make_sp_name(thd, name); - if (res && strchr(res->m_name.str, '.')) + if (likely(res) && unlikely(strchr(res->m_name.str, '.'))) { my_error(ER_SP_WRONG_NAME, MYF(0), res->m_name.str); res= NULL; @@ -6037,15 +6099,16 @@ sp_name *LEX::make_sp_name(THD *thd, const LEX_CSTRING *name1, { sp_name *res; LEX_CSTRING norm_name1; - if (!name1->str || - !thd->make_lex_string(&norm_name1, name1->str, name1->length) || - check_db_name((LEX_STRING *) &norm_name1)) + if (unlikely(!name1->str) || + unlikely(!thd->make_lex_string(&norm_name1, name1->str, + name1->length)) || + unlikely(check_db_name((LEX_STRING *) &norm_name1))) { my_error(ER_WRONG_DB_NAME, MYF(0), name1->str); return NULL; } - if (check_routine_name(name2) || - (!(res= new (thd->mem_root) 
sp_name(&norm_name1, name2, true)))) + if (unlikely(check_routine_name(name2)) || + unlikely(!(res= new (thd->mem_root) sp_name(&norm_name1, name2, true)))) return NULL; return res; } @@ -6058,7 +6121,7 @@ sp_head *LEX::make_sp_head(THD *thd, const sp_name *name, sp_head *sp; /* Order is important here: new - reset - init */ - if ((sp= new sp_head(package, sph))) + if (likely((sp= new sp_head(package, sph)))) { sp->reset_thd_mem_root(thd); sp->init(this); @@ -6210,13 +6273,13 @@ bool LEX::sp_change_context(THD *thd, const sp_pcontext *ctx, bool exclusive) if ((n= spcont->diff_handlers(ctx, exclusive))) { sp_instr_hpop *hpop= new (thd->mem_root) sp_instr_hpop(ip++, spcont, n); - if (hpop == NULL || sphead->add_instr(hpop)) + if (unlikely(hpop == NULL) || unlikely(sphead->add_instr(hpop))) return true; } if ((n= spcont->diff_cursors(ctx, exclusive))) { sp_instr_cpop *cpop= new (thd->mem_root) sp_instr_cpop(ip++, spcont, n); - if (cpop == NULL || sphead->add_instr(cpop)) + if (unlikely(cpop == NULL) || unlikely(sphead->add_instr(cpop))) return true; } return false; @@ -6226,7 +6289,7 @@ bool LEX::sp_change_context(THD *thd, const sp_pcontext *ctx, bool exclusive) bool LEX::sp_leave_statement(THD *thd, const LEX_CSTRING *label_name) { sp_label *lab= spcont->find_label(label_name); - if (!lab) + if (unlikely(!lab)) { my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "LEAVE", label_name->str); return true; @@ -6266,7 +6329,7 @@ bool LEX::sp_push_goto_label(THD *thd, const LEX_CSTRING *label_name) sp_label *lab= spcont->find_goto_label(label_name, false); if (lab) { - if (lab->ip != 0) + if (unlikely(lab->ip != 0)) { my_error(ER_SP_LABEL_REDEFINE, MYF(0), label_name->str); return true; @@ -6310,9 +6373,9 @@ bool LEX::sp_exit_block(THD *thd, sp_label *lab, Item *when) sp_instr_jump_if_not(sphead->instructions(), spcont, when, thd->lex); - if (i == NULL || - sphead->add_instr(i) || - sp_exit_block(thd, lab)) + if (unlikely(i == NULL) || + unlikely(sphead->add_instr(i)) || + unlikely(sp_exit_block(thd, lab))) return true; i->backpatch(sphead->instructions(), spcont); return false; @@ -6322,7 +6385,7 @@ bool LEX::sp_exit_block(THD *thd, sp_label *lab, Item *when) bool LEX::sp_exit_statement(THD *thd, Item *item) { sp_label *lab= spcont->find_label_current_loop_start(); - if (!lab) + if (unlikely(!lab)) { my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "EXIT", ""); return true; @@ -6335,7 +6398,7 @@ bool LEX::sp_exit_statement(THD *thd, Item *item) bool LEX::sp_exit_statement(THD *thd, const LEX_CSTRING *label_name, Item *item) { sp_label *lab= spcont->find_label(label_name); - if (!lab || lab->type != sp_label::ITERATION) + if (unlikely(!lab || lab->type != sp_label::ITERATION)) { my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "EXIT", label_name->str); return true; @@ -6347,7 +6410,7 @@ bool LEX::sp_exit_statement(THD *thd, const LEX_CSTRING *label_name, Item *item) bool LEX::sp_iterate_statement(THD *thd, const LEX_CSTRING *label_name) { sp_label *lab= spcont->find_label(label_name); - if (!lab || lab->type != sp_label::ITERATION) + if (unlikely(!lab || lab->type != sp_label::ITERATION)) { my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "ITERATE", label_name->str); return true; @@ -6383,9 +6446,9 @@ bool LEX::sp_continue_loop(THD *thd, sp_label *lab, Item *when) sp_instr_jump_if_not(sphead->instructions(), spcont, when, thd->lex); - if (i == NULL || - sphead->add_instr(i) || - sp_continue_loop(thd, lab)) + if (unlikely(i == NULL) || + unlikely(sphead->add_instr(i)) || + unlikely(sp_continue_loop(thd, lab))) return true; 
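Most of the changes in this hunk wrap error paths in unlikely() (and the rare success-only checks in likely()). These are branch-prediction hints; on GCC and Clang such macros typically expand to __builtin_expect, so the error branches stay off the hot path. A rough sketch of the idea, using hypothetical sketch_* names rather than the server's own macro definitions:

// Illustrative only; MariaDB defines likely()/unlikely() in its own
// portability headers, this is not copied from the source tree.
#if defined(__GNUC__) || defined(__clang__)
#  define sketch_likely(x)   __builtin_expect(!!(x), 1)
#  define sketch_unlikely(x) __builtin_expect(!!(x), 0)
#else
#  define sketch_likely(x)   (x)
#  define sketch_unlikely(x) (x)
#endif

// Typical use, matching the pattern applied throughout this patch:
//   if (sketch_unlikely(instr == NULL || add_instr_failed))
//     return true;    // error path, predicted not taken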
i->backpatch(sphead->instructions(), spcont); return false; @@ -6395,7 +6458,7 @@ bool LEX::sp_continue_loop(THD *thd, sp_label *lab, Item *when) bool LEX::sp_continue_statement(THD *thd, Item *when) { sp_label *lab= spcont->find_label_current_loop_start(); - if (!lab) + if (unlikely(!lab)) { my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "CONTINUE", ""); return true; @@ -6483,11 +6546,11 @@ bool LEX::sp_while_loop_expression(THD *thd, Item *expr) { sp_instr_jump_if_not *i= new (thd->mem_root) sp_instr_jump_if_not(sphead->instructions(), spcont, expr, this); - return i == NULL || - /* Jumping forward */ - sphead->push_backpatch(thd, i, spcont->last_label()) || - sphead->new_cont_backpatch(i) || - sphead->add_instr(i); + return (unlikely(i == NULL) || + /* Jumping forward */ + unlikely(sphead->push_backpatch(thd, i, spcont->last_label())) || + unlikely(sphead->new_cont_backpatch(i)) || + unlikely(sphead->add_instr(i))); } @@ -6496,8 +6559,8 @@ bool LEX::sp_while_loop_finalize(THD *thd) sp_label *lab= spcont->last_label(); /* Jumping back */ sp_instr_jump *i= new (thd->mem_root) sp_instr_jump(sphead->instructions(), spcont, lab->ip); - if (i == NULL || - sphead->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(sphead->add_instr(i))) return true; sphead->do_cont_backpatch(); return false; @@ -6510,13 +6573,13 @@ Item *LEX::create_and_link_Item_trigger_field(THD *thd, { Item_trigger_field *trg_fld; - if (trg_chistics.event == TRG_EVENT_INSERT && !new_row) + if (unlikely(trg_chistics.event == TRG_EVENT_INSERT && !new_row)) { my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), "OLD", "on INSERT"); return NULL; } - if (trg_chistics.event == TRG_EVENT_DELETE && new_row) + if (unlikely(trg_chistics.event == TRG_EVENT_DELETE && new_row)) { my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), "NEW", "on DELETE"); return NULL; @@ -6538,22 +6601,64 @@ Item *LEX::create_and_link_Item_trigger_field(THD *thd, Let us add this item to list of all Item_trigger_field objects in trigger. */ - if (trg_fld) - trg_table_fields.link_in_list(trg_fld, &trg_fld->next_trg_field); + if (likely(trg_fld)) + trg_table_fields.link_in_list(trg_fld, &trg_fld->next_trg_field); return trg_fld; } +Item *LEX::make_item_colon_ident_ident(THD *thd, + const Lex_ident_cli_st *ca, + const Lex_ident_cli_st *cb) +{ + Lex_ident_sys a(thd, ca), b(thd, cb); + if (a.is_null() || b.is_null()) + return NULL; // OEM + if (!is_trigger_new_or_old_reference(&a)) + { + thd->parse_error(); + return NULL; + } + bool new_row= (a.str[0] == 'N' || a.str[0] == 'n'); + return create_and_link_Item_trigger_field(thd, &b, new_row); +} + + +Item *LEX::make_item_sysvar(THD *thd, + enum_var_type type, + const LEX_CSTRING *name, + const LEX_CSTRING *component) + +{ + Item *item; + DBUG_ASSERT(name->str); + /* + "SELECT @@global.global.variable" is not allowed + Note, "global" can come through TEXT_STRING_sys. 
+ */ + if (component->str && unlikely(check_reserved_words(name))) + { + thd->parse_error(); + return NULL; + } + if (unlikely(!(item= get_system_var(thd, type, name, component)))) + return NULL; + if (!((Item_func_get_system_var*) item)->is_written_to_binlog()) + set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_VARIABLE); + return item; +} + + Item_param *LEX::add_placeholder(THD *thd, const LEX_CSTRING *name, const char *start, const char *end) { - if (!thd->m_parser_state->m_lip.stmt_prepare_mode) + if (unlikely(!thd->m_parser_state->m_lip.stmt_prepare_mode)) { thd->parse_error(ER_SYNTAX_ERROR, start); return NULL; } - if (!parsing_options.allows_variable) + if (unlikely(!parsing_options.allows_variable)) { my_error(ER_VIEW_SELECT_VARIABLE, MYF(0)); return NULL; @@ -6562,7 +6667,7 @@ Item_param *LEX::add_placeholder(THD *thd, const LEX_CSTRING *name, Query_fragment pos(thd, sphead, start, end); Item_param *item= new (thd->mem_root) Item_param(thd, name, pos.pos(), pos.length()); - if (!item || param_list.push_back(item, thd->mem_root)) + if (unlikely(!item) || unlikely(param_list.push_back(item, thd->mem_root))) { my_error(ER_OUT_OF_RESOURCES, MYF(0)); return NULL; @@ -6590,8 +6695,8 @@ bool LEX::add_resignal_statement(THD *thd, const sp_condition_value *v) Item *LEX::create_item_ident_nospvar(THD *thd, - const LEX_CSTRING *a, - const LEX_CSTRING *b) + const Lex_ident_sys_st *a, + const Lex_ident_sys_st *b) { DBUG_ASSERT(this == thd->lex); /* @@ -6606,7 +6711,7 @@ Item *LEX::create_item_ident_nospvar(THD *thd, return create_and_link_Item_trigger_field(thd, b, new_row); } - if (current_select->no_table_names_allowed) + if (unlikely(current_select->no_table_names_allowed)) { my_error(ER_TABLENAME_NOT_ALLOWED_HERE, MYF(0), a->str, thd->where); return NULL; @@ -6622,13 +6727,13 @@ Item *LEX::create_item_ident_nospvar(THD *thd, Item_splocal *LEX::create_item_spvar_row_field(THD *thd, const Sp_rcontext_handler *rh, - const LEX_CSTRING *a, - const LEX_CSTRING *b, + const Lex_ident_sys *a, + const Lex_ident_sys *b, sp_variable *spv, const char *start, const char *end) { - if (!parsing_options.allows_variable) + if (unlikely(!parsing_options.allows_variable)) { my_error(ER_VIEW_SELECT_VARIABLE, MYF(0)); return NULL; @@ -6639,24 +6744,24 @@ Item_splocal *LEX::create_item_spvar_row_field(THD *thd, if (spv->field_def.is_table_rowtype_ref() || spv->field_def.is_cursor_rowtype_ref()) { - if (!(item= new (thd->mem_root) - Item_splocal_row_field_by_name(thd, rh, a, b, spv->offset, - &type_handler_null, - pos.pos(), pos.length()))) + if (unlikely(!(item= new (thd->mem_root) + Item_splocal_row_field_by_name(thd, rh, a, b, spv->offset, + &type_handler_null, + pos.pos(), pos.length())))) return NULL; } else { uint row_field_offset; const Spvar_definition *def; - if (!(def= spv->find_row_field(a, b, &row_field_offset))) + if (unlikely(!(def= spv->find_row_field(a, b, &row_field_offset)))) return NULL; - if (!(item= new (thd->mem_root) - Item_splocal_row_field(thd, rh, a, b, - spv->offset, row_field_offset, - def->type_handler(), - pos.pos(), pos.length()))) + if (unlikely(!(item= new (thd->mem_root) + Item_splocal_row_field(thd, rh, a, b, + spv->offset, row_field_offset, + def->type_handler(), + pos.pos(), pos.length())))) return NULL; } #ifdef DBUG_ASSERT_EXISTS @@ -6671,7 +6776,7 @@ my_var *LEX::create_outvar(THD *thd, const LEX_CSTRING *name) { const Sp_rcontext_handler *rh; sp_variable *spv; - if ((spv= find_variable(name, &rh))) + if (likely((spv= find_variable(name, &rh)))) return result ? 
new (thd->mem_root) my_var_sp(rh, name, spv->offset, spv->type_handler(), sphead) : @@ -6687,7 +6792,7 @@ my_var *LEX::create_outvar(THD *thd, { const Sp_rcontext_handler *rh; sp_variable *t; - if (!(t= find_variable(a, &rh))) + if (unlikely(!(t= find_variable(a, &rh)))) { my_error(ER_SP_UNDECLARED_VAR, MYF(0), a->str); return NULL; @@ -6705,11 +6810,12 @@ my_var *LEX::create_outvar(THD *thd, Item *LEX::create_item_func_nextval(THD *thd, Table_ident *table_ident) { TABLE_LIST *table; - if (!(table= current_select->add_table_to_list(thd, table_ident, 0, - TL_OPTION_SEQUENCE, - TL_WRITE_ALLOW_WRITE, - MDL_SHARED_WRITE))) + if (unlikely(!(table= current_select->add_table_to_list(thd, table_ident, 0, + TL_OPTION_SEQUENCE, + TL_WRITE_ALLOW_WRITE, + MDL_SHARED_WRITE)))) return NULL; + thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); return new (thd->mem_root) Item_func_nextval(thd, table); } @@ -6717,11 +6823,12 @@ Item *LEX::create_item_func_nextval(THD *thd, Table_ident *table_ident) Item *LEX::create_item_func_lastval(THD *thd, Table_ident *table_ident) { TABLE_LIST *table; - if (!(table= current_select->add_table_to_list(thd, table_ident, 0, - TL_OPTION_SEQUENCE, - TL_READ, - MDL_SHARED_READ))) + if (unlikely(!(table= current_select->add_table_to_list(thd, table_ident, 0, + TL_OPTION_SEQUENCE, + TL_READ, + MDL_SHARED_READ)))) return NULL; + thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); return new (thd->mem_root) Item_func_lastval(thd, table); } @@ -6731,7 +6838,8 @@ Item *LEX::create_item_func_nextval(THD *thd, const LEX_CSTRING *name) { Table_ident *table_ident; - if (!(table_ident= new (thd->mem_root) Table_ident(thd, db, name, false))) + if (unlikely(!(table_ident= + new (thd->mem_root) Table_ident(thd, db, name, false)))) return NULL; return create_item_func_nextval(thd, table_ident); } @@ -6742,7 +6850,8 @@ Item *LEX::create_item_func_lastval(THD *thd, const LEX_CSTRING *name) { Table_ident *table_ident; - if (!(table_ident= new (thd->mem_root) Table_ident(thd, db, name, false))) + if (unlikely(!(table_ident= + new (thd->mem_root) Table_ident(thd, db, name, false)))) return NULL; return create_item_func_lastval(thd, table_ident); } @@ -6753,10 +6862,10 @@ Item *LEX::create_item_func_setval(THD *thd, Table_ident *table_ident, bool is_used) { TABLE_LIST *table; - if (!(table= current_select->add_table_to_list(thd, table_ident, 0, - TL_OPTION_SEQUENCE, - TL_WRITE_ALLOW_WRITE, - MDL_SHARED_WRITE))) + if (unlikely(!(table= current_select->add_table_to_list(thd, table_ident, 0, + TL_OPTION_SEQUENCE, + TL_WRITE_ALLOW_WRITE, + MDL_SHARED_WRITE)))) return NULL; return new (thd->mem_root) Item_func_setval(thd, table, nextval, round, is_used); @@ -6764,38 +6873,45 @@ Item *LEX::create_item_func_setval(THD *thd, Table_ident *table_ident, Item *LEX::create_item_ident(THD *thd, - const LEX_CSTRING *a, - const LEX_CSTRING *b, - const char *start, const char *end) + const Lex_ident_cli_st *ca, + const Lex_ident_cli_st *cb) { + const char *start= ca->pos(); + const char *end= cb->end(); const Sp_rcontext_handler *rh; sp_variable *spv; - if ((spv= find_variable(a, &rh)) && + DBUG_ASSERT(thd->m_parser_state->m_lip.get_buf() <= start); + DBUG_ASSERT(start <= end); + DBUG_ASSERT(end <= thd->m_parser_state->m_lip.get_end_of_query()); + Lex_ident_sys a(thd, ca), b(thd, cb); + if (a.is_null() || b.is_null()) + return NULL; // OEM + if ((spv= find_variable(&a, &rh)) && (spv->field_def.is_row() || spv->field_def.is_table_rowtype_ref() || 
spv->field_def.is_cursor_rowtype_ref())) - return create_item_spvar_row_field(thd, rh, a, b, spv, start, end); + return create_item_spvar_row_field(thd, rh, &a, &b, spv, start, end); - if ((thd->variables.sql_mode & MODE_ORACLE) && b->length == 7) + if ((thd->variables.sql_mode & MODE_ORACLE) && b.length == 7) { if (!my_strnncoll(system_charset_info, - (const uchar *) b->str, 7, + (const uchar *) b.str, 7, (const uchar *) "NEXTVAL", 7)) - return create_item_func_nextval(thd, &null_clex_str, a); + return create_item_func_nextval(thd, &null_clex_str, &a); else if (!my_strnncoll(system_charset_info, - (const uchar *) b->str, 7, + (const uchar *) b.str, 7, (const uchar *) "CURRVAL", 7)) - return create_item_func_lastval(thd, &null_clex_str, a); + return create_item_func_lastval(thd, &null_clex_str, &a); } - return create_item_ident_nospvar(thd, a, b); + return create_item_ident_nospvar(thd, &a, &b); } Item *LEX::create_item_ident(THD *thd, - const LEX_CSTRING *a, - const LEX_CSTRING *b, - const LEX_CSTRING *c) + const Lex_ident_sys_st *a, + const Lex_ident_sys_st *b, + const Lex_ident_sys_st *c) { const char *schema= (thd->client_capabilities & CLIENT_NO_SCHEMA ? NullS : a->str); @@ -6826,23 +6942,29 @@ Item *LEX::create_item_ident(THD *thd, } -Item *LEX::create_item_limit(THD *thd, - const LEX_CSTRING *a, - const char *start, const char *end) +Item *LEX::create_item_limit(THD *thd, const Lex_ident_cli_st *ca) { + DBUG_ASSERT(thd->m_parser_state->m_lip.get_buf() <= ca->pos()); + DBUG_ASSERT(ca->pos() <= ca->end()); + DBUG_ASSERT(ca->end() <= thd->m_parser_state->m_lip.get_end_of_query()); + const Sp_rcontext_handler *rh; sp_variable *spv; - if (!(spv= find_variable(a, &rh))) + Lex_ident_sys sa(thd, ca); + if (sa.is_null()) + return NULL; // EOM + if (!(spv= find_variable(&sa, &rh))) { - my_error(ER_SP_UNDECLARED_VAR, MYF(0), a->str); + my_error(ER_SP_UNDECLARED_VAR, MYF(0), sa.str); return NULL; } - Query_fragment pos(thd, sphead, start, end); + Query_fragment pos(thd, sphead, ca->pos(), ca->end()); Item_splocal *item; - if (!(item= new (thd->mem_root) Item_splocal(thd, rh, a, - spv->offset, spv->type_handler(), - pos.pos(), pos.length()))) + if (unlikely(!(item= new (thd->mem_root) + Item_splocal(thd, rh, &sa, + spv->offset, spv->type_handler(), + pos.pos(), pos.length())))) return NULL; #ifdef DBUG_ASSERT_EXISTS item->m_sp= sphead; @@ -6858,21 +6980,28 @@ Item *LEX::create_item_limit(THD *thd, Item *LEX::create_item_limit(THD *thd, - const LEX_CSTRING *a, - const LEX_CSTRING *b, - const char *start, const char *end) + const Lex_ident_cli_st *ca, + const Lex_ident_cli_st *cb) { + DBUG_ASSERT(thd->m_parser_state->m_lip.get_buf() <= ca->pos()); + DBUG_ASSERT(ca->pos() <= cb->end()); + DBUG_ASSERT(cb->end() <= thd->m_parser_state->m_lip.get_end_of_query()); + const Sp_rcontext_handler *rh; sp_variable *spv; - if (!(spv= find_variable(a, &rh))) + Lex_ident_sys sa(thd, ca), sb(thd, cb); + if (unlikely(sa.is_null() || sb.is_null())) + return NULL; // EOM + if (!(spv= find_variable(&sa, &rh))) { - my_error(ER_SP_UNDECLARED_VAR, MYF(0), a->str); + my_error(ER_SP_UNDECLARED_VAR, MYF(0), sa.str); return NULL; } // Qualified %TYPE variables are not possible DBUG_ASSERT(!spv->field_def.column_type_ref()); Item_splocal *item; - if (!(item= create_item_spvar_row_field(thd, rh, a, b, spv, start, end))) + if (unlikely(!(item= create_item_spvar_row_field(thd, rh, &sa, &sb, spv, + ca->pos(), cb->end())))) return NULL; if (!item->is_valid_limit_clause_variable_with_error()) return NULL; @@ -6885,15 +7014,17 @@ 
bool LEX::set_user_variable(THD *thd, const LEX_CSTRING *name, Item *val) { Item_func_set_user_var *item; set_var_user *var; - if (!(item= new (thd->mem_root) Item_func_set_user_var(thd, name, val)) || - !(var= new (thd->mem_root) set_var_user(item))) + if (unlikely(!(item= new (thd->mem_root) Item_func_set_user_var(thd, name, + val))) || + unlikely(!(var= new (thd->mem_root) set_var_user(item)))) + return true; + if (unlikely(var_list.push_back(var, thd->mem_root))) return true; - var_list.push_back(var, thd->mem_root); return false; } -Item *LEX::create_item_ident_nosp(THD *thd, LEX_CSTRING *name) +Item *LEX::create_item_ident_nosp(THD *thd, Lex_ident_sys_st *name) { if (current_select->parsing_place != IN_HAVING || current_select->get_in_sum_expr() > 0) @@ -6905,10 +7036,14 @@ Item *LEX::create_item_ident_nosp(THD *thd, LEX_CSTRING *name) } -Item *LEX::create_item_ident_sp(THD *thd, LEX_CSTRING *name, +Item *LEX::create_item_ident_sp(THD *thd, Lex_ident_sys_st *name, const char *start, const char *end) { + DBUG_ASSERT(thd->m_parser_state->m_lip.get_buf() <= start); + DBUG_ASSERT(start <= end); + DBUG_ASSERT(end <= thd->m_parser_state->m_lip.get_end_of_query()); + const Sp_rcontext_handler *rh; sp_variable *spv; DBUG_ASSERT(spcont); @@ -6931,7 +7066,7 @@ Item *LEX::create_item_ident_sp(THD *thd, LEX_CSTRING *name, new (thd->mem_root) Item_splocal(thd, rh, name, spv->offset, spv->type_handler(), pos.pos(), pos.length()); - if (splocal == NULL) + if (unlikely(splocal == NULL)) return NULL; #ifdef DBUG_ASSERT_EXISTS splocal->m_sp= sphead; @@ -6942,9 +7077,9 @@ Item *LEX::create_item_ident_sp(THD *thd, LEX_CSTRING *name, if (thd->variables.sql_mode & MODE_ORACLE) { - if (!my_strcasecmp(system_charset_info, name->str, "SQLCODE")) + if (lex_string_eq(name, STRING_WITH_LEN("SQLCODE"))) return new (thd->mem_root) Item_func_sqlcode(thd); - if (!my_strcasecmp(system_charset_info, name->str, "SQLERRM")) + if (lex_string_eq(name, STRING_WITH_LEN("SQLERRM"))) return new (thd->mem_root) Item_func_sqlerrm(thd); } return create_item_ident_nosp(thd, name); @@ -7004,7 +7139,7 @@ bool LEX::set_default_system_variable(enum_var_type var_type, sys_var *var= find_sys_var(thd, name->str, name->length); if (!var) return true; - if (!var->is_struct()) + if (unlikely(!var->is_struct())) { my_error(ER_VARIABLE_IS_NOT_STRUCT, MYF(0), name->str); return true; @@ -7019,7 +7154,7 @@ bool LEX::set_system_variable(enum_var_type var_type, { sys_var *var= find_sys_var(thd, name->str, name->length); DBUG_ASSERT(thd->is_error() || var != NULL); - return var ? set_system_variable(var_type, var, &null_clex_str, val) : true; + return likely(var) ? 
set_system_variable(var_type, var, &null_clex_str, val) : true; } @@ -7029,14 +7164,15 @@ bool LEX::set_system_variable(THD *thd, enum_var_type var_type, Item *val) { sys_var *tmp; - if (check_reserved_words(name1) || - !(tmp= find_sys_var_ex(thd, name2->str, name2->length, true, false))) + if (unlikely(check_reserved_words(name1)) || + unlikely(!(tmp= find_sys_var_ex(thd, name2->str, name2->length, true, + false)))) { my_error(ER_UNKNOWN_STRUCTURED_VARIABLE, MYF(0), (int) name1->length, name1->str); return true; } - if (!tmp->is_struct()) + if (unlikely(!tmp->is_struct())) { my_error(ER_VARIABLE_IS_NOT_STRUCT, MYF(0), name2->str); return true; @@ -7049,17 +7185,17 @@ bool LEX::set_trigger_field(const LEX_CSTRING *name1, const LEX_CSTRING *name2, Item *val) { DBUG_ASSERT(is_trigger_new_or_old_reference(name1)); - if (name1->str[0]=='O' || name1->str[0]=='o') + if (unlikely(name1->str[0]=='O' || name1->str[0]=='o')) { my_error(ER_TRG_CANT_CHANGE_ROW, MYF(0), "OLD", ""); return true; } - if (trg_chistics.event == TRG_EVENT_DELETE) + if (unlikely(trg_chistics.event == TRG_EVENT_DELETE)) { my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), "NEW", "on DELETE"); return true; } - if (trg_chistics.action_time == TRG_ACTION_AFTER) + if (unlikely(trg_chistics.action_time == TRG_ACTION_AFTER)) { my_error(ER_TRG_CANT_CHANGE_ROW, MYF(0), "NEW", "after "); return true; @@ -7366,8 +7502,8 @@ Item *st_select_lex::build_cond_for_grouping_fields(THD *thd, Item *cond, } else new_cond= new (thd->mem_root) Item_cond_or(thd); - if (!new_cond) - return 0; + if (unlikely(!new_cond)) + return 0; List_iterator li(*((Item_cond*) cond)->argument_list()); Item *item; while ((item=li++)) @@ -7380,7 +7516,7 @@ Item *st_select_lex::build_cond_for_grouping_fields(THD *thd, Item *cond, } Item *fix= build_cond_for_grouping_fields(thd, item, no_top_clones & cond_and); - if (!fix) + if (unlikely(!fix)) { if (cond_and) continue; @@ -7398,7 +7534,7 @@ Item *st_select_lex::build_cond_for_grouping_fields(THD *thd, Item *cond, switch (new_cond->argument_list()->elements) { case 0: - return 0; + return 0; case 1: return new_cond->argument_list()->head(); default: @@ -7413,12 +7549,12 @@ int set_statement_var_if_exists(THD *thd, const char *var_name, size_t var_name_length, ulonglong value) { sys_var *sysvar; - if (thd->lex->sql_command == SQLCOM_CREATE_VIEW) + if (unlikely(thd->lex->sql_command == SQLCOM_CREATE_VIEW)) { my_error(ER_VIEW_SELECT_CLAUSE, MYF(0), "[NO]WAIT"); return 1; } - if (thd->lex->sphead) + if (unlikely(thd->lex->sphead)) { my_error(ER_SP_BADSTATEMENT, MYF(0), "[NO]WAIT"); return 1; @@ -7429,7 +7565,8 @@ int set_statement_var_if_exists(THD *thd, const char *var_name, set_var *var= new (thd->mem_root) set_var(thd, OPT_SESSION, sysvar, &null_clex_str, item); - if (!item || !var || thd->lex->stmt_var_list.push_back(var, thd->mem_root)) + if (unlikely(!item) || unlikely(!var) || + unlikely(thd->lex->stmt_var_list.push_back(var, thd->mem_root))) { my_error(ER_OUT_OF_RESOURCES, MYF(0)); return 1; @@ -7452,7 +7589,7 @@ bool LEX::sp_add_cfetch(THD *thd, const LEX_CSTRING *name) i= new (thd->mem_root) sp_instr_cfetch(sphead->instructions(), spcont, offset, !(thd->variables.sql_mode & MODE_ORACLE)); - if (i == NULL || sphead->add_instr(i)) + if (unlikely(i == NULL) || unlikely(sphead->add_instr(i))) return true; return false; } @@ -7462,10 +7599,10 @@ bool LEX::create_or_alter_view_finalize(THD *thd, Table_ident *table_ident) { sql_command= SQLCOM_CREATE_VIEW; /* first table in list is target VIEW name */ - if 
(!select_lex.add_table_to_list(thd, table_ident, NULL, - TL_OPTION_UPDATING, - TL_IGNORE, - MDL_EXCLUSIVE)) + if (unlikely(!select_lex.add_table_to_list(thd, table_ident, NULL, + TL_OPTION_UPDATING, + TL_IGNORE, + MDL_EXCLUSIVE))) return true; query_tables->open_strategy= TABLE_LIST::OPEN_STUB; return false; @@ -7476,13 +7613,13 @@ bool LEX::add_alter_view(THD *thd, uint16 algorithm, enum_view_suid suid, Table_ident *table_ident) { - if (sphead) + if (unlikely(sphead)) { my_error(ER_SP_BADSTATEMENT, MYF(0), "ALTER VIEW"); return true; } - if (!(create_view= new (thd->mem_root) - Create_view_info(VIEW_ALTER, algorithm, suid))) + if (unlikely(!(create_view= new (thd->mem_root) + Create_view_info(VIEW_ALTER, algorithm, suid)))) return true; return create_or_alter_view_finalize(thd, table_ident); } @@ -7492,13 +7629,13 @@ bool LEX::add_create_view(THD *thd, DDL_options_st ddl, uint16 algorithm, enum_view_suid suid, Table_ident *table_ident) { - if (set_create_options_with_check(ddl)) + if (unlikely(set_create_options_with_check(ddl))) return true; - if (!(create_view= new (thd->mem_root) - Create_view_info(ddl.or_replace() ? - VIEW_CREATE_OR_REPLACE : - VIEW_CREATE_NEW, - algorithm, suid))) + if (unlikely(!(create_view= new (thd->mem_root) + Create_view_info(ddl.or_replace() ? + VIEW_CREATE_OR_REPLACE : + VIEW_CREATE_NEW, + algorithm, suid)))) return true; return create_or_alter_view_finalize(thd, table_ident); } @@ -7510,10 +7647,10 @@ bool LEX::call_statement_start(THD *thd, sp_name *name) const Sp_handler *sph= &sp_handler_procedure; sql_command= SQLCOM_CALL; value_list.empty(); - if (sph->sp_resolve_package_routine(thd, thd->lex->sphead, - name, &sph, &pkgname)) + if (unlikely(sph->sp_resolve_package_routine(thd, thd->lex->sphead, + name, &sph, &pkgname))) return true; - if (!(m_sql_cmd= new (thd->mem_root) Sql_cmd_call(name, sph))) + if (unlikely(!(m_sql_cmd= new (thd->mem_root) Sql_cmd_call(name, sph)))) return true; sph->add_used_routine(this, thd, name); if (pkgname.m_name.length) @@ -7525,7 +7662,7 @@ bool LEX::call_statement_start(THD *thd, sp_name *name) bool LEX::call_statement_start(THD *thd, const LEX_CSTRING *name) { sp_name *spname= make_sp_name(thd, name); - return !spname || call_statement_start(thd, spname); + return unlikely(!spname) || call_statement_start(thd, spname); } @@ -7533,7 +7670,7 @@ bool LEX::call_statement_start(THD *thd, const LEX_CSTRING *name1, const LEX_CSTRING *name2) { sp_name *spname= make_sp_name(thd, name1, name2); - return !spname || call_statement_start(thd, spname); + return unlikely(!spname) || call_statement_start(thd, spname); } @@ -7550,12 +7687,13 @@ sp_package *LEX::create_package_start(THD *thd, DDL_options_st options) { sp_package *pkg; - if (sphead) + + if (unlikely(sphead)) { my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), sph->type_str()); return NULL; } - if (set_command_with_check(command, options)) + if (unlikely(set_command_with_check(command, options))) return NULL; if (sph->type() == TYPE_ENUM_PACKAGE_BODY) { @@ -7577,7 +7715,7 @@ sp_package *LEX::create_package_start(THD *thd, sp_head *spec; int ret= sp_handler_package_spec. 
sp_cache_routine_reentrant(thd, name_arg, &spec); - if (!spec) + if (unlikely(!spec)) { if (!ret) my_error(ER_SP_DOES_NOT_EXIST, MYF(0), @@ -7585,7 +7723,7 @@ sp_package *LEX::create_package_start(THD *thd, return 0; } } - if (!(pkg= new sp_package(this, name_arg, sph))) + if (unlikely(!(pkg= new sp_package(this, name_arg, sph)))) return NULL; pkg->reset_thd_mem_root(thd); pkg->init(this); @@ -7613,7 +7751,8 @@ bool LEX::create_package_finalize(THD *thd, return true; } sphead->m_body.length= body_end - body_start; - if (!(sphead->m_body.str= thd->strmake(body_start, sphead->m_body.length))) + if (unlikely(!(sphead->m_body.str= thd->strmake(body_start, + sphead->m_body.length)))) return true; size_t not_used; @@ -7670,20 +7809,24 @@ Item *LEX::make_item_func_replace(THD *thd, } -bool SELECT_LEX::vers_push_field(THD *thd, TABLE_LIST *table, const LEX_CSTRING field_name) +bool SELECT_LEX::vers_push_field(THD *thd, TABLE_LIST *table, + const LEX_CSTRING field_name) { DBUG_ASSERT(field_name.str); Item_field *fld= new (thd->mem_root) Item_field(thd, &context, - table->db.str, table->alias.str, &field_name); - if (!fld || item_list.push_back(fld)) + table->db.str, + table->alias.str, + &field_name); + if (unlikely(!fld) || unlikely(item_list.push_back(fld))) return true; if (thd->lex->view_list.elements) { LEX_CSTRING *l; - if (!(l= thd->make_clex_string(field_name.str, field_name.length))) + if (unlikely(!(l= thd->make_clex_string(field_name.str, + field_name.length))) || + unlikely(thd->lex->view_list.push_back(l))) return true; - thd->lex->view_list.push_back(l); } return false; @@ -7875,3 +8018,277 @@ void st_select_lex::pushdown_cond_into_where_clause(THD *thd, Item *cond, *remaining_cond= cond; } + + +Item *LEX::create_item_qualified_asterisk(THD *thd, + const Lex_ident_sys_st *name) +{ + Item *item; + if (!(item= new (thd->mem_root) Item_field(thd, current_context(), + NullS, name->str, + &star_clex_str))) + return NULL; + current_select->with_wild++; + return item; +} + + +Item *LEX::make_item_func_call_generic(THD *thd, Lex_ident_cli_st *cdb, + Lex_ident_cli_st *cname, List *args) +{ + Lex_ident_sys db(thd, cdb), name(thd, cname); + if (db.is_null() || name.is_null()) + return NULL; // EOM + /* + The following in practice calls: + Create_sp_func::create() + and builds a stored function. + + However, it's important to maintain the interface between the + parser and the implementation in item_create.cc clean, + since this will change with WL#2128 (SQL PATH): + - INFORMATION_SCHEMA.version() is the SQL 99 syntax for the native + function version(), + - MySQL.version() is the SQL 2003 syntax for the native function + version() (a vendor can specify any schema). + */ + + if (!name.str || check_db_name((LEX_STRING*) static_cast(&db))) + { + my_error(ER_WRONG_DB_NAME, MYF(0), db.str); + return NULL; + } + if (check_routine_name(&name)) + return NULL; + + Create_qfunc *builder= find_qualified_function_builder(thd); + DBUG_ASSERT(builder); + return builder->create_with_db(thd, &db, &name, true, args); +} + + +Item *LEX::create_item_qualified_asterisk(THD *thd, + const Lex_ident_sys_st *a, + const Lex_ident_sys_st *b) +{ + Item *item; + const char* schema= thd->client_capabilities & CLIENT_NO_SCHEMA ? 
+ NullS : a->str; + if (!(item= new (thd->mem_root) Item_field(thd, current_context(), + schema, b->str, + &star_clex_str))) + return NULL; + current_select->with_wild++; + return item; +} + + +bool Lex_ident_sys_st::copy_ident_cli(THD *thd, const Lex_ident_cli_st *str) +{ + return thd->to_ident_sys_alloc(this, str); +} + +bool Lex_ident_sys_st::copy_keyword(THD *thd, const Lex_ident_cli_st *str) +{ + return thd->make_lex_string(static_cast(this), + str->str, str->length) == NULL; +} + +bool Lex_ident_sys_st::copy_or_convert(THD *thd, + const Lex_ident_cli_st *src, + CHARSET_INFO *cs) +{ + if (!src->is_8bit()) + return copy_keyword(thd, src); // 7bit string makes a wellformed identifier + return convert(thd, src, cs); +} + + +bool Lex_ident_sys_st::copy_sys(THD *thd, const LEX_CSTRING *src) +{ + if (thd->check_string_for_wellformedness(src->str, src->length, + system_charset_info)) + return true; + return thd->make_lex_string(this, src->str, src->length) == NULL; +} + + +bool Lex_ident_sys_st::convert(THD *thd, + const LEX_CSTRING *src, CHARSET_INFO *cs) +{ + LEX_STRING tmp; + if (thd->convert_with_error(system_charset_info, &tmp, cs, + src->str, src->length)) + return true; + str= tmp.str; + length= tmp.length; + return false; +} + + +bool Lex_ident_sys_st::to_size_number(ulonglong *to) const +{ + ulonglong number; + uint text_shift_number= 0; + longlong prefix_number; + const char *start_ptr= str; + size_t str_len= length; + const char *end_ptr= start_ptr + str_len; + int error; + prefix_number= my_strtoll10(start_ptr, (char**) &end_ptr, &error); + if (likely((start_ptr + str_len - 1) == end_ptr)) + { + switch (end_ptr[0]) + { + case 'g': + case 'G': text_shift_number+=30; break; + case 'm': + case 'M': text_shift_number+=20; break; + case 'k': + case 'K': text_shift_number+=10; break; + default: + my_error(ER_WRONG_SIZE_NUMBER, MYF(0)); + return true; + } + if (unlikely(prefix_number >> 31)) + { + my_error(ER_SIZE_OVERFLOW_ERROR, MYF(0)); + return true; + } + number= prefix_number << text_shift_number; + } + else + { + my_error(ER_WRONG_SIZE_NUMBER, MYF(0)); + return true; + } + *to= number; + return false; +} + + +bool LEX::part_values_current(THD *thd) +{ + partition_element *elem= part_info->curr_part_elem; + if (!is_partition_management()) + { + if (unlikely(part_info->part_type != VERSIONING_PARTITION)) + { + my_error(ER_PARTITION_WRONG_TYPE, MYF(0), "SYSTEM_TIME"); + return true; + } + } + else + { + DBUG_ASSERT(create_last_non_select_table); + DBUG_ASSERT(create_last_non_select_table->table_name.str); + // FIXME: other ALTER commands? 
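For readers of to_size_number() above: it accepts a decimal number followed by a K, M or G suffix (case-insensitive) and shifts the value by 10, 20 or 30 bits; a string without a recognised suffix fails with ER_WRONG_SIZE_NUMBER, and a prefix that needs more than 31 bits fails with ER_SIZE_OVERFLOW_ERROR. A small standalone sketch of the same arithmetic (parse_size_number is illustrative, with simplified error handling):

#include <cstdlib>

// "10K" -> 10240, "16M" -> 16777216, "1G" -> 1073741824.
// Returns false on success, true on error, as the original does.
static bool parse_size_number(const char *s, unsigned long long *to)
{
  char *end= nullptr;
  unsigned long long n= std::strtoull(s, &end, 10);
  unsigned shift;
  switch (*end)
  {
  case 'k': case 'K': shift= 10; break;
  case 'm': case 'M': shift= 20; break;
  case 'g': case 'G': shift= 30; break;
  default: return true;                 // no recognised suffix
  }
  if (n >= (1ULL << 31))                // mirrors the prefix_number >> 31 check
    return true;                        // overflow
  *to= n << shift;
  return false;
}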
+ my_error(ER_VERS_WRONG_PARTS, MYF(0), + create_last_non_select_table->table_name.str); + return true; + } + elem->type(partition_element::CURRENT); + DBUG_ASSERT(part_info->vers_info); + part_info->vers_info->now_part= elem; + if (unlikely(part_info->init_column_part(thd))) + return true; + return false; +} + + +bool LEX::part_values_history(THD *thd) +{ + partition_element *elem= part_info->curr_part_elem; + if (!is_partition_management()) + { + if (unlikely(part_info->part_type != VERSIONING_PARTITION)) + { + my_error(ER_PARTITION_WRONG_TYPE, MYF(0), "SYSTEM_TIME"); + return true; + } + } + else + { + part_info->vers_init_info(thd); + elem->id= UINT_MAX32; + } + DBUG_ASSERT(part_info->vers_info); + if (unlikely(part_info->vers_info->now_part)) + { + DBUG_ASSERT(create_last_non_select_table); + DBUG_ASSERT(create_last_non_select_table->table_name.str); + my_error(ER_VERS_WRONG_PARTS, MYF(0), + create_last_non_select_table->table_name.str); + return true; + } + elem->type(partition_element::HISTORY); + if (unlikely(part_info->init_column_part(thd))) + return true; + return false; +} + + +bool LEX::last_field_generated_always_as_row_start_or_end(Lex_ident *p, + const char *type, + uint flag) +{ + if (unlikely(p->str)) + { + my_error(ER_VERS_DUPLICATE_ROW_START_END, MYF(0), type, + last_field->field_name.str); + return true; + } + last_field->flags|= (flag | NOT_NULL_FLAG); + DBUG_ASSERT(p); + *p= last_field->field_name; + return false; +} + + + +bool LEX::last_field_generated_always_as_row_start() +{ + Vers_parse_info &info= vers_get_info(); + Lex_ident *p= &info.as_row.start; + return last_field_generated_always_as_row_start_or_end(p, "START", + VERS_SYS_START_FLAG); +} + + +bool LEX::last_field_generated_always_as_row_end() +{ + Vers_parse_info &info= vers_get_info(); + Lex_ident *p= &info.as_row.end; + return last_field_generated_always_as_row_start_or_end(p, "END", + VERS_SYS_END_FLAG); +} + + +bool LEX::tvc_finalize() +{ + mysql_init_select(this); + if (unlikely(!(current_select->tvc= + new (thd->mem_root) + table_value_constr(many_values, + current_select, + current_select->options)))) + return true; + many_values.empty(); + return false; +} + + +bool LEX::tvc_finalize_derived() +{ + derived_tables|= DERIVED_SUBQUERY; + if (unlikely(!expr_allows_subselect || sql_command == (int)SQLCOM_PURGE)) + { + thd->parse_error(); + return true; + } + if (current_select->linkage == GLOBAL_OPTIONS_TYPE || + unlikely(mysql_new_select(this, 1, NULL))) + return true; + current_select->linkage= DERIVED_TABLE_TYPE; + return tvc_finalize(); +} diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 89e3abe284f..c3ef33ea9dd 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -37,12 +37,16 @@ /** - A string with metadata. + A string with metadata. Usually points to a string in the client + character set, but unlike Lex_ident_cli_st (see below) it does not + necessarily point to a query fragment. It can also point to memory + of other kinds (e.g. an additional THD allocated memory buffer + not overlapping with the current query text). + We'll add more flags here eventually, to know if the string has, e.g.: - multi-byte characters - bad byte sequences - backslash escapes: 'a\nb' - - separator escapes: 'a''b' and reuse the original query fragments instead of making the string copy too early, in Lex_input_stream::get_text(). 
This will allow to avoid unnecessary copying, as well as @@ -50,9 +54,30 @@ */ struct Lex_string_with_metadata_st: public LEX_CSTRING { +private: bool m_is_8bit; // True if the string has 8bit characters + char m_quote; // Quote character, or 0 if not quoted public: void set_8bit(bool is_8bit) { m_is_8bit= is_8bit; } + void set_metadata(bool is_8bit, char quote) + { + m_is_8bit= is_8bit; + m_quote= quote; + } + void set(const char *s, size_t len, bool is_8bit, char quote) + { + str= s; + length= len; + set_metadata(is_8bit, quote); + } + void set(const LEX_CSTRING *s, bool is_8bit, char quote) + { + ((LEX_CSTRING &)*this)= *s; + set_metadata(is_8bit, quote); + } + bool is_8bit() const { return m_is_8bit; } + bool is_quoted() const { return m_quote != '\0'; } + char quote() const { return m_quote; } // Get string repertoire by the 8-bit flag and the character set uint repertoire(CHARSET_INFO *cs) const { @@ -67,6 +92,76 @@ public: }; +/* + Used to store identifiers in the client character set. + Points to a query fragment. +*/ +struct Lex_ident_cli_st: public Lex_string_with_metadata_st +{ +public: + void set_keyword(const char *s, size_t len) + { + set(s, len, false, '\0'); + } + void set_ident(const char *s, size_t len, bool is_8bit) + { + set(s, len, is_8bit, '\0'); + } + void set_ident_quoted(const char *s, size_t len, bool is_8bit, char quote) + { + set(s, len, is_8bit, quote); + } + void set_unquoted(const LEX_CSTRING *s, bool is_8bit) + { + set(s, is_8bit, '\0'); + } + const char *pos() const { return str - is_quoted(); } + const char *end() const { return str + length + is_quoted(); } +}; + + +class Lex_ident_cli: public Lex_ident_cli_st +{ +public: + Lex_ident_cli(const LEX_CSTRING *s, bool is_8bit) + { + set_unquoted(s, is_8bit); + } + Lex_ident_cli(const char *s, size_t len) + { + set_ident(s, len, false); + } +}; + + +struct Lex_ident_sys_st: public LEX_CSTRING +{ +public: + bool copy_ident_cli(THD *thd, const Lex_ident_cli_st *str); + bool copy_keyword(THD *thd, const Lex_ident_cli_st *str); + bool copy_sys(THD *thd, const LEX_CSTRING *str); + bool convert(THD *thd, const LEX_CSTRING *str, CHARSET_INFO *cs); + bool copy_or_convert(THD *thd, const Lex_ident_cli_st *str, CHARSET_INFO *cs); + bool is_null() const { return str == NULL; } + bool to_size_number(ulonglong *to) const; +}; + + +class Lex_ident_sys: public Lex_ident_sys_st +{ +public: + Lex_ident_sys(THD *thd, const Lex_ident_cli_st *str) + { + if (copy_ident_cli(thd, str)) + ((LEX_CSTRING &) *this)= null_clex_str; + } + Lex_ident_sys() + { + ((LEX_CSTRING &) *this)= null_clex_str; + } +}; + + enum sub_select_type { UNSPECIFIED_TYPE, @@ -173,7 +268,7 @@ struct LEX_TYPE #define LEX_YYSTYPE void * #else #include "lex_symbol.h" -#if MYSQL_LEX +#ifdef MYSQL_LEX #include "item_func.h" /* Cast_target used in sql_yacc.h */ #include "sql_get_diagnostics.h" /* Types used in sql_yacc.h */ #include "sp_pcontext.h" @@ -186,13 +281,13 @@ struct LEX_TYPE #endif // describe/explain types -#define DESCRIBE_NORMAL 1 -#define DESCRIBE_EXTENDED 2 +#define DESCRIBE_NORMAL 1 +#define DESCRIBE_EXTENDED 2 /* This is not within #ifdef because we want "EXPLAIN PARTITIONS ..." to produce additional "partitions" column even if partitioning is not compiled in. 
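For illustration (not part of the patch): Lex_ident_cli_st above keeps the quote character alongside the body of a quoted identifier, so pos() and end() can widen the stored (str, length) span by one character on each side and recover the token exactly as it appears in the query text. A minimal standalone sketch with a stand-in struct (not the real class):

// How a quote flag recovers the full quoted token span from the query buffer.
#include <cstdio>

struct ident_token                   // simplified stand-in for Lex_ident_cli_st
{
  const char *str;                   // identifier body inside the query buffer
  size_t length;                     // body length, quotes excluded
  char quote;                        // quoting character, or 0 if not quoted

  bool is_quoted() const { return quote != '\0'; }
  // Widen the span by one character on each side when quoted.
  const char *pos() const { return str - is_quoted(); }
  const char *end() const { return str + length + is_quoted(); }
};

int main()
{
  const char *query= "SELECT `a b` FROM t1";
  ident_token tok= { query + 8, 3, '`' };                      // body: a b
  printf("%.*s\n", (int)(tok.end() - tok.pos()), tok.pos());   // prints `a b`
  return 0;
}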
*/ -#define DESCRIBE_PARTITIONS 4 +#define DESCRIBE_PARTITIONS 4 #ifdef MYSQL_SERVER @@ -224,24 +319,24 @@ enum enum_sp_aggregate_type GROUP_AGGREGATE }; -const LEX_STRING sp_data_access_name[]= +const LEX_CSTRING sp_data_access_name[]= { - { C_STRING_WITH_LEN("") }, - { C_STRING_WITH_LEN("CONTAINS SQL") }, - { C_STRING_WITH_LEN("NO SQL") }, - { C_STRING_WITH_LEN("READS SQL DATA") }, - { C_STRING_WITH_LEN("MODIFIES SQL DATA") } + { STRING_WITH_LEN("") }, + { STRING_WITH_LEN("CONTAINS SQL") }, + { STRING_WITH_LEN("NO SQL") }, + { STRING_WITH_LEN("READS SQL DATA") }, + { STRING_WITH_LEN("MODIFIES SQL DATA") } }; -#define DERIVED_SUBQUERY 1 -#define DERIVED_VIEW 2 +#define DERIVED_SUBQUERY 1 +#define DERIVED_VIEW 2 #define DERIVED_WITH 4 enum enum_view_create_mode { - VIEW_CREATE_NEW, // check that there are not such VIEW/table - VIEW_ALTER, // check that VIEW .frm with such name exists - VIEW_CREATE_OR_REPLACE // check only that there are not such table + VIEW_CREATE_NEW, // check that there are not such VIEW/table + VIEW_ALTER, // check that VIEW .frm with such name exists + VIEW_CREATE_OR_REPLACE // check only that there are not such table }; @@ -273,8 +368,8 @@ enum enum_drop_mode }; /* Options to add_table_to_list() */ -#define TL_OPTION_UPDATING 1 -#define TL_OPTION_FORCE_INDEX 2 +#define TL_OPTION_UPDATING 1 +#define TL_OPTION_FORCE_INDEX 2 #define TL_OPTION_IGNORE_LEAVES 4 #define TL_OPTION_ALIAS 8 #define TL_OPTION_SEQUENCE 16 @@ -749,7 +844,7 @@ public: st_select_lex *union_distinct; /* pointer to the last UNION DISTINCT */ bool describe; /* union exec() called for EXPLAIN */ - Procedure *last_procedure; /* Pointer to procedure, if such exists */ + Procedure *last_procedure; /* Pointer to procedure, if such exists */ bool columns_are_renamed; @@ -770,7 +865,8 @@ public: bool is_excluded() { return prev == NULL; } /* UNION methods */ - bool prepare(THD *thd, select_result *result, ulong additional_options); + bool prepare(TABLE_LIST *derived_arg, select_result *sel_result, + ulong additional_options); bool optimize(); bool exec(); bool exec_recursive(); @@ -839,7 +935,7 @@ public: /* Point to the LEX in which it was created, used in view subquery detection. - TODO: make also st_select_lex::parent_stmt_lex (see THD::stmt_lex) + TODO: make also st_select_lex::parent_stmt_lex (see LEX::stmt_lex) and use st_select_lex::parent_lex & st_select_lex::parent_stmt_lex instead of global (from THD) references where it is possible. */ @@ -859,7 +955,7 @@ public: List item_list; /* list of fields & expressions */ List pre_fix; /* above list before fix_fields */ - bool is_item_list_lookup; + bool is_item_list_lookup; /* Usualy it is pointer to ftfunc_list_alloc, but in union used to create fake select_lex for calling mysql_select under results of union @@ -942,6 +1038,11 @@ public: uint select_n_where_fields; /* reserved for exists 2 in */ uint select_n_reserved; + /* + it counts the number of bit fields in the SELECT list. These are used when DISTINCT is + converted to a GROUP BY involving BIT fields. + */ + uint hidden_bit_fields; enum_parsing_place parsing_place; /* where we are parsing expression */ enum_parsing_place context_analysis_place; /* where we are in prepare */ bool with_sum_func; /* sum function indicator */ @@ -983,6 +1084,7 @@ public: */ bool subquery_in_having; /* TRUE <=> this SELECT is correlated w.r.t. 
some ancestor select */ + bool with_all_modifier; /* used for selects in union */ bool is_correlated; /* This variable is required to ensure proper work of subqueries and @@ -1087,11 +1189,11 @@ public: bool add_order_to_list(THD *thd, Item *item, bool asc); bool add_gorder_to_list(THD *thd, Item *item, bool asc); TABLE_LIST* add_table_to_list(THD *thd, Table_ident *table, - LEX_CSTRING *alias, - ulong table_options, - thr_lock_type flags= TL_UNLOCK, + LEX_CSTRING *alias, + ulong table_options, + thr_lock_type flags= TL_UNLOCK, enum_mdl_type mdl_type= MDL_SHARED_READ, - List *hints= 0, + List *hints= 0, List *partition_names= 0, LEX_STRING *option= 0); TABLE_LIST* get_table_list(); @@ -1242,7 +1344,7 @@ public: void collect_grouping_fields(THD *thd, ORDER *grouping_list); void check_cond_extraction_for_grouping_fields(Item *cond); Item *build_cond_for_grouping_fields(THD *thd, Item *cond, - bool no_to_clones); + bool no_to_clones); List window_specs; void prepare_add_window_spec(THD *thd); @@ -2064,6 +2166,16 @@ public: void reset(char *buff, size_t length); + /** + The main method to scan the next token, with token contraction processing + for LALR(2) resolution, e.g. translate "WITH" followed by "ROLLUP" + to a single token WITH_ROLLUP_SYM. + */ + int lex_token(union YYSTYPE *yylval, THD *thd); + + void reduce_digest_token(uint token_left, uint token_right); + +private: /** Set the echo mode. @@ -2191,15 +2303,6 @@ public: return m_ptr; } - /** - End of file indicator for the query text to parse. - @return true if there are no more characters to parse - */ - bool eof() - { - return (m_ptr >= m_end_of_query); - } - /** End of file indicator for the query text to parse. @param n number of characters expected @@ -2210,24 +2313,6 @@ public: return ((m_ptr + n) >= m_end_of_query); } - /** Get the raw query buffer. */ - const char *get_buf() - { - return m_buf; - } - - /** Get the pre-processed query buffer. */ - const char *get_cpp_buf() - { - return m_cpp_buf; - } - - /** Get the end of the raw query buffer. */ - const char *get_end_of_query() - { - return m_end_of_query; - } - /** Mark the stream position as the start of a new token. */ void start_token() { @@ -2250,10 +2335,65 @@ public: m_cpp_tok_start= m_cpp_ptr; } + /** + Get the maximum length of the utf8-body buffer. + The utf8 body can grow because of the character set conversion and escaping. + */ + size_t get_body_utf8_maximum_length(THD *thd); + + /** Get the length of the current token, in the raw buffer. */ + uint yyLength() + { + /* + The assumption is that the lexical analyser is always 1 character ahead, + which the -1 account for. + */ + DBUG_ASSERT(m_ptr > m_tok_start); + return (uint) ((m_ptr - m_tok_start) - 1); + } + + /** + Test if a lookahead token was already scanned by lex_token(), + for LALR(2) resolution. + */ + bool has_lookahead() const + { + return lookahead_token >= 0; + } + +public: + + /** + End of file indicator for the query text to parse. + @return true if there are no more characters to parse + */ + bool eof() + { + return (m_ptr >= m_end_of_query); + } + + /** Get the raw query buffer. */ + const char *get_buf() + { + return m_buf; + } + + /** Get the pre-processed query buffer. */ + const char *get_cpp_buf() + { + return m_cpp_buf; + } + + /** Get the end of the raw query buffer. */ + const char *get_end_of_query() + { + return m_end_of_query; + } + /** Get the token start position, in the raw buffer. */ const char *get_tok_start() { - return m_tok_start; + return has_lookahead() ? 
m_tok_start_prev : m_tok_start; } void set_cpp_tok_start(const char *pos) @@ -2267,39 +2407,16 @@ public: return m_tok_end; } - /** Get the previous token start position, in the raw buffer. */ - const char *get_tok_start_prev() - { - return m_tok_start_prev; - } - /** Get the current stream pointer, in the raw buffer. */ const char *get_ptr() { return m_ptr; } - /** Get the length of the current token, in the raw buffer. */ - uint yyLength() - { - /* - The assumption is that the lexical analyser is always 1 character ahead, - which the -1 account for. - */ - DBUG_ASSERT(m_ptr > m_tok_start); - return (uint) ((m_ptr - m_tok_start) - 1); - } - - /** Get the previus token start position, in the pre-processed buffer. */ - const char *get_cpp_start_prev() - { - return m_cpp_tok_start_prev; - } - /** Get the token start position, in the pre-processed buffer. */ const char *get_cpp_tok_start() { - return m_cpp_tok_start; + return has_lookahead() ? m_cpp_tok_start_prev : m_cpp_tok_start; } /** Get the token end position, in the pre-processed buffer. */ @@ -2353,32 +2470,19 @@ public: return (size_t) (m_body_utf8_ptr - m_body_utf8); } - /** - Get the maximum length of the utf8-body buffer. - The utf8 body can grow because of the character set conversion and escaping. - */ - size_t get_body_utf8_maximum_length(THD *thd); - void body_utf8_start(THD *thd, const char *begin_ptr); void body_utf8_append(const char *ptr); void body_utf8_append(const char *ptr, const char *end_ptr); void body_utf8_append_ident(THD *thd, - const LEX_CSTRING *txt, + const Lex_string_with_metadata_st *txt, const char *end_ptr); void body_utf8_append_escape(THD *thd, const LEX_CSTRING *txt, CHARSET_INFO *txt_cs, const char *end_ptr, my_wc_t sep); - /** Current thread. */ - THD *m_thd; - - /** Current line number. */ - uint yylineno; - - /** Interface with bison, value of the last token parsed. */ - LEX_YYSTYPE yylval; +private: /** LALR(2) resolution, look ahead token. Value of the next token to return, if any, @@ -2395,9 +2499,20 @@ public: void add_digest_token(uint token, LEX_YYSTYPE yylval); - void reduce_digest_token(uint token_left, uint token_right); + bool consume_comment(int remaining_recursions_permitted); + int lex_one_token(union YYSTYPE *yylval, THD *thd); + int find_keyword(Lex_ident_cli_st *str, uint len, bool function); + LEX_CSTRING get_token(uint skip, uint length); + int scan_ident_sysvar(THD *thd, Lex_ident_cli_st *str); + int scan_ident_start(THD *thd, Lex_ident_cli_st *str); + int scan_ident_middle(THD *thd, Lex_ident_cli_st *str, + CHARSET_INFO **cs, my_lex_states *); + int scan_ident_delimited(THD *thd, Lex_ident_cli_st *str); + bool get_7bit_or_8bit_ident(THD *thd, uchar *last_char); + + /** Current thread. */ + THD *m_thd; -private: /** Pointer to the current position in the raw input stream. */ char *m_ptr; @@ -2483,6 +2598,15 @@ public: */ bool multi_statements; + /** Current line number. */ + uint yylineno; + + /** + Current statement digest instrumentation. + */ + sql_digest_state* m_digest; + +private: /** State of the lexical analyser for comments. */ enum_comment_state in_comment; enum_comment_state in_comment_saved; @@ -2509,13 +2633,9 @@ public: NOTE: this member must be used within MYSQLlex() function only. */ CHARSET_INFO *m_underscore_cs; - - /** - Current statement digest instrumentation. - */ - sql_digest_state* m_digest; }; + /** Abstract representation of a statement. This class is an interface between the parser and the runtime. 
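For illustration (not part of the patch): lex_token() adds one token of lookahead so that adjacent keywords such as WITH followed by ROLLUP can be contracted into a single WITH_ROLLUP_SYM token, which is why get_tok_start()/get_cpp_tok_start() fall back to the previous token start while a lookahead token is buffered. A minimal standalone sketch of that contraction scheme; the token names and the raw-token source are made up:

// One-token lookahead that fuses two adjacent tokens into a combined token.
#include <cstdio>
#include <vector>

enum token { T_WITH, T_ROLLUP, T_WITH_ROLLUP, T_IDENT, T_EOF };

struct lookahead_lexer
{
  std::vector<token> input;   // stands in for the raw scanner
  size_t pos;
  int lookahead;              // <0 means "no token buffered"

  token next_raw() { return pos < input.size() ? input[pos++] : T_EOF; }

  token next()
  {
    if (lookahead >= 0)       // hand out the buffered second token first
    {
      token t= (token) lookahead;
      lookahead= -1;
      return t;
    }
    token t= next_raw();
    if (t == T_WITH)
    {
      token t2= next_raw();   // scan one token ahead
      if (t2 == T_ROLLUP)
        return T_WITH_ROLLUP; // contract the pair into one token
      lookahead= t2;          // otherwise keep it for the next call
    }
    return t;
  }
};

int main()
{
  lookahead_lexer lex= { { T_IDENT, T_WITH, T_ROLLUP, T_WITH, T_IDENT }, 0, -1 };
  for (token t; (t= lex.next()) != T_EOF; )
    printf("%d ", (int) t);   // prints: 3 2 0 3  (IDENT WITH_ROLLUP WITH IDENT)
  return 0;
}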
@@ -2691,10 +2811,25 @@ struct LEX: public Query_tables_list // type information CHARSET_INFO *charset; + /* + LEX which represents current statement (conventional, SP or PS) + + For example during view parsing THD::lex will point to the views LEX and + lex::stmt_lex will point to LEX of the statement where the view will be + included + + Currently it is used to have always correct select numbering inside + statement (LEX::current_select_number) without storing and restoring a + global counter which was THD::select_number. + + TODO: make some unified statement representation (now SP has different) + to store such data like LEX::current_select_number. + */ + LEX *stmt_lex; LEX_CSTRING name; const char *help_arg; - const char *backup_dir; /* For RESTORE/BACKUP */ + const char *backup_dir; /* For RESTORE/BACKUP */ const char* to_log; /* For PURGE MASTER LOGS TO */ const char* x509_subject,*x509_issuer,*ssl_cipher; String *wild; /* Wildcard in SHOW {something} LIKE 'wild'*/ @@ -2732,7 +2867,7 @@ struct LEX: public Query_tables_list List ref_list; List users_list; List columns; - List *insert_list,field_list,value_list,update_list; + List *insert_list,field_list,value_list,update_list; List many_values; List var_list; List stmt_var_list; //SET_STATEMENT values @@ -2785,10 +2920,10 @@ public: Column_definition *last_field; Item_sum *in_sum_func; udf_func udf; - HA_CHECK_OPT check_opt; // check/repair options + HA_CHECK_OPT check_opt; // check/repair options Table_specification_st create_info; Key *last_key; - LEX_MASTER_INFO mi; // used by CHANGE MASTER + LEX_MASTER_INFO mi; // used by CHANGE MASTER LEX_SERVER_OPTIONS server_options; LEX_CSTRING relay_log_connection_name; USER_RESOURCES mqh; @@ -2827,7 +2962,7 @@ public: */ bool parse_vcol_expr; - enum SSL_type ssl_type; /* defined in violite.h */ + enum SSL_type ssl_type; // defined in violite.h enum enum_duplicates duplicates; enum enum_tx_isolation tx_isolation; enum enum_ha_read_modes ha_read_mode; @@ -2888,7 +3023,7 @@ public: List prepared_stmt_params; sp_head *sphead; sp_name *spname; - bool sp_lex_in_use; /* Keep track on lex usage in SPs for error handling */ + bool sp_lex_in_use; // Keep track on lex usage in SPs for error handling bool all_privileges; bool proxy_priv; @@ -3149,7 +3284,10 @@ public: void restore_backup_query_tables_list(Query_tables_list *backup); bool table_or_sp_used(); + bool is_partition_management() const; + bool part_values_current(THD *thd); + bool part_values_history(THD *thd); /** @brief check if the statement is a single-level join @@ -3181,6 +3319,11 @@ public: void init_last_field(Column_definition *field, const LEX_CSTRING *name, const CHARSET_INFO *cs); + bool last_field_generated_always_as_row_start_or_end(Lex_ident *p, + const char *type, + uint flags); + bool last_field_generated_always_as_row_start(); + bool last_field_generated_always_as_row_end(); bool set_bincmp(CHARSET_INFO *cs, bool bin); bool get_dynamic_sql_string(LEX_CSTRING *dst, String *buffer); @@ -3304,20 +3447,42 @@ public: bool sp_open_cursor(THD *thd, const LEX_CSTRING *name, List *parameters); - Item_splocal *create_item_for_sp_var(LEX_CSTRING *name, sp_variable *spvar, - const char *start, const char *end); + Item_splocal *create_item_for_sp_var(const Lex_ident_cli_st *name, + sp_variable *spvar); - Item *create_item_ident_nosp(THD *thd, LEX_CSTRING *name); - Item *create_item_ident_sp(THD *thd, LEX_CSTRING *name, - const char *start, const char *end); - Item *create_item_ident(THD *thd, LEX_CSTRING *name, - const char *start, const 
char *end) + Item *create_item_qualified_asterisk(THD *thd, const Lex_ident_sys_st *name); + Item *create_item_qualified_asterisk(THD *thd, + const Lex_ident_sys_st *a, + const Lex_ident_sys_st *b); + Item *create_item_qualified_asterisk(THD *thd, const Lex_ident_cli_st *cname) { - return sphead ? - create_item_ident_sp(thd, name, start, end) : - create_item_ident_nosp(thd, name); + Lex_ident_sys name(thd, cname); + if (name.is_null()) + return NULL; // EOM + return create_item_qualified_asterisk(thd, &name); + } + Item *create_item_qualified_asterisk(THD *thd, + const Lex_ident_cli_st *ca, + const Lex_ident_cli_st *cb) + { + Lex_ident_sys a(thd, ca), b(thd, cb); + if (a.is_null() || b.is_null()) + return NULL; // EOM + return create_item_qualified_asterisk(thd, &a, &b); } + Item *create_item_ident_nosp(THD *thd, Lex_ident_sys_st *name); + Item *create_item_ident_sp(THD *thd, Lex_ident_sys_st *name, + const char *start, const char *end); + Item *create_item_ident(THD *thd, Lex_ident_cli_st *cname) + { + Lex_ident_sys name(thd, cname); + if (name.is_null()) + return NULL; // EOM + return sphead ? + create_item_ident_sp(thd, &name, cname->pos(), cname->end()) : + create_item_ident_nosp(thd, &name); + } /* Create an Item corresponding to a qualified name: a.b when the parser is out of an SP context. @@ -3332,8 +3497,8 @@ public: - Item_ref */ Item *create_item_ident_nospvar(THD *thd, - const LEX_CSTRING *a, - const LEX_CSTRING *b); + const Lex_ident_sys_st *a, + const Lex_ident_sys_st *b); /* Create an Item corresponding to a ROW field valiable: var.field @param THD - THD, for mem_root @@ -3347,8 +3512,8 @@ public: */ Item_splocal *create_item_spvar_row_field(THD *thd, const Sp_rcontext_handler *rh, - const LEX_CSTRING *var, - const LEX_CSTRING *field, + const Lex_ident_sys *var, + const Lex_ident_sys *field, sp_variable *spvar, const char *start, const char *end); @@ -3361,15 +3526,11 @@ public: @param thd - THD, for mem_root @param a - the first name @param b - the second name - @param start - position in the query (for binary log) - @param end - end in the query (for binary log) @retval - NULL on error, or a pointer to a new Item. */ Item *create_item_ident(THD *thd, - const LEX_CSTRING *a, - const LEX_CSTRING *b, - const char *start, - const char *end); + const Lex_ident_cli_st *a, + const Lex_ident_cli_st *b); /* Create an item from its qualified name. Depending on context, it can be a table field, a table field reference, @@ -3381,9 +3542,27 @@ public: @retval - NULL on error, or a pointer to a new Item. */ Item *create_item_ident(THD *thd, - const LEX_CSTRING *a, - const LEX_CSTRING *b, - const LEX_CSTRING *c); + const Lex_ident_sys_st *a, + const Lex_ident_sys_st *b, + const Lex_ident_sys_st *c); + + Item *create_item_ident(THD *thd, + const Lex_ident_cli_st *ca, + const Lex_ident_cli_st *cb, + const Lex_ident_cli_st *cc) + { + Lex_ident_sys b(thd, cb), c(thd, cc); + if (b.is_null() || c.is_null()) + return NULL; + if (ca->pos() == cb->pos()) // SELECT .t1.col1 + { + DBUG_ASSERT(ca->length == 0); + Lex_ident_sys none; + return create_item_ident(thd, &none, &b, &c); + } + Lex_ident_sys a(thd, ca); + return a.is_null() ? 
NULL : create_item_ident(thd, &a, &b, &c); + } /* Create an item for "NEXT VALUE FOR sequence_name" @@ -3408,16 +3587,11 @@ public: Create an item for a name in LIMIT clause: LIMIT var @param THD - THD, for mem_root @param var_name - the variable name - @param start - position in the query (for binary log) - @param end - end in the query (for binary log) @retval - a new Item corresponding to the SP variable, or NULL on error (non in SP, unknown variable, wrong data type). */ - Item *create_item_limit(THD *thd, - const LEX_CSTRING *var_name, - const char *start, - const char *end); + Item *create_item_limit(THD *thd, const Lex_ident_cli_st *var_name); /* Create an item for a qualified name in LIMIT clause: LIMIT var.field @@ -3432,14 +3606,14 @@ public: wrong data type). */ Item *create_item_limit(THD *thd, - const LEX_CSTRING *var_name, - const LEX_CSTRING *field_name, - const char *start, - const char *end); + const Lex_ident_cli_st *var_name, + const Lex_ident_cli_st *field_name); Item *make_item_func_replace(THD *thd, Item *org, Item *find, Item *replace); Item *make_item_func_substr(THD *thd, Item *a, Item *b, Item *c); Item *make_item_func_substr(THD *thd, Item *a, Item *b); + Item *make_item_func_call_generic(THD *thd, Lex_ident_cli_st *db, + Lex_ident_cli_st *name, List *args); my_var *create_outvar(THD *thd, const LEX_CSTRING *name); /* @@ -3457,7 +3631,21 @@ public: Item *create_and_link_Item_trigger_field(THD *thd, const LEX_CSTRING *name, bool new_row); - + // For syntax with colon, e.g. :NEW.a or :OLD.a + Item *make_item_colon_ident_ident(THD *thd, + const Lex_ident_cli_st *a, + const Lex_ident_cli_st *b); + // For "SELECT @@var", "SELECT @@var.field" + Item *make_item_sysvar(THD *thd, + enum_var_type type, + const LEX_CSTRING *name) + { + return make_item_sysvar(thd, type, name, &null_clex_str); + } + Item *make_item_sysvar(THD *thd, + enum_var_type type, + const LEX_CSTRING *name, + const LEX_CSTRING *component); void sp_block_init(THD *thd, const LEX_CSTRING *label); void sp_block_init(THD *thd) { @@ -3529,7 +3717,7 @@ public: Item *value); sp_variable *sp_add_for_loop_upper_bound(THD *thd, Item *value) { - LEX_CSTRING name= { C_STRING_WITH_LEN("[upper_bound]") }; + LEX_CSTRING name= { STRING_WITH_LEN("[upper_bound]") }; return sp_add_for_loop_variable(thd, &name, value); } bool sp_for_loop_intrange_declarations(THD *thd, Lex_for_loop_st *loop, @@ -3753,6 +3941,34 @@ public: return create_info.vers_info; } sp_package *get_sp_package() const; + + /** + Check if the select is a simple select (not an union). 
+ @retval + 0 ok + @retval + 1 error ; In this case the error messege is sent to the client + */ + bool check_simple_select(const LEX_CSTRING *option) + { + if (current_select != &select_lex) + { + char command[80]; + strmake(command, option->str, MY_MIN(option->length, sizeof(command)-1)); + my_error(ER_CANT_USE_OPTION_HERE, MYF(0), command); + return true; + } + return false; + } + + void tvc_start() + { + field_list.empty(); + many_values.empty(); + insert_list= 0; + } + bool tvc_finalize(); + bool tvc_finalize_derived(); }; diff --git a/sql/sql_lifo_buffer.h b/sql/sql_lifo_buffer.h index 17024d15beb..62b23e7773a 100644 --- a/sql/sql_lifo_buffer.h +++ b/sql/sql_lifo_buffer.h @@ -84,7 +84,7 @@ public: start= start_arg; end= end_arg; if (end != start) - TRASH_ALLOC(start, end - start); + TRASH_ALLOC(start, size_t(end - start)); reset(); } @@ -224,7 +224,7 @@ public: { DBUG_ASSERT(unused_end >= unused_start); DBUG_ASSERT(end == unused_start); - TRASH_ALLOC(unused_start, unused_end - unused_start); + TRASH_ALLOC(unused_start, size_t(unused_end - unused_start)); end= unused_end; } /* Return pointer to start of the memory area that is occupied by the data */ diff --git a/sql/sql_list.h b/sql/sql_list.h index 311e601490b..0219c226803 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -138,6 +138,13 @@ public: first == rhs.first && last == rhs.last; } + base_list& operator=(const base_list &rhs) + { + elements= rhs.elements; + first= rhs.first; + last= elements ? rhs.last : &first; + return *this; + } inline void empty() { elements=0; first= &end_of_list; last=&first;} inline base_list() { empty(); } @@ -152,9 +159,7 @@ public: */ inline base_list(const base_list &tmp) :Sql_alloc() { - elements= tmp.elements; - first= tmp.first; - last= elements ? tmp.last : &first; + *this= tmp; } /** Construct a deep copy of the argument in memory root mem_root. 
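For illustration (not part of the patch): the new base_list::operator= above re-seats the tail pointer for an empty list (last= elements ? rhs.last : &first) because last holds the address of the slot the next push_back() writes through; copying it verbatim from an empty source would leave the copy writing into the source list. A minimal standalone sketch with stand-in types:

// Why an empty copy must point "last" at its own "first".
#include <cassert>
#include <cstddef>

struct node { void *info; node *next; };

struct tiny_list                       // simplified stand-in for base_list
{
  unsigned elements;
  node *first;
  node **last;                         // address of the slot for the next node

  void empty() { elements= 0; first= NULL; last= &first; }

  void assign(const tiny_list &rhs)
  {
    elements= rhs.elements;
    first= rhs.first;
    last= elements ? rhs.last : &first;   // empty copy must own its tail
  }

  void push_back(node *n)
  {
    n->next= NULL;
    *last= n;                          // link through whatever "last" points at
    last= &n->next;
    elements++;
  }
};

int main()
{
  tiny_list a, b;
  a.empty();
  b.assign(a);                         // copy of an empty list
  node n1= { NULL, NULL };
  b.push_back(&n1);                    // must grow b, not a
  assert(a.elements == 0 && a.first == NULL);
  assert(b.elements == 1 && b.first == &n1);
  return 0;
}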
@@ -164,7 +169,7 @@ public: */ bool copy(const base_list *rhs, MEM_ROOT *mem_root); base_list(const base_list &rhs, MEM_ROOT *mem_root) { copy(&rhs, mem_root); } - inline base_list(bool error) { } + inline base_list(bool) {} inline bool push_back(void *info) { if (((*last)=new list_node(info, &end_of_list))) @@ -533,10 +538,10 @@ public: template class List_iterator_fast :public base_list_iterator { protected: - inline T *replace(T *a) { return (T*) 0; } - inline T *replace(List &a) { return (T*) 0; } - inline void remove(void) { } - inline void after(T *a) { } + inline T *replace(T *) { return (T*) 0; } + inline T *replace(List &) { return (T*) 0; } + inline void remove(void) {} + inline void after(T *) {} inline T** ref(void) { return (T**) 0; } public: @@ -605,7 +610,7 @@ struct ilink { return (void*)my_malloc((uint)size, MYF(MY_WME | MY_FAE | ME_FATALERROR)); } - static void operator delete(void* ptr_arg, size_t size) + static void operator delete(void* ptr_arg, size_t) { my_free(ptr_arg); } diff --git a/sql/sql_load.cc b/sql/sql_load.cc index cfa92f170ab..cc5a5c7c794 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -444,6 +444,9 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list, field_iterator.set(table_list); for (; !field_iterator.end_of_fields(); field_iterator.next()) { + if (field_iterator.field() && + field_iterator.field()->invisible > VISIBLE) + continue; Item *item; if (!(item= field_iterator.create_item(thd))) DBUG_RETURN(TRUE); @@ -593,7 +596,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list, *ex->field_term, *ex->line_start, *ex->line_term, *ex->enclosed, info.escape_char, read_file_from_client, is_fifo); - if (read_info.error) + if (unlikely(read_info.error)) { if (file >= 0) mysql_file_close(file, MYF(0)); // no files in net reading @@ -625,7 +628,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list, } thd_proc_info(thd, "Reading file"); - if (!(error= MY_TEST(read_info.error))) + if (likely(!(error= MY_TEST(read_info.error)))) { table->reset_default_fields(); table->next_number_field=table->found_next_number_field; @@ -662,7 +665,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list, *ex->enclosed, skip_lines, ignore); thd_proc_info(thd, "End bulk insert"); - if (!error) + if (likely(!error)) thd_progress_next_stage(thd); if (thd->locked_tables_mode <= LTM_LOCK_TABLES && table->file->ha_end_bulk_insert() && !error) @@ -787,7 +790,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list, */ error= error || mysql_bin_log.get_log_file()->error; } - if (error) + if (unlikely(error)) goto err; } #endif /*!EMBEDDED_LIBRARY*/ @@ -1110,11 +1113,11 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, } } - if (thd->is_error()) + if (unlikely(thd->is_error())) read_info.error= 1; - - if (read_info.error) + if (unlikely(read_info.error)) break; + if (skip_lines) { skip_lines--; @@ -1129,16 +1132,16 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, { Load_data_outvar *dst= item->get_load_data_outvar_or_error(); DBUG_ASSERT(dst); - if (dst->load_data_set_no_data(thd, &read_info)) + if (unlikely(dst->load_data_set_no_data(thd, &read_info))) DBUG_RETURN(1); } } - if (thd->killed || - fill_record_n_invoke_before_triggers(thd, table, set_fields, - set_values, - ignore_check_option_errors, - TRG_EVENT_INSERT)) + if (unlikely(thd->killed) || + unlikely(fill_record_n_invoke_before_triggers(thd, table, set_fields, + set_values, + 
ignore_check_option_errors, + TRG_EVENT_INSERT))) DBUG_RETURN(1); switch (table_list->view_check_option(thd, @@ -1247,7 +1250,7 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, DBUG_RETURN(1); } - if (read_info.error) + if (unlikely(read_info.error)) break; if (skip_lines) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 09ef7a587ba..e136f271fb7 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -420,7 +420,7 @@ static bool some_non_temp_table_to_be_updated(THD *thd, TABLE_LIST *tables) /* - Implicitly commit a active transaction if statement requires so. + Check whether the statement implicitly commits an active transaction. @param thd Thread handle. @param mask Bitmask used for the SQL command match. @@ -428,7 +428,7 @@ static bool some_non_temp_table_to_be_updated(THD *thd, TABLE_LIST *tables) @return 0 No implicit commit @return 1 Do a commit */ -static bool stmt_causes_implicit_commit(THD *thd, uint mask) +bool stmt_causes_implicit_commit(THD *thd, uint mask) { LEX *lex= thd->lex; bool skip= FALSE; @@ -775,6 +775,8 @@ void init_update_queries(void) There are other statements that deal with temporary tables and open them, but which are not listed here. The thing is that the order of pre-opening temporary tables for those statements is somewhat custom. + + Note that SQLCOM_RENAME_TABLE should not be in this list! */ sql_command_flags[SQLCOM_CREATE_TABLE]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_DROP_TABLE]|= CF_PREOPEN_TMP_TABLES; @@ -789,7 +791,6 @@ void init_update_queries(void) sql_command_flags[SQLCOM_INSERT_SELECT]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_DELETE]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_DELETE_MULTI]|= CF_PREOPEN_TMP_TABLES; - sql_command_flags[SQLCOM_RENAME_TABLE]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_REPLACE_SELECT]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_SELECT]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_SET_OPTION]|= CF_PREOPEN_TMP_TABLES; @@ -952,7 +953,7 @@ static char *fgets_fn(char *buffer, size_t size, fgets_input_t input, int *error { MYSQL_FILE *in= static_cast (input); char *line= mysql_file_fgets(buffer, (int)size, in); - if (error) + if (unlikely(error)) *error= (line == NULL) ? 
ferror(in->m_file) : 0; return line; } @@ -1066,7 +1067,7 @@ static void handle_bootstrap_impl(THD *thd) #endif delete_explain_query(thd->lex); - if (bootstrap_error) + if (unlikely(bootstrap_error)) break; thd->reset_kill_query(); /* Ensure that killed_errmsg is released */ @@ -1214,13 +1215,13 @@ bool do_command(THD *thd) #ifdef WITH_WSREP if (WSREP(thd)) { - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); thd->wsrep_query_state= QUERY_IDLE; if (thd->wsrep_conflict_state==MUST_ABORT) { wsrep_client_rollback(thd); } - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); } #endif /* WITH_WSREP */ @@ -1266,15 +1267,15 @@ bool do_command(THD *thd) packet_length= my_net_read_packet(net, 1); #ifdef WITH_WSREP if (WSREP(thd)) { - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); /* these THD's are aborted or are aborting during being idle */ if (thd->wsrep_conflict_state == ABORTING) { while (thd->wsrep_conflict_state == ABORTING) { - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); my_sleep(1000); - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); } thd->store_globals(); } @@ -1284,11 +1285,11 @@ bool do_command(THD *thd) } thd->wsrep_query_state= QUERY_EXEC; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); } #endif /* WITH_WSREP */ - if (packet_length == packet_error) + if (unlikely(packet_length == packet_error)) { DBUG_PRINT("info",("Got error %d reading command from socket %s", net->error, @@ -1297,14 +1298,14 @@ bool do_command(THD *thd) #ifdef WITH_WSREP if (WSREP(thd)) { - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); if (thd->wsrep_conflict_state == MUST_ABORT) { DBUG_PRINT("wsrep",("aborted for wsrep rollback: %lu", (ulong) thd->real_id)); wsrep_client_rollback(thd); } - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); } #endif /* WITH_WSREP */ @@ -1582,7 +1583,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd->wsrep_PA_safe= true; } - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); thd->wsrep_query_state= QUERY_EXEC; if (thd->wsrep_conflict_state== RETRY_AUTOCOMMIT) { @@ -1599,14 +1600,14 @@ bool dispatch_command(enum enum_server_command command, THD *thd, my_message(ER_LOCK_DEADLOCK, "Deadlock: wsrep aborted transaction", MYF(0)); WSREP_DEBUG("Deadlock error for: %s", thd->query()); - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); thd->reset_killed(); thd->mysys_var->abort = 0; thd->wsrep_conflict_state = NO_CONFLICT; thd->wsrep_retry_counter = 0; goto dispatch_end; } - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); } #endif /* WITH_WSREP */ #if defined(ENABLED_PROFILING) @@ -1686,8 +1687,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, { LEX_CSTRING tmp; status_var_increment(thd->status_var.com_stat[SQLCOM_CHANGE_DB]); - if (thd->copy_with_error(system_charset_info, (LEX_STRING*) &tmp, - thd->charset(), packet, packet_length)) + if (unlikely(thd->copy_with_error(system_charset_info, (LEX_STRING*) &tmp, + thd->charset(), packet, packet_length))) break; if (!mysql_change_db(thd, &tmp, FALSE)) { @@ -1819,7 +1820,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd->m_digest= & thd->m_digest_state; thd->m_digest->reset(thd->m_token_array, 
max_digest_length); - if (alloc_query(thd, packet, packet_length)) + if (unlikely(alloc_query(thd, packet, packet_length))) break; // fatal error is set MYSQL_QUERY_START(thd->query(), thd->thread_id, thd->get_db(), @@ -1835,7 +1836,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd->query_length()); Parser_state parser_state; - if (parser_state.init(thd, thd->query(), thd->query_length())) + if (unlikely(parser_state.init(thd, thd->query(), thd->query_length()))) break; if (WSREP_ON) @@ -2378,10 +2379,10 @@ com_multi_end: DBUG_ASSERT((command != COM_QUIT && command != COM_STMT_CLOSE) || thd->get_stmt_da()->is_disabled()); /* wsrep BF abort in query exec phase */ - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); do_end_of_statement= thd->wsrep_conflict_state != REPLAYING && thd->wsrep_conflict_state != RETRY_AUTOCOMMIT; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); } else do_end_of_statement= true; @@ -2406,7 +2407,7 @@ com_multi_end: if (drop_more_results) thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; - if (!thd->is_error() && !thd->killed_errno()) + if (likely(!thd->is_error() && !thd->killed_errno())) mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_RESULT, 0, 0); mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_STATUS, @@ -3237,10 +3238,6 @@ mysql_execute_command(THD *thd) #endif DBUG_ENTER("mysql_execute_command"); -#ifdef WITH_PARTITION_STORAGE_ENGINE - thd->work_part_info= 0; -#endif - DBUG_ASSERT(thd->transaction.stmt.is_empty() || thd->in_sub_stmt); /* Each statement or replication event which might produce deadlock @@ -3933,7 +3930,7 @@ mysql_execute_command(THD *thd) { /* New replication created */ mi= new Master_info(&lex_mi->connection_name, relay_log_recovery); - if (!mi || mi->error()) + if (unlikely(!mi || mi->error())) { delete mi; res= 1; @@ -4043,7 +4040,7 @@ mysql_execute_command(THD *thd) copy. */ Alter_info alter_info(lex->alter_info, thd->mem_root); - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) { /* If out of memory when creating a copy of alter_info. */ res= 1; @@ -4100,6 +4097,7 @@ mysql_execute_command(THD *thd) } #ifdef WITH_PARTITION_STORAGE_ENGINE + thd->work_part_info= 0; { partition_info *part_info= thd->lex->part_info; if (part_info && !(part_info= part_info->get_clone(thd))) @@ -4185,7 +4183,7 @@ mysql_execute_command(THD *thd) thd->lex->create_info.options|= create_info.options; res= open_and_lock_tables(thd, create_info, lex->query_tables, TRUE, 0); thd->lex->create_info.options= save_thd_create_info_options; - if (res) + if (unlikely(res)) { /* Got error or warning. Set res to 1 if error */ if (!(res= thd->is_error())) @@ -4197,9 +4195,9 @@ mysql_execute_command(THD *thd) if (create_info.or_replace() && !create_info.tmp_table()) { TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, lex->query_tables, - lex->query_tables->next_global, - 0))) + if (unlikely((duplicate= unique_table(thd, lex->query_tables, + lex->query_tables->next_global, + CHECK_DUP_FOR_CREATE)))) { update_non_unique_table_error(lex->query_tables, "CREATE", duplicate); @@ -4223,13 +4221,14 @@ mysql_execute_command(THD *thd) needs to be created for every execution of a PS/SP. Note: In wsrep-patch, CTAS is handled like a regular transaction. 
*/ - if ((result= new (thd->mem_root) select_create(thd, create_table, - &create_info, - &alter_info, - select_lex->item_list, - lex->duplicates, - lex->ignore, - select_tables))) + if (unlikely((result= new (thd->mem_root) + select_create(thd, create_table, + &create_info, + &alter_info, + select_lex->item_list, + lex->duplicates, + lex->ignore, + select_tables)))) { /* CREATE from SELECT give its SELECT_LEX for SELECT, @@ -4310,7 +4309,7 @@ end_with_restore_list: HA_CREATE_INFO create_info; Alter_info alter_info(lex->alter_info, thd->mem_root); - if (thd->is_fatal_error) /* out of memory creating a copy of alter_info */ + if (unlikely(thd->is_fatal_error)) /* out of memory creating alter_info */ goto error; DBUG_ASSERT(first_table == all_tables && first_table != 0); @@ -4347,8 +4346,8 @@ end_with_restore_list: We don't need to ensure that only one user is using master_info as start_slave is protected against simultaneous usage */ - if ((mi= get_master_info(&lex_mi->connection_name, - Sql_condition::WARN_LEVEL_ERROR))) + if (unlikely((mi= get_master_info(&lex_mi->connection_name, + Sql_condition::WARN_LEVEL_ERROR)))) { if (load_error) { @@ -4891,7 +4890,7 @@ end_with_restore_list: unit->set_limit(select_lex); MYSQL_DELETE_START(thd->query()); - Protocol *save_protocol; + Protocol * UNINIT_VAR(save_protocol); bool replaced_protocol= false; if (!select_lex->item_list.is_empty()) @@ -4961,17 +4960,17 @@ end_with_restore_list: break; MYSQL_MULTI_DELETE_START(thd->query()); - if ((res= mysql_multi_delete_prepare(thd))) + if (unlikely(res= mysql_multi_delete_prepare(thd))) { MYSQL_MULTI_DELETE_DONE(1, 0); goto error; } - if (!thd->is_fatal_error) + if (likely(!thd->is_fatal_error)) { result= new (thd->mem_root) multi_delete(thd, aux_tables, lex->table_count); - if (result) + if (unlikely(result)) { res= mysql_select(thd, select_lex->get_table_list(), @@ -5132,9 +5131,9 @@ end_with_restore_list: if ((check_table_access(thd, SELECT_ACL, all_tables, FALSE, UINT_MAX, FALSE) || open_and_lock_tables(thd, all_tables, TRUE, 0))) goto error; - if (!(res= sql_set_variables(thd, lex_var_list, true))) + if (likely(!(res= sql_set_variables(thd, lex_var_list, true)))) { - if (!thd->is_error()) + if (likely(!thd->is_error())) my_ok(thd); } else @@ -6226,7 +6225,7 @@ end_with_restore_list: WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL); - if ((error= alter_server(thd, &lex->server_options))) + if (unlikely((error= alter_server(thd, &lex->server_options)))) { DBUG_PRINT("info", ("problem altering server <%s>", lex->server_options.server_name.str)); @@ -6306,6 +6305,10 @@ finish: lex->unit.cleanup(); + /* close/reopen tables that were marked to need reopen under LOCK TABLES */ + if (! thd->lex->requires_prelocking()) + thd->locked_tables_list.reopen_tables(thd, true); + if (! thd->in_sub_stmt) { if (thd->killed != NOT_KILLED) @@ -6319,7 +6322,8 @@ finish: } thd->reset_kill_query(); } - if (thd->is_error() || (thd->variables.option_bits & OPTION_MASTER_SQL_ERROR)) + if (unlikely(thd->is_error()) || + (thd->variables.option_bits & OPTION_MASTER_SQL_ERROR)) { THD_STAGE_INFO(thd, stage_rollback); trans_rollback_stmt(thd); @@ -6467,7 +6471,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) to prepend EXPLAIN to any query and receive output for it, even if the query itself redirects the output. 
*/ - if (!(result= new (thd->mem_root) select_send(thd))) + if (unlikely(!(result= new (thd->mem_root) select_send(thd)))) return 1; /* purecov: inspected */ thd->send_explain_fields(result, lex->describe, lex->analyze_stmt); @@ -6478,7 +6482,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) res= mysql_explain_union(thd, &lex->unit, result); /* Print EXPLAIN only if we don't have an error */ - if (!res) + if (likely(!res)) { /* Do like the original select_describe did: remove OFFSET from the @@ -6588,6 +6592,60 @@ static bool execute_show_status(THD *thd, TABLE_LIST *all_tables) } +/* + Find out if a table is a temporary table + + A table is a temporary table if it's a temporary table or + there has been before a temporary table that has been renamed + to the current name. + + Some examples: + A->B B is a temporary table if and only if A is a temp. + A->B, B->C Second B is temp if A is temp + A->B, A->C Second A can't be temp as if A was temp then B is temp + and Second A can only be a normal table. C is also not temp +*/ + +static TABLE *find_temporary_table_for_rename(THD *thd, + TABLE_LIST *first_table, + TABLE_LIST *cur_table) +{ + TABLE_LIST *table; + TABLE *res= 0; + bool found= 0; + DBUG_ENTER("find_temporary_table_for_rename"); + + /* Find last instance when cur_table is in TO part */ + for (table= first_table; + table != cur_table; + table= table->next_local->next_local) + { + TABLE_LIST *next= table->next_local; + + if (!strcmp(table->get_db_name(), cur_table->get_db_name()) && + !strcmp(table->get_table_name(), cur_table->get_table_name())) + { + /* Table was moved away, can't be same as 'table' */ + found= 1; + res= 0; // Table can't be a temporary table + } + if (!strcmp(next->get_db_name(), cur_table->get_db_name()) && + !strcmp(next->get_table_name(), cur_table->get_table_name())) + { + /* + Table has matching name with new name of this table. cur_table should + have same temporary type as this table. + */ + found= 1; + res= table->table; + } + } + if (!found) + res= thd->find_temporary_table(table, THD::TMP_TABLE_ANY); + DBUG_RETURN(res); +} + + static bool check_rename_table(THD *thd, TABLE_LIST *first_table, TABLE_LIST *all_tables) { @@ -6604,13 +6662,19 @@ static bool check_rename_table(THD *thd, TABLE_LIST *first_table, &table->next_local->grant.m_internal, 0, 0)) return 1; + + /* check if these are refering to temporary tables */ + table->table= find_temporary_table_for_rename(thd, first_table, table); + table->next_local->table= table->table; + TABLE_LIST old_list, new_list; /* we do not need initialize old_list and new_list because we will - come table[0] and table->next[0] there + copy table[0] and table->next[0] there */ old_list= table[0]; new_list= table->next_local[0]; + if (check_grant(thd, ALTER_ACL | DROP_ACL, &old_list, FALSE, 1, FALSE) || (!test_all_bits(table->next_local->grant.privilege, INSERT_ACL | CREATE_ACL) && @@ -6689,7 +6753,7 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, /* check access may be called twice in a row. 
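For illustration (not part of the patch): find_temporary_table_for_rename() above decides whether a name in a multi-pair RENAME still refers to a temporary table by scanning the earlier pairs: a name already renamed away cannot be temporary, a name that an earlier pair renamed *to* inherits that source, and only otherwise is the session's temporary-table list consulted. A sketch of the same rule reduced to strings; the names and helper are made up, and the real code also compares database names and returns the resolved TABLE pointer:

// Name-chasing rule for RENAME chains such as A->B, B->C.
#include <cstdio>
#include <set>
#include <string>
#include <utility>
#include <vector>

typedef std::pair<std::string, std::string> rename_pair;   // from -> to

static bool is_temporary(const std::vector<rename_pair> &renames,
                         size_t cur,
                         const std::set<std::string> &session_temp_tables)
{
  const std::string &name= renames[cur].first;
  bool found= false, temp= false;
  for (size_t i= 0; i < cur; i++)
  {
    if (renames[i].first == name)  { found= true; temp= false; } // renamed away
    if (renames[i].second == name)                               // renamed to this name
    {
      found= true;
      temp= is_temporary(renames, i, session_temp_tables);       // inherit source kind
    }
  }
  if (!found)
    temp= session_temp_tables.count(name) != 0;
  return temp;
}

int main()
{
  std::set<std::string> temp;
  temp.insert("A");                                     // A is a temporary table
  std::vector<rename_pair> renames;
  renames.push_back(rename_pair("A", "B"));             // A -> B
  renames.push_back(rename_pair("B", "C"));             // B -> C : B is temp here
  printf("%d\n", (int) is_temporary(renames, 1, temp)); // prints 1
  return 0;
}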
Don't change to same stage */ if (thd->proc_info != stage_checking_permissions.m_name) THD_STAGE_INFO(thd, stage_checking_permissions); - if ((!db || !db[0]) && !thd->db.str && !dont_check_global_grants) + if (unlikely((!db || !db[0]) && !thd->db.str && !dont_check_global_grants)) { DBUG_PRINT("error",("No database")); if (!no_errors) @@ -6698,7 +6762,7 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, DBUG_RETURN(TRUE); /* purecov: tested */ } - if ((db != NULL) && (db != any_db)) + if (likely((db != NULL) && (db != any_db))) { /* Check if this is reserved database, like information schema or @@ -6768,8 +6832,8 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, *save_priv|= sctx->master_access; DBUG_RETURN(FALSE); } - if (((want_access & ~sctx->master_access) & ~DB_ACLS) || - (! db && dont_check_global_grants)) + if (unlikely(((want_access & ~sctx->master_access) & ~DB_ACLS) || + (! db && dont_check_global_grants))) { // We can never grant this DBUG_PRINT("error",("No possible access")); if (!no_errors) @@ -6785,7 +6849,7 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, DBUG_RETURN(TRUE); /* purecov: tested */ } - if (db == any_db) + if (unlikely(db == any_db)) { /* Access granted; Allow select on *any* db. @@ -7279,7 +7343,7 @@ bool check_global_access(THD *thd, ulong want_access, bool no_errors) char command[128]; if ((thd->security_ctx->master_access & want_access)) return 0; - if (!no_errors) + if (unlikely(!no_errors)) { get_privilege_desc(command, sizeof(command), want_access); my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), command); @@ -7526,7 +7590,7 @@ void THD::reset_for_next_command(bool do_clear_error) DBUG_ASSERT(!spcont); /* not for substatements of routines */ DBUG_ASSERT(!in_sub_stmt); - if (do_clear_error) + if (likely(do_clear_error)) clear_error(1); free_list= 0; @@ -7534,8 +7598,9 @@ void THD::reset_for_next_command(bool do_clear_error) We also assign stmt_lex in lex_start(), but during bootstrap this code is executed first. */ - stmt_lex= &main_lex; stmt_lex->current_select_number= 1; - DBUG_PRINT("info", ("Lex %p stmt_lex: %p", lex, stmt_lex)); + DBUG_ASSERT(lex == &main_lex); + main_lex.stmt_lex= &main_lex; main_lex.current_select_number= 1; + DBUG_PRINT("info", ("Lex and stmt_lex: %p", &main_lex)); /* Those two lines below are theoretically unneeded as THD::cleanup_after_query() should take care of this already. @@ -7652,7 +7717,7 @@ mysql_new_select(LEX *lex, bool move_down, SELECT_LEX *select_lex) { if (!(select_lex= new (thd->mem_root) SELECT_LEX())) DBUG_RETURN(1); - select_lex->select_number= ++thd->stmt_lex->current_select_number; + select_lex->select_number= ++thd->lex->stmt_lex->current_select_number; select_lex->parent_lex= lex; /* Used in init_query. 
*/ select_lex->init_query(); select_lex->init_select(); @@ -7803,13 +7868,22 @@ static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length, com_statement_info[thd->get_command()].m_key); MYSQL_SET_STATEMENT_TEXT(thd->m_statement_psi, thd->query(), thd->query_length()); + + DBUG_EXECUTE_IF("sync.wsrep_retry_autocommit", + { + const char act[]= + "now " + "SIGNAL wsrep_retry_autocommit_reached " + "WAIT_FOR wsrep_retry_autocommit_continue"; + DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN(act))); + }); } mysql_parse(thd, rawbuf, length, parser_state, is_com_multi, is_next_command); if (WSREP(thd)) { /* wsrep BF abort in query exec phase */ - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); if (thd->wsrep_conflict_state == MUST_ABORT) { wsrep_client_rollback(thd); @@ -7818,8 +7892,11 @@ static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length, if (thd->wsrep_conflict_state == MUST_REPLAY) { + mysql_mutex_unlock(&thd->LOCK_thd_data); if (thd->lex->explain) delete_explain_query(thd->lex); + mysql_mutex_lock(&thd->LOCK_thd_data); + wsrep_replay_transaction(thd); } @@ -7861,13 +7938,13 @@ static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length, if (thd->wsrep_conflict_state != REPLAYING) thd->wsrep_retry_counter= 0; // reset } - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); thd->reset_killed(); } else { set_if_smaller(thd->wsrep_retry_counter, 0); // reset; eventually ok - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); } } @@ -7953,7 +8030,7 @@ void mysql_parse(THD *thd, char *rawbuf, uint length, bool err= parse_sql(thd, parser_state, NULL, true); - if (!err) + if (likely(!err)) { thd->m_statement_psi= MYSQL_REFINE_STATEMENT(thd->m_statement_psi, @@ -7968,7 +8045,7 @@ void mysql_parse(THD *thd, char *rawbuf, uint length, else #endif { - if (! thd->is_error()) + if (likely(! thd->is_error())) { const char *found_semicolon= parser_state->m_lip.found_semicolon; /* @@ -8062,7 +8139,7 @@ bool mysql_test_parse_for_slave(THD *thd, char *rawbuf, uint length) DBUG_ENTER("mysql_test_parse_for_slave"); Parser_state parser_state; - if (!(error= parser_state.init(thd, rawbuf, length))) + if (likely(!(error= parser_state.init(thd, rawbuf, length)))) { lex_start(thd); thd->reset_for_next_command(); @@ -8084,7 +8161,7 @@ add_proc_to_list(THD* thd, Item *item) ORDER *order; Item **item_ptr; - if (!(order = (ORDER *) thd->alloc(sizeof(ORDER)+sizeof(Item*)))) + if (unlikely(!(order = (ORDER *) thd->alloc(sizeof(ORDER)+sizeof(Item*))))) return 1; item_ptr = (Item**) (order+1); *item_ptr= item; @@ -8102,7 +8179,7 @@ bool add_to_list(THD *thd, SQL_I_List &list, Item *item,bool asc) { ORDER *order; DBUG_ENTER("add_to_list"); - if (!(order = (ORDER *) thd->alloc(sizeof(ORDER)))) + if (unlikely(!(order = (ORDER *) thd->alloc(sizeof(ORDER))))) DBUG_RETURN(1); order->item_ptr= item; order->item= &order->item_ptr; @@ -8145,25 +8222,25 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, List *partition_names, LEX_STRING *option) { - register TABLE_LIST *ptr; + TABLE_LIST *ptr; TABLE_LIST *UNINIT_VAR(previous_table_ref); /* The table preceding the current one. */ LEX_CSTRING alias_str; LEX *lex= thd->lex; DBUG_ENTER("add_table_to_list"); - if (!table) + if (unlikely(!table)) DBUG_RETURN(0); // End of memory alias_str= alias ? 
*alias : table->table; DBUG_ASSERT(alias_str.str); if (!MY_TEST(table_options & TL_OPTION_ALIAS) && - check_table_name(table->table.str, table->table.length, FALSE)) + unlikely(check_table_name(table->table.str, table->table.length, FALSE))) { my_error(ER_WRONG_TABLE_NAME, MYF(0), table->table.str); DBUG_RETURN(0); } - if (table->is_derived_table() == FALSE && table->db.str && - check_db_name((LEX_STRING*) &table->db)) + if (unlikely(table->is_derived_table() == FALSE && table->db.str && + check_db_name((LEX_STRING*) &table->db))) { my_error(ER_WRONG_DB_NAME, MYF(0), table->db.str); DBUG_RETURN(0); @@ -8171,17 +8248,17 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, if (!alias) /* Alias is case sensitive */ { - if (table->sel) + if (unlikely(table->sel)) { my_message(ER_DERIVED_MUST_HAVE_ALIAS, ER_THD(thd, ER_DERIVED_MUST_HAVE_ALIAS), MYF(0)); DBUG_RETURN(0); } /* alias_str points to table->table; Let's make a copy */ - if (!(alias_str.str= (char*) thd->memdup(alias_str.str, alias_str.length+1))) + if (unlikely(!(alias_str.str= (char*) thd->memdup(alias_str.str, alias_str.length+1)))) DBUG_RETURN(0); } - if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST)))) + if (unlikely(!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST))))) DBUG_RETURN(0); /* purecov: inspected */ if (table->db.str) { @@ -8227,7 +8304,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, DBUG_RETURN(0); } schema_table= find_schema_table(thd, &ptr->table_name); - if (!schema_table || + if (unlikely(!schema_table) || (schema_table->hidden && ((sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0 || /* @@ -8261,8 +8338,9 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, tables ; tables=tables->next_local) { - if (!my_strcasecmp(table_alias_charset, alias_str.str, tables->alias.str) && - !cmp(&ptr->db, &tables->db) && ! tables->sequence) + if (unlikely(!my_strcasecmp(table_alias_charset, alias_str.str, + tables->alias.str) && + !cmp(&ptr->db, &tables->db) && ! tables->sequence)) { my_error(ER_NONUNIQ_TABLE, MYF(0), alias_str.str); /* purecov: tested */ DBUG_RETURN(0); /* purecov: tested */ @@ -8270,7 +8348,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, } } /* Store the table reference preceding the current one. */ - if (table_list.elements > 0 && !ptr->sequence) + if (table_list.elements > 0 && likely(!ptr->sequence)) { /* table_list.next points to the last inserted TABLE_LIST->next_local' @@ -8298,7 +8376,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, We don't store sequences into the local list to hide them from INSERT and SELECT. 
*/ - if (!ptr->sequence) + if (likely(!ptr->sequence)) table_list.link_in_list(ptr, &ptr->next_local); ptr->next_name_resolution_table= NULL; #ifdef WITH_PARTITION_STORAGE_ENGINE @@ -8343,13 +8421,14 @@ bool st_select_lex::init_nested_join(THD *thd) NESTED_JOIN *nested_join; DBUG_ENTER("init_nested_join"); - if (!(ptr= (TABLE_LIST*) thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST))+ - sizeof(NESTED_JOIN)))) + if (unlikely(!(ptr= (TABLE_LIST*) thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST))+ + sizeof(NESTED_JOIN))))) DBUG_RETURN(1); nested_join= ptr->nested_join= ((NESTED_JOIN*) ((uchar*) ptr + ALIGN_SIZE(sizeof(TABLE_LIST)))); - join_list->push_front(ptr, thd->mem_root); + if (unlikely(join_list->push_front(ptr, thd->mem_root))) + DBUG_RETURN(1); ptr->embedding= embedding; ptr->join_list= join_list; ptr->alias.str="(nested_join)"; @@ -8425,8 +8504,8 @@ TABLE_LIST *st_select_lex::nest_last_join(THD *thd) List *embedded_list; DBUG_ENTER("nest_last_join"); - if (!(ptr= (TABLE_LIST*) thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST))+ - sizeof(NESTED_JOIN)))) + if (unlikely(!(ptr= (TABLE_LIST*) thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST))+ + sizeof(NESTED_JOIN))))) DBUG_RETURN(0); nested_join= ptr->nested_join= ((NESTED_JOIN*) ((uchar*) ptr + ALIGN_SIZE(sizeof(TABLE_LIST)))); @@ -8441,7 +8520,7 @@ TABLE_LIST *st_select_lex::nest_last_join(THD *thd) for (uint i=0; i < 2; i++) { TABLE_LIST *table= join_list->pop(); - if (!table) + if (unlikely(!table)) DBUG_RETURN(NULL); table->join_list= embedded_list; table->embedding= ptr; @@ -8894,7 +8973,7 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ if (((thd->security_ctx->master_access & SUPER_ACL) || thd->security_ctx->user_matches(tmp->security_ctx)) && - !wsrep_thd_is_BF(tmp, true)) + !wsrep_thd_is_BF(tmp, false)) { tmp->awake_no_mutex(kill_signal); error=0; @@ -8932,7 +9011,7 @@ static uint kill_threads_for_user(THD *thd, LEX_USER *user, *rows= 0; - if (thd->is_fatal_error) // If we run out of memory + if (unlikely(thd->is_fatal_error)) // If we run out of memory DBUG_RETURN(ER_OUT_OF_RESOURCES); DBUG_PRINT("enter", ("user: %s signal: %u", user->user.str, @@ -9002,7 +9081,7 @@ static void sql_kill(THD *thd, longlong id, killed_state state, killed_type type) { uint error; - if (!(error= kill_one_thread(thd, id, state, type))) + if (likely(!(error= kill_one_thread(thd, id, state, type)))) { if (!thd->killed) my_ok(thd); @@ -9019,7 +9098,7 @@ void sql_kill_user(THD *thd, LEX_USER *user, killed_state state) { uint error; ha_rows rows; - if (!(error= kill_threads_for_user(thd, user, state, &rows))) + if (likely(!(error= kill_threads_for_user(thd, user, state, &rows)))) my_ok(thd, rows); else { @@ -9051,7 +9130,8 @@ bool append_file_to_dir(THD *thd, const char **filename_ptr, /* Fix is using unix filename format on dos */ strmov(buff,*filename_ptr); end=convert_dirname(buff, *filename_ptr, NullS); - if (!(ptr= (char*) thd->alloc((size_t) (end-buff) + table_name->length + 1))) + if (unlikely(!(ptr= (char*) thd->alloc((size_t) (end-buff) + + table_name->length + 1)))) return 1; // End of memory *filename_ptr=ptr; strxmov(ptr,buff,table_name->str,NullS); @@ -9059,32 +9139,6 @@ bool append_file_to_dir(THD *thd, const char **filename_ptr, } -/** - Check if the select is a simple select (not an union). 
- - @retval - 0 ok - @retval - 1 error ; In this case the error messege is sent to the client -*/ - -bool check_simple_select() -{ - THD *thd= current_thd; - LEX *lex= thd->lex; - if (lex->current_select != &lex->select_lex) - { - char command[80]; - Lex_input_stream *lip= & thd->m_parser_state->m_lip; - strmake(command, lip->yylval->symbol.str, - MY_MIN(lip->yylval->symbol.length, sizeof(command)-1)); - my_error(ER_CANT_USE_OPTION_HERE, MYF(0), command); - return 1; - } - return 0; -} - - Comp_creator *comp_eq_creator(bool invert) { return invert?(Comp_creator *)&ne_creator:(Comp_creator *)&eq_creator; @@ -9424,7 +9478,7 @@ bool update_precheck(THD *thd, TABLE_LIST *tables) bool delete_precheck(THD *thd, TABLE_LIST *tables) { DBUG_ENTER("delete_precheck"); - if (tables->vers_conditions) + if (tables->vers_conditions.is_set()) { if (check_one_table_access(thd, DELETE_HISTORY_ACL, tables)) DBUG_RETURN(TRUE); @@ -9724,7 +9778,7 @@ LEX_USER *create_default_definer(THD *thd, bool role) { LEX_USER *definer; - if (! (definer= (LEX_USER*) thd->alloc(sizeof(LEX_USER)))) + if (unlikely(! (definer= (LEX_USER*) thd->alloc(sizeof(LEX_USER))))) return 0; thd->get_definer(definer, role); @@ -9759,7 +9813,7 @@ LEX_USER *create_definer(THD *thd, LEX_CSTRING *user_name, /* Create and initialize. */ - if (! (definer= (LEX_USER*) thd->alloc(sizeof(LEX_USER)))) + if (unlikely(!(definer= (LEX_USER*) thd->alloc(sizeof(LEX_USER))))) return 0; definer->user= *user_name; @@ -9822,7 +9876,8 @@ bool check_string_char_length(const LEX_CSTRING *str, uint err_msg, bool no_error) { Well_formed_prefix prefix(cs, str->str, str->length, max_char_length); - if (!prefix.well_formed_error_pos() && str->length == prefix.length()) + if (likely(!prefix.well_formed_error_pos() && + str->length == prefix.length())) return FALSE; if (!no_error) diff --git a/sql/sql_parse.h b/sql/sql_parse.h index d23da6f1b68..1027872898a 100644 --- a/sql/sql_parse.h +++ b/sql/sql_parse.h @@ -85,6 +85,7 @@ bool check_identifier_name(LEX_CSTRING *str, uint max_char_length, uint err_code, const char *param_for_err_msg); bool mysql_test_parse_for_slave(THD *thd,char *inBuf,uint length); bool sqlcom_can_generate_row_events(const THD *thd); +bool stmt_causes_implicit_commit(THD *thd, uint mask); bool is_update_query(enum enum_sql_command command); bool is_log_table_write_query(enum enum_sql_command command); bool alloc_query(THD *thd, const char *packet, size_t packet_length); @@ -119,7 +120,6 @@ bool push_new_name_resolution_context(THD *thd, TABLE_LIST *left_op, TABLE_LIST *right_op); void init_update_queries(void); -bool check_simple_select(); Item *normalize_cond(THD *thd, Item *cond); Item *negate_expression(THD *thd, Item *expr); bool check_stack_overrun(THD *thd, long margin, uchar *dummy); diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 09c59c862ad..c0ff1c54549 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2005, 2017, Oracle and/or its affiliates. - Copyright (c) 2009, 2017, SkySQL Ab. 
+ Copyright (c) 2009, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -108,14 +108,14 @@ static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR*); uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter); uint32 get_next_partition_id_list(PARTITION_ITERATOR* part_iter); + +#ifdef WITH_PARTITION_STORAGE_ENGINE static int get_part_iter_for_interval_via_mapping(partition_info *, bool, uint32 *, uchar *, uchar *, uint, uint, uint, PARTITION_ITERATOR *); static int get_part_iter_for_interval_cols_via_map(partition_info *, bool, uint32 *, uchar *, uchar *, uint, uint, uint, PARTITION_ITERATOR *); static int get_part_iter_for_interval_via_walking(partition_info *, bool, uint32 *, uchar *, uchar *, uint, uint, uint, PARTITION_ITERATOR *); - -#ifdef WITH_PARTITION_STORAGE_ENGINE static int cmp_rec_and_tuple(part_column_list_val *val, uint32 nvals_in_rec); static int cmp_rec_and_tuple_prune(part_column_list_val *val, uint32 n_vals_in_rec, @@ -343,7 +343,7 @@ static bool set_up_field_array(THD *thd, TABLE *table, if (field->flags & GET_FIXED_FIELDS_FLAG) num_fields++; } - if (num_fields > MAX_REF_PARTS) + if (unlikely(num_fields > MAX_REF_PARTS)) { char *err_str; if (is_sub_part) @@ -359,15 +359,13 @@ static bool set_up_field_array(THD *thd, TABLE *table, We are using hidden key as partitioning field */ DBUG_ASSERT(!is_sub_part); - DBUG_RETURN(result); + DBUG_RETURN(FALSE); } size_field_array= (num_fields+1)*sizeof(Field*); field_array= (Field**) thd->calloc(size_field_array); if (unlikely(!field_array)) - { - mem_alloc_error(size_field_array); - result= TRUE; - } + DBUG_RETURN(TRUE); + ptr= table->field; while ((field= *(ptr++))) { @@ -490,7 +488,6 @@ static bool create_full_part_field_array(THD *thd, TABLE *table, field_array= (Field**) thd->calloc(size_field_array); if (unlikely(!field_array)) { - mem_alloc_error(size_field_array); result= TRUE; goto end; } @@ -515,14 +512,12 @@ static bool create_full_part_field_array(THD *thd, TABLE *table, if (!(bitmap_buf= (my_bitmap_map*) thd->alloc(bitmap_buffer_size(table->s->fields)))) { - mem_alloc_error(bitmap_buffer_size(table->s->fields)); result= TRUE; goto end; } - if (my_bitmap_init(&part_info->full_part_field_set, bitmap_buf, - table->s->fields, FALSE)) + if (unlikely(my_bitmap_init(&part_info->full_part_field_set, bitmap_buf, + table->s->fields, FALSE))) { - mem_alloc_error(table->s->fields); result= TRUE; goto end; } @@ -530,9 +525,16 @@ static bool create_full_part_field_array(THD *thd, TABLE *table, full_part_field_array may be NULL if storage engine supports native partitioning. 
*/ + table->vcol_set= table->read_set= &part_info->full_part_field_set; if ((ptr= part_info->full_part_field_array)) for (; *ptr; ptr++) - bitmap_set_bit(&part_info->full_part_field_set, (*ptr)->field_index); + { + if ((*ptr)->vcol_info) + table->mark_virtual_col(*ptr); + else + bitmap_fast_test_and_set(table->read_set, (*ptr)->field_index); + } + table->default_column_bitmaps(); end: DBUG_RETURN(result); @@ -820,7 +822,7 @@ int check_signed_flag(partition_info *part_info) */ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table, - bool is_sub_part, bool is_create_table_ind) + bool is_sub_part, bool is_create_table_ind) { partition_info *part_info= table->part_info; bool result= TRUE; @@ -857,8 +859,8 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table, const nesting_map saved_allow_sum_func= thd->lex->allow_sum_func; thd->lex->allow_sum_func= 0; - if (!(error= func_expr->fix_fields(thd, (Item**)&func_expr))) - func_expr->walk(&Item::vcol_in_partition_func_processor, 0, NULL); + if (likely(!(error= func_expr->fix_fields(thd, (Item**)&func_expr)))) + func_expr->walk(&Item::post_fix_fields_part_expr_processor, 0, NULL); /* Restore agg_field/agg_func and allow_sum_func, @@ -901,7 +903,7 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table, ER_THD(thd, ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR)); } - if ((!is_sub_part) && (error= check_signed_flag(part_info))) + if (unlikely((!is_sub_part) && (error= check_signed_flag(part_info)))) goto end; result= set_up_field_array(thd, table, is_sub_part); end: @@ -1088,12 +1090,11 @@ static bool set_up_partition_bitmaps(THD *thd, partition_info *part_info) DBUG_ASSERT(!part_info->bitmaps_are_initialized); /* Allocate for both read and lock_partitions */ - if (!(bitmap_buf= (uint32*) alloc_root(&part_info->table->mem_root, - bitmap_bytes * 2))) - { - mem_alloc_error(bitmap_bytes * 2); + if (unlikely(!(bitmap_buf= + (uint32*) alloc_root(&part_info->table->mem_root, + bitmap_bytes * 2)))) DBUG_RETURN(TRUE); - } + my_bitmap_init(&part_info->read_partitions, bitmap_buf, bitmap_bits, FALSE); /* Use the second half of the allocated buffer for lock_partitions */ my_bitmap_init(&part_info->lock_partitions, bitmap_buf + (bitmap_bytes / 4), @@ -1291,11 +1292,9 @@ static bool check_range_constants(THD *thd, partition_info *part_info) uint size_entries= sizeof(part_column_list_val) * num_column_values; part_info->range_col_array= (part_column_list_val*) thd->calloc(part_info->num_parts * size_entries); - if (part_info->range_col_array == NULL) - { - mem_alloc_error(part_info->num_parts * size_entries); + if (unlikely(part_info->range_col_array == NULL)) goto end; - } + loc_range_col_array= part_info->range_col_array; i= 0; do @@ -1329,11 +1328,9 @@ static bool check_range_constants(THD *thd, partition_info *part_info) part_info->range_int_array= (longlong*) thd->alloc(part_info->num_parts * sizeof(longlong)); - if (part_info->range_int_array == NULL) - { - mem_alloc_error(part_info->num_parts * sizeof(longlong)); + if (unlikely(part_info->range_int_array == NULL)) goto end; - } + i= 0; do { @@ -1574,7 +1571,7 @@ static bool check_vers_constants(THD *thd, partition_info *part_info) my_tz_OFFSET0->TIME_to_gmt_sec(<ime, &error); if (error) goto err; - if (vers_info->hist_part->range_value <= thd->systime()) + if (vers_info->hist_part->range_value <= thd->query_start()) vers_info->hist_part= el; } return 0; @@ -2823,10 +2820,9 @@ static inline int part_val_int(Item *item_expr, longlong *result) *result= 
item_expr->val_int(); if (item_expr->null_value) { - if (current_thd->is_error()) + if (unlikely(current_thd->is_error())) return TRUE; - else - *result= LONGLONG_MIN; + *result= LONGLONG_MIN; } return FALSE; } @@ -3534,7 +3530,7 @@ int get_partition_id_range(partition_info *part_info, bool unsigned_flag= part_info->part_expr->unsigned_flag; DBUG_ENTER("get_partition_id_range"); - if (error) + if (unlikely(error)) DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND); if (part_info->part_expr->null_value) @@ -4107,26 +4103,21 @@ bool verify_data_with_partition(TABLE *table, TABLE *part_table, old_rec= part_table->record[0]; part_table->record[0]= table->record[0]; part_info->table->move_fields(part_info->full_part_field_array, table->record[0], old_rec); - if ((error= file->ha_rnd_init(TRUE))) - { - file->print_error(error, MYF(0)); + if (unlikely(error= file->ha_rnd_init_with_error(TRUE))) goto err; - } do { - if ((error= file->ha_rnd_next(table->record[0]))) + if (unlikely((error= file->ha_rnd_next(table->record[0])))) { - if (error == HA_ERR_RECORD_DELETED) - continue; if (error == HA_ERR_END_OF_FILE) error= 0; else file->print_error(error, MYF(0)); break; } - if ((error= part_info->get_partition_id(part_info, &found_part_id, - &func_value))) + if (unlikely((error= part_info->get_partition_id(part_info, &found_part_id, + &func_value)))) { part_table->file->print_error(error, MYF(0)); break; @@ -4144,9 +4135,7 @@ err: part_info->table->move_fields(part_info->full_part_field_array, old_rec, table->record[0]); part_table->record[0]= old_rec; - if (error) - DBUG_RETURN(TRUE); - DBUG_RETURN(FALSE); + DBUG_RETURN(unlikely(error) ? TRUE : FALSE); } @@ -4461,26 +4450,24 @@ bool mysql_unpack_partition(THD *thd, thd->variables.character_set_client= system_charset_info; Parser_state parser_state; - if (parser_state.init(thd, part_buf, part_info_len)) + if (unlikely(parser_state.init(thd, part_buf, part_info_len))) goto end; - if (init_lex_with_single_table(thd, table, &lex)) + if (unlikely(init_lex_with_single_table(thd, table, &lex))) goto end; *work_part_info_used= FALSE; - lex.part_info= new partition_info(); - lex.part_info->table= table; /* Indicates MYSQLparse from this place */ - if (!lex.part_info) - { - mem_alloc_error(sizeof(partition_info)); + + if (unlikely(!(lex.part_info= new partition_info()))) goto end; - } + + lex.part_info->table= table; /* Indicates MYSQLparse from this place */ part_info= lex.part_info; DBUG_PRINT("info", ("Parse: %s", part_buf)); thd->m_statement_psi= NULL; - if (parse_sql(thd, & parser_state, NULL) || - part_info->fix_parser_data(thd)) + if (unlikely(parse_sql(thd, & parser_state, NULL)) || + unlikely(part_info->fix_parser_data(thd))) { thd->free_items(); thd->m_statement_psi= parent_locker; @@ -4765,8 +4752,6 @@ bool compare_partition_options(HA_CREATE_INFO *table_create_info, const char *option_diffs[MAX_COMPARE_PARTITION_OPTION_ERRORS + 1]; int i, errors= 0; DBUG_ENTER("compare_partition_options"); - DBUG_ASSERT(!part_elem->tablespace_name && - !table_create_info->tablespace); /* Note that there are not yet any engine supporting tablespace together @@ -4877,6 +4862,17 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, ALTER_PARTITION_TABLE_REORG | ALTER_PARTITION_REBUILD)) { + /* + You can't add column when we are doing alter related to partition + */ + DBUG_EXECUTE_IF("test_pseudo_invisible", { + my_error(ER_INTERNAL_ERROR, MYF(0), "Don't to it with test_pseudo_invisible"); + DBUG_RETURN(1); + }); + DBUG_EXECUTE_IF("test_completely_invisible", 
{ + my_error(ER_INTERNAL_ERROR, MYF(0), "Don't to it with test_completely_invisible"); + DBUG_RETURN(1); + }); partition_info *tab_part_info; ulonglong flags= 0; bool is_last_partition_reorged= FALSE; @@ -5314,7 +5310,7 @@ that are reorganised. if (*fast_alter_table && tab_part_info->vers_info->interval.is_set()) { partition_element *hist_part= tab_part_info->vers_info->hist_part; - if (hist_part->range_value <= thd->systime()) + if (hist_part->range_value <= thd->query_start()) hist_part->part_state= PART_CHANGED; } } @@ -5325,21 +5321,17 @@ that are reorganised. partition_element *part_elem= alt_it++; if (*fast_alter_table) part_elem->part_state= PART_TO_BE_ADDED; - if (tab_part_info->partitions.push_back(part_elem, thd->mem_root)) - { - mem_alloc_error(1); + if (unlikely(tab_part_info->partitions.push_back(part_elem, + thd->mem_root))) goto err; - } } while (++part_count < num_new_partitions); tab_part_info->num_parts+= num_new_partitions; if (tab_part_info->part_type == VERSIONING_PARTITION) { DBUG_ASSERT(now_part); - if (tab_part_info->partitions.push_back(now_part, thd->mem_root)) - { - mem_alloc_error(1); + if (unlikely(tab_part_info->partitions.push_back(now_part, + thd->mem_root))) goto err; - } } } /* @@ -5672,12 +5664,10 @@ the generated partition syntax in a correct manner. else tab_max_range= part_elem->range_value; if (*fast_alter_table && - tab_part_info->temp_partitions.push_back(part_elem, - thd->mem_root)) - { - mem_alloc_error(1); + unlikely(tab_part_info->temp_partitions. + push_back(part_elem, thd->mem_root))) goto err; - } + if (*fast_alter_table) part_elem->part_state= PART_TO_BE_REORGED; if (!found_first) @@ -5995,9 +5985,11 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt) /* TODO: test if bulk_insert would increase the performance */ - if ((error= file->ha_change_partitions(lpt->create_info, path, &lpt->copied, - &lpt->deleted, lpt->pack_frm_data, - lpt->pack_frm_len))) + if (unlikely((error= file->ha_change_partitions(lpt->create_info, path, + &lpt->copied, + &lpt->deleted, + lpt->pack_frm_data, + lpt->pack_frm_len)))) { file->print_error(error, MYF(error != ER_OUTOFMEMORY ? 0 : ME_FATALERROR)); } @@ -6035,7 +6027,7 @@ static bool mysql_rename_partitions(ALTER_PARTITION_PARAM_TYPE *lpt) DBUG_ENTER("mysql_rename_partitions"); build_table_filename(path, sizeof(path) - 1, lpt->db.str, lpt->table_name.str, "", 0); - if ((error= lpt->table->file->ha_rename_partitions(path))) + if (unlikely((error= lpt->table->file->ha_rename_partitions(path)))) { if (error != 1) lpt->table->file->print_error(error, MYF(0)); @@ -6772,14 +6764,14 @@ static void alter_partition_lock_handling(ALTER_PARTITION_PARAM_TYPE *lpt) Diagnostics_area *stmt_da= NULL; Diagnostics_area tmp_stmt_da(true); - if (thd->is_error()) + if (unlikely(thd->is_error())) { /* reopen might fail if we have a previous error, use a temporary da. */ stmt_da= thd->get_stmt_da(); thd->set_stmt_da(&tmp_stmt_da); } - if (thd->locked_tables_list.reopen_tables(thd)) + if (unlikely(thd->locked_tables_list.reopen_tables(thd, false))) sql_print_warning("We failed to reacquire LOCKs in ALTER TABLE"); if (stmt_da) @@ -6978,14 +6970,14 @@ err_exclusive_lock: Diagnostics_area *stmt_da= NULL; Diagnostics_area tmp_stmt_da(true); - if (thd->is_error()) + if (unlikely(thd->is_error())) { /* reopen might fail if we have a previous error, use a temporary da. 
*/ stmt_da= thd->get_stmt_da(); thd->set_stmt_da(&tmp_stmt_da); } - if (thd->locked_tables_list.reopen_tables(thd)) + if (unlikely(thd->locked_tables_list.reopen_tables(thd, false))) sql_print_warning("We failed to reacquire LOCKs in ALTER TABLE"); if (stmt_da) @@ -7536,27 +7528,6 @@ void append_row_to_str(String &str, const uchar *row, TABLE *table) } -/* - SYNOPSIS - mem_alloc_error() - size Size of memory attempted to allocate - None - - RETURN VALUES - None - - DESCRIPTION - A routine to use for all the many places in the code where memory - allocation error can happen, a tremendous amount of them, needs - simple routine that signals this error. -*/ - -void mem_alloc_error(size_t size) -{ - my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), - static_cast(size)); -} - #ifdef WITH_PARTITION_STORAGE_ENGINE /** Return comma-separated list of used partitions in the provided given string. diff --git a/sql/sql_partition.h b/sql/sql_partition.h index 4315c84e4f0..170ae8ccee1 100644 --- a/sql/sql_partition.h +++ b/sql/sql_partition.h @@ -133,7 +133,6 @@ Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs); @param[in] table Table containing read_set and fields for the row. */ void append_row_to_str(String &str, const uchar *row, TABLE *table); -void mem_alloc_error(size_t size); void truncate_partition_filename(char *path); /* diff --git a/sql/sql_partition_admin.cc b/sql/sql_partition_admin.cc index 6e176a40e6c..99fe09d5afe 100644 --- a/sql/sql_partition_admin.cc +++ b/sql/sql_partition_admin.cc @@ -67,25 +67,29 @@ bool Sql_cmd_alter_table_exchange_partition::execute(THD *thd) DBUG_ENTER("Sql_cmd_alter_table_exchange_partition::execute"); - if (thd->is_fatal_error) /* out of memory creating a copy of alter_info */ + if (unlikely(thd->is_fatal_error)) + { + /* out of memory creating a copy of alter_info */ DBUG_RETURN(TRUE); + } /* Must be set in the parser */ DBUG_ASSERT(select_lex->db.str); /* also check the table to be exchanged with the partition */ DBUG_ASSERT(alter_info.partition_flags & ALTER_PARTITION_EXCHANGE); - if (check_access(thd, priv_needed, first_table->db.str, - &first_table->grant.privilege, - &first_table->grant.m_internal, - 0, 0) || - check_access(thd, priv_needed, first_table->next_local->db.str, - &first_table->next_local->grant.privilege, - &first_table->next_local->grant.m_internal, - 0, 0)) + if (unlikely(check_access(thd, priv_needed, first_table->db.str, + &first_table->grant.privilege, + &first_table->grant.m_internal, + 0, 0)) || + unlikely(check_access(thd, priv_needed, first_table->next_local->db.str, + &first_table->next_local->grant.privilege, + &first_table->next_local->grant.m_internal, + 0, 0))) DBUG_RETURN(TRUE); - if (check_grant(thd, priv_needed, first_table, FALSE, UINT_MAX, FALSE)) + if (unlikely(check_grant(thd, priv_needed, first_table, FALSE, UINT_MAX, + FALSE))) DBUG_RETURN(TRUE); /* Not allowed with EXCHANGE PARTITION */ @@ -109,31 +113,32 @@ bool Sql_cmd_alter_table_exchange_partition::execute(THD *thd) @retval FALSE if OK, otherwise error is reported and TRUE is returned. 
*/ + static bool check_exchange_partition(TABLE *table, TABLE *part_table) { DBUG_ENTER("check_exchange_partition"); /* Both tables must exist */ - if (!part_table || !table) + if (unlikely(!part_table || !table)) { my_error(ER_CHECK_NO_SUCH_TABLE, MYF(0)); DBUG_RETURN(TRUE); } /* The first table must be partitioned, and the second must not */ - if (!part_table->part_info) + if (unlikely(!part_table->part_info)) { my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0)); DBUG_RETURN(TRUE); } - if (table->part_info) + if (unlikely(table->part_info)) { my_error(ER_PARTITION_EXCHANGE_PART_TABLE, MYF(0), table->s->table_name.str); DBUG_RETURN(TRUE); } - if (part_table->file->ht != partition_hton) + if (unlikely(part_table->file->ht != partition_hton)) { /* Only allowed on partitioned tables throught the generic ha_partition @@ -143,14 +148,14 @@ static bool check_exchange_partition(TABLE *table, TABLE *part_table) DBUG_RETURN(TRUE); } - if (table->file->ht != part_table->part_info->default_engine_type) + if (unlikely(table->file->ht != part_table->part_info->default_engine_type)) { my_error(ER_MIX_HANDLER_ERROR, MYF(0)); DBUG_RETURN(TRUE); } /* Verify that table is not tmp table, partitioned tables cannot be tmp. */ - if (table->s->tmp_table != NO_TMP_TABLE) + if (unlikely(table->s->tmp_table != NO_TMP_TABLE)) { my_error(ER_PARTITION_EXCHANGE_TEMP_TABLE, MYF(0), table->s->table_name.str); @@ -158,7 +163,7 @@ static bool check_exchange_partition(TABLE *table, TABLE *part_table) } /* The table cannot have foreign keys constraints or be referenced */ - if(!table->file->can_switch_engines()) + if (unlikely(!table->file->can_switch_engines())) { my_error(ER_PARTITION_EXCHANGE_FOREIGN_KEY, MYF(0), table->s->table_name.str); @@ -197,8 +202,8 @@ static bool compare_table_with_partition(THD *thd, TABLE *table, /* mark all columns used, since they are used when preparing the new table */ part_table->use_all_columns(); table->use_all_columns(); - if (mysql_prepare_alter_table(thd, part_table, &part_create_info, - &part_alter_info, &part_alter_ctx)) + if (unlikely(mysql_prepare_alter_table(thd, part_table, &part_create_info, + &part_alter_info, &part_alter_ctx))) { my_error(ER_TABLES_DIFFERENT_METADATA, MYF(0)); DBUG_RETURN(TRUE); @@ -336,11 +341,8 @@ static bool exchange_name_with_ddl_log(THD *thd, handler *file= NULL; DBUG_ENTER("exchange_name_with_ddl_log"); - if (!(file= get_new_handler(NULL, thd->mem_root, ht))) - { - mem_alloc_error(sizeof(handler)); + if (unlikely(!(file= get_new_handler(NULL, thd->mem_root, ht)))) DBUG_RETURN(TRUE); - } /* prepare the action entry */ exchange_entry.entry_type= DDL_LOG_ENTRY_CODE; @@ -360,12 +362,13 @@ static bool exchange_name_with_ddl_log(THD *thd, */ DBUG_EXECUTE_IF("exchange_partition_fail_1", goto err_no_action_written;); DBUG_EXECUTE_IF("exchange_partition_abort_1", DBUG_SUICIDE();); - if (write_ddl_log_entry(&exchange_entry, &log_entry)) + if (unlikely(write_ddl_log_entry(&exchange_entry, &log_entry))) goto err_no_action_written; DBUG_EXECUTE_IF("exchange_partition_fail_2", goto err_no_execute_written;); DBUG_EXECUTE_IF("exchange_partition_abort_2", DBUG_SUICIDE();); - if (write_execute_ddl_log_entry(log_entry->entry_pos, FALSE, &exec_log_entry)) + if (unlikely(write_execute_ddl_log_entry(log_entry->entry_pos, FALSE, + &exec_log_entry))) goto err_no_execute_written; /* ddl_log is written and synced */ @@ -383,7 +386,7 @@ static bool exchange_name_with_ddl_log(THD *thd, error_set= TRUE; goto err_rename;); DBUG_EXECUTE_IF("exchange_partition_abort_3", 
DBUG_SUICIDE();); - if (file->ha_rename_table(name, tmp_name)) + if (unlikely(file->ha_rename_table(name, tmp_name))) { my_error(ER_ERROR_ON_RENAME, MYF(0), name, tmp_name, my_errno); error_set= TRUE; @@ -391,7 +394,7 @@ static bool exchange_name_with_ddl_log(THD *thd, } DBUG_EXECUTE_IF("exchange_partition_fail_4", goto err_rename;); DBUG_EXECUTE_IF("exchange_partition_abort_4", DBUG_SUICIDE();); - if (deactivate_ddl_log_entry(log_entry->entry_pos)) + if (unlikely(deactivate_ddl_log_entry(log_entry->entry_pos))) goto err_rename; /* call rename table from partition to table */ @@ -400,7 +403,7 @@ static bool exchange_name_with_ddl_log(THD *thd, error_set= TRUE; goto err_rename;); DBUG_EXECUTE_IF("exchange_partition_abort_5", DBUG_SUICIDE();); - if (file->ha_rename_table(from_name, name)) + if (unlikely(file->ha_rename_table(from_name, name))) { my_error(ER_ERROR_ON_RENAME, MYF(0), from_name, name, my_errno); error_set= TRUE; @@ -408,7 +411,7 @@ static bool exchange_name_with_ddl_log(THD *thd, } DBUG_EXECUTE_IF("exchange_partition_fail_6", goto err_rename;); DBUG_EXECUTE_IF("exchange_partition_abort_6", DBUG_SUICIDE();); - if (deactivate_ddl_log_entry(log_entry->entry_pos)) + if (unlikely(deactivate_ddl_log_entry(log_entry->entry_pos))) goto err_rename; /* call rename table from tmp-nam to partition */ @@ -417,7 +420,7 @@ static bool exchange_name_with_ddl_log(THD *thd, error_set= TRUE; goto err_rename;); DBUG_EXECUTE_IF("exchange_partition_abort_7", DBUG_SUICIDE();); - if (file->ha_rename_table(tmp_name, from_name)) + if (unlikely(file->ha_rename_table(tmp_name, from_name))) { my_error(ER_ERROR_ON_RENAME, MYF(0), tmp_name, from_name, my_errno); error_set= TRUE; @@ -425,7 +428,7 @@ static bool exchange_name_with_ddl_log(THD *thd, } DBUG_EXECUTE_IF("exchange_partition_fail_8", goto err_rename;); DBUG_EXECUTE_IF("exchange_partition_abort_8", DBUG_SUICIDE();); - if (deactivate_ddl_log_entry(log_entry->entry_pos)) + if (unlikely(deactivate_ddl_log_entry(log_entry->entry_pos))) goto err_rename; /* The exchange is complete and ddl_log is deactivated */ @@ -525,23 +528,24 @@ bool Sql_cmd_alter_table_exchange_partition:: to be able to verify the structure/metadata. 
*/ table_list->mdl_request.set_type(MDL_SHARED_NO_WRITE); - if (open_tables(thd, &table_list, &table_counter, 0, - &alter_prelocking_strategy)) + if (unlikely(open_tables(thd, &table_list, &table_counter, 0, + &alter_prelocking_strategy))) DBUG_RETURN(true); part_table= table_list->table; swap_table= swap_table_list->table; - if (check_exchange_partition(swap_table, part_table)) + if (unlikely(check_exchange_partition(swap_table, part_table))) DBUG_RETURN(TRUE); /* set lock pruning on first table */ partition_name= alter_info->partition_names.head(); - if (table_list->table->part_info-> - set_named_partition_bitmap(partition_name, strlen(partition_name))) + if (unlikely(table_list->table->part_info-> + set_named_partition_bitmap(partition_name, + strlen(partition_name)))) DBUG_RETURN(true); - if (lock_tables(thd, table_list, table_counter, 0)) + if (unlikely(lock_tables(thd, table_list, table_counter, 0))) DBUG_RETURN(true); @@ -569,32 +573,35 @@ bool Sql_cmd_alter_table_exchange_partition:: table_list->next_local->db.str, temp_name, "", FN_IS_TMP); - if (!(part_elem= part_table->part_info->get_part_elem(partition_name, - part_file_name + part_file_name_len, - sizeof(part_file_name) - part_file_name_len, - &swap_part_id))) + if (unlikely(!(part_elem= + part_table->part_info->get_part_elem(partition_name, + part_file_name + + part_file_name_len, + sizeof(part_file_name) - + part_file_name_len, + &swap_part_id)))) { - // my_error(ER_UNKNOWN_PARTITION, MYF(0), partition_name, - // part_table->alias); DBUG_RETURN(TRUE); } - if (swap_part_id == NOT_A_PARTITION_ID) + if (unlikely(swap_part_id == NOT_A_PARTITION_ID)) { DBUG_ASSERT(part_table->part_info->is_sub_partitioned()); my_error(ER_PARTITION_INSTEAD_OF_SUBPARTITION, MYF(0)); DBUG_RETURN(TRUE); } - if (compare_table_with_partition(thd, swap_table, part_table, part_elem, - swap_part_id)) + if (unlikely(compare_table_with_partition(thd, swap_table, part_table, + part_elem, + swap_part_id))) DBUG_RETURN(TRUE); /* Table and partition has same structure/options, OK to exchange */ thd_proc_info(thd, "Verifying data with partition"); - if (verify_data_with_partition(swap_table, part_table, swap_part_id)) + if (unlikely(verify_data_with_partition(swap_table, part_table, + swap_part_id))) DBUG_RETURN(TRUE); /* @@ -621,8 +628,8 @@ bool Sql_cmd_alter_table_exchange_partition:: DEBUG_SYNC(thd, "swap_partition_before_rename"); - if (exchange_name_with_ddl_log(thd, swap_file_name, part_file_name, - temp_file_name, table_hton)) + if (unlikely(exchange_name_with_ddl_log(thd, swap_file_name, part_file_name, + temp_file_name, table_hton))) goto err; /* @@ -630,9 +637,10 @@ bool Sql_cmd_alter_table_exchange_partition:: better to keep master/slave in consistent state. Alternative would be to try to revert the exchange operation and issue error. */ - (void) thd->locked_tables_list.reopen_tables(thd); + (void) thd->locked_tables_list.reopen_tables(thd, false); - if ((error= write_bin_log(thd, TRUE, thd->query(), thd->query_length()))) + if (unlikely((error= write_bin_log(thd, TRUE, thd->query(), + thd->query_length())))) { /* The error is reported in write_bin_log(). @@ -651,7 +659,7 @@ err: part_table_mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE); } - if (!error) + if (unlikely(!error)) my_ok(thd); // For query cache @@ -823,7 +831,8 @@ bool Sql_cmd_alter_table_truncate_partition::execute(THD *thd) partition= (ha_partition*) first_table->table->file; /* Invoke the handler method responsible for truncating the partition. 
*/ - if ((error= partition->truncate_partition(alter_info, &binlog_stmt))) + if (unlikely(error= partition->truncate_partition(alter_info, + &binlog_stmt))) partition->print_error(error, MYF(0)); /* @@ -836,7 +845,7 @@ bool Sql_cmd_alter_table_truncate_partition::execute(THD *thd) Since we've changed data within the table, we also have to invalidate the query cache for it. */ - if (error != HA_ERR_WRONG_COMMAND) + if (likely(error != HA_ERR_WRONG_COMMAND)) { query_cache_invalidate3(thd, first_table, FALSE); if (binlog_stmt) @@ -851,7 +860,7 @@ bool Sql_cmd_alter_table_truncate_partition::execute(THD *thd) if (thd->locked_tables_mode) ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE); - if (! error) + if (likely(!error)) my_ok(thd); // Invalidate query cache diff --git a/sql/sql_plist.h b/sql/sql_plist.h index 14f6eb5e2aa..bb9889cc534 100644 --- a/sql/sql_plist.h +++ b/sql/sql_plist.h @@ -230,7 +230,7 @@ protected: void reset() {} void inc() {} void dec() {} - void swap(I_P_List_null_counter &rhs) {} + void swap(I_P_List_null_counter &) {} }; @@ -262,14 +262,14 @@ public: template class I_P_List_no_push_back { protected: - I_P_List_no_push_back(T **a) {}; - void set_last(T **a) {} + I_P_List_no_push_back(T **) {} + void set_last(T **) {} /* T** get_last() const method is intentionally left unimplemented in order to prohibit usage of push_back() method in lists which use this policy. */ - void swap(I_P_List_no_push_back &rhs) {} + void swap(I_P_List_no_push_back &) {} }; diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 605f96293d3..ac1524381c7 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -1,6 +1,6 @@ /* - Copyright (c) 2005, 2013, Oracle and/or its affiliates. - Copyright (c) 2010, 2017, MariaDB Corporation. + Copyright (c) 2005, 2018, Oracle and/or its affiliates. + Copyright (c) 2010, 2018, MariaDB Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -40,9 +40,6 @@ #include #include "sql_plugin_compat.h" -#define REPORT_TO_LOG 1 -#define REPORT_TO_USER 2 - #ifdef HAVE_LINK_H #include #endif @@ -333,24 +330,6 @@ bool plugin_is_forced(struct st_plugin_int *p) p->load_option == PLUGIN_FORCE_PLUS_PERMANENT; } -static void report_error(int where_to, uint error, ...) -{ - va_list args; - DBUG_ASSERT(where_to & (REPORT_TO_USER | REPORT_TO_LOG)); - if (where_to & REPORT_TO_USER) - { - va_start(args, error); - my_printv_error(error, ER(error), MYF(0), args); - va_end(args); - } - if (where_to & REPORT_TO_LOG) - { - va_start(args, error); - error_log_print(ERROR_LEVEL, ER_DEFAULT(error), args); - va_end(args); - } -} - /** Check if the provided path is valid in the sense that it does cause a relative reference outside the directory. 
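/*
  The sql_plugin.cc hunks below drop the file-local report_error() helper and
  its REPORT_TO_USER / REPORT_TO_LOG selectors and instead pass a myf flag set
  straight into my_error(): MYF(0) reports to the connected client, while
  MYF(ME_ERROR_LOG) routes the message to the server error log (as plugin_load()
  does at startup).  A minimal sketch of how a caller now picks the destination
  follows; the wrapper name and the during_startup parameter are illustrative
  assumptions, only plugin_dl_add(), my_error(), MYF() and ME_ERROR_LOG are
  taken from the patch itself.
*/
static bool load_plugin_dl_sketch(const LEX_CSTRING *dl, bool during_startup)
{
  /* No client is connected during server startup, so send errors to the log;
     for INSTALL PLUGIN and similar statements report them to the user. */
  myf MyFlags= during_startup ? MYF(ME_ERROR_LOG) : MYF(0);
  /* plugin_dl_add() itself calls my_error(..., MyFlags, ...) on failure,
     so the caller only has to check the returned pointer. */
  return plugin_dl_add(dl, MyFlags) == NULL;
}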
@@ -523,7 +502,7 @@ static void free_plugin_mem(struct st_plugin_dl *p) @param plugin_dl Structure where the data should be put @param sym Reverence on version info @param dlpath Path to the module - @param report What errors should be reported + @param MyFlags Where errors should be reported (0 or ME_ERROR_LOG) @retval FALSE OK @retval TRUE ERROR @@ -531,14 +510,13 @@ static void free_plugin_mem(struct st_plugin_dl *p) #ifdef HAVE_DLOPEN static my_bool read_mysql_plugin_info(struct st_plugin_dl *plugin_dl, - void *sym, char *dlpath, - int report) + void *sym, char *dlpath, myf MyFlags) { DBUG_ENTER("read_maria_plugin_info"); /* Determine interface version */ if (!sym) { - report_error(report, ER_CANT_FIND_DL_ENTRY, plugin_interface_version_sym); + my_error(ER_CANT_FIND_DL_ENTRY, MyFlags, plugin_interface_version_sym); DBUG_RETURN(TRUE); } plugin_dl->mariaversion= 0; @@ -547,14 +525,14 @@ static my_bool read_mysql_plugin_info(struct st_plugin_dl *plugin_dl, if (plugin_dl->mysqlversion < min_plugin_interface_version || (plugin_dl->mysqlversion >> 8) > (MYSQL_PLUGIN_INTERFACE_VERSION >> 8)) { - report_error(report, ER_CANT_OPEN_LIBRARY, dlpath, ENOEXEC, + my_error(ER_CANT_OPEN_LIBRARY, MyFlags, dlpath, ENOEXEC, "plugin interface version mismatch"); DBUG_RETURN(TRUE); } /* Find plugin declarations */ if (!(sym= dlsym(plugin_dl->handle, plugin_declarations_sym))) { - report_error(report, ER_CANT_FIND_DL_ENTRY, plugin_declarations_sym); + my_error(ER_CANT_FIND_DL_ENTRY, MyFlags, plugin_declarations_sym); DBUG_RETURN(TRUE); } @@ -584,7 +562,7 @@ static my_bool read_mysql_plugin_info(struct st_plugin_dl *plugin_dl, MYF(MY_ZEROFILL|MY_WME)); if (!cur) { - report_error(report, ER_OUTOFMEMORY, + my_error(ER_OUTOFMEMORY, MyFlags, static_cast(plugin_dl->dl.length)); DBUG_RETURN(TRUE); } @@ -639,15 +617,14 @@ static my_bool read_mysql_plugin_info(struct st_plugin_dl *plugin_dl, @param plugin_dl Structure where the data should be put @param sym Reverence on version info @param dlpath Path to the module - @param report what errors should be reported + @param MyFlags Where errors should be reported (0 or ME_ERROR_LOG) @retval FALSE OK @retval TRUE ERROR */ static my_bool read_maria_plugin_info(struct st_plugin_dl *plugin_dl, - void *sym, char *dlpath, - int report) + void *sym, char *dlpath, myf MyFlags) { DBUG_ENTER("read_maria_plugin_info"); @@ -658,7 +635,7 @@ static my_bool read_maria_plugin_info(struct st_plugin_dl *plugin_dl, Actually this branch impossible because in case of absence of maria version we try mysql version. 
*/ - report_error(report, ER_CANT_FIND_DL_ENTRY, + my_error(ER_CANT_FIND_DL_ENTRY, MyFlags, maria_plugin_interface_version_sym); DBUG_RETURN(TRUE); } @@ -668,14 +645,14 @@ static my_bool read_maria_plugin_info(struct st_plugin_dl *plugin_dl, if (plugin_dl->mariaversion < min_maria_plugin_interface_version || (plugin_dl->mariaversion >> 8) > (MARIA_PLUGIN_INTERFACE_VERSION >> 8)) { - report_error(report, ER_CANT_OPEN_LIBRARY, dlpath, ENOEXEC, + my_error(ER_CANT_OPEN_LIBRARY, MyFlags, dlpath, ENOEXEC, "plugin interface version mismatch"); DBUG_RETURN(TRUE); } /* Find plugin declarations */ if (!(sym= dlsym(plugin_dl->handle, maria_plugin_declarations_sym))) { - report_error(report, ER_CANT_FIND_DL_ENTRY, maria_plugin_declarations_sym); + my_error(ER_CANT_FIND_DL_ENTRY, MyFlags, maria_plugin_declarations_sym); DBUG_RETURN(TRUE); } if (plugin_dl->mariaversion != MARIA_PLUGIN_INTERFACE_VERSION) @@ -688,7 +665,7 @@ static my_bool read_maria_plugin_info(struct st_plugin_dl *plugin_dl, sizeof_st_plugin= *(int *)sym; else { - report_error(report, ER_CANT_FIND_DL_ENTRY, maria_sizeof_st_plugin_sym); + my_error(ER_CANT_FIND_DL_ENTRY, MyFlags, maria_sizeof_st_plugin_sym); DBUG_RETURN(TRUE); } @@ -705,7 +682,7 @@ static my_bool read_maria_plugin_info(struct st_plugin_dl *plugin_dl, MYF(MY_ZEROFILL|MY_WME)); if (!cur) { - report_error(report, ER_OUTOFMEMORY, + my_error(ER_OUTOFMEMORY, MyFlags, static_cast(plugin_dl->dl.length)); DBUG_RETURN(TRUE); } @@ -731,7 +708,7 @@ static my_bool read_maria_plugin_info(struct st_plugin_dl *plugin_dl, } #endif /* HAVE_DLOPEN */ -static st_plugin_dl *plugin_dl_add(const LEX_CSTRING *dl, int report) +static st_plugin_dl *plugin_dl_add(const LEX_CSTRING *dl, myf MyFlags) { #ifdef HAVE_DLOPEN char dlpath[FN_REFLEN]; @@ -755,7 +732,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_CSTRING *dl, int report) system_charset_info, 1) || plugin_dir_len + dl->length + 1 >= FN_REFLEN) { - report_error(report, ER_UDF_NO_PATHS); + my_error(ER_UDF_NO_PATHS, MyFlags); DBUG_RETURN(0); } /* If this dll is already loaded just increase ref_count. 
*/ @@ -772,7 +749,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_CSTRING *dl, int report) /* Open new dll handle */ if (!(plugin_dl.handle= dlopen(dlpath, RTLD_NOW))) { - report_error(report, ER_CANT_OPEN_LIBRARY, dlpath, errno, my_dlerror(dlpath)); + my_error(ER_CANT_OPEN_LIBRARY, MyFlags, dlpath, errno, my_dlerror(dlpath)); goto ret; } dlopen_count++; @@ -792,12 +769,12 @@ static st_plugin_dl *plugin_dl_add(const LEX_CSTRING *dl, int report) dlsym(plugin_dl.handle, plugin_interface_version_sym), dlpath, - report)) + MyFlags)) goto ret; } else { - if (read_maria_plugin_info(&plugin_dl, sym, dlpath, report)) + if (read_maria_plugin_info(&plugin_dl, sym, dlpath, MyFlags)) goto ret; } @@ -815,7 +792,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_CSTRING *dl, int report) my_snprintf(buf, sizeof(buf), "service '%s' interface version mismatch", list_of_services[i].name); - report_error(report, ER_CANT_OPEN_LIBRARY, dlpath, ENOEXEC, buf); + my_error(ER_CANT_OPEN_LIBRARY, MyFlags, dlpath, ENOEXEC, buf); goto ret; } tmp_backup[plugin_dl.nbackups++].save(ptr); @@ -830,7 +807,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_CSTRING *dl, int report) if (!plugin_dl.ptr_backup) { restore_ptr_backup(plugin_dl.nbackups, tmp_backup); - report_error(report, ER_OUTOFMEMORY, bytes); + my_error(ER_OUTOFMEMORY, MyFlags, bytes); goto ret; } memcpy(plugin_dl.ptr_backup, tmp_backup, bytes); @@ -840,7 +817,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_CSTRING *dl, int report) plugin_dl.dl.length= dl->length * files_charset_info->mbmaxlen + 1; if (! (plugin_dl.dl.str= (char*) my_malloc(plugin_dl.dl.length, MYF(0)))) { - report_error(report, ER_OUTOFMEMORY, + my_error(ER_OUTOFMEMORY, MyFlags, static_cast(plugin_dl.dl.length)); goto ret; } @@ -853,7 +830,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_CSTRING *dl, int report) /* Add this dll to array */ if (! (tmp= plugin_dl_insert_or_reuse(&plugin_dl))) { - report_error(report, ER_OUTOFMEMORY, + my_error(ER_OUTOFMEMORY, MyFlags, static_cast(sizeof(struct st_plugin_dl))); goto ret; } @@ -866,7 +843,7 @@ ret: #else DBUG_ENTER("plugin_dl_add"); - report_error(report, ER_FEATURE_DISABLED, "plugin", "HAVE_DLOPEN"); + my_error(ER_FEATURE_DISABLED, MyFlags, "plugin", "HAVE_DLOPEN"); DBUG_RETURN(0); #endif } @@ -1101,7 +1078,7 @@ static st_plugin_int *plugin_insert_or_reuse(struct st_plugin_int *plugin) Requires that a write-lock is held on LOCK_system_variables_hash */ static bool plugin_add(MEM_ROOT *tmp_root, - const LEX_CSTRING *name, LEX_CSTRING *dl, int report) + const LEX_CSTRING *name, LEX_CSTRING *dl, myf MyFlags) { struct st_plugin_int tmp, *maybe_dupe; struct st_maria_plugin *plugin; @@ -1111,13 +1088,13 @@ static bool plugin_add(MEM_ROOT *tmp_root, if (name->str && plugin_find_internal(name, MYSQL_ANY_PLUGIN)) { - report_error(report, ER_PLUGIN_INSTALLED, name->str); + my_error(ER_PLUGIN_INSTALLED, MyFlags, name->str); DBUG_RETURN(TRUE); } /* Clear the whole struct to catch future extensions. */ bzero((char*) &tmp, sizeof(tmp)); fix_dl_name(tmp_root, dl); - if (! (tmp.plugin_dl= plugin_dl_add(dl, report))) + if (! 
(tmp.plugin_dl= plugin_dl_add(dl, MyFlags))) DBUG_RETURN(TRUE); /* Find plugin by name */ for (plugin= tmp.plugin_dl->plugins; plugin->info; plugin++) @@ -1143,7 +1120,7 @@ static bool plugin_add(MEM_ROOT *tmp_root, { if (plugin->name != maybe_dupe->plugin->name) { - report_error(report, ER_UDF_EXISTS, plugin->name); + my_error(ER_UDF_EXISTS, MyFlags, plugin->name); DBUG_RETURN(TRUE); } dupes++; @@ -1160,7 +1137,7 @@ static bool plugin_add(MEM_ROOT *tmp_root, plugin_type_names[plugin->type].str, " plugin ", tmp.name.str, " not supported by this version of the server", NullS); - report_error(report, ER_CANT_OPEN_LIBRARY, dl->str, ENOEXEC, buf); + my_error(ER_CANT_OPEN_LIBRARY, MyFlags, dl->str, ENOEXEC, buf); goto err; } @@ -1173,7 +1150,7 @@ static bool plugin_add(MEM_ROOT *tmp_root, " is prohibited by --plugin-maturity=", plugin_maturity_names[plugin_maturity], NullS); - report_error(report, ER_CANT_OPEN_LIBRARY, dl->str, EPERM, buf); + my_error(ER_CANT_OPEN_LIBRARY, MyFlags, dl->str, EPERM, buf); goto err; } else if (plugin_maturity_map[plugin->maturity] < SERVER_MATURITY_LEVEL) @@ -1211,7 +1188,7 @@ err: DBUG_ASSERT(!name->str || !dupes); // dupes is ONLY for name->str == 0 if (errs == 0 && oks == 0 && !dupes) // no plugin was found - report_error(report, ER_CANT_FIND_DL_ENTRY, name->str); + my_error(ER_CANT_FIND_DL_ENTRY, MyFlags, name->str); plugin_dl_del(tmp.plugin_dl); DBUG_RETURN(errs > 0 || oks + dupes == 0); @@ -1870,11 +1847,11 @@ static void plugin_load(MEM_ROOT *tmp_root) the mutex here to satisfy the assert */ mysql_mutex_lock(&LOCK_plugin); - plugin_add(tmp_root, &name, &dl, REPORT_TO_LOG); + plugin_add(tmp_root, &name, &dl, MYF(ME_ERROR_LOG)); free_root(tmp_root, MYF(MY_MARK_BLOCKS_FREE)); mysql_mutex_unlock(&LOCK_plugin); } - if (error > 0) + if (unlikely(error > 0)) sql_print_error(ER_THD(new_thd, ER_GET_ERRNO), my_errno, table->file->table_type()); end_read_record(&read_record_info); @@ -1926,7 +1903,7 @@ static bool plugin_load_list(MEM_ROOT *tmp_root, const char *list) free_root(tmp_root, MYF(MY_MARK_BLOCKS_FREE)); name.str= 0; // load everything if (plugin_add(tmp_root, (LEX_CSTRING*) &name, (LEX_CSTRING*) &dl, - REPORT_TO_LOG)) + MYF(ME_ERROR_LOG))) goto error; } else @@ -1934,7 +1911,7 @@ static bool plugin_load_list(MEM_ROOT *tmp_root, const char *list) free_root(tmp_root, MYF(MY_MARK_BLOCKS_FREE)); mysql_mutex_lock(&LOCK_plugin); if (plugin_add(tmp_root, (LEX_CSTRING*) &name, (LEX_CSTRING*) &dl, - REPORT_TO_LOG)) + MYF(ME_ERROR_LOG))) goto error; } mysql_mutex_unlock(&LOCK_plugin); @@ -2126,7 +2103,7 @@ static bool finalize_install(THD *thd, TABLE *table, const LEX_CSTRING *name, { if (plugin_initialize(thd->mem_root, tmp, argc, argv, false)) { - report_error(REPORT_TO_USER, ER_CANT_INITIALIZE_UDF, name->str, + my_error(ER_CANT_INITIALIZE_UDF, MYF(0), name->str, "Plugin initialization function failed."); tmp->state= PLUGIN_IS_DELETED; return 1; @@ -2154,7 +2131,7 @@ static bool finalize_install(THD *thd, TABLE *table, const LEX_CSTRING *name, files_charset_info); error= table->file->ha_write_row(table->record[0]); reenable_binlog(thd); - if (error) + if (unlikely(error)) { table->file->print_error(error, MYF(0)); tmp->state= PLUGIN_IS_DELETED; @@ -2189,7 +2166,7 @@ bool mysql_install_plugin(THD *thd, const LEX_CSTRING *name, if (my_load_defaults(MYSQL_CONFIG_NAME, load_default_groups, &argc, &argv, NULL)) { - report_error(REPORT_TO_USER, ER_PLUGIN_IS_NOT_LOADED, name->str); + my_error(ER_PLUGIN_IS_NOT_LOADED, MYF(0), name->str); DBUG_RETURN(TRUE); } @@ -2218,8 
+2195,8 @@ bool mysql_install_plugin(THD *thd, const LEX_CSTRING *name, mysql_audit_acquire_plugins(thd, event_class_mask); mysql_mutex_lock(&LOCK_plugin); - error= plugin_add(thd->mem_root, name, &dl, REPORT_TO_USER); - if (error) + error= plugin_add(thd->mem_root, name, &dl, MYF(0)); + if (unlikely(error)) goto err; if (name->str) @@ -2235,7 +2212,7 @@ bool mysql_install_plugin(THD *thd, const LEX_CSTRING *name, } } - if (error) + if (unlikely(error)) { reap_needed= true; reap_plugins(); @@ -2298,7 +2275,7 @@ static bool do_uninstall(THD *thd, TABLE *table, const LEX_CSTRING *name) tmp_disable_binlog(thd); error= table->file->ha_delete_row(table->record[0]); reenable_binlog(thd); - if (error) + if (unlikely(error)) { table->file->print_error(error, MYF(0)); return 1; @@ -2330,6 +2307,16 @@ bool mysql_uninstall_plugin(THD *thd, const LEX_CSTRING *name, if (! (table= open_ltable(thd, &tables, TL_WRITE, MYSQL_LOCK_IGNORE_TIMEOUT))) DBUG_RETURN(TRUE); + if (!table->key_info) + { + my_printf_error(ER_UNKNOWN_ERROR, + "The table %s.%s has no primary key. " + "Please check the table definition and " + "create the primary key accordingly.", MYF(0), + table->s->db.str, table->s->table_name.str); + DBUG_RETURN(TRUE); + } + /* Pre-acquire audit plugins for events that may potentially occur during [UN]INSTALL PLUGIN. @@ -2486,7 +2473,7 @@ bool plugin_dl_foreach(THD *thd, const LEX_CSTRING *dl, if (dl) { mysql_mutex_lock(&LOCK_plugin); - st_plugin_dl *plugin_dl= plugin_dl_add(dl, REPORT_TO_USER); + st_plugin_dl *plugin_dl= plugin_dl_add(dl, MYF(0)); mysql_mutex_unlock(&LOCK_plugin); if (!plugin_dl) @@ -2751,7 +2738,7 @@ static int check_func_set(THD *thd, struct st_mysql_sys_var *var, goto err; result= find_set(typelib, str, length, NULL, &error, &error_len, ¬_used); - if (error_len) + if (unlikely(error_len)) goto err; } else @@ -2870,7 +2857,7 @@ sys_var *find_sys_var_ex(THD *thd, const char *str, size_t length, if (!locked) mysql_mutex_unlock(&LOCK_plugin); - if (!throw_error && !var) + if (unlikely(!throw_error && !var)) my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0), (int) (length ? length : strlen(str)), (char*) str); DBUG_RETURN(var); @@ -4121,7 +4108,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp, error= handle_options(argc, &argv, opts, mark_changed); (*argc)++; /* add back one for the program name */ - if (error) + if (unlikely(error)) { sql_print_error("Parsing options for plugin '%s' failed.", tmp->name.str); diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 24f3cc66c6b..d4c96ebc535 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -380,16 +380,18 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns) XXX: fix this nasty upcast from List to List */ error= my_net_write(net, buff, sizeof(buff)); - if (stmt->param_count && ! 
error) + if (stmt->param_count && likely(!error)) { error= thd->protocol_text.send_result_set_metadata((List<Item> *) &stmt->lex->param_list, Protocol::SEND_EOF); } - if (!error) + if (likely(!error)) + { /* Flag that a response has already been sent */ thd->get_stmt_da()->disable_status(); + } DBUG_RETURN(error); } @@ -427,7 +429,7 @@ static bool send_prep_stmt(Prepared_statement *stmt, static ulong get_param_length(uchar **packet, ulong len) { - reg1 uchar *pos= *packet; + uchar *pos= *packet; if (len < 1) return 0; if (*pos < 251) @@ -1528,7 +1530,7 @@ static int mysql_test_select(Prepared_statement *stmt, It is not SELECT COMMAND for sure, so setup_tables will be called as usual, and we pass 0 as setup_tables_done_option */ - if (unit->prepare(thd, 0, 0)) + if (unit->prepare(unit->derived, 0, 0)) goto error; if (!lex->describe && !thd->lex->analyze_stmt && !stmt->is_sql_prepare()) { @@ -1699,7 +1701,7 @@ static bool select_like_stmt_test(Prepared_statement *stmt, thd->lex->used_tables= 0; // Updated by setup_fields /* Calls JOIN::prepare */ - DBUG_RETURN(lex->unit.prepare(thd, 0, setup_tables_done_option)); + DBUG_RETURN(lex->unit.prepare(lex->unit.derived, 0, setup_tables_done_option)); } /** @@ -2202,6 +2204,7 @@ static int mysql_test_handler_read(Prepared_statement *stmt, if (!(ha_table= mysql_ha_read_prepare(thd, tables, lex->ha_read_mode, lex->ident.str, lex->insert_list, + lex->ha_rkey_mode, lex->select_lex.where))) DBUG_RETURN(1); @@ -2500,8 +2503,28 @@ static bool check_prepared_statement(Prepared_statement *stmt) break; } if (res == 0) - DBUG_RETURN(stmt->is_sql_prepare() ? - FALSE : (send_prep_stmt(stmt, 0) || thd->protocol->flush())); + { + if (!stmt->is_sql_prepare()) + { + if (lex->describe || lex->analyze_stmt) + { + if (!lex->result && + !(lex->result= new (stmt->mem_root) select_send(thd))) + DBUG_RETURN(TRUE); + List<Item> field_list; + thd->prepare_explain_fields(lex->result, &field_list, + lex->describe, lex->analyze_stmt); + res= send_prep_stmt(stmt, lex->result->field_count(field_list)) || + lex->result->send_result_set_metadata(field_list, + Protocol::SEND_EOF); + } + else + res= send_prep_stmt(stmt, 0); + if (!res) + thd->protocol->flush(); + } + DBUG_RETURN(FALSE); + } error: DBUG_RETURN(TRUE); } @@ -3530,7 +3553,7 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) #else param->set_longdata(thd->extra_data, thd->extra_length); #endif - if (thd->get_stmt_da()->is_error()) + if (unlikely(thd->get_stmt_da()->is_error())) { stmt->state= Query_arena::STMT_ERROR; stmt->last_errno= thd->get_stmt_da()->sql_errno(); @@ -3576,7 +3599,7 @@ bool Select_fetch_protocol_binary::send_eof() Don't send EOF if we're in error condition (which implies we've already sent or are sending an error) */ - if (thd->is_error()) + if (unlikely(thd->is_error())) return true; ::my_eof(thd); @@ -3662,7 +3685,7 @@ Execute_sql_statement::execute_server_code(THD *thd) error= parse_sql(thd, &parser_state, NULL) || thd->is_error(); - if (error) + if (unlikely(error)) goto end; thd->lex->set_trg_event_type_for_tables(); @@ -3673,7 +3696,7 @@ Execute_sql_statement::execute_server_code(THD *thd) thd->m_statement_psi= parent_locker; /* report error issued during command execution */ - if (error == 0 && thd->spcont == NULL) + if (likely(error == 0) && thd->spcont == NULL) general_log_write(thd, COM_STMT_EXECUTE, thd->query(), thd->query_length()); @@ -3888,7 +3911,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) if (! 
(lex= new (mem_root) st_lex_local)) DBUG_RETURN(TRUE); - stmt_lex= lex; + lex->stmt_lex= lex; if (set_db(&thd->db)) DBUG_RETURN(TRUE); @@ -3925,9 +3948,9 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) lex_start(thd); lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_PREPARE; - error= parse_sql(thd, & parser_state, NULL) || - thd->is_error() || - init_param_array(this); + error= (parse_sql(thd, & parser_state, NULL) || + thd->is_error() || + init_param_array(this)); lex->set_trg_event_type_for_tables(); @@ -3959,10 +3982,10 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) Item_null objects. */ - if (error == 0) + if (likely(error == 0)) error= check_prepared_statement(this); - if (error) + if (unlikely(error)) { /* let the following code know we're not in PS anymore, @@ -4001,7 +4024,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) thd->restore_backup_statement(this, &stmt_backup); thd->stmt_arena= old_stmt_arena; - if (error == 0) + if (likely(error == 0)) { setup_set_params(); lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_PREPARE; @@ -4131,7 +4154,7 @@ Prepared_statement::execute_loop(String *expanded_query, DBUG_ASSERT(thd->free_list == NULL); /* Check if we got an error when sending long data */ - if (state == Query_arena::STMT_ERROR) + if (unlikely(state == Query_arena::STMT_ERROR)) { my_message(last_errno, last_error, MYF(0)); return TRUE; @@ -4174,7 +4197,7 @@ reexecute: if (WSREP_ON) { - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); switch (thd->wsrep_conflict_state) { case CERT_FAILURE: @@ -4191,12 +4214,13 @@ reexecute: default: break; } - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); } #endif /* WITH_WSREP */ - if ((sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) && - error && !thd->is_fatal_error && !thd->killed && + if (unlikely(error) && + (sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) && + !thd->is_fatal_error && !thd->killed && reprepare_observer.is_invalidated() && reprepare_attempt++ < MAX_REPREPARE_ATTEMPTS) { @@ -4205,7 +4229,7 @@ reexecute: error= reprepare(); - if (! error) /* Success */ + if (likely(!error)) /* Success */ goto reexecute; } reset_stmt_params(this); @@ -4218,7 +4242,7 @@ my_bool bulk_parameters_set(THD *thd) DBUG_ENTER("bulk_parameters_set"); Prepared_statement *stmt= (Prepared_statement *) thd->bulk_param; - if (stmt && stmt->set_bulk_parameters(FALSE)) + if (stmt && unlikely(stmt->set_bulk_parameters(FALSE))) DBUG_RETURN(TRUE); DBUG_RETURN(FALSE); } @@ -4369,7 +4393,7 @@ reexecute: if (WSREP_ON) { - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); switch (thd->wsrep_conflict_state) { case CERT_FAILURE: @@ -4386,12 +4410,13 @@ reexecute: default: break; } - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); } #endif /* WITH_WSREP */ - if ((sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) && - error && !thd->is_fatal_error && !thd->killed && + if (unlikely(error) && + (sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) && + !thd->is_fatal_error && !thd->killed && reprepare_observer.is_invalidated() && reprepare_attempt++ < MAX_REPREPARE_ATTEMPTS) { @@ -4400,7 +4425,7 @@ reexecute: error= reprepare(); - if (! 
error) /* Success */ + if (likely(!error)) /* Success */ goto reexecute; } } @@ -4475,8 +4500,8 @@ Prepared_statement::reprepare() status_var_increment(thd->status_var.com_stmt_reprepare); - if (mysql_opt_change_db(thd, &stmt_db_name, &saved_cur_db_name, TRUE, - &cur_db_changed)) + if (unlikely(mysql_opt_change_db(thd, &stmt_db_name, &saved_cur_db_name, + TRUE, &cur_db_changed))) return TRUE; sql_mode_t save_sql_mode= thd->variables.sql_mode; @@ -4489,7 +4514,7 @@ Prepared_statement::reprepare() if (cur_db_changed) mysql_change_db(thd, (LEX_CSTRING*) &saved_cur_db_name, TRUE); - if (! error) + if (likely(!error)) { swap_prepared_statement(©); swap_parameter_array(param_array, copy.param_array, param_count); @@ -4788,7 +4813,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) if (state == Query_arena::STMT_PREPARED && !qc_executed) state= Query_arena::STMT_EXECUTED; - if (error == 0 && this->lex->sql_command == SQLCOM_CALL) + if (likely(error == 0) && this->lex->sql_command == SQLCOM_CALL) { if (is_sql_prepare()) { @@ -4823,7 +4848,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) sub-statements inside stored procedures are not logged into the general log. */ - if (error == 0 && thd->spcont == NULL) + if (likely(error == 0 && thd->spcont == NULL)) general_log_write(thd, COM_STMT_EXECUTE, thd->query(), thd->query_length()); error: @@ -4851,7 +4876,7 @@ bool Prepared_statement::execute_immediate(const char *query, uint query_len) set_sql_prepare(); name= execute_immediate_stmt_name; // for DBUG_PRINT etc - if (prepare(query, query_len)) + if (unlikely(prepare(query, query_len))) DBUG_RETURN(true); if (param_count != thd->lex->prepared_stmt_params.elements) @@ -5224,7 +5249,7 @@ Protocol_local::store_string(const char *str, size_t length, src_cs != &my_charset_bin && dst_cs != &my_charset_bin) { - if (convert->copy(str, length, src_cs, dst_cs, &error_unused)) + if (unlikely(convert->copy(str, length, src_cs, dst_cs, &error_unused))) return TRUE; str= convert->ptr(); length= convert->length(); diff --git a/sql/sql_priv.h b/sql/sql_priv.h index 7d2e1bae741..4e82b6a5d3a 100644 --- a/sql/sql_priv.h +++ b/sql/sql_priv.h @@ -1,5 +1,5 @@ -/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. - Copyright (c) 2010, 2014, Monty Program Ab. +/* Copyright (c) 2000, 2018, Oracle and/or its affiliates. + Copyright (c) 2010, 2018, Monty Program Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc index 6c1a4a32fb4..9bcb9a30a4c 100644 --- a/sql/sql_reload.cc +++ b/sql/sql_reload.cc @@ -74,13 +74,13 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, If reload_acl_and_cache() is called from SIGHUP handler we have to allocate temporary THD for execution of acl_reload()/grant_reload(). 
*/ - if (!thd && (thd= (tmp_thd= new THD(0)))) + if (unlikely(!thd) && (thd= (tmp_thd= new THD(0)))) { thd->thread_stack= (char*) &tmp_thd; thd->store_globals(); } - if (thd) + if (likely(thd)) { bool reload_acl_failed= acl_reload(thd); bool reload_grants_failed= grant_reload(thd); @@ -98,7 +98,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, } opt_noacl= 0; - if (tmp_thd) + if (unlikely(tmp_thd)) { delete tmp_thd; thd= 0; @@ -123,7 +123,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, } if (options & REFRESH_ERROR_LOG) - if (flush_error_log()) + if (unlikely(flush_error_log())) { /* When flush_error_log() failed, my_error() has not been called. diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index 1095bb25d0b..0f7bb7748c1 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -173,14 +173,14 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent) error= 1; } - if (!silent && !error) + if (likely(!silent && !error)) { binlog_error= write_bin_log(thd, TRUE, thd->query(), thd->query_length()); - if (!binlog_error) + if (likely(!binlog_error)) my_ok(thd); } - if (!error) + if (likely(!error)) query_cache_invalidate3(thd, table_list, 0); err: @@ -223,7 +223,7 @@ do_rename_temporary(THD *thd, TABLE_LIST *ren_table, TABLE_LIST *new_table, new_alias= (lower_case_table_names == 2) ? &new_table->alias : &new_table->table_name; - if (is_temporary_table(new_table)) + if (thd->find_temporary_table(new_table, THD::TMP_TABLE_ANY)) { my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias->str); DBUG_RETURN(1); // This can't be skipped @@ -332,7 +332,7 @@ do_rename(THD *thd, TABLE_LIST *ren_table, const LEX_CSTRING *new_db, { my_error(ER_NO_SUCH_TABLE, MYF(0), ren_table->db.str, old_alias.str); } - if (rc && !skip_error) + if (unlikely(rc && !skip_error)) DBUG_RETURN(1); DBUG_RETURN(0); diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index e1d1190e58f..f01ec789186 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -374,7 +374,7 @@ static int send_file(THD *thd) We need net_flush here because the client will not know it needs to send us the file name until it has processed the load event entry */ - if (net_flush(net) || (packet_len = my_net_read(net)) == packet_error) + if (unlikely(net_flush(net) || (packet_len = my_net_read(net)) == packet_error)) { errmsg = "while reading file name"; goto err; @@ -1259,12 +1259,12 @@ gtid_find_binlog_file(slave_connection_state *state, char *out_name, goto end; } bzero((char*) &cache, sizeof(cache)); - if ((file= open_binlog(&cache, buf, &errormsg)) == (File)-1) + if (unlikely((file= open_binlog(&cache, buf, &errormsg)) == (File)-1)) goto end; errormsg= get_gtid_list_event(&cache, &glev); end_io_cache(&cache); mysql_file_close(file, MYF(MY_WME)); - if (errormsg) + if (unlikely(errormsg)) goto end; if (!glev || contains_all_slave_gtid(state, glev)) @@ -1371,14 +1371,14 @@ gtid_state_from_pos(const char *name, uint32 offset, String packet; Format_description_log_event *fdev= NULL; - if (gtid_state->load((const rpl_gtid *)NULL, 0)) + if (unlikely(gtid_state->load((const rpl_gtid *)NULL, 0))) { errormsg= "Internal error (out of memory?) 
initializing slave state " "while scanning binlog to find start position"; return errormsg; } - if ((file= open_binlog(&cache, name, &errormsg)) == (File)-1) + if (unlikely((file= open_binlog(&cache, name, &errormsg)) == (File)-1)) return errormsg; if (!(fdev= new Format_description_log_event(3))) @@ -1411,7 +1411,7 @@ gtid_state_from_pos(const char *name, uint32 offset, err= Log_event::read_log_event(&cache, &packet, fdev, opt_master_verify_checksum ? current_checksum_alg : BINLOG_CHECKSUM_ALG_OFF); - if (err) + if (unlikely(err)) { errormsg= "Could not read binlog while searching for slave start " "position on master"; @@ -1426,7 +1426,7 @@ gtid_state_from_pos(const char *name, uint32 offset, { Format_description_log_event *tmp; - if (found_format_description_event) + if (unlikely(found_format_description_event)) { errormsg= "Duplicate format description log event found while " "searching for old-style position in binlog"; @@ -1435,8 +1435,9 @@ gtid_state_from_pos(const char *name, uint32 offset, current_checksum_alg= get_checksum_alg(packet.ptr(), packet.length()); found_format_description_event= true; - if (!(tmp= new Format_description_log_event(packet.ptr(), packet.length(), - fdev))) + if (unlikely(!(tmp= new Format_description_log_event(packet.ptr(), + packet.length(), + fdev)))) { errormsg= "Corrupt Format_description event found or out-of-memory " "while searching for old-style position in binlog"; @@ -1459,7 +1460,8 @@ gtid_state_from_pos(const char *name, uint32 offset, goto end; } } - else if (typ != FORMAT_DESCRIPTION_EVENT && !found_format_description_event) + else if (unlikely(typ != FORMAT_DESCRIPTION_EVENT && + !found_format_description_event)) { errormsg= "Did not find format description log event while searching " "for old-style position in binlog"; @@ -1474,7 +1476,7 @@ gtid_state_from_pos(const char *name, uint32 offset, bool status; uint32 list_len; - if (found_gtid_list_event) + if (unlikely(found_gtid_list_event)) { errormsg= "Found duplicate Gtid_list_log_event while scanning binlog " "to find slave start position"; @@ -1483,7 +1485,7 @@ gtid_state_from_pos(const char *name, uint32 offset, status= Gtid_list_log_event::peek(packet.ptr(), packet.length(), current_checksum_alg, >id_list, &list_len, fdev); - if (status) + if (unlikely(status)) { errormsg= "Error reading Gtid_list_log_event while searching " "for old-style position in binlog"; @@ -1491,7 +1493,7 @@ gtid_state_from_pos(const char *name, uint32 offset, } err= gtid_state->load(gtid_list, list_len); my_free(gtid_list); - if (err) + if (unlikely(err)) { errormsg= "Internal error (out of memory?) initialising slave state " "while scanning binlog to find start position"; @@ -1499,7 +1501,7 @@ gtid_state_from_pos(const char *name, uint32 offset, } found_gtid_list_event= true; } - else if (!found_gtid_list_event) + else if (unlikely(!found_gtid_list_event)) { /* We did not find any Gtid_list_log_event, must be old binlog. 
*/ goto end; @@ -1508,15 +1510,16 @@ gtid_state_from_pos(const char *name, uint32 offset, { rpl_gtid gtid; uchar flags2; - if (Gtid_log_event::peek(packet.ptr(), packet.length(), - current_checksum_alg, >id.domain_id, - >id.server_id, >id.seq_no, &flags2, fdev)) + if (unlikely(Gtid_log_event::peek(packet.ptr(), packet.length(), + current_checksum_alg, >id.domain_id, + >id.server_id, >id.seq_no, &flags2, + fdev))) { errormsg= "Corrupt gtid_log_event found while scanning binlog to find " "initial slave position"; goto end; } - if (gtid_state->update(>id)) + if (unlikely(gtid_state->update(>id))) { errormsg= "Internal error (out of memory?) updating slave state while " "scanning binlog to find start position"; @@ -1525,7 +1528,7 @@ gtid_state_from_pos(const char *name, uint32 offset, } } - if (!valid_pos) + if (unlikely(!valid_pos)) { errormsg= "Slave requested incorrect position in master binlog. " "Requested position %u in file '%s', but this position does not " @@ -2099,8 +2102,8 @@ static int init_binlog_sender(binlog_send_info *info, info->error= ER_UNKNOWN_ERROR; return 1; } - if ((error= check_slave_start_position(info, &info->errmsg, - &info->error_gtid))) + if (unlikely((error= check_slave_start_position(info, &info->errmsg, + &info->error_gtid)))) { info->error= error; return 1; @@ -2199,7 +2202,7 @@ static int send_format_descriptor_event(binlog_send_info *info, IO_CACHE *log, : BINLOG_CHECKSUM_ALG_OFF); linfo->pos= my_b_tell(log); - if (error) + if (unlikely(error)) { set_read_error(info, error); DBUG_RETURN(1); @@ -2333,7 +2336,7 @@ static int send_format_descriptor_event(binlog_send_info *info, IO_CACHE *log, : BINLOG_CHECKSUM_ALG_OFF); linfo->pos= my_b_tell(log); - if (error) + if (unlikely(error)) { set_read_error(info, error); DBUG_RETURN(1); @@ -2585,7 +2588,7 @@ static int send_events(binlog_send_info *info, IO_CACHE* log, LOG_INFO* linfo, : BINLOG_CHECKSUM_ALG_OFF); linfo->pos= my_b_tell(log); - if (error) + if (unlikely(error)) { set_read_error(info, error); return 1; @@ -2893,6 +2896,12 @@ err: thd->variables.max_allowed_packet= old_max_allowed_packet; delete info->fdev; + if (likely(info->error == 0)) + { + my_eof(thd); + DBUG_VOID_RETURN; + } + if ((info->error == ER_MASTER_FATAL_ERROR_READING_BINLOG || info->error == ER_SLAVE_SAME_ID) && binlog_open) { @@ -2954,17 +2963,10 @@ err: "mysql", rpl_gtid_slave_state_table_name.str); info->error= ER_MASTER_FATAL_ERROR_READING_BINLOG; } - else if (info->error != 0 && info->errmsg != NULL) + else if (info->errmsg != NULL) strcpy(info->error_text, info->errmsg); - if (info->error == 0) - { - my_eof(thd); - } - else - { - my_message(info->error, info->error_text, MYF(0)); - } + my_message(info->error, info->error_text, MYF(0)); DBUG_VOID_RETURN; } @@ -3283,9 +3285,9 @@ int reset_slave(THD *thd, Master_info* mi) } // delete relay logs, clear relay log coordinates - if ((error= purge_relay_logs(&mi->rli, thd, + if (unlikely((error= purge_relay_logs(&mi->rli, thd, 1 /* just reset */, - &errmsg))) + &errmsg)))) { sql_errno= ER_RELAY_LOG_FAIL; goto err; @@ -3343,7 +3345,7 @@ int reset_slave(THD *thd, Master_info* mi) repl_semisync_slave.reset_slave(mi); err: mi->unlock_slave_threads(); - if (error) + if (unlikely(error)) my_error(sql_errno, MYF(0), errmsg); DBUG_RETURN(error); } @@ -4042,7 +4044,7 @@ bool mysql_show_binlog_events(THD* thd) break; } - if (event_count < limit_end && log.error) + if (unlikely(event_count < limit_end && log.error)) { errmsg = "Wrong offset or I/O error"; mysql_mutex_unlock(log_lock); @@ -4223,7 +4225,7 
@@ bool show_binlogs(THD* thd) if (protocol->write()) goto err; } - if(index_file->error == -1) + if (unlikely(index_file->error == -1)) goto err; mysql_bin_log.unlock_index(); my_eof(thd); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 958ab0201e0..fdd890795d9 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -309,7 +309,7 @@ void dbug_serve_apcs(THD *thd, int n_calls) thd_proc_info(thd, "show_explain_trap"); my_sleep(30000); thd_proc_info(thd, save_proc_info); - if (thd->check_killed()) + if (unlikely(thd->check_killed(1))) break; } } @@ -351,7 +351,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result, ulong setup_tables_done_option) { bool res; - register SELECT_LEX *select_lex = &lex->select_lex; + SELECT_LEX *select_lex = &lex->select_lex; DBUG_ENTER("handle_select"); MYSQL_SELECT_START(thd->query()); @@ -386,7 +386,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result, res|= thd->is_error(); if (unlikely(res)) result->abort_result_set(); - if (thd->killed == ABORT_QUERY && !thd->no_errors) + if (unlikely(thd->killed == ABORT_QUERY && !thd->no_errors)) { /* If LIMIT ROWS EXAMINED interrupted query execution, issue a warning, @@ -693,7 +693,7 @@ bool vers_select_conds_t::init_from_sysvar(THD *thd) return false; } -void vers_select_conds_t::print(String *str, enum_query_type query_type) +void vers_select_conds_t::print(String *str, enum_query_type query_type) const { switch (type) { case SYSTEM_TIME_UNSPECIFIED: @@ -765,14 +765,14 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) { TABLE_LIST* derived= master_unit()->derived; // inner SELECT may not be a derived table (derived == NULL) - while (derived && outer_slex && !derived->vers_conditions) + while (derived && outer_slex && !derived->vers_conditions.is_set()) { derived= outer_slex->master_unit()->derived; outer_slex= outer_slex->outer_select(); } if (derived && outer_slex) { - DBUG_ASSERT(derived->vers_conditions); + DBUG_ASSERT(derived->vers_conditions.is_set()); outer_table= derived; } } @@ -791,13 +791,9 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) */ if (table->partition_names && table->table->part_info->vers_info) { - if (vers_conditions) + if (vers_conditions.is_set()) { -#define PART_VERS_ERR_MSG "%s PARTITION (%s)" - char buf[NAME_LEN*2 + sizeof(PART_VERS_ERR_MSG)]; - my_snprintf(buf, sizeof(buf), PART_VERS_ERR_MSG, table->alias.str, - table->partition_names->head()->c_ptr()); - my_error(ER_VERS_NOT_VERSIONED, MYF(0), buf); + my_error(ER_VERS_QUERY_IN_PARTITION, MYF(0), table->alias.str); DBUG_RETURN(-1); } else @@ -805,7 +801,7 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) } #endif - if (outer_table && !vers_conditions) + if (outer_table && !vers_conditions.is_set()) { // propagate system_time from nearest outer SELECT_LEX vers_conditions= outer_table->vers_conditions; @@ -813,15 +809,15 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) } // propagate system_time from sysvar - if (!vers_conditions) + if (!vers_conditions.is_set()) { if (vers_conditions.init_from_sysvar(thd)) DBUG_RETURN(-1); } - if (vers_conditions) + if (vers_conditions.is_set()) { - if (vers_conditions == SYSTEM_TIME_ALL) + if (vers_conditions.type == SYSTEM_TIME_ALL) continue; lock_type= TL_READ; // ignore TL_WRITE, history is immutable anyway @@ -837,13 +833,15 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) bool timestamps_only= table->table->versioned(VERS_TIMESTAMP); - if (vers_conditions) + if 
(vers_conditions.is_set()) { + thd->where= "FOR SYSTEM_TIME"; /* TODO: do resolve fix_length_and_dec(), fix_fields(). This requires storing vers_conditions as Item and make some magic related to vers_system_time_t/VERS_TRX_ID at stage of fix_fields() (this is large refactoring). */ - vers_conditions.resolve_units(timestamps_only); + if (vers_conditions.resolve_units(thd)) + DBUG_RETURN(-1); if (timestamps_only && (vers_conditions.start.unit == VERS_TRX_ID || vers_conditions.end.unit == VERS_TRX_ID)) { @@ -852,10 +850,9 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) } } - Item *cond1= 0, *cond2= 0, *curr= 0; - // Temporary tables of can be created from INNODB tables and thus will - // have uint64 type of sys_trx_(start|end) field. - // They need special handling. + Item *cond1= NULL, *cond2= NULL, *cond3= NULL, *curr= NULL; + Item *point_in_time1= vers_conditions.start.item; + Item *point_in_time2= vers_conditions.end.item; TABLE *t= table->table; if (t->versioned(VERS_TIMESTAMP)) { @@ -869,19 +866,21 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) cond1= newx Item_func_eq(thd, row_end, curr); break; case SYSTEM_TIME_AS_OF: - cond1= newx Item_func_le(thd, row_start, vers_conditions.start.item); - cond2= newx Item_func_gt(thd, row_end, vers_conditions.start.item); + cond1= newx Item_func_le(thd, row_start, point_in_time1); + cond2= newx Item_func_gt(thd, row_end, point_in_time1); break; case SYSTEM_TIME_FROM_TO: - cond1= newx Item_func_lt(thd, row_start, vers_conditions.end.item); - cond2= newx Item_func_ge(thd, row_end, vers_conditions.start.item); + cond1= newx Item_func_lt(thd, row_start, point_in_time2); + cond2= newx Item_func_gt(thd, row_end, point_in_time1); + cond3= newx Item_func_lt(thd, point_in_time1, point_in_time2); break; case SYSTEM_TIME_BETWEEN: - cond1= newx Item_func_le(thd, row_start, vers_conditions.end.item); - cond2= newx Item_func_ge(thd, row_end, vers_conditions.start.item); + cond1= newx Item_func_le(thd, row_start, point_in_time2); + cond2= newx Item_func_gt(thd, row_end, point_in_time1); + cond3= newx Item_func_le(thd, point_in_time1, point_in_time2); break; case SYSTEM_TIME_BEFORE: - cond1= newx Item_func_lt(thd, row_end, vers_conditions.start.item); + cond1= newx Item_func_lt(thd, row_end, point_in_time1); break; default: DBUG_ASSERT(0); @@ -901,29 +900,33 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) break; case SYSTEM_TIME_AS_OF: trx_id0= vers_conditions.start.unit == VERS_TIMESTAMP - ? newx Item_func_vtq_id(thd, vers_conditions.start.item, TR_table::FLD_TRX_ID) - : vers_conditions.start.item; - cond1= newx Item_func_vtq_trx_sees_eq(thd, trx_id0, row_start); - cond2= newx Item_func_vtq_trx_sees(thd, row_end, trx_id0); + ? newx Item_func_trt_id(thd, point_in_time1, TR_table::FLD_TRX_ID) + : point_in_time1; + cond1= newx Item_func_trt_trx_sees_eq(thd, trx_id0, row_start); + cond2= newx Item_func_trt_trx_sees(thd, row_end, trx_id0); break; case SYSTEM_TIME_FROM_TO: + cond3= newx Item_func_lt(thd, point_in_time1, point_in_time2); + /* fall through */ case SYSTEM_TIME_BETWEEN: trx_id0= vers_conditions.start.unit == VERS_TIMESTAMP - ? newx Item_func_vtq_id(thd, vers_conditions.start.item, TR_table::FLD_TRX_ID, true) - : vers_conditions.start.item; + ? newx Item_func_trt_id(thd, point_in_time1, TR_table::FLD_TRX_ID, true) + : point_in_time1; trx_id1= vers_conditions.end.unit == VERS_TIMESTAMP - ? newx Item_func_vtq_id(thd, vers_conditions.end.item, TR_table::FLD_TRX_ID, false) - : vers_conditions.end.item; + ? 
newx Item_func_trt_id(thd, point_in_time2, TR_table::FLD_TRX_ID, false) + : point_in_time2; cond1= vers_conditions.type == SYSTEM_TIME_FROM_TO - ? newx Item_func_vtq_trx_sees(thd, trx_id1, row_start) - : newx Item_func_vtq_trx_sees_eq(thd, trx_id1, row_start); - cond2= newx Item_func_vtq_trx_sees_eq(thd, row_end, trx_id0); + ? newx Item_func_trt_trx_sees(thd, trx_id1, row_start) + : newx Item_func_trt_trx_sees_eq(thd, trx_id1, row_start); + cond2= newx Item_func_trt_trx_sees_eq(thd, row_end, trx_id0); + if (!cond3) + cond3= newx Item_func_le(thd, point_in_time1, point_in_time2); break; case SYSTEM_TIME_BEFORE: trx_id0= vers_conditions.start.unit == VERS_TIMESTAMP - ? newx Item_func_vtq_id(thd, vers_conditions.start.item, TR_table::FLD_TRX_ID) - : vers_conditions.start.item; - cond1= newx Item_func_lt(thd, row_end, trx_id0); + ? newx Item_func_trt_id(thd, point_in_time1, TR_table::FLD_TRX_ID, true) + : point_in_time1; + cond1= newx Item_func_trt_trx_sees(thd, trx_id0, row_end); break; default: DBUG_ASSERT(0); @@ -933,6 +936,7 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) if (cond1) { cond1= and_items(thd, cond2, cond1); + cond1= and_items(thd, cond3, cond1); table->on_expr= and_items(thd, table->on_expr, cond1); } @@ -1079,7 +1083,9 @@ JOIN::prepare(TABLE_LIST *tables_init, select_lex != select_lex->master_unit()->global_parameters()) real_og_num+= select_lex->order_list.elements; - if (setup_wild(thd, tables_list, fields_list, &all_fields, wild_num)) + DBUG_ASSERT(select_lex->hidden_bit_fields == 0); + if (setup_wild(thd, tables_list, fields_list, &all_fields, wild_num, + &select_lex->hidden_bit_fields)) DBUG_RETURN(-1); if (select_lex->setup_ref_array(thd, real_og_num)) DBUG_RETURN(-1); @@ -1140,7 +1146,7 @@ JOIN::prepare(TABLE_LIST *tables_init, having->check_cols(1))); select_lex->having_fix_field= 0; - if (having_fix_rc || thd->is_error()) + if (unlikely(having_fix_rc || thd->is_error())) DBUG_RETURN(-1); /* purecov: inspected */ thd->lex->allow_sum_func= save_allow_sum_func; @@ -1285,7 +1291,7 @@ JOIN::prepare(TABLE_LIST *tables_init, } procedure= setup_procedure(thd, proc_param, result, fields_list, &error); - if (error) + if (unlikely(error)) goto err; /* purecov: inspected */ if (procedure) { @@ -1390,16 +1396,28 @@ bool JOIN::build_explain() { create_explain_query_if_not_exists(thd->lex, thd->mem_root); have_query_plan= QEP_AVAILABLE; - if (save_explain_data(thd->lex->explain, false /* can overwrite */, + + /* + explain data must be created on the Explain_query::mem_root. 
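The vers_setup_conds() hunks above rewrite the FOR SYSTEM_TIME cases in terms of point_in_time1/point_in_time2 and add a third condition (cond3) validating the requested range. For the timestamp-based branch the generated Item trees reduce to simple range checks on the row_start/row_end columns. A self-contained sketch, with plain integers standing in for timestamps (the transaction-id branch, which goes through TR_table lookups, is not modelled here):

#include <cassert>

enum vers_mode { AS_OF, FROM_TO, BETWEEN, BEFORE };

/* True if a history row valid during [row_start, row_end) is visible for the
   given FOR SYSTEM_TIME clause; mirrors cond1/cond2/cond3 built above. */
static bool row_visible(vers_mode mode, long row_start, long row_end,
                        long p1, long p2= 0)
{
  switch (mode)
  {
  case AS_OF:   return row_start <= p1 && row_end > p1;
  case FROM_TO: return row_start <  p2 && row_end > p1 && p1 <  p2;
  case BETWEEN: return row_start <= p2 && row_end > p1 && p1 <= p2;
  case BEFORE:  return row_end < p1;
  }
  return false;
}

int main()
{
  /* a row that was current during [10, 20) */
  assert(row_visible(AS_OF,   10, 20, 15));
  assert(!row_visible(AS_OF,  10, 20, 20));      /* already ended at 20 */
  assert(row_visible(FROM_TO, 10, 20, 5, 12));
  assert(!row_visible(FROM_TO, 10, 20, 12, 12)); /* empty range rejected by cond3 */
  assert(row_visible(BETWEEN, 10, 20, 12, 12));  /* single point still allowed */
  assert(row_visible(BEFORE,  10, 20, 25));
  return 0;
}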
Because it's + just a memroot, not an arena, explain data must not contain any Items + */ + MEM_ROOT *old_mem_root= thd->mem_root; + Item *old_free_list __attribute__((unused))= thd->free_list; + thd->mem_root= thd->lex->explain->mem_root; + bool res= save_explain_data(thd->lex->explain, false /* can overwrite */, need_tmp, !skip_sort_order && !no_order && (order || group_list), - select_distinct)) + select_distinct); + thd->mem_root= old_mem_root; + DBUG_ASSERT(thd->free_list == old_free_list); // no Items were created + if (res) return 1; + uint select_nr= select_lex->select_number; JOIN_TAB *curr_tab= join_tab + exec_join_tab_cnt(); for (uint i= 0; i < aggr_tables; i++, curr_tab++) { - if (select_nr == INT_MAX) + if (select_nr == INT_MAX) { /* this is a fake_select_lex of a union */ select_nr= select_lex->master_unit()->first_select()->select_number; @@ -1553,9 +1571,6 @@ JOIN::optimize_inner() eval_select_list_used_tables(); - if (optimize_constant_subqueries()) - DBUG_RETURN(1); - table_count= select_lex->leaf_tables.elements; if (setup_ftfuncs(select_lex)) /* should be after having->fix_fields */ @@ -1596,7 +1611,11 @@ JOIN::optimize_inner() /* The following code will allocate the new items in a permanent MEMROOT for prepared statements and stored procedures. + + But first we need to ensure that thd->lex->explain is allocated + in the execution arena */ + create_explain_query_if_not_exists(thd->lex, thd->mem_root); Query_arena *arena, backup; arena= thd->activate_stmt_arena_if_needed(&backup); @@ -1605,8 +1624,12 @@ JOIN::optimize_inner() /* Convert all outer joins to inner joins if possible */ conds= simplify_joins(this, join_list, conds, TRUE, FALSE); - if (select_lex->save_leaf_tables(thd)) + if (thd->is_error() || select_lex->save_leaf_tables(thd)) + { + if (arena) + thd->restore_active_arena(arena, &backup); DBUG_RETURN(1); + } build_bitmap_for_nested_joins(join_list, 0); sel->prep_where= conds ? 
conds->copy_andor_structure(thd) : 0; @@ -1617,6 +1640,9 @@ JOIN::optimize_inner() thd->restore_active_arena(arena, &backup); } + if (optimize_constant_subqueries()) + DBUG_RETURN(1); + List eq_list; if (setup_degenerate_jtbm_semi_joins(this, join_list, eq_list)) @@ -1724,7 +1750,8 @@ JOIN::optimize_inner() if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE)) DBUG_RETURN(1); } - if (thd->is_error()) + + if (unlikely(thd->is_error())) { error= 1; DBUG_PRINT("error",("Error from optimize_cond")); @@ -1735,7 +1762,7 @@ JOIN::optimize_inner() having= optimize_cond(this, having, join_list, TRUE, &having_value, &having_equal); - if (thd->is_error()) + if (unlikely(thd->is_error())) { error= 1; DBUG_PRINT("error",("Error from optimize_cond")); @@ -1877,7 +1904,7 @@ JOIN::optimize_inner() group_list= remove_const(this, group_list, conds, rollup.state == ROLLUP::STATE_NONE, &simple_group); - if (thd->is_error()) + if (unlikely(thd->is_error())) { error= 1; DBUG_RETURN(1); @@ -1887,13 +1914,26 @@ JOIN::optimize_inner() /* Calculate how to do the join */ THD_STAGE_INFO(thd, stage_statistics); result->prepare_to_read_rows(); - if (make_join_statistics(this, select_lex->leaf_tables, &keyuse) || - thd->is_fatal_error) + if (unlikely(make_join_statistics(this, select_lex->leaf_tables, + &keyuse)) || + unlikely(thd->is_fatal_error)) { DBUG_PRINT("error",("Error: make_join_statistics() failed")); DBUG_RETURN(1); } + /* + If a splittable materialized derived/view dt_i is embedded into + into another splittable materialized derived/view dt_o then + splitting plans for dt_i and dt_o are evaluated independently. + First the optimizer looks for the best splitting plan sp_i for dt_i. + It happens when non-splitting plans for dt_o are evaluated. + The cost of sp_i is considered as the cost of materialization of dt_i + when evaluating any splitting plan for dt_o. + */ + if (fix_all_splittings_in_plan()) + DBUG_RETURN(1); + setup_subq_exit: with_two_phase_optimization= check_two_phase_optimization(thd); if (with_two_phase_optimization) @@ -1917,7 +1957,7 @@ int JOIN::optimize_stage2() if (subq_exit_fl) goto setup_subq_exit; - if (thd->check_killed()) + if (unlikely(thd->check_killed())) DBUG_RETURN(1); /* Generate an execution plan from the found optimal join order. 
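The JOIN::optimize_inner() and JOIN::build_explain() hunks above follow the same manual pattern: save the active arena or mem_root, redirect allocation somewhere longer-lived (the statement arena, or Explain_query::mem_root), do the work, and restore on every exit path; the added early-return now restores the arena before returning. A minimal sketch of expressing that swap as an RAII guard, using std::pmr as a stand-in for MEM_ROOT (the Session and Swap_mem_root names are invented for illustration):

#include <cassert>
#include <memory_resource>
#include <vector>

/* Stand-in for THD: points at the arena new objects should be placed on. */
struct Session
{
  std::pmr::memory_resource *mem_root= std::pmr::get_default_resource();
};

/* While alive, allocations charged to the session land on another arena;
   the destructor restores the previous one on every exit path. */
class Swap_mem_root
{
  Session &thd;
  std::pmr::memory_resource *saved;
public:
  Swap_mem_root(Session &thd_arg, std::pmr::memory_resource *other)
    : thd(thd_arg), saved(thd_arg.mem_root) { thd.mem_root= other; }
  ~Swap_mem_root() { thd.mem_root= saved; }
};

int main()
{
  Session thd;
  std::pmr::monotonic_buffer_resource explain_root;  /* outlives the statement */
  {
    Swap_mem_root guard(thd, &explain_root);
    std::pmr::vector<int> rows(thd.mem_root);        /* allocated on explain_root */
    rows.push_back(42);
  }                                                  /* previous root restored here */
  assert(thd.mem_root == std::pmr::get_default_resource());
  return 0;
}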
*/ @@ -1993,7 +2033,7 @@ int JOIN::optimize_stage2() select= make_select(*table, const_table_map, const_table_map, conds, (SORT_INFO*) 0, 1, &error); - if (error) + if (unlikely(error)) { /* purecov: inspected */ error= -1; /* purecov: inspected */ DBUG_PRINT("error",("Error: make_select() failed")); @@ -2016,7 +2056,7 @@ int JOIN::optimize_stage2() { conds= substitute_for_best_equal_field(thd, NO_PARTICULAR_TAB, conds, cond_equal, map2table); - if (thd->is_error()) + if (unlikely(thd->is_error())) { error= 1; DBUG_PRINT("error",("Error from substitute_for_best_equal")); @@ -2042,7 +2082,7 @@ int JOIN::optimize_stage2() *tab->on_expr_ref, tab->cond_equal, map2table); - if (thd->is_error()) + if (unlikely(thd->is_error())) { error= 1; DBUG_PRINT("error",("Error from substitute_for_best_equal")); @@ -2072,7 +2112,7 @@ int JOIN::optimize_stage2() { ref_item= substitute_for_best_equal_field(thd, tab, ref_item, equals, map2table); - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) DBUG_RETURN(1); if (first_inner) @@ -2142,7 +2182,7 @@ int JOIN::optimize_stage2() { ORDER *org_order= order; order=remove_const(this, order,conds,1, &simple_order); - if (thd->is_error()) + if (unlikely(thd->is_error())) { error= 1; DBUG_RETURN(1); @@ -2304,7 +2344,7 @@ int JOIN::optimize_stage2() group_list= remove_const(this, group_list, conds, rollup.state == ROLLUP::STATE_NONE, &simple_group); - if (thd->is_error()) + if (unlikely(thd->is_error())) { error= 1; DBUG_RETURN(1); @@ -2325,7 +2365,7 @@ int JOIN::optimize_stage2() { group_list= procedure->group= remove_const(this, procedure->group, conds, 1, &simple_group); - if (thd->is_error()) + if (unlikely(thd->is_error())) { error= 1; DBUG_RETURN(1); @@ -2552,7 +2592,7 @@ int JOIN::optimize_stage2() ordered_index_usage= ordered_index_order_by; } } - } + } if (having) having_is_correlated= MY_TEST(having->used_tables() & OUTER_REF_TABLE_BIT); @@ -2694,6 +2734,25 @@ bool JOIN::add_having_as_table_cond(JOIN_TAB *tab) } +bool JOIN::add_fields_for_current_rowid(JOIN_TAB *cur, List *table_fields) +{ + /* + this will not walk into semi-join materialization nests but this is ok + because we will never need to save current rowids for those. 
+ */ + for (JOIN_TAB *tab=join_tab; tab < cur; tab++) + { + if (!tab->keep_current_rowid) + continue; + Item *item= new (thd->mem_root) Item_temptable_rowid(tab->table); + item->fix_fields(thd, 0); + table_fields->push_back(item, thd->mem_root); + cur->tmp_table_param->func_count++; + } + return 0; +} + + /** Set info for aggregation tables @@ -3000,13 +3059,13 @@ bool JOIN::make_aggr_tables_info() (select_distinct && tmp_table_param.using_outer_summary_function)) { /* Must copy to another table */ DBUG_PRINT("info",("Creating group table")); - + calc_group_buffer(this, group_list); count_field_types(select_lex, &tmp_table_param, tmp_all_fields1, select_distinct && !group_list); - tmp_table_param.hidden_field_count= + tmp_table_param.hidden_field_count= tmp_all_fields1.elements - tmp_fields_list1.elements; - + curr_tab++; aggr_tables++; bzero(curr_tab, sizeof(JOIN_TAB)); @@ -3021,12 +3080,11 @@ bool JOIN::make_aggr_tables_info() if (join_tab->is_using_loose_index_scan()) tmp_table_param.precomputed_group_by= TRUE; - tmp_table_param.hidden_field_count= + tmp_table_param.hidden_field_count= curr_all_fields->elements - curr_fields_list->elements; ORDER *dummy= NULL; //TODO can use table->group here also - if (create_postjoin_aggr_table(curr_tab, - curr_all_fields, dummy, true, + if (create_postjoin_aggr_table(curr_tab, curr_all_fields, dummy, true, distinct, keep_row_order)) DBUG_RETURN(true); @@ -3162,7 +3220,7 @@ bool JOIN::make_aggr_tables_info() !join_tab || !join_tab-> is_using_agg_loose_index_scan())) DBUG_RETURN(true); - if (setup_sum_funcs(thd, sum_funcs) || thd->is_fatal_error) + if (unlikely(setup_sum_funcs(thd, sum_funcs) || thd->is_fatal_error)) DBUG_RETURN(true); } if (group_list || order) @@ -3252,7 +3310,7 @@ bool JOIN::make_aggr_tables_info() - duplicate value removal Both of these operations are done after window function computation step. */ - curr_tab= join_tab + exec_join_tab_cnt() + aggr_tables - 1; + curr_tab= join_tab + total_join_tab_cnt(); if (select_lex->window_funcs.elements) { if (!(curr_tab->window_funcs_step= new Window_funcs_computation)) @@ -3297,11 +3355,13 @@ JOIN::create_postjoin_aggr_table(JOIN_TAB *tab, List *table_fields, */ ha_rows table_rows_limit= ((order == NULL || skip_sort_order) && !table_group && - !select_lex->with_sum_func) ? - select_limit : HA_POS_ERROR; + !select_lex->with_sum_func) ? 
select_limit + : HA_POS_ERROR; if (!(tab->tmp_table_param= new TMP_TABLE_PARAM(tmp_table_param))) DBUG_RETURN(true); + if (tmp_table_keep_current_rowid) + add_fields_for_current_rowid(tab, table_fields); tab->tmp_table_param->skip_create_table= true; TABLE* table= create_tmp_table(thd, tab->tmp_table_param, *table_fields, table_group, distinct, @@ -3696,7 +3756,7 @@ bool JOIN::prepare_result(List **columns_list) select_lex->handle_derived(thd->lex, DT_CREATE)) goto err; - if (result->prepare2()) + if (result->prepare2(this)) goto err; if ((select_lex->options & OPTION_SCHEMA_TABLE) && @@ -3723,7 +3783,7 @@ bool JOIN::save_explain_data(Explain_query *output, bool can_overwrite, bool distinct) { /* - If there is SELECT in this statemet with the same number it must be the + If there is SELECT in this statement with the same number it must be the same SELECT */ DBUG_ASSERT(select_lex->select_number == UINT_MAX || @@ -3833,7 +3893,7 @@ void JOIN::exec_inner() } columns_list= &procedure_fields_list; } - if (result->prepare2()) + if (result->prepare2(this)) DBUG_VOID_RETURN; if (!tables_list && (table_count || !select_lex->with_sum_func) && @@ -3877,7 +3937,7 @@ void JOIN::exec_inner() } else send_records= 0; - if (!error) + if (likely(!error)) { join_free(); // Unlock all cursors error= (int) result->send_eof(); @@ -3903,7 +3963,7 @@ void JOIN::exec_inner() /* We've called exec_const_cond->val_int(). This may have caused an error. */ - if (thd->is_error()) + if (unlikely(thd->is_error())) { error= thd->is_error(); DBUG_VOID_RETURN; @@ -3948,7 +4008,7 @@ void JOIN::exec_inner() while ((cur_const_item= const_item_it++)) { cur_const_item->val_str(); // This caches val_str() to Item::str_value - if (thd->is_error()) + if (unlikely(thd->is_error())) { error= thd->is_error(); DBUG_VOID_RETURN; @@ -3982,7 +4042,7 @@ void JOIN::exec_inner() join_examined_rows= 0; /* XXX: When can we have here thd->is_error() not zero? 
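create_postjoin_aggr_table() above now calls the new JOIN::add_fields_for_current_rowid() when tmp_table_keep_current_rowid is set, appending one extra field (an Item_temptable_rowid) per join table flagged keep_current_rowid, so every row written to the post-join temporary table also carries the row ids of the base rows it came from and later phases can still locate those rows. A toy sketch of the idea, with a vector standing in for the base table and the row index standing in for handler::position()/rnd_pos():

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

/* Toy "storage engine": rows addressable by a stable row id. */
struct ToyTable
{
  std::vector<std::string> rows;
  std::uint64_t current_rowid(std::size_t pos) const { return pos; }
};

/* One record spilled to the temporary table: the projected value plus the
   hidden row-id column appended by add_fields_for_current_rowid(). */
struct SpilledRow
{
  std::string value;
  std::uint64_t rowid;
};

int main()
{
  ToyTable t{{"alpha", "beta", "gamma"}};

  /* join/filter phase: project and remember where each row came from */
  std::vector<SpilledRow> tmp_table;
  for (std::size_t i= 0; i < t.rows.size(); i++)
    if (t.rows[i] != "beta")
      tmp_table.push_back({t.rows[i], t.current_rowid(i)});

  /* later phase: re-position on the base table by the saved row id */
  for (const SpilledRow &r : tmp_table)
    std::cout << "rowid " << r.rowid << " -> " << t.rows[r.rowid] << '\n';
  return 0;
}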
*/ - if (thd->is_error()) + if (unlikely(thd->is_error())) { error= thd->is_error(); DBUG_VOID_RETURN; @@ -4190,7 +4250,7 @@ mysql_select(THD *thd, join->having_history= (join->having?join->having:join->tmp_having); } - if (thd->is_error()) + if (unlikely(thd->is_error())) goto err; join->exec(); @@ -4224,17 +4284,20 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select, int error; DBUG_ENTER("get_quick_record_count"); uchar buff[STACK_BUFF_ALLOC]; - if (check_stack_overrun(thd, STACK_MIN_SIZE, buff)) + if (unlikely(check_stack_overrun(thd, STACK_MIN_SIZE, buff))) DBUG_RETURN(0); // Fatal error flag is set if (select) { select->head=table; table->reginfo.impossible_range=0; - if ((error= select->test_quick_select(thd, *(key_map *)keys,(table_map) 0, - limit, 0, FALSE, - TRUE /* remove_where_parts*/)) == 1) + if (likely((error= + select->test_quick_select(thd, *(key_map *)keys, + (table_map) 0, + limit, 0, FALSE, + TRUE /* remove_where_parts*/)) == + 1)) DBUG_RETURN(select->quick->records); - if (error == -1) + if (unlikely(error == -1)) { table->reginfo.impossible_range=1; DBUG_RETURN(0); @@ -4341,7 +4404,7 @@ make_join_statistics(JOIN *join, List &tables_list, DBUG_EXECUTE_IF("bug11747970_raise_error", { join->thd->set_killed(KILL_QUERY_HARD); }); - if (error) + if (unlikely(error)) { table->file->print_error(error, MYF(0)); goto error; @@ -8597,7 +8660,7 @@ best_extension_by_limited_search(JOIN *join, dbug_serve_apcs(thd, 1); ); - if (thd->check_killed()) // Abort + if (unlikely(thd->check_killed())) // Abort DBUG_RETURN(TRUE); DBUG_EXECUTE("opt", print_plan(join, idx, read_time, record_count, idx, @@ -9393,7 +9456,7 @@ bool JOIN::get_best_combination() */ uint aggr_tables= (group_list ? 1 : 0) + (select_distinct ? - (tmp_table_param. using_outer_summary_function ? 2 : 1) : 0) + + (tmp_table_param.using_outer_summary_function ? 2 : 1) : 0) + (order ? 1 : 0) + (select_options & (SELECT_BIG_RESULT | OPTION_BUFFER_RESULT) ? 
1 : 0) ; @@ -9412,9 +9475,6 @@ bool JOIN::get_best_combination() full_join=0; hash_join= FALSE; - if (fix_all_splittings_in_plan()) - DBUG_RETURN(TRUE); - fix_semijoin_strategies_for_picked_join_order(this); JOIN_TAB_RANGE *root_range; @@ -9857,7 +9917,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, keyinfo->key_part[i].length, keyuse->val, FALSE); - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) DBUG_RETURN(TRUE); tmp.copy(); j->ref.const_ref_part_map |= key_part_map(1) << i ; @@ -11335,9 +11395,9 @@ end_sj_materialize(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) DBUG_RETURN(NESTED_LOOP_OK); } fill_record(thd, table, table->field, sjm->sjm_table_cols, TRUE, FALSE); - if (thd->is_error()) + if (unlikely(thd->is_error())) DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ - if ((error= table->file->ha_write_tmp_row(table->record[0]))) + if (unlikely((error= table->file->ha_write_tmp_row(table->record[0])))) { /* create_myisam_from_heap will generate error if needed */ if (table->file->is_fatal_error(error, HA_CHECK_DUP) && @@ -13145,7 +13205,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, if (prev_ptr == &first_order) // Nothing to sort/group *simple_order=1; #ifndef DBUG_OFF - if (join->thd->is_error()) + if (unlikely(join->thd->is_error())) DBUG_PRINT("error",("Error from remove_const")); #endif DBUG_PRINT("exit",("simple_order: %d",(int) *simple_order)); @@ -13251,7 +13311,7 @@ return_zero_rows(JOIN *join, select_result *result, List &tables, bool send_error= FALSE; if (send_row) send_error= result->send_data(fields) > 0; - if (!send_error) + if (likely(!send_error)) result->send_eof(); // Should be safe } DBUG_RETURN(0); @@ -16906,6 +16966,10 @@ setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps) temporary table @param table_alias possible name of the temporary table that can be used for name resolving; can be "". 
+ @param do_not_open only create the TABLE object, do not + open the table in the engine + @param keep_row_order rows need to be read in the order they were + inserted, the engine should preserve this order */ TABLE * @@ -17216,9 +17280,10 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List &fields, */ item->marker == 4 || param->bit_fields_as_long, force_copy_fields); - if (!new_field) + + if (unlikely(!new_field)) { - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) goto err; // Got OOM continue; // Some kind of const item } @@ -17730,7 +17795,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List &fields, } } - if (thd->is_fatal_error) // If end of memory + if (unlikely(thd->is_fatal_error)) // If end of memory goto err; /* purecov: inspected */ share->db_record_offset= 1; table->used_for_duplicate_elimination= (param->sum_func_count == 0 && @@ -17934,9 +17999,10 @@ bool Virtual_tmp_table::sp_set_all_fields_from_item(THD *thd, Item *value) bool open_tmp_table(TABLE *table) { int error; - if ((error= table->file->ha_open(table, table->s->table_name.str, O_RDWR, - HA_OPEN_TMP_TABLE | - HA_OPEN_INTERNAL_TABLE))) + if (unlikely((error= table->file->ha_open(table, table->s->table_name.str, + O_RDWR, + HA_OPEN_TMP_TABLE | + HA_OPEN_INTERNAL_TABLE)))) { table->file->print_error(error, MYF(0)); /* purecov: inspected */ table->db_stat= 0; @@ -18130,14 +18196,14 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, } } - if ((error= maria_create(share->table_name.str, - file_type, - share->keys, &keydef, - (uint) (*recinfo-start_recinfo), - start_recinfo, - share->uniques, &uniquedef, - &create_info, - create_flags))) + if (unlikely((error= maria_create(share->table_name.str, + file_type, + share->keys, &keydef, + (uint) (*recinfo-start_recinfo), + start_recinfo, + share->uniques, &uniquedef, + &create_info, + create_flags)))) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ table->db_stat=0; @@ -18285,15 +18351,17 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, bzero((char*) &create_info,sizeof(create_info)); create_info.data_file_length= table->in_use->variables.tmp_disk_table_size; - if ((error=mi_create(share->table_name.str, share->keys, &keydef, - (uint) (*recinfo-start_recinfo), - start_recinfo, - share->uniques, &uniquedef, - &create_info, - HA_CREATE_TMP_TABLE | HA_CREATE_INTERNAL_TABLE | - ((share->db_create_options & HA_OPTION_PACK_RECORD) ? - HA_PACK_RECORD : 0) - ))) + if (unlikely((error= mi_create(share->table_name.str, share->keys, &keydef, + (uint) (*recinfo-start_recinfo), + start_recinfo, + share->uniques, &uniquedef, + &create_info, + HA_CREATE_TMP_TABLE | + HA_CREATE_INTERNAL_TABLE | + ((share->db_create_options & + HA_OPTION_PACK_RECORD) ? 
+ HA_PACK_RECORD : 0) + )))) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ table->db_stat=0; @@ -18347,11 +18415,11 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table, share= *table->s; new_table.s= &share; new_table.s->db_plugin= ha_lock_engine(thd, TMP_ENGINE_HTON); - if (!(new_table.file= get_new_handler(&share, &new_table.mem_root, - new_table.s->db_type()))) + if (unlikely(!(new_table.file= get_new_handler(&share, &new_table.mem_root, + new_table.s->db_type())))) DBUG_RETURN(1); // End of memory - if (new_table.file->set_ha_share_ref(&share.ha_share)) + if (unlikely(new_table.file->set_ha_share_ref(&share.ha_share))) { delete new_table.file; DBUG_RETURN(1); @@ -18394,16 +18462,13 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table, DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY ;); if (write_err) goto err; - if (thd->check_killed()) - { - thd->send_kill_message(); + if (unlikely(thd->check_killed())) goto err_killed; - } } if (!new_table.no_rows && new_table.file->ha_end_bulk_insert()) goto err; /* copy row that filled HEAP table */ - if ((write_err=new_table.file->ha_write_tmp_row(table->record[0]))) + if (unlikely((write_err=new_table.file->ha_write_tmp_row(table->record[0])))) { if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUP) || !ignore_last_dupp_key_error) @@ -18692,7 +18757,7 @@ do_select(JOIN *join, Procedure *procedure) (the join condition and piece of where clause relevant to this join table). */ - if (join->thd->is_error()) + if (unlikely(join->thd->is_error())) error= NESTED_LOOP_ERROR; } else @@ -18710,13 +18775,14 @@ do_select(JOIN *join, Procedure *procedure) error= NESTED_LOOP_NO_MORE_ROWS; else error= join->first_select(join,join_tab,0); - if (error >= NESTED_LOOP_OK && join->thd->killed != ABORT_QUERY) + if (error >= NESTED_LOOP_OK && likely(join->thd->killed != ABORT_QUERY)) error= join->first_select(join,join_tab,1); } join->thd->limit_found_rows= join->send_records - join->duplicate_rows; - if (error == NESTED_LOOP_NO_MORE_ROWS || join->thd->killed == ABORT_QUERY) + if (error == NESTED_LOOP_NO_MORE_ROWS || + unlikely(join->thd->killed == ABORT_QUERY)) error= NESTED_LOOP_OK; /* @@ -18763,7 +18829,7 @@ do_select(JOIN *join, Procedure *procedure) Sic: this branch works even if rc != 0, e.g. when send_data above returns an error. 
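This and several later hunks (create_internal_tmp_table_from_heap() here, sub_select_cache(), end_write(), end_update(), end_write_group() further down) drop the explicit thd->send_kill_message() call after thd->check_killed(), and one earlier caller now passes an argument to check_killed(). That reads as if reporting the kill condition has been folded into the check itself so call sites only test the return value; the class below is a simplified stand-in for that pattern, not the real THD interface:

#include <atomic>
#include <cstdio>

struct Session
{
  std::atomic<bool> killed{false};
  bool error_sent= false;

  /* Returns true if the session was killed; reports the condition once,
     unless the caller asks it not to. */
  bool check_killed(bool send_error= true)
  {
    if (!killed.load(std::memory_order_relaxed))
      return false;
    if (send_error && !error_sent)
    {
      std::fprintf(stderr, "query execution was interrupted\n");
      error_sent= true;
    }
    return true;
  }
};

int main()
{
  Session thd;
  thd.killed= true;
  if (thd.check_killed())        /* call site no longer reports separately */
    return 1;
  return 0;
}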
*/ - if (join->result->send_eof()) + if (unlikely(join->result->send_eof())) rc= 1; // Don't send error DBUG_PRINT("info",("%ld records output", (long) join->send_records)); } @@ -18783,7 +18849,7 @@ do_select(JOIN *join, Procedure *procedure) int rr_sequential_and_unpack(READ_RECORD *info) { int error; - if ((error= rr_sequential(info))) + if (unlikely((error= rr_sequential(info)))) return error; for (Copy_field *cp= info->copy_field; cp != info->copy_field_end; cp++) @@ -18954,10 +19020,9 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) rc= sub_select(join, join_tab, end_of_records); DBUG_RETURN(rc); } - if (join->thd->check_killed()) + if (unlikely(join->thd->check_killed())) { /* The user has aborted the execution of the query */ - join->thd->send_kill_message(); DBUG_RETURN(NESTED_LOOP_KILLED); } if (!test_if_use_dynamic_range_scan(join_tab)) @@ -19190,10 +19255,10 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) error= info->read_record(); - if (skip_over && !error) + if (skip_over && likely(!error)) { - if(!key_cmp(join_tab->table->key_info[join_tab->loosescan_key].key_part, - join_tab->loosescan_buf, join_tab->loosescan_key_len)) + if (!key_cmp(join_tab->table->key_info[join_tab->loosescan_key].key_part, + join_tab->loosescan_buf, join_tab->loosescan_key_len)) { /* This is the LooseScan action: skip over records with the same key @@ -19205,7 +19270,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) skip_over= FALSE; } - if (join_tab->keep_current_rowid && !error) + if (join_tab->keep_current_rowid && likely(!error)) join_tab->table->file->position(join_tab->table->record[0]); rc= evaluate_join_record(join, join_tab, error); @@ -19250,13 +19315,13 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab, " cond: %p error: %d alias %s", join, join_tab, select_cond, error, join_tab->table->alias.ptr())); - if (error > 0 || (join->thd->is_error())) // Fatal error + + if (error > 0 || unlikely(join->thd->is_error())) // Fatal error DBUG_RETURN(NESTED_LOOP_ERROR); if (error < 0) DBUG_RETURN(NESTED_LOOP_NO_MORE_ROWS); - if (join->thd->check_killed()) // Aborted by user + if (unlikely(join->thd->check_killed())) // Aborted by user { - join->thd->send_kill_message(); DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */ } @@ -19267,7 +19332,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab, select_cond_result= MY_TEST(select_cond->val_int()); /* check for errors evaluating the condition */ - if (join->thd->is_error()) + if (unlikely(join->thd->is_error())) DBUG_RETURN(NESTED_LOOP_ERROR); } @@ -19397,7 +19462,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab, join->return_tab= return_tab; /* check for errors evaluating the condition */ - if (join->thd->is_error()) + if (unlikely(join->thd->is_error())) DBUG_RETURN(NESTED_LOOP_ERROR); if (join->return_tab < join_tab) @@ -19546,10 +19611,11 @@ int safe_index_read(JOIN_TAB *tab) { int error; TABLE *table= tab->table; - if ((error= table->file->ha_index_read_map(table->record[0], - tab->ref.key_buff, - make_prev_keypart_map(tab->ref.key_parts), - HA_READ_KEY_EXACT))) + if (unlikely((error= + table->file->ha_index_read_map(table->record[0], + tab->ref.key_buff, + make_prev_keypart_map(tab->ref.key_parts), + HA_READ_KEY_EXACT)))) return report_error(table, error); return 0; } @@ -19597,7 +19663,7 @@ join_read_const_table(THD *thd, JOIN_TAB *tab, POSITION *pos) } else if (tab->type == JT_SYSTEM) { - if ((error=join_read_system(tab))) + if 
(unlikely((error=join_read_system(tab)))) { // Info for DESCRIBE tab->info= ET_CONST_ROW_NOT_FOUND; /* Mark for EXPLAIN that the row was not found */ @@ -19623,7 +19689,7 @@ join_read_const_table(THD *thd, JOIN_TAB *tab, POSITION *pos) } error=join_read_const(tab); table->file->ha_end_keyread(); - if (error) + if (unlikely(error)) { tab->info= ET_UNIQUE_ROW_NOT_FOUND; /* Mark for EXPLAIN that the row was not found */ @@ -19701,8 +19767,9 @@ join_read_system(JOIN_TAB *tab) int error; if (table->status & STATUS_GARBAGE) // If first read { - if ((error= table->file->ha_read_first_row(table->record[0], - table->s->primary_key))) + if (unlikely((error= + table->file->ha_read_first_row(table->record[0], + table->s->primary_key)))) { if (error != HA_ERR_END_OF_FILE) return report_error(table, error); @@ -19747,7 +19814,7 @@ join_read_const(JOIN_TAB *tab) make_prev_keypart_map(tab->ref.key_parts), HA_READ_KEY_EXACT); } - if (error) + if (unlikely(error)) { table->status= STATUS_NOT_FOUND; mark_as_null_row(tab->table); @@ -19803,7 +19870,7 @@ int join_read_key2(THD *thd, JOIN_TAB *tab, TABLE *table, TABLE_REF *table_ref) if (!table->file->inited) { error= table->file->ha_index_init(table_ref->key, tab ? tab->sorted : TRUE); - if (error) + if (unlikely(error)) { (void) report_error(table, error); return 1; @@ -19843,10 +19910,11 @@ int join_read_key2(THD *thd, JOIN_TAB *tab, TABLE *table, TABLE_REF *table_ref) table_ref->key_buff, make_prev_keypart_map(table_ref->key_parts), HA_READ_KEY_EXACT); - if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) + if (unlikely(error) && + error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) return report_error(table, error); - if (! error) + if (likely(!error)) { table_ref->has_record= TRUE; table_ref->use_count= 1; @@ -19907,16 +19975,19 @@ join_read_always_key(JOIN_TAB *tab) /* Initialize the index first */ if (!table->file->inited) { - if ((error= table->file->ha_index_init(tab->ref.key, tab->sorted))) + if (unlikely((error= table->file->ha_index_init(tab->ref.key, + tab->sorted)))) { (void) report_error(table, error); return 1; } } - if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref)) + if (unlikely(cp_buffer_from_ref(tab->join->thd, table, &tab->ref))) return -1; - if ((error= table->file->prepare_index_key_scan_map(tab->ref.key_buff, make_prev_keypart_map(tab->ref.key_parts)))) + if (unlikely((error= + table->file->prepare_index_key_scan_map(tab->ref.key_buff, + make_prev_keypart_map(tab->ref.key_parts))))) { report_error(table,error); return -1; @@ -19946,23 +20017,26 @@ join_read_last_key(JOIN_TAB *tab) TABLE *table= tab->table; if (!table->file->inited && - (error= table->file->ha_index_init(tab->ref.key, tab->sorted))) + unlikely((error= table->file->ha_index_init(tab->ref.key, tab->sorted)))) { (void) report_error(table, error); return 1; } - if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref)) + if (unlikely(cp_buffer_from_ref(tab->join->thd, table, &tab->ref))) return -1; - if ((error= table->file->prepare_index_key_scan_map(tab->ref.key_buff, make_prev_keypart_map(tab->ref.key_parts)))) + if (unlikely((error= + table->file->prepare_index_key_scan_map(tab->ref.key_buff, + make_prev_keypart_map(tab->ref.key_parts)))) ) { report_error(table,error); return -1; } - if ((error= table->file->ha_index_read_map(table->record[0], - tab->ref.key_buff, - make_prev_keypart_map(tab->ref.key_parts), - HA_READ_PREFIX_LAST))) + if (unlikely((error= + table->file->ha_index_read_map(table->record[0], + tab->ref.key_buff, + 
make_prev_keypart_map(tab->ref.key_parts), + HA_READ_PREFIX_LAST)))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) return report_error(table, error); @@ -19987,9 +20061,9 @@ join_read_next_same(READ_RECORD *info) TABLE *table= info->table; JOIN_TAB *tab=table->reginfo.join_tab; - if ((error= table->file->ha_index_next_same(table->record[0], - tab->ref.key_buff, - tab->ref.key_length))) + if (unlikely((error= table->file->ha_index_next_same(table->record[0], + tab->ref.key_buff, + tab->ref.key_length)))) { if (error != HA_ERR_END_OF_FILE) return report_error(table, error); @@ -20007,7 +20081,7 @@ join_read_prev_same(READ_RECORD *info) TABLE *table= info->table; JOIN_TAB *tab=table->reginfo.join_tab; - if ((error= table->file->ha_index_prev(table->record[0]))) + if (unlikely((error= table->file->ha_index_prev(table->record[0])))) return report_error(table, error); if (key_cmp_if_same(table, tab->ref.key_buff, tab->ref.key, tab->ref.key_length)) @@ -20030,7 +20104,7 @@ join_init_quick_read_record(JOIN_TAB *tab) int read_first_record_seq(JOIN_TAB *tab) { - if (tab->read_record.table->file->ha_rnd_init_with_error(1)) + if (unlikely(tab->read_record.table->file->ha_rnd_init_with_error(1))) return 1; return tab->read_record.read_record(); } @@ -20153,9 +20227,10 @@ join_read_first(JOIN_TAB *tab) tab->read_record.record=table->record[0]; if (!table->file->inited) error= table->file->ha_index_init(tab->index, tab->sorted); - if (!error) + if (likely(!error)) error= table->file->prepare_index_scan(); - if (error || (error=tab->table->file->ha_index_first(tab->table->record[0]))) + if (unlikely(error) || + unlikely(error= tab->table->file->ha_index_first(tab->table->record[0]))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) report_error(table, error); @@ -20169,7 +20244,7 @@ static int join_read_next(READ_RECORD *info) { int error; - if ((error= info->table->file->ha_index_next(info->record))) + if (unlikely((error= info->table->file->ha_index_next(info->record)))) return report_error(info->table, error); return 0; @@ -20193,9 +20268,10 @@ join_read_last(JOIN_TAB *tab) tab->read_record.record=table->record[0]; if (!table->file->inited) error= table->file->ha_index_init(tab->index, 1); - if (!error) + if (likely(!error)) error= table->file->prepare_index_scan(); - if (error || (error= tab->table->file->ha_index_last(tab->table->record[0]))) + if (unlikely(error) || + unlikely(error= tab->table->file->ha_index_last(tab->table->record[0]))) DBUG_RETURN(report_error(table, error)); DBUG_RETURN(0); @@ -20206,7 +20282,7 @@ static int join_read_prev(READ_RECORD *info) { int error; - if ((error= info->table->file->ha_index_prev(info->record))) + if (unlikely((error= info->table->file->ha_index_prev(info->record)))) return report_error(info->table, error); return 0; } @@ -20227,7 +20303,7 @@ join_ft_read_first(JOIN_TAB *tab) table->file->ft_init(); - if ((error= table->file->ha_ft_read(table->record[0]))) + if (unlikely((error= table->file->ha_ft_read(table->record[0])))) return report_error(table, error); return 0; } @@ -20236,7 +20312,7 @@ static int join_ft_read_next(READ_RECORD *info) { int error; - if ((error= info->table->file->ha_ft_read(info->table->record[0]))) + if (unlikely((error= info->table->file->ha_ft_read(info->table->record[0])))) return report_error(info->table, error); return 0; } @@ -20266,7 +20342,7 @@ int join_read_next_same_or_null(READ_RECORD *info) { int error; - if ((error= join_read_next_same(info)) >= 0) + if (unlikely((error= 
join_read_next_same(info)) >= 0)) return error; JOIN_TAB *tab= info->table->reginfo.join_tab; @@ -20338,7 +20414,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { int error; /* result < 0 if row was not accepted and should not be counted */ - if ((error= join->result->send_data(*fields))) + if (unlikely((error= join->result->send_data(*fields)))) { if (error > 0) DBUG_RETURN(NESTED_LOOP_ERROR); @@ -20487,7 +20563,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (join->do_send_rows) { error=join->result->send_data(*fields); - if (error < 0) + if (unlikely(error < 0)) { /* Duplicate row, don't count */ join->duplicate_rows++; @@ -20497,13 +20573,13 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), join->send_records++; join->group_sent= true; } - if (join->rollup.state != ROLLUP::STATE_NONE && error <= 0) + if (unlikely(join->rollup.state != ROLLUP::STATE_NONE && error <= 0)) { if (join->rollup_send_data((uint) (idx+1))) error= 1; } } - if (error > 0) + if (unlikely(error > 0)) DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ if (end_of_records) DBUG_RETURN(NESTED_LOOP_OK); @@ -20573,14 +20649,14 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd)) DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ - if (!join_tab->having || join_tab->having->val_int()) + if (likely(!join_tab->having || join_tab->having->val_int())) { int error; join->found_records++; if ((error= table->file->ha_write_tmp_row(table->record[0]))) { - if (!table->file->is_fatal_error(error, HA_CHECK_DUP)) - goto end; + if (likely(!table->file->is_fatal_error(error, HA_CHECK_DUP))) + goto end; // Ignore duplicate keys bool is_duplicate; if (create_internal_tmp_table_from_heap(join->thd, table, join_tab->tmp_table_param->start_recinfo, @@ -20603,9 +20679,8 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } } end: - if (join->thd->check_killed()) + if (unlikely(join->thd->check_killed())) { - join->thd->send_kill_message(); DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */ } DBUG_RETURN(NESTED_LOOP_OK); @@ -20662,8 +20737,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { /* Update old record */ restore_record(table,record[1]); update_tmptable_sum_func(join->sum_funcs,table); - if ((error= table->file->ha_update_tmp_row(table->record[1], - table->record[0]))) + if (unlikely((error= table->file->ha_update_tmp_row(table->record[1], + table->record[0])))) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ @@ -20672,9 +20747,10 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } init_tmptable_sum_functions(join->sum_funcs); - if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd)) + if (unlikely(copy_funcs(join_tab->tmp_table_param->items_to_copy, + join->thd))) DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ - if ((error= table->file->ha_write_tmp_row(table->record[0]))) + if (unlikely((error= table->file->ha_write_tmp_row(table->record[0])))) { if (create_internal_tmp_table_from_heap(join->thd, table, join_tab->tmp_table_param->start_recinfo, @@ -20682,7 +20758,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), error, 0, NULL)) DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error /* Change method to update rows */ - if ((error= table->file->ha_index_init(0, 0))) + if 
(unlikely((error= table->file->ha_index_init(0, 0)))) { table->file->print_error(error, MYF(0)); DBUG_RETURN(NESTED_LOOP_ERROR); @@ -20692,9 +20768,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } join_tab->send_records++; end: - if (join->thd->check_killed()) + if (unlikely(join->thd->check_killed())) { - join->thd->send_kill_message(); DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */ } DBUG_RETURN(NESTED_LOOP_OK); @@ -20719,32 +20794,31 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd)) DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ - if (!(error= table->file->ha_write_tmp_row(table->record[0]))) + if (likely(!(error= table->file->ha_write_tmp_row(table->record[0])))) join_tab->send_records++; // New group else { - if ((int) table->file->get_dup_key(error) < 0) + if (unlikely((int) table->file->get_dup_key(error) < 0)) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ } - if (table->file->ha_rnd_pos(table->record[1],table->file->dup_ref)) + if (unlikely(table->file->ha_rnd_pos(table->record[1],table->file->dup_ref))) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ } restore_record(table,record[1]); update_tmptable_sum_func(join->sum_funcs,table); - if ((error= table->file->ha_update_tmp_row(table->record[1], - table->record[0]))) + if (unlikely((error= table->file->ha_update_tmp_row(table->record[1], + table->record[0])))) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ } } - if (join->thd->check_killed()) + if (unlikely(join->thd->check_killed())) { - join->thd->send_kill_message(); DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */ } DBUG_RETURN(NESTED_LOOP_OK); @@ -20790,17 +20864,18 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (!join_tab->having || join_tab->having->val_int()) { int error= table->file->ha_write_tmp_row(table->record[0]); - if (error && + if (unlikely(error) && create_internal_tmp_table_from_heap(join->thd, table, join_tab->tmp_table_param->start_recinfo, &join_tab->tmp_table_param->recinfo, error, 0, NULL)) DBUG_RETURN(NESTED_LOOP_ERROR); } - if (join->rollup.state != ROLLUP::STATE_NONE) + if (unlikely(join->rollup.state != ROLLUP::STATE_NONE)) { - if (join->rollup_write_data((uint) (idx+1), - join_tab->tmp_table_param, table)) + if (unlikely(join->rollup_write_data((uint) (idx+1), + join_tab->tmp_table_param, + table))) { DBUG_RETURN(NESTED_LOOP_ERROR); } @@ -20819,23 +20894,24 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (idx < (int) join->send_group_parts) { copy_fields(join_tab->tmp_table_param); - if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd)) + if (unlikely(copy_funcs(join_tab->tmp_table_param->items_to_copy, + join->thd))) DBUG_RETURN(NESTED_LOOP_ERROR); - if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1])) + if (unlikely(init_sum_functions(join->sum_funcs, + join->sum_funcs_end[idx+1]))) DBUG_RETURN(NESTED_LOOP_ERROR); - if (join->procedure) + if (unlikely(join->procedure)) join->procedure->add(); goto end; } } - if (update_sum_func(join->sum_funcs)) + if (unlikely(update_sum_func(join->sum_funcs))) DBUG_RETURN(NESTED_LOOP_ERROR); - if (join->procedure) + if (unlikely(join->procedure)) 
join->procedure->add(); end: - if (join->thd->check_killed()) + if (unlikely(join->thd->check_killed())) { - join->thd->send_kill_message(); DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */ } DBUG_RETURN(NESTED_LOOP_OK); @@ -22283,7 +22359,8 @@ create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort) { DBUG_ASSERT(tab->type == JT_REF || tab->type == JT_EQ_REF); // Update ref value - if ((cp_buffer_from_ref(thd, table, &tab->ref) && thd->is_fatal_error)) + if (unlikely(cp_buffer_from_ref(thd, table, &tab->ref) && + thd->is_fatal_error)) goto err; // out of memory } } @@ -22291,7 +22368,7 @@ create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort) /* Fill schema tables with data before filesort if it's necessary */ if ((join->select_lex->options & OPTION_SCHEMA_TABLE) && - get_schema_tables_result(join, PROCESSED_BY_CREATE_SORT_INDEX)) + unlikely(get_schema_tables_result(join, PROCESSED_BY_CREATE_SORT_INDEX))) goto err; if (table->s->tmp_table) @@ -22459,37 +22536,31 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, int error; DBUG_ENTER("remove_dup_with_compare"); - if (file->ha_rnd_init_with_error(1)) + if (unlikely(file->ha_rnd_init_with_error(1))) DBUG_RETURN(1); error= file->ha_rnd_next(record); for (;;) { - if (thd->check_killed()) + if (unlikely(thd->check_killed())) { - thd->send_kill_message(); error=0; goto err; } - if (error) + if (unlikely(error)) { - if (error == HA_ERR_RECORD_DELETED) - { - error= file->ha_rnd_next(record); - continue; - } if (error == HA_ERR_END_OF_FILE) break; goto err; } if (having && !having->val_int()) { - if ((error= file->ha_delete_row(record))) + if (unlikely((error= file->ha_delete_row(record)))) goto err; error= file->ha_rnd_next(record); continue; } - if (copy_blobs(first_field)) + if (unlikely(copy_blobs(first_field))) { my_message(ER_OUTOFMEMORY, ER_THD(thd,ER_OUTOFMEMORY), MYF(ME_FATALERROR)); @@ -22502,30 +22573,28 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, bool found=0; for (;;) { - if ((error= file->ha_rnd_next(record))) + if (unlikely((error= file->ha_rnd_next(record)))) { - if (error == HA_ERR_RECORD_DELETED) - continue; if (error == HA_ERR_END_OF_FILE) break; goto err; } if (compare_record(table, first_field) == 0) { - if ((error= file->ha_delete_row(record))) + if (unlikely((error= file->ha_delete_row(record)))) goto err; } else if (!found) { found=1; - if ((error= file->remember_rnd_pos())) + if (unlikely((error= file->remember_rnd_pos()))) goto err; } } if (!found) break; // End of file /* Restart search on saved row */ - if ((error= file->restart_rnd_next(record))) + if (unlikely((error= file->restart_rnd_next(record)))) goto err; } @@ -22561,49 +22630,47 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, Field **ptr; DBUG_ENTER("remove_dup_with_hash_index"); - if (!my_multi_malloc(MYF(MY_WME), - &key_buffer, - (uint) ((key_length + extra_length) * - (long) file->stats.records), - &field_lengths, - (uint) (field_count*sizeof(*field_lengths)), - NullS)) + if (unlikely(!my_multi_malloc(MYF(MY_WME), + &key_buffer, + (uint) ((key_length + extra_length) * + (long) file->stats.records), + &field_lengths, + (uint) (field_count*sizeof(*field_lengths)), + NullS))) DBUG_RETURN(1); for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++) (*field_length++)= (*ptr)->sort_length(); - if (my_hash_init(&hash, &my_charset_bin, (uint) file->stats.records, 0, - key_length, (my_hash_get_key) 0, 0, 0)) + if 
(unlikely(my_hash_init(&hash, &my_charset_bin, + (uint) file->stats.records, 0, + key_length, (my_hash_get_key) 0, 0, 0))) { my_free(key_buffer); DBUG_RETURN(1); } - if ((error= file->ha_rnd_init(1))) + if (unlikely((error= file->ha_rnd_init(1)))) goto err; key_pos=key_buffer; for (;;) { uchar *org_key_pos; - if (thd->check_killed()) + if (unlikely(thd->check_killed())) { - thd->send_kill_message(); error=0; goto err; } - if ((error= file->ha_rnd_next(record))) + if (unlikely((error= file->ha_rnd_next(record)))) { - if (error == HA_ERR_RECORD_DELETED) - continue; if (error == HA_ERR_END_OF_FILE) break; goto err; } if (having && !having->val_int()) { - if ((error= file->ha_delete_row(record))) + if (unlikely((error= file->ha_delete_row(record)))) goto err; continue; } @@ -22620,7 +22687,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, if (my_hash_search(&hash, org_key_pos, key_length)) { /* Duplicated found ; Remove the row */ - if ((error= file->ha_delete_row(record))) + if (unlikely((error= file->ha_delete_row(record)))) goto err; } else @@ -22641,7 +22708,7 @@ err: my_hash_free(&hash); file->extra(HA_EXTRA_NO_CACHE); (void) file->ha_rnd_end(); - if (error) + if (unlikely(error)) file->print_error(error,MYF(0)); DBUG_RETURN(1); } @@ -23337,13 +23404,10 @@ get_sort_by_table(ORDER *a,ORDER *b, List &tables, calc how big buffer we need for comparing group entries. */ -static void -calc_group_buffer(JOIN *join,ORDER *group) +void calc_group_buffer(TMP_TABLE_PARAM *param, ORDER *group) { uint key_length=0, parts=0, null_parts=0; - if (group) - join->group= 1; for (; group ; group=group->next) { Item *group_item= *group->item; @@ -23413,9 +23477,16 @@ calc_group_buffer(JOIN *join,ORDER *group) if (group_item->maybe_null) null_parts++; } - join->tmp_table_param.group_length=key_length+null_parts; - join->tmp_table_param.group_parts=parts; - join->tmp_table_param.group_null_parts=null_parts; + param->group_length= key_length + null_parts; + param->group_parts= parts; + param->group_null_parts= null_parts; +} + +static void calc_group_buffer(JOIN *join, ORDER *group) +{ + if (group) + join->group= 1; + calc_group_buffer(&join->tmp_table_param, group); } @@ -24132,7 +24203,7 @@ copy_funcs(Item **func_ptr, const THD *thd) TODO: change it for a real status check when Item::val_xxx() are extended to return status code. 
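remove_dup_with_hash_index() above packs each row's duplicate-check fields into a contiguous key buffer, records the key in a hash, and deletes any row whose key was already seen (the patch also removes the obsolete HA_ERR_RECORD_DELETED retry and lets check_killed() report the abort itself). The same one-pass scheme with the standard library standing in for my_multi_malloc()/my_hash_init(), as a rough sketch:

#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

struct Row { std::string a; int b; };

/* Pack the comparison fields into one byte string, a stand-in for the
   key_buffer/field_lengths packing in remove_dup_with_hash_index(). */
static std::string pack_key(const Row &r)
{
  std::string key= r.a;
  key.push_back('\0');                          /* field separator */
  key.append(reinterpret_cast<const char*>(&r.b), sizeof(r.b));
  return key;
}

int main()
{
  std::vector<Row> table= {{"x", 1}, {"y", 2}, {"x", 1}, {"x", 3}};
  std::unordered_set<std::string> seen;
  std::vector<Row> kept;

  for (const Row &r : table)
  {
    if (!seen.insert(pack_key(r)).second)       /* key already seen: drop the row */
      continue;
    kept.push_back(r);
  }
  for (const Row &r : kept)
    std::cout << r.a << ' ' << r.b << '\n';     /* prints: x 1, y 2, x 3 */
  return 0;
}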
*/ - if (thd->is_error()) + if (unlikely(thd->is_error())) return TRUE; } return FALSE; @@ -24166,7 +24237,7 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab) value), thd->mem_root); } - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) DBUG_RETURN(TRUE); if (!cond->fixed) { @@ -24658,7 +24729,8 @@ int JOIN::rollup_write_data(uint idx, TMP_TABLE_PARAM *tmp_table_param_arg, TABL item->save_in_result_field(1); } copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]); - if ((write_error= table_arg->file->ha_write_tmp_row(table_arg->record[0]))) + if (unlikely((write_error= + table_arg->file->ha_write_tmp_row(table_arg->record[0])))) { if (create_internal_tmp_table_from_heap(thd, table_arg, tmp_table_param_arg->start_recinfo, @@ -24756,7 +24828,7 @@ int print_explain_message_line(select_result_sink *result, else item_list.push_back(item_null, mem_root); - if (thd->is_fatal_error || result->send_data(item_list)) + if (unlikely(thd->is_fatal_error) || unlikely(result->send_data(item_list))) return 1; return 0; } @@ -24827,9 +24899,9 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta, if (filesort) { if (!(eta->pre_join_sort= - new Explain_aggr_filesort(thd->mem_root, - thd->lex->analyze_stmt, - filesort))) + new (thd->mem_root) Explain_aggr_filesort(thd->mem_root, + thd->lex->analyze_stmt, + filesort))) return 1; } @@ -25267,7 +25339,7 @@ bool save_agg_explain_data(JOIN *join, Explain_select *xpl_sel) { // Each aggregate means a temp.table prev_node= node; - if (!(node= new Explain_aggr_tmp_table)) + if (!(node= new (thd->mem_root) Explain_aggr_tmp_table)) return 1; node->child= prev_node; @@ -25288,7 +25360,7 @@ bool save_agg_explain_data(JOIN *join, Explain_select *xpl_sel) if (join_tab->distinct) { prev_node= node; - if (!(node= new Explain_aggr_remove_dups)) + if (!(node= new (thd->mem_root) Explain_aggr_remove_dups)) return 1; node->child= prev_node; } @@ -25296,7 +25368,7 @@ bool save_agg_explain_data(JOIN *join, Explain_select *xpl_sel) if (join_tab->filesort) { Explain_aggr_filesort *eaf = - new Explain_aggr_filesort(thd->mem_root, is_analyze, join_tab->filesort); + new (thd->mem_root) Explain_aggr_filesort(thd->mem_root, is_analyze, join_tab->filesort); if (!eaf) return 1; prev_node= node; @@ -25590,7 +25662,8 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) unit->fake_select_lex->type= unit_operation_text[unit->common_op()]; unit->fake_select_lex->options|= SELECT_DESCRIBE; } - if (!(res= unit->prepare(thd, result, SELECT_NO_UNLOCK | SELECT_DESCRIBE))) + if (!(res= unit->prepare(unit->derived, result, + SELECT_NO_UNLOCK | SELECT_DESCRIBE))) res= unit->exec(); } else @@ -26156,7 +26229,7 @@ bool JOIN::change_result(select_result *new_result, select_result *old_result) { result= new_result; if (result->prepare(fields_list, select_lex->master_unit()) || - result->prepare2()) + result->prepare2(this)) DBUG_RETURN(true); /* purecov: inspected */ DBUG_RETURN(false); } @@ -26987,22 +27060,20 @@ ulong check_selectivity(THD *thd, } it.rewind(); - if (file->ha_rnd_init_with_error(1)) + if (unlikely(file->ha_rnd_init_with_error(1))) DBUG_RETURN(0); do { error= file->ha_rnd_next(record); - if (thd->killed) + if (unlikely(thd->killed)) { thd->send_kill_message(); count= 0; goto err; } - if (error) + if (unlikely(error)) { - if (error == HA_ERR_RECORD_DELETED) - continue; if (error == HA_ERR_END_OF_FILE) break; goto err; @@ -27154,11 +27225,11 @@ AGGR_OP::end_send() else error= join_tab->read_record.read_record(); - if (error > 
0 || (join->thd->is_error())) // Fatal error + if (unlikely(error > 0 || (join->thd->is_error()))) // Fatal error rc= NESTED_LOOP_ERROR; else if (error < 0) break; - else if (join->thd->killed) // Aborted by user + else if (unlikely(join->thd->killed)) // Aborted by user { join->thd->send_kill_message(); rc= NESTED_LOOP_KILLED; diff --git a/sql/sql_select.h b/sql/sql_select.h index 95b2d34c631..4140a0293f8 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -470,7 +470,7 @@ typedef struct st_join_table { Window_funcs_computation* window_funcs_step; /** - List of topmost expressions in the select list. The *next* JOIN TAB + List of topmost expressions in the select list. The *next* JOIN_TAB in the plan should use it to obtain correct values. Same applicable to all_fields. These lists are needed because after tmp tables functions will be turned to fields. These variables are pointing to @@ -1438,6 +1438,9 @@ public: enum { QEP_NOT_PRESENT_YET, QEP_AVAILABLE, QEP_DELETED} have_query_plan; + // if keep_current_rowid=true, whether they should be saved in temporary table + bool tmp_table_keep_current_rowid; + /* Additional WHERE and HAVING predicates to be considered for IN=>EXISTS subquery transformation of a JOIN object. @@ -1548,6 +1551,7 @@ public: pushdown_query= 0; original_join_tab= 0; explain= NULL; + tmp_table_keep_current_rowid= 0; all_fields= fields_arg; if (&fields_list != &fields_arg) /* Avoid valgrind-warning */ @@ -1577,6 +1581,15 @@ public: /* Number of tables actually joined at the top level */ uint exec_join_tab_cnt() { return tables_list ? top_join_tab_count : 0; } + /* + Number of tables in the join which also includes the temporary tables + created for GROUP BY, DISTINCT , WINDOW FUNCTION etc. + */ + uint total_join_tab_cnt() + { + return exec_join_tab_cnt() + aggr_tables - 1; + } + int prepare(TABLE_LIST *tables, uint wind_num, COND *conds, uint og_num, ORDER *order, bool skip_order_by, ORDER *group, Item *having, ORDER *proc_param, SELECT_LEX *select, @@ -1774,6 +1787,7 @@ private: void cleanup_item_list(List &items) const; bool add_having_as_table_cond(JOIN_TAB *tab); bool make_aggr_tables_info(); + bool add_fields_for_current_rowid(JOIN_TAB *cur, List *fields); }; enum enum_with_bush_roots { WITH_BUSH_ROOTS, WITHOUT_BUSH_ROOTS}; @@ -2370,6 +2384,7 @@ int append_possible_keys(MEM_ROOT *alloc, String_list &list, TABLE *table, #define RATIO_TO_PACK_ROWS 2 #define MIN_STRING_LENGTH_TO_PACK_ROWS 10 +void calc_group_buffer(TMP_TABLE_PARAM *param, ORDER *group); TABLE *create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, ORDER *group, bool distinct, bool save_sum_fields, ulonglong select_options, ha_rows rows_limit, diff --git a/sql/sql_sequence.cc b/sql/sql_sequence.cc index 18f0028908f..21a8781087f 100644 --- a/sql/sql_sequence.cc +++ b/sql/sql_sequence.cc @@ -341,7 +341,7 @@ bool sequence_insert(THD *thd, LEX *lex, TABLE_LIST *org_table_list) MYSQL_OPEN_HAS_MDL_LOCK); thd->open_options&= ~HA_OPEN_FOR_CREATE; thd->m_reprepare_observer= save_reprepare_observer; - if (error) + if (unlikely(error)) { lex->restore_backup_query_tables_list(&query_tables_list_backup); thd->restore_backup_open_tables_state(&open_tables_backup); @@ -459,7 +459,10 @@ int SEQUENCE::read_initial_values(TABLE *table) mdl_requests.push_front(&mdl_request); if (thd->mdl_context.acquire_locks(&mdl_requests, thd->variables.lock_wait_timeout)) + { + write_unlock(table); DBUG_RETURN(HA_ERR_LOCK_WAIT_TIMEOUT); + } } save_lock_type= table->reginfo.lock_type; table->reginfo.lock_type= TL_READ; @@ 
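The Explain_aggr_* hunks above replace bare new with new (thd->mem_root) ..., i.e. arena allocation that is released all at once with the statement. A simplified, self-contained sketch of the placement-new-on-an-arena idea; Arena is a stand-in for MEM_ROOT, not the server's implementation.

#include <cstddef>
#include <cstdlib>
#include <new>
#include <vector>

/* Toy arena: blocks are freed together; objects are never deleted singly. */
struct Arena
{
  std::vector<void*> blocks;
  void *alloc(std::size_t size)
  {
    void *p= std::malloc(size);
    if (p)
      blocks.push_back(p);
    return p;
  }
  ~Arena()
  {
    for (void *p : blocks)
      std::free(p);
  }
};

/* The overloaded operator new routes allocation into the arena, mirroring
   the new (thd->mem_root) Explain_aggr_* calls in the patch. */
struct Explain_node_sketch
{
  int child_count= 0;
  static void *operator new(std::size_t size, Arena *arena) noexcept
  { return arena->alloc(size); }
  static void operator delete(void *, Arena *) noexcept {}   /* ctor throw path */
  static void operator delete(void *) noexcept {}            /* never freed singly */
};

int main()
{
  Arena arena;
  Explain_node_sketch *node= new (&arena) Explain_node_sketch();
  return node ? node->child_count : 1;
}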
-468,10 +471,11 @@ int SEQUENCE::read_initial_values(TABLE *table) { if (mdl_lock_used) thd->mdl_context.release_lock(mdl_request.ticket); + write_unlock(table); DBUG_RETURN(HA_ERR_LOCK_WAIT_TIMEOUT); } DBUG_ASSERT(table->reginfo.lock_type == TL_READ); - if (!(error= read_stored_values(table))) + if (likely(!(error= read_stored_values(table)))) initialized= SEQ_READY_TO_USE; mysql_unlock_tables(thd, lock); if (mdl_lock_used) @@ -510,7 +514,7 @@ int SEQUENCE::read_stored_values(TABLE *table) error= table->file->ha_read_first_row(table->record[0], MAX_KEY); tmp_restore_column_map(table->read_set, save_read_set); - if (error) + if (unlikely(error)) { table->file->print_error(error, MYF(0)); DBUG_RETURN(error); @@ -560,8 +564,7 @@ void sequence_definition::adjust_values(longlong next_value) else { next_free_value+= to_add; - DBUG_ASSERT(next_free_value % real_increment == offset && - next_free_value >= reserved_until); + DBUG_ASSERT(next_free_value % real_increment == offset); } } } @@ -592,7 +595,7 @@ int sequence_definition::write_initial_sequence(TABLE *table) table->s->sequence->initialized= SEQUENCE::SEQ_UNINTIALIZED; reenable_binlog(thd); table->write_set= save_write_set; - if (error) + if (unlikely(error)) table->file->print_error(error, MYF(0)); else { @@ -634,7 +637,7 @@ int sequence_definition::write(TABLE *table, bool all_fields) table->read_set= table->write_set= &table->s->all_set; table->file->column_bitmaps_signal(); store_fields(table); - if ((error= table->file->ha_write_row(table->record[0]))) + if (unlikely((error= table->file->ha_write_row(table->record[0])))) table->file->print_error(error, MYF(0)); table->rpl_write_set= save_rpl_write_set; table->read_set= save_read_set; @@ -741,7 +744,7 @@ longlong SEQUENCE::next_value(TABLE *table, bool second_round, int *error) DBUG_RETURN(next_value(table, 1, error)); } - if ((*error= write(table, 0))) + if (unlikely((*error= write(table, 0)))) { reserved_until= org_reserved_until; next_free_value= res_value; @@ -893,7 +896,7 @@ bool Sql_cmd_alter_sequence::execute(THD *thd) trapped_errors= no_such_table_handler.safely_trapped_errors(); thd->pop_internal_handler(); } - if (error) + if (unlikely(error)) { if (trapped_errors) { @@ -949,7 +952,7 @@ bool Sql_cmd_alter_sequence::execute(THD *thd) } table->s->sequence->write_lock(table); - if (!(error= new_seq->write(table, 1))) + if (likely(!(error= new_seq->write(table, 1)))) { /* Store the sequence values in table share */ table->s->sequence->copy(new_seq); @@ -961,9 +964,9 @@ bool Sql_cmd_alter_sequence::execute(THD *thd) error= 1; if (trans_commit_implicit(thd)) error= 1; - if (!error) + if (likely(!error)) error= write_bin_log(thd, 1, thd->query(), thd->query_length()); - if (!error) + if (likely(!error)) my_ok(thd); end: diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc index 353d1f551a5..da2a91e5880 100644 --- a/sql/sql_servers.cc +++ b/sql/sql_servers.cc @@ -256,7 +256,8 @@ bool servers_reload(THD *thd) tables[0].init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_SERVERS_NAME, 0, TL_READ); - if (open_and_lock_tables(thd, tables, FALSE, MYSQL_LOCK_IGNORE_TIMEOUT)) + if (unlikely(open_and_lock_tables(thd, tables, FALSE, + MYSQL_LOCK_IGNORE_TIMEOUT))) { /* Execution might have been interrupted; only print the error message @@ -395,11 +396,11 @@ insert_server(THD *thd, FOREIGN_SERVER *server) goto end; /* insert the server into the table */ - if ((error= insert_server_record(table, server))) + if (unlikely(error= insert_server_record(table, server))) goto end; /* insert the server into the 
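The sql_sequence.cc hunks above keep the pattern of reserving a block of values (reserved_until) and restoring the cached state when persisting the new boundary fails. A much simplified, self-contained sketch of that reserve-then-rollback flow; the field names echo the diff, but this class is not the server's SEQUENCE.

#include <cstdint>

struct Sequence_sketch
{
  int64_t increment= 1;
  int64_t cache= 10;              /* values handed out per reservation */
  int64_t reserved_until= 1;      /* first value not yet reserved      */
  int64_t next_free_value= 1;     /* next value to hand out            */

  /* Stand-in for SEQUENCE::write(): persist the new boundary,
     return 0 on success, non-zero on error. */
  int persist(int64_t /* new_reserved_until */) { return 0; }

  /* Returns the next value, or -1 when the persist step fails. */
  int64_t next_value()
  {
    if (next_free_value >= reserved_until)
    {
      int64_t org_reserved_until= reserved_until;
      int64_t org_next_free= next_free_value;
      reserved_until+= cache * increment;
      if (persist(reserved_until) != 0)
      {
        /* Mirror of the error path in the diff: roll the cache back. */
        reserved_until= org_reserved_until;
        next_free_value= org_next_free;
        return -1;
      }
    }
    int64_t res= next_free_value;
    next_free_value+= increment;
    return res;
  }
};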
cache */ - if ((error= insert_server_record_into_cache(server))) + if (unlikely((error= insert_server_record_into_cache(server)))) goto end; end: @@ -542,10 +543,12 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server) system_charset_info); /* read index until record is that specified in server_name */ - if ((error= table->file->ha_index_read_idx_map(table->record[0], 0, - (uchar *)table->field[0]->ptr, - HA_WHOLE_KEY, - HA_READ_KEY_EXACT))) + if (unlikely((error= + table->file->ha_index_read_idx_map(table->record[0], 0, + (uchar *)table->field[0]-> + ptr, + HA_WHOLE_KEY, + HA_READ_KEY_EXACT)))) { /* if not found, err */ if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) @@ -559,12 +562,8 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server) DBUG_PRINT("info",("record for server '%s' not found!", server->server_name)); /* write/insert the new server */ - if ((error=table->file->ha_write_row(table->record[0]))) - { + if (unlikely(error=table->file->ha_write_row(table->record[0]))) table->file->print_error(error, MYF(0)); - } - else - error= 0; } else error= ER_FOREIGN_SERVER_EXISTS; @@ -608,10 +607,11 @@ static int drop_server_internal(THD *thd, LEX_SERVER_OPTIONS *server_options) tables.init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_SERVERS_NAME, 0, TL_WRITE); /* hit the memory hit first */ - if ((error= delete_server_record_in_cache(server_options))) + if (unlikely((error= delete_server_record_in_cache(server_options)))) goto end; - if (! (table= open_ltable(thd, &tables, TL_WRITE, MYSQL_LOCK_IGNORE_TIMEOUT))) + if (unlikely(!(table= open_ltable(thd, &tables, TL_WRITE, + MYSQL_LOCK_IGNORE_TIMEOUT)))) { error= my_errno; goto end; @@ -744,7 +744,7 @@ int update_server(THD *thd, FOREIGN_SERVER *existing, FOREIGN_SERVER *altered) goto end; } - if ((error= update_server_record(table, altered))) + if (unlikely((error= update_server_record(table, altered)))) goto end; error= update_server_record_in_cache(existing, altered); @@ -892,10 +892,12 @@ update_server_record(TABLE *table, FOREIGN_SERVER *server) server->server_name_length, system_charset_info); - if ((error= table->file->ha_index_read_idx_map(table->record[0], 0, - (uchar *)table->field[0]->ptr, - ~(longlong)0, - HA_READ_KEY_EXACT))) + if (unlikely((error= + table->file->ha_index_read_idx_map(table->record[0], 0, + (uchar *)table->field[0]-> + ptr, + ~(longlong)0, + HA_READ_KEY_EXACT)))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) table->file->print_error(error, MYF(0)); @@ -907,9 +909,9 @@ update_server_record(TABLE *table, FOREIGN_SERVER *server) /* ok, so we can update since the record exists in the table */ store_record(table,record[1]); store_server_fields(table, server); - if ((error=table->file->ha_update_row(table->record[1], - table->record[0])) && - error != HA_ERR_RECORD_IS_THE_SAME) + if (unlikely((error=table->file->ha_update_row(table->record[1], + table->record[0])) && + error != HA_ERR_RECORD_IS_THE_SAME)) { DBUG_PRINT("info",("problems with ha_update_row %d", error)); goto end; @@ -950,10 +952,12 @@ delete_server_record(TABLE *table, LEX_CSTRING *name) /* set the field that's the PK to the value we're looking for */ table->field[0]->store(name->str, name->length, system_charset_info); - if ((error= table->file->ha_index_read_idx_map(table->record[0], 0, - (uchar *)table->field[0]->ptr, - HA_WHOLE_KEY, - HA_READ_KEY_EXACT))) + if (unlikely((error= + table->file->ha_index_read_idx_map(table->record[0], 0, + (uchar *)table->field[0]-> + ptr, + HA_WHOLE_KEY, + 
HA_READ_KEY_EXACT)))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) table->file->print_error(error, MYF(0)); @@ -962,7 +966,7 @@ delete_server_record(TABLE *table, LEX_CSTRING *name) } else { - if ((error= table->file->ha_delete_row(table->record[0]))) + if (unlikely((error= table->file->ha_delete_row(table->record[0])))) table->file->print_error(error, MYF(0)); } @@ -1001,7 +1005,7 @@ int create_server(THD *thd, LEX_SERVER_OPTIONS *server_options) { if (thd->lex->create_info.or_replace()) { - if ((error= drop_server_internal(thd, server_options))) + if (unlikely((error= drop_server_internal(thd, server_options)))) goto end; } else if (thd->lex->create_info.if_not_exists()) @@ -1032,7 +1036,7 @@ int create_server(THD *thd, LEX_SERVER_OPTIONS *server_options) end: mysql_rwlock_unlock(&THR_LOCK_servers); - if (error) + if (unlikely(error)) { DBUG_PRINT("info", ("problem creating server <%s>", server_options->server_name.str)); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 38a13c49278..f8702657be7 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1076,13 +1076,7 @@ find_files(THD *thd, Dynamic_array *files, LEX_CSTRING *db, if (ha_discover_table_names(thd, db, dirp, &tl, false)) goto err; } -#if MYSQL_VERSION_ID < 100300 - /* incomplete optimization, but a less drastic change in GA version */ - if (!thd->lex->select_lex.order_list.elements && - !thd->lex->select_lex.group_list.elements) -#else if (is_show_command(thd)) -#endif tl.sort(); #ifndef DBUG_OFF else @@ -1267,7 +1261,7 @@ mysqld_show_create_get_fields(THD *thd, TABLE_LIST *table_list, MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL) || mysql_handle_derived(thd->lex, DT_INIT | DT_PREPARE); thd->pop_internal_handler(); - if (open_error && (thd->killed || thd->is_error())) + if (unlikely(open_error && (thd->killed || thd->is_error()))) goto exit; } @@ -3821,17 +3815,19 @@ bool schema_table_store_record(THD *thd, TABLE *table) { int error; - if (thd->killed) + if (unlikely(thd->killed)) { thd->send_kill_message(); return 1; } - if ((error= table->file->ha_write_tmp_row(table->record[0]))) + if (unlikely((error= table->file->ha_write_tmp_row(table->record[0])))) { TMP_TABLE_PARAM *param= table->pos_in_table_list->schema_table_param; - if (create_internal_tmp_table_from_heap(thd, table, param->start_recinfo, - ¶m->recinfo, error, 0, NULL)) + if (unlikely(create_internal_tmp_table_from_heap(thd, table, + param->start_recinfo, + ¶m->recinfo, error, 0, + NULL))) return 1; } @@ -4596,7 +4592,7 @@ fill_schema_table_by_open(THD *thd, MEM_ROOT *mem_root, else { char buf[NAME_CHAR_LEN + 1]; - if (thd->is_error()) + if (unlikely(thd->is_error())) get_table_engine_for_i_s(thd, buf, table_list, &db_name, &table_name); result= schema_table->process_table(thd, table_list, @@ -4678,13 +4674,14 @@ static int fill_schema_table_names(THD *thd, TABLE_LIST *tables, else table->field[3]->store(STRING_WITH_LEN("ERROR"), cs); - if (thd->is_error() && thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE) + if (unlikely(thd->is_error() && + thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE)) { thd->clear_error(); return 0; } } - if (schema_table_store_record(thd, table)) + if (unlikely(schema_table_store_record(thd, table))) return 1; return 0; } @@ -5010,7 +5007,7 @@ public: if (*level != Sql_condition::WARN_LEVEL_ERROR) return false; - if (!thd->get_stmt_da()->is_error()) + if (likely(!thd->get_stmt_da()->is_error())) thd->get_stmt_da()->set_error_status(sql_errno, msg, sqlstate, *cond_hdl); return true; // handled! 
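schema_table_store_record() above keeps the usual INFORMATION_SCHEMA pattern: try ha_write_tmp_row() into the in-memory tmp table and, when that fails (typically because the HEAP table is full), convert it to an on-disk table via create_internal_tmp_table_from_heap() and retry. A self-contained toy analogue of that write-then-spill control flow; the container and the spill trigger are stand-ins, not the handler API.

#include <cstdio>
#include <string>
#include <vector>

struct Spilling_store
{
  size_t mem_limit;                  /* stand-in for the in-memory size cap */
  std::vector<std::string> mem_rows;
  std::FILE *disk= nullptr;          /* stand-in for the on-disk tmp table  */

  explicit Spilling_store(size_t limit) : mem_limit(limit) {}
  ~Spilling_store() { if (disk) std::fclose(disk); }

  /* false on success, true on fatal error (same convention as the diff). */
  bool store_record(const std::string &row)
  {
    if (!disk)
    {
      if (mem_rows.size() < mem_limit)
      {
        mem_rows.push_back(row);
        return false;
      }
      /* "Table is full": spill everything to disk, then retry the write,
         mirroring create_internal_tmp_table_from_heap(). */
      disk= std::tmpfile();
      if (!disk)
        return true;
      for (const std::string &r : mem_rows)
        std::fprintf(disk, "%s\n", r.c_str());
      mem_rows.clear();
    }
    return std::fprintf(disk, "%s\n", row.c_str()) < 0;
  }
};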
} @@ -5135,9 +5132,9 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) Dynamic_array table_names; int res= make_table_name_list(thd, &table_names, lex, &plan->lookup_field_vals, db_name); - if (res == 2) /* Not fatal error, continue */ + if (unlikely(res == 2)) /* Not fatal error, continue */ continue; - if (res) + if (unlikely(res)) goto err; for (size_t i=0; i < table_names.elements(); i++) @@ -5496,13 +5493,13 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, /* Collect table info from the storage engine */ - if(file) + if (file) { /* If info() fails, then there's nothing else to do */ - if ((info_error= file->info(HA_STATUS_VARIABLE | - HA_STATUS_TIME | - HA_STATUS_VARIABLE_EXTRA | - HA_STATUS_AUTO)) != 0) + if (unlikely((info_error= file->info(HA_STATUS_VARIABLE | + HA_STATUS_TIME | + HA_STATUS_VARIABLE_EXTRA | + HA_STATUS_AUTO)) != 0)) { file->print_error(info_error, MYF(0)); goto err; @@ -5601,7 +5598,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, } err: - if (res || info_error) + if (unlikely(res || info_error)) { /* If an error was encountered, push a warning, set the TABLE COMMENT @@ -5635,8 +5632,6 @@ err: static void store_column_type(TABLE *table, Field *field, CHARSET_INFO *cs, uint offset) { - bool is_blob; - int decimals, field_length; const char *tmp_buff; char column_type_buff[MAX_FIELD_WIDTH]; String column_type(column_type_buff, sizeof(column_type_buff), cs); @@ -5662,22 +5657,18 @@ static void store_column_type(TABLE *table, Field *field, CHARSET_INFO *cs, (tmp_buff ? (uint)(tmp_buff - column_type.ptr()) : column_type.length()), cs); - is_blob= (field->type() == MYSQL_TYPE_BLOB); - if (field->has_charset() || is_blob || - field->real_type() == MYSQL_TYPE_VARCHAR || // For varbinary type - field->real_type() == MYSQL_TYPE_STRING) // For binary type + Information_schema_character_attributes cattr= + field->information_schema_character_attributes(); + if (cattr.has_char_length()) { - uint32 octet_max_length= field->max_display_length(); - if (is_blob && octet_max_length != (uint32) UINT_MAX32) - octet_max_length /= field->charset()->mbmaxlen; - longlong char_max_len= is_blob ? - (longlong) octet_max_length / field->charset()->mbminlen : - (longlong) octet_max_length / field->charset()->mbmaxlen; /* CHARACTER_MAXIMUM_LENGTH column*/ - table->field[offset + 1]->store(char_max_len, TRUE); + table->field[offset + 1]->store((longlong) cattr.char_length(), true); table->field[offset + 1]->set_notnull(); + } + if (cattr.has_octet_length()) + { /* CHARACTER_OCTET_LENGTH column */ - table->field[offset + 2]->store((longlong) octet_max_length, TRUE); + table->field[offset + 2]->store((longlong) cattr.octet_length(), true); table->field[offset + 2]->set_notnull(); } @@ -5686,35 +5677,10 @@ static void store_column_type(TABLE *table, Field *field, CHARSET_INFO *cs, They are set to -1 if they should not be set (we should return NULL) */ - field_length= -1; - decimals= field->decimals(); + Information_schema_numeric_attributes num= + field->information_schema_numeric_attributes(); + switch (field->type()) { - case MYSQL_TYPE_NEWDECIMAL: - field_length= ((Field_new_decimal*) field)->precision; - break; - case MYSQL_TYPE_DECIMAL: - field_length= field->field_length - (decimals ? 
2 : 1); - break; - case MYSQL_TYPE_TINY: - case MYSQL_TYPE_SHORT: - case MYSQL_TYPE_LONG: - case MYSQL_TYPE_INT24: - field_length= field->max_display_length() - 1; - break; - case MYSQL_TYPE_LONGLONG: - field_length= field->max_display_length() - - ((field->flags & UNSIGNED_FLAG) ? 0 : 1); - break; - case MYSQL_TYPE_BIT: - field_length= field->max_display_length(); - decimals= -1; // return NULL - break; - case MYSQL_TYPE_FLOAT: - case MYSQL_TYPE_DOUBLE: - field_length= field->field_length; - if (decimals == NOT_FIXED_DEC) - decimals= -1; // return NULL - break; case MYSQL_TYPE_TIME: case MYSQL_TYPE_TIMESTAMP: case MYSQL_TYPE_DATETIME: @@ -5727,15 +5693,15 @@ static void store_column_type(TABLE *table, Field *field, CHARSET_INFO *cs, } /* NUMERIC_PRECISION column */ - if (field_length >= 0) + if (num.has_precision()) { - table->field[offset + 3]->store((longlong) field_length, TRUE); + table->field[offset + 3]->store((longlong) num.precision(), true); table->field[offset + 3]->set_notnull(); /* NUMERIC_SCALE column */ - if (decimals >= 0) + if (num.has_scale()) { - table->field[offset + 4]->store((longlong) decimals, TRUE); + table->field[offset + 4]->store((longlong) num.scale(), true); table->field[offset + 4]->set_notnull(); } } @@ -6564,7 +6530,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables, I.e. we are in SELECT FROM INFORMATION_SCHEMA.STATISTICS rather than in SHOW KEYS */ - if (thd->is_error()) + if (unlikely(thd->is_error())) push_warning(thd, Sql_condition::WARN_LEVEL_WARN, thd->get_stmt_da()->sql_errno(), thd->get_stmt_da()->message()); @@ -6792,7 +6758,7 @@ static int get_schema_views_record(THD *thd, TABLE_LIST *tables, if (schema_table_store_record(thd, table)) DBUG_RETURN(1); - if (res && thd->is_error()) + if (unlikely(res && thd->is_error())) push_warning(thd, Sql_condition::WARN_LEVEL_WARN, thd->get_stmt_da()->sql_errno(), thd->get_stmt_da()->message()); @@ -6828,7 +6794,7 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables, DBUG_ENTER("get_schema_constraints_record"); if (res) { - if (thd->is_error()) + if (unlikely(thd->is_error())) push_warning(thd, Sql_condition::WARN_LEVEL_WARN, thd->get_stmt_da()->sql_errno(), thd->get_stmt_da()->message()); @@ -6962,7 +6928,7 @@ static int get_schema_triggers_record(THD *thd, TABLE_LIST *tables, */ if (res) { - if (thd->is_error()) + if (unlikely(thd->is_error())) push_warning(thd, Sql_condition::WARN_LEVEL_WARN, thd->get_stmt_da()->sql_errno(), thd->get_stmt_da()->message()); @@ -7026,7 +6992,7 @@ static int get_schema_key_column_usage_record(THD *thd, DBUG_ENTER("get_schema_key_column_usage_record"); if (res) { - if (thd->is_error()) + if (unlikely(thd->is_error())) push_warning(thd, Sql_condition::WARN_LEVEL_WARN, thd->get_stmt_da()->sql_errno(), thd->get_stmt_da()->message()); @@ -7313,7 +7279,7 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables, if (res) { - if (thd->is_error()) + if (unlikely(thd->is_error())) push_warning(thd, Sql_condition::WARN_LEVEL_WARN, thd->get_stmt_da()->sql_errno(), thd->get_stmt_da()->message()); @@ -7725,8 +7691,9 @@ int fill_open_tables(THD *thd, TABLE_LIST *tables, COND *cond) TABLE *table= tables->table; CHARSET_INFO *cs= system_charset_info; OPEN_TABLE_LIST *open_list; - if (!(open_list=list_open_tables(thd,thd->lex->select_lex.db.str, wild)) - && thd->is_fatal_error) + if (unlikely(!(open_list= list_open_tables(thd, thd->lex->select_lex.db.str, + wild))) && + unlikely(thd->is_fatal_error)) DBUG_RETURN(1); for (; open_list ; 
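The rewritten store_column_type() above asks the field for Information_schema_character_attributes / Information_schema_numeric_attributes instead of recomputing lengths per field type. The essence is an optional-like carrier: either the attribute is known and stored, or the I_S column stays NULL. A minimal sketch of that shape with hypothetical names; the real classes live in the server's field code.

#include <cstdint>

struct Numeric_attrs_sketch
{
  uint32_t precision_value;
  uint32_t scale_value;
  bool     precision_known;
  bool     scale_known;

  bool has_precision() const { return precision_known; }
  bool has_scale() const     { return scale_known; }
  uint32_t precision() const { return precision_value; }
  uint32_t scale() const     { return scale_value; }
};

static Numeric_attrs_sketch attrs_decimal(uint32_t p, uint32_t s)
{
  Numeric_attrs_sketch a= {p, s, true, true};
  return a;
}

/* Caller mirrors the diff: fill NUMERIC_PRECISION / NUMERIC_SCALE only
   when the attribute is present, otherwise leave the column NULL. */
static void fill_numeric_columns(const Numeric_attrs_sketch &num,
                                 int64_t *precision_col, bool *precision_is_null,
                                 int64_t *scale_col, bool *scale_is_null)
{
  *precision_is_null= !num.has_precision();
  *scale_is_null= !num.has_scale();
  if (num.has_precision())
    *precision_col= (int64_t) num.precision();
  if (num.has_scale())
    *scale_col= (int64_t) num.scale();
}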
open_list=open_list->next) @@ -7736,7 +7703,7 @@ int fill_open_tables(THD *thd, TABLE_LIST *tables, COND *cond) table->field[1]->store(open_list->table, strlen(open_list->table), cs); table->field[2]->store((longlong) open_list->in_use, TRUE); table->field[3]->store((longlong) open_list->locked, TRUE); - if (schema_table_store_record(thd, table)) + if (unlikely(schema_table_store_record(thd, table))) DBUG_RETURN(1); } DBUG_RETURN(0); @@ -7865,7 +7832,7 @@ get_referential_constraints_record(THD *thd, TABLE_LIST *tables, if (res) { - if (thd->is_error()) + if (unlikely(thd->is_error())) push_warning(thd, Sql_condition::WARN_LEVEL_WARN, thd->get_stmt_da()->sql_errno(), thd->get_stmt_da()->message()); @@ -8811,7 +8778,7 @@ bool get_schema_tables_result(JOIN *join, } } thd->pop_internal_handler(); - if (thd->is_error()) + if (unlikely(thd->is_error())) { /* This hack is here, because I_S code uses thd->clear_error() a lot. @@ -10310,14 +10277,14 @@ static void get_cs_converted_string_value(THD *thd, try_val.copy(input_str->ptr(), input_str->length(), cs, thd->variables.character_set_client, &try_conv_error); - if (!try_conv_error) + if (likely(!try_conv_error)) { String val; uint conv_error= 0; val.copy(input_str->ptr(), input_str->length(), cs, system_charset_info, &conv_error); - if (!conv_error) + if (likely(!conv_error)) { append_unescaped(output_str, val.ptr(), val.length()); return; diff --git a/sql/sql_signal.cc b/sql/sql_signal.cc index e1d9dcad57b..7a28ba65ba2 100644 --- a/sql/sql_signal.cc +++ b/sql/sql_signal.cc @@ -319,7 +319,7 @@ int Sql_cmd_common_signal::eval_signal_informations(THD *thd, Sql_condition *con The various item->val_xxx() methods don't return an error code, but flag thd in case of failure. */ - if (! thd->is_error()) + if (likely(!thd->is_error())) result= 0; end: diff --git a/sql/sql_sort.h b/sql/sql_sort.h index d57239671a8..c29bf1440c9 100644 --- a/sql/sql_sort.h +++ b/sql/sql_sort.h @@ -100,12 +100,12 @@ public: int merge_many_buff(Sort_param *param, uchar *sort_buffer, BUFFPEK *buffpek, uint *maxbuffer, IO_CACHE *t_file); -uint read_to_buffer(IO_CACHE *fromfile,BUFFPEK *buffpek, - uint sort_length); -int merge_buffers(Sort_param *param,IO_CACHE *from_file, - IO_CACHE *to_file, uchar *sort_buffer, - BUFFPEK *lastbuff,BUFFPEK *Fb, - BUFFPEK *Tb,int flag); +ulong read_to_buffer(IO_CACHE *fromfile,BUFFPEK *buffpek, + uint sort_length); +bool merge_buffers(Sort_param *param,IO_CACHE *from_file, + IO_CACHE *to_file, uchar *sort_buffer, + BUFFPEK *lastbuff,BUFFPEK *Fb, + BUFFPEK *Tb,int flag); int merge_index(Sort_param *param, uchar *sort_buffer, BUFFPEK *buffpek, uint maxbuffer, IO_CACHE *tempfile, IO_CACHE *outfile); diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index e62a49f71ea..4edcb779379 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -1517,7 +1517,8 @@ public: ~Stat_table_write_iter() { - cleanup(); + /* Ensure that cleanup has been run */ + DBUG_ASSERT(rowid_buf == 0); } }; @@ -1799,6 +1800,7 @@ private: public: bool is_single_comp_pk; + bool is_partial_fields_present; Index_prefix_calc(THD *thd, TABLE *table, KEY *key_info) : index_table(table), index_info(key_info) @@ -1810,7 +1812,7 @@ public: prefixes= 0; LINT_INIT_STRUCT(calc_state); - is_single_comp_pk= FALSE; + is_partial_fields_present= is_single_comp_pk= FALSE; uint pk= table->s->primary_key; if ((uint) (table->key_info - key_info) == pk && table->key_info[pk].user_defined_key_parts == 1) @@ -1832,7 +1834,10 @@ public: calculating the values of 'avg_frequency' for 
prefixes. */ if (!key_info->key_part[i].field->part_of_key.is_set(keyno)) + { + is_partial_fields_present= TRUE; break; + } if (!(state->last_prefix= new (thd->mem_root) Cached_item_field(thd, @@ -2617,7 +2622,7 @@ int collect_statistics_for_index(THD *thd, TABLE *table, uint index) DBUG_ENTER("collect_statistics_for_index"); /* No statistics for FULLTEXT indexes. */ - if (key_info->flags & HA_FULLTEXT) + if (key_info->flags & (HA_FULLTEXT|HA_SPATIAL)) DBUG_RETURN(rc); Index_prefix_calc index_prefix_calc(thd, table, key_info); @@ -2631,7 +2636,13 @@ int collect_statistics_for_index(THD *thd, TABLE *table, uint index) DBUG_RETURN(rc); } - table->file->ha_start_keyread(index); + /* + Request "only index read" in case of absence of fields which are + partially in the index to avoid problems with partitioning (for example) + which want to get whole field value. + */ + if (!index_prefix_calc.is_partial_fields_present) + table->file->ha_start_keyread(index); table->file->ha_index_init(index, TRUE); rc= table->file->ha_index_first(table->record[0]); while (rc != HA_ERR_END_OF_FILE) @@ -2743,11 +2754,7 @@ int collect_statistics_for_table(THD *thd, TABLE *table) break; if (rc) - { - if (rc == HA_ERR_RECORD_DELETED) - continue; break; - } for (field_ptr= table->field; *field_ptr; field_ptr++) { @@ -3990,11 +3997,11 @@ bool is_stat_table(const LEX_CSTRING *db, LEX_CSTRING *table) { DBUG_ASSERT(db->str && table->str); - if (!cmp(db, &MYSQL_SCHEMA_NAME)) + if (!my_strcasecmp(table_alias_charset, db->str, MYSQL_SCHEMA_NAME.str)) { for (uint i= 0; i < STATISTICS_TABLES; i ++) { - if (cmp(table, &stat_table_name[i]) == 0) + if (!my_strcasecmp(table_alias_charset, table->str, stat_table_name[i].str)) return true; } } diff --git a/sql/sql_string.cc b/sql/sql_string.cc index 390abe36b6c..5d5cc90431b 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -240,8 +240,17 @@ bool String::copy(const char *str,size_t arg_length, CHARSET_INFO *cs) { if (alloc(arg_length)) return TRUE; - DBUG_ASSERT(arg_length <= UINT_MAX32); - if ((str_length=(uint32)arg_length)) + DBUG_ASSERT(arg_length < UINT_MAX32); + if (Ptr == str && arg_length == uint32(str_length)) + { + /* + This can happen in some cases. This code is here mainly to avoid + warnings from valgrind, but can also be an indication of error. 
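The sql_statistics.cc hunk above states the keyread rule in its new comment: request index-only reads only when no indexed field is present merely as a prefix, because some engines (partitioned tables, for example) need the whole field value. A self-contained sketch of that per-key-part decision; the structures are simplified stand-ins for KEY / KEY_PART_INFO, and "fully in key" plays the role of field->part_of_key.is_set(keyno).

#include <vector>

struct Key_part_sketch
{
  bool field_fully_in_key;   /* false for prefix (partial) key parts */
};

struct Key_sketch
{
  std::vector<Key_part_sketch> parts;
};

/* Mirrors is_partial_fields_present / the ha_start_keyread() guard:
   allow index-only reads only when every used field is fully covered. */
static bool can_use_keyread(const Key_sketch &key)
{
  for (const Key_part_sketch &part : key.parts)
    if (!part.field_fully_in_key)
      return false;
  return true;
}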
+ */ + DBUG_PRINT("warning", ("Copying string on itself: %p %zu", + str, arg_length)); + } + else if ((str_length=uint32(arg_length))) memcpy(Ptr,str,arg_length); Ptr[arg_length]=0; str_charset=cs; @@ -667,8 +676,8 @@ int String::strstr(const String &s,uint32 offset) if (!s.length()) return ((int) offset); // Empty string is always found - register const char *str = Ptr+offset; - register const char *search=s.ptr(); + const char *str = Ptr+offset; + const char *search=s.ptr(); const char *end=Ptr+str_length-s.length()+1; const char *search_end=s.ptr()+s.length(); skip: @@ -676,7 +685,7 @@ skip: { if (*str++ == *search) { - register char *i,*j; + char *i,*j; i=(char*) str; j=(char*) search+1; while (j != search_end) if (*i++ != *j++) goto skip; @@ -697,8 +706,8 @@ int String::strrstr(const String &s,uint32 offset) { if (!s.length()) return offset; // Empty string is always found - register const char *str = Ptr+offset-1; - register const char *search=s.ptr()+s.length()-1; + const char *str = Ptr+offset-1; + const char *search=s.ptr()+s.length()-1; const char *end=Ptr+s.length()-2; const char *search_end=s.ptr()-1; @@ -707,7 +716,7 @@ skip: { if (*str-- == *search) { - register char *i,*j; + char *i,*j; i=(char*) str; j=(char*) search-1; while (j != search_end) if (*i-- != *j--) goto skip; diff --git a/sql/sql_string.h b/sql/sql_string.h index 37531429f8d..d110e10647a 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -190,7 +190,7 @@ public: { /* never called */ } static void operator delete[](void *ptr, size_t size) { TRASH_FREE(ptr, size); } - static void operator delete[](void *ptr, MEM_ROOT *mem_root) + static void operator delete[](void *, MEM_ROOT *) { /* never called */ } ~String() { free(); } @@ -402,9 +402,10 @@ public: if (ALIGN_SIZE(arg_length+1) < Alloced_length) { char *new_ptr; - if (!(new_ptr=(char*) - my_realloc(Ptr, arg_length,MYF((thread_specific ? - MY_THREAD_SPECIFIC : 0))))) + if (unlikely(!(new_ptr=(char*) + my_realloc(Ptr, + arg_length,MYF((thread_specific ? 
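String::copy() above now detects the case where the source pointer is the string's own buffer and skips the copy, since memcpy with overlapping source and destination is undefined behaviour and also trips valgrind. A tiny self-contained sketch of the same guard on a toy buffer type, not the server's String class.

#include <cstddef>
#include <cstdlib>
#include <cstring>

struct Buf_sketch
{
  char   *ptr;
  size_t  len;

  Buf_sketch() : ptr(nullptr), len(0) {}
  ~Buf_sketch() { std::free(ptr); }

  /* Returns true on error, false on success (String::copy convention).
     An exact self-copy is detected and skipped instead of memcpy'ing a
     buffer onto itself. */
  bool copy(const char *src, size_t src_len)
  {
    if (src == ptr && src_len == len)
      return false;                       /* already holds exactly this data */
    char *p= static_cast<char*>(std::malloc(src_len + 1));
    if (!p)
      return true;
    if (src_len)
      std::memcpy(p, src, src_len);       /* src may point into the old block */
    p[src_len]= '\0';
    std::free(ptr);
    ptr= p;
    len= src_len;
    return false;
  }
};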
+ MY_THREAD_SPECIFIC : 0)))))) { Alloced_length = 0; real_alloc(arg_length); @@ -455,7 +456,7 @@ public: CHARSET_INFO *fromcs, const char *src, size_t src_length, size_t nchars, String_copier *copier) { - if (alloc(tocs->mbmaxlen * src_length)) + if (unlikely(alloc(tocs->mbmaxlen * src_length))) return true; str_length= copier->well_formed_copy(tocs, Ptr, Alloced_length, fromcs, src, (uint)src_length, (uint)nchars); @@ -511,7 +512,7 @@ public: } else { - if (realloc_with_extra(str_length + 1)) + if (unlikely(realloc_with_extra(str_length + 1))) return 1; Ptr[str_length++]=chr; } @@ -521,8 +522,8 @@ public: { for (const char *src_end= src + srclen ; src != src_end ; src++) { - if (append(_dig_vec_lower[((uchar) *src) >> 4]) || - append(_dig_vec_lower[((uchar) *src) & 0x0F])) + if (unlikely(append(_dig_vec_lower[((uchar) *src) >> 4])) || + unlikely(append(_dig_vec_lower[((uchar) *src) & 0x0F]))) return true; } return false; @@ -628,7 +629,7 @@ public: { char *buff= Ptr + str_length; char *end= ll2str(i, buff, radix, 0); - str_length+= (int) (end-buff); + str_length+= uint32(end-buff); } /* Inline (general) functions used by the protocol functions */ @@ -638,7 +639,7 @@ public: uint32 new_length= arg_length + str_length; if (new_length > Alloced_length) { - if (realloc(new_length + step_alloc)) + if (unlikely(realloc(new_length + step_alloc))) return 0; } uint32 old_length= str_length; @@ -650,7 +651,8 @@ public: inline bool append(const char *s, uint32 arg_length, uint32 step_alloc) { uint32 new_length= arg_length + str_length; - if (new_length > Alloced_length && realloc(new_length + step_alloc)) + if (new_length > Alloced_length && + unlikely(realloc(new_length + step_alloc))) return TRUE; memcpy(Ptr+str_length, s, arg_length); str_length+= arg_length; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 448d095c876..6835d92773c 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2016, Oracle and/or its affiliates. - Copyright (c) 2010, 2017, MariaDB Corporation. + Copyright (c) 2010, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -29,8 +29,7 @@ #include "lock.h" // mysql_unlock_tables #include "strfunc.h" // find_type2, find_set #include "sql_truncate.h" // regenerate_locked_table -#include "sql_partition.h" // mem_alloc_error, - // generate_partition_syntax, +#include "sql_partition.h" // generate_partition_syntax, // partition_info // NOT_A_PARTITION_ID #include "sql_db.h" // load_db_opt_by_name @@ -115,7 +114,7 @@ static char* add_identifier(THD* thd, char *to_p, const char * end_p, res= strconvert(&my_charset_filename, conv_name, name_len, system_charset_info, conv_string, FN_REFLEN, &errors); - if (!res || errors) + if (unlikely(!res || errors)) { DBUG_PRINT("error", ("strconvert of '%s' failed with %u (errors: %u)", conv_name, res, errors)); conv_name= name; @@ -128,7 +127,9 @@ static char* add_identifier(THD* thd, char *to_p, const char * end_p, conv_name_end= conv_string + res; } - quote = thd ? get_quote_char_for_identifier(thd, conv_name, res - 1) : '`'; + quote= (likely(thd) ? 
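The sql_string.h append paths above grow the buffer to new_length + step_alloc rather than to the exact size, so a run of small appends does not reallocate on every call. A self-contained sketch of that growth policy; the class is a stand-in for String.

#include <cstdlib>
#include <cstring>

struct Growing_buf
{
  char   *ptr= nullptr;
  size_t  used= 0;
  size_t  alloced= 0;

  ~Growing_buf() { std::free(ptr); }

  /* Mirrors append(s, len, step_alloc): when growth is needed, reserve
     extra headroom so repeated appends amortize the reallocations. */
  bool append(const char *s, size_t len, size_t step_alloc)
  {
    size_t new_length= used + len;
    if (new_length > alloced)
    {
      size_t new_alloced= new_length + step_alloc;
      char *p= static_cast<char*>(std::realloc(ptr, new_alloced));
      if (!p)
        return true;                     /* TRUE means error, as in String */
      ptr= p;
      alloced= new_alloced;
    }
    std::memcpy(ptr + used, s, len);
    used= new_length;
    return false;
  }
};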
+ get_quote_char_for_identifier(thd, conv_name, res - 1) : + '`'); if (quote != EOF && (end_p - to_p > 2)) { @@ -390,7 +391,7 @@ uint filename_to_tablename(const char *from, char *to, size_t to_length, res= strconvert(&my_charset_filename, from, FN_REFLEN, system_charset_info, to, to_length, &errors); - if (errors) // Old 5.0 name + if (unlikely(errors)) // Old 5.0 name { res= (strxnmov(to, to_length, MYSQL50_TABLE_NAME_PREFIX, from, NullS) - to); @@ -1144,11 +1145,8 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry) } hton= plugin_data(plugin, handlerton*); file= get_new_handler((TABLE_SHARE*)0, &mem_root, hton); - if (!file) - { - mem_alloc_error(sizeof(handler)); + if (unlikely(!file)) goto error; - } } switch (ddl_log_entry->action_type) { @@ -1160,7 +1158,8 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry) if (frm_action) { strxmov(to_path, ddl_log_entry->name, reg_ext, NullS); - if ((error= mysql_file_delete(key_file_frm, to_path, MYF(MY_WME)))) + if (unlikely((error= mysql_file_delete(key_file_frm, to_path, + MYF(MY_WME))))) { if (my_errno != ENOENT) break; @@ -1172,7 +1171,7 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry) } else { - if ((error= file->ha_delete_table(ddl_log_entry->name))) + if (unlikely((error= file->ha_delete_table(ddl_log_entry->name)))) { if (error != ENOENT && error != HA_ERR_NO_SUCH_TABLE) break; @@ -1425,19 +1424,19 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry, + (2*FN_REFLEN)], (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (3*FN_REFLEN)])); - if (write_ddl_log_file_entry((*active_entry)->entry_pos)) + if (unlikely(write_ddl_log_file_entry((*active_entry)->entry_pos))) { error= TRUE; sql_print_error("Failed to write entry_no = %u", (*active_entry)->entry_pos); } - if (write_header && !error) + if (write_header && likely(!error)) { (void) sync_ddl_log_no_lock(); if (write_ddl_log_header()) error= TRUE; } - if (error) + if (unlikely(error)) release_ddl_log_memory_entry(*active_entry); DBUG_RETURN(error); } @@ -1868,8 +1867,10 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) lpt->create_info->tmp_table(), frm.str, frm.length); my_free(const_cast(frm.str)); - if (error || lpt->table->file->ha_create_partitioning_metadata(shadow_path, - NULL, CHF_CREATE_FLAG)) + if (unlikely(error) || + unlikely(lpt->table->file-> + ha_create_partitioning_metadata(shadow_path, + NULL, CHF_CREATE_FLAG))) { mysql_file_delete(key_file_frm, shadow_frm_name, MYF(0)); error= 1; @@ -2127,7 +2128,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, bool if_exists, false, drop_sequence, false, false); thd->pop_internal_handler(); - if (error) + if (unlikely(error)) DBUG_RETURN(TRUE); my_ok(thd); DBUG_RETURN(FALSE); @@ -2524,26 +2525,26 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, */ (void) mysql_file_delete(key_file_frm, path, MYF(0)); } - else if (mysql_file_delete(key_file_frm, path, - MYF(MY_WME))) + else if (unlikely(mysql_file_delete(key_file_frm, path, + MYF(MY_WME)))) { frm_delete_error= my_errno; DBUG_ASSERT(frm_delete_error); } } - if (!error) + if (likely(!error)) { int trigger_drop_error= 0; - if (!frm_delete_error) + if (likely(!frm_delete_error)) { non_tmp_table_deleted= TRUE; trigger_drop_error= Table_triggers_list::drop_all_triggers(thd, &db, &table->table_name); } - if (trigger_drop_error || + if (unlikely(trigger_drop_error) || (frm_delete_error && frm_delete_error != ENOENT)) error= 1; else if (frm_delete_error && 
if_exists) @@ -3190,8 +3191,10 @@ bool Column_definition::prepare_stage1_string(THD *thd, Convert the default value from client character set into the column character set if necessary. We can only do this for constants as we have not yet run fix_fields. + But not for blobs, as they will be stored as SQL expressions, not + written down into the record image. */ - if (default_value && + if (!(flags & BLOB_FLAG) && default_value && default_value->expr->basic_const_item() && charset != default_value->expr->collation.collation) { @@ -4487,12 +4490,10 @@ handler *mysql_create_frm_image(THD *thd, db_options= create_info->table_options_with_row_type(); - if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, - create_info->db_type))) - { - mem_alloc_error(sizeof(handler)); + if (unlikely(!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, + create_info->db_type)))) DBUG_RETURN(NULL); - } + #ifdef WITH_PARTITION_STORAGE_ENGINE partition_info *part_info= thd->work_part_info; @@ -4505,11 +4506,9 @@ handler *mysql_create_frm_image(THD *thd, object with the default settings. */ thd->work_part_info= part_info= new partition_info(); - if (!part_info) - { - mem_alloc_error(sizeof(partition_info)); + if (unlikely(!part_info)) goto err; - } + file->set_auto_partitions(part_info); part_info->default_engine_type= create_info->db_type; part_info->is_auto_partitioned= TRUE; @@ -4680,12 +4679,9 @@ handler *mysql_create_frm_image(THD *thd, engines in partition clauses. */ delete file; - if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, - engine_type))) - { - mem_alloc_error(sizeof(handler)); + if (unlikely(!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, + engine_type)))) DBUG_RETURN(NULL); - } } } /* @@ -4767,7 +4763,7 @@ err: @retval 0 OK @retval 1 error - @retval -1 table existed but IF EXISTS was used + @retval -1 table existed but IF NOT EXISTS was used */ static @@ -4809,10 +4805,14 @@ int create_table_impl(THD *thd, create_info->data_file_name= create_info->index_file_name= 0; } else - if (error_if_data_home_dir(create_info->data_file_name, "DATA DIRECTORY") || - error_if_data_home_dir(create_info->index_file_name, "INDEX DIRECTORY")|| - check_partition_dirs(thd->lex->part_info)) - goto err; + { + if (unlikely(error_if_data_home_dir(create_info->data_file_name, + "DATA DIRECTORY")) || + unlikely(error_if_data_home_dir(create_info->index_file_name, + "INDEX DIRECTORY")) || + unlikely(check_partition_dirs(thd->lex->part_info))) + goto err; + } alias= const_cast(table_case_name(create_info, table_name)); @@ -5044,6 +5044,12 @@ warn: /** Simple wrapper around create_table_impl() to be used in various version of CREATE TABLE statement. + + @result + 1 unspefied error + 2 error; Don't log create statement + 0 ok + -1 Table was used with IF NOT EXISTS and table existed (warning, not error) */ int mysql_create_table_no_lock(THD *thd, @@ -5090,6 +5096,24 @@ int mysql_create_table_no_lock(THD *thd, else table_list->table= 0; res= sequence_insert(thd, thd->lex, table_list); + if (res) + { + DBUG_ASSERT(thd->is_error()); + /* Drop the table as it wasn't completely done */ + if (!mysql_rm_table_no_locks(thd, table_list, 1, + create_info->tmp_table(), + false, true /* Sequence*/, + true /* Don't log_query */, + true /* Don't free locks */ )) + { + /* + From the user point of view, the table creation failed + We return 2 to indicate that this statement doesn't have + to be logged. 
+ */ + res= 2; + } + } } return res; @@ -5173,7 +5197,7 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table, This should always work as we have a meta lock on the table. */ thd->locked_tables_list.add_back_last_deleted_lock(pos_in_locked_tables); - if (thd->locked_tables_list.reopen_tables(thd)) + if (thd->locked_tables_list.reopen_tables(thd, false)) { thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0); result= 1; @@ -5195,9 +5219,10 @@ err: thd->transaction.stmt.mark_created_temp_table(); /* Write log if no error or if we already deleted a table */ - if (!result || thd->log_current_statement) + if (likely(!result) || thd->log_current_statement) { - if (result && create_info->table_was_deleted && pos_in_locked_tables) + if (unlikely(result) && create_info->table_was_deleted && + pos_in_locked_tables) { /* Possible locked table was dropped. We should remove meta data locks @@ -5205,7 +5230,7 @@ err: */ thd->locked_tables_list.unlock_locked_table(thd, mdl_ticket); } - else if (!result && create_info->tmp_table() && create_info->table) + else if (likely(!result) && create_info->tmp_table() && create_info->table) { /* Remember that tmp table creation was logged so that we know if @@ -5213,8 +5238,8 @@ err: */ create_info->table->s->table_creation_was_logged= 1; } - if (write_bin_log(thd, result ? FALSE : TRUE, thd->query(), - thd->query_length(), is_trans)) + if (unlikely(write_bin_log(thd, result ? FALSE : TRUE, thd->query(), + thd->query_length(), is_trans))) result= 1; } DBUG_RETURN(result); @@ -5439,9 +5464,9 @@ mysql_rename_table(handlerton *base, const LEX_CSTRING *old_db, error= my_errno; (void) file->ha_create_partitioning_metadata(to, from, CHF_RENAME_FLAG); } - else if (!file || !(error=file->ha_rename_table(from_base, to_base))) + else if (!file || likely(!(error=file->ha_rename_table(from_base, to_base)))) { - if (!(flags & NO_FRM_RENAME) && rename_file_ext(from,to,reg_ext)) + if (!(flags & NO_FRM_RENAME) && unlikely(rename_file_ext(from,to,reg_ext))) { error=my_errno; if (file) @@ -5454,10 +5479,14 @@ mysql_rename_table(handlerton *base, const LEX_CSTRING *old_db, } } delete file; - if (error == HA_ERR_WRONG_COMMAND) - my_error(ER_NOT_SUPPORTED_YET, MYF(0), "ALTER TABLE"); - else if (error) - my_error(ER_ERROR_ON_RENAME, MYF(0), from, to, error); + + if (unlikely(error)) + { + if (error == HA_ERR_WRONG_COMMAND) + my_error(ER_NOT_SUPPORTED_YET, MYF(0), "ALTER TABLE"); + else + my_error(ER_ERROR_ON_RENAME, MYF(0), from, to, error); + } else if (!(flags & FN_IS_TMP)) mysql_audit_rename_table(thd, old_db, old_name, new_db, new_name); @@ -5502,7 +5531,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST *pos_in_locked_tables= 0; Alter_info local_alter_info; Alter_table_ctx local_alter_ctx; // Not used - bool res= TRUE; + int res= 1; bool is_trans= FALSE; bool do_logging= FALSE; uint not_used; @@ -5625,7 +5654,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, This should always work as we have a meta lock on the table. */ thd->locked_tables_list.add_back_last_deleted_lock(pos_in_locked_tables); - if (thd->locked_tables_list.reopen_tables(thd)) + if (thd->locked_tables_list.reopen_tables(thd, false)) { thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0); res= 1; // We got an error @@ -5807,12 +5836,15 @@ err: */ log_drop_table(thd, &table->db, &table->table_name, create_info->tmp_table()); } - else if (write_bin_log(thd, res ? 
FALSE : TRUE, thd->query(), - thd->query_length(), is_trans)) + else if (res != 2) // Table was not dropped + { + if (write_bin_log(thd, res ? FALSE : TRUE, thd->query(), + thd->query_length(), is_trans)) res= 1; + } } - DBUG_RETURN(res); + DBUG_RETURN(res != 0); } @@ -5859,7 +5891,7 @@ int mysql_discard_or_import_tablespace(THD *thd, THD_STAGE_INFO(thd, stage_end); - if (error) + if (unlikely(error)) goto err; /* @@ -5870,15 +5902,15 @@ int mysql_discard_or_import_tablespace(THD *thd, /* The ALTER TABLE is always in its own transaction */ error= trans_commit_stmt(thd); - if (trans_commit_implicit(thd)) + if (unlikely(trans_commit_implicit(thd))) error=1; - if (!error) + if (likely(!error)) error= write_bin_log(thd, FALSE, thd->query(), thd->query_length()); err: thd->tablespace_op=FALSE; - if (error == 0) + if (likely(error == 0)) { my_ok(thd); DBUG_RETURN(0); @@ -6007,7 +6039,7 @@ drop_create_field: break; } } - if (*f_ptr == NULL) + if (unlikely(*f_ptr == NULL)) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_BAD_FIELD_ERROR, @@ -6043,7 +6075,7 @@ drop_create_field: acol->name, (*f_ptr)->field_name.str) == 0) break; } - if (*f_ptr == NULL) + if (unlikely(*f_ptr == NULL)) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_BAD_FIELD_ERROR, @@ -6063,10 +6095,28 @@ drop_create_field: List_iterator drop_it(alter_info->drop_list); Alter_drop *drop; bool remove_drop; + ulonglong left_flags= 0; while ((drop= drop_it++)) { + ulonglong cur_flag= 0; + switch (drop->type) { + case Alter_drop::COLUMN: + cur_flag= ALTER_PARSER_DROP_COLUMN; + break; + case Alter_drop::FOREIGN_KEY: + cur_flag= ALTER_DROP_FOREIGN_KEY; + break; + case Alter_drop::KEY: + cur_flag= ALTER_DROP_INDEX; + break; + default: + break; + } if (!drop->drop_if_exists) + { + left_flags|= cur_flag; continue; + } remove_drop= TRUE; if (drop->type == Alter_drop::COLUMN) { @@ -6158,12 +6208,15 @@ drop_create_field: ER_THD(thd, ER_CANT_DROP_FIELD_OR_KEY), drop->type_name(), drop->name); drop_it.remove(); - if (alter_info->drop_list.is_empty()) - alter_info->flags&= ~(ALTER_PARSER_DROP_COLUMN | - ALTER_DROP_INDEX | - ALTER_DROP_FOREIGN_KEY); } + else + left_flags|= cur_flag; } + /* Reset state to what's left in drop list */ + alter_info->flags&= ~(ALTER_PARSER_DROP_COLUMN | + ALTER_DROP_INDEX | + ALTER_DROP_FOREIGN_KEY); + alter_info->flags|= left_flags; } /* ALTER TABLE ADD KEY IF NOT EXISTS */ @@ -6277,8 +6330,9 @@ remove_key: } } } - + #ifdef WITH_PARTITION_STORAGE_ENGINE + DBUG_ASSERT(thd->work_part_info == 0); partition_info *tab_part_info= table->part_info; thd->work_part_info= thd->lex->part_info; if (tab_part_info) @@ -7216,18 +7270,20 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled, error= table->file->ha_disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); } - if (error == HA_ERR_WRONG_COMMAND) + if (unlikely(error)) { - THD *thd= table->in_use; - push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, - ER_ILLEGAL_HA, ER_THD(thd, ER_ILLEGAL_HA), - table->file->table_type(), - table->s->db.str, table->s->table_name.str); - error= 0; + if (error == HA_ERR_WRONG_COMMAND) + { + THD *thd= table->in_use; + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_ILLEGAL_HA, ER_THD(thd, ER_ILLEGAL_HA), + table->file->table_type(), + table->s->db.str, table->s->table_name.str); + error= 0; + } + else + table->file->print_error(error, MYF(0)); } - else if (error) - table->file->print_error(error, MYF(0)); - DBUG_RETURN(error); } @@ -7365,8 +7421,11 @@ static bool 
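The drop_create_field hunk above no longer clears ALTER_PARSER_DROP_COLUMN / ALTER_DROP_INDEX / ALTER_DROP_FOREIGN_KEY only when the drop list ends up empty; it accumulates left_flags for the entries that survive and rebuilds the flag bits from those. A self-contained sketch of the same rebuild-from-what-is-left idea with simplified types standing in for Alter_drop and the alter flags.

#include <cstdint>
#include <vector>

enum Drop_type { DROP_COLUMN, DROP_KEY, DROP_FOREIGN_KEY };

static const uint64_t FLAG_DROP_COLUMN= 1 << 0;
static const uint64_t FLAG_DROP_INDEX=  1 << 1;
static const uint64_t FLAG_DROP_FK=     1 << 2;

struct Drop_sketch
{
  Drop_type type;
  bool drop_if_exists;
  bool target_exists;   /* outcome of the lookup the real code performs */
};

static uint64_t flag_for(Drop_type t)
{
  switch (t)
  {
  case DROP_COLUMN:      return FLAG_DROP_COLUMN;
  case DROP_KEY:         return FLAG_DROP_INDEX;
  case DROP_FOREIGN_KEY: return FLAG_DROP_FK;
  }
  return 0;
}

/* Remove IF EXISTS drops whose target is missing, then recompute the
   DROP-related flag bits from whatever is still in the list. */
static uint64_t prune_drops(std::vector<Drop_sketch> &drops, uint64_t flags)
{
  uint64_t left_flags= 0;
  std::vector<Drop_sketch> kept;
  for (const Drop_sketch &d : drops)
  {
    if (d.drop_if_exists && !d.target_exists)
      continue;                          /* silently dropped from the list */
    left_flags|= flag_for(d.type);
    kept.push_back(d);
  }
  drops.swap(kept);
  flags&= ~(FLAG_DROP_COLUMN | FLAG_DROP_INDEX | FLAG_DROP_FK);
  return flags | left_flags;
}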
mysql_inplace_alter_table(THD *thd, exclusive lock is required for duration of the whole statement. */ if (inplace_supported == HA_ALTER_INPLACE_EXCLUSIVE_LOCK || - ((inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE || - inplace_supported == HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE) && + ((inplace_supported == HA_ALTER_INPLACE_COPY_NO_LOCK || + inplace_supported == HA_ALTER_INPLACE_COPY_LOCK || + inplace_supported == HA_ALTER_INPLACE_NOCOPY_NO_LOCK || + inplace_supported == HA_ALTER_INPLACE_NOCOPY_LOCK || + inplace_supported == HA_ALTER_INPLACE_INSTANT) && (thd->locked_tables_mode == LTM_LOCK_TABLES || thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES)) || alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE) @@ -7390,8 +7449,11 @@ static bool mysql_inplace_alter_table(THD *thd, */ reopen_tables= true; } - else if (inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE || - inplace_supported == HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE) + else if (inplace_supported == HA_ALTER_INPLACE_COPY_LOCK || + inplace_supported == HA_ALTER_INPLACE_COPY_NO_LOCK || + inplace_supported == HA_ALTER_INPLACE_NOCOPY_LOCK || + inplace_supported == HA_ALTER_INPLACE_NOCOPY_NO_LOCK || + inplace_supported == HA_ALTER_INPLACE_INSTANT) { /* Storage engine has requested exclusive lock only for prepare phase @@ -7436,7 +7498,9 @@ static bool mysql_inplace_alter_table(THD *thd, DBUG_ASSERT(0); // fall through case HA_ALTER_INPLACE_NO_LOCK: - case HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE: + case HA_ALTER_INPLACE_INSTANT: + case HA_ALTER_INPLACE_COPY_NO_LOCK: + case HA_ALTER_INPLACE_NOCOPY_NO_LOCK: switch (alter_info->requested_lock) { case Alter_info::ALTER_TABLE_LOCK_DEFAULT: case Alter_info::ALTER_TABLE_LOCK_NONE: @@ -7448,8 +7512,9 @@ static bool mysql_inplace_alter_table(THD *thd, } break; case HA_ALTER_INPLACE_EXCLUSIVE_LOCK: - case HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE: case HA_ALTER_INPLACE_SHARED_LOCK: + case HA_ALTER_INPLACE_COPY_LOCK: + case HA_ALTER_INPLACE_NOCOPY_LOCK: break; } @@ -7464,19 +7529,23 @@ static bool mysql_inplace_alter_table(THD *thd, necessary only for prepare phase (unless we are not under LOCK TABLES) and user has not explicitly requested exclusive lock. */ - if ((inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE || - inplace_supported == HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE) && + if ((inplace_supported == HA_ALTER_INPLACE_COPY_NO_LOCK || + inplace_supported == HA_ALTER_INPLACE_COPY_LOCK || + inplace_supported == HA_ALTER_INPLACE_NOCOPY_LOCK || + inplace_supported == HA_ALTER_INPLACE_NOCOPY_NO_LOCK) && !(thd->locked_tables_mode == LTM_LOCK_TABLES || thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES) && (alter_info->requested_lock != Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE)) { /* If storage engine or user requested shared lock downgrade to SNW. 
*/ - if (inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE || + if (inplace_supported == HA_ALTER_INPLACE_COPY_LOCK || + inplace_supported == HA_ALTER_INPLACE_NOCOPY_LOCK || alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_SHARED) table->mdl_ticket->downgrade_lock(MDL_SHARED_NO_WRITE); else { - DBUG_ASSERT(inplace_supported == HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE); + DBUG_ASSERT(inplace_supported == HA_ALTER_INPLACE_COPY_NO_LOCK || + inplace_supported == HA_ALTER_INPLACE_NOCOPY_NO_LOCK); table->mdl_ticket->downgrade_lock(MDL_SHARED_UPGRADABLE); } } @@ -7633,7 +7702,7 @@ static bool mysql_inplace_alter_table(THD *thd, HA_EXTRA_PREPARE_FOR_RENAME : HA_EXTRA_NOT_USED, NULL); - if (thd->locked_tables_list.reopen_tables(thd)) + if (thd->locked_tables_list.reopen_tables(thd, false)) thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0); /* QQ; do something about metadata locks ? */ } @@ -7888,6 +7957,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, if (field->default_value) field->default_value->expr->walk(&Item::rename_fields_processor, 1, &column_rename_param); + table->m_needs_reopen= 1; // because new column name is on thd->mem_root } /* Check if field is changed */ @@ -8007,7 +8077,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, break; } - if (find && !find->field) + if (likely(find && !find->field)) find_it.remove(); else { @@ -8078,7 +8148,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, &find->field_name)) break; } - if (!find) + if (unlikely(!find)) { my_error(ER_BAD_FIELD_ERROR, MYF(0), def->after.str, table->s->table_name.str); @@ -8112,13 +8182,13 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, alter_it.remove(); } } - if (alter_info->alter_list.elements) + if (unlikely(alter_info->alter_list.elements)) { my_error(ER_BAD_FIELD_ERROR, MYF(0), alter_info->alter_list.head()->name, table->s->table_name.str); goto err; } - if (!new_create_list.elements) + if (unlikely(!new_create_list.elements)) { my_message(ER_CANT_REMOVE_ALL_FIELDS, ER_THD(thd, ER_CANT_REMOVE_ALL_FIELDS), @@ -8594,7 +8664,7 @@ static bool fk_prepare_copy_alter_table(THD *thd, TABLE *table, table->file->get_parent_foreign_key_list(thd, &fk_parent_key_list); /* OOM when building list. */ - if (thd->is_error()) + if (unlikely(thd->is_error())) DBUG_RETURN(true); /* @@ -8689,7 +8759,7 @@ static bool fk_prepare_copy_alter_table(THD *thd, TABLE *table, table->file->get_foreign_key_list(thd, &fk_child_key_list); /* OOM when building list. */ - if (thd->is_error()) + if (unlikely(thd->is_error())) DBUG_RETURN(true); /* @@ -8783,7 +8853,7 @@ simple_tmp_rename_or_index_change(THD *thd, TABLE_LIST *table_list, keys_onoff); } - if (!error && alter_ctx->is_table_renamed()) + if (likely(!error) && alter_ctx->is_table_renamed()) { THD_STAGE_INFO(thd, stage_rename); @@ -8796,20 +8866,17 @@ simple_tmp_rename_or_index_change(THD *thd, TABLE_LIST *table_list, &alter_ctx->new_alias); } - if (!error) + if (likely(!error)) { - int res= 0; /* We do not replicate alter table statement on temporary tables under ROW-based replication. 
*/ if (!thd->is_current_stmt_binlog_format_row()) { - res= write_bin_log(thd, true, thd->query(), thd->query_length()); + error= write_bin_log(thd, true, thd->query(), thd->query_length()) != 0; } - if (res != 0) - error= true; - else + if (likely(!error)) my_ok(thd); } @@ -8858,7 +8925,7 @@ simple_rename_or_index_change(THD *thd, TABLE_LIST *table_list, keys_onoff); } - if (!error && alter_ctx->is_table_renamed()) + if (likely(!error) && alter_ctx->is_table_renamed()) { THD_STAGE_INFO(thd, stage_rename); handlerton *old_db_type= table->s->db_type(); @@ -8898,11 +8965,11 @@ simple_rename_or_index_change(THD *thd, TABLE_LIST *table_list, } } - if (!error) + if (likely(!error)) { error= write_bin_log(thd, TRUE, thd->query(), thd->query_length()); - if (!error) + if (likely(!error)) my_ok(thd); } table_list->table= NULL; // For query cache @@ -8963,7 +9030,8 @@ simple_rename_or_index_change(THD *thd, TABLE_LIST *table_list, based on information about the table changes from fill_alter_inplace_info(). */ -bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, const LEX_CSTRING *new_name, +bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, + const LEX_CSTRING *new_name, HA_CREATE_INFO *create_info, TABLE_LIST *table_list, Alter_info *alter_info, @@ -8971,6 +9039,10 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, const LEX_CSTRING *n { DBUG_ENTER("mysql_alter_table"); +#ifdef WITH_PARTITION_STORAGE_ENGINE + thd->work_part_info= 0; // Used by partitioning +#endif + /* Check if we attempt to alter mysql.slow_log or mysql.general_log table and return an error if @@ -9065,7 +9137,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, const LEX_CSTRING *n };); #endif // WITH_WSREP - if (error) + if (unlikely(error)) DBUG_RETURN(true); table->use_all_columns(); @@ -9386,7 +9458,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, const LEX_CSTRING *n supports auto-partitioning as such engines can do some changes using in-place API. */ - if ((thd->variables.old_alter_table && + if ((thd->variables.alter_algorithm == Alter_info::ALTER_TABLE_ALGORITHM_COPY && alter_info->requested_algorithm != Alter_info::ALTER_TABLE_ALGORITHM_INPLACE) || is_inplace_alter_impossible(table, create_info, alter_info) @@ -9511,7 +9583,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, const LEX_CSTRING *n &key_info, &key_count, &frm); reenable_binlog(thd); thd->abort_on_warning= false; - if (error) + if (unlikely(error)) { my_free(const_cast(frm.str)); DBUG_RETURN(true); @@ -9594,74 +9666,32 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, const LEX_CSTRING *n goto err_new_table_cleanup; thd->count_cuted_fields= CHECK_FIELD_IGNORE; + if (alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_NONE) + ha_alter_info.online= true; // Ask storage engine whether to use copy or in-place enum_alter_inplace_result inplace_supported= table->file->check_if_supported_inplace_alter(altered_table, &ha_alter_info); - switch (inplace_supported) { - case HA_ALTER_INPLACE_EXCLUSIVE_LOCK: - // If SHARED lock and no particular algorithm was requested, use COPY. - if (alter_info->requested_lock == - Alter_info::ALTER_TABLE_LOCK_SHARED && - alter_info->requested_algorithm == - Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT) - { - use_inplace= false; - } - // Otherwise, if weaker lock was requested, report errror. 
- else if (alter_info->requested_lock == - Alter_info::ALTER_TABLE_LOCK_NONE || - alter_info->requested_lock == - Alter_info::ALTER_TABLE_LOCK_SHARED) - { - ha_alter_info.report_unsupported_error("LOCK=NONE/SHARED", - "LOCK=EXCLUSIVE"); - thd->drop_temporary_table(altered_table, NULL, false); - goto err_new_table_cleanup; - } - break; - case HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE: - case HA_ALTER_INPLACE_SHARED_LOCK: - // If weaker lock was requested, report errror. - if (alter_info->requested_lock == - Alter_info::ALTER_TABLE_LOCK_NONE) - { - ha_alter_info.report_unsupported_error("LOCK=NONE", "LOCK=SHARED"); - thd->drop_temporary_table(altered_table, NULL, false); - goto err_new_table_cleanup; - } - break; - case HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE: - case HA_ALTER_INPLACE_NO_LOCK: - break; - case HA_ALTER_INPLACE_NOT_SUPPORTED: - // If INPLACE was requested, report error. - if (alter_info->requested_algorithm == - Alter_info::ALTER_TABLE_ALGORITHM_INPLACE) - { - ha_alter_info.report_unsupported_error("ALGORITHM=INPLACE", - "ALGORITHM=COPY"); - thd->drop_temporary_table(altered_table, NULL, false); - goto err_new_table_cleanup; - } - // COPY with LOCK=NONE is not supported, no point in trying. - if (alter_info->requested_lock == - Alter_info::ALTER_TABLE_LOCK_NONE) - { - ha_alter_info.report_unsupported_error("LOCK=NONE", "LOCK=SHARED"); - thd->drop_temporary_table(altered_table, NULL, false); - goto err_new_table_cleanup; - } - // Otherwise use COPY - use_inplace= false; - break; - case HA_ALTER_ERROR: - default: + if (alter_info->supports_algorithm(thd, inplace_supported, &ha_alter_info) || + alter_info->supports_lock(thd, inplace_supported, &ha_alter_info)) + { thd->drop_temporary_table(altered_table, NULL, false); goto err_new_table_cleanup; } + // If SHARED lock and no particular algorithm was requested, use COPY. + if (inplace_supported == HA_ALTER_INPLACE_EXCLUSIVE_LOCK && + alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_SHARED && + alter_info->requested_algorithm == + Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT && + thd->variables.alter_algorithm == + Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT) + use_inplace= false; + + if (inplace_supported == HA_ALTER_INPLACE_NOT_SUPPORTED) + use_inplace= false; + if (use_inplace) { table->s->frm_image= &frm; @@ -9967,7 +9997,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, const LEX_CSTRING *n end_inplace: - if (thd->locked_tables_list.reopen_tables(thd)) + if (thd->locked_tables_list.reopen_tables(thd, false)) goto err_with_mdl_after_alter; THD_STAGE_INFO(thd, stage_end); @@ -10018,8 +10048,8 @@ err_new_table_cleanup: the table to be altered isn't empty. Report error here. */ - if (alter_ctx.error_if_not_empty && - thd->get_stmt_da()->current_row_for_warning()) + if (unlikely(alter_ctx.error_if_not_empty && + thd->get_stmt_da()->current_row_for_warning())) { const char *f_val= 0; enum enum_mysql_timestamp_type t_type= MYSQL_TIMESTAMP_DATE; @@ -10083,9 +10113,7 @@ bool mysql_trans_prepare_alter_copy_data(THD *thd) This needs to be done before external_lock. 
*/ - if (ha_enable_transaction(thd, FALSE)) - DBUG_RETURN(TRUE); - DBUG_RETURN(FALSE); + DBUG_RETURN(ha_enable_transaction(thd, FALSE) != 0); } @@ -10138,6 +10166,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, List fields; List all_fields; bool auto_increment_field_copied= 0; + bool cleanup_done= 0; bool init_read_record_done= 0; sql_mode_t save_sql_mode= thd->variables.sql_mode; ulonglong prev_insert_id, time_to_report_progress; @@ -10153,18 +10182,31 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, /* Two or 3 stages; Sorting, copying data and update indexes */ thd_progress_init(thd, 2 + MY_TEST(order)); - if (mysql_trans_prepare_alter_copy_data(thd)) + if (!(copy= new (thd->mem_root) Copy_field[to->s->fields])) DBUG_RETURN(-1); - if (!(copy= new (thd->mem_root) Copy_field[to->s->fields])) - DBUG_RETURN(-1); /* purecov: inspected */ + if (mysql_trans_prepare_alter_copy_data(thd)) + { + delete [] copy; + DBUG_RETURN(-1); + } /* We need external lock before we can disable/enable keys */ if (to->file->ha_external_lock(thd, F_WRLCK)) + { + /* Undo call to mysql_trans_prepare_alter_copy_data() */ + ha_enable_transaction(thd, TRUE); + delete [] copy; DBUG_RETURN(-1); + } alter_table_manage_keys(to, from->file->indexes_are_disabled(), keys_onoff); + /* Set read map for all fields in from table */ + from->default_column_bitmaps(); + bitmap_set_all(from->read_set); + from->file->column_bitmaps_signal(); + /* We can abort alter table for any table type */ thd->abort_on_warning= !ignore && thd->is_strict_mode(); @@ -10172,7 +10214,6 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, to->file->extra(HA_EXTRA_PREPARE_FOR_ALTER_TABLE); to->file->ha_start_bulk_insert(from->file->stats.records, ignore ? 0 : HA_CREATE_UNIQUE_INDEX_BY_SORT); - List_iterator it(create); Create_field *def; copy_end=copy; @@ -10285,15 +10326,15 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, if (!ignore) /* for now, InnoDB needs the undo log for ALTER IGNORE */ to->file->extra(HA_EXTRA_BEGIN_ALTER_COPY); - while (!(error= info.read_record())) + while (likely(!(error= info.read_record()))) { - if (thd->killed) + if (unlikely(thd->killed)) { thd->send_kill_message(); error= 1; break; } - if (++thd->progress.counter >= time_to_report_progress) + if (unlikely(++thd->progress.counter >= time_to_report_progress)) { time_to_report_progress+= MY_HOW_OFTEN_TO_WRITE/10; thd_progress_report(thd, thd->progress.counter, @@ -10301,7 +10342,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, } /* Return error if source table isn't empty. */ - if (alter_ctx->error_if_not_empty) + if (unlikely(alter_ctx->error_if_not_empty)) { error= 1; break; @@ -10343,7 +10384,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, /* This will set thd->is_error() if fatal failure */ if (to->verify_constraints(ignore) == VIEW_CHECK_SKIP) continue; - if (thd->is_error()) + if (unlikely(thd->is_error())) { error= 1; break; @@ -10353,7 +10394,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, error= to->file->ha_write_row(to->record[0]); to->auto_increment_field_not_null= FALSE; - if (error) + if (unlikely(error)) { if (to->file->is_fatal_error(error, HA_CHECK_DUP)) { @@ -10365,7 +10406,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, else { /* Duplicate key error. 
*/ - if (alter_ctx->fk_error_if_delete_row) + if (unlikely(alter_ctx->fk_error_if_delete_row)) { /* We are trying to omit a row from the table which serves as parent @@ -10421,7 +10462,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, /* We are going to drop the temporary table */ to->file->extra(HA_EXTRA_PREPARE_FOR_DROP); } - if (to->file->ha_end_bulk_insert() && error <= 0) + if (unlikely(to->file->ha_end_bulk_insert()) && error <= 0) { /* Give error, if not already given */ if (!thd->is_error()) @@ -10430,9 +10471,11 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, } if (!ignore) to->file->extra(HA_EXTRA_END_ALTER_COPY); + + cleanup_done= 1; to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); - if (mysql_trans_commit_alter_copy_data(thd)) + if (unlikely(mysql_trans_commit_alter_copy_data(thd))) error= 1; err: @@ -10447,6 +10490,15 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, *copied= found_count; *deleted=delete_count; to->file->ha_release_auto_increment(); + + if (!cleanup_done) + { + /* This happens if we get an error during initialization of data */ + DBUG_ASSERT(error); + to->file->ha_end_bulk_insert(); + ha_enable_transaction(thd, TRUE); + } + if (to->file->ha_external_lock(thd,F_UNLCK)) error=1; if (error < 0 && !from->s->tmp_table && @@ -10623,8 +10675,6 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, int error= t->file->ha_rnd_next(t->record[0]); if (unlikely(error)) { - if (error == HA_ERR_RECORD_DELETED) - continue; break; } if (t->s->null_bytes) diff --git a/sql/sql_tablespace.cc b/sql/sql_tablespace.cc index 93a3007d1ea..d97d50912e6 100644 --- a/sql/sql_tablespace.cc +++ b/sql/sql_tablespace.cc @@ -46,21 +46,16 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info) if (hton->alter_tablespace) { - if ((error= hton->alter_tablespace(hton, thd, ts_info))) + if (unlikely((error= hton->alter_tablespace(hton, thd, ts_info)))) { if (error == 1) - { DBUG_RETURN(1); - } if (error == HA_ADMIN_NOT_IMPLEMENTED) - { my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), ""); - } else - { my_error(error, MYF(0)); - } + DBUG_RETURN(error); } } diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index 4c1e2a51fbf..6961f821a0a 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -1,5 +1,6 @@ /* Copyright (c) 2004, 2012, Oracle and/or its affiliates. + Copyright (c) 2010, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -430,7 +431,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) /* We don't allow creating triggers on tables in the 'mysql' schema */ - if (create && !my_strcasecmp(system_charset_info, "mysql", tables->db.str)) + if (create && lex_string_eq(&tables->db, STRING_WITH_LEN("mysql"))) { my_error(ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA, MYF(0)); DBUG_RETURN(TRUE); @@ -588,7 +589,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) Ignore the return value for now. It's better to keep master/slave in consistent state. */ - if (thd->locked_tables_list.reopen_tables(thd)) + if (thd->locked_tables_list.reopen_tables(thd, false)) thd->clear_error(); /* @@ -623,6 +624,7 @@ end: #endif /* WITH_WSREP */ } + /** Build stmt_query to write it in the bin-log, the statement to write in the trigger file and the trigger definer.
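A note on the copy_data_between_tables() hunks above (sql_table.cc): the patch reorders the setup so the Copy_field array is allocated before the transaction is prepared, makes every early-return path undo exactly the steps that already succeeded (delete[] copy, re-enable the transaction), and adds a cleanup_done flag so the shared exit path knows whether the normal teardown (ending the bulk insert, re-enabling the transaction) still has to run. The following is a minimal standalone sketch of that acquire-in-order / undo-in-reverse shape; prepare_copy(), lock_target() and friends are invented stand-ins, not the server's ha_enable_transaction()/ha_external_lock()/ha_end_bulk_insert() calls.

#include <cstddef>
#include <memory>
#include <new>

// Hypothetical stand-ins for the engine calls used in the patch.
static bool prepare_copy()   { return true; }   // e.g. disable the transaction
static void unprepare_copy() { }                // re-enable the transaction
static bool lock_target()    { return true; }
static void unlock_target()  { }

// Acquire in order, undo in reverse on failure; cleanup_done tells the
// shared exit path whether the normal teardown already ran.
static int copy_rows(std::size_t nfields)
{
  std::unique_ptr<int[]> copy(new (std::nothrow) int[nfields]);
  if (!copy)
    return -1;                      // nothing else to undo yet

  if (!prepare_copy())
    return -1;                      // copy[] released by unique_ptr

  if (!lock_target())
  {
    unprepare_copy();               // undo only the prepare step
    return -1;
  }

  int error= 0;
  bool cleanup_done= false;
  for (std::size_t i= 0; i < nfields && !error; i++)
    copy[i]= 0;                     // stands in for the row-copy loop
  if (!error)
    cleanup_done= true;             // normal teardown ran

  if (!cleanup_done)
    unprepare_copy();               // an early error skipped the teardown
  unlock_target();
  return error;
}

int main() { return copy_rows(4); }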
@@ -1190,6 +1192,12 @@ Table_triggers_list::~Table_triggers_list() } } } + + /* Free blobs used in insert */ + if (record0_field) + for (Field **fld_ptr= record0_field; *fld_ptr; fld_ptr++) + (*fld_ptr)->free(); + if (record1_field) for (Field **fld_ptr= record1_field; *fld_ptr; fld_ptr++) delete *fld_ptr; @@ -1240,6 +1248,7 @@ bool Table_triggers_list::prepare_record_accessors(TABLE *table) return 1; f->flags= (*fld)->flags; + f->invisible= (*fld)->invisible; f->null_ptr= null_ptr; f->null_bit= null_bit; if (null_bit == 128) @@ -1368,12 +1377,12 @@ bool Table_triggers_list::check_n_load(THD *thd, const LEX_CSTRING *db, List_iterator_fast it_connection_cl_name(trigger_list->connection_cl_names); List_iterator_fast it_db_cl_name(trigger_list->db_cl_names); List_iterator_fast it_create_times(trigger_list->create_times); - LEX *old_lex= thd->lex, *old_stmt_lex= thd->stmt_lex; + LEX *old_lex= thd->lex; LEX lex; sp_rcontext *save_spcont= thd->spcont; sql_mode_t save_sql_mode= thd->variables.sql_mode; - thd->lex= thd->stmt_lex= &lex; + thd->lex= &lex; save_db= thd->db; thd->reset_db(db); @@ -1475,7 +1484,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const LEX_CSTRING *db, &lex.trg_chistics.anchor_trigger_name, trigger); - if (parse_error) + if (unlikely(parse_error)) { LEX_CSTRING *name; @@ -1489,10 +1498,10 @@ bool Table_triggers_list::check_n_load(THD *thd, const LEX_CSTRING *db, DBUG_ASSERT(lex.sphead == 0); lex_end(&lex); - if ((name= error_handler.get_trigger_name())) + if (likely((name= error_handler.get_trigger_name()))) { - if (!(make_lex_string(&trigger->name, name->str, - name->length, &table->mem_root))) + if (unlikely(!(make_lex_string(&trigger->name, name->str, + name->length, &table->mem_root)))) goto err_with_lex_cleanup; } trigger->definer= ((!trg_definer || !trg_definer->length) ? 
@@ -1590,7 +1599,6 @@ bool Table_triggers_list::check_n_load(THD *thd, const LEX_CSTRING *db, } thd->reset_db(&save_db); thd->lex= old_lex; - thd->stmt_lex= old_stmt_lex; thd->spcont= save_spcont; thd->variables.sql_mode= save_sql_mode; @@ -1604,7 +1612,6 @@ bool Table_triggers_list::check_n_load(THD *thd, const LEX_CSTRING *db, err_with_lex_cleanup: lex_end(&lex); thd->lex= old_lex; - thd->stmt_lex= old_stmt_lex; thd->spcont= save_spcont; thd->variables.sql_mode= save_sql_mode; thd->reset_db(&save_db); @@ -1613,7 +1620,7 @@ err_with_lex_cleanup: } error: - if (!thd->is_error()) + if (unlikely(!thd->is_error())) { /* We don't care about this error message much because .TRG files will @@ -1889,7 +1896,7 @@ change_table_name_in_triggers(THD *thd, thd->variables.sql_mode= save_sql_mode; - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) return TRUE; /* OOM */ if (save_trigger_file(thd, new_db_name, new_table_name)) @@ -2101,9 +2108,9 @@ bool Table_triggers_list::change_table_name(THD *thd, const LEX_CSTRING *db, goto end; } } - if (table.triggers->change_table_name_in_triggers(thd, db, new_db, - old_alias, - new_table)) + if (unlikely(table.triggers->change_table_name_in_triggers(thd, db, new_db, + old_alias, + new_table))) { result= 1; goto end; @@ -2247,7 +2254,7 @@ add_tables_and_routines_for_triggers(THD *thd, { sp_head *trigger= triggers->body; - if (!triggers->body) // Parse error + if (unlikely(!triggers->body)) // Parse error continue; MDL_key key(MDL_key::TRIGGER, trigger->m_db.str, trigger->m_name.str); diff --git a/sql/sql_truncate.cc b/sql/sql_truncate.cc index 2ddb4bc042c..cf91d6d2189 100644 --- a/sql/sql_truncate.cc +++ b/sql/sql_truncate.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2010, 2015, Oracle and/or its affiliates. - Copyright (c) 2013, 2015, MariaDB + Copyright (c) 2012, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -139,7 +139,7 @@ fk_truncate_illegal_if_parent(THD *thd, TABLE *table) table->file->get_parent_foreign_key_list(thd, &fk_list); /* Out of memory when building list. */ - if (thd->is_error()) + if (unlikely(thd->is_error())) return TRUE; it.init(fk_list); @@ -240,7 +240,7 @@ Sql_cmd_truncate_table::handler_truncate(THD *thd, TABLE_LIST *table_ref, DBUG_RETURN(TRUNCATE_FAILED_SKIP_BINLOG); error= table_ref->table->file->ha_truncate(); - if (error) + if (unlikely(error)) { table_ref->table->file->print_error(error, MYF(0)); /* @@ -302,7 +302,7 @@ bool Sql_cmd_truncate_table::lock_table(THD *thd, TABLE_LIST *table_ref, table_ref->table_name.str, FALSE))) DBUG_RETURN(TRUE); - *hton_can_recreate= ha_check_storage_engine_flag(table->s->db_type(), + *hton_can_recreate= ha_check_storage_engine_flag(table->file->ht, HTON_CAN_RECREATE); table_ref->mdl_request.ticket= table->mdl_ticket; } @@ -427,8 +427,11 @@ bool Sql_cmd_truncate_table::truncate_table(THD *thd, TABLE_LIST *table_ref) */ error= dd_recreate_table(thd, table_ref->db.str, table_ref->table_name.str); - if (thd->locked_tables_mode && thd->locked_tables_list.reopen_tables(thd)) - thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0); + if (thd->locked_tables_mode && thd->locked_tables_list.reopen_tables(thd, false)) + { + thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0); + error=1; + } /* No need to binlog a failed truncate-by-recreate. 
*/ binlog_stmt= !error; @@ -447,7 +450,7 @@ bool Sql_cmd_truncate_table::truncate_table(THD *thd, TABLE_LIST *table_ref) query must be written to the binary log. The only exception is a unimplemented truncate method. */ - if (error == TRUNCATE_OK || error == TRUNCATE_FAILED_BUT_BINLOG) + if (unlikely(error == TRUNCATE_OK || error == TRUNCATE_FAILED_BUT_BINLOG)) binlog_stmt= true; else binlog_stmt= false; @@ -500,4 +503,3 @@ bool Sql_cmd_truncate_table::execute(THD *thd) DBUG_RETURN(res); } - diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc index 7004c32e602..188ba8c4629 100644 --- a/sql/sql_tvc.cc +++ b/sql/sql_tvc.cc @@ -221,6 +221,12 @@ bool table_value_constr::prepare(THD *thd, SELECT_LEX *sl, uint cnt= first_elem->elements; Type_holder *holders; + if (cnt == 0) + { + my_error(ER_EMPTY_ROW_IN_TVC, MYF(0)); + DBUG_RETURN(true); + } + if (fix_fields_for_tvc(thd, li)) DBUG_RETURN(true); @@ -249,7 +255,7 @@ bool table_value_constr::prepare(THD *thd, SELECT_LEX *sl, sl->item_list.push_back(new_holder); } - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) DBUG_RETURN(true); // out of memory result= tmp_result; diff --git a/sql/sql_type.cc b/sql/sql_type.cc index 0c9dc57dcd3..07a5fea6551 100644 --- a/sql/sql_type.cc +++ b/sql/sql_type.cc @@ -161,15 +161,6 @@ void Temporal_with_date::make_from_item(THD *thd, Item *item, sql_mode_t flags) } -void Type_std_attributes::set(const Field *field) -{ - decimals= field->decimals(); - unsigned_flag= MY_TEST(field->flags & UNSIGNED_FLAG); - collation.set(field->charset(), field->derivation(), field->repertoire()); - fix_char_length(field->char_length()); -} - - uint Type_std_attributes::count_max_decimals(Item **item, uint nitems) { uint res= 0; @@ -476,6 +467,20 @@ const Name Type_handler_datetime_common::m_name_datetime(STRING_WITH_LEN("datetime")), Type_handler_timestamp_common::m_name_timestamp(STRING_WITH_LEN("timestamp")); + +const Type_limits_int + Type_handler_tiny::m_limits_sint8= Type_limits_sint8(), + Type_handler_tiny::m_limits_uint8= Type_limits_uint8(), + Type_handler_short::m_limits_sint16= Type_limits_sint16(), + Type_handler_short::m_limits_uint16= Type_limits_uint16(), + Type_handler_int24::m_limits_sint24= Type_limits_sint24(), + Type_handler_int24::m_limits_uint24= Type_limits_uint24(), + Type_handler_long::m_limits_sint32= Type_limits_sint32(), + Type_handler_long::m_limits_uint32= Type_limits_uint32(), + Type_handler_longlong::m_limits_sint64= Type_limits_sint64(), + Type_handler_longlong::m_limits_uint64= Type_limits_uint64(); + + /***************************************************************************/ const Type_handler *Type_handler_null::type_handler_for_comparison() const @@ -696,9 +701,7 @@ Type_handler_hybrid_field_type::aggregate_for_comparison(const Type_handler *h) Item_result a= cmp_type(); Item_result b= h->cmp_type(); - if (m_vers_trx_id && (a == STRING_RESULT || b == STRING_RESULT)) - m_type_handler= &type_handler_datetime; - else if (a == STRING_RESULT && b == STRING_RESULT) + if (a == STRING_RESULT && b == STRING_RESULT) m_type_handler= &type_handler_long_blob; else if (a == INT_RESULT && b == INT_RESULT) m_type_handler= &type_handler_longlong; @@ -2506,6 +2509,14 @@ uint32 Type_handler_bit::max_display_length(const Item *item) const return item->max_length; } + +uint32 Type_handler_general_purpose_int::max_display_length(const Item *item) + const +{ + return type_limits_int_by_unsigned_flag(item->unsigned_flag)->char_length(); +} + + 
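The max_display_length() override added just above replaces the per-class hard-coded widths (4, 6, 8, MY_INT32_NUM_DECIMAL_DIGITS and 20, removed in the sql_type.h hunks below) with a lookup into a per-type Type_limits_int table that distinguishes signed from unsigned, since a signed value may need one extra character for the minus sign. Below is a minimal standalone sketch of that lookup using plain structs instead of the Item/Type_handler machinery; the width values are copied from the comments of the Type_limits_* classes further down.

#include <cstdint>
#include <cstdio>

// Simplified stand-in for Type_limits_int: maximum decimal digits and the
// number of characters needed to display any value of the type.
struct Int_limits
{
  uint32_t precision;    // digits
  uint32_t char_length;  // digits, plus one for '-' where it can appear
};

// Width values taken from the Type_limits_* comments in the patch
// (TINYINT, INT and BIGINT shown; SMALLINT/MEDIUMINT follow the same pattern).
static const Int_limits limits_uint8 = { 3, 3 },   limits_sint8 = { 3, 4 };
static const Int_limits limits_uint32= { 10, 10 }, limits_sint32= { 10, 11 };
static const Int_limits limits_uint64= { 20, 20 }, limits_sint64= { 19, 20 };

// The lookup max_display_length() now performs: pick the row by signedness
// and report its char_length as the display width.
static uint32_t display_length(const Int_limits &s, const Int_limits &u,
                               bool unsigned_flag)
{
  return (unsigned_flag ? u : s).char_length;
}

int main()
{
  std::printf("TINYINT: unsigned %u, signed %u\n",
              (unsigned) display_length(limits_sint8, limits_uint8, true),
              (unsigned) display_length(limits_sint8, limits_uint8, false));
  std::printf("INT:     unsigned %u, signed %u\n",
              (unsigned) display_length(limits_sint32, limits_uint32, true),
              (unsigned) display_length(limits_sint32, limits_uint32, false));
  std::printf("BIGINT:  unsigned %u, signed %u\n",
              (unsigned) display_length(limits_sint64, limits_uint64, true),
              (unsigned) display_length(limits_sint64, limits_uint64, false));
  return 0;
}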
/*************************************************************************/ int Type_handler_time_common::Item_save_in_field(Item *item, Field *field, @@ -6285,4 +6296,61 @@ bool Type_handler_geometry:: } #endif +bool Type_handler::Vers_history_point_resolve_unit(THD *thd, + Vers_history_point *point) + const +{ + /* + Disallow using non-relevant data types in history points. + Even expressions with explicit TRANSACTION or TIMESTAMP units. + */ + point->bad_expression_data_type_error(name().ptr()); + return true; +} + + +bool Type_handler_typelib:: + Vers_history_point_resolve_unit(THD *thd, + Vers_history_point *point) const +{ + /* + ENUM/SET have dual type properties (string and numeric). + Require explicit CAST to avoid ambiguity. + */ + point->bad_expression_data_type_error(name().ptr()); + return true; +} + + +bool Type_handler_general_purpose_int:: + Vers_history_point_resolve_unit(THD *thd, + Vers_history_point *point) const +{ + return point->resolve_unit_trx_id(thd); +} + + +bool Type_handler_bit:: + Vers_history_point_resolve_unit(THD *thd, + Vers_history_point *point) const +{ + return point->resolve_unit_trx_id(thd); +} + + +bool Type_handler_temporal_result:: + Vers_history_point_resolve_unit(THD *thd, + Vers_history_point *point) const +{ + return point->resolve_unit_timestamp(thd); +} + + +bool Type_handler_general_purpose_string:: + Vers_history_point_resolve_unit(THD *thd, + Vers_history_point *point) const +{ + return point->resolve_unit_timestamp(thd); +} + /***************************************************************************/ diff --git a/sql/sql_type.h b/sql/sql_type.h index b9d739be8b9..ad554a91024 100644 --- a/sql/sql_type.h +++ b/sql/sql_type.h @@ -73,6 +73,7 @@ class handler; struct Schema_specification_st; struct TABLE; struct SORT_FIELD_ATTR; +class Vers_history_point; /** @@ -618,7 +619,6 @@ public: { *this= other; } - void set(const Field *field); uint32 max_char_length() const { return max_length / collation.collation->mbmaxlen; } void fix_length_and_charset(uint32 max_char_length_arg, CHARSET_INFO *cs) @@ -951,6 +951,74 @@ public: }; +class Information_schema_numeric_attributes +{ + enum enum_attr + { + ATTR_NONE= 0, + ATTR_PRECISION= 1, + ATTR_SCALE= 2, + ATTR_PRECISION_AND_SCALE= (ATTR_PRECISION|ATTR_SCALE) + }; + uint m_precision; + uint m_scale; + enum_attr m_available_attributes; +public: + Information_schema_numeric_attributes() + :m_precision(0), m_scale(0), + m_available_attributes(ATTR_NONE) + { } + Information_schema_numeric_attributes(uint precision) + :m_precision(precision), m_scale(0), + m_available_attributes(ATTR_PRECISION) + { } + Information_schema_numeric_attributes(uint precision, uint scale) + :m_precision(precision), m_scale(scale), + m_available_attributes(ATTR_PRECISION_AND_SCALE) + { } + bool has_precision() const { return m_available_attributes & ATTR_PRECISION; } + bool has_scale() const { return m_available_attributes & ATTR_SCALE; } + uint precision() const + { + DBUG_ASSERT(has_precision()); + return (uint) m_precision; + } + uint scale() const + { + DBUG_ASSERT(has_scale()); + return (uint) m_scale; + } +}; + + +class Information_schema_character_attributes +{ + uint32 m_octet_length; + uint32 m_char_length; + bool m_is_set; +public: + Information_schema_character_attributes() + :m_octet_length(0), m_char_length(0), m_is_set(false) + { } + Information_schema_character_attributes(uint32 octet_length, + uint32 char_length) + :m_octet_length(octet_length), m_char_length(char_length), m_is_set(true) + { } + bool 
has_octet_length() const { return m_is_set; } + bool has_char_length() const { return m_is_set; } + uint32 octet_length() const + { + DBUG_ASSERT(has_octet_length()); + return m_octet_length; + } + uint char_length() const + { + DBUG_ASSERT(has_char_length()); + return m_char_length; + } +}; + + class Type_handler { protected: @@ -1430,6 +1498,9 @@ public: Item_func_div_fix_length_and_dec(Item_func_div *func) const= 0; virtual bool Item_func_mod_fix_length_and_dec(Item_func_mod *func) const= 0; + + virtual bool + Vers_history_point_resolve_unit(THD *thd, Vers_history_point *point) const; }; @@ -1932,6 +2003,131 @@ public: }; + +class Type_limits_int +{ +private: + uint32 m_precision; + uint32 m_char_length; +public: + Type_limits_int(uint32 prec, uint32 nchars) + :m_precision(prec), m_char_length(nchars) + { } + uint32 precision() const { return m_precision; } + uint32 char_length() const { return m_char_length; } +}; + + +/* + UNSIGNED TINYINT: 0..255 digits=3 nchars=3 + SIGNED TINYINT : -128..127 digits=3 nchars=4 +*/ +class Type_limits_uint8: public Type_limits_int +{ +public: + Type_limits_uint8() + :Type_limits_int(MAX_TINYINT_WIDTH, MAX_TINYINT_WIDTH) + { } +}; + + +class Type_limits_sint8: public Type_limits_int +{ +public: + Type_limits_sint8() + :Type_limits_int(MAX_TINYINT_WIDTH, MAX_TINYINT_WIDTH + 1) + { } +}; + + +/* + UNSIGNED SMALLINT: 0..65535 digits=5 nchars=5 + SIGNED SMALLINT: -32768..32767 digits=5 nchars=6 +*/ +class Type_limits_uint16: public Type_limits_int +{ +public: + Type_limits_uint16() + :Type_limits_int(MAX_SMALLINT_WIDTH, MAX_SMALLINT_WIDTH) + { } +}; + + +class Type_limits_sint16: public Type_limits_int +{ +public: + Type_limits_sint16() + :Type_limits_int(MAX_SMALLINT_WIDTH, MAX_SMALLINT_WIDTH + 1) + { } +}; + + +/* + MEDIUMINT UNSIGNED 0 .. 16777215 digits=8 char_length=8 + MEDIUMINT SIGNED: -8388608 ..
8388607 digits=7 char_length=8 +*/ +class Type_limits_uint24: public Type_limits_int +{ +public: + Type_limits_uint24() + :Type_limits_int(MAX_MEDIUMINT_WIDTH, MAX_MEDIUMINT_WIDTH) + { } +}; + + +class Type_limits_sint24: public Type_limits_int +{ +public: + Type_limits_sint24() + :Type_limits_int(MAX_MEDIUMINT_WIDTH - 1, MAX_MEDIUMINT_WIDTH) + { } +}; + + +/* + UNSIGNED INT: 0..4294967295 digits=10 nchars=10 + SIGNED INT: -2147483648..2147483647 digits=10 nchars=11 +*/ +class Type_limits_uint32: public Type_limits_int +{ +public: + Type_limits_uint32() + :Type_limits_int(MAX_INT_WIDTH, MAX_INT_WIDTH) + { } +}; + + + +class Type_limits_sint32: public Type_limits_int +{ +public: + Type_limits_sint32() + :Type_limits_int(MAX_INT_WIDTH, MAX_INT_WIDTH + 1) + { } +}; + + +/* + UNSIGNED BIGINT: 0..18446744073709551615 digits=20 nchars=20 + SIGNED BIGINT: -9223372036854775808..9223372036854775807 digits=19 nchars=20 +*/ +class Type_limits_uint64: public Type_limits_int +{ +public: + Type_limits_uint64(): Type_limits_int(MAX_BIGINT_WIDTH, MAX_BIGINT_WIDTH) + { } +}; + + +class Type_limits_sint64: public Type_limits_int +{ +public: + Type_limits_sint64() + :Type_limits_int(MAX_BIGINT_WIDTH - 1, MAX_BIGINT_WIDTH) + { } +}; + + + class Type_handler_int_result: public Type_handler_numeric { public: @@ -2007,6 +2203,10 @@ class Type_handler_general_purpose_int: public Type_handler_int_result { public: bool type_can_have_auto_increment_attribute() const { return true; } + virtual const Type_limits_int * + type_limits_int_by_unsigned_flag(bool unsigned_flag) const= 0; + uint32 max_display_length(const Item *item) const; + bool Vers_history_point_resolve_unit(THD *thd, Vers_history_point *p) const; }; @@ -2076,6 +2276,7 @@ public: bool Item_func_mul_fix_length_and_dec(Item_func_mul *) const; bool Item_func_div_fix_length_and_dec(Item_func_div *) const; bool Item_func_mod_fix_length_and_dec(Item_func_mod *) const; + bool Vers_history_point_resolve_unit(THD *thd, Vers_history_point *p) const; }; @@ -2200,6 +2401,7 @@ class Type_handler_general_purpose_string: public Type_handler_string_result { public: bool is_general_purpose_string_type() const { return true; } + bool Vers_history_point_resolve_unit(THD *thd, Vers_history_point *p) const; }; @@ -2226,11 +2428,16 @@ public: class Type_handler_tiny: public Type_handler_general_purpose_int { static const Name m_name_tiny; + static const Type_limits_int m_limits_sint8; + static const Type_limits_int m_limits_uint8; public: virtual ~Type_handler_tiny() {} const Name name() const { return m_name_tiny; } enum_field_types field_type() const { return MYSQL_TYPE_TINY; } - uint32 max_display_length(const Item *item) const { return 4; } + const Type_limits_int *type_limits_int_by_unsigned_flag(bool unsigned_fl) const + { + return unsigned_fl ? &m_limits_uint8 : &m_limits_sint8; + } uint32 calc_pack_length(uint32 length) const { return 1; } bool Item_send(Item *item, Protocol *protocol, st_value *buf) const { @@ -2262,6 +2469,8 @@ public: class Type_handler_short: public Type_handler_general_purpose_int { static const Name m_name_short; + static const Type_limits_int m_limits_sint16; + static const Type_limits_int m_limits_uint16; public: virtual ~Type_handler_short() {} const Name name() const { return m_name_short; } @@ -2270,7 +2479,10 @@ public: { return Item_send_short(item, protocol, buf); } - uint32 max_display_length(const Item *item) const { return 6; } + const Type_limits_int *type_limits_int_by_unsigned_flag(bool unsigned_fl) const + { + return unsigned_fl ? 
&m_limits_uint16 : &m_limits_sint16; + } uint32 calc_pack_length(uint32 length) const { return 2; } Field *make_conversion_table_field(TABLE *TABLE, uint metadata, const Field *target) const; @@ -2298,13 +2510,15 @@ public: class Type_handler_long: public Type_handler_general_purpose_int { static const Name m_name_int; + static const Type_limits_int m_limits_sint32; + static const Type_limits_int m_limits_uint32; public: virtual ~Type_handler_long() {} const Name name() const { return m_name_int; } enum_field_types field_type() const { return MYSQL_TYPE_LONG; } - uint32 max_display_length(const Item *item) const + const Type_limits_int *type_limits_int_by_unsigned_flag(bool unsigned_fl) const { - return MY_INT32_NUM_DECIMAL_DIGITS; + return unsigned_fl ? &m_limits_uint32 : &m_limits_sint32; } uint32 calc_pack_length(uint32 length) const { return 4; } bool Item_send(Item *item, Protocol *protocol, st_value *buf) const @@ -2347,11 +2561,16 @@ public: class Type_handler_longlong: public Type_handler_general_purpose_int { static const Name m_name_longlong; + static const Type_limits_int m_limits_sint64; + static const Type_limits_int m_limits_uint64; public: virtual ~Type_handler_longlong() {} const Name name() const { return m_name_longlong; } enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; } - uint32 max_display_length(const Item *item) const { return 20; } + const Type_limits_int *type_limits_int_by_unsigned_flag(bool unsigned_fl) const + { + return unsigned_fl ? &m_limits_uint64 : &m_limits_sint64; + } uint32 calc_pack_length(uint32 length) const { return 8; } Item *create_typecast_item(THD *thd, Item *item, const Type_cast_attributes &attr) const; @@ -2398,6 +2617,8 @@ public: class Type_handler_int24: public Type_handler_general_purpose_int { static const Name m_name_mediumint; + static const Type_limits_int m_limits_sint24; + static const Type_limits_int m_limits_uint24; public: virtual ~Type_handler_int24() {} const Name name() const { return m_name_mediumint; } @@ -2406,7 +2627,10 @@ public: { return Item_send_long(item, protocol, buf); } - uint32 max_display_length(const Item *item) const { return 8; } + const Type_limits_int *type_limits_int_by_unsigned_flag(bool unsigned_fl) const + { + return unsigned_fl ? 
&m_limits_uint24 : &m_limits_sint24; + } uint32 calc_pack_length(uint32 length) const { return 3; } Field *make_conversion_table_field(TABLE *, uint metadata, const Field *target) const; @@ -2509,6 +2733,7 @@ public: const Bit_addr &bit, const Column_definition_attributes *attr, uint32 flags) const; + bool Vers_history_point_resolve_unit(THD *thd, Vers_history_point *p) const; }; @@ -3455,6 +3680,7 @@ public: const; void Item_param_set_param_func(Item_param *param, uchar **pos, ulong len) const; + bool Vers_history_point_resolve_unit(THD *thd, Vers_history_point *p) const; }; @@ -3534,16 +3760,15 @@ public: class Type_handler_hybrid_field_type { const Type_handler *m_type_handler; - bool m_vers_trx_id; bool aggregate_for_min_max(const Type_handler *other); public: Type_handler_hybrid_field_type(); Type_handler_hybrid_field_type(const Type_handler *handler) - :m_type_handler(handler), m_vers_trx_id(false) + :m_type_handler(handler) { } Type_handler_hybrid_field_type(const Type_handler_hybrid_field_type *other) - :m_type_handler(other->m_type_handler), m_vers_trx_id(other->m_vers_trx_id) + :m_type_handler(other->m_type_handler) { } void swap(Type_handler_hybrid_field_type &other) { diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index 3eb50d45b42..9a036156de6 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -252,7 +252,7 @@ void udf_init() } } } - if (error > 0) + if (unlikely(error > 0)) sql_print_error("Got unknown error: %d", my_errno); end_read_record(&read_record_info); table->m_needs_reopen= TRUE; // Force close to free memory @@ -453,7 +453,7 @@ static int mysql_drop_function_internal(THD *thd, udf_func *udf, TABLE *table) HA_READ_KEY_EXACT)) { int error; - if ((error= table->file->ha_delete_row(table->record[0]))) + if (unlikely((error= table->file->ha_delete_row(table->record[0])))) table->file->print_error(error, MYF(0)); } DBUG_RETURN(0); @@ -513,7 +513,7 @@ int mysql_create_function(THD *thd,udf_func *udf) { if (thd->lex->create_info.or_replace()) { - if ((error= mysql_drop_function_internal(thd, u_d, table))) + if (unlikely((error= mysql_drop_function_internal(thd, u_d, table)))) goto err; } else if (thd->lex->create_info.if_not_exists()) @@ -569,7 +569,7 @@ int mysql_create_function(THD *thd,udf_func *udf) /* create entry in mysql.func table */ /* Allow creation of functions even if we can't open func table */ - if (!table) + if (unlikely(!table)) goto err; table->use_all_columns(); restore_record(table, s->default_values); // Default values for fields @@ -578,9 +578,9 @@ int mysql_create_function(THD *thd,udf_func *udf) table->field[2]->store(u_d->dl,(uint) strlen(u_d->dl), system_charset_info); if (table->s->fields >= 4) // If not old func format table->field[3]->store((longlong) u_d->type, TRUE); - error = table->file->ha_write_row(table->record[0]); + error= table->file->ha_write_row(table->record[0]); - if (error) + if (unlikely(error)) { my_error(ER_ERROR_ON_WRITE, MYF(0), "mysql.func", error); del_udf(u_d); @@ -591,7 +591,7 @@ done: mysql_rwlock_unlock(&THR_LOCK_udf); /* Binlog the create function. 
*/ - if (write_bin_log(thd, TRUE, thd->query(), thd->query_length())) + if (unlikely(write_bin_log(thd, TRUE, thd->query(), thd->query_length()))) DBUG_RETURN(1); DBUG_RETURN(0); diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 0149c2848c2..a1963c33a42 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -36,7 +36,7 @@ bool mysql_union(THD *thd, LEX *lex, select_result *result, { DBUG_ENTER("mysql_union"); bool res; - if (!(res= unit->prepare(thd, result, SELECT_NO_UNLOCK | + if (!(res= unit->prepare(unit->derived, result, SELECT_NO_UNLOCK | setup_tables_done_option))) res= unit->exec(); res|= unit->cleanup(); @@ -126,7 +126,7 @@ int select_unit::send_data(List &values) } else fill_record(thd, table, table->field, values, TRUE, FALSE); - if (thd->is_error()) + if (unlikely(thd->is_error())) { rc= 1; goto end; @@ -146,7 +146,8 @@ int select_unit::send_data(List &values) { case UNION_TYPE: { - if ((write_err= table->file->ha_write_tmp_row(table->record[0]))) + if (unlikely((write_err= + table->file->ha_write_tmp_row(table->record[0])))) { if (write_err == HA_ERR_FOUND_DUPP_KEY) { @@ -235,7 +236,7 @@ int select_unit::send_data(List &values) rc= 0; end: - if (not_reported_error) + if (unlikely(not_reported_error)) { DBUG_ASSERT(rc); table->file->print_error(not_reported_error, MYF(0)); @@ -267,32 +268,26 @@ bool select_unit::send_eof() handler *file= table->file; int error; - if (file->ha_rnd_init_with_error(1)) + if (unlikely(file->ha_rnd_init_with_error(1))) return 1; do { - error= file->ha_rnd_next(table->record[0]); - if (error) + if (unlikely(error= file->ha_rnd_next(table->record[0]))) { if (error == HA_ERR_END_OF_FILE) { error= 0; break; } - if (unlikely(error == HA_ERR_RECORD_DELETED)) - { - error= 0; - continue; - } break; } if (table->field[0]->val_int() != curr_step) error= file->ha_delete_tmp_row(table->record[0]); - } while (!error); + } while (likely(!error)); file->ha_rnd_end(); - if (error) + if (unlikely(error)) table->file->print_error(error, MYF(0)); return(MY_TEST(error)); @@ -325,7 +320,7 @@ int select_union_recursive::send_data(List &values) bool select_unit::flush() { int error; - if ((error=table->file->extra(HA_EXTRA_NO_CACHE))) + if (unlikely((error=table->file->extra(HA_EXTRA_NO_CACHE)))) { table->file->print_error(error, MYF(0)); return 1; @@ -413,19 +408,13 @@ select_union_recursive::create_result_table(THD *thd_arg, if (! (incr_table= create_tmp_table(thd_arg, &tmp_table_param, *column_types, (ORDER*) 0, false, 1, options, HA_POS_ERROR, &empty_clex_str, - !create_table, keep_row_order))) + true, keep_row_order))) return true; incr_table->keys_in_use_for_query.clear_all(); for (uint i=0; i < table->s->fields; i++) incr_table->field[i]->flags &= ~PART_KEY_FLAG; - if (create_table) - { - incr_table->file->extra(HA_EXTRA_WRITE_CACHE); - incr_table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - } - TABLE *rec_table= 0; if (! 
(rec_table= create_tmp_table(thd_arg, &tmp_table_param, *column_types, (ORDER*) 0, false, 1, @@ -469,8 +458,11 @@ void select_union_recursive::cleanup() if (incr_table) { - incr_table->file->extra(HA_EXTRA_RESET_STATE); - incr_table->file->ha_delete_all_rows(); + if (incr_table->is_created()) + { + incr_table->file->extra(HA_EXTRA_RESET_STATE); + incr_table->file->ha_delete_all_rows(); + } free_tmp_table(thd, incr_table); } @@ -507,14 +499,14 @@ void select_union_recursive::cleanup() bool select_union_direct::change_result(select_result *new_result) { result= new_result; - return (result->prepare(unit->types, unit) || result->prepare2()); + return (result->prepare(unit->types, unit) || result->prepare2(NULL)); } bool select_union_direct::postponed_prepare(List &types) { if (result != NULL) - return (result->prepare(types, unit) || result->prepare2()); + return (result->prepare(types, unit) || result->prepare2(NULL)); else return false; } @@ -555,7 +547,7 @@ int select_union_direct::send_data(List &items) send_records++; fill_record(thd, table, table->field, items, true, false); - if (thd->is_error()) + if (unlikely(thd->is_error())) return true; /* purecov: inspected */ return result->send_data(unit->item_list); @@ -678,7 +670,7 @@ bool st_select_lex_unit::prepare_join(THD *thd_arg, SELECT_LEX *sl, sl->with_wild= 0; last_procedure= join->procedure; - if (saved_error || (saved_error= thd_arg->is_fatal_error)) + if (unlikely(saved_error || (saved_error= thd_arg->is_fatal_error))) DBUG_RETURN(true); /* Remove all references from the select_lex_units to the subqueries that @@ -796,29 +788,40 @@ bool st_select_lex_unit::join_union_item_types(THD *thd_arg, join_union_type_attributes(thd_arg, holders, count)) DBUG_RETURN(true); + bool is_recursive= with_element && with_element->is_recursive; types.empty(); List_iterator_fast it(first_sl->item_list); Item *item_tmp; for (uint pos= 0; (item_tmp= it++); pos++) { + /* + SQL standard requires forced nullability only for + recursive columns. However type aggregation in our + implementation so far does not differentiate between + recursive and non-recursive columns of a recursive CTE. + TODO: this should be fixed. + */ + bool pos_maybe_null= is_recursive ? 
true : holders[pos].get_maybe_null(); + /* Error's in 'new' will be detected after loop */ types.push_back(new (thd_arg->mem_root) Item_type_holder(thd_arg, item_tmp, holders[pos].type_handler(), &holders[pos]/*Type_all_attributes*/, - holders[pos].get_maybe_null())); + pos_maybe_null)); } - if (thd_arg->is_fatal_error) + if (unlikely(thd_arg->is_fatal_error)) DBUG_RETURN(true); // out of memory DBUG_RETURN(false); } -bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, +bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg, + select_result *sel_result, ulong additional_options) { - SELECT_LEX *lex_select_save= thd_arg->lex->current_select; + SELECT_LEX *lex_select_save= thd->lex->current_select; SELECT_LEX *sl, *first_sl= first_select(); bool is_recursive= with_element && with_element->is_recursive; bool is_rec_result_table_created= false; @@ -829,9 +832,25 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, bool instantiate_tmp_table= false; bool single_tvc= !first_sl->next_select() && first_sl->tvc; DBUG_ENTER("st_select_lex_unit::prepare"); - DBUG_ASSERT(thd == thd_arg); DBUG_ASSERT(thd == current_thd); + if (is_recursive && (sl= first_sl->next_select())) + { + SELECT_LEX *next_sl; + for ( ; ; sl= next_sl) + { + next_sl= sl->next_select(); + if (!next_sl) + break; + if (next_sl->with_all_modifier != sl->with_all_modifier) + { + my_error(ER_NOT_SUPPORTED_YET, MYF(0), + "mix of ALL and DISTINCT UNION operations in recursive CTE spec"); + DBUG_RETURN(TRUE); + } + } + } + describe= additional_options & SELECT_DESCRIBE; /* @@ -881,7 +900,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, prepared= 1; saved_error= FALSE; - thd_arg->lex->current_select= sl= first_sl; + thd->lex->current_select= sl= first_sl; found_rows_for_union= first_sl->options & OPTION_FOUND_ROWS; is_union_select= is_unit_op() || fake_select_lex || single_tvc; @@ -910,7 +929,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, while (last->next_select()) last= last->next_select(); if (!(tmp_result= union_result= - new (thd_arg->mem_root) select_union_direct(thd_arg, sel_result, + new (thd->mem_root) select_union_direct(thd, sel_result, last))) goto err; /* purecov: inspected */ fake_select_lex= NULL; @@ -919,13 +938,24 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, else { if (!is_recursive) - union_result= new (thd_arg->mem_root) select_unit(thd_arg); + union_result= new (thd->mem_root) select_unit(thd); else { with_element->rec_result= - new (thd_arg->mem_root) select_union_recursive(thd_arg); + new (thd->mem_root) select_union_recursive(thd); union_result= with_element->rec_result; - fake_select_lex= NULL; + if (fake_select_lex) + { + if (fake_select_lex->order_list.first || + fake_select_lex->explicit_limit) + { + my_error(ER_NOT_SUPPORTED_YET, MYF(0), + "global ORDER_BY/LIMIT in recursive CTE spec"); + goto err; + } + fake_select_lex->cleanup(); + fake_select_lex= NULL; + } } if (!(tmp_result= union_result)) goto err; /* purecov: inspected */ @@ -941,10 +971,10 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, { if (sl->tvc) { - if (sl->tvc->prepare(thd_arg, sl, tmp_result, this)) + if (sl->tvc->prepare(thd, sl, tmp_result, this)) goto err; } - else if (prepare_join(thd_arg, first_sl, tmp_result, additional_options, + else if (prepare_join(thd, first_sl, tmp_result, additional_options, is_union_select)) goto err; types= first_sl->item_list; @@ -955,10 +985,10 
@@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, { if (sl->tvc) { - if (sl->tvc->prepare(thd_arg, sl, tmp_result, this)) + if (sl->tvc->prepare(thd, sl, tmp_result, this)) goto err; } - else if (prepare_join(thd_arg, sl, tmp_result, additional_options, + else if (prepare_join(thd, sl, tmp_result, additional_options, is_union_select)) goto err; @@ -978,7 +1008,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, { if (with_element) { - if (derived->with->rename_columns_of_derived_unit(thd, this)) + if (derived_arg->with->rename_columns_of_derived_unit(thd, this)) goto err; if (check_duplicate_names(thd, sl->item_list, 0)) goto err; @@ -989,7 +1019,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, if (first_sl->item_list.elements != sl->item_list.elements) { my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT, - ER_THD(thd_arg, ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT), + ER_THD(thd, ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT), MYF(0)); goto err; } @@ -998,25 +1028,25 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, { if (!with_element->is_anchor(sl)) sl->uncacheable|= UNCACHEABLE_UNITED; - if(!is_rec_result_table_created && - (!sl->next_select() || - sl->next_select() == with_element->first_recursive)) + if (!is_rec_result_table_created && + (!sl->next_select() || + sl->next_select() == with_element->first_recursive)) { ulonglong create_options; - create_options= (first_sl->options | thd_arg->variables.option_bits | + create_options= (first_sl->options | thd->variables.option_bits | TMP_TABLE_ALL_COLUMNS); // Join data types for all non-recursive parts of a recursive UNION if (join_union_item_types(thd, types, union_part_count + 1)) goto err; if (union_result->create_result_table(thd, &types, MY_TEST(union_distinct), - create_options, &derived->alias, - false, + create_options, + &derived_arg->alias, false, instantiate_tmp_table, false, 0)) goto err; - if (!derived->table) - derived->table= derived->derived_result->table= + if (!derived_arg->table) + derived_arg->table= derived_arg->derived_result->table= with_element->rec_result->rec_tables.head(); with_element->mark_as_with_prepared_anchor(); is_rec_result_table_created= true; @@ -1087,7 +1117,7 @@ cont: } - create_options= (first_sl->options | thd_arg->variables.option_bits | + create_options= (first_sl->options | thd->variables.option_bits | TMP_TABLE_ALL_COLUMNS); /* Force the temporary table to be a MyISAM table if we're going to use @@ -1115,7 +1145,7 @@ cont: Query_arena *arena, backup_arena; arena= thd->activate_stmt_arena_if_needed(&backup_arena); - intersect_mark= new (thd_arg->mem_root) Item_int(thd, 0); + intersect_mark= new (thd->mem_root) Item_int(thd, 0); if (arena) thd->restore_active_arena(arena, &backup_arena); @@ -1137,7 +1167,7 @@ cont: hidden); if (intersect_mark) types.pop(); - if (error) + if (unlikely(error)) goto err; } if (fake_select_lex && !fake_select_lex->first_cond_optimization) @@ -1159,7 +1189,7 @@ cont: result_table_list.maybe_null_exec= save_maybe_null; } - thd_arg->lex->current_select= lex_select_save; + thd->lex->current_select= lex_select_save; if (!item_list.elements) { Query_arena *arena, backup_arena; @@ -1174,7 +1204,7 @@ cont: if (arena) thd->restore_active_arena(arena, &backup_arena); - if (saved_error) + if (unlikely(saved_error)) goto err; if (fake_select_lex != NULL && @@ -1199,7 +1229,7 @@ cont: */ fake_select_lex->item_list= item_list; - thd_arg->lex->current_select= fake_select_lex; + 
thd->lex->current_select= fake_select_lex; /* We need to add up n_sum_items in order to make the correct @@ -1227,12 +1257,12 @@ cont: } } - thd_arg->lex->current_select= lex_select_save; + thd->lex->current_select= lex_select_save; - DBUG_RETURN(saved_error || thd_arg->is_fatal_error); + DBUG_RETURN(saved_error || thd->is_fatal_error); err: - thd_arg->lex->current_select= lex_select_save; + thd->lex->current_select= lex_select_save; (void) cleanup(); DBUG_RETURN(TRUE); } @@ -1323,7 +1353,7 @@ bool st_select_lex_unit::optimize() saved_error= sl->join->optimize(); } - if (saved_error) + if (unlikely(saved_error)) { thd->lex->current_select= lex_select_save; DBUG_RETURN(saved_error); @@ -1361,7 +1391,7 @@ bool st_select_lex_unit::exec() if (!saved_error && !was_executed) save_union_explain(thd->lex->explain); - if (saved_error) + if (unlikely(saved_error)) DBUG_RETURN(saved_error); if (union_result) @@ -1426,7 +1456,7 @@ bool st_select_lex_unit::exec() saved_error= sl->join->optimize(); } } - if (!saved_error) + if (likely(!saved_error)) { records_at_start= table->file->stats.records; if (sl->tvc) @@ -1437,7 +1467,7 @@ bool st_select_lex_unit::exec() { // This is UNION DISTINCT, so there should be a fake_select_lex DBUG_ASSERT(fake_select_lex != NULL); - if (table->file->ha_disable_indexes(HA_KEY_SWITCH_ALL)) + if (unlikely(table->file->ha_disable_indexes(HA_KEY_SWITCH_ALL))) DBUG_RETURN(TRUE); table->no_keyread=1; } @@ -1446,7 +1476,7 @@ bool st_select_lex_unit::exec() offset_limit_cnt= (ha_rows)(sl->offset_limit ? sl->offset_limit->val_uint() : 0); - if (!saved_error) + if (likely(!saved_error)) { examined_rows+= thd->get_examined_row_count(); thd->set_examined_row_count(0); @@ -1457,7 +1487,7 @@ bool st_select_lex_unit::exec() } } } - if (saved_error) + if (unlikely(saved_error)) { thd->lex->current_select= lex_select_save; DBUG_RETURN(saved_error); @@ -1466,7 +1496,7 @@ bool st_select_lex_unit::exec() { /* Needed for the following test and for records_at_start in next loop */ int error= table->file->info(HA_STATUS_VARIABLE); - if(error) + if (unlikely(error)) { table->file->print_error(error, MYF(0)); DBUG_RETURN(1); @@ -1512,7 +1542,8 @@ bool st_select_lex_unit::exec() */ thd->lex->limit_rows_examined_cnt= ULONGLONG_MAX; - if (fake_select_lex != NULL && !thd->is_fatal_error) // Check if EOM + // Check if EOM + if (fake_select_lex != NULL && likely(!thd->is_fatal_error)) { /* Send result to 'result' */ saved_error= true; @@ -1531,8 +1562,9 @@ bool st_select_lex_unit::exec() don't let it allocate the join. Perhaps this is because we need some special parameter values passed to join constructor? 
*/ - if (!(fake_select_lex->join= new JOIN(thd, item_list, - fake_select_lex->options, result))) + if (unlikely(!(fake_select_lex->join= + new JOIN(thd, item_list, fake_select_lex->options, + result)))) { fake_select_lex->table_list.empty(); goto err; @@ -1598,7 +1630,7 @@ bool st_select_lex_unit::exec() } fake_select_lex->table_list.empty(); - if (!saved_error) + if (likely(!saved_error)) { thd->limit_found_rows = (ulonglong)table->file->stats.records + add_rows; thd->inc_examined_row_count(examined_rows); @@ -1659,16 +1691,24 @@ bool st_select_lex_unit::exec_recursive() if (!was_executed) save_union_explain(thd->lex->explain); - if ((saved_error= incr_table->file->ha_delete_all_rows())) - goto err; - if (with_element->level == 0) { + if (!incr_table->is_created() && + instantiate_tmp_table(incr_table, + tmp_table_param->keyinfo, + tmp_table_param->start_recinfo, + &tmp_table_param->recinfo, + 0)) + DBUG_RETURN(1); + incr_table->file->extra(HA_EXTRA_WRITE_CACHE); + incr_table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); start= first_select(); if (with_element->with_anchor) end= with_element->first_recursive; } - + else if (unlikely((saved_error= incr_table->file->ha_delete_all_rows()))) + goto err; + for (st_select_lex *sl= start ; sl != end; sl= sl->next_select()) { if (with_element->level) @@ -1693,17 +1733,17 @@ bool st_select_lex_unit::exec_recursive() sl->join->exec(); saved_error= sl->join->error; } - if (!saved_error) + if (likely(!saved_error)) { examined_rows+= thd->get_examined_row_count(); thd->set_examined_row_count(0); - if (union_result->flush()) + if (unlikely(union_result->flush())) { thd->lex->current_select= lex_select_save; DBUG_RETURN(1); } } - if (saved_error) + if (unlikely(saved_error)) { thd->lex->current_select= lex_select_save; goto err; @@ -1869,7 +1909,7 @@ bool st_select_lex_unit::change_result(select_result_interceptor *new_result, List *st_select_lex_unit::get_column_types(bool for_cursor) { SELECT_LEX *sl= first_select(); - bool is_procedure= MY_TEST(sl->join->procedure); + bool is_procedure= !sl->tvc && sl->join->procedure ; if (is_procedure) { @@ -1904,6 +1944,7 @@ bool st_select_lex::cleanup() cleanup_order(order_list.first); cleanup_order(group_list.first); + cleanup_ftfuncs(this); if (join) { @@ -1919,6 +1960,7 @@ bool st_select_lex::cleanup() } inner_refs_list.empty(); exclude_from_table_unique_test= FALSE; + hidden_bit_fields= 0; DBUG_RETURN(error); } diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 38638d3aa1d..13019cd4359 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -231,7 +231,7 @@ static void prepare_record_for_error_message(int error, TABLE *table) Get the number of the offended index. We will see MAX_KEY if the engine cannot determine the affected index. */ - if ((keynr= table->file->get_dup_key(error)) >= MAX_KEY) + if (unlikely((keynr= table->file->get_dup_key(error)) >= MAX_KEY)) DBUG_VOID_RETURN; /* Create unique_map with all fields used by that index. 
*/ @@ -471,8 +471,8 @@ int mysql_update(THD *thd, set_statistics_for_table(thd, table); select= make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error); - if (error || !limit || thd->is_error() || - (select && select->check_quick(thd, safe_update, limit))) + if (unlikely(error || !limit || thd->is_error() || + (select && select->check_quick(thd, safe_update, limit)))) { query_plan.set_impossible_where(); if (thd->lex->describe || thd->lex->analyze_stmt) @@ -506,7 +506,7 @@ int mysql_update(THD *thd, goto err; } } - if (init_ftfuncs(thd, select_lex, 1)) + if (unlikely(init_ftfuncs(thd, select_lex, 1))) goto err; table->mark_columns_needed_for_update(); @@ -724,7 +724,7 @@ int mysql_update(THD *thd, error= init_read_record_idx(&info, thd, table, 1, query_plan.index, reverse); - if (error) + if (unlikely(error)) { close_cached_file(&tempfile); goto err; @@ -733,7 +733,7 @@ int mysql_update(THD *thd, THD_STAGE_INFO(thd, stage_searching_rows_for_update); ha_rows tmp_limit= limit; - while (!(error=info.read_record()) && !thd->killed) + while (likely(!(error=info.read_record())) && likely(!thd->killed)) { explain->buf_tracker.on_record_read(); thd->inc_examined_row_count(1); @@ -744,8 +744,8 @@ int mysql_update(THD *thd, explain->buf_tracker.on_record_after_where(); table->file->position(table->record[0]); - if (my_b_write(&tempfile,table->file->ref, - table->file->ref_length)) + if (unlikely(my_b_write(&tempfile,table->file->ref, + table->file->ref_length))) { error=1; /* purecov: inspected */ break; /* purecov: inspected */ @@ -763,7 +763,7 @@ int mysql_update(THD *thd, error since in this case the transaction might have been rolled back already. */ - if (error < 0) + if (unlikely(error < 0)) { /* Fatal error from select->skip_record() */ error= 1; @@ -773,7 +773,7 @@ int mysql_update(THD *thd, table->file->unlock_row(); } } - if (thd->killed && !error) + if (unlikely(thd->killed) && !error) error= 1; // Aborted limit= tmp_limit; table->file->try_semi_consistent_read(0); @@ -790,14 +790,15 @@ int mysql_update(THD *thd, } else { - select= new SQL_SELECT; + if (!(select= new SQL_SELECT)) + goto err; select->head=table; } - if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0)) - error=1; /* purecov: inspected */ - select->file=tempfile; // Read row ptrs from this file - if (error >= 0) + if (unlikely(reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))) + error= 1; /* purecov: inspected */ + select->file= tempfile; // Read row ptrs from this file + if (unlikely(error >= 0)) goto err; table->file->ha_end_keyread(); @@ -831,7 +832,7 @@ update_begin: /* Direct updating is supported */ DBUG_PRINT("info", ("Using direct update")); table->reset_default_fields(); - if (!(error= table->file->ha_direct_update_rows(&updated))) + if (unlikely(!(error= table->file->ha_direct_update_rows(&updated)))) error= -1; found= updated; goto update_end; @@ -942,11 +943,11 @@ update_begin: error= table->file->ha_update_row(table->record[1], table->record[0]); } - if (error == HA_ERR_RECORD_IS_THE_SAME) + if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME)) { error= 0; } - else if (!error) + else if (likely(!error)) { if (has_vers_fields && table->versioned()) { @@ -956,14 +957,15 @@ update_begin: error= vers_insert_history_row(table); restore_record(table, record[2]); } - if (!error) + if (likely(!error)) updated_sys_ver++; } - if (!error) + if (likely(!error)) updated++; } - if (error && (!ignore || table->file->is_fatal_error(error, HA_CHECK_ALL))) + if (unlikely(error) && + (!ignore || table->file->is_fatal_error(error, 
HA_CHECK_ALL))) { /* If (ignore && error is ignorable) we don't have to @@ -982,8 +984,8 @@ update_begin: } if (table->triggers && - table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, - TRG_ACTION_AFTER, TRUE)) + unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, + TRG_ACTION_AFTER, TRUE))) { error= 1; break; @@ -1036,7 +1038,7 @@ update_begin: Don't try unlocking the row if skip_record reported an error since in this case the transaction might have been rolled back already. */ - else if (!thd->is_error()) + else if (likely(!thd->is_error())) table->file->unlock_row(); else { @@ -1044,7 +1046,7 @@ update_begin: break; } thd->get_stmt_da()->inc_current_row_for_warning(); - if (thd->is_error()) + if (unlikely(thd->is_error())) { error= 1; break; @@ -1069,7 +1071,7 @@ update_begin: };); error= (killed_status == NOT_KILLED)? error : 1; - if (error && + if (likely(error) && will_batch && (loc_error= table->file->exec_bulk_update(&dup_key_found))) /* @@ -1127,12 +1129,12 @@ update_end: Sometimes we want to binlog even if we updated no rows, in case user used it to be sure master and slave are in same state. */ - if ((error < 0) || thd->transaction.stmt.modified_non_trans_table) + if (likely(error < 0) || thd->transaction.stmt.modified_non_trans_table) { if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open()) { int errcode= 0; - if (error < 0) + if (likely(error < 0)) thd->clear_error(); else errcode= query_error_code(thd, killed_status == NOT_KILLED); @@ -1161,7 +1163,7 @@ update_end: id= thd->arg_of_last_insert_id_function ? thd->first_successful_insert_id_in_prev_stmt : 0; - if (error < 0 && !thd->lex->analyze_stmt) + if (likely(error < 0) && likely(!thd->lex->analyze_stmt)) { char buff[MYSQL_ERRMSG_SIZE]; if (!table->versioned(VERS_TIMESTAMP)) @@ -1187,7 +1189,7 @@ update_end: *found_return= found; *updated_return= updated; - if (thd->lex->analyze_stmt) + if (unlikely(thd->lex->analyze_stmt)) goto emit_explain_and_leave; DBUG_RETURN((error >= 0 || thd->is_error()) ? 1 : 0); @@ -1207,7 +1209,7 @@ produce_explain_and_leave: We come here for various "degenerate" query plans: impossible WHERE, no-partitions-used, impossible-range, etc. 
*/ - if (!query_plan.save_explain_update_data(query_plan.mem_root, thd)) + if (unlikely(!query_plan.save_explain_update_data(query_plan.mem_root, thd))) goto err; emit_explain_and_leave: @@ -1863,7 +1865,7 @@ int multi_update::prepare(List ¬_used_values, bitmap_union(table->read_set, &table->tmp_set); } } - if (error) + if (unlikely(error)) DBUG_RETURN(1); /* @@ -1907,14 +1909,14 @@ int multi_update::prepare(List ¬_used_values, table_count); values_for_table= (List_item **) thd->alloc(sizeof(List_item *) * table_count); - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) DBUG_RETURN(1); for (i=0 ; i < table_count ; i++) { fields_for_table[i]= new List_item; values_for_table[i]= new List_item; } - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) DBUG_RETURN(1); /* Split fields into fields_for_table[] and values_by_table[] */ @@ -1926,7 +1928,7 @@ int multi_update::prepare(List ¬_used_values, fields_for_table[offset]->push_back(item, thd->mem_root); values_for_table[offset]->push_back(value, thd->mem_root); } - if (thd->is_fatal_error) + if (unlikely(thd->is_fatal_error)) DBUG_RETURN(1); /* Allocate copy fields */ @@ -2058,7 +2060,8 @@ multi_update::initialize_tables(JOIN *join) TABLE_LIST *table_ref; DBUG_ENTER("initialize_tables"); - if ((thd->variables.option_bits & OPTION_SAFE_UPDATES) && error_if_full_join(join)) + if (unlikely((thd->variables.option_bits & OPTION_SAFE_UPDATES) && + error_if_full_join(join))) DBUG_RETURN(1); main_table=join->join_tab->table; table_to_update= 0; @@ -2164,22 +2167,12 @@ loop_end: tbl->prepare_for_position(); join->map2table[tbl->tablenr]->keep_current_rowid= true; - Field_string *field= new Field_string(tbl->file->ref_length, 0, - &field_name, - &my_charset_bin); - if (!field) - DBUG_RETURN(1); - field->init(tbl); - /* - The field will be converted to varstring when creating tmp table if - table to be updated was created by mysql 4.1. Deny this. - */ - field->can_alter_field_type= 0; - Item_field *ifield= new (thd->mem_root) Item_field(join->thd, (Field *) field); - if (!ifield) + Item_temptable_rowid *item= + new (thd->mem_root) Item_temptable_rowid(tbl); + if (!item) DBUG_RETURN(1); - ifield->maybe_null= 0; - if (temp_fields.push_back(ifield, thd->mem_root)) + item->fix_fields(thd, 0); + if (temp_fields.push_back(item, thd->mem_root)) DBUG_RETURN(1); } while ((tbl= tbl_it++)); @@ -2190,10 +2183,10 @@ loop_end: group.direction= ORDER::ORDER_ASC; group.item= (Item**) temp_fields.head_ref(); - tmp_param->quick_group=1; - tmp_param->field_count=temp_fields.elements; - tmp_param->group_parts=1; - tmp_param->group_length= table->file->ref_length; + tmp_param->quick_group= 1; + tmp_param->field_count= temp_fields.elements; + tmp_param->func_count= temp_fields.elements - 1; + calc_group_buffer(tmp_param, &group); /* small table, ignore SQL_BIG_TABLES */ my_bool save_big_tables= thd->variables.big_tables; thd->variables.big_tables= FALSE; @@ -2205,10 +2198,66 @@ loop_end: DBUG_RETURN(1); tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE); } + join->tmp_table_keep_current_rowid= TRUE; DBUG_RETURN(0); } +static TABLE *item_rowid_table(Item *item) +{ + if (item->type() != Item::FUNC_ITEM) + return NULL; + Item_func *func= (Item_func *)item; + if (func->functype() != Item_func::TEMPTABLE_ROWID) + return NULL; + Item_temptable_rowid *itr= (Item_temptable_rowid *)func; + return itr->table; +} + + +/* + multi_update stores a rowid and new field values for every updated row in a + temporary table (one temporary table per updated table). 
These rowids are + obtained via Item_temptable_rowid's by calling handler::position(). But if + the join is resolved via a temp table, rowids cannot be obtained from + handler::position() in the multi_update::send_data(). So, they're stored in + the join's temp table (JOIN::add_fields_for_current_rowid()) and here we + replace Item_temptable_rowid's (that would've done handler::position()) with + Item_field's (that will simply take the corresponding field value from the + temp table). +*/ +int multi_update::prepare2(JOIN *join) +{ + if (!join->need_tmp || !join->tmp_table_keep_current_rowid) + return 0; + + // there cannot be many tmp tables in multi-update + JOIN_TAB *tmptab= join->join_tab + join->exec_join_tab_cnt(); + + for (Item **it= tmptab->tmp_table_param->items_to_copy; *it ; it++) + { + TABLE *tbl= item_rowid_table(*it); + if (!tbl) + continue; + for (uint i= 0; i < table_count; i++) + { + for (Item **it2= tmp_table_param[i].items_to_copy; *it2; it2++) + { + if (item_rowid_table(*it2) != tbl) + continue; + Item *fld= new (thd->mem_root) + Item_field(thd, (*it)->get_tmp_table_field()); + if (!fld) + return 1; + fld->set_result_field((*it2)->get_tmp_table_field()); + *it2= fld; + } + } + } + return 0; +} + + multi_update::~multi_update() { TABLE_LIST *table; @@ -2295,7 +2344,8 @@ int multi_update::send_data(List ¬_used_values) { int error; - if (table->default_field && table->update_default_fields(1, ignore)) + if (table->default_field && + unlikely(table->update_default_fields(1, ignore))) DBUG_RETURN(1); if ((error= cur_table->view_check_option(thd, ignore)) != @@ -2304,10 +2354,10 @@ int multi_update::send_data(List ¬_used_values) found--; if (error == VIEW_CHECK_SKIP) continue; - else if (error == VIEW_CHECK_ERROR) + else if (unlikely(error == VIEW_CHECK_ERROR)) DBUG_RETURN(1); } - if (!updated++) + if (unlikely(!updated++)) { /* Inform the main table that we are going to update the table even @@ -2316,8 +2366,8 @@ int multi_update::send_data(List ¬_used_values) */ main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE); } - if ((error=table->file->ha_update_row(table->record[1], - table->record[0])) && + if (unlikely((error=table->file->ha_update_row(table->record[1], + table->record[0]))) && error != HA_ERR_RECORD_IS_THE_SAME) { updated--; @@ -2340,7 +2390,7 @@ int multi_update::send_data(List ¬_used_values) } else { - if (error == HA_ERR_RECORD_IS_THE_SAME) + if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME)) { error= 0; updated--; @@ -2372,55 +2422,42 @@ int multi_update::send_data(List ¬_used_values) } } if (table->triggers && - table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, - TRG_ACTION_AFTER, TRUE)) + unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, + TRG_ACTION_AFTER, TRUE))) DBUG_RETURN(1); } else { int error; TABLE *tmp_table= tmp_tables[offset]; - /* - For updatable VIEW store rowid of the updated table and - rowids of tables used in the CHECK OPTION condition. - */ - uint field_num= 0; - List_iterator_fast tbl_it(unupdated_check_opt_tables); - /* Set first tbl = table and then tbl to tables from tbl_it */ - TABLE *tbl= table; - do - { - tbl->file->position(tbl->record[0]); - memcpy((char*) tmp_table->field[field_num]->ptr, - (char*) tbl->file->ref, tbl->file->ref_length); - /* - For outer joins a rowid field may have no NOT_NULL_FLAG, - so we have to reset NULL bit for this field. - (set_notnull() resets NULL bit only if available). 
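The Item_temptable_rowid approach described above builds on the engine's row-position protocol: handler::position() stores the current row's reference into handler::ref, and those bytes can later be handed to ha_rnd_pos() to re-fetch exactly that row. A minimal sketch using only the calls visible in this patch (the buffer name and size below are hypothetical):

    uchar saved_ref[64];                        /* hypothetical buffer, sized >= file->ref_length */
    table->file->position(table->record[0]);    /* fills table->file->ref with the row's position */
    memcpy(saved_ref, table->file->ref, table->file->ref_length);
    /* ... later, re-read exactly that row by its saved position ... */
    int error= table->file->ha_rnd_pos(table->record[0], saved_ref);

Previously send_data() copied these bytes by hand into a Field_string column of the per-table temporary table; with Item_temptable_rowid they land in a VARSTRING column instead, which is why do_updates() below fetches them back through Field_varstring::get_data() rather than reading field->ptr directly.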
- */ - tmp_table->field[field_num]->set_notnull(); - field_num++; - } while ((tbl= tbl_it++)); - + if (copy_funcs(tmp_table_param[offset].items_to_copy, thd)) + DBUG_RETURN(1); /* Store regular updated fields in the row. */ + DBUG_ASSERT(1 + unupdated_check_opt_tables.elements == + tmp_table_param[offset].func_count); fill_record(thd, tmp_table, tmp_table->field + 1 + unupdated_check_opt_tables.elements, *values_for_table[offset], TRUE, FALSE); /* Write row, ignoring duplicated updates to a row */ error= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]); - if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE) + found++; + if (unlikely(error)) { - if (error && - create_internal_tmp_table_from_heap(thd, tmp_table, - tmp_table_param[offset].start_recinfo, - &tmp_table_param[offset].recinfo, - error, 1, NULL)) + found--; + if (error != HA_ERR_FOUND_DUPP_KEY && + error != HA_ERR_FOUND_DUPP_UNIQUE) { - do_update= 0; - DBUG_RETURN(1); // Not a table_is_full error + if (create_internal_tmp_table_from_heap(thd, tmp_table, + tmp_table_param[offset].start_recinfo, + &tmp_table_param[offset].recinfo, + error, 1, NULL)) + { + do_update= 0; + DBUG_RETURN(1); // Not a table_is_full error + } + found++; } - found++; } } } @@ -2431,8 +2468,8 @@ int multi_update::send_data(List ¬_used_values) void multi_update::abort_result_set() { /* the error was handled or nothing deleted and no side effects return */ - if (error_handled || - (!thd->transaction.stmt.modified_non_trans_table && !updated)) + if (unlikely(error_handled || + (!thd->transaction.stmt.modified_non_trans_table && !updated))) return; /* Something already updated so we have to invalidate cache */ @@ -2522,7 +2559,7 @@ int multi_update::do_updates() org_updated= updated; tmp_table= tmp_tables[cur_table->shared]; tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache - if ((local_error= table->file->ha_rnd_init(0))) + if (unlikely((local_error= table->file->ha_rnd_init(0)))) { err_table= table; goto err; @@ -2545,7 +2582,7 @@ int multi_update::do_updates() check_opt_it.rewind(); while(TABLE *tbl= check_opt_it++) { - if ((local_error= tbl->file->ha_rnd_init(1))) + if (unlikely((local_error= tbl->file->ha_rnd_init(1)))) { err_table= tbl; goto err; @@ -2569,7 +2606,7 @@ int multi_update::do_updates() } copy_field_end=copy_field_ptr; - if ((local_error= tmp_table->file->ha_rnd_init(1))) + if (unlikely((local_error= tmp_table->file->ha_rnd_init(1)))) { err_table= tmp_table; goto err; @@ -2584,12 +2621,11 @@ int multi_update::do_updates() thd->fatal_error(); goto err2; } - if ((local_error= tmp_table->file->ha_rnd_next(tmp_table->record[0]))) + if (unlikely((local_error= + tmp_table->file->ha_rnd_next(tmp_table->record[0])))) { if (local_error == HA_ERR_END_OF_FILE) break; - if (local_error == HA_ERR_RECORD_DELETED) - continue; // May happen on dup key err_table= tmp_table; goto err; } @@ -2600,9 +2636,10 @@ int multi_update::do_updates() uint field_num= 0; do { - if ((local_error= - tbl->file->ha_rnd_pos(tbl->record[0], - (uchar *) tmp_table->field[field_num]->ptr))) + uchar *ref= + ((Field_varstring *) tmp_table->field[field_num])->get_data(); + if (unlikely((local_error= + tbl->file->ha_rnd_pos(tbl->record[0], ref)))) { err_table= tbl; goto err; @@ -2611,8 +2648,8 @@ int multi_update::do_updates() } while ((tbl= check_opt_it++)); if (table->vfield && - table->update_virtual_fields(table->file, - VCOL_UPDATE_INDEXED_FOR_UPDATE)) + unlikely(table->update_virtual_fields(table->file, + VCOL_UPDATE_INDEXED_FOR_UPDATE))) 
goto err2; table->status|= STATUS_UPDATED; @@ -2646,7 +2683,7 @@ int multi_update::do_updates() { if (error == VIEW_CHECK_SKIP) continue; - else if (error == VIEW_CHECK_ERROR) + else if (unlikely(error == VIEW_CHECK_ERROR)) { thd->fatal_error(); goto err2; @@ -2655,8 +2692,9 @@ int multi_update::do_updates() if (has_vers_fields && table->versioned()) table->vers_update_fields(); - if ((local_error=table->file->ha_update_row(table->record[1], - table->record[0])) && + if (unlikely((local_error= + table->file->ha_update_row(table->record[1], + table->record[0]))) && local_error != HA_ERR_RECORD_IS_THE_SAME) { if (!ignore || @@ -2691,8 +2729,8 @@ int multi_update::do_updates() } if (table->triggers && - table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, - TRG_ACTION_AFTER, TRUE)) + unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, + TRG_ACTION_AFTER, TRUE))) goto err2; } @@ -2762,7 +2800,7 @@ bool multi_update::send_eof() error takes into account killed status gained in do_updates() */ int local_error= thd->is_error(); - if (!local_error) + if (likely(!local_error)) local_error = (table_count) ? do_updates() : 0; /* if local_error is not set ON until after do_updates() then @@ -2792,12 +2830,13 @@ bool multi_update::send_eof() thd->transaction.all.m_unsafe_rollback_flags|= (thd->transaction.stmt.m_unsafe_rollback_flags & THD_TRANS::DID_WAIT); - if (local_error == 0 || thd->transaction.stmt.modified_non_trans_table) + if (likely(local_error == 0 || + thd->transaction.stmt.modified_non_trans_table)) { if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open()) { int errcode= 0; - if (local_error == 0) + if (likely(local_error == 0)) thd->clear_error(); else errcode= query_error_code(thd, killed_status == NOT_KILLED); @@ -2811,23 +2850,25 @@ bool multi_update::send_eof() break; } } - ScopedStatementReplication scoped_stmt_rpl(force_stmt ? thd : NULL); + enum_binlog_format save_binlog_format; + save_binlog_format= thd->get_current_stmt_binlog_format(); + if (force_stmt) + thd->set_current_stmt_binlog_format_stmt(); if (thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query(), thd->query_length(), transactional_tables, FALSE, FALSE, errcode)) - { local_error= 1; // Rollback update - } + thd->set_current_stmt_binlog_format(save_binlog_format); } } - DBUG_ASSERT(trans_safe || !updated || + DBUG_ASSERT(trans_safe || !updated || thd->transaction.stmt.modified_non_trans_table); - if (local_error != 0) + if (likely(local_error != 0)) error_handled= TRUE; // to force early leave from ::abort_result_set() - if (local_error > 0) // if the above log write did not fail ... + if (unlikely(local_error > 0)) // if the above log write did not fail ... 
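The send_eof() change just above replaces the ScopedStatementReplication helper with an explicit save of the statement binlog format, an optional switch to statement format, and a restore after binlog_query(). A guard with roughly that inlined behaviour could look like this (a sketch only; the class name is made up, while the THD accessors are the ones the patch itself calls):

    class Stmt_binlog_format_guard
    {
      THD *m_thd;
      enum_binlog_format m_saved;
    public:
      Stmt_binlog_format_guard(THD *thd, bool force_stmt)
        : m_thd(thd), m_saved(thd->get_current_stmt_binlog_format())
      {
        if (force_stmt)
          m_thd->set_current_stmt_binlog_format_stmt();
      }
      ~Stmt_binlog_format_guard()
      {
        m_thd->set_current_stmt_binlog_format(m_saved);   /* restore on scope exit */
      }
    };

Writing the save/restore out inline, as the patch does, keeps the restore visibly paired with the binlog_query() call instead of relying on end-of-scope destruction.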
{ /* Safety: If we haven't got an error before (can happen in do_updates) */ my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update", diff --git a/sql/sql_view.cc b/sql/sql_view.cc index e910d48c75c..6679334552b 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -530,7 +530,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, /* prepare select to resolve all fields */ lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW; - if (unit->prepare(thd, 0, 0)) + if (unit->prepare(unit->derived, 0, 0)) { /* some errors from prepare are reported to user, if is not then @@ -609,7 +609,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, if (!fld) continue; TABLE_SHARE *s= fld->field->table->s; - const LString_i field_name= fld->field->field_name; + const Lex_ident field_name= fld->field->field_name; if (s->tmp_table || (s->versioned && (field_name.streq(s->vers_start_field()->field_name) || @@ -1333,6 +1333,7 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table, now Lex placed in statement memory */ + table->view= lex= thd->lex= (LEX*) new(thd->mem_root) st_lex_local; if (!table->view) { @@ -1359,8 +1360,9 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table, goto end; lex_start(thd); + lex->stmt_lex= old_lex; view_select= &lex->select_lex; - view_select->select_number= ++thd->stmt_lex->current_select_number; + view_select->select_number= ++thd->lex->stmt_lex->current_select_number; sql_mode_t saved_mode= thd->variables.sql_mode; /* switch off modes which can prevent normal parsing of VIEW @@ -1791,13 +1793,14 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) TABLES we have to simply prohibit dropping of views. */ - if (thd->locked_tables_mode) + if (unlikely(thd->locked_tables_mode)) { my_error(ER_LOCK_OR_ACTIVE_TRANSACTION, MYF(0)); DBUG_RETURN(TRUE); } - if (lock_table_names(thd, views, 0, thd->variables.lock_wait_timeout, 0)) + if (unlikely(lock_table_names(thd, views, 0, + thd->variables.lock_wait_timeout, 0))) DBUG_RETURN(TRUE); for (view= views; view; view= view->next_local) @@ -1835,7 +1838,7 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) } continue; } - if (mysql_file_delete(key_file_frm, path, MYF(MY_WME))) + if (unlikely(mysql_file_delete(key_file_frm, path, MYF(MY_WME)))) error= TRUE; some_views_deleted= TRUE; @@ -1850,12 +1853,12 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) sp_cache_invalidate(); } - if (wrong_object_name) + if (unlikely(wrong_object_name)) { my_error(ER_WRONG_OBJECT, MYF(0), wrong_object_db, wrong_object_name, "VIEW"); } - if (non_existant_views.length()) + if (unlikely(non_existant_views.length())) { my_error(ER_UNKNOWN_VIEW, MYF(0), non_existant_views.c_ptr_safe()); } @@ -1866,11 +1869,12 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) /* if something goes wrong, bin-log with possible error code, otherwise bin-log with error code cleared. 
*/ - if (write_bin_log(thd, !something_wrong, thd->query(), thd->query_length())) + if (unlikely(write_bin_log(thd, !something_wrong, thd->query(), + thd->query_length()))) something_wrong= 1; } - if (something_wrong) + if (unlikely(something_wrong)) { DBUG_RETURN(TRUE); } @@ -2038,7 +2042,7 @@ bool insert_view_fields(THD *thd, List *list, TABLE_LIST *view) if ((fld= entry->item->field_for_view_update())) { TABLE_SHARE *s= fld->context->table_list->table->s; - LString_i field_name= fld->field_name; + Lex_ident field_name= fld->field_name; if (s->versioned && (field_name.streq(s->vers_start_field()->field_name) || field_name.streq(s->vers_end_field()->field_name))) diff --git a/sql/sql_window.cc b/sql/sql_window.cc index db34b77ddcb..38fdd8ab80b 100644 --- a/sql/sql_window.cc +++ b/sql/sql_window.cc @@ -2799,7 +2799,7 @@ bool compute_window_func(THD *thd, /* Check if we found any error in the window function while adding values through cursors. */ - if (thd->is_error() || thd->is_killed()) + if (unlikely(thd->is_error() || thd->is_killed())) break; @@ -2931,7 +2931,7 @@ bool Window_func_runner::exec(THD *thd, TABLE *tbl, SORT_INFO *filesort_result) bool Window_funcs_sort::exec(JOIN *join) { THD *thd= join->thd; - JOIN_TAB *join_tab= join->join_tab + join->exec_join_tab_cnt(); + JOIN_TAB *join_tab= join->join_tab + join->total_join_tab_cnt(); /* Sort the table based on the most specific sorting criteria of the window functions. */ diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index d69156ced9b..049b2c0cac9 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -37,7 +37,7 @@ #include "sql_priv.h" #include "sql_parse.h" /* comp_*_creator */ #include "sql_table.h" /* primary_key_name */ -#include "sql_partition.h" /* mem_alloc_error, partition_info, HASH_PARTITION */ +#include "sql_partition.h" /* partition_info, HASH_PARTITION */ #include "sql_acl.h" /* *_ACL */ #include "sql_class.h" /* Key_part_spec, enum_filetype, Diag_condition_item_name */ #include "slave.h" @@ -67,8 +67,6 @@ #include "lex_token.h" #include "sql_lex.h" #include "sql_sequence.h" -#include "sql_tvc.h" -#include "vers_utils.h" #include "my_base.h" /* this is to get the bison compilation windows warnings out */ @@ -82,7 +80,7 @@ int yylex(void *yylval, void *yythd); #define yyoverflow(A,B,C,D,E,F) \ { \ size_t val= *(F); \ - if (my_yyoverflow((B), (D), &val)) \ + if (unlikely(my_yyoverflow((B), (D), &val))) \ { \ yyerror(thd, (char*) (A)); \ return 2; \ @@ -101,7 +99,7 @@ int yylex(void *yylval, void *yythd); } while (0) #define MYSQL_YYABORT_UNLESS(A) \ - if (!(A)) \ + if (unlikely(!(A))) \ { \ thd->parse_error(); \ MYSQL_YYABORT; \ @@ -342,14 +340,14 @@ bool LEX::set_trigger_new_row(const LEX_CSTRING *name, Item *val) Item_trigger_field::NEW_ROW, name, UPDATE_ACL, FALSE); - if (trg_fld == NULL) + if (unlikely(trg_fld == NULL)) return TRUE; sp_fld= new (thd->mem_root) sp_instr_set_trigger_field(sphead->instructions(), spcont, trg_fld, val, this); - if (sp_fld == NULL) + if (unlikely(sp_fld == NULL)) return TRUE; /* @@ -377,20 +375,25 @@ bool LEX::set_trigger_new_row(const LEX_CSTRING *name, Item *val) @return An Item_splocal object representing the SP variable, or NULL on error. 
*/ Item_splocal* -LEX::create_item_for_sp_var(LEX_CSTRING *name, sp_variable *spvar, - const char *start_in_q, const char *end_in_q) +LEX::create_item_for_sp_var(const Lex_ident_cli_st *cname, sp_variable *spvar) { const Sp_rcontext_handler *rh; Item_splocal *item; + const char *start_in_q= cname->pos(); + const char *end_in_q= cname->end(); uint pos_in_q, len_in_q; + Lex_ident_sys name(thd, cname); + + if (name.is_null()) + return NULL; // EOM /* If necessary, look for the variable. */ if (spcont && !spvar) - spvar= find_variable(name, &rh); + spvar= find_variable(&name, &rh); if (!spvar) { - my_error(ER_SP_UNDECLARED_VAR, MYF(0), name->str); + my_error(ER_SP_UNDECLARED_VAR, MYF(0), name.str); return NULL; } @@ -401,7 +404,7 @@ LEX::create_item_for_sp_var(LEX_CSTRING *name, sp_variable *spvar, len_in_q= (uint)(end_in_q - start_in_q); item= new (thd->mem_root) - Item_splocal(thd, rh, name, spvar->offset, spvar->type_handler(), + Item_splocal(thd, rh, &name, spvar->offset, spvar->type_handler(), pos_in_q, len_in_q); #ifdef DBUG_ASSERT_EXISTS @@ -546,7 +549,8 @@ bool LEX::add_select_to_union_list(bool is_union_distinct, as possible */ if (type == INTERSECT_TYPE && (current_select->linkage != INTERSECT_TYPE && - current_select != current_select->master_unit()->first_select())) + current_select != current_select->master_unit()->first_select()) + && !(thd->variables.sql_mode & MODE_ORACLE)) { /* This and previous SELECTs should go one level down because of @@ -567,11 +571,9 @@ bool LEX::add_select_to_union_list(bool is_union_distinct, return TRUE; mysql_init_select(this); current_select->linkage= type; + current_select->with_all_modifier= !is_union_distinct; if (is_union_distinct) /* UNION DISTINCT - remember position */ - { - current_select->master_unit()->union_distinct= - current_select; - } + current_select->master_unit()->union_distinct= current_select; else DBUG_ASSERT(type == UNION_TYPE); return FALSE; @@ -707,7 +709,7 @@ bool LEX::add_alter_list(const char *name, Virtual_column_info *expr, { MEM_ROOT *mem_root= thd->mem_root; Alter_column *ac= new (mem_root) Alter_column(name, expr, exists); - if (ac == NULL) + if (unlikely(ac == NULL)) return true; alter_info.alter_list.push_back(ac, mem_root); alter_info.flags|= ALTER_CHANGE_COLUMN_DEFAULT; @@ -750,7 +752,7 @@ bool LEX::set_bincmp(CHARSET_INFO *cs, bool bin) #define bincmp_collation(X,Y) \ do \ { \ - if (Lex->set_bincmp(X,Y)) \ + if (unlikely(Lex->set_bincmp(X,Y))) \ MYSQL_YYABORT; \ } while(0) @@ -758,11 +760,8 @@ bool LEX::set_bincmp(CHARSET_INFO *cs, bool bin) Virtual_column_info *add_virtual_expression(THD *thd, Item *expr) { Virtual_column_info *v= new (thd->mem_root) Virtual_column_info(); - if (!v) - { - mem_alloc_error(sizeof(Virtual_column_info)); + if (unlikely(!v)) return 0; - } v->expr= expr; v->utf8= 0; /* connection charset */ return v; @@ -778,9 +777,10 @@ Virtual_column_info *add_virtual_expression(THD *thd, Item *expr) /* structs */ LEX_CSTRING lex_str; - LEX_SYMBOL symbol; + Lex_ident_cli_st kwd; + Lex_ident_cli_st ident_cli; + Lex_ident_sys_st ident_sys; Lex_string_with_metadata_st lex_string_with_metadata; - Lex_string_with_pos_st lex_string_with_pos; Lex_spblock_st spblock; Lex_spblock_handlers_st spblock_handlers; Lex_length_and_dec_st Lex_length_and_dec; @@ -888,10 +888,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %parse-param { THD *thd } %lex-param { THD *thd } /* - Currently there are 139 shift/reduce conflicts. + Currently there are 62 shift/reduce conflicts. 
We should not introduce new conflicts any more. */ -%expect 139 +%expect 62 /* Comments for TOKENS. @@ -911,679 +911,277 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); This makes the code grep-able, and helps maintenance. */ - + + +/* + Reserved keywords and operators +*/ %token ABORT_SYM /* INTERNAL (used in lex) */ %token ACCESSIBLE_SYM -%token ACTION /* SQL-2003-N */ %token ADD /* SQL-2003-R */ -%token ADMIN_SYM /* SQL-2003-N */ -%token ADDDATE_SYM /* MYSQL-FUNC */ -%token AFTER_SYM /* SQL-2003-N */ -%token AGAINST -%token AGGREGATE_SYM -%token ALGORITHM_SYM %token ALL /* SQL-2003-R */ %token ALTER /* SQL-2003-R */ -%token ALWAYS_SYM %token ANALYZE_SYM %token AND_AND_SYM /* OPERATOR */ %token AND_SYM /* SQL-2003-R */ -%token ANY_SYM /* SQL-2003-R */ %token AS /* SQL-2003-R */ %token ASC /* SQL-2003-N */ -%token ASCII_SYM /* MYSQL-FUNC */ %token ASENSITIVE_SYM /* FUTURE-USE */ -%token AT_SYM /* SQL-2003-R */ -%token ATOMIC_SYM /* SQL-2003-R */ -%token AUTHORS_SYM -%token AUTOEXTEND_SIZE_SYM -%token AUTO_INC -%token AUTO_SYM -%token AVG_ROW_LENGTH -%token AVG_SYM /* SQL-2003-N */ -%token BACKUP_SYM %token BEFORE_SYM /* SQL-2003-N */ -%token BEGIN_SYM /* SQL-2003-R */ %token BETWEEN_SYM /* SQL-2003-R */ %token BIGINT /* SQL-2003-R */ %token BINARY /* SQL-2003-R */ -%token BINLOG_SYM %token BIN_NUM %token BIT_AND /* MYSQL-FUNC */ %token BIT_OR /* MYSQL-FUNC */ -%token BIT_SYM /* MYSQL-FUNC */ %token BIT_XOR /* MYSQL-FUNC */ %token BLOB_SYM /* SQL-2003-R */ -%token BLOCK_SYM -%token BODY_SYM /* Oracle-R */ -%token BOOLEAN_SYM /* SQL-2003-R */ -%token BOOL_SYM %token BOTH /* SQL-2003-R */ -%token BTREE_SYM %token BY /* SQL-2003-R */ -%token BYTE_SYM -%token CACHE_SYM %token CALL_SYM /* SQL-2003-R */ %token CASCADE /* SQL-2003-N */ -%token CASCADED /* SQL-2003-R */ %token CASE_SYM /* SQL-2003-R */ %token CAST_SYM /* SQL-2003-R */ -%token CATALOG_NAME_SYM /* SQL-2003-N */ -%token CHAIN_SYM /* SQL-2003-N */ %token CHANGE -%token CHANGED -%token CHARSET %token CHAR_SYM /* SQL-2003-R */ -%token CHECKPOINT_SYM -%token CHECKSUM_SYM %token CHECK_SYM /* SQL-2003-R */ -%token CIPHER_SYM -%token CLASS_ORIGIN_SYM /* SQL-2003-N */ -%token CLIENT_SYM -%token CLOSE_SYM /* SQL-2003-R */ -%token CLOB /* SQL-2003-R */ -%token COALESCE /* SQL-2003-N */ -%token CODE_SYM %token COLLATE_SYM /* SQL-2003-R */ -%token COLLATION_SYM /* SQL-2003-N */ -%token COLUMNS -%token COLUMN_ADD_SYM -%token COLUMN_CHECK_SYM -%token COLUMN_CREATE_SYM -%token COLUMN_DELETE_SYM -%token COLUMN_GET_SYM -%token COLUMN_SYM /* SQL-2003-R */ -%token COLUMN_NAME_SYM /* SQL-2003-N */ -%token COMMENT_SYM -%token COMMITTED_SYM /* SQL-2003-N */ -%token COMMIT_SYM /* SQL-2003-R */ -%token COMPACT_SYM -%token COMPLETION_SYM -%token COMPRESSED_SYM -%token CONCURRENT %token CONDITION_SYM /* SQL-2003-R, SQL-2008-R */ -%token CONNECTION_SYM -%token CONSISTENT_SYM %token CONSTRAINT /* SQL-2003-R */ -%token CONSTRAINT_CATALOG_SYM /* SQL-2003-N */ -%token CONSTRAINT_NAME_SYM /* SQL-2003-N */ -%token CONSTRAINT_SCHEMA_SYM /* SQL-2003-N */ -%token CONTAINS_SYM /* SQL-2003-N */ -%token CONTEXT_SYM %token CONTINUE_SYM /* SQL-2003-R */ -%token CONTRIBUTORS_SYM %token CONVERT_SYM /* SQL-2003-N */ %token COUNT_SYM /* SQL-2003-N */ -%token CPU_SYM %token CREATE /* SQL-2003-R */ %token CROSS /* SQL-2003-R */ -%token CUBE_SYM /* SQL-2003-R */ %token CUME_DIST_SYM %token CURDATE /* MYSQL-FUNC */ -%token CURRENT_SYM /* SQL-2003-R */ %token CURRENT_USER /* SQL-2003-R */ %token CURRENT_ROLE /* SQL-2003-R */ -%token CURRENT_POS_SYM %token 
CURSOR_SYM /* SQL-2003-R */ -%token CURSOR_NAME_SYM /* SQL-2003-N */ %token CURTIME /* MYSQL-FUNC */ -%token CYCLE_SYM %token DATABASE %token DATABASES -%token DATAFILE_SYM -%token DATA_SYM /* SQL-2003-N */ -%token DATETIME %token DATE_ADD_INTERVAL /* MYSQL-FUNC */ -%token DATE_FORMAT_SYM /* MYSQL-FUNC */ %token DATE_SUB_INTERVAL /* MYSQL-FUNC */ -%token DATE_SYM /* SQL-2003-R */ %token DAY_HOUR_SYM %token DAY_MICROSECOND_SYM %token DAY_MINUTE_SYM %token DAY_SECOND_SYM -%token DAY_SYM /* SQL-2003-R */ -%token DEALLOCATE_SYM /* SQL-2003-R */ %token DECIMAL_NUM %token DECIMAL_SYM /* SQL-2003-R */ %token DECLARE_SYM /* SQL-2003-R */ -%token DECODE_SYM /* Oracle function, non-reserved */ %token DEFAULT /* SQL-2003-R */ -%token DEFINER_SYM -%token DELAYED_SYM -%token DELAY_KEY_WRITE_SYM %token DELETE_DOMAIN_ID_SYM %token DELETE_SYM /* SQL-2003-R */ %token DENSE_RANK_SYM %token DESC /* SQL-2003-N */ %token DESCRIBE /* SQL-2003-R */ -%token DES_KEY_FILE %token DETERMINISTIC_SYM /* SQL-2003-R */ -%token DIAGNOSTICS_SYM /* SQL-2003-N */ -%token DIRECTORY_SYM -%token DISABLE_SYM -%token DISCARD -%token DISK_SYM %token DISTINCT /* SQL-2003-R */ %token DIV_SYM %token DOUBLE_SYM /* SQL-2003-R */ %token DO_DOMAIN_IDS_SYM -%token DO_SYM %token DOT_DOT_SYM %token DROP /* SQL-2003-R */ %token DUAL_SYM -%token DUMPFILE -%token DUPLICATE_SYM -%token DYNAMIC_SYM /* SQL-2003-R */ %token EACH_SYM /* SQL-2003-R */ %token ELSE /* SQL-2003-R */ %token ELSEIF_SYM -%token ELSIF_SYM /* Oracle, reserved in PL/SQL*/ -%token ENABLE_SYM %token ENCLOSED -%token END /* SQL-2003-R */ -%token ENDS_SYM %token END_OF_INPUT /* INTERNAL */ -%token ENGINES_SYM -%token ENGINE_SYM -%token ENUM %token EQUAL_SYM /* OPERATOR */ -%token ERROR_SYM -%token ERRORS %token ESCAPED -%token ESCAPE_SYM /* SQL-2003-R */ -%token EVENTS_SYM -%token EVENT_SYM -%token EVERY_SYM /* SQL-2003-N */ -%token EXCHANGE_SYM -%token EXAMINED_SYM %token EXCEPT_SYM /* SQL-2003-R */ -%token EXCLUDE_SYM /* SQL-2011-N */ -%token EXECUTE_SYM /* SQL-2003-R */ -%token EXCEPTION_SYM /* SQL-2003-N, Oracle-PLSQL-R */ %token EXISTS /* SQL-2003-R */ -%token EXIT_SYM -%token EXPANSION_SYM -%token EXPORT_SYM -%token EXTENDED_SYM -%token EXTENT_SIZE_SYM %token EXTRACT_SYM /* SQL-2003-N */ %token FALSE_SYM /* SQL-2003-R */ -%token FAST_SYM -%token FAULTS_SYM %token FETCH_SYM /* SQL-2003-R */ -%token FILE_SYM %token FIRST_VALUE_SYM /* SQL-2011 */ -%token FIRST_SYM /* SQL-2003-N */ -%token FIXED_SYM %token FLOAT_NUM %token FLOAT_SYM /* SQL-2003-R */ -%token FLUSH_SYM -%token FOLLOWS_SYM /* MYSQL trigger*/ -%token FOLLOWING_SYM /* SQL-2011-N */ -%token FORCE_SYM %token FOREIGN /* SQL-2003-R */ %token FOR_SYM /* SQL-2003-R */ %token FOR_SYSTEM_TIME_SYM /* INTERNAL */ -%token FORMAT_SYM -%token FOUND_SYM /* SQL-2003-R */ %token FROM -%token FULL /* SQL-2003-R */ %token FULLTEXT_SYM -%token FUNCTION_SYM /* SQL-2003-R */ %token GE -%token GENERAL -%token GENERATED_SYM -%token GEOMETRYCOLLECTION -%token GEOMETRY_SYM -%token GET_FORMAT /* MYSQL-FUNC */ -%token GET_SYM /* SQL-2003-R */ -%token GLOBAL_SYM /* SQL-2003-R */ -%token GOTO_SYM /* Oracle, reserved in PL/SQL*/ %token GRANT /* SQL-2003-R */ -%token GRANTS %token GROUP_SYM /* SQL-2003-R */ %token GROUP_CONCAT_SYM %token LAG_SYM /* SQL-2011 */ %token LEAD_SYM /* SQL-2011 */ -%token HANDLER_SYM -%token HARD_SYM -%token HASH_SYM %token HAVING /* SQL-2003-R */ -%token HELP_SYM %token HEX_NUM %token HEX_STRING -%token HIGH_PRIORITY -%token HISTORY_SYM /* MYSQL */ -%token HOST_SYM -%token HOSTS_SYM %token HOUR_MICROSECOND_SYM %token 
HOUR_MINUTE_SYM %token HOUR_SECOND_SYM -%token HOUR_SYM /* SQL-2003-R */ -%token ID_SYM /* MYSQL */ %token IDENT -%token IDENTIFIED_SYM %token IDENT_QUOTED %token IF_SYM %token IGNORE_DOMAIN_IDS_SYM %token IGNORE_SYM -%token IGNORE_SERVER_IDS_SYM -%token IMMEDIATE_SYM /* SQL-2003-R */ -%token IMPORT -%token INCREMENT_SYM -%token INDEXES %token INDEX_SYM %token INFILE -%token INITIAL_SIZE_SYM %token INNER_SYM /* SQL-2003-R */ %token INOUT_SYM /* SQL-2003-R */ %token INSENSITIVE_SYM /* SQL-2003-R */ %token INSERT /* SQL-2003-R */ -%token INSERT_METHOD -%token INSTALL_SYM %token INTERSECT_SYM /* SQL-2003-R */ %token INTERVAL_SYM /* SQL-2003-R */ %token INTO /* SQL-2003-R */ %token INT_SYM /* SQL-2003-R */ -%token INVOKER_SYM %token IN_SYM /* SQL-2003-R */ -%token IO_SYM -%token IPC_SYM %token IS /* SQL-2003-R */ -%token ISOLATION /* SQL-2003-R */ -%token ISOPEN_SYM /* Oracle-N */ -%token ISSUER_SYM %token ITERATE_SYM -%token INVISIBLE_SYM %token JOIN_SYM /* SQL-2003-R */ -%token JSON_SYM %token KEYS -%token KEY_BLOCK_SIZE %token KEY_SYM /* SQL-2003-N */ %token KILL_SYM -%token LANGUAGE_SYM /* SQL-2003-R */ -%token LAST_SYM /* SQL-2003-N */ -%token LAST_VALUE -%token LASTVAL_SYM /* PostgreSQL sequence function */ %token LE /* OPERATOR */ %token LEADING /* SQL-2003-R */ -%token LEAVES %token LEAVE_SYM %token LEFT /* SQL-2003-R */ -%token LESS_SYM -%token LEVEL_SYM %token LEX_HOSTNAME %token LIKE /* SQL-2003-R */ %token LIMIT %token LINEAR_SYM %token LINES -%token LINESTRING -%token LIST_SYM %token LOAD -%token LOCAL_SYM /* SQL-2003-R */ %token LOCATOR_SYM /* SQL-2003-N */ -%token LOCKS_SYM %token LOCK_SYM -%token LOGFILE_SYM -%token LOGS_SYM %token LONGBLOB %token LONGTEXT %token LONG_NUM %token LONG_SYM %token LOOP_SYM %token LOW_PRIORITY -%token MASTER_CONNECT_RETRY_SYM -%token MASTER_DELAY_SYM -%token MASTER_GTID_POS_SYM -%token MASTER_HOST_SYM -%token MASTER_LOG_FILE_SYM -%token MASTER_LOG_POS_SYM -%token MASTER_PASSWORD_SYM -%token MASTER_PORT_SYM -%token MASTER_SERVER_ID_SYM -%token MASTER_SSL_CAPATH_SYM -%token MASTER_SSL_CA_SYM -%token MASTER_SSL_CERT_SYM -%token MASTER_SSL_CIPHER_SYM -%token MASTER_SSL_CRL_SYM -%token MASTER_SSL_CRLPATH_SYM -%token MASTER_SSL_KEY_SYM -%token MASTER_SSL_SYM %token MASTER_SSL_VERIFY_SERVER_CERT_SYM -%token MASTER_SYM -%token MASTER_USER_SYM -%token MASTER_USE_GTID_SYM -%token MASTER_HEARTBEAT_PERIOD_SYM %token MATCH /* SQL-2003-R */ -%token MAX_CONNECTIONS_PER_HOUR -%token MAX_QUERIES_PER_HOUR -%token MAX_ROWS -%token MAX_SIZE_SYM %token MAX_SYM /* SQL-2003-N */ -%token MAX_UPDATES_PER_HOUR -%token MAX_STATEMENT_TIME_SYM -%token MAX_USER_CONNECTIONS_SYM %token MAXVALUE_SYM /* SQL-2003-N */ %token MEDIAN_SYM %token MEDIUMBLOB %token MEDIUMINT %token MEDIUMTEXT -%token MEDIUM_SYM -%token MEMORY_SYM -%token MERGE_SYM /* SQL-2003-R */ -%token MESSAGE_TEXT_SYM /* SQL-2003-N */ -%token MICROSECOND_SYM /* MYSQL-FUNC */ -%token MIGRATE_SYM %token MINUTE_MICROSECOND_SYM %token MINUTE_SECOND_SYM -%token MINUTE_SYM /* SQL-2003-R */ -%token MINVALUE_SYM -%token MIN_ROWS %token MIN_SYM /* SQL-2003-N */ -%token MODE_SYM %token MODIFIES_SYM /* SQL-2003-R */ -%token MODIFY_SYM %token MOD_SYM /* SQL-2003-N */ -%token MONTH_SYM /* SQL-2003-R */ -%token MULTILINESTRING -%token MULTIPOINT -%token MULTIPOLYGON -%token MUTEX_SYM -%token MYSQL_SYM -%token MYSQL_ERRNO_SYM -%token NAMES_SYM /* SQL-2003-N */ -%token NAME_SYM /* SQL-2003-N */ -%token NATIONAL_SYM /* SQL-2003-R */ +%token MYSQL_CONCAT_SYM /* OPERATOR */ %token NATURAL /* SQL-2003-R */ %token NCHAR_STRING -%token 
NCHAR_SYM /* SQL-2003-R */ %token NE /* OPERATOR */ %token NEG -%token NEW_SYM /* SQL-2003-R */ -%token NEXT_SYM /* SQL-2003-N */ -%token NEXTVAL_SYM /* PostgreSQL sequence function */ -%token NOCACHE_SYM -%token NOCYCLE_SYM -%token NODEGROUP_SYM -%token NONE_SYM /* SQL-2003-R */ %token NOT2_SYM %token NOT_SYM /* SQL-2003-R */ -%token NOTFOUND_SYM /* Oracle-R */ %token NOW_SYM -%token NO_SYM /* SQL-2003-R */ -%token NOMAXVALUE_SYM -%token NOMINVALUE_SYM -%token NO_WAIT_SYM -%token NOWAIT_SYM %token NO_WRITE_TO_BINLOG %token NTILE_SYM %token NULL_SYM /* SQL-2003-R */ %token NUM -%token NUMBER_SYM /* SQL-2003-N */ %token NUMERIC_SYM /* SQL-2003-R */ %token NTH_VALUE_SYM /* SQL-2011 */ -%token NVARCHAR_SYM -%token OF_SYM /* SQL-1992-R, Oracle-R */ -%token OFFSET_SYM -%token OLD_PASSWORD_SYM %token ON /* SQL-2003-R */ -%token ONE_SYM -%token ONLY_SYM /* SQL-2003-R */ -%token ONLINE_SYM -%token OPEN_SYM /* SQL-2003-R */ %token OPTIMIZE -%token OPTIONS_SYM -%token OPTION /* SQL-2003-N */ %token OPTIONALLY +%token ORACLE_CONCAT_SYM /* INTERNAL */ %token OR2_SYM %token ORDER_SYM /* SQL-2003-R */ -%token OR_OR_SYM /* OPERATOR */ %token OR_SYM /* SQL-2003-R */ -%token OTHERS_SYM /* SQL-2011-N */ %token OUTER %token OUTFILE %token OUT_SYM /* SQL-2003-R */ %token OVER_SYM -%token OWNER_SYM -%token PACKAGE_SYM /* Oracle-R */ -%token PACK_KEYS_SYM -%token PAGE_SYM %token PAGE_CHECKSUM_SYM %token PARAM_MARKER -%token PARSER_SYM %token PARSE_VCOL_EXPR_SYM -%token PARTIAL /* SQL-2003-N */ %token PARTITION_SYM /* SQL-2003-R */ -%token PARTITIONS_SYM -%token PARTITIONING_SYM -%token PASSWORD_SYM %token PERCENT_RANK_SYM %token PERCENTILE_CONT_SYM %token PERCENTILE_DISC_SYM -%token PERIOD_SYM /* SQL-2011-R */ -%token PERSISTENT_SYM -%token PHASE_SYM -%token PLUGINS_SYM -%token PLUGIN_SYM -%token POINT_SYM -%token POLYGON -%token PORT_SYM %token POSITION_SYM /* SQL-2003-N */ -%token PRECEDES_SYM /* MYSQL */ -%token PRECEDING_SYM /* SQL-2011-N */ %token PRECISION /* SQL-2003-R */ -%token PREPARE_SYM /* SQL-2003-R */ -%token PRESERVE_SYM -%token PREV_SYM -%token PREVIOUS_SYM %token PRIMARY_SYM /* SQL-2003-R */ -%token PRIVILEGES /* SQL-2003-N */ %token PROCEDURE_SYM /* SQL-2003-R */ -%token PROCESS -%token PROCESSLIST_SYM -%token PROFILE_SYM -%token PROFILES_SYM -%token PROXY_SYM %token PURGE -%token QUARTER_SYM -%token QUERY_SYM -%token QUICK -%token RAISE_SYM /* Oracle-PLSQL-R */ %token RANGE_SYM /* SQL-2003-R */ %token RANK_SYM -%token RAW /* Oracle */ %token READS_SYM /* SQL-2003-R */ -%token READ_ONLY_SYM %token READ_SYM /* SQL-2003-N */ %token READ_WRITE_SYM %token REAL /* SQL-2003-R */ -%token REBUILD_SYM -%token RECOVER_SYM %token RECURSIVE_SYM -%token REDOFILE_SYM -%token REDO_BUFFER_SIZE_SYM -%token REDUNDANT_SYM +%token REF_SYSTEM_ID_SYM %token REFERENCES /* SQL-2003-R */ %token REGEXP -%token RELAY -%token RELAYLOG_SYM -%token RELAY_LOG_FILE_SYM -%token RELAY_LOG_POS_SYM -%token RELAY_THREAD %token RELEASE_SYM /* SQL-2003-R */ -%token RELOAD -%token REMOVE_SYM %token RENAME -%token REORGANIZE_SYM -%token REPAIR -%token REPEATABLE_SYM /* SQL-2003-N */ %token REPEAT_SYM /* MYSQL-FUNC */ %token REPLACE /* MYSQL-FUNC */ -%token REPLICATION %token REQUIRE_SYM -%token RESET_SYM -%token RESTART_SYM %token RESIGNAL_SYM /* SQL-2003-R */ -%token RESOURCES -%token RESTORE_SYM %token RESTRICT -%token RESUME_SYM -%token RETURNED_SQLSTATE_SYM /* SQL-2003-N */ %token RETURNING_SYM -%token RETURNS_SYM /* SQL-2003-R */ %token RETURN_SYM /* SQL-2003-R */ -%token REUSE_SYM /* Oracle-R */ -%token REVERSE_SYM %token 
REVOKE /* SQL-2003-R */ %token RIGHT /* SQL-2003-R */ -%token ROLE_SYM -%token ROLLBACK_SYM /* SQL-2003-R */ -%token ROLLUP_SYM /* SQL-2003-R */ -%token ROUTINE_SYM /* SQL-2003-N */ -%token ROWCOUNT_SYM /* Oracle-N */ -%token ROW_SYM /* SQL-2003-R */ %token ROWS_SYM /* SQL-2003-R */ -%token ROWTYPE_SYM /* Oracle-PLSQL-R */ -%token ROW_COUNT_SYM /* SQL-2003-N */ -%token ROW_FORMAT_SYM %token ROW_NUMBER_SYM -%token RTREE_SYM -%token SAVEPOINT_SYM /* SQL-2003-R */ -%token SCHEDULE_SYM -%token SCHEMA_NAME_SYM /* SQL-2003-N */ %token SECOND_MICROSECOND_SYM -%token SECOND_SYM /* SQL-2003-R */ -%token SECURITY_SYM /* SQL-2003-N */ %token SELECT_SYM /* SQL-2003-R */ %token SENSITIVE_SYM /* FUTURE-USE */ %token SEPARATOR_SYM -%token SEQUENCE_SYM -%token SERIALIZABLE_SYM /* SQL-2003-N */ -%token SERIAL_SYM -%token SESSION_SYM /* SQL-2003-N */ -%token SERVER_SYM %token SERVER_OPTIONS %token SET /* SQL-2003-R */ -%token SETVAL_SYM /* PostgreSQL sequence function */ %token SET_VAR -%token SHARE_SYM %token SHIFT_LEFT /* OPERATOR */ %token SHIFT_RIGHT /* OPERATOR */ %token SHOW -%token SHUTDOWN %token SIGNAL_SYM /* SQL-2003-R */ -%token SIGNED_SYM -%token SIMPLE_SYM /* SQL-2003-N */ -%token SLAVE -%token SLAVES -%token SLAVE_POS_SYM -%token SLOW %token SMALLINT /* SQL-2003-R */ -%token SNAPSHOT_SYM -%token SOCKET_SYM -%token SOFT_SYM -%token SONAME_SYM -%token SOUNDS_SYM -%token SOURCE_SYM %token SPATIAL_SYM %token SPECIFIC_SYM /* SQL-2003-R */ %token SQLEXCEPTION_SYM /* SQL-2003-R */ %token SQLSTATE_SYM /* SQL-2003-R */ %token SQLWARNING_SYM /* SQL-2003-R */ %token SQL_BIG_RESULT -%token SQL_BUFFER_RESULT -%token SQL_CACHE_SYM -%token SQL_CALC_FOUND_ROWS -%token SQL_NO_CACHE_SYM %token SQL_SMALL_RESULT %token SQL_SYM /* SQL-2003-R */ -%token SQL_THREAD -%token REF_SYSTEM_ID_SYM %token SSL_SYM %token STARTING -%token STARTS_SYM -%token START_SYM /* SQL-2003-R */ -%token STATEMENT_SYM %token STATS_AUTO_RECALC_SYM %token STATS_PERSISTENT_SYM %token STATS_SAMPLE_PAGES_SYM -%token STATUS_SYM %token STDDEV_SAMP_SYM /* SQL-2003-N */ %token STD_SYM -%token STOP_SYM -%token STORAGE_SYM -%token STORED_SYM %token STRAIGHT_JOIN -%token STRING_SYM -%token SUBCLASS_ORIGIN_SYM /* SQL-2003-N */ -%token SUBDATE_SYM -%token SUBJECT_SYM -%token SUBPARTITIONS_SYM -%token SUBPARTITION_SYM %token SUBSTRING /* SQL-2003-N */ %token SUM_SYM /* SQL-2003-N */ -%token SUPER_SYM -%token SUSPEND_SYM -%token SWAPS_SYM -%token SWITCHES_SYM %token SYSDATE -%token SYSTEM /* SQL-2011-R */ -%token SYSTEM_TIME_SYM /* SQL-2011-R */ -%token TABLES -%token TABLESPACE %token TABLE_REF_PRIORITY %token TABLE_SYM /* SQL-2003-R */ -%token TABLE_CHECKSUM_SYM -%token TABLE_NAME_SYM /* SQL-2003-N */ -%token TEMPORARY /* SQL-2003-N */ -%token TEMPTABLE_SYM %token TERMINATED %token TEXT_STRING -%token TEXT_SYM -%token THAN_SYM %token THEN_SYM /* SQL-2003-R */ -%token TIES_SYM /* SQL-2011-N */ -%token TIMESTAMP /* SQL-2003-R */ -%token TIMESTAMP_ADD -%token TIMESTAMP_DIFF -%token TIME_SYM /* SQL-2003-R */ %token TINYBLOB %token TINYINT %token TINYTEXT %token TO_SYM /* SQL-2003-R */ %token TRAILING /* SQL-2003-R */ -%token TRANSACTION_SYM -%token TRANSACTIONAL_SYM -%token TRIGGERS_SYM %token TRIGGER_SYM /* SQL-2003-R */ %token TRIM /* SQL-2003-N */ -%token TRIM_ORACLE %token TRUE_SYM /* SQL-2003-R */ -%token TRUNCATE_SYM -%token TYPES_SYM -%token TYPE_SYM /* SQL-2003-N */ -%token UDF_RETURNS_SYM %token ULONGLONG_NUM -%token UNBOUNDED_SYM /* SQL-2011-N */ -%token UNCOMMITTED_SYM /* SQL-2003-N */ -%token UNDEFINED_SYM %token UNDERSCORE_CHARSET -%token 
UNDOFILE_SYM -%token UNDO_BUFFER_SIZE_SYM %token UNDO_SYM /* FUTURE-USE */ -%token UNICODE_SYM -%token UNINSTALL_SYM %token UNION_SYM /* SQL-2003-R */ %token UNIQUE_SYM -%token UNKNOWN_SYM /* SQL-2003-R */ %token UNLOCK_SYM %token UNSIGNED -%token UNTIL_SYM %token UPDATE_SYM /* SQL-2003-R */ -%token UPGRADE_SYM %token USAGE /* SQL-2003-N */ -%token USER_SYM /* SQL-2003-R */ -%token USE_FRM %token USE_SYM %token USING /* SQL-2003-R */ %token UTC_DATE_SYM @@ -1592,80 +1190,595 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %token VALUES /* SQL-2003-R */ %token VALUES_IN_SYM %token VALUES_LESS_SYM -%token VALUE_SYM /* SQL-2003-R */ %token VARBINARY %token VARCHAR /* SQL-2003-R */ -%token VARCHAR2 /* Oracle */ -%token VARIABLES %token VARIANCE_SYM %token VARYING /* SQL-2003-R */ %token VAR_SAMP_SYM -%token VERSIONING_SYM /* SQL-2011-R */ -%token VIA_SYM -%token VIEW_SYM /* SQL-2003-N */ -%token VIRTUAL_SYM -%token WAIT_SYM -%token WARNINGS -%token WEEK_SYM -%token WEIGHT_STRING_SYM %token WHEN_SYM /* SQL-2003-R */ %token WHERE /* SQL-2003-R */ -%token WINDOW_SYM %token WHILE_SYM %token WITH /* SQL-2003-R */ -%token WITHIN -%token WITHOUT /* SQL-2003-R */ %token WITH_CUBE_SYM /* INTERNAL */ %token WITH_ROLLUP_SYM /* INTERNAL */ %token WITH_SYSTEM_SYM /* INTERNAL */ -%token WORK_SYM /* SQL-2003-N */ -%token WRAPPER_SYM -%token WRITE_SYM /* SQL-2003-N */ -%token X509_SYM -%token XA_SYM -%token XML_SYM %token XOR %token YEAR_MONTH_SYM -%token YEAR_SYM /* SQL-2003-R */ %token ZEROFILL -%token IMPOSSIBLE_ACTION /* To avoid warning for yyerrlab1 */ +%token IMPOSSIBLE_ACTION /* To avoid warning for yyerrlab1 */ + + +/* + Keywords that have different reserved status in std/oracle modes. +*/ +%token BODY_SYM /* Oracle-R */ +%token COMMENT_SYM +%token ELSIF_SYM /* Oracle, reserved in PL/SQL*/ +%token GOTO_SYM /* Oracle, reserved in PL/SQL*/ +%token OTHERS_SYM /* SQL-2011-N */ +%token PACKAGE_SYM /* Oracle-R */ +%token RAISE_SYM /* Oracle-PLSQL-R */ +%token ROWTYPE_SYM /* Oracle-PLSQL-R */ + +/* + Non-reserved keywords +*/ + +%token ACTION /* SQL-2003-N */ +%token ADMIN_SYM /* SQL-2003-N */ +%token ADDDATE_SYM /* MYSQL-FUNC */ +%token AFTER_SYM /* SQL-2003-N */ +%token AGAINST +%token AGGREGATE_SYM +%token ALGORITHM_SYM +%token ALWAYS_SYM +%token ANY_SYM /* SQL-2003-R */ +%token ASCII_SYM /* MYSQL-FUNC */ +%token AT_SYM /* SQL-2003-R */ +%token ATOMIC_SYM /* SQL-2003-R */ +%token AUTHORS_SYM +%token AUTOEXTEND_SIZE_SYM +%token AUTO_INC +%token AUTO_SYM +%token AVG_ROW_LENGTH +%token AVG_SYM /* SQL-2003-N */ +%token BACKUP_SYM +%token BEGIN_SYM /* SQL-2003-R, PLSQL-R */ +%token BINLOG_SYM +%token BIT_SYM /* MYSQL-FUNC */ +%token BLOCK_SYM +%token BOOL_SYM +%token BOOLEAN_SYM /* SQL-2003-R, PLSQL-R */ +%token BTREE_SYM +%token BYTE_SYM +%token CACHE_SYM +%token CASCADED /* SQL-2003-R */ +%token CATALOG_NAME_SYM /* SQL-2003-N */ +%token CHAIN_SYM /* SQL-2003-N */ +%token CHANGED +%token CHARSET +%token CHECKPOINT_SYM +%token CHECKSUM_SYM +%token CIPHER_SYM +%token CLASS_ORIGIN_SYM /* SQL-2003-N */ +%token CLIENT_SYM +%token CLOB /* SQL-2003-R */ +%token CLOSE_SYM /* SQL-2003-R */ +%token COALESCE /* SQL-2003-N */ +%token CODE_SYM +%token COLLATION_SYM /* SQL-2003-N */ +%token COLUMNS +%token COLUMN_ADD_SYM +%token COLUMN_CHECK_SYM +%token COLUMN_CREATE_SYM +%token COLUMN_DELETE_SYM +%token COLUMN_GET_SYM +%token COLUMN_SYM /* SQL-2003-R */ +%token COLUMN_NAME_SYM /* SQL-2003-N */ +%token COMMITTED_SYM /* SQL-2003-N */ +%token COMMIT_SYM /* SQL-2003-R */ +%token COMPACT_SYM +%token 
COMPLETION_SYM +%token COMPRESSED_SYM +%token CONCURRENT +%token CONNECTION_SYM +%token CONSISTENT_SYM +%token CONSTRAINT_CATALOG_SYM /* SQL-2003-N */ +%token CONSTRAINT_NAME_SYM /* SQL-2003-N */ +%token CONSTRAINT_SCHEMA_SYM /* SQL-2003-N */ +%token CONTAINS_SYM /* SQL-2003-N */ +%token CONTEXT_SYM +%token CONTRIBUTORS_SYM +%token CPU_SYM +%token CUBE_SYM /* SQL-2003-R */ +%token CURRENT_SYM /* SQL-2003-R */ +%token CURRENT_POS_SYM +%token CURSOR_NAME_SYM /* SQL-2003-N */ +%token CYCLE_SYM +%token DATAFILE_SYM +%token DATA_SYM /* SQL-2003-N */ +%token DATETIME +%token DATE_FORMAT_SYM /* MYSQL-FUNC */ +%token DATE_SYM /* SQL-2003-R, Oracle-R, PLSQL-R */ +%token DAY_SYM /* SQL-2003-R */ +%token DEALLOCATE_SYM /* SQL-2003-R */ +%token DECODE_SYM /* Oracle function, non-reserved */ +%token DEFINER_SYM +%token DELAYED_SYM +%token DELAY_KEY_WRITE_SYM +%token DES_KEY_FILE +%token DIAGNOSTICS_SYM /* SQL-2003-N */ +%token DIRECTORY_SYM +%token DISABLE_SYM +%token DISCARD +%token DISK_SYM +%token DO_SYM +%token DUMPFILE +%token DUPLICATE_SYM +%token DYNAMIC_SYM /* SQL-2003-R */ +%token ENABLE_SYM +%token END /* SQL-2003-R, PLSQL-R */ +%token ENDS_SYM +%token ENGINES_SYM +%token ENGINE_SYM +%token ENUM +%token ERROR_SYM +%token ERRORS +%token ESCAPE_SYM /* SQL-2003-R */ +%token EVENTS_SYM +%token EVENT_SYM +%token EVERY_SYM /* SQL-2003-N */ +%token EXCHANGE_SYM +%token EXAMINED_SYM +%token EXCLUDE_SYM /* SQL-2011-N */ +%token EXECUTE_SYM /* SQL-2003-R */ +%token EXCEPTION_SYM /* SQL-2003-N, Oracle-PLSQL-R */ +%token EXIT_SYM +%token EXPANSION_SYM +%token EXPORT_SYM +%token EXTENDED_SYM +%token EXTENT_SIZE_SYM +%token FAST_SYM +%token FAULTS_SYM +%token FILE_SYM +%token FIRST_SYM /* SQL-2003-N */ +%token FIXED_SYM +%token FLUSH_SYM +%token FOLLOWS_SYM /* MYSQL trigger*/ +%token FOLLOWING_SYM /* SQL-2011-N */ +%token FORCE_SYM +%token FORMAT_SYM +%token FOUND_SYM /* SQL-2003-R */ +%token FULL /* SQL-2003-R */ +%token FUNCTION_SYM /* SQL-2003-R */ +%token GENERAL +%token GENERATED_SYM +%token GEOMETRYCOLLECTION +%token GEOMETRY_SYM +%token GET_FORMAT /* MYSQL-FUNC */ +%token GET_SYM /* SQL-2003-R */ +%token GLOBAL_SYM /* SQL-2003-R */ +%token GRANTS +%token HANDLER_SYM +%token HARD_SYM +%token HASH_SYM +%token HELP_SYM +%token HIGH_PRIORITY +%token HISTORY_SYM /* MYSQL */ +%token HOST_SYM +%token HOSTS_SYM +%token HOUR_SYM /* SQL-2003-R */ +%token ID_SYM /* MYSQL */ +%token IDENTIFIED_SYM +%token IGNORE_SERVER_IDS_SYM +%token IMMEDIATE_SYM /* SQL-2003-R */ +%token IMPORT +%token INCREMENT_SYM +%token INDEXES +%token INITIAL_SIZE_SYM +%token INSERT_METHOD +%token INSTALL_SYM +%token INVOKER_SYM +%token IO_SYM +%token IPC_SYM +%token ISOLATION /* SQL-2003-R */ +%token ISOPEN_SYM /* Oracle-N */ +%token ISSUER_SYM +%token INVISIBLE_SYM +%token JSON_SYM +%token KEY_BLOCK_SIZE +%token LANGUAGE_SYM /* SQL-2003-R */ +%token LAST_SYM /* SQL-2003-N */ +%token LAST_VALUE +%token LASTVAL_SYM /* PostgreSQL sequence function */ +%token LEAVES +%token LESS_SYM +%token LEVEL_SYM +%token LINESTRING +%token LIST_SYM +%token LOCAL_SYM /* SQL-2003-R */ +%token LOCKS_SYM +%token LOGFILE_SYM +%token LOGS_SYM +%token MASTER_CONNECT_RETRY_SYM +%token MASTER_DELAY_SYM +%token MASTER_GTID_POS_SYM +%token MASTER_HOST_SYM +%token MASTER_LOG_FILE_SYM +%token MASTER_LOG_POS_SYM +%token MASTER_PASSWORD_SYM +%token MASTER_PORT_SYM +%token MASTER_SERVER_ID_SYM +%token MASTER_SSL_CAPATH_SYM +%token MASTER_SSL_CA_SYM +%token MASTER_SSL_CERT_SYM +%token MASTER_SSL_CIPHER_SYM +%token MASTER_SSL_CRL_SYM +%token MASTER_SSL_CRLPATH_SYM 
+%token MASTER_SSL_KEY_SYM +%token MASTER_SSL_SYM +%token MASTER_SYM +%token MASTER_USER_SYM +%token MASTER_USE_GTID_SYM +%token MASTER_HEARTBEAT_PERIOD_SYM +%token MAX_CONNECTIONS_PER_HOUR +%token MAX_QUERIES_PER_HOUR +%token MAX_ROWS +%token MAX_SIZE_SYM +%token MAX_UPDATES_PER_HOUR +%token MAX_STATEMENT_TIME_SYM +%token MAX_USER_CONNECTIONS_SYM +%token MEDIUM_SYM +%token MEMORY_SYM +%token MERGE_SYM /* SQL-2003-R */ +%token MESSAGE_TEXT_SYM /* SQL-2003-N */ +%token MICROSECOND_SYM /* MYSQL-FUNC */ +%token MIGRATE_SYM +%token MINUTE_SYM /* SQL-2003-R */ +%token MINVALUE_SYM +%token MIN_ROWS +%token MODE_SYM +%token MODIFY_SYM +%token MONTH_SYM /* SQL-2003-R */ +%token MULTILINESTRING +%token MULTIPOINT +%token MULTIPOLYGON +%token MUTEX_SYM +%token MYSQL_SYM +%token MYSQL_ERRNO_SYM +%token NAMES_SYM /* SQL-2003-N */ +%token NAME_SYM /* SQL-2003-N */ +%token NATIONAL_SYM /* SQL-2003-R */ +%token NCHAR_SYM /* SQL-2003-R */ +%token NEW_SYM /* SQL-2003-R */ +%token NEXT_SYM /* SQL-2003-N */ +%token NEXTVAL_SYM /* PostgreSQL sequence function */ +%token NOCACHE_SYM +%token NOCYCLE_SYM +%token NODEGROUP_SYM +%token NONE_SYM /* SQL-2003-R */ +%token NOTFOUND_SYM /* Oracle-R */ +%token NO_SYM /* SQL-2003-R */ +%token NOMAXVALUE_SYM +%token NOMINVALUE_SYM +%token NO_WAIT_SYM +%token NOWAIT_SYM +%token NUMBER_SYM /* SQL-2003-N, Oracle-R, PLSQL-R */ +%token NVARCHAR_SYM +%token OF_SYM /* SQL-1992-R, Oracle-R */ +%token OFFSET_SYM +%token OLD_PASSWORD_SYM +%token ONE_SYM +%token ONLY_SYM /* SQL-2003-R */ +%token ONLINE_SYM +%token OPEN_SYM /* SQL-2003-R */ +%token OPTIONS_SYM +%token OPTION /* SQL-2003-N */ +%token OWNER_SYM +%token PACK_KEYS_SYM +%token PAGE_SYM +%token PARSER_SYM +%token PARTIAL /* SQL-2003-N */ +%token PARTITIONS_SYM +%token PARTITIONING_SYM +%token PASSWORD_SYM +%token PERIOD_SYM /* SQL-2011-R */ +%token PERSISTENT_SYM +%token PHASE_SYM +%token PLUGINS_SYM +%token PLUGIN_SYM +%token POINT_SYM +%token POLYGON +%token PORT_SYM +%token PRECEDES_SYM /* MYSQL */ +%token PRECEDING_SYM /* SQL-2011-N */ +%token PREPARE_SYM /* SQL-2003-R */ +%token PRESERVE_SYM +%token PREV_SYM +%token PREVIOUS_SYM +%token PRIVILEGES /* SQL-2003-N */ +%token PROCESS +%token PROCESSLIST_SYM +%token PROFILE_SYM +%token PROFILES_SYM +%token PROXY_SYM +%token QUARTER_SYM +%token QUERY_SYM +%token QUICK +%token RAW /* Oracle-R */ +%token READ_ONLY_SYM +%token REBUILD_SYM +%token RECOVER_SYM +%token REDOFILE_SYM +%token REDO_BUFFER_SIZE_SYM +%token REDUNDANT_SYM +%token RELAY +%token RELAYLOG_SYM +%token RELAY_LOG_FILE_SYM +%token RELAY_LOG_POS_SYM +%token RELAY_THREAD +%token RELOAD +%token REMOVE_SYM +%token REORGANIZE_SYM +%token REPAIR +%token REPEATABLE_SYM /* SQL-2003-N */ +%token REPLICATION +%token RESET_SYM +%token RESTART_SYM +%token RESOURCES +%token RESTORE_SYM +%token RESUME_SYM +%token RETURNED_SQLSTATE_SYM /* SQL-2003-N */ +%token RETURNS_SYM /* SQL-2003-R */ +%token REUSE_SYM /* Oracle-R */ +%token REVERSE_SYM +%token ROLE_SYM +%token ROLLBACK_SYM /* SQL-2003-R */ +%token ROLLUP_SYM /* SQL-2003-R */ +%token ROUTINE_SYM /* SQL-2003-N */ +%token ROWCOUNT_SYM /* Oracle-N */ +%token ROW_SYM /* SQL-2003-R */ +%token ROW_COUNT_SYM /* SQL-2003-N */ +%token ROW_FORMAT_SYM +%token RTREE_SYM +%token SAVEPOINT_SYM /* SQL-2003-R */ +%token SCHEDULE_SYM +%token SCHEMA_NAME_SYM /* SQL-2003-N */ +%token SECOND_SYM /* SQL-2003-R */ +%token SECURITY_SYM /* SQL-2003-N */ +%token SEQUENCE_SYM +%token SERIALIZABLE_SYM /* SQL-2003-N */ +%token SERIAL_SYM +%token SESSION_SYM /* SQL-2003-N */ +%token SERVER_SYM 
+%token SETVAL_SYM /* PostgreSQL sequence function */ +%token SHARE_SYM +%token SHUTDOWN +%token SIGNED_SYM +%token SIMPLE_SYM /* SQL-2003-N */ +%token SLAVE +%token SLAVES +%token SLAVE_POS_SYM +%token SLOW +%token SNAPSHOT_SYM +%token SOCKET_SYM +%token SOFT_SYM +%token SONAME_SYM +%token SOUNDS_SYM +%token SOURCE_SYM +%token SQL_BUFFER_RESULT +%token SQL_CACHE_SYM +%token SQL_CALC_FOUND_ROWS +%token SQL_NO_CACHE_SYM +%token SQL_THREAD +%token STARTS_SYM +%token START_SYM /* SQL-2003-R */ +%token STATEMENT_SYM +%token STATUS_SYM +%token STOP_SYM +%token STORAGE_SYM +%token STORED_SYM +%token STRING_SYM +%token SUBCLASS_ORIGIN_SYM /* SQL-2003-N */ +%token SUBDATE_SYM +%token SUBJECT_SYM +%token SUBPARTITIONS_SYM +%token SUBPARTITION_SYM +%token SUPER_SYM +%token SUSPEND_SYM +%token SWAPS_SYM +%token SWITCHES_SYM +%token SYSTEM /* SQL-2011-R */ +%token SYSTEM_TIME_SYM /* SQL-2011-R */ +%token TABLES +%token TABLESPACE +%token TABLE_CHECKSUM_SYM +%token TABLE_NAME_SYM /* SQL-2003-N */ +%token TEMPORARY /* SQL-2003-N */ +%token TEMPTABLE_SYM +%token TEXT_SYM +%token THAN_SYM +%token TIES_SYM /* SQL-2011-N */ +%token TIMESTAMP /* SQL-2003-R */ +%token TIMESTAMP_ADD +%token TIMESTAMP_DIFF +%token TIME_SYM /* SQL-2003-R, Oracle-R */ +%token TRANSACTION_SYM +%token TRANSACTIONAL_SYM +%token TRIGGERS_SYM +%token TRIM_ORACLE +%token TRUNCATE_SYM +%token TYPES_SYM +%token TYPE_SYM /* SQL-2003-N */ +%token UDF_RETURNS_SYM +%token UNBOUNDED_SYM /* SQL-2011-N */ +%token UNCOMMITTED_SYM /* SQL-2003-N */ +%token UNDEFINED_SYM +%token UNDOFILE_SYM +%token UNDO_BUFFER_SIZE_SYM +%token UNICODE_SYM +%token UNINSTALL_SYM +%token UNKNOWN_SYM /* SQL-2003-R */ +%token UNTIL_SYM +%token UPGRADE_SYM +%token USER_SYM /* SQL-2003-R */ +%token USE_FRM +%token VALUE_SYM /* SQL-2003-R */ +%token VARCHAR2 /* Oracle-R, PLSQL-R */ +%token VARIABLES +%token VERSIONING_SYM /* SQL-2011-R */ +%token VIA_SYM +%token VIEW_SYM /* SQL-2003-N */ +%token VIRTUAL_SYM +%token WAIT_SYM +%token WARNINGS +%token WEEK_SYM +%token WEIGHT_STRING_SYM +%token WINDOW_SYM /* SQL-2003-R */ +%token WITHIN +%token WITHOUT /* SQL-2003-R */ +%token WORK_SYM /* SQL-2003-N */ +%token WRAPPER_SYM +%token WRITE_SYM /* SQL-2003-N */ +%token X509_SYM +%token XA_SYM +%token XML_SYM +%token YEAR_SYM /* SQL-2003-R */ + %left JOIN_SYM INNER_SYM STRAIGHT_JOIN CROSS LEFT RIGHT /* A dummy token to force the priority of table_ref production in a join. */ %left TABLE_REF_PRIORITY %left SET_VAR -%left OR_OR_SYM OR_SYM OR2_SYM +%left OR_SYM OR2_SYM %left XOR %left AND_SYM AND_AND_SYM + +%left PREC_BELOW_NOT +%left NOT_SYM + %left BETWEEN_SYM CASE_SYM WHEN_SYM THEN_SYM ELSE %left '=' EQUAL_SYM GE '>' LE '<' NE IS LIKE REGEXP IN_SYM %left '|' %left '&' %left SHIFT_LEFT SHIFT_RIGHT -%left '-' '+' +%left '-' '+' ORACLE_CONCAT_SYM %left '*' '/' '%' DIV_SYM MOD_SYM %left '^' -%left NEG '~' -%right NOT_SYM NOT2_SYM -%right BINARY COLLATE_SYM -%left INTERVAL_SYM +%left MYSQL_CONCAT_SYM +%left NEG '~' NOT2_SYM BINARY +%left COLLATE_SYM + +/* + Tokens that can change their meaning from identifier to something else + in certain context. 
+ + - TRANSACTION: identifier, history unit: + SELECT transaction FROM t1; + SELECT * FROM t1 FOR SYSTEM_TIME AS OF TRANSACTION @var; + + - TIMESTAMP: identifier, literal, history unit: + SELECT timestamp FROM t1; + SELECT TIMESTAMP '2001-01-01 10:20:30'; + SELECT * FROM t1 FOR SYSTEM_TIME AS OF TIMESTAMP CONCAT(@date,' ',@time); + + - PERIOD: identifier, period for system time: + SELECT period FROM t1; + ALTER TABLE DROP PERIOD FOR SYSTEM TIME; + + - SYSTEM: identifier, system versioning: + SELECT system FROM t1; + ALTER TABLE DROP SYSTEM VERSIONING; + + Note: we only need the tokens here that cause shift/reduce conflicts + with keyword identifiers. For example: + opt_clause1: %empty | KEYWORD ... ; + clause2: opt_clause1 ident; + KEYWORD can appear both in opt_clause1 and in "ident" through the "keyword" + rule. So the parser reports a conflict on how to interpret KEYWORD: + - as the start of a non-empty branch in opt_clause1, or + - as an identifier which follows the empty branch in opt_clause1. + + Example#1: + alter_list_item: + DROP opt_column opt_if_exists_table_element field_ident + | DROP SYSTEM VERSIONING_SYM + SYSTEM can be a keyword in field_ident, or can be the start of + SYSTEM VERSIONING. + + Example#2: + system_time_expr: AS OF_SYM history_point + history_point: opt_history_unit bit_expr + opt_history_unit: | TRANSACTION_SYM + TRANSACTION can be a non-empty history unit, or can be an identifier + in bit_expr. + + In the grammar below we use %prec to explicitly tell Bison to go + through the empty branch in the optional rule only when the lookahead + token does not belong to a small set of selected tokens. + + Tokens NEXT_SYM and PREVIOUS_SYM also change their meaning from + identifiers to sequence operations when followed by VALUE_SYM: + SELECT NEXT VALUE FOR s1, PREVIOUS VALUE FOR s1; + but we don't need to list them here as they do not seem to cause + conflicts (according to bison -v), as both meanings + (as identifier, and as a sequence operation) are parts of the same target + column_default_non_parenthesized_expr, and there are no optional + clauses between the start of column_default_non_parenthesized_expr + and NEXT_SYM / PREVIOUS_SYM. +*/ +%left PREC_BELOW_IDENTIFIER_OPT_SPECIAL_CASE +%left TRANSACTION_SYM TIMESTAMP PERIOD_SYM SYSTEM + + +/* + Tokens that can appear in a token contraction in the second position + and change the meaning of the previous token.
+ + - TEXT_STRING: changes the meaning of TIMESTAMP/TIME/DATE + from identifier to literal: + SELECT timestamp FROM t1; + SELECT TIMESTAMP'2001-01-01 00:00:00' FROM t1; + + - Parenthesis: changes the meaning of TIMESTAMP/TIME/DATE + from identifiers to CAST-alike functions: + SELECT timestamp FROM t1; + SELECT timestamp(1) FROM t1; + + - VALUE: changes NEXT and PREVIOUS from identifier to sequence operation: + SELECT next, previous FROM t1; + SELECT NEXT VALUE FOR s1, PREVIOUS VALUE FOR s1; + + - VERSIONING: changes SYSTEM from identifier to SYSTEM VERSIONING + SELECT system FROM t1; + ALTER TABLE t1 ADD SYSTEM VERSIONING; +*/ +%left PREC_BELOW_CONTRACTION_TOKEN2 +%left TEXT_STRING '(' VALUE_SYM VERSIONING_SYM %type - IDENT IDENT_QUOTED DECIMAL_NUM FLOAT_NUM NUM LONG_NUM + DECIMAL_NUM FLOAT_NUM NUM LONG_NUM HEX_NUM HEX_STRING LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident_or_text - IDENT_sys TEXT_STRING_sys TEXT_STRING_literal - opt_component key_cache_name - sp_opt_label BIN_NUM TEXT_STRING_filesystem ident_or_empty - opt_constraint constraint opt_ident ident_table_alias - sp_decl_ident + TEXT_STRING_sys TEXT_STRING_literal + key_cache_name + sp_opt_label BIN_NUM TEXT_STRING_filesystem + opt_constraint constraint opt_ident sp_block_label opt_place opt_db %type - label_ident sp_label +%type + IDENT_sys + ident + label_ident + sp_decl_ident + ident_or_empty + ident_table_alias + ident_sysvar_name + %type TEXT_STRING NCHAR_STRING @@ -1673,8 +1786,25 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %type opt_table_alias -%type - ident ident_with_tok_start +%type + IDENT + IDENT_QUOTED + IDENT_cli + ident_cli + +%type + keyword_data_type + keyword_ident + keyword_label + keyword_sp_block_section + keyword_sp_decl + keyword_sp_head + keyword_sp_var_and_label + keyword_sp_var_not_label + keyword_sysvar_name + keyword_sysvar_type + keyword_table_alias + keyword_verb_clause %type
table_ident table_ident_nodb references xid @@ -1725,7 +1855,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); opt_default_time_precision case_stmt_body opt_bin_mod opt_for_system_time_clause opt_if_exists_table_element opt_if_not_exists_table_element - opt_recursive opt_format_xid + opt_recursive opt_format_xid %type create_or_replace @@ -1769,6 +1899,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); variable variable_aux bool_pri predicate bit_expr parenthesized_expr table_wild simple_expr column_default_non_parenthesized_expr udf_expr + primary_expr string_factor_expr mysql_concatenation_expr + select_sublist_qualified_asterisk expr_or_default set_expr_or_default geometry_function signed_literal expr_or_literal opt_escape @@ -1856,10 +1988,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %type precision opt_precision float_options -%type keyword keyword_sp keyword_alias - keyword_sp_data_type - keyword_sp_not_data_type - %type user grant_user grant_role user_or_role current_role admin_option_for_role user_maybe_role @@ -1961,9 +2089,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); vcol_opt_attribute_list vcol_attribute opt_serial_attribute opt_serial_attribute_list serial_attribute explainable_command - opt_lock_wait_timeout + opt_lock_wait_timeout opt_delete_gtid_domain - asrow_attribute + asrow_attribute END_OF_INPUT %type call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt @@ -1984,7 +2112,6 @@ END_OF_INPUT %type sp_decl_idents sp_decl_idents_init_vars %type sp_handler_type sp_hcond_list -%type start_or_end %type sp_cond sp_hcond sqlstate signal_value opt_signal_value %type sp_decls sp_decl sp_decl_body sp_decl_variable_list %type sp_name @@ -2015,6 +2142,7 @@ END_OF_INPUT %type row_field_name row_field_definition %type row_field_definition_list row_type_body + %type opt_window_clause window_def_list window_def window_spec %type window_name %type opt_window_ref opt_window_frame_clause @@ -2025,9 +2153,9 @@ END_OF_INPUT %type '-' '+' '*' '/' '%' '(' ')' - ',' '!' '{' '}' '&' '|' AND_SYM OR_SYM OR_OR_SYM BETWEEN_SYM CASE_SYM + ',' '!' 
'{' '}' '&' '|' AND_SYM OR_SYM BETWEEN_SYM CASE_SYM THEN_SYM WHEN_SYM DIV_SYM MOD_SYM OR2_SYM AND_AND_SYM DELETE_SYM - ROLE_SYM + MYSQL_CONCAT_SYM ORACLE_CONCAT_SYM %type opt_with_clause with_clause @@ -2065,8 +2193,8 @@ rule: <-- starts at col 1 query: END_OF_INPUT { - if (!thd->bootstrap && - (!(thd->lex->select_lex.options & OPTION_FOUND_COMMENT))) + if (likely(!thd->bootstrap) && + unlikely(!(thd->lex->select_lex.options & OPTION_FOUND_COMMENT))) my_yyabort_error((ER_EMPTY_QUERY, MYF(0))); thd->lex->sql_command= SQLCOM_EMPTY_QUERY; @@ -2193,7 +2321,7 @@ prepare: PREPARE_SYM ident FROM prepare_src { LEX *lex= thd->lex; - if (lex->table_or_sp_used()) + if (unlikely(lex->table_or_sp_used())) my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0), "PREPARE..FROM")); lex->sql_command= SQLCOM_PREPARE; @@ -2221,7 +2349,7 @@ execute: {} | EXECUTE_SYM IMMEDIATE_SYM prepare_src { - if (Lex->table_or_sp_used()) + if (unlikely(Lex->table_or_sp_used())) my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0), "EXECUTE IMMEDIATE")); Lex->sql_command= SQLCOM_EXECUTE_IMMEDIATE; @@ -2235,7 +2363,7 @@ execute_using: | USING { Lex->expr_allows_subselect= false; } execute_var_list { - if (Lex->table_or_sp_used()) + if (unlikely(Lex->table_or_sp_used())) my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0), "EXECUTE..USING")); Lex->expr_allows_subselect= true; @@ -2250,7 +2378,8 @@ execute_var_list: execute_var_ident: expr_or_default { - if (Lex->prepared_stmt_params.push_back($1, thd->mem_root)) + if (unlikely(Lex->prepared_stmt_params.push_back($1, + thd->mem_root))) MYSQL_YYABORT; } ; @@ -2260,7 +2389,7 @@ execute_var_ident: help: HELP_SYM { - if (Lex->sphead) + if (unlikely(Lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "HELP")); } ident_or_text @@ -2360,20 +2489,21 @@ master_def: | MASTER_HEARTBEAT_PERIOD_SYM '=' NUM_literal { Lex->mi.heartbeat_period= (float) $3->val_real(); - if (Lex->mi.heartbeat_period > SLAVE_MAX_HEARTBEAT_PERIOD || - Lex->mi.heartbeat_period < 0.0) + if (unlikely(Lex->mi.heartbeat_period > + SLAVE_MAX_HEARTBEAT_PERIOD) || + unlikely(Lex->mi.heartbeat_period < 0.0)) my_yyabort_error((ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE, MYF(0), SLAVE_MAX_HEARTBEAT_PERIOD)); - if (Lex->mi.heartbeat_period > slave_net_timeout) + if (unlikely(Lex->mi.heartbeat_period > slave_net_timeout)) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX, ER_THD(thd, ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX)); } - if (Lex->mi.heartbeat_period < 0.001) + if (unlikely(Lex->mi.heartbeat_period < 0.001)) { - if (Lex->mi.heartbeat_period != 0.0) + if (unlikely(Lex->mi.heartbeat_period != 0.0)) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN, @@ -2471,19 +2601,19 @@ master_file_def: } | MASTER_USE_GTID_SYM '=' CURRENT_POS_SYM { - if (Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED) + if (unlikely(Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MASTER_use_gtid")); Lex->mi.use_gtid_opt= LEX_MASTER_INFO::LEX_GTID_CURRENT_POS; } | MASTER_USE_GTID_SYM '=' SLAVE_POS_SYM { - if (Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED) + if (unlikely(Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MASTER_use_gtid")); Lex->mi.use_gtid_opt= LEX_MASTER_INFO::LEX_GTID_SLAVE_POS; } | MASTER_USE_GTID_SYM '=' NO_SYM { - if (Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED) + if 
(unlikely(Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MASTER_use_gtid")); Lex->mi.use_gtid_opt= LEX_MASTER_INFO::LEX_GTID_NO; } @@ -2503,7 +2633,7 @@ connection_name: { Lex->mi.connection_name= $1; #ifdef HAVE_REPLICATION - if (check_master_connection_name(&$1)) + if (unlikely(check_master_connection_name(&$1))) my_yyabort_error((ER_WRONG_ARGUMENTS, MYF(0), "MASTER_CONNECTION_NAME")); #endif } @@ -2516,11 +2646,13 @@ create: { LEX *lex= thd->lex; lex->create_info.init(); - if (lex->set_command_with_check(SQLCOM_CREATE_TABLE, $2, $1 | $4)) + if (unlikely(lex->set_command_with_check(SQLCOM_CREATE_TABLE, $2, + $1 | $4))) MYSQL_YYABORT; - if (!lex->select_lex.add_table_to_list(thd, $5, NULL, - TL_OPTION_UPDATING, - TL_WRITE, MDL_EXCLUSIVE)) + if (unlikely(!lex->select_lex.add_table_to_list(thd, $5, NULL, + TL_OPTION_UPDATING, + TL_WRITE, + MDL_EXCLUSIVE))) MYSQL_YYABORT; lex->alter_info.reset(); /* @@ -2552,12 +2684,14 @@ create: { LEX *lex= thd->lex; lex->create_info.init(); - if (lex->set_command_with_check(SQLCOM_CREATE_SEQUENCE, $2, $1 | $4)) + if (unlikely(lex->set_command_with_check(SQLCOM_CREATE_SEQUENCE, $2, + $1 | $4))) MYSQL_YYABORT; - if (!lex->select_lex.add_table_to_list(thd, $5, NULL, - TL_OPTION_UPDATING, - TL_WRITE, MDL_EXCLUSIVE)) + if (unlikely(!lex->select_lex.add_table_to_list(thd, $5, NULL, + TL_OPTION_UPDATING, + TL_WRITE, + MDL_EXCLUSIVE))) MYSQL_YYABORT; /* @@ -2569,15 +2703,15 @@ create: lex->query_tables->open_strategy= TABLE_LIST::OPEN_STUB; lex->name= null_clex_str; lex->create_last_non_select_table= lex->last_table(); - if (!(lex->create_info.seq_create_info= new (thd->mem_root) - sequence_definition())) + if (unlikely(!(lex->create_info.seq_create_info= + new (thd->mem_root) sequence_definition()))) MYSQL_YYABORT; } opt_sequence opt_create_table_options { LEX *lex= thd->lex; - if (lex->create_info.seq_create_info->check_and_adjust(1)) + if (unlikely(lex->create_info.seq_create_info->check_and_adjust(1))) { my_error(ER_SEQUENCE_INVALID_DATA, MYF(0), lex->select_lex.table_list.first->db.str, @@ -2586,7 +2720,8 @@ create: } /* No fields specified, generate them */ - if (prepare_sequence_fields(thd, &lex->alter_info.create_list)) + if (unlikely(prepare_sequence_fields(thd, + &lex->alter_info.create_list))) MYSQL_YYABORT; /* CREATE SEQUENCE always creates a sequence */ @@ -2594,8 +2729,9 @@ create: Lex->create_info.sequence= 1; lex->current_select= &lex->select_lex; - if ((lex->create_info.used_fields & HA_CREATE_USED_ENGINE) && - !lex->create_info.db_type) + if (unlikely((lex->create_info.used_fields & + HA_CREATE_USED_ENGINE) && + !lex->create_info.db_type)) { lex->create_info.use_default_db_type(thd); push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, @@ -2610,9 +2746,9 @@ create: opt_key_algorithm_clause ON table_ident { - if (Lex->add_create_index_prepare($8)) + if (unlikely(Lex->add_create_index_prepare($8))) MYSQL_YYABORT; - if (Lex->add_create_index($2, &$5, $6, $1 | $4)) + if (unlikely(Lex->add_create_index($2, &$5, $6, $1 | $4))) MYSQL_YYABORT; } '(' key_list ')' opt_lock_wait_timeout normal_key_options @@ -2620,9 +2756,10 @@ create: | create_or_replace fulltext INDEX_SYM opt_if_not_exists ident ON table_ident { - if (Lex->add_create_index_prepare($7)) + if (unlikely(Lex->add_create_index_prepare($7))) MYSQL_YYABORT; - if (Lex->add_create_index($2, &$5, HA_KEY_ALG_UNDEF, $1 | $4)) + if (unlikely(Lex->add_create_index($2, &$5, HA_KEY_ALG_UNDEF, + $1 | $4))) MYSQL_YYABORT; } '(' key_list ')' 
opt_lock_wait_timeout fulltext_key_options @@ -2630,9 +2767,10 @@ create: | create_or_replace spatial INDEX_SYM opt_if_not_exists ident ON table_ident { - if (Lex->add_create_index_prepare($7)) + if (unlikely(Lex->add_create_index_prepare($7))) MYSQL_YYABORT; - if (Lex->add_create_index($2, &$5, HA_KEY_ALG_UNDEF, $1 | $4)) + if (unlikely(Lex->add_create_index($2, &$5, HA_KEY_ALG_UNDEF, + $1 | $4))) MYSQL_YYABORT; } '(' key_list ')' opt_lock_wait_timeout spatial_key_options @@ -2645,15 +2783,17 @@ create: opt_create_database_options { LEX *lex=Lex; - if (lex->set_command_with_check(SQLCOM_CREATE_DB, 0, $1 | $3)) + if (unlikely(lex->set_command_with_check(SQLCOM_CREATE_DB, 0, + $1 | $3))) MYSQL_YYABORT; lex->name= $4; } | create_or_replace definer_opt opt_view_suid VIEW_SYM opt_if_not_exists table_ident { - if (Lex->add_create_view(thd, $1 | $5, - DTYPE_ALGORITHM_UNDEFINED, $3, $6)) + if (unlikely(Lex->add_create_view(thd, $1 | $5, + DTYPE_ALGORITHM_UNDEFINED, $3, + $6))) MYSQL_YYABORT; } view_list_opt AS view_select @@ -2661,7 +2801,7 @@ create: | create_or_replace view_algorithm definer_opt opt_view_suid VIEW_SYM opt_if_not_exists table_ident { - if (Lex->add_create_view(thd, $1 | $6, $2, $4, $7)) + if (unlikely(Lex->add_create_view(thd, $1 | $6, $2, $4, $7))) MYSQL_YYABORT; } view_list_opt AS view_select @@ -2703,13 +2843,15 @@ create: | create_or_replace USER_SYM opt_if_not_exists clear_privileges grant_list opt_require_clause opt_resource_options { - if (Lex->set_command_with_check(SQLCOM_CREATE_USER, $1 | $3)) + if (unlikely(Lex->set_command_with_check(SQLCOM_CREATE_USER, + $1 | $3))) MYSQL_YYABORT; } | create_or_replace ROLE_SYM opt_if_not_exists clear_privileges role_list opt_with_admin { - if (Lex->set_command_with_check(SQLCOM_CREATE_ROLE, $1 | $3)) + if (unlikely(Lex->set_command_with_check(SQLCOM_CREATE_ROLE, + $1 | $3))) MYSQL_YYABORT; } | CREATE LOGFILE_SYM GROUP_SYM logfile_group_info @@ -2728,7 +2870,7 @@ create: sf_tail_not_aggregate: sf_tail { - if (Lex->sphead->m_flags & sp_head::HAS_AGGREGATE_INSTR) + if (unlikely(Lex->sphead->m_flags & sp_head::HAS_AGGREGATE_INSTR)) { my_yyabort_error((ER_NOT_AGGREGATE_FUNCTION, MYF(0))); } @@ -2738,7 +2880,7 @@ sf_tail_not_aggregate: sf_tail_aggregate: sf_tail { - if (!(Lex->sphead->m_flags & sp_head::HAS_AGGREGATE_INSTR)) + if (unlikely(!(Lex->sphead->m_flags & sp_head::HAS_AGGREGATE_INSTR))) { my_yyabort_error((ER_INVALID_AGGREGATE_FUNCTION, MYF(0))); } @@ -2773,105 +2915,105 @@ sequence_def: } | NO_SYM MINVALUE_SYM { - if (Lex->create_info.seq_create_info->used_fields & seq_field_used_min_value) + if (unlikely(Lex->create_info.seq_create_info->used_fields & seq_field_used_min_value)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MINVALUE")); Lex->create_info.seq_create_info->used_fields|= seq_field_used_min_value; } | NOMINVALUE_SYM { - if (Lex->create_info.seq_create_info->used_fields & seq_field_used_min_value) + if (unlikely(Lex->create_info.seq_create_info->used_fields & seq_field_used_min_value)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MINVALUE")); Lex->create_info.seq_create_info->used_fields|= seq_field_used_min_value; } | MAXVALUE_SYM opt_equal longlong_num { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_max_value) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_max_value)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MAXVALUE")); Lex->create_info.seq_create_info->max_value= $3; Lex->create_info.seq_create_info->used_fields|= seq_field_used_max_value; } | NO_SYM 
MAXVALUE_SYM { - if (Lex->create_info.seq_create_info->used_fields & seq_field_used_max_value) + if (unlikely(Lex->create_info.seq_create_info->used_fields & seq_field_used_max_value)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MAXVALUE")); Lex->create_info.seq_create_info->used_fields|= seq_field_used_max_value; } | NOMAXVALUE_SYM { - if (Lex->create_info.seq_create_info->used_fields & seq_field_used_max_value) + if (unlikely(Lex->create_info.seq_create_info->used_fields & seq_field_used_max_value)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MAXVALUE")); Lex->create_info.seq_create_info->used_fields|= seq_field_used_max_value; } | START_SYM opt_with longlong_num { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_start) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_start)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "START")); Lex->create_info.seq_create_info->start= $3; Lex->create_info.seq_create_info->used_fields|= seq_field_used_start; } | INCREMENT_SYM opt_by longlong_num { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_increment) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_increment)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "INCREMENT")); Lex->create_info.seq_create_info->increment= $3; Lex->create_info.seq_create_info->used_fields|= seq_field_used_increment; } | CACHE_SYM opt_equal longlong_num { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_cache) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_cache)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "CACHE")); Lex->create_info.seq_create_info->cache= $3; Lex->create_info.seq_create_info->used_fields|= seq_field_used_cache; } | NOCACHE_SYM { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_cache) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_cache)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "CACHE")); Lex->create_info.seq_create_info->cache= 0; Lex->create_info.seq_create_info->used_fields|= seq_field_used_cache; } | CYCLE_SYM { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_cycle) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_cycle)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "CYCLE")); Lex->create_info.seq_create_info->cycle= 1; Lex->create_info.seq_create_info->used_fields|= seq_field_used_cycle; } | NOCYCLE_SYM { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_cycle) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_cycle)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "CYCLE")); Lex->create_info.seq_create_info->cycle= 0; Lex->create_info.seq_create_info->used_fields|= seq_field_used_cycle; } | RESTART_SYM { - if (Lex->sql_command != SQLCOM_ALTER_SEQUENCE) + if (unlikely(Lex->sql_command != SQLCOM_ALTER_SEQUENCE)) { thd->parse_error(ER_SYNTAX_ERROR, "RESTART"); YYABORT; } - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_restart) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_restart)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "RESTART")); Lex->create_info.seq_create_info->used_fields|= seq_field_used_restart; } | RESTART_SYM opt_with longlong_num { - if (Lex->sql_command != SQLCOM_ALTER_SEQUENCE) + if (unlikely(Lex->sql_command != SQLCOM_ALTER_SEQUENCE)) { thd->parse_error(ER_SYNTAX_ERROR, "RESTART"); YYABORT; } - if 
(Lex->create_info.seq_create_info->used_fields & - seq_field_used_restart) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_restart)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "RESTART")); Lex->create_info.seq_create_info->restart= $3; Lex->create_info.seq_create_info->used_fields|= seq_field_used_restart | seq_field_used_restart_value; @@ -2881,7 +3023,7 @@ sequence_def: server_def: SERVER_SYM opt_if_not_exists ident_or_text { - if (Lex->add_create_options_with_check($2)) + if (unlikely(Lex->add_create_options_with_check($2))) MYSQL_YYABORT; Lex->server_options.reset($3); } @@ -2940,9 +3082,10 @@ event_tail: LEX *lex=Lex; lex->stmt_definition_begin= $1; - if (lex->add_create_options_with_check($2)) + if (unlikely(lex->add_create_options_with_check($2))) MYSQL_YYABORT; - if (!(lex->event_parse_data= Event_parse_data::new_instance(thd))) + if (unlikely(!(lex->event_parse_data= + Event_parse_data::new_instance(thd)))) MYSQL_YYABORT; lex->event_parse_data->identifier= $3; lex->event_parse_data->on_completion= @@ -3005,7 +3148,7 @@ ev_starts: /* empty */ { Item *item= new (thd->mem_root) Item_func_now_local(thd, 0); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; Lex->event_parse_data->item_starts= item; } @@ -3069,11 +3212,12 @@ ev_sql_stmt: (the nested ALTER EVENT can have anything but DO clause) - CREATE PROCEDURE ... BEGIN DROP EVENT ... END| */ - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_EVENT_RECURSION_FORBIDDEN, MYF(0))); - if (!lex->make_sp_head(thd, lex->event_parse_data->identifier, - &sp_handler_procedure)) + if (unlikely(!lex->make_sp_head(thd, + lex->event_parse_data->identifier, + &sp_handler_procedure))) MYSQL_YYABORT; lex->sphead->set_body_start(thd, lip->get_cpp_ptr()); @@ -3108,12 +3252,12 @@ clear_privileges: sp_name: ident '.' ident { - if (!($$= Lex->make_sp_name(thd, &$1, &$3))) + if (unlikely(!($$= Lex->make_sp_name(thd, &$1, &$3)))) MYSQL_YYABORT; } | ident { - if (!($$= Lex->make_sp_name(thd, &$1))) + if (unlikely(!($$= Lex->make_sp_name(thd, &$1)))) MYSQL_YYABORT; } ; @@ -3166,7 +3310,7 @@ sp_suid: call: CALL_SYM sp_name { - if (Lex->call_statement_start(thd, $2)) + if (unlikely(Lex->call_statement_start(thd, $2))) MYSQL_YYABORT; } opt_sp_cparam_list {} @@ -3208,7 +3352,7 @@ sp_fdparams: sp_param_name: ident { - if (!($$= Lex->sp_param_init(&$1))) + if (unlikely(!($$= Lex->sp_param_init(&$1)))) MYSQL_YYABORT; } ; @@ -3216,32 +3360,35 @@ sp_param_name: sp_param_name_and_type: sp_param_name type_with_opt_collate { - if (Lex->sp_param_fill_definition($$= $1)) + if (unlikely(Lex->sp_param_fill_definition($$= $1))) MYSQL_YYABORT; } | sp_param_name TYPE_SYM OF_SYM ident '.' ident { - if (Lex->sphead->spvar_fill_type_reference(thd, $$= $1, $4, $6)) + if (unlikely(Lex->sphead->spvar_fill_type_reference(thd, + $$= $1, $4, + $6))) MYSQL_YYABORT; } | sp_param_name TYPE_SYM OF_SYM ident '.' ident '.' ident { - if (Lex->sphead->spvar_fill_type_reference(thd, $$= $1, $4, $6, $8)) + if (unlikely(Lex->sphead->spvar_fill_type_reference(thd, $$= $1, + $4, $6, $8))) MYSQL_YYABORT; } | sp_param_name ROW_SYM TYPE_SYM OF_SYM ident { - if (Lex->sphead->spvar_fill_table_rowtype_reference(thd, $$= $1, $5)) + if (unlikely(Lex->sphead->spvar_fill_table_rowtype_reference(thd, $$= $1, $5))) MYSQL_YYABORT; } | sp_param_name ROW_SYM TYPE_SYM OF_SYM ident '.' 
ident { - if (Lex->sphead->spvar_fill_table_rowtype_reference(thd, $$= $1, $5, $7)) + if (unlikely(Lex->sphead->spvar_fill_table_rowtype_reference(thd, $$= $1, $5, $7))) MYSQL_YYABORT; } | sp_param_name ROW_SYM row_type_body { - if (Lex->sphead->spvar_fill_row(thd, $$= $1, $3)) + if (unlikely(Lex->sphead->spvar_fill_row(thd, $$= $1, $3))) MYSQL_YYABORT; } ; @@ -3313,7 +3460,7 @@ sp_decls: because letting the grammar rules reflect it caused tricky shift/reduce conflicts with the wrong result. (And we get better error handling this way.) */ - if (Lex->sp_declarations_join(&$$, $1, $2)) + if (unlikely(Lex->sp_declarations_join(&$$, $1, $2))) MYSQL_YYABORT; } ; @@ -3326,18 +3473,20 @@ sp_decl: optionally_qualified_column_ident: sp_decl_ident { - if (!($$= new (thd->mem_root) Qualified_column_ident(&$1))) + if (unlikely(!($$= new (thd->mem_root) + Qualified_column_ident(&$1)))) MYSQL_YYABORT; } | sp_decl_ident '.' ident { - if (!($$= new (thd->mem_root) Qualified_column_ident(&$1, &$3))) + if (unlikely(!($$= new (thd->mem_root) + Qualified_column_ident(&$1, &$3)))) MYSQL_YYABORT; } | sp_decl_ident '.' ident '.' ident { - if (!($$= new (thd->mem_root) Qualified_column_ident(thd, - &$1, &$3, &$5))) + if (unlikely(!($$= new (thd->mem_root) + Qualified_column_ident(thd, &$1, &$3, &$5)))) MYSQL_YYABORT; } ; @@ -3345,10 +3494,10 @@ optionally_qualified_column_ident: row_field_name: ident { - if (check_string_char_length(&$1, 0, NAME_CHAR_LEN, - system_charset_info, 1)) + if (unlikely(check_string_char_length(&$1, 0, NAME_CHAR_LEN, + system_charset_info, 1))) my_yyabort_error((ER_TOO_LONG_IDENT, MYF(0), $1.str)); - if (!($$= new (thd->mem_root) Spvar_definition())) + if (unlikely(!($$= new (thd->mem_root) Spvar_definition()))) MYSQL_YYABORT; Lex->init_last_field($$, &$1, thd->variables.collation_database); } @@ -3361,17 +3510,18 @@ row_field_definition: row_field_definition_list: row_field_definition { - if (!($$= new (thd->mem_root) Row_definition_list())) + if (unlikely(!($$= new (thd->mem_root) Row_definition_list())) || + unlikely($$->push_back($1, thd->mem_root))) MYSQL_YYABORT; - $$->push_back($1, thd->mem_root); } | row_field_definition_list ',' row_field_definition { uint unused; - if ($1->find_row_field_by_name(&$3->field_name, &unused)) + if (unlikely($1->find_row_field_by_name(&$3->field_name, &unused))) my_yyabort_error((ER_DUP_FIELDNAME, MYF(0), $3->field_name.str)); $$= $1; - $$->push_back($3, thd->mem_root); + if (unlikely($$->push_back($3, thd->mem_root))) + MYSQL_YYABORT; } ; @@ -3391,8 +3541,9 @@ sp_decl_variable_list: type_with_opt_collate sp_opt_default { - if (Lex->sp_variable_declarations_finalize(thd, $1, - &Lex->last_field[0], $3)) + if (unlikely(Lex->sp_variable_declarations_finalize(thd, $1, + &Lex->last_field[0], + $3))) MYSQL_YYABORT; $$.init_using_vars($1); } @@ -3400,7 +3551,7 @@ sp_decl_variable_list: TYPE_SYM OF_SYM optionally_qualified_column_ident sp_opt_default { - if (Lex->sp_variable_declarations_with_ref_finalize(thd, $1, $4, $5)) + if (unlikely(Lex->sp_variable_declarations_with_ref_finalize(thd, $1, $4, $5))) MYSQL_YYABORT; $$.init_using_vars($1); } @@ -3408,7 +3559,7 @@ sp_decl_variable_list: ROW_SYM TYPE_SYM OF_SYM optionally_qualified_column_ident sp_opt_default { - if (Lex->sp_variable_declarations_rowtype_finalize(thd, $1, $5, $6)) + if (unlikely(Lex->sp_variable_declarations_rowtype_finalize(thd, $1, $5, $6))) MYSQL_YYABORT; $$.init_using_vars($1); } @@ -3416,7 +3567,7 @@ sp_decl_variable_list: ROW_SYM row_type_body sp_opt_default { - if 
(Lex->sp_variable_declarations_row_finalize(thd, $1, $3, $4)) + if (unlikely(Lex->sp_variable_declarations_row_finalize(thd, $1, $3, $4))) MYSQL_YYABORT; $$.init_using_vars($1); } @@ -3426,19 +3577,19 @@ sp_decl_body: sp_decl_variable_list | sp_decl_ident CONDITION_SYM FOR_SYM sp_cond { - if (Lex->spcont->declare_condition(thd, &$1, $4)) + if (unlikely(Lex->spcont->declare_condition(thd, &$1, $4))) MYSQL_YYABORT; $$.vars= $$.hndlrs= $$.curs= 0; $$.conds= 1; } | sp_handler_type HANDLER_SYM FOR_SYM { - if (Lex->sp_handler_declaration_init(thd, $1)) + if (unlikely(Lex->sp_handler_declaration_init(thd, $1))) MYSQL_YYABORT; } sp_hcond_list sp_proc_stmt { - if (Lex->sp_handler_declaration_finalize(thd, $1)) + if (unlikely(Lex->sp_handler_declaration_finalize(thd, $1))) MYSQL_YYABORT; $$.vars= $$.conds= $$.curs= 0; $$.hndlrs= 1; @@ -3451,9 +3602,9 @@ sp_decl_body: FOR_SYM sp_cursor_stmt { sp_pcontext *param_ctx= Lex->spcont; - if (Lex->sp_block_finalize(thd)) + if (unlikely(Lex->sp_block_finalize(thd))) MYSQL_YYABORT; - if (Lex->sp_declare_cursor(thd, &$1, $6, param_ctx, true)) + if (unlikely(Lex->sp_declare_cursor(thd, &$1, $6, param_ctx, true))) MYSQL_YYABORT; $$.vars= $$.conds= $$.hndlrs= 0; $$.curs= 1; @@ -3469,7 +3620,8 @@ opt_parenthesized_cursor_formal_parameters: sp_cursor_stmt_lex: { DBUG_ASSERT(thd->lex->sphead); - if (!($$= new (thd->mem_root) sp_lex_cursor(thd, thd->lex))) + if (unlikely(!($$= new (thd->mem_root) + sp_lex_cursor(thd, thd->lex)))) MYSQL_YYABORT; } ; @@ -3483,8 +3635,8 @@ sp_cursor_stmt: select { DBUG_ASSERT(Lex == $1); - if ($1->stmt_finalize(thd) || - $1->sphead->restore_lex(thd)) + if (unlikely($1->stmt_finalize(thd)) || + unlikely($1->sphead->restore_lex(thd))) MYSQL_YYABORT; $$= $1; } @@ -3510,7 +3662,7 @@ sp_hcond_element: sp_head *sp= lex->sphead; sp_pcontext *ctx= lex->spcont->parent_context(); - if (ctx->check_duplicate_handler($1)) + if (unlikely(ctx->check_duplicate_handler($1))) my_yyabort_error((ER_SP_DUP_HANDLER, MYF(0))); sp_instr_hpush_jump *i= (sp_instr_hpush_jump *)sp->last_instruction(); @@ -3521,10 +3673,10 @@ sp_hcond_element: sp_cond: ulong_num { /* mysql errno */ - if ($1 == 0) + if (unlikely($1 == 0)) my_yyabort_error((ER_WRONG_VALUE, MYF(0), "CONDITION", "0")); $$= new (thd->mem_root) sp_condition_value($1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | sqlstate @@ -3541,10 +3693,11 @@ sqlstate: allowed to SIGNAL, or declare a handler for the completion condition. */ - if (!is_sqlstate_valid(&$3) || is_sqlstate_completion($3.str)) + if (unlikely(!is_sqlstate_valid(&$3) || + is_sqlstate_completion($3.str))) my_yyabort_error((ER_SP_BAD_SQLSTATE, MYF(0), $3.str)); $$= new (thd->mem_root) sp_condition_value($3.str); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -3562,25 +3715,25 @@ sp_hcond: | ident /* CONDITION name */ { $$= Lex->spcont->find_condition(&$1, false); - if ($$ == NULL) + if (unlikely($$ == NULL)) my_yyabort_error((ER_SP_COND_MISMATCH, MYF(0), $1.str)); } | SQLWARNING_SYM /* SQLSTATEs 01??? */ { $$= new (thd->mem_root) sp_condition_value(sp_condition_value::WARNING); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | not FOUND_SYM /* SQLSTATEs 02??? 
*/ { $$= new (thd->mem_root) sp_condition_value(sp_condition_value::NOT_FOUND); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SQLEXCEPTION_SYM /* All other SQLSTATEs */ { $$= new (thd->mem_root) sp_condition_value(sp_condition_value::EXCEPTION); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -3588,7 +3741,7 @@ sp_hcond: signal_stmt: SIGNAL_SYM signal_value opt_set_signal_information { - if (Lex->add_signal_statement(thd, $2)) + if (unlikely(Lex->add_signal_statement(thd, $2))) MYSQL_YYABORT; } ; @@ -3600,12 +3753,12 @@ signal_value: sp_condition_value *cond; /* SIGNAL foo cannot be used outside of stored programs */ - if (lex->spcont == NULL) + if (unlikely(lex->spcont == NULL)) my_yyabort_error((ER_SP_COND_MISMATCH, MYF(0), $1.str)); cond= lex->spcont->find_condition(&$1, false); - if (cond == NULL) + if (unlikely(cond == NULL)) my_yyabort_error((ER_SP_COND_MISMATCH, MYF(0), $1.str)); - if (cond->type != sp_condition_value::SQLSTATE) + if (unlikely(cond->type != sp_condition_value::SQLSTATE)) my_yyabort_error((ER_SIGNAL_BAD_CONDITION_TYPE, MYF(0))); $$= cond; } @@ -3643,7 +3796,7 @@ signal_information_item_list: Set_signal_information *info; info= &thd->m_parser_state->m_yacc.m_set_signal_info; int index= (int) $3; - if (info->m_item[index] != NULL) + if (unlikely(info->m_item[index] != NULL)) my_yyabort_error((ER_DUP_SIGNAL_SET, MYF(0), Diag_condition_item_names[index].str)); info->m_item[index]= $5; @@ -3661,7 +3814,7 @@ signal_allowed_expr: if ($1->type() == Item::FUNC_ITEM) { Item_func *item= (Item_func*) $1; - if (item->functype() == Item_func::SUSERVAR_FUNC) + if (unlikely(item->functype() == Item_func::SUSERVAR_FUNC)) { /* Don't allow the following syntax: @@ -3709,7 +3862,7 @@ signal_condition_information_item_name: resignal_stmt: RESIGNAL_SYM opt_signal_value opt_set_signal_information { - if (Lex->add_resignal_statement(thd, $2)) + if (unlikely(Lex->add_resignal_statement(thd, $2))) MYSQL_YYABORT; } ; @@ -3724,7 +3877,7 @@ get_diagnostics: Lex->sql_command= SQLCOM_GET_DIAGNOSTICS; Lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_get_diagnostics(info); - if (Lex->m_sql_cmd == NULL) + if (unlikely(Lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -3740,13 +3893,13 @@ diagnostics_information: statement_information { $$= new (thd->mem_root) Statement_information($1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CONDITION_SYM condition_number condition_information { $$= new (thd->mem_root) Condition_information($2, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -3755,12 +3908,13 @@ statement_information: statement_information_item { $$= new (thd->mem_root) List; - if ($$ == NULL || $$->push_back($1, thd->mem_root)) + if (unlikely($$ == NULL) || + unlikely($$->push_back($1, thd->mem_root))) MYSQL_YYABORT; } | statement_information ',' statement_information_item { - if ($1->push_back($3, thd->mem_root)) + if (unlikely($1->push_back($3, thd->mem_root))) MYSQL_YYABORT; $$= $1; } @@ -3770,24 +3924,20 @@ statement_information_item: simple_target_specification '=' statement_information_item_name { $$= new (thd->mem_root) Statement_information_item($3, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } simple_target_specification: - ident + ident_cli { - Lex_input_stream *lip= &thd->m_parser_state->m_lip; - $$= thd->lex->create_item_for_sp_var(&$1, NULL, - lip->get_tok_start(), - lip->get_ptr()); - if ($$ == NULL) + if (unlikely(!($$= thd->lex->create_item_for_sp_var(&$1, NULL)))) MYSQL_YYABORT; } 
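/*
  Editorial aside (not part of the patch): the dominant change in this and the
  surrounding hunks is that error paths are wrapped in unlikely() and the few
  hot success checks in likely(), e.g.
  "if (unlikely(!($$= thd->lex->create_item_for_sp_var(&$1, NULL))))".
  These are branch-prediction hints defined in the server's own portability
  headers; the sketch below only shows the common idiom under the assumption
  of a GCC/Clang-style __builtin_expect, and the my_likely/my_unlikely and
  parse_step names are invented for illustration.

      #include <cstdio>

      #if defined(__GNUC__) || defined(__clang__)
      #  define my_likely(x)   __builtin_expect(!!(x), 1)
      #  define my_unlikely(x) __builtin_expect(!!(x), 0)
      #else
      #  define my_likely(x)   (x)   // fall back to a plain condition
      #  define my_unlikely(x) (x)
      #endif

      // Mirrors the grammar actions: the failure branch is marked cold so
      // the compiler keeps the success path as straight-line fall-through.
      static int parse_step(const char *token)
      {
        if (my_unlikely(token == nullptr))  // rare error path
          return 1;                         // stands in for MYSQL_YYABORT
        std::puts(token);                   // common success path
        return 0;
      }

      int main() { return parse_step("ok"); }

  The hints never change behaviour, only code layout, which is why they can be
  applied this broadly across the grammar actions.
*/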
| '@' ident_or_text { $$= new (thd->mem_root) Item_func_get_user_var(thd, &$2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -3812,12 +3962,13 @@ condition_information: condition_information_item { $$= new (thd->mem_root) List; - if ($$ == NULL || $$->push_back($1, thd->mem_root)) + if (unlikely($$ == NULL) || + unlikely($$->push_back($1, thd->mem_root))) MYSQL_YYABORT; } | condition_information ',' condition_information_item { - if ($1->push_back($3, thd->mem_root)) + if (unlikely($1->push_back($3, thd->mem_root))) MYSQL_YYABORT; $$= $1; } @@ -3827,7 +3978,7 @@ condition_information_item: simple_target_specification '=' condition_information_item_name { $$= new (thd->mem_root) Condition_information_item($3, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } @@ -3861,7 +4012,12 @@ condition_information_item_name: ; sp_decl_ident: - ident { $$= $1; } + IDENT_sys + | keyword_sp_decl + { + if (unlikely($$.copy_ident_cli(thd, &$1))) + MYSQL_YYABORT; + } ; sp_decl_idents: @@ -3872,7 +4028,7 @@ sp_decl_idents: LEX *lex= Lex; sp_pcontext *spc= lex->spcont; - if (spc->find_variable(&$1, TRUE)) + if (unlikely(spc->find_variable(&$1, TRUE))) my_yyabort_error((ER_SP_DUP_VAR, MYF(0), $1.str)); spc->add_variable(thd, &$1); $$= 1; @@ -3884,7 +4040,7 @@ sp_decl_idents: LEX *lex= Lex; sp_pcontext *spc= lex->spcont; - if (spc->find_variable(&$3, TRUE)) + if (unlikely(spc->find_variable(&$3, TRUE))) my_yyabort_error((ER_SP_DUP_VAR, MYF(0), $3.str)); spc->add_variable(thd, &$3); $$= $1 + 1; @@ -3933,7 +4089,7 @@ sp_proc_stmt_compound_ok: sp_proc_stmt_if: IF_SYM { - if (Lex->maybe_start_compound_statement(thd)) + if (unlikely(Lex->maybe_start_compound_statement(thd))) MYSQL_YYABORT; Lex->sphead->new_cont_backpatch(NULL); } @@ -3957,7 +4113,7 @@ sp_proc_stmt_statement: sp->m_flags|= sp_get_flags_for_command(lex); /* "USE db" doesn't work in a procedure */ - if (lex->sql_command == SQLCOM_CHANGE_DB) + if (unlikely(lex->sql_command == SQLCOM_CHANGE_DB)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "USE")); /* Don't add an instruction for SET statements, since all @@ -3970,7 +4126,7 @@ sp_proc_stmt_statement: { sp_instr_stmt *i=new (thd->mem_root) sp_instr_stmt(sp->instructions(), lex->spcont, lex); - if (i == NULL) + if (unlikely(i == NULL)) MYSQL_YYABORT; /* @@ -3982,13 +4138,13 @@ sp_proc_stmt_statement: i->m_query.length= lip->get_ptr() - sp->m_tmp_query; else i->m_query.length= lip->get_tok_start() - sp->m_tmp_query;; - if (!(i->m_query.str= strmake_root(thd->mem_root, - sp->m_tmp_query, - i->m_query.length)) || - sp->add_instr(i)) + if (unlikely(!(i->m_query.str= strmake_root(thd->mem_root, + sp->m_tmp_query, + i->m_query.length))) || + unlikely(sp->add_instr(i))) MYSQL_YYABORT; } - if (sp->restore_lex(thd)) + if (unlikely(sp->restore_lex(thd))) MYSQL_YYABORT; } ; @@ -4000,9 +4156,9 @@ sp_proc_stmt_return: { LEX *lex= Lex; sp_head *sp= lex->sphead; - if (sp->m_handler->add_instr_freturn(thd, sp, lex->spcont, - $3, lex) || - sp->restore_lex(thd)) + if (unlikely(sp->m_handler->add_instr_freturn(thd, sp, lex->spcont, + $3, lex)) || + unlikely(sp->restore_lex(thd))) MYSQL_YYABORT; } ; @@ -4010,7 +4166,7 @@ sp_proc_stmt_return: sp_proc_stmt_leave: LEAVE_SYM label_ident { - if (Lex->sp_leave_statement(thd, &$2)) + if (unlikely(Lex->sp_leave_statement(thd, &$2))) MYSQL_YYABORT; } ; @@ -4018,7 +4174,7 @@ sp_proc_stmt_leave: sp_proc_stmt_iterate: ITERATE_SYM label_ident { - if (Lex->sp_iterate_statement(thd, &$2)) + if (unlikely(Lex->sp_iterate_statement(thd, &$2))) 
MYSQL_YYABORT; } ; @@ -4026,7 +4182,8 @@ sp_proc_stmt_iterate: assignment_source_lex: { DBUG_ASSERT(Lex->sphead); - if (!($$= new (thd->mem_root) sp_assignment_lex(thd, thd->lex))) + if (unlikely(!($$= new (thd->mem_root) + sp_assignment_lex(thd, thd->lex)))) MYSQL_YYABORT; } ; @@ -4044,7 +4201,7 @@ assignment_source_expr: $$->sp_lex_in_use= true; $$->set_item_and_free_list($3, thd->free_list); thd->free_list= NULL; - if ($$->sphead->restore_lex(thd)) + if (unlikely($$->sphead->restore_lex(thd))) MYSQL_YYABORT; } ; @@ -4060,7 +4217,7 @@ for_loop_bound_expr: $$= $1; $$->sp_lex_in_use= true; $$->set_item_and_free_list($3, NULL); - if ($$->sphead->restore_lex(thd)) + if (unlikely($$->sphead->restore_lex(thd))) MYSQL_YYABORT; } ; @@ -4068,7 +4225,7 @@ for_loop_bound_expr: cursor_actual_parameters: assignment_source_expr { - if (!($$= new (thd->mem_root) List)) + if (unlikely(!($$= new (thd->mem_root) List))) MYSQL_YYABORT; $$->push_back($1, thd->mem_root); } @@ -4087,7 +4244,7 @@ opt_parenthesized_cursor_actual_parameters: sp_proc_stmt_open: OPEN_SYM ident opt_parenthesized_cursor_actual_parameters { - if (Lex->sp_open_cursor(thd, &$2, $3)) + if (unlikely(Lex->sp_open_cursor(thd, &$2, $3))) MYSQL_YYABORT; } ; @@ -4095,17 +4252,17 @@ sp_proc_stmt_open: sp_proc_stmt_fetch_head: FETCH_SYM ident INTO { - if (Lex->sp_add_cfetch(thd, &$2)) + if (unlikely(Lex->sp_add_cfetch(thd, &$2))) MYSQL_YYABORT; } | FETCH_SYM FROM ident INTO { - if (Lex->sp_add_cfetch(thd, &$3)) + if (unlikely(Lex->sp_add_cfetch(thd, &$3))) MYSQL_YYABORT; } | FETCH_SYM NEXT_SYM FROM ident INTO { - if (Lex->sp_add_cfetch(thd, &$4)) + if (unlikely(Lex->sp_add_cfetch(thd, &$4))) MYSQL_YYABORT; } ; @@ -4120,8 +4277,8 @@ sp_proc_stmt_fetch: sp_instr_agg_cfetch *i= new (thd->mem_root) sp_instr_agg_cfetch(sp->instructions(), lex->spcont); - if (i == NULL || - sp->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(sp->add_instr(i))) MYSQL_YYABORT; } ; @@ -4134,12 +4291,12 @@ sp_proc_stmt_close: uint offset; sp_instr_cclose *i; - if (! 
lex->spcont->find_cursor(&$2, &offset, false)) + if (unlikely(!lex->spcont->find_cursor(&$2, &offset, false))) my_yyabort_error((ER_SP_CURSOR_MISMATCH, MYF(0), $2.str)); i= new (thd->mem_root) sp_instr_cclose(sp->instructions(), lex->spcont, offset); - if (i == NULL || - sp->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(sp->add_instr(i))) MYSQL_YYABORT; } ; @@ -4152,7 +4309,7 @@ sp_fetch_list: sp_pcontext *spc= lex->spcont; sp_variable *spv; - if (!spc || !(spv = spc->find_variable(&$1, false))) + if (unlikely(!spc || !(spv = spc->find_variable(&$1, false)))) my_yyabort_error((ER_SP_UNDECLARED_VAR, MYF(0), $1.str)); /* An SP local variable */ @@ -4166,7 +4323,7 @@ sp_fetch_list: sp_pcontext *spc= lex->spcont; sp_variable *spv; - if (!spc || !(spv = spc->find_variable(&$3, false))) + if (unlikely(!spc || !(spv = spc->find_variable(&$3, false)))) my_yyabort_error((ER_SP_UNDECLARED_VAR, MYF(0), $3.str)); /* An SP local variable */ @@ -4185,12 +4342,12 @@ sp_if: uint ip= sp->instructions(); sp_instr_jump_if_not *i= new (thd->mem_root) sp_instr_jump_if_not(ip, ctx, $2, lex); - if (i == NULL || - sp->push_backpatch(thd, i, ctx->push_label(thd, &empty_clex_str, 0)) || - sp->add_cont_backpatch(i) || - sp->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(sp->push_backpatch(thd, i, ctx->push_label(thd, &empty_clex_str, 0))) || + unlikely(sp->add_cont_backpatch(i)) || + unlikely(sp->add_instr(i))) MYSQL_YYABORT; - if (sp->restore_lex(thd)) + if (unlikely(sp->restore_lex(thd))) MYSQL_YYABORT; } sp_proc_stmts1 @@ -4199,8 +4356,8 @@ sp_if: sp_pcontext *ctx= Lex->spcont; uint ip= sp->instructions(); sp_instr_jump *i= new (thd->mem_root) sp_instr_jump(ip, ctx); - if (i == NULL || - sp->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(sp->add_instr(i))) MYSQL_YYABORT; sp->backpatch(ctx->pop_label()); sp->push_backpatch(thd, i, ctx->push_label(thd, &empty_clex_str, 0)); @@ -4222,7 +4379,7 @@ sp_elseifs: case_stmt_specification: CASE_SYM { - if (Lex->maybe_start_compound_statement(thd)) + if (unlikely(Lex->maybe_start_compound_statement(thd))) MYSQL_YYABORT; /** @@ -4293,10 +4450,9 @@ case_stmt_body: { Lex->sphead->reset_lex(thd); /* For expr $2 */ } expr { - if (Lex->case_stmt_action_expr($2)) + if (unlikely(Lex->case_stmt_action_expr($2))) MYSQL_YYABORT; - - if (Lex->sphead->restore_lex(thd)) + if (unlikely(Lex->sphead->restore_lex(thd))) MYSQL_YYABORT; } simple_when_clause_list @@ -4325,16 +4481,16 @@ simple_when_clause: /* Simple case: = */ LEX *lex= Lex; - if (lex->case_stmt_action_when($3, true)) + if (unlikely(lex->case_stmt_action_when($3, true))) MYSQL_YYABORT; /* For expr $3 */ - if (lex->sphead->restore_lex(thd)) + if (unlikely(lex->sphead->restore_lex(thd))) MYSQL_YYABORT; } THEN_SYM sp_proc_stmts1 { - if (Lex->case_stmt_action_then()) + if (unlikely(Lex->case_stmt_action_then())) MYSQL_YYABORT; } ; @@ -4347,16 +4503,16 @@ searched_when_clause: expr { LEX *lex= Lex; - if (lex->case_stmt_action_when($3, false)) + if (unlikely(lex->case_stmt_action_when($3, false))) MYSQL_YYABORT; /* For expr $3 */ - if (lex->sphead->restore_lex(thd)) + if (unlikely(lex->sphead->restore_lex(thd))) MYSQL_YYABORT; } THEN_SYM sp_proc_stmts1 { - if (Lex->case_stmt_action_then()) + if (unlikely(Lex->case_stmt_action_then())) MYSQL_YYABORT; } ; @@ -4369,8 +4525,8 @@ else_clause_opt: uint ip= sp->instructions(); sp_instr_error *i= new (thd->mem_root) sp_instr_error(ip, lex->spcont, ER_SP_CASE_NOT_FOUND); - if (i == NULL || - sp->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(sp->add_instr(i))) 
MYSQL_YYABORT; } | ELSE sp_proc_stmts1 @@ -4388,7 +4544,7 @@ sp_opt_label: sp_block_label: sp_label { - if (Lex->spcont->block_label_declare(&$1)) + if (unlikely(Lex->spcont->block_label_declare(&$1))) MYSQL_YYABORT; $$= $1; } @@ -4405,7 +4561,7 @@ sp_labeled_block: END sp_opt_label { - if (Lex->sp_block_finalize(thd, $4, &$7)) + if (unlikely(Lex->sp_block_finalize(thd, $4, &$7))) MYSQL_YYABORT; } ; @@ -4419,7 +4575,7 @@ sp_unlabeled_block: sp_proc_stmts END { - if (Lex->sp_block_finalize(thd, $3)) + if (unlikely(Lex->sp_block_finalize(thd, $3))) MYSQL_YYABORT; } ; @@ -4427,7 +4583,7 @@ sp_unlabeled_block: sp_unlabeled_block_not_atomic: BEGIN_SYM not ATOMIC_SYM /* TODO: BEGIN ATOMIC (not -> opt_not) */ { - if (Lex->maybe_start_compound_statement(thd)) + if (unlikely(Lex->maybe_start_compound_statement(thd))) MYSQL_YYABORT; Lex->sp_block_init(thd); } @@ -4435,7 +4591,7 @@ sp_unlabeled_block_not_atomic: sp_proc_stmts END { - if (Lex->sp_block_finalize(thd, $5)) + if (unlikely(Lex->sp_block_finalize(thd, $5))) MYSQL_YYABORT; } ; @@ -4449,7 +4605,7 @@ opt_sp_for_loop_direction: sp_for_loop_index_and_bounds: ident sp_for_loop_bounds { - if (Lex->sp_for_loop_declarations(thd, &$$, &$1, $2)) + if (unlikely(Lex->sp_for_loop_declarations(thd, &$$, &$1, $2))) MYSQL_YYABORT; } ; @@ -4472,7 +4628,8 @@ sp_for_loop_bounds: } | IN_SYM opt_sp_for_loop_direction '(' sp_cursor_stmt ')' { - if (Lex->sp_for_loop_implicit_cursor_statement(thd, &$$, $4)) + if (unlikely(Lex->sp_for_loop_implicit_cursor_statement(thd, &$$, + $4))) MYSQL_YYABORT; } ; @@ -4485,8 +4642,8 @@ loop_body: sp_label *lab= lex->spcont->last_label(); /* Jumping back */ sp_instr_jump *i= new (thd->mem_root) sp_instr_jump(ip, lex->spcont, lab->ip); - if (i == NULL || - lex->sphead->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(lex->sphead->add_instr(i))) MYSQL_YYABORT; } ; @@ -4495,14 +4652,14 @@ while_body: expr DO_SYM { LEX *lex= Lex; - if (lex->sp_while_loop_expression(thd, $1)) + if (unlikely(lex->sp_while_loop_expression(thd, $1))) MYSQL_YYABORT; - if (lex->sphead->restore_lex(thd)) + if (unlikely(lex->sphead->restore_lex(thd))) MYSQL_YYABORT; } sp_proc_stmts1 END WHILE_SYM { - if (Lex->sp_while_loop_finalize(thd)) + if (unlikely(Lex->sp_while_loop_finalize(thd))) MYSQL_YYABORT; } ; @@ -4517,10 +4674,10 @@ repeat_body: sp_label *lab= lex->spcont->last_label(); /* Jumping back */ sp_instr_jump_if_not *i= new (thd->mem_root) sp_instr_jump_if_not(ip, lex->spcont, $4, lab->ip, lex); - if (i == NULL || - lex->sphead->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(lex->sphead->add_instr(i))) MYSQL_YYABORT; - if (lex->sphead->restore_lex(thd)) + if (unlikely(lex->sphead->restore_lex(thd))) MYSQL_YYABORT; /* We can shortcut the cont_backpatch here */ i->m_cont_dest= ip+1; @@ -4530,7 +4687,7 @@ repeat_body: pop_sp_loop_label: sp_opt_label { - if (Lex->sp_pop_loop_label(thd, &$1)) + if (unlikely(Lex->sp_pop_loop_label(thd, &$1))) MYSQL_YYABORT; } ; @@ -4538,14 +4695,14 @@ pop_sp_loop_label: sp_labeled_control: sp_label LOOP_SYM { - if (Lex->sp_push_loop_label(thd, &$1)) + if (unlikely(Lex->sp_push_loop_label(thd, &$1))) MYSQL_YYABORT; } loop_body pop_sp_loop_label { } | sp_label WHILE_SYM { - if (Lex->sp_push_loop_label(thd, &$1)) + if (unlikely(Lex->sp_push_loop_label(thd, &$1))) MYSQL_YYABORT; Lex->sphead->reset_lex(thd); } @@ -4558,28 +4715,28 @@ sp_labeled_control: } sp_for_loop_index_and_bounds { - if (Lex->sp_push_loop_label(thd, &$1)) // The inner WHILE block + if (unlikely(Lex->sp_push_loop_label(thd, &$1))) // The inner 
WHILE block MYSQL_YYABORT; - if (Lex->sp_for_loop_condition_test(thd, $4)) + if (unlikely(Lex->sp_for_loop_condition_test(thd, $4))) MYSQL_YYABORT; } DO_SYM sp_proc_stmts1 END FOR_SYM { - if (Lex->sp_for_loop_finalize(thd, $4)) + if (unlikely(Lex->sp_for_loop_finalize(thd, $4))) MYSQL_YYABORT; } pop_sp_loop_label // The inner WHILE block { Lex_spblock tmp; tmp.curs= MY_TEST($4.m_implicit_cursor); - if (Lex->sp_block_finalize(thd, tmp)) // The outer DECLARE..BEGIN..END + if (unlikely(Lex->sp_block_finalize(thd, tmp))) // The outer DECLARE..BEGIN..END MYSQL_YYABORT; } | sp_label REPEAT_SYM { - if (Lex->sp_push_loop_label(thd, &$1)) + if (unlikely(Lex->sp_push_loop_label(thd, &$1))) MYSQL_YYABORT; } repeat_body pop_sp_loop_label @@ -4589,7 +4746,7 @@ sp_labeled_control: sp_unlabeled_control: LOOP_SYM { - if (Lex->sp_push_loop_empty_label(thd)) + if (unlikely(Lex->sp_push_loop_empty_label(thd))) MYSQL_YYABORT; } loop_body @@ -4598,7 +4755,7 @@ sp_unlabeled_control: } | WHILE_SYM { - if (Lex->sp_push_loop_empty_label(thd)) + if (unlikely(Lex->sp_push_loop_empty_label(thd))) MYSQL_YYABORT; Lex->sphead->reset_lex(thd); } @@ -4609,15 +4766,15 @@ sp_unlabeled_control: | FOR_SYM { // See "The FOR LOOP statement" comments in sql_lex.cc - if (Lex->maybe_start_compound_statement(thd)) + if (unlikely(Lex->maybe_start_compound_statement(thd))) MYSQL_YYABORT; Lex->sp_block_init(thd); // The outer DECLARE..BEGIN..END block } sp_for_loop_index_and_bounds { - if (Lex->sp_push_loop_empty_label(thd)) // The inner WHILE block + if (unlikely(Lex->sp_push_loop_empty_label(thd))) // The inner WHILE block MYSQL_YYABORT; - if (Lex->sp_for_loop_condition_test(thd, $3)) + if (unlikely(Lex->sp_for_loop_condition_test(thd, $3))) MYSQL_YYABORT; } DO_SYM @@ -4626,15 +4783,15 @@ sp_unlabeled_control: { Lex_spblock tmp; tmp.curs= MY_TEST($3.m_implicit_cursor); - if (Lex->sp_for_loop_finalize(thd, $3)) + if (unlikely(Lex->sp_for_loop_finalize(thd, $3))) MYSQL_YYABORT; Lex->sp_pop_loop_empty_label(thd); // The inner WHILE block - if (Lex->sp_block_finalize(thd, tmp)) // The outer DECLARE..BEGIN..END + if (unlikely(Lex->sp_block_finalize(thd, tmp))) // The outer DECLARE..BEGIN..END MYSQL_YYABORT; } | REPEAT_SYM { - if (Lex->sp_push_loop_empty_label(thd)) + if (unlikely(Lex->sp_push_loop_empty_label(thd))) MYSQL_YYABORT; } repeat_body @@ -4853,7 +5010,7 @@ tablespace_name: LEX *lex= Lex; lex->alter_tablespace_info= (new (thd->mem_root) st_alter_tablespace()); - if (lex->alter_tablespace_info == NULL) + if (unlikely(lex->alter_tablespace_info == NULL)) MYSQL_YYABORT; lex->alter_tablespace_info->tablespace_name= $1.str; lex->sql_command= SQLCOM_ALTER_TABLESPACE; @@ -4866,7 +5023,7 @@ logfile_group_name: LEX *lex= Lex; lex->alter_tablespace_info= (new (thd->mem_root) st_alter_tablespace()); - if (lex->alter_tablespace_info == NULL) + if (unlikely(lex->alter_tablespace_info == NULL)) MYSQL_YYABORT; lex->alter_tablespace_info->logfile_group_name= $1.str; lex->sql_command= SQLCOM_ALTER_TABLESPACE; @@ -4943,7 +5100,7 @@ opt_ts_nodegroup: NODEGROUP_SYM opt_equal real_ulong_num { LEX *lex= Lex; - if (lex->alter_tablespace_info->nodegroup_id != UNDEF_NODEGROUP) + if (unlikely(lex->alter_tablespace_info->nodegroup_id != UNDEF_NODEGROUP)) my_yyabort_error((ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"NODEGROUP")); lex->alter_tablespace_info->nodegroup_id= $3; } @@ -4953,7 +5110,7 @@ opt_ts_comment: COMMENT_SYM opt_equal TEXT_STRING_sys { LEX *lex= Lex; - if (lex->alter_tablespace_info->ts_comment != NULL) + if 
(unlikely(lex->alter_tablespace_info->ts_comment != NULL)) my_yyabort_error((ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"COMMENT")); lex->alter_tablespace_info->ts_comment= $3.str; } @@ -4963,7 +5120,7 @@ opt_ts_engine: opt_storage ENGINE_SYM opt_equal storage_engines { LEX *lex= Lex; - if (lex->alter_tablespace_info->storage_engine != NULL) + if (unlikely(lex->alter_tablespace_info->storage_engine != NULL)) my_yyabort_error((ER_FILEGROUP_OPTION_ONLY_ONCE, MYF(0), "STORAGE ENGINE")); lex->alter_tablespace_info->storage_engine= $4; @@ -4984,7 +5141,7 @@ ts_wait: | NO_WAIT_SYM { LEX *lex= Lex; - if (!(lex->alter_tablespace_info->wait_until_completed)) + if (unlikely(!(lex->alter_tablespace_info->wait_until_completed))) my_yyabort_error((ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"NO_WAIT")); lex->alter_tablespace_info->wait_until_completed= FALSE; } @@ -4994,34 +5151,8 @@ size_number: real_ulonglong_num { $$= $1;} | IDENT_sys { - ulonglong number; - uint text_shift_number= 0; - longlong prefix_number; - const char *start_ptr= $1.str; - size_t str_len= $1.length; - const char *end_ptr= start_ptr + str_len; - int error; - prefix_number= my_strtoll10(start_ptr, (char**) &end_ptr, &error); - if ((start_ptr + str_len - 1) == end_ptr) - { - switch (end_ptr[0]) - { - case 'g': - case 'G': text_shift_number+=30; break; - case 'm': - case 'M': text_shift_number+=20; break; - case 'k': - case 'K': text_shift_number+=10; break; - default: - my_yyabort_error((ER_WRONG_SIZE_NUMBER, MYF(0))); - } - if (prefix_number >> 31) - my_yyabort_error((ER_SIZE_OVERFLOW_ERROR, MYF(0))); - number= prefix_number << text_shift_number; - } - else - my_yyabort_error((ER_WRONG_SIZE_NUMBER, MYF(0))); - $$= number; + if ($1.to_size_number(&$$)) + MYSQL_YYABORT; } ; @@ -5050,7 +5181,7 @@ create_body: Lex->create_info.add(DDL_options_st::OPT_LIKE); TABLE_LIST *src_table= Lex->select_lex.add_table_to_list(thd, $1, NULL, 0, TL_READ, MDL_SHARED_READ); - if (! src_table) + if (unlikely(! src_table)) MYSQL_YYABORT; /* CREATE TABLE ... LIKE is not allowed for views. 
*/ src_table->required_type= TABLE_TYPE_NORMAL; @@ -5134,11 +5265,8 @@ partitioning: { LEX *lex= Lex; lex->part_info= new (thd->mem_root) partition_info(); - if (!lex->part_info) - { - mem_alloc_error(sizeof(partition_info)); + if (unlikely(!lex->part_info)) MYSQL_YYABORT; - } if (lex->sql_command == SQLCOM_ALTER_TABLE) { lex->alter_info.partition_flags|= ALTER_PARTITION_INFO; @@ -5152,7 +5280,7 @@ have_partitioning: { #ifdef WITH_PARTITION_STORAGE_ENGINE LEX_CSTRING partition_name={STRING_WITH_LEN("partition")}; - if (!plugin_is_ready(&partition_name, MYSQL_STORAGE_ENGINE_PLUGIN)) + if (unlikely(!plugin_is_ready(&partition_name, MYSQL_STORAGE_ENGINE_PLUGIN))) my_yyabort_error((ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-partition")); #else @@ -5165,7 +5293,7 @@ have_partitioning: partition_entry: PARTITION_SYM { - if (!Lex->part_info) + if (unlikely(!Lex->part_info)) { thd->parse_error(ER_PARTITION_ENTRY_ERROR); MYSQL_YYABORT; @@ -5212,7 +5340,10 @@ part_type_def: | LIST_SYM part_column_list { Lex->part_info->part_type= LIST_PARTITION; } | SYSTEM_TIME_SYM - { if (Lex->part_info->vers_init_info(thd)) MYSQL_YYABORT; } + { + if (unlikely(Lex->part_info->vers_init_info(thd))) + MYSQL_YYABORT; + } opt_versioning_rotation ; @@ -5256,12 +5387,10 @@ part_field_item: { partition_info *part_info= Lex->part_info; part_info->num_columns++; - if (part_info->part_field_list.push_back($1.str, thd->mem_root)) - { - mem_alloc_error(1); + if (unlikely(part_info->part_field_list.push_back($1.str, + thd->mem_root))) MYSQL_YYABORT; - } - if (part_info->num_columns > MAX_REF_PARTS) + if (unlikely(part_info->num_columns > MAX_REF_PARTS)) my_yyabort_error((ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0), "list of partition fields")); } @@ -5281,8 +5410,8 @@ part_func: '(' remember_name part_func_expr remember_end ')' { partition_info *part_info= Lex->part_info; - if (part_info->set_part_expr(thd, $2 + 1, $3, $4, FALSE)) - { MYSQL_YYABORT; } + if (unlikely(part_info->set_part_expr(thd, $2 + 1, $3, $4, FALSE))) + MYSQL_YYABORT; part_info->num_columns= 1; part_info->column_list= FALSE; } @@ -5291,8 +5420,8 @@ part_func: sub_part_func: '(' remember_name part_func_expr remember_end ')' { - if (Lex->part_info->set_part_expr(thd, $2 + 1, $3, $4, TRUE)) - { MYSQL_YYABORT; } + if (unlikely(Lex->part_info->set_part_expr(thd, $2 + 1, $3, $4, TRUE))) + MYSQL_YYABORT; } ; @@ -5303,7 +5432,7 @@ opt_num_parts: { uint num_parts= $2; partition_info *part_info= Lex->part_info; - if (num_parts == 0) + if (unlikely(num_parts == 0)) my_yyabort_error((ER_NO_PARTS_ERROR, MYF(0), "partitions")); part_info->num_parts= num_parts; @@ -5335,12 +5464,11 @@ sub_part_field_item: ident { partition_info *part_info= Lex->part_info; - if (part_info->subpart_field_list.push_back($1.str, thd->mem_root)) - { - mem_alloc_error(1); + if (unlikely(part_info->subpart_field_list.push_back($1.str, + thd->mem_root))) MYSQL_YYABORT; - } - if (part_info->subpart_field_list.elements > MAX_REF_PARTS) + + if (unlikely(part_info->subpart_field_list.elements > MAX_REF_PARTS)) my_yyabort_error((ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0), "list of subpartition fields")); } @@ -5349,7 +5477,7 @@ sub_part_field_item: part_func_expr: bit_expr { - if (!Lex->safe_to_cache_query) + if (unlikely(!Lex->safe_to_cache_query)) { thd->parse_error(ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR); MYSQL_YYABORT; @@ -5364,7 +5492,7 @@ opt_num_subparts: { uint num_parts= $2; LEX *lex= Lex; - if (num_parts == 0) + if (unlikely(num_parts == 0)) my_yyabort_error((ER_NO_PARTS_ERROR, 
MYF(0), "subpartitions")); lex->part_info->num_subparts= num_parts; lex->part_info->use_default_num_subpartitions= FALSE; @@ -5375,10 +5503,10 @@ part_defs: /* empty */ { partition_info *part_info= Lex->part_info; - if (part_info->part_type == RANGE_PARTITION) + if (unlikely(part_info->part_type == RANGE_PARTITION)) my_yyabort_error((ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), "RANGE")); - if (part_info->part_type == LIST_PARTITION) + if (unlikely(part_info->part_type == LIST_PARTITION)) my_yyabort_error((ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), "LIST")); } @@ -5388,8 +5516,8 @@ part_defs: uint count_curr_parts= part_info->partitions.elements; if (part_info->num_parts != 0) { - if (part_info->num_parts != - count_curr_parts) + if (unlikely(part_info->num_parts != + count_curr_parts)) { thd->parse_error(ER_PARTITION_WRONG_NO_PART_ERROR); MYSQL_YYABORT; @@ -5414,12 +5542,10 @@ part_definition: partition_info *part_info= Lex->part_info; partition_element *p_elem= new (thd->mem_root) partition_element(); - if (!p_elem || - part_info->partitions.push_back(p_elem, thd->mem_root)) - { - mem_alloc_error(sizeof(partition_element)); + if (unlikely(!p_elem) || + unlikely(part_info->partitions.push_back(p_elem, thd->mem_root))) MYSQL_YYABORT; - } + p_elem->part_state= PART_NORMAL; p_elem->id= part_info->partitions.elements - 1; part_info->curr_part_elem= p_elem; @@ -5439,7 +5565,7 @@ part_name: { partition_info *part_info= Lex->part_info; partition_element *p_elem= part_info->curr_part_elem; - if (check_ident_length(&$1)) + if (unlikely(check_ident_length(&$1))) MYSQL_YYABORT; p_elem->partition_name= $1.str; } @@ -5452,9 +5578,9 @@ opt_part_values: partition_info *part_info= lex->part_info; if (! lex->is_partition_management()) { - if (part_info->error_if_requires_values()) - MYSQL_YYABORT; - if (part_info->part_type == VERSIONING_PARTITION) + if (unlikely(part_info->error_if_requires_values())) + MYSQL_YYABORT; + if (unlikely(part_info->part_type == VERSIONING_PARTITION)) my_yyabort_error((ER_VERS_WRONG_PARTS, MYF(0), lex->create_last_non_select_table-> table_name.str)); @@ -5468,7 +5594,7 @@ opt_part_values: partition_info *part_info= lex->part_info; if (! lex->is_partition_management()) { - if (part_info->part_type != RANGE_PARTITION) + if (unlikely(part_info->part_type != RANGE_PARTITION)) my_yyabort_error((ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), "RANGE", "LESS THAN")); } @@ -5482,7 +5608,7 @@ opt_part_values: partition_info *part_info= lex->part_info; if (! lex->is_partition_management()) { - if (part_info->part_type != LIST_PARTITION) + if (unlikely(part_info->part_type != LIST_PARTITION)) my_yyabort_error((ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), "LIST", "IN")); } @@ -5492,60 +5618,13 @@ opt_part_values: part_values_in {} | CURRENT_SYM { - LEX *lex= Lex; - partition_info *part_info= lex->part_info; - partition_element *elem= part_info->curr_part_elem; - if (! lex->is_partition_management()) - { - if (part_info->part_type != VERSIONING_PARTITION) - my_yyabort_error((ER_PARTITION_WRONG_TYPE, MYF(0), "SYSTEM_TIME")); - } - else - { - DBUG_ASSERT(Lex->create_last_non_select_table); - DBUG_ASSERT(Lex->create_last_non_select_table->table_name.str); - // FIXME: other ALTER commands? 
- my_yyabort_error((ER_VERS_WRONG_PARTS, MYF(0), - Lex->create_last_non_select_table-> - table_name.str)); - } - elem->type(partition_element::CURRENT); - DBUG_ASSERT(part_info->vers_info); - part_info->vers_info->now_part= elem; - if (part_info->init_column_part(thd)) - { + if (Lex->part_values_current(thd)) MYSQL_YYABORT; - } } | HISTORY_SYM { - LEX *lex= Lex; - partition_info *part_info= lex->part_info; - partition_element *elem= part_info->curr_part_elem; - if (! lex->is_partition_management()) - { - if (part_info->part_type != VERSIONING_PARTITION) - my_yyabort_error((ER_PARTITION_WRONG_TYPE, MYF(0), "SYSTEM_TIME")); - } - else - { - part_info->vers_init_info(thd); - elem->id= UINT_MAX32; - } - DBUG_ASSERT(part_info->vers_info); - if (part_info->vers_info->now_part) - { - DBUG_ASSERT(Lex->create_last_non_select_table); - DBUG_ASSERT(Lex->create_last_non_select_table->table_name.str); - my_yyabort_error((ER_VERS_WRONG_PARTS, MYF(0), - Lex->create_last_non_select_table-> - table_name.str)); - } - elem->type(partition_element::HISTORY); - if (part_info->init_column_part(thd)) - { + if (Lex->part_values_history(thd)) MYSQL_YYABORT; - } } | DEFAULT { @@ -5553,20 +5632,16 @@ opt_part_values: partition_info *part_info= lex->part_info; if (! lex->is_partition_management()) { - if (part_info->part_type != LIST_PARTITION) + if (unlikely(part_info->part_type != LIST_PARTITION)) my_yyabort_error((ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), "LIST", "DEFAULT")); } else part_info->part_type= LIST_PARTITION; - if (part_info->init_column_part(thd)) - { + if (unlikely(part_info->init_column_part(thd))) MYSQL_YYABORT; - } - if (part_info->add_max_value(thd)) - { + if (unlikely(part_info->add_max_value(thd))) MYSQL_YYABORT; - } } ; @@ -5575,8 +5650,8 @@ part_func_max: { partition_info *part_info= Lex->part_info; - if (part_info->num_columns && - part_info->num_columns != 1U) + if (unlikely(part_info->num_columns && + part_info->num_columns != 1U)) { part_info->print_debug("Kilroy II", NULL); thd->parse_error(ER_PARTITION_COLUMN_LIST_ERROR); @@ -5584,14 +5659,10 @@ part_func_max: } else part_info->num_columns= 1U; - if (part_info->init_column_part(thd)) - { + if (unlikely(part_info->init_column_part(thd))) MYSQL_YYABORT; - } - if (part_info->add_max_value(thd)) - { + if (unlikely(part_info->add_max_value(thd))) MYSQL_YYABORT; - } } | part_value_item {} ; @@ -5605,9 +5676,9 @@ part_values_in: if (part_info->num_columns != 1U) { - if (!lex->is_partition_management() || - part_info->num_columns == 0 || - part_info->num_columns > MAX_REF_PARTS) + if (unlikely(!lex->is_partition_management() || + part_info->num_columns == 0 || + part_info->num_columns > MAX_REF_PARTS)) { part_info->print_debug("Kilroy III", NULL); thd->parse_error(ER_PARTITION_COLUMN_LIST_ERROR); @@ -5620,16 +5691,14 @@ part_values_in: we ADD or REORGANIZE partitions. Also can only happen for LIST partitions. 
*/ - if (part_info->reorganize_into_single_field_col_val(thd)) - { + if (unlikely(part_info->reorganize_into_single_field_col_val(thd))) MYSQL_YYABORT; - } } } | '(' part_value_list ')' { partition_info *part_info= Lex->part_info; - if (part_info->num_columns < 2U) + if (unlikely(part_info->num_columns < 2U)) { thd->parse_error(ER_ROW_SINGLE_PARTITION_FIELD_ERROR); MYSQL_YYABORT; @@ -5648,12 +5717,10 @@ part_value_item: partition_info *part_info= Lex->part_info; part_info->print_debug("( part_value_item", NULL); /* Initialisation code needed for each list of value expressions */ - if (!(part_info->part_type == LIST_PARTITION && - part_info->num_columns == 1U) && - part_info->init_column_part(thd)) - { + if (unlikely(!(part_info->part_type == LIST_PARTITION && + part_info->num_columns == 1U) && + part_info->init_column_part(thd))) MYSQL_YYABORT; - } } part_value_item_list {} ')' @@ -5662,7 +5729,7 @@ part_value_item: part_info->print_debug(") part_value_item", NULL); if (part_info->num_columns == 0) part_info->num_columns= part_info->curr_list_object; - if (part_info->num_columns != part_info->curr_list_object) + if (unlikely(part_info->num_columns != part_info->curr_list_object)) { /* All value items lists must be of equal length, in some cases @@ -5688,15 +5755,13 @@ part_value_expr_item: MAXVALUE_SYM { partition_info *part_info= Lex->part_info; - if (part_info->part_type == LIST_PARTITION) + if (unlikely(part_info->part_type == LIST_PARTITION)) { thd->parse_error(ER_MAXVALUE_IN_VALUES_IN); MYSQL_YYABORT; } - if (part_info->add_max_value(thd)) - { + if (unlikely(part_info->add_max_value(thd))) MYSQL_YYABORT; - } } | bit_expr { @@ -5704,15 +5769,13 @@ part_value_expr_item: partition_info *part_info= lex->part_info; Item *part_expr= $1; - if (!lex->safe_to_cache_query) + if (unlikely(!lex->safe_to_cache_query)) { thd->parse_error(ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR); MYSQL_YYABORT; } - if (part_info->add_column_list_value(thd, part_expr)) - { + if (unlikely(part_info->add_column_list_value(thd, part_expr))) MYSQL_YYABORT; - } } ; @@ -5721,8 +5784,8 @@ opt_sub_partition: /* empty */ { partition_info *part_info= Lex->part_info; - if (part_info->num_subparts != 0 && - !part_info->use_default_subpartitions) + if (unlikely(part_info->num_subparts != 0 && + !part_info->use_default_subpartitions)) { /* We come here when we have defined subpartitions on the first @@ -5737,8 +5800,8 @@ opt_sub_partition: partition_info *part_info= Lex->part_info; if (part_info->num_subparts != 0) { - if (part_info->num_subparts != - part_info->count_curr_subparts) + if (unlikely(part_info->num_subparts != + part_info->count_curr_subparts)) { thd->parse_error(ER_PARTITION_WRONG_NO_SUBPART_ERROR); MYSQL_YYABORT; @@ -5746,7 +5809,7 @@ opt_sub_partition: } else if (part_info->count_curr_subparts > 0) { - if (part_info->partitions.elements > 1) + if (unlikely(part_info->partitions.elements > 1)) { thd->parse_error(ER_PARTITION_WRONG_NO_SUBPART_ERROR); MYSQL_YYABORT; @@ -5769,8 +5832,8 @@ sub_part_definition: partition_element *curr_part= part_info->current_partition; partition_element *sub_p_elem= new (thd->mem_root) partition_element(curr_part); - if (part_info->use_default_subpartitions && - part_info->partitions.elements >= 2) + if (unlikely(part_info->use_default_subpartitions && + part_info->partitions.elements >= 2)) { /* create table t1 (a int) @@ -5786,12 +5849,10 @@ sub_part_definition: thd->parse_error(ER_PARTITION_WRONG_NO_SUBPART_ERROR); MYSQL_YYABORT; } - if (!sub_p_elem || - 
curr_part->subpartitions.push_back(sub_p_elem, thd->mem_root)) - { - mem_alloc_error(sizeof(partition_element)); + if (unlikely(!sub_p_elem) || + unlikely(curr_part->subpartitions.push_back(sub_p_elem, thd->mem_root))) MYSQL_YYABORT; - } + sub_p_elem->id= curr_part->subpartitions.elements - 1; part_info->curr_part_elem= sub_p_elem; part_info->use_default_subpartitions= FALSE; @@ -5804,7 +5865,7 @@ sub_part_definition: sub_name: ident_or_text { - if (check_ident_length(&$1)) + if (unlikely(check_ident_length(&$1))) MYSQL_YYABORT; Lex->part_info->curr_part_elem->partition_name= $1.str; } @@ -5854,7 +5915,7 @@ opt_versioning_rotation: | INTERVAL_SYM expr interval opt_versioning_interval_start { partition_info *part_info= Lex->part_info; - if (part_info->vers_set_interval($2, $3, $4)) + if (unlikely(part_info->vers_set_interval($2, $3, $4))) { my_error(ER_PART_WRONG_VALUE, MYF(0), Lex->create_last_non_select_table->table_name.str, @@ -5865,7 +5926,7 @@ opt_versioning_rotation: | LIMIT ulonglong_num { partition_info *part_info= Lex->part_info; - if (part_info->vers_set_limit($2)) + if (unlikely(part_info->vers_set_limit($2))) { my_error(ER_PART_WRONG_VALUE, MYF(0), Lex->create_last_non_select_table->table_name.str, @@ -5875,22 +5936,21 @@ opt_versioning_rotation: } ; - ; opt_versioning_interval_start: /* empty */ { - $$= thd->systime(); + $$= thd->query_start(); } - | remember_tok_start STARTS_SYM ulong_num + | STARTS_SYM ulong_num { /* only allowed from mysql_unpack_partition() */ - if (!Lex->part_info->table) + if (unlikely(!Lex->part_info->table)) { - thd->parse_error(ER_SYNTAX_ERROR, $1); + thd->parse_error(ER_SYNTAX_ERROR, $1.pos()); MYSQL_YYABORT; } - $$= (ulong)$3; + $$= (ulong)$2; } ; @@ -6125,7 +6185,7 @@ create_table_option: larger values. 65535 pages, 16kb each means to sample 1GB, which is impractical. If at some point this needs to be extended, then we can store the higher bits from stats_sample_pages in .frm too. 
*/ - if ($3 == 0 || $3 > 0xffff) + if (unlikely($3 == 0 || $3 > 0xffff)) { thd->parse_error(); MYSQL_YYABORT; @@ -6230,31 +6290,33 @@ create_table_option: } | IDENT_sys equal TEXT_STRING_sys { - if ($3.length > ENGINE_OPTION_MAX_LENGTH) + if (unlikely($3.length > ENGINE_OPTION_MAX_LENGTH)) my_yyabort_error((ER_VALUE_TOO_LONG, MYF(0), $1.str)); - new (thd->mem_root) - engine_option_value($1, $3, true, &Lex->create_info.option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, $3, true, + &Lex->create_info.option_list, + &Lex->option_list_last); } | IDENT_sys equal ident { - if ($3.length > ENGINE_OPTION_MAX_LENGTH) + if (unlikely($3.length > ENGINE_OPTION_MAX_LENGTH)) my_yyabort_error((ER_VALUE_TOO_LONG, MYF(0), $1.str)); - new (thd->mem_root) - engine_option_value($1, $3, false, &Lex->create_info.option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, $3, false, + &Lex->create_info.option_list, + &Lex->option_list_last); } | IDENT_sys equal real_ulonglong_num { - new (thd->mem_root) - engine_option_value($1, $3, &Lex->create_info.option_list, - &Lex->option_list_last, thd->mem_root); + (void) new (thd->mem_root) + engine_option_value($1, $3, &Lex->create_info.option_list, + &Lex->option_list_last, thd->mem_root); } | IDENT_sys equal DEFAULT { - new (thd->mem_root) - engine_option_value($1, &Lex->create_info.option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, &Lex->create_info.option_list, + &Lex->option_list_last); } | SEQUENCE_SYM opt_equal choice { @@ -6272,7 +6334,7 @@ opt_versioning_option: versioning_option: WITH_SYSTEM_SYM VERSIONING_SYM { - if (Lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) + if (unlikely(Lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)) { if (DBUG_EVALUATE_IF("sysvers_force", 0, 1)) { @@ -6291,7 +6353,7 @@ versioning_option: default_charset: opt_default charset opt_equal charset_name_or_default { - if (Lex->create_info.add_table_option_default_charset($4)) + if (unlikely(Lex->create_info.add_table_option_default_charset($4))) MYSQL_YYABORT; } ; @@ -6300,13 +6362,11 @@ default_collation: opt_default COLLATE_SYM opt_equal collation_name_or_default { HA_CREATE_INFO *cinfo= &Lex->create_info; - if ((cinfo->used_fields & HA_CREATE_USED_DEFAULT_CHARSET) && - cinfo->default_table_charset && $4 && - !($4= merge_charset_and_collation(cinfo->default_table_charset, - $4))) - { + if (unlikely((cinfo->used_fields & HA_CREATE_USED_DEFAULT_CHARSET) && + cinfo->default_table_charset && $4 && + !($4= merge_charset_and_collation(cinfo->default_table_charset, + $4)))) MYSQL_YYABORT; - } Lex->create_info.default_table_charset= $4; Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET; @@ -6319,7 +6379,7 @@ storage_engines: plugin_ref plugin= ha_resolve_by_name(thd, &$1, thd->lex->create_info.tmp_table()); - if (plugin) + if (likely(plugin)) $$= plugin_hton(plugin); else { @@ -6338,7 +6398,7 @@ known_storage_engines: ident_or_text { plugin_ref plugin; - if ((plugin= ha_resolve_by_name(thd, &$1, false))) + if (likely((plugin= ha_resolve_by_name(thd, &$1, false)))) $$= plugin_hton(plugin); else my_yyabort_error((ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str)); @@ -6399,28 +6459,28 @@ key_def: key_or_index opt_if_not_exists opt_ident opt_USING_key_algorithm { Lex->option_list= NULL; - if (Lex->add_key(Key::MULTIPLE, &$3, $4, $2)) + if (unlikely(Lex->add_key(Key::MULTIPLE, &$3, $4, $2))) MYSQL_YYABORT; } '(' key_list ')' normal_key_options { } | key_or_index 
opt_if_not_exists ident TYPE_SYM btree_or_rtree { Lex->option_list= NULL; - if (Lex->add_key(Key::MULTIPLE, &$3, $5, $2)) + if (unlikely(Lex->add_key(Key::MULTIPLE, &$3, $5, $2))) MYSQL_YYABORT; } '(' key_list ')' normal_key_options { } | fulltext opt_key_or_index opt_if_not_exists opt_ident { Lex->option_list= NULL; - if (Lex->add_key($1, &$4, HA_KEY_ALG_UNDEF, $3)) + if (unlikely(Lex->add_key($1, &$4, HA_KEY_ALG_UNDEF, $3))) MYSQL_YYABORT; } '(' key_list ')' fulltext_key_options { } | spatial opt_key_or_index opt_if_not_exists opt_ident { Lex->option_list= NULL; - if (Lex->add_key($1, &$4, HA_KEY_ALG_UNDEF, $3)) + if (unlikely(Lex->add_key($1, &$4, HA_KEY_ALG_UNDEF, $3))) MYSQL_YYABORT; } '(' key_list ')' spatial_key_options { } @@ -6429,7 +6489,7 @@ key_def: opt_USING_key_algorithm { Lex->option_list= NULL; - if (Lex->add_key($2, $4.str ? &$4 : &$1, $5, $3)) + if (unlikely(Lex->add_key($2, $4.str ? &$4 : &$1, $5, $3))) MYSQL_YYABORT; } '(' key_list ')' normal_key_options { } @@ -6437,16 +6497,17 @@ key_def: TYPE_SYM btree_or_rtree { Lex->option_list= NULL; - if (Lex->add_key($2, $4.str ? &$4 : &$1, $6, $3)) + if (unlikely(Lex->add_key($2, $4.str ? &$4 : &$1, $6, $3))) MYSQL_YYABORT; } '(' key_list ')' normal_key_options { } | opt_constraint FOREIGN KEY_SYM opt_if_not_exists opt_ident { - if (Lex->check_add_key($4) || - !(Lex->last_key= (new (thd->mem_root) - Key(Key::MULTIPLE, $1.str ? &$1 : &$5, - HA_KEY_ALG_UNDEF, true, $4)))) + if (unlikely(Lex->check_add_key($4)) || + unlikely(!(Lex->last_key= (new (thd->mem_root) + Key(Key::MULTIPLE, + $1.str ? &$1 : &$5, + HA_KEY_ALG_UNDEF, true, $4))))) MYSQL_YYABORT; Lex->option_list= NULL; } @@ -6463,7 +6524,7 @@ key_def: lex->fk_update_opt, lex->fk_match_option, $4)); - if (key == NULL) + if (unlikely(key == NULL)) MYSQL_YYABORT; /* handle_if_exists_options() expectes the two keys in this order: @@ -6502,12 +6563,9 @@ opt_check_constraint: check_constraint: CHECK_SYM '(' expr ')' { - Virtual_column_info *v= - add_virtual_expression(thd, $3); - if (!v) - { + Virtual_column_info *v= add_virtual_expression(thd, $3); + if (unlikely(!v)) MYSQL_YYABORT; - } $$= v; } ; @@ -6527,11 +6585,11 @@ field_spec: LEX *lex=Lex; Create_field *f= new (thd->mem_root) Create_field(); - if (check_string_char_length(&$1, 0, NAME_CHAR_LEN, - system_charset_info, 1)) + if (unlikely(check_string_char_length(&$1, 0, NAME_CHAR_LEN, + system_charset_info, 1))) my_yyabort_error((ER_TOO_LONG_IDENT, MYF(0), $1.str)); - if (!f) + if (unlikely(!f)) MYSQL_YYABORT; lex->init_last_field(f, &$1, NULL); @@ -6544,7 +6602,7 @@ field_spec: $$->check_constraint= $4; - if ($$->check(thd)) + if (unlikely($$->check(thd))) MYSQL_YYABORT; lex->alter_info.create_list.push_back($$, thd->mem_root); @@ -6597,46 +6655,16 @@ field_def: Lex->last_field->flags&= ~NOT_NULL_FLAG; // undo automatic NOT NULL for timestamps } vcol_opt_specifier vcol_opt_attribute - | opt_generated_always AS ROW_SYM start_or_end opt_asrow_attribute + | opt_generated_always AS ROW_SYM START_SYM opt_asrow_attribute { - LEX *lex= Lex; - Vers_parse_info &info= lex->vers_get_info(); - const LEX_CSTRING &field_name= lex->last_field->field_name; - - LString_i *p; - switch ($4) - { - case 1: - p= &info.as_row.start; - if (*p) - { - my_yyabort_error((ER_VERS_DUPLICATE_ROW_START_END, MYF(0), - "START", field_name.str)); - } - lex->last_field->flags|= VERS_SYS_START_FLAG | NOT_NULL_FLAG; - break; - case 0: - p= &info.as_row.end; - if (*p) - { - my_yyabort_error((ER_VERS_DUPLICATE_ROW_START_END, MYF(0), - "END", field_name.str)); - 
} - lex->last_field->flags|= VERS_SYS_END_FLAG | NOT_NULL_FLAG; - break; - default: - /* Not Reachable */ + if (Lex->last_field_generated_always_as_row_start()) + MYSQL_YYABORT; + } + | opt_generated_always AS ROW_SYM END opt_asrow_attribute + { + if (Lex->last_field_generated_always_as_row_end()) MYSQL_YYABORT; - break; - } - DBUG_ASSERT(p); - *p= field_name; } - ; - -start_or_end: - START_SYM { $$ = 1; } - | END { $$ = 0; } ; opt_generated_always: @@ -6689,7 +6717,7 @@ vcol_attribute: | COMMENT_SYM TEXT_STRING_sys { Lex->last_field->comment= $2; } | INVISIBLE_SYM { - Lex->last_field->invisible= INVISIBLE_USER; + Lex->last_field->invisible= INVISIBLE_USER; } ; @@ -6706,7 +6734,7 @@ parse_vcol_expr: expr { Virtual_column_info *v= add_virtual_expression(thd, $3); - if (!v) + if (unlikely(!v)) MYSQL_YYABORT; Lex->last_field->vcol_info= v; } @@ -6716,7 +6744,7 @@ parenthesized_expr: subselect { $$= new (thd->mem_root) Item_singlerow_subselect(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | expr @@ -6724,7 +6752,7 @@ parenthesized_expr: { $3->push_front($1, thd->mem_root); $$= new (thd->mem_root) Item_row(thd, *$3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -6734,10 +6762,8 @@ virtual_column_func: { Virtual_column_info *v= add_virtual_expression(thd, $2); - if (!v) - { + if (unlikely(!v)) MYSQL_YYABORT; - } $$= v; } ; @@ -6748,7 +6774,7 @@ column_default_expr: virtual_column_func | expr_or_literal { - if (!($$= add_virtual_expression(thd, $1))) + if (unlikely(!($$= add_virtual_expression(thd, $1)))) MYSQL_YYABORT; } ; @@ -6771,7 +6797,7 @@ field_type_numeric: { int err; ulonglong tmp_length= my_strtoll10($2.length(), NULL, &err); - if (err || tmp_length > PRECISION_FOR_DOUBLE) + if (unlikely(err || tmp_length > PRECISION_FOR_DOUBLE)) my_yyabort_error((ER_WRONG_FIELD_SPEC, MYF(0), Lex->last_field->field_name.str)); if (tmp_length > PRECISION_FOR_FLOAT) @@ -7069,7 +7095,7 @@ attribute: | ON UPDATE_SYM NOW_SYM opt_default_time_precision { Item *item= new (thd->mem_root) Item_func_now_local(thd, $4); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; Lex->last_field->on_update= item; } @@ -7082,14 +7108,14 @@ attribute: } | COLLATE_SYM collation_name { - if (Lex->charset && !my_charset_same(Lex->charset,$2)) + if (unlikely(Lex->charset && !my_charset_same(Lex->charset,$2))) my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0), $2->name,Lex->charset->csname)); Lex->last_field->charset= $2; } | COMPRESSED_SYM opt_compression_method { - if (Lex->last_field->set_compressed($2)) + if (unlikely(Lex->last_field->set_compressed($2))) MYSQL_YYABORT; } | serial_attribute @@ -7118,30 +7144,33 @@ serial_attribute: asrow_attribute | IDENT_sys equal TEXT_STRING_sys { - if ($3.length > ENGINE_OPTION_MAX_LENGTH) + if (unlikely($3.length > ENGINE_OPTION_MAX_LENGTH)) my_yyabort_error((ER_VALUE_TOO_LONG, MYF(0), $1.str)); - new (thd->mem_root) - engine_option_value($1, $3, true, &Lex->last_field->option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, $3, true, + &Lex->last_field->option_list, + &Lex->option_list_last); } | IDENT_sys equal ident { - if ($3.length > ENGINE_OPTION_MAX_LENGTH) + if (unlikely($3.length > ENGINE_OPTION_MAX_LENGTH)) my_yyabort_error((ER_VALUE_TOO_LONG, MYF(0), $1.str)); - new (thd->mem_root) - engine_option_value($1, $3, false, &Lex->last_field->option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, $3, false, + &Lex->last_field->option_list, 
+ &Lex->option_list_last); } | IDENT_sys equal real_ulonglong_num { - new (thd->mem_root) - engine_option_value($1, $3, &Lex->last_field->option_list, - &Lex->option_list_last, thd->mem_root); + (void) new (thd->mem_root) + engine_option_value($1, $3, &Lex->last_field->option_list, + &Lex->option_list_last, thd->mem_root); } | IDENT_sys equal DEFAULT { - new (thd->mem_root) - engine_option_value($1, &Lex->last_field->option_list, &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, &Lex->last_field->option_list, + &Lex->option_list_last); } | with_or_without_system VERSIONING_SYM { @@ -7173,7 +7202,7 @@ type_with_opt_collate: if ($2) { - if (!(Lex->charset= merge_charset_and_collation(Lex->charset, $2))) + if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2)))) MYSQL_YYABORT; } Lex->last_field->set_attributes($1, Lex->charset); @@ -7188,7 +7217,7 @@ charset: charset_name: ident_or_text { - if (!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0)))) + if (unlikely(!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0))))) my_yyabort_error((ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str)); } | BINARY { $$= &my_charset_bin; } @@ -7207,8 +7236,9 @@ opt_load_data_charset: old_or_new_charset_name: ident_or_text { - if (!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0))) && - !($$=get_old_charset_by_name($1.str))) + if (unlikely(!($$=get_charset_by_csname($1.str, + MY_CS_PRIMARY,MYF(0))) && + !($$=get_old_charset_by_name($1.str)))) my_yyabort_error((ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str)); } | BINARY { $$= &my_charset_bin; } @@ -7222,7 +7252,7 @@ old_or_new_charset_name_or_default: collation_name: ident_or_text { - if (!($$= mysqld_collation_get_by_name($1.str))) + if (unlikely(!($$= mysqld_collation_get_by_name($1.str)))) MYSQL_YYABORT; } ; @@ -7247,7 +7277,7 @@ charset_or_alias: | ASCII_SYM { $$= &my_charset_latin1; } | UNICODE_SYM { - if (!($$= get_charset_by_csname("ucs2", MY_CS_PRIMARY,MYF(0)))) + if (unlikely(!($$= get_charset_by_csname("ucs2", MY_CS_PRIMARY,MYF(0))))) my_yyabort_error((ER_UNKNOWN_CHARACTER_SET, MYF(0), "ucs2")); } ; @@ -7268,7 +7298,7 @@ opt_bin_mod: ws_nweights: '(' real_ulong_num { - if ($2 == 0) + if (unlikely($2 == 0)) { thd->parse_error(); MYSQL_YYABORT; @@ -7359,14 +7389,14 @@ ref_list: ref_list ',' ident { Key_part_spec *key= new (thd->mem_root) Key_part_spec(&$3, 0); - if (key == NULL) + if (unlikely(key == NULL)) MYSQL_YYABORT; Lex->ref_list.push_back(key, thd->mem_root); } | ident { Key_part_spec *key= new (thd->mem_root) Key_part_spec(&$1, 0); - if (key == NULL) + if (unlikely(key == NULL)) MYSQL_YYABORT; LEX *lex= Lex; lex->ref_list.empty(); @@ -7527,30 +7557,31 @@ all_key_opt: { Lex->last_key->key_create_info.comment= $2; } | IDENT_sys equal TEXT_STRING_sys { - if ($3.length > ENGINE_OPTION_MAX_LENGTH) + if (unlikely($3.length > ENGINE_OPTION_MAX_LENGTH)) my_yyabort_error((ER_VALUE_TOO_LONG, MYF(0), $1.str)); - new (thd->mem_root) - engine_option_value($1, $3, true, &Lex->option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, $3, true, &Lex->option_list, + &Lex->option_list_last); } | IDENT_sys equal ident { - if ($3.length > ENGINE_OPTION_MAX_LENGTH) + if (unlikely($3.length > ENGINE_OPTION_MAX_LENGTH)) my_yyabort_error((ER_VALUE_TOO_LONG, MYF(0), $1.str)); - new (thd->mem_root) - engine_option_value($1, $3, false, &Lex->option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, $3, false, &Lex->option_list, + 
&Lex->option_list_last); } | IDENT_sys equal real_ulonglong_num { - new (thd->mem_root) - engine_option_value($1, $3, &Lex->option_list, - &Lex->option_list_last, thd->mem_root); + (void) new (thd->mem_root) + engine_option_value($1, $3, &Lex->option_list, + &Lex->option_list_last, thd->mem_root); } | IDENT_sys equal DEFAULT { - new (thd->mem_root) - engine_option_value($1, &Lex->option_list, &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, &Lex->option_list, + &Lex->option_list_last); } ; @@ -7567,7 +7598,7 @@ fulltext_key_opt: all_key_opt | WITH PARSER_SYM IDENT_sys { - if (plugin_is_ready(&$3, MYSQL_FTPARSER_PLUGIN)) + if (likely(plugin_is_ready(&$3, MYSQL_FTPARSER_PLUGIN))) Lex->last_key->key_create_info.parser_name= $3; else my_yyabort_error((ER_FUNCTION_NOT_DEFINED, MYF(0), $3.str)); @@ -7595,16 +7626,16 @@ key_part: ident { $$= new (thd->mem_root) Key_part_spec(&$1, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ident '(' NUM ')' { int key_part_len= atoi($3.str); - if (!key_part_len) + if (unlikely(!key_part_len)) my_yyabort_error((ER_KEY_PART_0, MYF(0), $1.str)); $$= new (thd->mem_root) Key_part_spec(&$1, (uint) key_part_len); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -7614,11 +7645,6 @@ opt_ident: | field_ident { $$= $1; } ; -opt_component: - /* empty */ { $$= null_clex_str; } - | '.' ident { $$= $2; } - ; - string_list: text_string { Lex->last_field->interval_list.push_back($1, thd->mem_root); } @@ -7646,21 +7672,21 @@ alter: } alter_options TABLE_SYM table_ident opt_lock_wait_timeout { - if (!Lex->select_lex.add_table_to_list(thd, $5, NULL, - TL_OPTION_UPDATING, - TL_READ_NO_INSERT, - MDL_SHARED_UPGRADABLE)) + if (unlikely(!Lex->select_lex.add_table_to_list(thd, $5, NULL, + TL_OPTION_UPDATING, + TL_READ_NO_INSERT, + MDL_SHARED_UPGRADABLE))) MYSQL_YYABORT; Lex->select_lex.db= (Lex->select_lex.table_list.first)->db; Lex->create_last_non_select_table= Lex->last_table(); } alter_commands { - if (!Lex->m_sql_cmd) + if (likely(!Lex->m_sql_cmd)) { /* Create a generic ALTER TABLE statment. */ Lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table(); - if (Lex->m_sql_cmd == NULL) + if (unlikely(Lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } } @@ -7674,13 +7700,14 @@ alter: LEX *lex=Lex; lex->sql_command=SQLCOM_ALTER_DB; lex->name= $3; - if (lex->name.str == NULL && lex->copy_db_to(&lex->name)) + if (lex->name.str == NULL && + unlikely(lex->copy_db_to(&lex->name))) MYSQL_YYABORT; } | ALTER DATABASE ident UPGRADE_SYM DATA_SYM DIRECTORY_SYM NAME_SYM { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "DATABASE")); lex->sql_command= SQLCOM_ALTER_DB_UPGRADE; lex->name= $3; @@ -7689,7 +7716,7 @@ alter: { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE")); lex->sp_chistics.init(); } @@ -7704,7 +7731,7 @@ alter: { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "FUNCTION")); lex->sp_chistics.init(); } @@ -7717,7 +7744,7 @@ alter: } | ALTER view_algorithm definer_opt opt_view_suid VIEW_SYM table_ident { - if (Lex->add_alter_view(thd, $2, $4, $6)) + if (unlikely(Lex->add_alter_view(thd, $2, $4, $6))) MYSQL_YYABORT; } view_list_opt AS view_select @@ -7729,7 +7756,7 @@ alter: with the ALTER EVENT below. 
*/ { - if (Lex->add_alter_view(thd, VIEW_ALGORITHM_INHERIT, $3, $5)) + if (unlikely(Lex->add_alter_view(thd, VIEW_ALGORITHM_INHERIT, $3, $5))) MYSQL_YYABORT; } view_list_opt AS view_select @@ -7744,7 +7771,7 @@ alter: Event_parse_data. */ - if (!(Lex->event_parse_data= Event_parse_data::new_instance(thd))) + if (unlikely(!(Lex->event_parse_data= Event_parse_data::new_instance(thd)))) MYSQL_YYABORT; Lex->event_parse_data->identifier= $5; @@ -7757,7 +7784,7 @@ alter: opt_ev_comment opt_ev_sql_stmt { - if (!($7 || $8 || $9 || $10 || $11)) + if (unlikely(!($7 || $8 || $9 || $10 || $11))) { thd->parse_error(); MYSQL_YYABORT; @@ -7815,18 +7842,19 @@ alter: table_ident { LEX *lex= Lex; - if (!(lex->create_info.seq_create_info= new (thd->mem_root) - sequence_definition()) || - !lex->select_lex.add_table_to_list(thd, $5, NULL, - TL_OPTION_SEQUENCE, - TL_WRITE, MDL_EXCLUSIVE)) + if (unlikely(!(lex->create_info.seq_create_info= + new (thd->mem_root) sequence_definition())) || + unlikely(!lex->select_lex.add_table_to_list(thd, $5, NULL, + TL_OPTION_SEQUENCE, + TL_WRITE, + MDL_EXCLUSIVE))) MYSQL_YYABORT; } sequence_defs { /* Create a generic ALTER SEQUENCE statment. */ Lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_sequence($3); - if (Lex->m_sql_cmd == NULL) + if (unlikely(Lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -7857,8 +7885,8 @@ opt_ev_sql_stmt: ; ident_or_empty: - /* empty */ { $$= null_clex_str; } - | ident { $$= $1; } + /* empty */ { $$= Lex_ident_sys(); } + | ident ; alter_commands: @@ -7868,7 +7896,7 @@ alter_commands: Lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_discard_import_tablespace( Sql_cmd_discard_import_tablespace::DISCARD_TABLESPACE); - if (Lex->m_sql_cmd == NULL) + if (unlikely(Lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } | IMPORT TABLESPACE @@ -7876,7 +7904,7 @@ alter_commands: Lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_discard_import_tablespace( Sql_cmd_discard_import_tablespace::IMPORT_TABLESPACE); - if (Lex->m_sql_cmd == NULL) + if (unlikely(Lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } | alter_list @@ -7915,7 +7943,7 @@ alter_commands: DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table_optimize_partition(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } opt_no_write_to_binlog @@ -7928,7 +7956,7 @@ alter_commands: DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table_analyze_partition(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } | CHECK_SYM PARTITION_SYM all_or_alt_part_name_list @@ -7938,7 +7966,7 @@ alter_commands: DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table_check_partition(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } opt_mi_check_type @@ -7951,7 +7979,7 @@ alter_commands: DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table_repair_partition(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } opt_mi_repair_type @@ -7969,7 +7997,7 @@ alter_commands: DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table_truncate_partition(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } | reorg_partition_rule @@ -7979,21 +8007,19 @@ alter_commands: LEX *lex= thd->lex; lex->select_lex.db= $6->db; if (lex->select_lex.db.str == NULL && - lex->copy_db_to(&lex->select_lex.db)) - { + 
unlikely(lex->copy_db_to(&lex->select_lex.db))) MYSQL_YYABORT; - } lex->name= $6->table; lex->alter_info.partition_flags|= ALTER_PARTITION_EXCHANGE; - if (!lex->select_lex.add_table_to_list(thd, $6, NULL, - TL_OPTION_UPDATING, - TL_READ_NO_INSERT, - MDL_SHARED_NO_WRITE)) + if (unlikely(!lex->select_lex.add_table_to_list(thd, $6, NULL, + TL_OPTION_UPDATING, + TL_READ_NO_INSERT, + MDL_SHARED_NO_WRITE))) MYSQL_YYABORT; DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table_exchange_partition(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -8019,11 +8045,9 @@ add_partition_rule: { LEX *lex= Lex; lex->part_info= new (thd->mem_root) partition_info(); - if (!lex->part_info) - { - mem_alloc_error(sizeof(partition_info)); + if (unlikely(!lex->part_info)) MYSQL_YYABORT; - } + lex->alter_info.partition_flags|= ALTER_PARTITION_ADD; DBUG_ASSERT(!Lex->create_info.if_not_exists()); lex->create_info.set($3); @@ -8051,11 +8075,9 @@ reorg_partition_rule: { LEX *lex= Lex; lex->part_info= new (thd->mem_root) partition_info(); - if (!lex->part_info) - { - mem_alloc_error(sizeof(partition_info)); + if (unlikely(!lex->part_info)) MYSQL_YYABORT; - } + lex->no_write_to_binlog= $3; } reorg_parts_rule @@ -8085,12 +8107,9 @@ alt_part_name_list: alt_part_name_item: ident { - if (Lex->alter_info.partition_names.push_back($1.str, - thd->mem_root)) - { - mem_alloc_error(1); + if (unlikely(Lex->alter_info.partition_names.push_back($1.str, + thd->mem_root))) MYSQL_YYABORT; - } } ; @@ -8161,7 +8180,7 @@ alter_list_item: LEX *lex=Lex; Alter_drop *ad= (new (thd->mem_root) Alter_drop(Alter_drop::COLUMN, $4.str, $3)); - if (ad == NULL) + if (unlikely(ad == NULL)) MYSQL_YYABORT; lex->alter_info.drop_list.push_back(ad, thd->mem_root); lex->alter_info.flags|= ALTER_PARSER_DROP_COLUMN; @@ -8172,7 +8191,7 @@ alter_list_item: Alter_drop *ad= (new (thd->mem_root) Alter_drop(Alter_drop::CHECK_CONSTRAINT, $4.str, $3)); - if (ad == NULL) + if (unlikely(ad == NULL)) MYSQL_YYABORT; lex->alter_info.drop_list.push_back(ad, thd->mem_root); lex->alter_info.flags|= ALTER_DROP_CHECK_CONSTRAINT; @@ -8182,7 +8201,7 @@ alter_list_item: LEX *lex=Lex; Alter_drop *ad= (new (thd->mem_root) Alter_drop(Alter_drop::FOREIGN_KEY, $5.str, $4)); - if (ad == NULL) + if (unlikely(ad == NULL)) MYSQL_YYABORT; lex->alter_info.drop_list.push_back(ad, thd->mem_root); lex->alter_info.flags|= ALTER_DROP_FOREIGN_KEY; @@ -8193,7 +8212,7 @@ alter_list_item: Alter_drop *ad= (new (thd->mem_root) Alter_drop(Alter_drop::KEY, primary_key_name, FALSE)); - if (ad == NULL) + if (unlikely(ad == NULL)) MYSQL_YYABORT; lex->alter_info.drop_list.push_back(ad, thd->mem_root); lex->alter_info.flags|= ALTER_DROP_INDEX; @@ -8203,7 +8222,7 @@ alter_list_item: LEX *lex=Lex; Alter_drop *ad= (new (thd->mem_root) Alter_drop(Alter_drop::KEY, $4.str, $3)); - if (ad == NULL) + if (unlikely(ad == NULL)) MYSQL_YYABORT; lex->alter_info.drop_list.push_back(ad, thd->mem_root); lex->alter_info.flags|= ALTER_DROP_INDEX; @@ -8222,12 +8241,13 @@ alter_list_item: } | ALTER opt_column opt_if_exists_table_element field_ident SET DEFAULT column_default_expr { - if (Lex->add_alter_list($4.str, $7, $3)) + if (unlikely(Lex->add_alter_list($4.str, $7, $3))) MYSQL_YYABORT; } | ALTER opt_column opt_if_exists_table_element field_ident DROP DEFAULT { - if (Lex->add_alter_list($4.str, (Virtual_column_info*) 0, $3)) + if (unlikely(Lex->add_alter_list($4.str, (Virtual_column_info*) 0, + $3))) MYSQL_YYABORT; } | RENAME opt_to table_ident @@ 
-8235,12 +8255,11 @@ alter_list_item: LEX *lex=Lex; lex->select_lex.db= $3->db; if (lex->select_lex.db.str == NULL && - lex->copy_db_to(&lex->select_lex.db)) - { + unlikely(lex->copy_db_to(&lex->select_lex.db))) MYSQL_YYABORT; - } - if (check_table_name($3->table.str,$3->table.length, FALSE) || - ($3->db.str && check_db_name((LEX_STRING*) &$3->db))) + if (unlikely(check_table_name($3->table.str,$3->table.length, + FALSE)) || + ($3->db.str && unlikely(check_db_name((LEX_STRING*) &$3->db)))) my_yyabort_error((ER_WRONG_TABLE_NAME, MYF(0), $3->table.str)); lex->name= $3->table; lex->alter_info.flags|= ALTER_RENAME; @@ -8252,10 +8271,10 @@ alter_list_item: $4= thd->variables.collation_database; } $5= $5 ? $5 : $4; - if (!my_charset_same($4,$5)) + if (unlikely(!my_charset_same($4,$5))) my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0), $5->name, $4->csname)); - if (Lex->create_info.add_alter_list_item_convert_to_charset($5)) + if (unlikely(Lex->create_info.add_alter_list_item_convert_to_charset($5))) MYSQL_YYABORT; Lex->alter_info.flags|= ALTER_OPTIONS; } @@ -8310,7 +8329,7 @@ alter_algorithm_option: } | ALGORITHM_SYM opt_equal ident { - if (Lex->alter_info.set_requested_algorithm(&$3)) + if (unlikely(Lex->alter_info.set_requested_algorithm(&$3))) my_yyabort_error((ER_UNKNOWN_ALTER_ALGORITHM, MYF(0), $3.str)); } ; @@ -8323,13 +8342,13 @@ alter_lock_option: } | LOCK_SYM opt_equal ident { - if (Lex->alter_info.set_requested_lock(&$3)) + if (unlikely(Lex->alter_info.set_requested_lock(&$3))) my_yyabort_error((ER_UNKNOWN_ALTER_LOCK, MYF(0), $3.str)); } ; opt_column: - /* empty */ {} + /* empty */ {} %prec PREC_BELOW_IDENTIFIER_OPT_SPECIAL_CASE | COLUMN_SYM {} ; @@ -8429,8 +8448,8 @@ start: LEX *lex= Lex; lex->sql_command= SQLCOM_BEGIN; /* READ ONLY and READ WRITE are mutually exclusive. 
*/ - if (($3 & MYSQL_START_TRANS_OPT_READ_WRITE) && - ($3 & MYSQL_START_TRANS_OPT_READ_ONLY)) + if (unlikely(($3 & MYSQL_START_TRANS_OPT_READ_WRITE) && + ($3 & MYSQL_START_TRANS_OPT_READ_ONLY))) { thd->parse_error(); MYSQL_YYABORT; @@ -8498,10 +8517,10 @@ slave_until: | UNTIL_SYM slave_until_opts { LEX *lex=Lex; - if (((lex->mi.log_file_name || lex->mi.pos) && - (lex->mi.relay_log_name || lex->mi.relay_log_pos)) || - !((lex->mi.log_file_name && lex->mi.pos) || - (lex->mi.relay_log_name && lex->mi.relay_log_pos))) + if (unlikely(((lex->mi.log_file_name || lex->mi.pos) && + (lex->mi.relay_log_name || lex->mi.relay_log_pos)) || + !((lex->mi.log_file_name && lex->mi.pos) || + (lex->mi.relay_log_name && lex->mi.relay_log_pos)))) my_yyabort_error((ER_BAD_SLAVE_UNTIL_COND, MYF(0))); } | UNTIL_SYM MASTER_GTID_POS_SYM '=' TEXT_STRING_sys @@ -8556,7 +8575,7 @@ repair: LEX* lex= thd->lex; DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_repair_table(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -8598,7 +8617,7 @@ analyze: LEX* lex= thd->lex; DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_analyze_table(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -8633,7 +8652,7 @@ persistent_column_stat_spec: { LEX* lex= thd->lex; lex->column_list= new (thd->mem_root) List; - if (lex->column_list == NULL) + if (unlikely(lex->column_list == NULL)) MYSQL_YYABORT; } table_column_list @@ -8646,7 +8665,7 @@ persistent_index_stat_spec: { LEX* lex= thd->lex; lex->index_list= new (thd->mem_root) List; - if (lex->index_list == NULL) + if (unlikely(lex->index_list == NULL)) MYSQL_YYABORT; } table_index_list @@ -8720,11 +8739,11 @@ check: CHECK_SYM check_view_or_table { LEX* lex= thd->lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "CHECK")); DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_check_table(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -8769,7 +8788,7 @@ optimize: LEX* lex= thd->lex; DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_optimize_table(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -8796,14 +8815,14 @@ rename: rename_list: user TO_SYM user { - if (Lex->users_list.push_back($1, thd->mem_root) || - Lex->users_list.push_back($3, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($1, thd->mem_root) || + Lex->users_list.push_back($3, thd->mem_root))) MYSQL_YYABORT; } | rename_list ',' user TO_SYM user { - if (Lex->users_list.push_back($3, thd->mem_root) || - Lex->users_list.push_back($5, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($3, thd->mem_root) || + Lex->users_list.push_back($5, thd->mem_root))) MYSQL_YYABORT; } ; @@ -8818,10 +8837,12 @@ table_to_table: { LEX *lex=Lex; SELECT_LEX *sl= lex->current_select; - if (!sl->add_table_to_list(thd, $1,NULL,TL_OPTION_UPDATING, - TL_IGNORE, MDL_EXCLUSIVE) || - !sl->add_table_to_list(thd, $4, NULL, TL_OPTION_UPDATING, - TL_IGNORE, MDL_EXCLUSIVE)) + if (unlikely(!sl->add_table_to_list(thd, $1,NULL, + TL_OPTION_UPDATING, + TL_IGNORE, MDL_EXCLUSIVE)) || + unlikely(!sl->add_table_to_list(thd, $4, NULL, + TL_OPTION_UPDATING, + TL_IGNORE, MDL_EXCLUSIVE))) MYSQL_YYABORT; } ; @@ -8852,9 +8873,10 @@ keycache_list: assign_to_keycache: table_ident cache_keys_spec { - if (!Select->add_table_to_list(thd, $1, NULL, 0, 
TL_READ, - MDL_SHARED_READ, - Select->pop_index_hints())) + if (unlikely(!Select->add_table_to_list(thd, $1, NULL, 0, TL_READ, + MDL_SHARED_READ, + Select-> + pop_index_hints()))) MYSQL_YYABORT; } ; @@ -8862,9 +8884,10 @@ assign_to_keycache: assign_to_keycache_parts: table_ident adm_partition cache_keys_spec { - if (!Select->add_table_to_list(thd, $1, NULL, 0, TL_READ, - MDL_SHARED_READ, - Select->pop_index_hints())) + if (unlikely(!Select->add_table_to_list(thd, $1, NULL, 0, TL_READ, + MDL_SHARED_READ, + Select-> + pop_index_hints()))) MYSQL_YYABORT; } ; @@ -8898,9 +8921,10 @@ preload_list: preload_keys: table_ident cache_keys_spec opt_ignore_leaves { - if (!Select->add_table_to_list(thd, $1, NULL, $3, TL_READ, - MDL_SHARED_READ, - Select->pop_index_hints())) + if (unlikely(!Select->add_table_to_list(thd, $1, NULL, $3, TL_READ, + MDL_SHARED_READ, + Select-> + pop_index_hints()))) MYSQL_YYABORT; } ; @@ -8908,9 +8932,10 @@ preload_keys: preload_keys_parts: table_ident adm_partition cache_keys_spec opt_ignore_leaves { - if (!Select->add_table_to_list(thd, $1, NULL, $4, TL_READ, - MDL_SHARED_READ, - Select->pop_index_hints())) + if (unlikely(!Select->add_table_to_list(thd, $1, NULL, $4, TL_READ, + MDL_SHARED_READ, + Select-> + pop_index_hints()))) MYSQL_YYABORT; } ; @@ -9200,13 +9225,14 @@ select_options: /* empty*/ | select_option_list { - if (Select->options & SELECT_DISTINCT && Select->options & SELECT_ALL) + if (unlikely((Select->options & SELECT_DISTINCT) && + (Select->options & SELECT_ALL))) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "ALL", "DISTINCT")); } ; opt_history_unit: - /* empty*/ + /* empty*/ %prec PREC_BELOW_IDENTIFIER_OPT_SPECIAL_CASE { $$= VERS_UNDEFINED; } @@ -9221,9 +9247,13 @@ opt_history_unit: ; history_point: - temporal_literal + TIMESTAMP TEXT_STRING { - $$= Vers_history_point(VERS_TIMESTAMP, $1); + Item *item; + if (!(item= create_temporal_literal(thd, $2.str, $2.length, YYCSCL, + MYSQL_TYPE_DATETIME, true))) + MYSQL_YYABORT; + $$= Vers_history_point(VERS_TIMESTAMP, item); } | function_call_keyword_timestamp { @@ -9278,11 +9308,11 @@ select_option: Allow this flag only on the first top-level SELECT statement, if SQL_CACHE wasn't specified, and only once per query. */ - if (Lex->current_select != &Lex->select_lex) + if (unlikely(Lex->current_select != &Lex->select_lex)) my_yyabort_error((ER_CANT_USE_OPTION_HERE, MYF(0), "SQL_NO_CACHE")); - if (Lex->select_lex.sql_cache == SELECT_LEX::SQL_CACHE) + if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_CACHE)) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "SQL_CACHE", "SQL_NO_CACHE")); - if (Lex->select_lex.sql_cache == SELECT_LEX::SQL_NO_CACHE) + if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_NO_CACHE)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "SQL_NO_CACHE")); Lex->safe_to_cache_query=0; @@ -9295,11 +9325,11 @@ select_option: Allow this flag only on the first top-level SELECT statement, if SQL_NO_CACHE wasn't specified, and only once per query. 
*/ - if (Lex->current_select != &Lex->select_lex) + if (unlikely(Lex->current_select != &Lex->select_lex)) my_yyabort_error((ER_CANT_USE_OPTION_HERE, MYF(0), "SQL_CACHE")); - if (Lex->select_lex.sql_cache == SELECT_LEX::SQL_NO_CACHE) + if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_NO_CACHE)) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "SQL_NO_CACHE", "SQL_CACHE")); - if (Lex->select_lex.sql_cache == SELECT_LEX::SQL_CACHE) + if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_CACHE)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "SQL_CACHE")); Lex->safe_to_cache_query=1; @@ -9335,30 +9365,30 @@ select_item_list: Item *item= new (thd->mem_root) Item_field(thd, &thd->lex->current_select->context, NULL, NULL, &star_clex_str); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; - if (add_item_to_list(thd, item)) + if (unlikely(add_item_to_list(thd, item))) MYSQL_YYABORT; (thd->lex->current_select->with_wild)++; } ; select_item: - remember_name table_wild remember_end + remember_name select_sublist_qualified_asterisk remember_end { - if (add_item_to_list(thd, $2)) + if (unlikely(add_item_to_list(thd, $2))) MYSQL_YYABORT; } | remember_name expr remember_end select_alias { DBUG_ASSERT($1 < $3); - if (add_item_to_list(thd, $2)) + if (unlikely(add_item_to_list(thd, $2))) MYSQL_YYABORT; if ($4.str) { - if (Lex->sql_command == SQLCOM_CREATE_VIEW && - check_column_name($4.str)) + if (unlikely(Lex->sql_command == SQLCOM_CREATE_VIEW && + check_column_name($4.str))) my_yyabort_error((ER_WRONG_COLUMN_NAME, MYF(0), $4.str)); $2->is_autogenerated_name= FALSE; $2->set_name(thd, $4.str, $4.length, system_charset_info); @@ -9462,7 +9492,7 @@ expr: { /* X OR Y */ $$= new (thd->mem_root) Item_cond_or(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } } @@ -9470,7 +9500,7 @@ expr: { /* XOR is a proprietary extension */ $$= new (thd->mem_root) Item_func_xor(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | expr and expr %prec AND_SYM @@ -9512,84 +9542,84 @@ expr: { /* X AND Y */ $$= new (thd->mem_root) Item_cond_and(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } } | NOT_SYM expr %prec NOT_SYM { $$= negate_expression(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS TRUE_SYM %prec IS { $$= new (thd->mem_root) Item_func_istrue(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS not TRUE_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnottrue(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS FALSE_SYM %prec IS { $$= new (thd->mem_root) Item_func_isfalse(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS not FALSE_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnotfalse(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS UNKNOWN_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnull(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS not UNKNOWN_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnotnull(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | bool_pri + | bool_pri %prec PREC_BELOW_NOT ; bool_pri: bool_pri IS NULL_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnull(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS not NULL_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnotnull(thd, $1); - if ($$ == NULL) + if 
(unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri EQUAL_SYM predicate %prec EQUAL_SYM { $$= new (thd->mem_root) Item_func_equal(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri comp_op predicate %prec '=' { $$= (*$2)(0)->create(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri comp_op all_or_any '(' subselect ')' %prec '=' { $$= all_any_subquery_creator(thd, $1, $2, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | predicate @@ -9599,22 +9629,22 @@ predicate: bit_expr IN_SYM '(' subselect ')' { $$= new (thd->mem_root) Item_in_subselect(thd, $1, $4); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr not IN_SYM '(' subselect ')' { Item *item= new (thd->mem_root) Item_in_subselect(thd, $1, $5); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= negate_expression(thd, item); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr IN_SYM '(' expr ')' { $$= handle_sql2003_note184_exception(thd, $1, true, $4); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr IN_SYM '(' expr ',' expr_list ')' @@ -9622,13 +9652,13 @@ predicate: $6->push_front($4, thd->mem_root); $6->push_front($1, thd->mem_root); $$= new (thd->mem_root) Item_func_in(thd, *$6); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr not IN_SYM '(' expr ')' { $$= handle_sql2003_note184_exception(thd, $1, false, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr not IN_SYM '(' expr ',' expr_list ')' @@ -9636,21 +9666,21 @@ predicate: $7->push_front($5, thd->mem_root); $7->push_front($1, thd->mem_root); Item_func_in *item= new (thd->mem_root) Item_func_in(thd, *$7); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= item->neg_transformer(thd); } | bit_expr BETWEEN_SYM bit_expr AND_SYM predicate { $$= new (thd->mem_root) Item_func_between(thd, $1, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr not BETWEEN_SYM bit_expr AND_SYM predicate { Item_func_between *item; item= new (thd->mem_root) Item_func_between(thd, $1, $4, $6); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= item->neg_transformer(thd); } @@ -9658,131 +9688,157 @@ predicate: { Item *item1= new (thd->mem_root) Item_func_soundex(thd, $1); Item *item4= new (thd->mem_root) Item_func_soundex(thd, $4); - if ((item1 == NULL) || (item4 == NULL)) + if (unlikely(item1 == NULL) || unlikely(item4 == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_func_eq(thd, item1, item4); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | bit_expr LIKE simple_expr opt_escape + | bit_expr LIKE mysql_concatenation_expr opt_escape { $$= new (thd->mem_root) Item_func_like(thd, $1, $3, $4, Lex->escape_used); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | bit_expr not LIKE simple_expr opt_escape + | bit_expr not LIKE mysql_concatenation_expr opt_escape { Item *item= new (thd->mem_root) Item_func_like(thd, $1, $4, $5, Lex->escape_used); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= item->neg_transformer(thd); } | bit_expr REGEXP bit_expr { $$= new (thd->mem_root) Item_func_regex(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr not REGEXP bit_expr { Item *item= new (thd->mem_root) Item_func_regex(thd, $1, $4); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= negate_expression(thd, item); - 
if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | bit_expr + | bit_expr %prec PREC_BELOW_NOT ; bit_expr: bit_expr '|' bit_expr %prec '|' { $$= new (thd->mem_root) Item_func_bit_or(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '&' bit_expr %prec '&' { $$= new (thd->mem_root) Item_func_bit_and(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr SHIFT_LEFT bit_expr %prec SHIFT_LEFT { $$= new (thd->mem_root) Item_func_shift_left(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr SHIFT_RIGHT bit_expr %prec SHIFT_RIGHT { $$= new (thd->mem_root) Item_func_shift_right(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + } + | bit_expr ORACLE_CONCAT_SYM bit_expr + { + $$= new (thd->mem_root) Item_func_concat_operator_oracle(thd, + $1, $3); + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '+' bit_expr %prec '+' { $$= new (thd->mem_root) Item_func_plus(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '-' bit_expr %prec '-' { $$= new (thd->mem_root) Item_func_minus(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '+' INTERVAL_SYM expr interval %prec '+' { $$= new (thd->mem_root) Item_date_add_interval(thd, $1, $4, $5, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '-' INTERVAL_SYM expr interval %prec '-' { $$= new (thd->mem_root) Item_date_add_interval(thd, $1, $4, $5, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + } + | INTERVAL_SYM expr interval '+' expr + /* we cannot put interval before - */ + { + $$= new (thd->mem_root) Item_date_add_interval(thd, $5, $2, $3, 0); + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + } + | '+' INTERVAL_SYM expr interval '+' expr %prec NEG + { + $$= new (thd->mem_root) Item_date_add_interval(thd, $6, $3, $4, 0); + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + } + | '-' INTERVAL_SYM expr interval '+' expr %prec NEG + { + $$= new (thd->mem_root) Item_date_add_interval(thd, $6, $3, $4, 1); + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '*' bit_expr %prec '*' { $$= new (thd->mem_root) Item_func_mul(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '/' bit_expr %prec '/' { $$= new (thd->mem_root) Item_func_div(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '%' bit_expr %prec '%' { $$= new (thd->mem_root) Item_func_mod(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr DIV_SYM bit_expr %prec DIV_SYM { $$= new (thd->mem_root) Item_func_int_div(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr MOD_SYM bit_expr %prec MOD_SYM { $$= new (thd->mem_root) Item_func_mod(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '^' bit_expr { $$= new (thd->mem_root) Item_func_bit_xor(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | simple_expr + | mysql_concatenation_expr %prec '^' ; or: @@ -9869,7 +9925,7 @@ dyncall_create_element: LEX *lex= Lex; $$= (DYNCALL_CREATE_DEF *) alloc_root(thd->mem_root, sizeof(DYNCALL_CREATE_DEF)); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; $$->key= $1; $$->value= $3; @@ -9889,7 +9945,7 @@ dyncall_create_list: dyncall_create_element { $$= new (thd->mem_root) List; - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; 
$$->push_back($1, thd->mem_root); } @@ -9955,18 +10011,18 @@ column_default_non_parenthesized_expr: { $5->push_front($3, thd->mem_root); $$= new (thd->mem_root) Item_row(thd, *$5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | EXISTS '(' subselect ')' { $$= new (thd->mem_root) Item_exists_subselect(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | '{' ident expr '}' { - if (!($$= $3->make_odbc_literal(thd, &$2))) + if (unlikely(!($$= $3->make_odbc_literal(thd, &$2)))) MYSQL_YYABORT; } | MATCH ident_list_arg AGAINST '(' bit_expr fulltext_options ')' @@ -9974,46 +10030,46 @@ column_default_non_parenthesized_expr: $2->push_front($5, thd->mem_root); Item_func_match *i1= new (thd->mem_root) Item_func_match(thd, *$2, $6); - if (i1 == NULL) + if (unlikely(i1 == NULL)) MYSQL_YYABORT; Select->add_ftfunc_to_list(thd, i1); $$= i1; } | CAST_SYM '(' expr AS cast_type ')' { - if (!($$= $5.create_typecast_item(thd, $3, Lex->charset))) + if (unlikely(!($$= $5.create_typecast_item(thd, $3, Lex->charset)))) MYSQL_YYABORT; } | CASE_SYM when_list_opt_else END { - if (!($$= new(thd->mem_root) Item_func_case_searched(thd, *$2))) + if (unlikely(!($$= new(thd->mem_root) Item_func_case_searched(thd, *$2)))) MYSQL_YYABORT; } | CASE_SYM expr when_list_opt_else END { $3->push_front($2, thd->mem_root); - if (!($$= new (thd->mem_root) Item_func_case_simple(thd, *$3))) + if (unlikely(!($$= new (thd->mem_root) Item_func_case_simple(thd, *$3)))) MYSQL_YYABORT; } | CONVERT_SYM '(' expr ',' cast_type ')' { - if (!($$= $5.create_typecast_item(thd, $3, Lex->charset))) + if (unlikely(!($$= $5.create_typecast_item(thd, $3, Lex->charset)))) MYSQL_YYABORT; } | CONVERT_SYM '(' expr USING charset_name ')' { $$= new (thd->mem_root) Item_func_conv_charset(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DEFAULT '(' simple_ident ')' { Item_splocal *il= $3->get_item_splocal(); - if (il) + if (unlikely(il)) my_yyabort_error((ER_WRONG_COLUMN_NAME, MYF(0), il->my_name()->str)); $$= new (thd->mem_root) Item_default_value(thd, Lex->current_context(), $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->default_used= TRUE; } @@ -10021,64 +10077,66 @@ column_default_non_parenthesized_expr: { $$= new (thd->mem_root) Item_insert_value(thd, Lex->current_context(), $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | NEXT_SYM VALUE_SYM FOR_SYM table_ident { - if (!($$= Lex->create_item_func_nextval(thd, $4))) + if (unlikely(!($$= Lex->create_item_func_nextval(thd, $4)))) MYSQL_YYABORT; } | NEXTVAL_SYM '(' table_ident ')' { - if (!($$= Lex->create_item_func_nextval(thd, $3))) + if (unlikely(!($$= Lex->create_item_func_nextval(thd, $3)))) MYSQL_YYABORT; } | PREVIOUS_SYM VALUE_SYM FOR_SYM table_ident { - if (!($$= Lex->create_item_func_lastval(thd, $4))) + if (unlikely(!($$= Lex->create_item_func_lastval(thd, $4)))) MYSQL_YYABORT; } | LASTVAL_SYM '(' table_ident ')' { - if (!($$= Lex->create_item_func_lastval(thd, $3))) + if (unlikely(!($$= Lex->create_item_func_lastval(thd, $3)))) MYSQL_YYABORT; } | SETVAL_SYM '(' table_ident ',' longlong_num ')' { - if (!($$= Lex->create_item_func_setval(thd, $3, $5, 0, 1))) + if (unlikely(!($$= Lex->create_item_func_setval(thd, $3, $5, 0, 1)))) MYSQL_YYABORT; } | SETVAL_SYM '(' table_ident ',' longlong_num ',' bool ')' { - if (!($$= Lex->create_item_func_setval(thd, $3, $5, 0, $7))) + if (unlikely(!($$= Lex->create_item_func_setval(thd, $3, $5, 0, $7)))) MYSQL_YYABORT; } | SETVAL_SYM '(' table_ident ',' 
longlong_num ',' bool ',' ulonglong_num ')' { - if (!($$= Lex->create_item_func_setval(thd, $3, $5, $9, $7))) + if (unlikely(!($$= Lex->create_item_func_setval(thd, $3, $5, $9, $7)))) + MYSQL_YYABORT; + } + ; + +primary_expr: + column_default_non_parenthesized_expr + | '(' parenthesized_expr ')' { $$= $2; } + ; + +string_factor_expr: + primary_expr + | string_factor_expr COLLATE_SYM collation_name + { + if (unlikely(!($$= new (thd->mem_root) Item_func_set_collation(thd, $1, $3)))) MYSQL_YYABORT; } ; simple_expr: - column_default_non_parenthesized_expr - | simple_expr COLLATE_SYM collation_name %prec NEG - { - if (!($$= new (thd->mem_root) Item_func_set_collation(thd, $1, $3))) - MYSQL_YYABORT; - } - | '(' parenthesized_expr ')' { $$= $2; } - | BINARY simple_expr %prec NEG + string_factor_expr %prec NEG + | BINARY simple_expr { Type_cast_attributes at(&my_charset_bin); - if (!($$= type_handler_long_blob.create_typecast_item(thd, $2, at))) - MYSQL_YYABORT; - } - | simple_expr OR_OR_SYM simple_expr - { - $$= new (thd->mem_root) Item_func_concat(thd, $1, $3); - if ($$ == NULL) + if (unlikely(!($$= type_handler_long_blob.create_typecast_item(thd, $2, at)))) MYSQL_YYABORT; } | '+' simple_expr %prec NEG @@ -10088,26 +10146,29 @@ simple_expr: | '-' simple_expr %prec NEG { $$= $2->neg(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | '~' simple_expr %prec NEG { $$= new (thd->mem_root) Item_func_bit_neg(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | not2 simple_expr %prec NEG { $$= negate_expression(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | INTERVAL_SYM expr interval '+' expr %prec INTERVAL_SYM - /* we cannot put interval before - */ + ; + +mysql_concatenation_expr: + simple_expr + | mysql_concatenation_expr MYSQL_CONCAT_SYM simple_expr { - $$= new (thd->mem_root) Item_date_add_interval(thd, $5, $2, $3, 0); - if ($$ == NULL) + $$= new (thd->mem_root) Item_func_concat(thd, $1, $3); + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -10117,13 +10178,13 @@ function_call_keyword_timestamp: { $$= new (thd->mem_root) Item_datetime_typecast(thd, $3, AUTO_SEC_PART_DIGITS); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | TIMESTAMP '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_add_time(thd, $3, $5, 1, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -10137,20 +10198,20 @@ function_call_keyword: CHAR_SYM '(' expr_list ')' { $$= new (thd->mem_root) Item_func_char(thd, *$3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CHAR_SYM '(' expr_list USING charset_name ')' { $$= new (thd->mem_root) Item_func_char(thd, *$3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CURRENT_USER optional_braces { $$= new (thd->mem_root) Item_func_current_user(thd, Lex->current_context()); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); Lex->safe_to_cache_query= 0; @@ -10159,7 +10220,7 @@ function_call_keyword: { $$= new (thd->mem_root) Item_func_current_role(thd, Lex->current_context()); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); Lex->safe_to_cache_query= 0; @@ -10167,87 +10228,88 @@ function_call_keyword: | DATE_SYM '(' expr ')' { $$= new (thd->mem_root) Item_date_typecast(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DAY_SYM '(' expr ')' { $$= new (thd->mem_root) 
Item_func_dayofmonth(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | HOUR_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_hour(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | INSERT '(' expr ',' expr ',' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_insert(thd, $3, $5, $7, $9); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | INTERVAL_SYM '(' expr ',' expr ')' %prec INTERVAL_SYM + | INTERVAL_SYM '(' expr ',' expr ')' { List *list= new (thd->mem_root) List; - if (list == NULL) + if (unlikely(list == NULL)) + MYSQL_YYABORT; + if (unlikely(list->push_front($5, thd->mem_root)) || + unlikely(list->push_front($3, thd->mem_root))) MYSQL_YYABORT; - list->push_front($5, thd->mem_root); - list->push_front($3, thd->mem_root); Item_row *item= new (thd->mem_root) Item_row(thd, *list); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_func_interval(thd, item); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | INTERVAL_SYM '(' expr ',' expr ',' expr_list ')' %prec INTERVAL_SYM + | INTERVAL_SYM '(' expr ',' expr ',' expr_list ')' { $7->push_front($5, thd->mem_root); $7->push_front($3, thd->mem_root); Item_row *item= new (thd->mem_root) Item_row(thd, *$7); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_func_interval(thd, item); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LEFT '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_left(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MINUTE_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_minute(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MONTH_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_month(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | RIGHT '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_right(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SECOND_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_second(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | TIME_SYM '(' expr ')' { $$= new (thd->mem_root) Item_time_typecast(thd, $3, AUTO_SEC_PART_DIGITS); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | function_call_keyword_timestamp @@ -10256,13 +10318,13 @@ function_call_keyword: } | TRIM '(' trim_operands ')' { - if (!($$= $3.make_item_func_trim(thd))) + if (unlikely(!($$= $3.make_item_func_trim(thd)))) MYSQL_YYABORT; } | USER_SYM '(' ')' { $$= new (thd->mem_root) Item_func_user(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); Lex->safe_to_cache_query=0; @@ -10270,7 +10332,7 @@ function_call_keyword: | YEAR_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_year(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -10292,117 +10354,115 @@ function_call_nonkeyword: { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $5, INTERVAL_DAY, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ADDDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')' { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $6, $7, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CURDATE optional_braces { $$= new (thd->mem_root) Item_func_curdate_local(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; 
Lex->safe_to_cache_query=0; } | CURTIME opt_time_precision { $$= new (thd->mem_root) Item_func_curtime_local(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } | DATE_ADD_INTERVAL '(' expr ',' INTERVAL_SYM expr interval ')' - %prec INTERVAL_SYM { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $6, $7, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DATE_SUB_INTERVAL '(' expr ',' INTERVAL_SYM expr interval ')' - %prec INTERVAL_SYM { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $6, $7, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DATE_FORMAT_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_date_format(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DATE_FORMAT_SYM '(' expr ',' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_date_format(thd, $3, $5, $7); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DECODE_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_decode(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | EXTRACT_SYM '(' interval FROM expr ')' { $$=new (thd->mem_root) Item_extract(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | GET_FORMAT '(' date_time_type ',' expr ')' { $$= new (thd->mem_root) Item_func_get_format(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | NOW_SYM opt_time_precision { $$= new (thd->mem_root) Item_func_now_local(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } | POSITION_SYM '(' bit_expr IN_SYM expr ')' { $$= new (thd->mem_root) Item_func_locate(thd, $5, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SUBDATE_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $5, INTERVAL_DAY, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SUBDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')' { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $6, $7, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SUBSTRING '(' expr ',' expr ',' expr ')' { - if (!($$= Lex->make_item_func_substr(thd, $3, $5, $7))) + if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5, $7)))) MYSQL_YYABORT; } | SUBSTRING '(' expr ',' expr ')' { - if (!($$= Lex->make_item_func_substr(thd, $3, $5))) + if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5)))) MYSQL_YYABORT; } | SUBSTRING '(' expr FROM expr FOR_SYM expr ')' { - if (!($$= Lex->make_item_func_substr(thd, $3, $5, $7))) + if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5, $7)))) MYSQL_YYABORT; } | SUBSTRING '(' expr FROM expr ')' { - if (!($$= Lex->make_item_func_substr(thd, $3, $5))) + if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5)))) MYSQL_YYABORT; } | SYSDATE opt_time_precision @@ -10419,45 +10479,45 @@ function_call_nonkeyword: $$= new (thd->mem_root) Item_func_sysdate_local(thd, $2); else $$= new (thd->mem_root) Item_func_now_local(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } | TIMESTAMP_ADD '(' interval_time_stamp ',' expr ',' expr ')' { $$= new (thd->mem_root) Item_date_add_interval(thd, $7, $5, $3, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | TIMESTAMP_DIFF '(' interval_time_stamp ',' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_timestamp_diff(thd, $5, $7, $3); - if ($$ == NULL) + if (unlikely($$ 
== NULL)) MYSQL_YYABORT; } | TRIM_ORACLE '(' trim_operands ')' { - if (!($$= $3.make_item_func_trim_oracle(thd))) + if (unlikely(!($$= $3.make_item_func_trim_oracle(thd)))) MYSQL_YYABORT; } | UTC_DATE_SYM optional_braces { $$= new (thd->mem_root) Item_func_curdate_utc(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } | UTC_TIME_SYM opt_time_precision { $$= new (thd->mem_root) Item_func_curtime_utc(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } | UTC_TIMESTAMP_SYM opt_time_precision { $$= new (thd->mem_root) Item_func_now_utc(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } @@ -10465,28 +10525,28 @@ function_call_nonkeyword: COLUMN_ADD_SYM '(' expr ',' dyncall_create_list ')' { $$= create_func_dyncol_add(thd, $3, *$5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COLUMN_DELETE_SYM '(' expr ',' expr_list ')' { $$= create_func_dyncol_delete(thd, $3, *$5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COLUMN_CHECK_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_dyncol_check(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COLUMN_CREATE_SYM '(' dyncall_create_list ')' { $$= create_func_dyncol_create(thd, *$3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | @@ -10496,7 +10556,7 @@ function_call_nonkeyword: $$= create_func_dyncol_get(thd, $3, $5, $7.type_handler(), $7.length(), $7.dec(), lex->charset); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -10510,50 +10570,50 @@ function_call_conflict: ASCII_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_ascii(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CHARSET '(' expr ')' { $$= new (thd->mem_root) Item_func_charset(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COALESCE '(' expr_list ')' { $$= new (thd->mem_root) Item_func_coalesce(thd, *$3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COLLATION_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_collation(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DATABASE '(' ')' { $$= new (thd->mem_root) Item_func_database(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } | IF_SYM '(' expr ',' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_if(thd, $3, $5, $7); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | FORMAT_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_format(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | FORMAT_SYM '(' expr ',' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_format(thd, $3, $5, $7); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } /* LAST_VALUE here conflicts with the definition for window functions. 
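
The hunks in this section repeat one pattern: an Item (or list, window spec, frame bound) is allocated with placement new on the statement mem_root, and the failure branch — a NULL result, a failed push_back(), or a helper returning true — is wrapped in unlikely() before MYSQL_YYABORT. Below is a minimal, self-contained sketch of that idiom, not the server's code: it assumes unlikely() expands to GCC/Clang's __builtin_expect (with a plain fallback on other compilers) and that the mem_root-style placement new reports exhaustion by returning NULL instead of throwing; the names Arena and Item are illustrative stand-ins.

    // Sketch only: stand-ins for the server's unlikely() macro and
    // mem_root placement allocation; not the real definitions.
    #include <cstddef>
    #include <cstdio>

    #if defined(__GNUC__) || defined(__clang__)
    #  define unlikely(x) __builtin_expect(!!(x), 0)
    #  define likely(x)   __builtin_expect(!!(x), 1)
    #else
    #  define unlikely(x) (x)
    #  define likely(x)   (x)
    #endif

    struct Arena                                  // stand-in for MEM_ROOT
    {
      alignas(std::max_align_t) char buf[256];
      std::size_t used= 0;
      void *alloc(std::size_t n)
      {
        if (unlikely(used + n > sizeof(buf)))
          return nullptr;                         // exhausted: caller must check
        void *p= buf + used;
        used+= n;
        return p;
      }
    };

    // Non-throwing placement new, so callers can keep the
    // "if (unlikely(p == NULL)) abort" style used throughout the grammar.
    void *operator new(std::size_t n, Arena &a) noexcept { return a.alloc(n); }
    void operator delete(void *, Arena &) noexcept {}

    struct Item { int v; explicit Item(int v) : v(v) {} };

    int main()
    {
      Arena root;
      Item *item= new (root) Item(42);
      if (unlikely(item == nullptr))              // cold path, hinted to the compiler
        return 1;                                 // the parser would MYSQL_YYABORT here
      std::printf("%d\n", item->v);
      return 0;
    }

Hinting the abort branch as unlikely keeps the hot parse path as the fall-through case and lets the compiler push the error handling out of line, which is why the wrapper is applied after nearly every allocation and list push in these rules.
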
@@ -10562,75 +10622,75 @@ function_call_conflict: | LAST_VALUE '(' expr ')' { List *list= new (thd->mem_root) List; - if (list == NULL) + if (unlikely(list == NULL)) MYSQL_YYABORT; list->push_back($3, thd->mem_root); $$= new (thd->mem_root) Item_func_last_value(thd, *list); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LAST_VALUE '(' expr_list ',' expr ')' { $3->push_back($5, thd->mem_root); $$= new (thd->mem_root) Item_func_last_value(thd, *$3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MICROSECOND_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_microsecond(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MOD_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_mod(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | OLD_PASSWORD_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_password(thd, $3, Item_func_password::OLD); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | PASSWORD_SYM '(' expr ')' { Item* i1; i1= new (thd->mem_root) Item_func_password(thd, $3); - if (i1 == NULL) + if (unlikely(i1 == NULL)) MYSQL_YYABORT; $$= i1; } | QUARTER_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_quarter(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | REPEAT_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_repeat(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | REPLACE '(' expr ',' expr ',' expr ')' { - if (!($$= Lex->make_item_func_replace(thd, $3, $5, $7))) + if (unlikely(!($$= Lex->make_item_func_replace(thd, $3, $5, $7)))) MYSQL_YYABORT; } | REVERSE_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_reverse(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ROW_COUNT_SYM '(' ')' { $$= new (thd->mem_root) Item_func_row_count(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); Lex->safe_to_cache_query= 0; @@ -10638,25 +10698,25 @@ function_call_conflict: | TRUNCATE_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_round(thd, $3, $5, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | WEEK_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_week(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | WEEK_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_week(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | WEIGHT_STRING_SYM '(' expr opt_ws_levels ')' { $$= new (thd->mem_root) Item_func_weight_string(thd, $3, 0, 0, $4); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | WEIGHT_STRING_SYM '(' expr AS CHAR_SYM ws_nweights opt_ws_levels ')' @@ -10664,26 +10724,26 @@ function_call_conflict: $$= new (thd->mem_root) Item_func_weight_string(thd, $3, 0, $6, $7 | MY_STRXFRM_PAD_WITH_SPACE); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | WEIGHT_STRING_SYM '(' expr AS BINARY ws_nweights ')' { Item *item= new (thd->mem_root) Item_char_typecast(thd, $3, $6, &my_charset_bin); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_func_weight_string(thd, item, 0, $6, MY_STRXFRM_PAD_WITH_SPACE); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | WEIGHT_STRING_SYM '(' expr ',' ulong_num ',' ulong_num ',' ulong_num ')' { $$= new (thd->mem_root) Item_func_weight_string(thd, $3, $5, $7, $9); - if ($$ == NULL) + if (unlikely($$ == NULL)) 
MYSQL_YYABORT; } | geometry_function @@ -10691,7 +10751,7 @@ function_call_conflict: #ifdef HAVE_SPATIAL $$= $1; /* $1 may be NULL, GEOM_NEW not tested for out of memory */ - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; #else my_yyabort_error((ER_FEATURE_DISABLED, MYF(0), sym_group_geom.name, @@ -10779,7 +10839,7 @@ function_call_generic: (udf= find_udf($1.str, $1.length)) && udf->type == UDFTYPE_AGGREGATE) { - if (lex->current_select->inc_in_sum_expr()) + if (unlikely(lex->current_select->inc_in_sum_expr())) { thd->parse_error(); MYSQL_YYABORT; @@ -10794,10 +10854,8 @@ function_call_generic: Create_func *builder; Item *item= NULL; - if (check_routine_name(&$1)) - { + if (unlikely(check_routine_name(&$1))) MYSQL_YYABORT; - } /* Implementation note: @@ -10837,45 +10895,13 @@ function_call_generic: } } - if (! ($$= item)) - { + if (unlikely(! ($$= item))) MYSQL_YYABORT; - } } - | ident '.' ident '(' opt_expr_list ')' + | ident_cli '.' ident_cli '(' opt_expr_list ')' { - Create_qfunc *builder; - Item *item= NULL; - - /* - The following in practice calls: - Create_sp_func::create() - and builds a stored function. - - However, it's important to maintain the interface between the - parser and the implementation in item_create.cc clean, - since this will change with WL#2128 (SQL PATH): - - INFORMATION_SCHEMA.version() is the SQL 99 syntax for the native - function version(), - - MySQL.version() is the SQL 2003 syntax for the native function - version() (a vendor can specify any schema). - */ - - if (!$1.str || check_db_name((LEX_STRING*) &$1)) - my_yyabort_error((ER_WRONG_DB_NAME, MYF(0), $1.str)); - if (check_routine_name(&$3)) - { + if (unlikely(!($$= Lex->make_item_func_call_generic(thd, &$1, &$3, $5)))) MYSQL_YYABORT; - } - - builder= find_qualified_function_builder(thd); - DBUG_ASSERT(builder); - item= builder->create_with_db(thd, &$1, &$3, true, $5); - - if (! 
($$= item)) - { - MYSQL_YYABORT; - } } ; @@ -10905,7 +10931,7 @@ udf_expr_list: udf_expr { $$= new (thd->mem_root) List; - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; $$->push_back($1, thd->mem_root); } @@ -10947,46 +10973,46 @@ sum_expr: AVG_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_avg(thd, $3, FALSE); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | AVG_SYM '(' DISTINCT in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_avg(thd, $4, TRUE); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | BIT_AND '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_and(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | BIT_OR '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_or(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | BIT_XOR '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_xor(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COUNT_SYM '(' opt_all '*' ')' { Item *item= new (thd->mem_root) Item_int(thd, (int32) 0L, 1); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_sum_count(thd, item); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COUNT_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_count(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COUNT_SYM '(' DISTINCT @@ -10996,13 +11022,13 @@ sum_expr: ')' { $$= new (thd->mem_root) Item_sum_count(thd, *$5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MIN_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_min(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } /* @@ -11013,55 +11039,55 @@ sum_expr: | MIN_SYM '(' DISTINCT in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_min(thd, $4); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MAX_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_max(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MAX_SYM '(' DISTINCT in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_max(thd, $4); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | STD_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_std(thd, $3, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | VARIANCE_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_variance(thd, $3, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | STDDEV_SAMP_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_std(thd, $3, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | VAR_SAMP_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_variance(thd, $3, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SUM_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_sum(thd, $3, FALSE); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SUM_SYM '(' DISTINCT in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_sum(thd, $4, TRUE); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | GROUP_CONCAT_SYM '(' opt_distinct @@ -11078,7 +11104,7 @@ sum_expr: sel->gorder_list, $7, $8, sel->select_limit, sel->offset_limit); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; sel->select_limit= NULL; sel->offset_limit= NULL; @@ -11092,25 +11118,25 @@ window_func_expr: window_func OVER_SYM window_name { $$= new (thd->mem_root) Item_window_func(thd, (Item_sum *) $1, $3); - if ($$ == NULL) + if (unlikely($$ == 
NULL)) MYSQL_YYABORT; - if (Select->add_window_func((Item_window_func *) $$)) + if (unlikely(Select->add_window_func((Item_window_func *) $$))) MYSQL_YYABORT; } | window_func OVER_SYM window_spec { LEX *lex= Lex; - if (Select->add_window_spec(thd, lex->win_ref, - Select->group_list, - Select->order_list, - lex->win_frame)) + if (unlikely(Select->add_window_spec(thd, lex->win_ref, + Select->group_list, + Select->order_list, + lex->win_frame))) MYSQL_YYABORT; $$= new (thd->mem_root) Item_window_func(thd, (Item_sum *) $1, thd->lex->win_spec); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; - if (Select->add_window_func((Item_window_func *) $$)) + if (unlikely(Select->add_window_func((Item_window_func *) $$))) MYSQL_YYABORT; } ; @@ -11128,63 +11154,63 @@ simple_window_func: ROW_NUMBER_SYM '(' ')' { $$= new (thd->mem_root) Item_sum_row_number(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | RANK_SYM '(' ')' { $$= new (thd->mem_root) Item_sum_rank(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DENSE_RANK_SYM '(' ')' { $$= new (thd->mem_root) Item_sum_dense_rank(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | PERCENT_RANK_SYM '(' ')' { $$= new (thd->mem_root) Item_sum_percent_rank(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CUME_DIST_SYM '(' ')' { $$= new (thd->mem_root) Item_sum_cume_dist(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | NTILE_SYM '(' expr ')' { $$= new (thd->mem_root) Item_sum_ntile(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | FIRST_VALUE_SYM '(' expr ')' { $$= new (thd->mem_root) Item_sum_first_value(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LAST_VALUE '(' expr ')' { $$= new (thd->mem_root) Item_sum_last_value(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | NTH_VALUE_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_sum_nth_value(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | @@ -11192,17 +11218,17 @@ simple_window_func: { /* No second argument defaults to 1. */ Item* item_offset= new (thd->mem_root) Item_uint(thd, 1); - if (item_offset == NULL) + if (unlikely(item_offset == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_sum_lead(thd, $3, item_offset); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LEAD_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_sum_lead(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | @@ -11210,17 +11236,17 @@ simple_window_func: { /* No second argument defaults to 1. 
*/ Item* item_offset= new (thd->mem_root) Item_uint(thd, 1); - if (item_offset == NULL) + if (unlikely(item_offset == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_sum_lag(thd, $3, item_offset); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LAG_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_sum_lag(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -11232,16 +11258,16 @@ inverse_distribution_function: '(' opt_window_partition_clause ')' { LEX *lex= Lex; - if (Select->add_window_spec(thd, lex->win_ref, - Select->group_list, - Select->order_list, - NULL)) + if (unlikely(Select->add_window_spec(thd, lex->win_ref, + Select->group_list, + Select->order_list, + NULL))) MYSQL_YYABORT; $$= new (thd->mem_root) Item_window_func(thd, (Item_sum *) $1, thd->lex->win_spec); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; - if (Select->add_window_func((Item_window_func *) $$)) + if (unlikely(Select->add_window_func((Item_window_func *) $$))) MYSQL_YYABORT; } ; @@ -11257,14 +11283,14 @@ percentile_function: { Item *args= new (thd->mem_root) Item_decimal(thd, "0.5", 3, thd->charset()); - if (($$ == NULL) || (thd->is_error())) - { + if (unlikely(args == NULL) || unlikely(thd->is_error())) + MYSQL_YYABORT; + Select->prepare_add_window_spec(thd); + if (unlikely(add_order_to_list(thd, $3,FALSE))) MYSQL_YYABORT; - } - if (add_order_to_list(thd, $3,FALSE)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_sum_percentile_cont(thd, args); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -11273,20 +11299,23 @@ inverse_distribution_function_def: PERCENTILE_CONT_SYM '(' expr ')' { $$= new (thd->mem_root) Item_sum_percentile_cont(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | PERCENTILE_DISC_SYM '(' expr ')' { $$= new (thd->mem_root) Item_sum_percentile_disc(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; order_by_single_element_list: ORDER_SYM BY order_ident order_dir - { if (add_order_to_list(thd, $3,(bool) $4)) MYSQL_YYABORT; } + { + if (unlikely(add_order_to_list(thd, $3,(bool) $4))) + MYSQL_YYABORT; + } ; @@ -11294,7 +11323,7 @@ window_name: ident { $$= (LEX_CSTRING *) thd->memdup(&$1, sizeof(LEX_CSTRING)); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -11302,7 +11331,7 @@ window_name: variable: '@' { - if (! Lex->parsing_options.allows_variable) + if (unlikely(! Lex->parsing_options.allows_variable)) my_yyabort_error((ER_VIEW_SELECT_VARIABLE, MYF(0))); } variable_aux @@ -11316,7 +11345,7 @@ variable_aux: { Item_func_set_user_var *item; $$= item= new (thd->mem_root) Item_func_set_user_var(thd, &$1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; LEX *lex= Lex; lex->uncacheable(UNCACHEABLE_SIDEEFFECT); @@ -11325,23 +11354,20 @@ variable_aux: | ident_or_text { $$= new (thd->mem_root) Item_func_get_user_var(thd, &$1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; LEX *lex= Lex; lex->uncacheable(UNCACHEABLE_SIDEEFFECT); } - | '@' opt_var_ident_type ident_or_text opt_component + | '@' opt_var_ident_type ident_sysvar_name { - /* disallow "SELECT @@global.global.variable" */ - if ($3.str && $4.str && check_reserved_words(&$3)) - { - thd->parse_error(); + if (unlikely(!($$= Lex->make_item_sysvar(thd, $2, &$3)))) MYSQL_YYABORT; - } - if (!($$= get_system_var(thd, $2, &$3, &$4))) + } + | '@' opt_var_ident_type ident_sysvar_name '.' 
ident + { + if (unlikely(!($$= Lex->make_item_sysvar(thd, $2, &$3, &$5)))) MYSQL_YYABORT; - if (!((Item_func_get_system_var*) $$)->is_written_to_binlog()) - Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_VARIABLE); } ; @@ -11354,7 +11380,7 @@ opt_gconcat_separator: /* empty */ { $$= new (thd->mem_root) String(",", 1, &my_charset_latin1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SEPARATOR_SYM text_string { $$ = $2; } @@ -11367,9 +11393,15 @@ opt_gorder_clause: gorder_list: gorder_list ',' order_ident order_dir - { if (add_gorder_to_list(thd, $3,(bool) $4)) MYSQL_YYABORT; } + { + if (unlikely(add_gorder_to_list(thd, $3,(bool) $4))) + MYSQL_YYABORT; + } | order_ident order_dir - { if (add_gorder_to_list(thd, $1,(bool) $2)) MYSQL_YYABORT; } + { + if (unlikely(add_gorder_to_list(thd, $1,(bool) $2))) + MYSQL_YYABORT; + } ; opt_glimit_clause: @@ -11418,7 +11450,7 @@ in_sum_expr: opt_all { LEX *lex= Lex; - if (lex->current_select->inc_in_sum_expr()) + if (unlikely(lex->current_select->inc_in_sum_expr())) { thd->parse_error(); MYSQL_YYABORT; @@ -11472,9 +11504,9 @@ expr_list: expr { $$= new (thd->mem_root) List; - if ($$ == NULL) + if (unlikely($$ == NULL) || + unlikely($$->push_back($1, thd->mem_root))) MYSQL_YYABORT; - $$->push_back($1, thd->mem_root); } | expr_list ',' expr { @@ -11492,9 +11524,9 @@ ident_list: simple_ident { $$= new (thd->mem_root) List; - if ($$ == NULL) + if (unlikely($$ == NULL) || + unlikely($$->push_back($1, thd->mem_root))) MYSQL_YYABORT; - $$->push_back($1, thd->mem_root); } | ident_list ',' simple_ident { @@ -11507,15 +11539,17 @@ when_list: WHEN_SYM expr THEN_SYM expr { $$= new (thd->mem_root) List; - if ($$ == NULL) + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + if (unlikely($$->push_back($2, thd->mem_root) || + $$->push_back($4, thd->mem_root))) MYSQL_YYABORT; - $$->push_back($2, thd->mem_root); - $$->push_back($4, thd->mem_root); } | when_list WHEN_SYM expr THEN_SYM expr { - $1->push_back($3, thd->mem_root); - $1->push_back($5, thd->mem_root); + if (unlikely($1->push_back($3, thd->mem_root) || + $1->push_back($5, thd->mem_root))) + MYSQL_YYABORT; $$= $1; } ; @@ -11524,7 +11558,8 @@ when_list_opt_else: when_list | when_list ELSE expr { - $1->push_back($3, thd->mem_root); + if (unlikely($1->push_back($3, thd->mem_root))) + MYSQL_YYABORT; $$= $1; } ; @@ -11536,7 +11571,7 @@ table_ref: | join_table { LEX *lex= Lex; - if (!($$= lex->current_select->nest_last_join(thd))) + if (unlikely(!($$= lex->current_select->nest_last_join(thd)))) { thd->parse_error(); MYSQL_YYABORT; @@ -11591,7 +11626,7 @@ join_table: { MYSQL_YYABORT_UNLESS($1 && $3); /* Change the current name resolution context to a local context. */ - if (push_new_name_resolution_context(thd, $1, $3)) + if (unlikely(push_new_name_resolution_context(thd, $1, $3))) MYSQL_YYABORT; Select->parsing_place= IN_ON; } @@ -11608,7 +11643,7 @@ join_table: MYSQL_YYABORT_UNLESS($1 && $3); } '(' using_list ')' - { + { $3->straight=$2; add_join_natural($1,$3,$7,Select); $$=$3; @@ -11626,7 +11661,7 @@ join_table: { MYSQL_YYABORT_UNLESS($1 && $5); /* Change the current name resolution context to a local context. */ - if (push_new_name_resolution_context(thd, $1, $5)) + if (unlikely(push_new_name_resolution_context(thd, $1, $5))) MYSQL_YYABORT; Select->parsing_place= IN_ON; } @@ -11662,14 +11697,14 @@ join_table: { MYSQL_YYABORT_UNLESS($1 && $5); /* Change the current name resolution context to a local context. 
*/ - if (push_new_name_resolution_context(thd, $1, $5)) + if (unlikely(push_new_name_resolution_context(thd, $1, $5))) MYSQL_YYABORT; Select->parsing_place= IN_ON; } expr { LEX *lex= Lex; - if (!($$= lex->current_select->convert_right_join())) + if (unlikely(!($$= lex->current_select->convert_right_join()))) MYSQL_YYABORT; add_join_on(thd, $$, $8); Lex->pop_context(); @@ -11682,7 +11717,7 @@ join_table: USING '(' using_list ')' { LEX *lex= Lex; - if (!($$= lex->current_select->convert_right_join())) + if (unlikely(!($$= lex->current_select->convert_right_join()))) MYSQL_YYABORT; add_join_natural($$,$5,$9,Select); } @@ -11691,7 +11726,7 @@ join_table: MYSQL_YYABORT_UNLESS($1 && $6); add_join_natural($6,$1,NULL,Select); LEX *lex= Lex; - if (!($$= lex->current_select->convert_right_join())) + if (unlikely(!($$= lex->current_select->convert_right_join()))) MYSQL_YYABORT; } ; @@ -11745,12 +11780,13 @@ table_primary_ident: } table_ident opt_use_partition opt_for_system_time_clause opt_table_alias opt_key_definition { - if (!($$= Select->add_table_to_list(thd, $2, $5, - Select->get_table_join_options(), - YYPS->m_lock_type, - YYPS->m_mdl_type, - Select->pop_index_hints(), - $3))) + if (unlikely(!($$= Select->add_table_to_list(thd, $2, $5, + Select->get_table_join_options(), + YYPS->m_lock_type, + YYPS->m_mdl_type, + Select-> + pop_index_hints(), + $3)))) MYSQL_YYABORT; Select->add_joined_table($$); if ($4) @@ -11801,18 +11837,18 @@ table_primary_derived: SELECT_LEX_UNIT *unit= sel->master_unit(); lex->current_select= sel= unit->outer_select(); Table_ident *ti= new (thd->mem_root) Table_ident(unit); - if (ti == NULL) + if (unlikely(ti == NULL)) MYSQL_YYABORT; - if (!($$= sel->add_table_to_list(thd, - ti, $6, 0, - TL_READ, MDL_SHARED_READ))) - + if (unlikely(!($$= sel->add_table_to_list(thd, + ti, $6, 0, + TL_READ, + MDL_SHARED_READ)))) MYSQL_YYABORT; sel->add_joined_table($$); lex->pop_context(); lex->nest_level--; } - else if ($6 != NULL) + else if (unlikely($6 != NULL)) { /* Tables with or without joins within parentheses cannot @@ -11851,13 +11887,14 @@ table_primary_derived: SELECT_LEX *sel= $2; SELECT_LEX_UNIT *unit= $5->master_unit(); Table_ident *ti= new (thd->mem_root) Table_ident(unit); - if (ti == NULL) + if (unlikely(ti == NULL)) MYSQL_YYABORT; $5->set_with_clause($4); lex->current_select= sel; - if (!($$= sel->add_table_to_list(lex->thd, - ti, $9, 0, - TL_READ, MDL_SHARED_READ))) + if (unlikely(!($$= sel->add_table_to_list(lex->thd, + ti, $9, 0, + TL_READ, + MDL_SHARED_READ)))) MYSQL_YYABORT; sel->add_joined_table($$); if ($8) @@ -11888,7 +11925,7 @@ select_derived_union: select_derived | select_derived union_order_or_limit { - if ($1) + if (unlikely($1)) { thd->parse_error(); MYSQL_YYABORT; @@ -11896,7 +11933,7 @@ select_derived_union: } | select_derived union_head_non_top { - if ($1) + if (unlikely($1)) { thd->parse_error(); MYSQL_YYABORT; @@ -11950,9 +11987,9 @@ select_derived: /* for normal joins, $2 != NULL and end_nested_join() != NULL, for derived tables, both must equal NULL */ - if (!($$= $1->end_nested_join(lex->thd)) && $2) + if (unlikely(!($$= $1->end_nested_join(lex->thd)) && $2)) MYSQL_YYABORT; - if (!$2 && $$) + if (unlikely(!$2 && $$)) { thd->parse_error(); MYSQL_YYABORT; @@ -11981,33 +12018,12 @@ derived_query_specification: derived_table_value_constructor: VALUES { - LEX *lex=Lex; - lex->field_list.empty(); - lex->many_values.empty(); - lex->insert_list=0; - } + Lex->tvc_start(); + } values_list { - LEX *lex= Lex; - lex->derived_tables|= DERIVED_SUBQUERY; - if 
(!lex->expr_allows_subselect || - lex->sql_command == (int)SQLCOM_PURGE) - { - thd->parse_error(); + if (Lex->tvc_finalize_derived()) MYSQL_YYABORT; - } - if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE || - mysql_new_select(lex, 1, NULL)) - MYSQL_YYABORT; - mysql_init_select(lex); - lex->current_select->linkage= DERIVED_TABLE_TYPE; - - if (!(lex->current_select->tvc= - new (lex->thd->mem_root) table_value_constr(lex->many_values, - lex->current_select, - lex->current_select->options))) - MYSQL_YYABORT; - lex->many_values.empty(); $$= NULL; } ; @@ -12017,14 +12033,14 @@ select_derived2: { LEX *lex= Lex; lex->derived_tables|= DERIVED_SUBQUERY; - if (!lex->expr_allows_subselect || - lex->sql_command == (int)SQLCOM_PURGE) + if (unlikely(!lex->expr_allows_subselect || + lex->sql_command == (int)SQLCOM_PURGE)) { thd->parse_error(); MYSQL_YYABORT; } if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE || - mysql_new_select(lex, 1, NULL)) + unlikely(mysql_new_select(lex, 1, NULL))) MYSQL_YYABORT; mysql_init_select(lex); lex->current_select->linkage= DERIVED_TABLE_TYPE; @@ -12045,7 +12061,7 @@ get_select_lex_derived: get_select_lex { LEX *lex= Lex; - if ($1->init_nested_join(lex->thd)) + if (unlikely($1->init_nested_join(lex->thd))) MYSQL_YYABORT; } ; @@ -12129,12 +12145,12 @@ key_usage_list: using_list: ident { - if (!($$= new (thd->mem_root) List)) + if (unlikely(!($$= new (thd->mem_root) List))) MYSQL_YYABORT; String *s= new (thd->mem_root) String((const char *) $1.str, $1.length, system_charset_info); - if (s == NULL) + if (unlikely(unlikely(s == NULL))) MYSQL_YYABORT; $$->push_back(s, thd->mem_root); } @@ -12143,9 +12159,10 @@ using_list: String *s= new (thd->mem_root) String((const char *) $3.str, $3.length, system_charset_info); - if (s == NULL) + if (unlikely(unlikely(s == NULL))) + MYSQL_YYABORT; + if (unlikely($1->push_back(s, thd->mem_root))) MYSQL_YYABORT; - $1->push_back(s, thd->mem_root); $$= $1; } ; @@ -12195,7 +12212,7 @@ opt_table_alias: | table_alias ident_table_alias { $$= (LEX_CSTRING*) thd->memdup(&$2,sizeof(LEX_STRING)); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -12249,7 +12266,7 @@ opt_escape: $$= ((thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) ? new (thd->mem_root) Item_string_ascii(thd, "", 0) : new (thd->mem_root) Item_string_ascii(thd, "\\", 1)); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -12265,9 +12282,15 @@ opt_group_clause: group_list: group_list ',' order_ident order_dir - { if (add_group_to_list(thd, $3,(bool) $4)) MYSQL_YYABORT; } + { + if (unlikely(add_group_to_list(thd, $3,(bool) $4))) + MYSQL_YYABORT; + } | order_ident order_dir - { if (add_group_to_list(thd, $1,(bool) $2)) MYSQL_YYABORT; } + { + if (unlikely(add_group_to_list(thd, $1,(bool) $2))) + MYSQL_YYABORT; + } ; olap_opt: @@ -12282,7 +12305,7 @@ olap_opt: SQL-2003: GROUP BY ... CUBE(col1, col2, col3) */ LEX *lex=Lex; - if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) + if (unlikely(lex->current_select->linkage == GLOBAL_OPTIONS_TYPE)) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "WITH CUBE", "global union parameters")); lex->current_select->olap= CUBE_TYPE; @@ -12299,7 +12322,7 @@ olap_opt: SQL-2003: GROUP BY ... 
ROLLUP(col1, col2, col3) */ LEX *lex= Lex; - if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) + if (unlikely(lex->current_select->linkage == GLOBAL_OPTIONS_TYPE)) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "WITH ROLLUP", "global union parameters")); lex->current_select->olap= ROLLUP_TYPE; @@ -12327,10 +12350,10 @@ window_def: window_name AS window_spec { LEX *lex= Lex; - if (Select->add_window_def(thd, $1, lex->win_ref, - Select->group_list, - Select->order_list, - lex->win_frame)) + if (unlikely(Select->add_window_def(thd, $1, lex->win_ref, + Select->group_list, + Select->order_list, + lex->win_frame))) MYSQL_YYABORT; } ; @@ -12348,7 +12371,7 @@ opt_window_ref: | ident { thd->lex->win_ref= (LEX_CSTRING *) thd->memdup(&$1, sizeof(LEX_CSTRING)); - if (thd->lex->win_ref == NULL) + if (unlikely(thd->lex->win_ref == NULL)) MYSQL_YYABORT; } @@ -12372,7 +12395,7 @@ opt_window_frame_clause: lex->frame_top_bound, lex->frame_bottom_bound, $3); - if (lex->win_frame == NULL) + if (unlikely(lex->win_frame == NULL)) MYSQL_YYABORT; } ; @@ -12390,7 +12413,7 @@ window_frame_extent: lex->frame_bottom_bound= new (thd->mem_root) Window_frame_bound(Window_frame_bound::CURRENT, NULL); - if (lex->frame_bottom_bound == NULL) + if (unlikely(lex->frame_bottom_bound == NULL)) MYSQL_YYABORT; } | BETWEEN_SYM window_frame_bound AND_SYM window_frame_bound @@ -12406,21 +12429,21 @@ window_frame_start: { $$= new (thd->mem_root) Window_frame_bound(Window_frame_bound::PRECEDING, NULL); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CURRENT_SYM ROW_SYM { $$= new (thd->mem_root) Window_frame_bound(Window_frame_bound::CURRENT, NULL); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | literal PRECEDING_SYM { $$= new (thd->mem_root) Window_frame_bound(Window_frame_bound::PRECEDING, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -12431,14 +12454,14 @@ window_frame_bound: { $$= new (thd->mem_root) Window_frame_bound(Window_frame_bound::FOLLOWING, NULL); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | literal FOLLOWING_SYM { $$= new (thd->mem_root) Window_frame_bound(Window_frame_bound::FOLLOWING, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -12472,7 +12495,7 @@ alter_order_item: simple_ident_nospvar order_dir { bool ascending= ($2 == 1) ? true : false; - if (add_order_to_list(thd, $1, ascending)) + if (unlikely(add_order_to_list(thd, $1, ascending))) MYSQL_YYABORT; } ; @@ -12492,9 +12515,9 @@ order_clause: LEX *lex=Lex; SELECT_LEX *sel= lex->current_select; SELECT_LEX_UNIT *unit= sel-> master_unit(); - if (sel->linkage != GLOBAL_OPTIONS_TYPE && - sel->olap != UNSPECIFIED_OLAP_TYPE && - (sel->linkage != UNION_TYPE || sel->braces)) + if (unlikely(sel->linkage != GLOBAL_OPTIONS_TYPE && + sel->olap != UNSPECIFIED_OLAP_TYPE && + (sel->linkage != UNION_TYPE || sel->braces))) { my_error(ER_WRONG_USAGE, MYF(0), "CUBE/ROLLUP", "ORDER BY"); @@ -12508,14 +12531,14 @@ order_clause: executed in the same way as the query SELECT ... ORDER BY order_list unless the SELECT construct contains ORDER BY or LIMIT clauses. - Otherwise we create a fake SELECT_LEX if it has not been created - yet. + Otherwise we create a fake SELECT_LEX if it has not been + created yet. 
*/ SELECT_LEX *first_sl= unit->first_select(); - if (!unit->is_unit_op() && - (first_sl->order_list.elements || - first_sl->select_limit) && - unit->add_fake_select_lex(thd)) + if (unlikely(!unit->is_unit_op() && + (first_sl->order_list.elements || + first_sl->select_limit) && + unit->add_fake_select_lex(thd))) MYSQL_YYABORT; } if (sel->master_unit()->is_unit_op() && !sel->braces) @@ -12538,9 +12561,15 @@ order_clause: order_list: order_list ',' order_ident order_dir - { if (add_order_to_list(thd, $3,(bool) $4)) MYSQL_YYABORT; } + { + if (unlikely(add_order_to_list(thd, $3,(bool) $4))) + MYSQL_YYABORT; + } | order_ident order_dir - { if (add_order_to_list(thd, $1,(bool) $2)) MYSQL_YYABORT; } + { + if (unlikely(add_order_to_list(thd, $1,(bool) $2))) + MYSQL_YYABORT; + } ; order_dir: @@ -12611,42 +12640,36 @@ limit_options: ; limit_option: - ident_with_tok_start - { - LEX *lex= thd->lex; - Lex_input_stream *lip= & thd->m_parser_state->m_lip; - if (!($$= lex->create_item_limit(thd, &$1, - $1.m_pos, lip->get_tok_end()))) - MYSQL_YYABORT; - } - | ident_with_tok_start '.' ident - { - LEX *lex= thd->lex; - Lex_input_stream *lip= & thd->m_parser_state->m_lip; - if (!($$= lex->create_item_limit(thd, &$1, &$3, - $1.m_pos, lip->get_ptr()))) - MYSQL_YYABORT; - } + ident_cli + { + if (unlikely(!($$= Lex->create_item_limit(thd, &$1)))) + MYSQL_YYABORT; + } + | ident_cli '.' ident_cli + { + if (unlikely(!($$= Lex->create_item_limit(thd, &$1, &$3)))) + MYSQL_YYABORT; + } | param_marker - { - $1->limit_clause_param= TRUE; - } + { + $1->limit_clause_param= TRUE; + } | ULONGLONG_NUM { $$= new (thd->mem_root) Item_uint(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LONG_NUM { $$= new (thd->mem_root) Item_uint(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | NUM { $$= new (thd->mem_root) Item_uint(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -12758,9 +12781,9 @@ procedure_clause: Item_field *item= new (thd->mem_root) Item_field(thd, &lex->current_select->context, NULL, NULL, &$2); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; - if (add_proc_to_list(thd, item)) + if (unlikely(add_proc_to_list(thd, item))) MYSQL_YYABORT; Lex->uncacheable(UNCACHEABLE_SIDEEFFECT); @@ -12792,7 +12815,7 @@ procedure_list2: procedure_item: remember_name expr remember_end { - if (add_proc_to_list(thd, $2)) + if (unlikely(add_proc_to_list(thd, $2))) MYSQL_YYABORT; if (!$2->name.str || $2->name.str == item_empty_name) $2->set_name(thd, $1, (uint) ($3 - $1), thd->charset()); @@ -12803,7 +12826,8 @@ select_var_list_init: { LEX *lex=Lex; if (!lex->describe && - (!(lex->result= new (thd->mem_root) select_dumpvar(thd)))) + unlikely((!(lex->result= new (thd->mem_root) + select_dumpvar(thd))))) MYSQL_YYABORT; } select_var_list @@ -12819,7 +12843,7 @@ select_var_ident: select_outvar { if (Lex->result) { - if ($1 == NULL) + if (unlikely($1 == NULL)) MYSQL_YYABORT; ((select_dumpvar *)Lex->result)->var_list.push_back($1, thd->mem_root); } @@ -12841,12 +12865,12 @@ select_outvar: } | ident_or_text { - if (!($$= Lex->create_outvar(thd, &$1)) && Lex->result) + if (unlikely(!($$= Lex->create_outvar(thd, &$1)) && Lex->result)) MYSQL_YYABORT; } | ident '.' 
ident { - if (!($$= Lex->create_outvar(thd, &$1, &$3)) && Lex->result) + if (unlikely(!($$= Lex->create_outvar(thd, &$1, &$3)) && Lex->result)) MYSQL_YYABORT; } ; @@ -12860,10 +12884,11 @@ into_destination: { LEX *lex= Lex; lex->uncacheable(UNCACHEABLE_SIDEEFFECT); - if (!(lex->exchange= - new (thd->mem_root) sql_exchange($2.str, 0)) || - !(lex->result= - new (thd->mem_root) select_export(thd, lex->exchange))) + if (unlikely(!(lex->exchange= + new (thd->mem_root) sql_exchange($2.str, 0))) || + unlikely(!(lex->result= + new (thd->mem_root) + select_export(thd, lex->exchange)))) MYSQL_YYABORT; } opt_load_data_charset @@ -12875,10 +12900,12 @@ into_destination: if (!lex->describe) { lex->uncacheable(UNCACHEABLE_SIDEEFFECT); - if (!(lex->exchange= new (thd->mem_root) sql_exchange($2.str,1))) + if (unlikely(!(lex->exchange= + new (thd->mem_root) sql_exchange($2.str,1)))) MYSQL_YYABORT; - if (!(lex->result= - new (thd->mem_root) select_dump(thd, lex->exchange))) + if (unlikely(!(lex->result= + new (thd->mem_root) + select_dump(thd, lex->exchange)))) MYSQL_YYABORT; } } @@ -12924,16 +12951,16 @@ drop: LEX *lex=Lex; Alter_drop *ad= (new (thd->mem_root) Alter_drop(Alter_drop::KEY, $4.str, $3)); - if (ad == NULL) + if (unlikely(ad == NULL)) MYSQL_YYABORT; lex->sql_command= SQLCOM_DROP_INDEX; lex->alter_info.reset(); lex->alter_info.flags= ALTER_DROP_INDEX; lex->alter_info.drop_list.push_back(ad, thd->mem_root); - if (!lex->current_select->add_table_to_list(thd, $6, NULL, - TL_OPTION_UPDATING, - TL_READ_NO_INSERT, - MDL_SHARED_UPGRADABLE)) + if (unlikely(!lex->current_select-> + add_table_to_list(thd, $6, NULL, TL_OPTION_UPDATING, + TL_READ_NO_INSERT, + MDL_SHARED_UPGRADABLE))) MYSQL_YYABORT; } | DROP DATABASE opt_if_exists ident @@ -12946,13 +12973,13 @@ drop: { LEX *lex= thd->lex; sp_name *spname; - if ($4.str && check_db_name((LEX_STRING*) &$4)) - my_yyabort_error((ER_WRONG_DB_NAME, MYF(0), $4.str)); - if (lex->sphead) + if (unlikely($4.str && check_db_name((LEX_STRING*) &$4))) + my_yyabort_error((ER_WRONG_DB_NAME, MYF(0), $4.str)); + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "FUNCTION")); lex->set_command(SQLCOM_DROP_FUNCTION, $3); spname= new (thd->mem_root) sp_name(&$4, &$6, true); - if (spname == NULL) + if (unlikely(spname == NULL)) MYSQL_YYABORT; lex->spname= spname; } @@ -12961,20 +12988,20 @@ drop: LEX *lex= thd->lex; LEX_CSTRING db= {0, 0}; sp_name *spname; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "FUNCTION")); - if (thd->db.str && lex->copy_db_to(&db)) + if (thd->db.str && unlikely(lex->copy_db_to(&db))) MYSQL_YYABORT; lex->set_command(SQLCOM_DROP_FUNCTION, $3); spname= new (thd->mem_root) sp_name(&db, &$4, false); - if (spname == NULL) + if (unlikely(spname == NULL)) MYSQL_YYABORT; lex->spname= spname; } | DROP PROCEDURE_SYM opt_if_exists sp_name { LEX *lex=Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE")); lex->set_command(SQLCOM_DROP_PROCEDURE, $3); lex->spname= $4; @@ -13043,10 +13070,10 @@ table_list: table_name: table_ident { - if (!Select->add_table_to_list(thd, $1, NULL, - TL_OPTION_UPDATING, - YYPS->m_lock_type, - YYPS->m_mdl_type)) + if (unlikely(!Select->add_table_to_list(thd, $1, NULL, + TL_OPTION_UPDATING, + YYPS->m_lock_type, + YYPS->m_mdl_type))) MYSQL_YYABORT; } ; @@ -13054,12 +13081,12 @@ table_name: table_name_with_opt_use_partition: table_ident opt_use_partition { - if (!Select->add_table_to_list(thd, $1, NULL, - 
TL_OPTION_UPDATING, - YYPS->m_lock_type, - YYPS->m_mdl_type, - NULL, - $2)) + if (unlikely(!Select->add_table_to_list(thd, $1, NULL, + TL_OPTION_UPDATING, + YYPS->m_lock_type, + YYPS->m_mdl_type, + NULL, + $2))) MYSQL_YYABORT; } ; @@ -13072,10 +13099,12 @@ table_alias_ref_list: table_alias_ref: table_ident_opt_wild { - if (!Select->add_table_to_list(thd, $1, NULL, - TL_OPTION_UPDATING | TL_OPTION_ALIAS, + if (unlikely(!Select-> + add_table_to_list(thd, $1, NULL, + (TL_OPTION_UPDATING | + TL_OPTION_ALIAS), YYPS->m_lock_type, - YYPS->m_mdl_type)) + YYPS->m_mdl_type))) MYSQL_YYABORT; } ; @@ -13160,10 +13189,9 @@ insert_lock_option: | LOW_PRIORITY { $$= TL_WRITE_LOW_PRIORITY; } | DELAYED_SYM { - Lex->keyword_delayed_begin_offset= (uint)(YYLIP->get_tok_start() - - thd->query()); - Lex->keyword_delayed_end_offset= Lex->keyword_delayed_begin_offset + - YYLIP->yyLength() + 1; + // QQ: why was +1? + Lex->keyword_delayed_begin_offset= (uint)($1.pos() - thd->query()); + Lex->keyword_delayed_end_offset= (uint)($1.end() - thd->query()); $$= TL_WRITE_DELAYED; } | HIGH_PRIORITY { $$= TL_WRITE; } @@ -13173,10 +13201,8 @@ replace_lock_option: opt_low_priority { $$= $1; } | DELAYED_SYM { - Lex->keyword_delayed_begin_offset= (uint)(YYLIP->get_tok_start() - - thd->query()); - Lex->keyword_delayed_end_offset= Lex->keyword_delayed_begin_offset + - YYLIP->yyLength() + 1; + Lex->keyword_delayed_begin_offset= (uint)($1.pos() - thd->query()); + Lex->keyword_delayed_end_offset= (uint)($1.end() - thd->query()); $$= TL_WRITE_DELAYED; } ; @@ -13202,8 +13228,9 @@ insert_field_spec: | SET { LEX *lex=Lex; - if (!(lex->insert_list= new (thd->mem_root) List_item) || - lex->many_values.push_back(lex->insert_list, thd->mem_root)) + if (unlikely(!(lex->insert_list= new (thd->mem_root) List_item)) || + unlikely(lex->many_values.push_back(lex->insert_list, + thd->mem_root))) MYSQL_YYABORT; } ident_eq_list @@ -13235,8 +13262,8 @@ ident_eq_value: simple_ident_nospvar equal expr_or_default { LEX *lex=Lex; - if (lex->field_list.push_back($1, thd->mem_root) || - lex->insert_list->push_back($3, thd->mem_root)) + if (unlikely(lex->field_list.push_back($1, thd->mem_root)) || + unlikely(lex->insert_list->push_back($3, thd->mem_root))) MYSQL_YYABORT; } ; @@ -13264,13 +13291,14 @@ opt_by: no_braces: '(' { - if (!(Lex->insert_list= new (thd->mem_root) List_item)) - MYSQL_YYABORT; + if (unlikely(!(Lex->insert_list= new (thd->mem_root) List_item))) + MYSQL_YYABORT; } opt_values ')' { LEX *lex=Lex; - if (lex->many_values.push_back(lex->insert_list, thd->mem_root)) + if (unlikely(lex->many_values.push_back(lex->insert_list, + thd->mem_root))) MYSQL_YYABORT; } ; @@ -13283,12 +13311,12 @@ opt_values: values: values ',' expr_or_default { - if (Lex->insert_list->push_back($3, thd->mem_root)) + if (unlikely(Lex->insert_list->push_back($3, thd->mem_root))) MYSQL_YYABORT; } | expr_or_default { - if (Lex->insert_list->push_back($1, thd->mem_root)) + if (unlikely(Lex->insert_list->push_back($1, thd->mem_root))) MYSQL_YYABORT; } ; @@ -13298,13 +13326,13 @@ expr_or_default: | DEFAULT { $$= new (thd->mem_root) Item_default_value(thd, Lex->current_context()); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | IGNORE_SYM { $$= new (thd->mem_root) Item_ignore_value(thd, Lex->current_context()); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -13338,7 +13366,7 @@ update: LEX *lex= Lex; if (lex->select_lex.table_list.elements > 1) lex->sql_command= SQLCOM_UPDATE_MULTI; - else if (lex->select_lex.get_table_list()->derived) + 
else if (unlikely(lex->select_lex.get_table_list()->derived)) { /* it is single table update and it is update of derived table */ my_error(ER_NON_UPDATABLE_TABLE, MYF(0), @@ -13363,7 +13391,8 @@ update_list: update_elem: simple_ident_nospvar equal expr_or_default { - if (add_item_to_list(thd, $1) || add_value_to_list(thd, $3)) + if (unlikely(add_item_to_list(thd, $1)) || + unlikely(add_value_to_list(thd, $3))) MYSQL_YYABORT; } ; @@ -13377,8 +13406,8 @@ insert_update_elem: simple_ident_nospvar equal expr_or_default { LEX *lex= Lex; - if (lex->update_list.push_back($1, thd->mem_root) || - lex->value_list.push_back($3, thd->mem_root)) + if (unlikely(lex->update_list.push_back($1, thd->mem_root)) || + unlikely(lex->value_list.push_back($3, thd->mem_root))) MYSQL_YYABORT; } ; @@ -13427,11 +13456,12 @@ delete_part2: delete_single_table: FROM table_ident opt_use_partition { - if (!Select->add_table_to_list(thd, $2, NULL, TL_OPTION_UPDATING, + if (unlikely(!Select-> + add_table_to_list(thd, $2, NULL, TL_OPTION_UPDATING, YYPS->m_lock_type, YYPS->m_mdl_type, NULL, - $3)) + $3))) MYSQL_YYABORT; YYPS->m_lock_type= TL_READ_DEFAULT; YYPS->m_mdl_type= MDL_SHARED_READ; @@ -13452,7 +13482,7 @@ single_multi: } FROM join_table_list opt_where_clause { - if (multi_delete_set_locks_and_link_aux_tables(Lex)) + if (unlikely(multi_delete_set_locks_and_link_aux_tables(Lex))) MYSQL_YYABORT; } | FROM table_alias_ref_list @@ -13463,7 +13493,7 @@ single_multi: } USING join_table_list opt_where_clause { - if (multi_delete_set_locks_and_link_aux_tables(Lex)) + if (unlikely(multi_delete_set_locks_and_link_aux_tables(Lex))) MYSQL_YYABORT; } ; @@ -13482,27 +13512,31 @@ table_wild_one: ident opt_wild { Table_ident *ti= new (thd->mem_root) Table_ident(&$1); - if (ti == NULL) + if (unlikely(ti == NULL)) MYSQL_YYABORT; - if (!Select->add_table_to_list(thd, + if (unlikely(!Select-> + add_table_to_list(thd, ti, NULL, - TL_OPTION_UPDATING | TL_OPTION_ALIAS, + (TL_OPTION_UPDATING | + TL_OPTION_ALIAS), YYPS->m_lock_type, - YYPS->m_mdl_type)) + YYPS->m_mdl_type))) MYSQL_YYABORT; } | ident '.' 
ident opt_wild { Table_ident *ti= new (thd->mem_root) Table_ident(thd, &$1, &$3, 0); - if (ti == NULL) + if (unlikely(ti == NULL)) MYSQL_YYABORT; - if (!Select->add_table_to_list(thd, + if (unlikely(!Select-> + add_table_to_list(thd, ti, NULL, - TL_OPTION_UPDATING | TL_OPTION_ALIAS, + (TL_OPTION_UPDATING | + TL_OPTION_ALIAS), YYPS->m_lock_type, - YYPS->m_mdl_type)) + YYPS->m_mdl_type))) MYSQL_YYABORT; } ; @@ -13540,7 +13574,7 @@ truncate: LEX* lex= thd->lex; DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_truncate_table(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -13631,7 +13665,7 @@ show_param: { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_DATABASES; - if (prepare_schema_table(thd, lex, 0, SCH_SCHEMATA)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_SCHEMATA))) MYSQL_YYABORT; } | opt_full TABLES opt_db wild_and_where @@ -13639,7 +13673,7 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_TABLES; lex->select_lex.db= $3; - if (prepare_schema_table(thd, lex, 0, SCH_TABLE_NAMES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_TABLE_NAMES))) MYSQL_YYABORT; } | opt_full TRIGGERS_SYM opt_db wild_and_where @@ -13647,7 +13681,7 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_TRIGGERS; lex->select_lex.db= $3; - if (prepare_schema_table(thd, lex, 0, SCH_TRIGGERS)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_TRIGGERS))) MYSQL_YYABORT; } | EVENTS_SYM opt_db wild_and_where @@ -13655,7 +13689,7 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_EVENTS; lex->select_lex.db= $2; - if (prepare_schema_table(thd, lex, 0, SCH_EVENTS)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_EVENTS))) MYSQL_YYABORT; } | TABLE_SYM STATUS_SYM opt_db wild_and_where @@ -13663,7 +13697,7 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_TABLE_STATUS; lex->select_lex.db= $3; - if (prepare_schema_table(thd, lex, 0, SCH_TABLES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_TABLES))) MYSQL_YYABORT; } | OPEN_SYM TABLES opt_db wild_and_where @@ -13671,27 +13705,27 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_OPEN_TABLES; lex->select_lex.db= $3; - if (prepare_schema_table(thd, lex, 0, SCH_OPEN_TABLES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_OPEN_TABLES))) MYSQL_YYABORT; } | PLUGINS_SYM { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_PLUGINS; - if (prepare_schema_table(thd, lex, 0, SCH_PLUGINS)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_PLUGINS))) MYSQL_YYABORT; } | PLUGINS_SYM SONAME_SYM TEXT_STRING_sys { Lex->ident= $3; Lex->sql_command= SQLCOM_SHOW_PLUGINS; - if (prepare_schema_table(thd, Lex, 0, SCH_ALL_PLUGINS)) + if (unlikely(prepare_schema_table(thd, Lex, 0, SCH_ALL_PLUGINS))) MYSQL_YYABORT; } | PLUGINS_SYM SONAME_SYM wild_and_where { Lex->sql_command= SQLCOM_SHOW_PLUGINS; - if (prepare_schema_table(thd, Lex, 0, SCH_ALL_PLUGINS)) + if (unlikely(prepare_schema_table(thd, Lex, 0, SCH_ALL_PLUGINS))) MYSQL_YYABORT; } | ENGINE_SYM known_storage_engines show_engine_param @@ -13704,7 +13738,7 @@ show_param: lex->sql_command= SQLCOM_SHOW_FIELDS; if ($5.str) $4->change_db(&$5); - if (prepare_schema_table(thd, lex, $4, SCH_COLUMNS)) + if (unlikely(prepare_schema_table(thd, lex, $4, SCH_COLUMNS))) MYSQL_YYABORT; } | master_or_binary LOGS_SYM @@ -13732,14 +13766,14 @@ show_param: lex->sql_command= SQLCOM_SHOW_KEYS; if ($4.str) $3->change_db(&$4); - if (prepare_schema_table(thd, lex, $3, SCH_STATISTICS)) + if (unlikely(prepare_schema_table(thd, lex, 
$3, SCH_STATISTICS))) MYSQL_YYABORT; } | opt_storage ENGINES_SYM { LEX *lex=Lex; lex->sql_command= SQLCOM_SHOW_STORAGE_ENGINES; - if (prepare_schema_table(thd, lex, 0, SCH_ENGINES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_ENGINES))) MYSQL_YYABORT; } | AUTHORS_SYM @@ -13777,7 +13811,7 @@ show_param: { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_PROFILE; - if (prepare_schema_table(thd, lex, NULL, SCH_PROFILES) != 0) + if (unlikely(prepare_schema_table(thd, lex, NULL, SCH_PROFILES))) MYSQL_YYABORT; } | opt_var_type STATUS_SYM wild_and_where @@ -13785,7 +13819,7 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_STATUS; lex->option_type= $1; - if (prepare_schema_table(thd, lex, 0, SCH_SESSION_STATUS)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_SESSION_STATUS))) MYSQL_YYABORT; } | opt_full PROCESSLIST_SYM @@ -13795,27 +13829,28 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_VARIABLES; lex->option_type= $1; - if (prepare_schema_table(thd, lex, 0, SCH_SESSION_VARIABLES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_SESSION_VARIABLES))) MYSQL_YYABORT; } | charset wild_and_where { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_CHARSETS; - if (prepare_schema_table(thd, lex, 0, SCH_CHARSETS)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_CHARSETS))) MYSQL_YYABORT; } | COLLATION_SYM wild_and_where { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_COLLATIONS; - if (prepare_schema_table(thd, lex, 0, SCH_COLLATIONS)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_COLLATIONS))) MYSQL_YYABORT; } | GRANTS { Lex->sql_command= SQLCOM_SHOW_GRANTS; - if (!(Lex->grant_user= (LEX_USER*)thd->alloc(sizeof(LEX_USER)))) + if (unlikely(!(Lex->grant_user= + (LEX_USER*)thd->alloc(sizeof(LEX_USER))))) MYSQL_YYABORT; Lex->grant_user->user= current_user_and_current_role; } @@ -13834,7 +13869,7 @@ show_param: { LEX *lex= Lex; lex->sql_command = SQLCOM_SHOW_CREATE; - if (!lex->select_lex.add_table_to_list(thd, $3, NULL,0)) + if (unlikely(!lex->select_lex.add_table_to_list(thd, $3, NULL,0))) MYSQL_YYABORT; lex->create_info.storage_media= HA_SM_DEFAULT; } @@ -13842,7 +13877,7 @@ show_param: { LEX *lex= Lex; lex->sql_command = SQLCOM_SHOW_CREATE; - if (!lex->select_lex.add_table_to_list(thd, $3, NULL, 0)) + if (unlikely(!lex->select_lex.add_table_to_list(thd, $3, NULL, 0))) MYSQL_YYABORT; lex->table_type= TABLE_TYPE_VIEW; } @@ -13850,7 +13885,7 @@ show_param: { LEX *lex= Lex; lex->sql_command = SQLCOM_SHOW_CREATE; - if (!lex->select_lex.add_table_to_list(thd, $3, NULL, 0)) + if (unlikely(!lex->select_lex.add_table_to_list(thd, $3, NULL, 0))) MYSQL_YYABORT; lex->table_type= TABLE_TYPE_SEQUENCE; } @@ -13910,7 +13945,8 @@ show_param: | CREATE USER_SYM { Lex->sql_command= SQLCOM_SHOW_CREATE_USER; - if (!(Lex->grant_user= (LEX_USER*)thd->alloc(sizeof(LEX_USER)))) + if (unlikely(!(Lex->grant_user= + (LEX_USER*)thd->alloc(sizeof(LEX_USER))))) MYSQL_YYABORT; Lex->grant_user->user= current_user; } @@ -13923,28 +13959,28 @@ show_param: { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_STATUS_PROC; - if (prepare_schema_table(thd, lex, 0, SCH_PROCEDURES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_PROCEDURES))) MYSQL_YYABORT; } | FUNCTION_SYM STATUS_SYM wild_and_where { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_STATUS_FUNC; - if (prepare_schema_table(thd, lex, 0, SCH_PROCEDURES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_PROCEDURES))) MYSQL_YYABORT; } | PACKAGE_SYM STATUS_SYM wild_and_where { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_STATUS_PACKAGE; - if 
(prepare_schema_table(thd, lex, 0, SCH_PROCEDURES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_PROCEDURES))) MYSQL_YYABORT; } | PACKAGE_SYM BODY_SYM STATUS_SYM wild_and_where { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_STATUS_PACKAGE_BODY; - if (prepare_schema_table(thd, lex, 0, SCH_PROCEDURES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_PROCEDURES))) MYSQL_YYABORT; } | PROCEDURE_SYM CODE_SYM sp_name @@ -13970,7 +14006,7 @@ show_param: | describe_command FOR_SYM expr { Lex->sql_command= SQLCOM_SHOW_EXPLAIN; - if (prepare_schema_table(thd, Lex, 0, SCH_EXPLAIN)) + if (unlikely(prepare_schema_table(thd, Lex, 0, SCH_EXPLAIN))) MYSQL_YYABORT; add_value_to_list(thd, $3); } @@ -13980,17 +14016,17 @@ show_param: bool in_plugin; lex->sql_command= SQLCOM_SHOW_GENERIC; ST_SCHEMA_TABLE *table= find_schema_table(thd, &$1, &in_plugin); - if (!table || !table->old_format || !in_plugin) + if (unlikely(!table || !table->old_format || !in_plugin)) { thd->parse_error(ER_SYNTAX_ERROR, $2); MYSQL_YYABORT; } - if (lex->wild && table->idx_field1 < 0) + if (unlikely(lex->wild && table->idx_field1 < 0)) { thd->parse_error(ER_SYNTAX_ERROR, $3); MYSQL_YYABORT; } - if (make_schema_select(thd, Lex->current_select, table)) + if (unlikely(make_schema_select(thd, Lex->current_select, table))) MYSQL_YYABORT; } ; @@ -14045,7 +14081,7 @@ wild_and_where: { Lex->wild= new (thd->mem_root) String($3.str, $3.length, system_charset_info); - if (Lex->wild == NULL) + if (unlikely(Lex->wild == NULL)) MYSQL_YYABORT; $$= $2; } @@ -14068,7 +14104,7 @@ describe: lex->sql_command= SQLCOM_SHOW_FIELDS; lex->select_lex.db= null_clex_str; lex->verbose= 0; - if (prepare_schema_table(thd, lex, $2, SCH_COLUMNS)) + if (unlikely(prepare_schema_table(thd, lex, $2, SCH_COLUMNS))) MYSQL_YYABORT; } opt_describe_column @@ -14114,13 +14150,13 @@ opt_format_json: /* empty */ {} | FORMAT_SYM '=' ident_or_text { - if (!my_strcasecmp(system_charset_info, $3.str, "JSON")) + if (lex_string_eq(&$3, STRING_WITH_LEN("JSON"))) Lex->explain_json= true; - else if (!my_strcasecmp(system_charset_info, $3.str, "TRADITIONAL")) + else if (lex_string_eq(&$3, STRING_WITH_LEN("TRADITIONAL"))) DBUG_ASSERT(Lex->explain_json==false); else my_yyabort_error((ER_UNKNOWN_EXPLAIN_FORMAT, MYF(0), "EXPLAIN", - $3.str)); + $3.str)); } ; @@ -14132,7 +14168,7 @@ opt_describe_column: Lex->wild= new (thd->mem_root) String((const char*) $1.str, $1.length, system_charset_info); - if (Lex->wild == NULL) + if (unlikely(Lex->wild == NULL)) MYSQL_YYABORT; } ; @@ -14189,8 +14225,9 @@ flush_lock: { Lex->type|= REFRESH_READ_LOCK | $4; } | FOR_SYM { - if (Lex->query_tables == NULL) // Table list can't be empty + if (unlikely(Lex->query_tables == NULL)) { + // Table list can't be empty thd->parse_error(ER_NO_TABLES_USED); MYSQL_YYABORT; } @@ -14218,7 +14255,7 @@ flush_option: | RELAY LOGS_SYM optional_connection_name { LEX *lex= Lex; - if (lex->type & REFRESH_RELAY_LOG) + if (unlikely(lex->type & REFRESH_RELAY_LOG)) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "FLUSH", "RELAY LOGS")); lex->type|= REFRESH_RELAY_LOG; lex->relay_log_connection_name= lex->mi.connection_name; @@ -14239,7 +14276,7 @@ flush_option: | SLAVE optional_connection_name { LEX *lex= Lex; - if (lex->type & REFRESH_SLAVE) + if (unlikely(lex->type & REFRESH_SLAVE)) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "FLUSH","SLAVE")); lex->type|= REFRESH_SLAVE; lex->reset_slave_info.all= false; @@ -14254,14 +14291,15 @@ flush_option: { Lex->type|= REFRESH_GENERIC; ST_SCHEMA_TABLE *table= find_schema_table(thd, &$1); - if 
(!table || !table->reset_table) + if (unlikely(!table || !table->reset_table)) { thd->parse_error(ER_SYNTAX_ERROR, $2); MYSQL_YYABORT; } - Lex->view_list.push_back((LEX_CSTRING*) - thd->memdup(&$1, sizeof(LEX_CSTRING)), - thd->mem_root); + if (unlikely(Lex->view_list.push_back((LEX_CSTRING*) + thd->memdup(&$1, sizeof(LEX_CSTRING)), + thd->mem_root))) + MYSQL_YYABORT; } ; @@ -14430,7 +14468,7 @@ load: { LEX *lex= thd->lex; - if (lex->sphead) + if (unlikely(lex->sphead)) { my_error(ER_SP_BADSTATEMENT, MYF(0), $2 == FILETYPE_CSV ? "LOAD DATA" : "LOAD XML"); @@ -14444,14 +14482,17 @@ load: lex->local_file= $5; lex->duplicates= DUP_ERROR; lex->ignore= 0; - if (!(lex->exchange= new (thd->mem_root) sql_exchange($7.str, 0, $2))) + if (unlikely(!(lex->exchange= new (thd->mem_root) + sql_exchange($7.str, 0, $2)))) MYSQL_YYABORT; } opt_duplicate INTO TABLE_SYM table_ident opt_use_partition { LEX *lex=Lex; - if (!Select->add_table_to_list(thd, $12, NULL, TL_OPTION_UPDATING, - $4, MDL_SHARED_WRITE, NULL, $13)) + if (unlikely(!Select->add_table_to_list(thd, $12, NULL, + TL_OPTION_UPDATING, + $4, MDL_SHARED_WRITE, + NULL, $13))) MYSQL_YYABORT; lex->field_list.empty(); lex->update_list.empty(); @@ -14589,7 +14630,7 @@ field_or_var: | '@' ident_or_text { $$= new (thd->mem_root) Item_user_var_as_out_param(thd, &$2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -14608,8 +14649,8 @@ load_data_set_elem: simple_ident_nospvar equal remember_name expr_or_default remember_end { LEX *lex= Lex; - if (lex->update_list.push_back($1, thd->mem_root) || - lex->value_list.push_back($4, thd->mem_root)) + if (unlikely(lex->update_list.push_back($1, thd->mem_root)) || + unlikely(lex->value_list.push_back($4, thd->mem_root))) MYSQL_YYABORT; $4->set_name_no_truncate(thd, $3, (uint) ($5 - $3), thd->charset()); } @@ -14620,22 +14661,22 @@ load_data_set_elem: text_literal: TEXT_STRING { - if (!($$= thd->make_string_literal($1))) + if (unlikely(!($$= thd->make_string_literal($1)))) MYSQL_YYABORT; } | NCHAR_STRING { - if (!($$= thd->make_string_literal_nchar($1))) + if (unlikely(!($$= thd->make_string_literal_nchar($1)))) MYSQL_YYABORT; } | UNDERSCORE_CHARSET TEXT_STRING { - if (!($$= thd->make_string_literal_charset($2, $1))) + if (unlikely(!($$= thd->make_string_literal_charset($2, $1)))) MYSQL_YYABORT; } | text_literal TEXT_STRING_literal { - if (!($$= $1->make_string_literal_concat(thd, &$2))) + if (unlikely(!($$= $1->make_string_literal_concat(thd, &$2)))) MYSQL_YYABORT; } ; @@ -14646,7 +14687,7 @@ text_string: $$= new (thd->mem_root) String($1.str, $1.length, thd->variables.collation_connection); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | hex_or_bin_String { $$= $1; } @@ -14658,7 +14699,7 @@ hex_or_bin_String: { Item *tmp= new (thd->mem_root) Item_hex_hybrid(thd, $1.str, $1.length); - if (tmp == NULL) + if (unlikely(tmp == NULL)) MYSQL_YYABORT; /* it is OK only emulate fix_fields, because we need only @@ -14671,7 +14712,7 @@ hex_or_bin_String: { Item *tmp= new (thd->mem_root) Item_hex_string(thd, $1.str, $1.length); - if (tmp == NULL) + if (unlikely(tmp == NULL)) MYSQL_YYABORT; tmp->quick_fix_field(); $$= tmp->val_str((String*) 0); @@ -14680,7 +14721,7 @@ hex_or_bin_String: { Item *tmp= new (thd->mem_root) Item_bin_string(thd, $1.str, $1.length); - if (tmp == NULL) + if (unlikely(tmp == NULL)) MYSQL_YYABORT; /* it is OK only emulate fix_fields, because we need only @@ -14694,9 +14735,9 @@ hex_or_bin_String: param_marker: PARAM_MARKER { - if (!($$= Lex->add_placeholder(thd, 
¶m_clex_str, - YYLIP->get_tok_start(), - YYLIP->get_tok_start() + 1))) + if (unlikely(!($$= Lex->add_placeholder(thd, ¶m_clex_str, + YYLIP->get_tok_start(), + YYLIP->get_tok_start() + 1)))) MYSQL_YYABORT; } ; @@ -14724,38 +14765,38 @@ literal: */ YYLIP->reduce_digest_token(TOK_GENERIC_VALUE, NULL_SYM); $$= new (thd->mem_root) Item_null(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; YYLIP->next_state= MY_LEX_OPERATOR_OR_IDENT; } | FALSE_SYM { $$= new (thd->mem_root) Item_bool(thd, (char*) "FALSE",0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | TRUE_SYM { $$= new (thd->mem_root) Item_bool(thd, (char*) "TRUE",1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | HEX_NUM { $$= new (thd->mem_root) Item_hex_hybrid(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | HEX_STRING { $$= new (thd->mem_root) Item_hex_string(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | BIN_NUM { $$= new (thd->mem_root) Item_bin_string(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | UNDERSCORE_CHARSET hex_or_bin_String @@ -14768,7 +14809,8 @@ literal: item_str= new (thd->mem_root) Item_string_with_introducer(thd, NULL, $2->ptr(), $2->length(), $1); - if (!item_str || !item_str->check_well_formed_result(true)) + if (unlikely(!item_str || + !item_str->check_well_formed_result(true))) MYSQL_YYABORT; $$= item_str; @@ -14783,7 +14825,7 @@ NUM_literal: Item_int(thd, $1.str, (longlong) my_strtoll10($1.str, NULL, &error), $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LONG_NUM @@ -14793,31 +14835,27 @@ NUM_literal: Item_int(thd, $1.str, (longlong) my_strtoll10($1.str, NULL, &error), $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ULONGLONG_NUM { $$= new (thd->mem_root) Item_uint(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DECIMAL_NUM { $$= new (thd->mem_root) Item_decimal(thd, $1.str, $1.length, thd->charset()); - if (($$ == NULL) || (thd->is_error())) - { + if (unlikely($$ == NULL) || unlikely(thd->is_error())) MYSQL_YYABORT; - } } | FLOAT_NUM { $$= new (thd->mem_root) Item_float(thd, $1.str, $1.length); - if (($$ == NULL) || (thd->is_error())) - { + if (unlikely($$ == NULL) || unlikely(thd->is_error())) MYSQL_YYABORT; - } } ; @@ -14825,20 +14863,26 @@ NUM_literal: temporal_literal: DATE_SYM TEXT_STRING { - if (!($$= create_temporal_literal(thd, $2.str, $2.length, YYCSCL, - MYSQL_TYPE_DATE, true))) + if (unlikely(!($$= create_temporal_literal(thd, $2.str, $2.length, + YYCSCL, + MYSQL_TYPE_DATE, + true)))) MYSQL_YYABORT; } | TIME_SYM TEXT_STRING { - if (!($$= create_temporal_literal(thd, $2.str, $2.length, YYCSCL, - MYSQL_TYPE_TIME, true))) + if (unlikely(!($$= create_temporal_literal(thd, $2.str, $2.length, + YYCSCL, + MYSQL_TYPE_TIME, + true)))) MYSQL_YYABORT; } | TIMESTAMP TEXT_STRING { - if (!($$= create_temporal_literal(thd, $2.str, $2.length, YYCSCL, - MYSQL_TYPE_DATETIME, true))) + if (unlikely(!($$= create_temporal_literal(thd, $2.str, $2.length, + YYCSCL, + MYSQL_TYPE_DATETIME, + true)))) MYSQL_YYABORT; } ; @@ -14858,7 +14902,7 @@ with_clause: { With_clause *with_clause= new With_clause($2, Lex->curr_with_clause); - if (with_clause == NULL) + if (unlikely(with_clause == NULL)) MYSQL_YYABORT; Lex->derived_tables|= DERIVED_WITH; Lex->curr_with_clause= with_clause; @@ -14889,16 +14933,17 @@ with_list_element: opt_with_column_list { $2= new 
List (Lex->with_column_list); - if ($2 == NULL) + if (unlikely($2 == NULL)) MYSQL_YYABORT; Lex->with_column_list.empty(); } AS '(' remember_name subselect remember_end ')' { With_element *elem= new With_element($1, *$2, $7->master_unit()); - if (elem == NULL || Lex->curr_with_clause->add_with_element(elem)) + if (unlikely(elem == NULL) || + unlikely(Lex->curr_with_clause->add_with_element(elem))) MYSQL_YYABORT; - if (elem->set_unparsed_spec(thd, $6+1, $8)) + if (unlikely(elem->set_unparsed_spec(thd, $6+1, $8))) MYSQL_YYABORT; } ; @@ -14930,7 +14975,7 @@ query_name: ident { $$= (LEX_CSTRING *) thd->memdup(&$1, sizeof(LEX_CSTRING)); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -14949,24 +14994,26 @@ insert_ident: table_wild: ident '.' '*' { - SELECT_LEX *sel= Select; - $$= new (thd->mem_root) Item_field(thd, Lex->current_context(), - NullS, $1.str, &star_clex_str); - if ($$ == NULL) + if (unlikely(!($$= Lex->create_item_qualified_asterisk(thd, &$1)))) MYSQL_YYABORT; - sel->with_wild++; } | ident '.' ident '.' '*' { - SELECT_LEX *sel= Select; - const char* schema= thd->client_capabilities & CLIENT_NO_SCHEMA ? - NullS : $1.str; - $$= new (thd->mem_root) Item_field(thd, Lex->current_context(), - schema, - $3.str, &star_clex_str); - if ($$ == NULL) + if (unlikely(!($$= Lex->create_item_qualified_asterisk(thd, &$1, &$3)))) + MYSQL_YYABORT; + } + ; + +select_sublist_qualified_asterisk: + ident_cli '.' '*' + { + if (unlikely(!($$= Lex->create_item_qualified_asterisk(thd, &$1)))) + MYSQL_YYABORT; + } + | ident_cli '.' ident_cli '.' '*' + { + if (unlikely(!($$= Lex->create_item_qualified_asterisk(thd, &$1, &$3)))) MYSQL_YYABORT; - sel->with_wild++; } ; @@ -14974,21 +15021,27 @@ order_ident: expr { $$=$1; } ; + simple_ident: - ident + ident_cli { - Lex_input_stream *lip= YYLIP; - if (!($$= Lex->create_item_ident(thd, &$1, - lip->get_tok_start_prev(), - lip->get_tok_end()))) + if (unlikely(!($$= Lex->create_item_ident(thd, &$1)))) MYSQL_YYABORT; } - | simple_ident_q2 - | ident '.' ident + | ident_cli '.' ident_cli { - LEX *lex= thd->lex; - if (!($$= lex->create_item_ident(thd, &$1, &$3, - $1.m_pos, YYLIP->get_tok_end()))) + if (unlikely(!($$= Lex->create_item_ident(thd, &$1, &$3)))) + MYSQL_YYABORT; + } + | '.' ident_cli '.' ident_cli + { + Lex_ident_cli empty($2.pos(), 0); + if (unlikely(!($$= Lex->create_item_ident(thd, &empty, &$2, &$4)))) + MYSQL_YYABORT; + } + | ident_cli '.' ident_cli '.' ident_cli + { + if (unlikely(!($$= Lex->create_item_ident(thd, &$1, &$3, &$5)))) MYSQL_YYABORT; } ; @@ -14996,7 +15049,7 @@ simple_ident: simple_ident_nospvar: ident { - if (!($$= Lex->create_item_ident_nosp(thd, &$1))) + if (unlikely(!($$= Lex->create_item_ident_nosp(thd, &$1)))) MYSQL_YYABORT; } | simple_ident_q { $$= $1; } @@ -15005,7 +15058,7 @@ simple_ident_nospvar: simple_ident_q: ident '.' ident { - if (!($$= Lex->create_item_ident_nospvar(thd, &$1, &$3))) + if (unlikely(!($$= Lex->create_item_ident_nospvar(thd, &$1, &$3)))) MYSQL_YYABORT; } | simple_ident_q2 @@ -15014,12 +15067,13 @@ simple_ident_q: simple_ident_q2: '.' ident '.' ident { - if (!($$= Lex->create_item_ident(thd, &null_clex_str, &$2, &$4))) + Lex_ident_sys none; + if (unlikely(!($$= Lex->create_item_ident(thd, &none, &$2, &$4)))) MYSQL_YYABORT; } | ident '.' ident '.' ident { - if (!($$= Lex->create_item_ident(thd, &$1, &$3, &$5))) + if (unlikely(!($$= Lex->create_item_ident(thd, &$1, &$3, &$5)))) MYSQL_YYABORT; } ; @@ -15029,17 +15083,19 @@ field_ident: | ident '.' ident '.' 
ident { TABLE_LIST *table= Select->table_list.first; - if (my_strcasecmp(table_alias_charset, $1.str, table->db.str)) + if (unlikely(my_strcasecmp(table_alias_charset, $1.str, + table->db.str))) my_yyabort_error((ER_WRONG_DB_NAME, MYF(0), $1.str)); - if (my_strcasecmp(table_alias_charset, $3.str, - table->table_name.str)) + if (unlikely(my_strcasecmp(table_alias_charset, $3.str, + table->table_name.str))) my_yyabort_error((ER_WRONG_TABLE_NAME, MYF(0), $3.str)); $$=$5; } | ident '.' ident { TABLE_LIST *table= Select->table_list.first; - if (my_strcasecmp(table_alias_charset, $1.str, table->alias.str)) + if (unlikely(my_strcasecmp(table_alias_charset, $1.str, + table->alias.str))) my_yyabort_error((ER_WRONG_TABLE_NAME, MYF(0), $1.str)); $$=$3; } @@ -15050,20 +15106,20 @@ table_ident: ident { $$= new (thd->mem_root) Table_ident(&$1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ident '.' ident { $$= new (thd->mem_root) Table_ident(thd, &$1, &$3, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | '.' ident { /* For Delphi */ $$= new (thd->mem_root) Table_ident(&$2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -15072,13 +15128,13 @@ table_ident_opt_wild: ident opt_wild { $$= new (thd->mem_root) Table_ident(&$1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ident '.' ident opt_wild { $$= new (thd->mem_root) Table_ident(thd, &$1, &$3, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -15088,141 +15144,94 @@ table_ident_nodb: { LEX_CSTRING db={(char*) any_db,3}; $$= new (thd->mem_root) Table_ident(thd, &db, &$1, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; -IDENT_sys: - IDENT { $$= $1; } +IDENT_cli: + IDENT | IDENT_QUOTED + ; + +ident_cli: + IDENT + | IDENT_QUOTED + | keyword_ident { $$= $1; } + ; + +IDENT_sys: + IDENT_cli { - if (thd->charset_is_system_charset) - { - CHARSET_INFO *cs= system_charset_info; - size_t wlen= Well_formed_prefix(cs, $1.str, $1.length).length(); - if (wlen < $1.length) - { - ErrConvString err($1.str, $1.length, &my_charset_bin); - my_error(ER_INVALID_CHARACTER_STRING, MYF(0), - cs->csname, err.ptr()); - MYSQL_YYABORT; - } - $$= $1; - } - else - { - LEX_STRING to; - if (thd->convert_with_error(system_charset_info, &to, - thd->charset(), $1.str, $1.length)) - MYSQL_YYABORT; - $$.str= to.str; - $$.length= to.length; - } + if (unlikely(thd->to_ident_sys_alloc(&$$, &$1))) + MYSQL_YYABORT; } ; TEXT_STRING_sys: TEXT_STRING { - if (thd->charset_is_system_charset) - $$= $1; - else - { - LEX_STRING to; - if (thd->convert_string(&to, system_charset_info, - $1.str, $1.length, thd->charset())) - MYSQL_YYABORT; - $$.str= to.str; - $$.length= to.length; - } + if (thd->make_text_string_sys(&$$, &$1)) + MYSQL_YYABORT; } ; TEXT_STRING_literal: TEXT_STRING { - if (thd->charset_is_collation_connection) - $$= $1; - else - { - LEX_STRING to; - if (thd->convert_string(&to, thd->variables.collation_connection, - $1.str, $1.length, thd->charset())) - MYSQL_YYABORT; - $$.str= to.str; - $$.length= to.length; - } + if (thd->make_text_string_connection(&$$, &$1)) + MYSQL_YYABORT; } ; TEXT_STRING_filesystem: TEXT_STRING { - if (thd->charset_is_character_set_filesystem) - $$= $1; - else - { - LEX_STRING to; - if (thd->convert_string(&to, - thd->variables.character_set_filesystem, - $1.str, $1.length, thd->charset())) - MYSQL_YYABORT; - $$.str= to.str; - $$.length= to.length; - } - } - -ident_table_alias: - IDENT_sys { $$= $1; } - | keyword_alias - { - $$.str= 
thd->strmake($1.str, $1.length); - if ($$.str == NULL) + if (thd->make_text_string_filesystem(&$$, &$1)) MYSQL_YYABORT; - $$.length= $1.length; } ; +ident_table_alias: + IDENT_sys + | keyword_table_alias + { + if (unlikely($$.copy_keyword(thd, &$1))) + MYSQL_YYABORT; + } + ; + + +ident_sysvar_name: + IDENT_sys + | keyword_sysvar_name + { + if (unlikely($$.copy_keyword(thd, &$1))) + MYSQL_YYABORT; + } + | TEXT_STRING_sys + { + if (unlikely($$.copy_sys(thd, &$1))) + MYSQL_YYABORT; + } + ; + + ident: IDENT_sys + | keyword_ident { - (LEX_CSTRING &)$$= $1; - $$.m_pos= (char *) YYLIP->get_tok_start_prev(); - } - | keyword - { - $$.str= thd->strmake($1.str, $1.length); - if ($$.str == NULL) + if (unlikely($$.copy_keyword(thd, &$1))) MYSQL_YYABORT; - $$.length= $1.length; - $$.m_pos= (char *) YYLIP->get_tok_start_prev(); - } - ; - -ident_with_tok_start: - IDENT_sys - { - (LEX_CSTRING &)$$= $1; - $$.m_pos= (char *) YYLIP->get_tok_start(); - } - | keyword - { - if (!($$.str= thd->strmake($1.str, $1.length))) - MYSQL_YYABORT; - $$.length= $1.length; - $$.m_pos= (char *) YYLIP->get_tok_start(); } ; label_ident: - IDENT_sys { $$=$1; } - | keyword_sp + IDENT_sys + | keyword_label { - $$.str= thd->strmake($1.str, $1.length); - if ($$.str == NULL) + if (unlikely($$.copy_keyword(thd, &$1))) MYSQL_YYABORT; - $$.length= $1.length; } ; @@ -15235,28 +15244,28 @@ ident_or_text: user_maybe_role: ident_or_text { - if (!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER)))) + if (unlikely(!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER))))) MYSQL_YYABORT; $$->user = $1; $$->host= null_clex_str; // User or Role, see get_current_user() $$->reset_auth(); - if (check_string_char_length(&$$->user, ER_USERNAME, - username_char_length, - system_charset_info, 0)) + if (unlikely(check_string_char_length(&$$->user, ER_USERNAME, + username_char_length, + system_charset_info, 0))) MYSQL_YYABORT; } | ident_or_text '@' ident_or_text { - if (!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER)))) + if (unlikely(!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER))))) MYSQL_YYABORT; $$->user = $1; $$->host=$3; $$->reset_auth(); - if (check_string_char_length(&$$->user, ER_USERNAME, - username_char_length, - system_charset_info, 0) || - check_host_name(&$$->host)) + if (unlikely(check_string_char_length(&$$->user, ER_USERNAME, + username_char_length, + system_charset_info, 0)) || + unlikely(check_host_name(&$$->host))) MYSQL_YYABORT; if ($$->host.str[0]) { @@ -15278,7 +15287,7 @@ user_maybe_role: } | CURRENT_USER optional_braces { - if (!($$=(LEX_USER*)thd->calloc(sizeof(LEX_USER)))) + if (unlikely(!($$=(LEX_USER*)thd->calloc(sizeof(LEX_USER))))) MYSQL_YYABORT; $$->user= current_user; $$->plugin= empty_clex_str; @@ -15297,86 +15306,170 @@ user: user_maybe_role ; /* Keywords which we allow as table aliases. 
*/ -keyword_alias: - keyword_sp {} - | keyword_sp_verb_clause{} - | ASCII_SYM {} - | BACKUP_SYM {} - | BINLOG_SYM {} - | BYTE_SYM {} - | CACHE_SYM {} - | CHARSET {} - | CHECKSUM_SYM {} - | CHECKPOINT_SYM {} - | COLUMN_ADD_SYM {} - | COLUMN_CHECK_SYM {} - | COLUMN_CREATE_SYM {} - | COLUMN_DELETE_SYM {} - | COLUMN_GET_SYM {} - | COMMENT_SYM {} - | CONTAINS_SYM {} - | DEALLOCATE_SYM {} - | EXAMINED_SYM {} - | EXCLUDE_SYM {} - | EXECUTE_SYM {} - | FLUSH_SYM {} - | FOLLOWS_SYM {} - | FOLLOWING_SYM {} - | FORMAT_SYM {} - | GET_SYM {} - | HELP_SYM {} - | HOST_SYM {} - | INSTALL_SYM {} - | LANGUAGE_SYM {} - | NO_SYM {} - | OPTION {} - | OPTIONS_SYM {} - | OTHERS_SYM {} - | OWNER_SYM {} - | PARSER_SYM {} - | PERIOD_SYM {} - | PORT_SYM {} - | PRECEDES_SYM {} - | PRECEDING_SYM {} - | PREPARE_SYM {} - | REMOVE_SYM {} - | RESET_SYM {} - | RESTORE_SYM {} - | SECURITY_SYM {} - | SERVER_SYM {} - | SIGNED_SYM {} - | SOCKET_SYM {} - | SLAVE {} - | SLAVES {} - | SONAME_SYM {} - | START_SYM {} - | STOP_SYM {} - | STORED_SYM {} - | TIES_SYM {} - | UNICODE_SYM {} - | UNINSTALL_SYM {} - | UNBOUNDED_SYM {} - | WITHIN {} - | WRAPPER_SYM {} - | XA_SYM {} - | UPGRADE_SYM {} +keyword_table_alias: + keyword_data_type + | keyword_sp_block_section + | keyword_sp_head + | keyword_sp_var_and_label + | keyword_sp_var_not_label + | keyword_sysvar_type + | keyword_verb_clause ; - /* Keyword that we allow for identifiers (except SP labels) */ -keyword: keyword_alias | WINDOW_SYM {}; - -/* - * Keywords that we allow for labels in SPs. - * Anything that's the beginning of a statement or characteristics - * must be in keyword above, otherwise we get (harmful) shift/reduce - * conflicts. - */ -keyword_sp: - keyword_sp_data_type - | keyword_sp_not_data_type +keyword_ident: + keyword_data_type + | keyword_sp_block_section + | keyword_sp_head + | keyword_sp_var_and_label + | keyword_sp_var_not_label + | keyword_sysvar_type + | keyword_verb_clause + | WINDOW_SYM ; +/* + Keywords that we allow for labels in SPs. + Should not include keywords that start a statement or SP characteristics. +*/ +keyword_label: + keyword_data_type + | keyword_sp_var_and_label + | keyword_sysvar_type + ; + +keyword_sysvar_name: + keyword_data_type + | keyword_sp_block_section + | keyword_sp_head + | keyword_sp_var_and_label + | keyword_sp_var_not_label + | keyword_verb_clause + | WINDOW_SYM + ; + +keyword_sp_decl: + keyword_data_type + | keyword_sp_block_section + | keyword_sp_head + | keyword_sp_var_and_label + | keyword_sp_var_not_label + | keyword_sysvar_type + | keyword_verb_clause + | WINDOW_SYM + ; + +/* + Keywords that we allow in Oracle-style direct assignments: + xxx := 10; + but do not allow in labels in the default sql_mode: + label: + stmt1; + stmt2; + TODO: check if some of them can migrate to keyword_sp_var_and_label. 
+*/ +keyword_sp_var_not_label: + ASCII_SYM + | BACKUP_SYM + | BINLOG_SYM + | BYTE_SYM + | CACHE_SYM + | CHECKSUM_SYM + | CHECKPOINT_SYM + | COLUMN_ADD_SYM + | COLUMN_CHECK_SYM + | COLUMN_CREATE_SYM + | COLUMN_DELETE_SYM + | COLUMN_GET_SYM + | COMMENT_SYM + | DEALLOCATE_SYM + | EXAMINED_SYM + | EXCLUDE_SYM + | EXECUTE_SYM + | FLUSH_SYM + | FOLLOWING_SYM + | FORMAT_SYM + | GET_SYM + | HELP_SYM + | HOST_SYM + | INSTALL_SYM + | OPTION + | OPTIONS_SYM + | OTHERS_SYM + | OWNER_SYM + | PARSER_SYM + | PERIOD_SYM + | PORT_SYM + | PRECEDING_SYM + | PREPARE_SYM + | REMOVE_SYM + | RESET_SYM + | RESTORE_SYM + | SECURITY_SYM + | SERVER_SYM + | SIGNED_SYM + | SOCKET_SYM + | SLAVE + | SLAVES + | SONAME_SYM + | START_SYM + | STOP_SYM + | STORED_SYM + | TIES_SYM + | UNICODE_SYM + | UNINSTALL_SYM + | UNBOUNDED_SYM + | WITHIN + | WRAPPER_SYM + | XA_SYM + | UPGRADE_SYM + ; + +/* + Keywords that can start optional clauses in SP or trigger declarations + Allowed as identifiers (e.g. table, column names), + but: + - not allowed as SP label names + - not allowed as variable names in Oracle-style assignments: + xxx := 10; + + If we allowed these variables in assignments, there would be conflicts + with SP characteristics, or verb clauses, or compound statements, e.g.: + CREATE PROCEDURE p1 LANGUAGE ... + would be either: + CREATE PROCEDURE p1 LANGUAGE SQL BEGIN END; + or + CREATE PROCEDURE p1 LANGUAGE:=10; + + Note, these variables can still be assigned using quoted identifiers: + `do`:= 10; + "do":= 10; (when ANSI_QUOTES) + or using a SET statement: + SET do= 10; + + Note, some of these keywords are reserved keywords in Oracle. + In case if heavy grammar conflicts are found in the future, + we'll possibly need to make them reserved for sql_mode=ORACLE. + + TODO: Allow these variables as SP lables when sql_mode=ORACLE. + TODO: Allow assigning of "SP characteristics" marked variables + inside compound blocks. + TODO: Allow "follows" and "precedes" as variables in compound blocks: + BEGIN + follows := 10; + END; + as they conflict only with non-block FOR EACH ROW statement: + CREATE TRIGGER .. FOR EACH ROW follows:= 10; + CREATE TRIGGER .. FOR EACH ROW FOLLOWS tr1 a:= 10; +*/ +keyword_sp_head: + CONTAINS_SYM /* SP characteristic */ + | LANGUAGE_SYM /* SP characteristic */ + | NO_SYM /* SP characteristic */ + | CHARSET /* SET CHARSET utf8; */ + | FOLLOWS_SYM /* Conflicts with assignment in FOR EACH */ + | PRECEDES_SYM /* Conflicts with assignment in FOR EACH */ + ; /* Keywords that start a statement. @@ -15385,383 +15478,395 @@ keyword_sp: - not allowed as variable names in Oracle-style assignments: xxx:=10 */ -keyword_sp_verb_clause: - BEGIN_SYM { /* Compound. Reserved in Oracle */ } - | CLOSE_SYM { /* Verb clause. Reserved in Oracle */ } - | COMMIT_SYM { /* Verb clause. Reserved in Oracle */ } - | DO_SYM { /* Verb clause */ } - | END { /* Compound. Reserved in Oracle */ } - | HANDLER_SYM { /* Verb clause */ } - | OPEN_SYM { /* Verb clause. Reserved in Oracle */ } - | REPAIR { /* Verb clause */ } - | ROLLBACK_SYM { /* Verb clause. Reserved in Oracle */ } - | SAVEPOINT_SYM { /* Verb clause. Reserved in Oracle */ } - | SHUTDOWN { /* Verb clause */ } - | TRUNCATE_SYM { /* Verb clause. Reserved in Oracle */ } - ; +keyword_verb_clause: + CLOSE_SYM /* Verb clause. Reserved in Oracle */ + | COMMIT_SYM /* Verb clause. Reserved in Oracle */ + | DO_SYM /* Verb clause */ + | HANDLER_SYM /* Verb clause */ + | OPEN_SYM /* Verb clause. Reserved in Oracle */ + | REPAIR /* Verb clause */ + | ROLLBACK_SYM /* Verb clause. 
Reserved in Oracle */ + | SAVEPOINT_SYM /* Verb clause. Reserved in Oracle */ + | SHUTDOWN /* Verb clause */ + | TRUNCATE_SYM /* Verb clause. Reserved in Oracle */ + ; + +/* + Keywords that start an SP block section. +*/ +keyword_sp_block_section: + BEGIN_SYM + | END + ; + +keyword_sysvar_type: + GLOBAL_SYM + | LOCAL_SYM + | SESSION_SYM + ; /* These keywords are generally allowed as identifiers, but not allowed as non-delimited SP variable names in sql_mode=ORACLE. */ -keyword_sp_data_type: - BIT_SYM {} - | BOOLEAN_SYM {} /* PLSQL-R */ - | BOOL_SYM {} - | CLOB {} - | DATE_SYM {} /* Oracle-R, PLSQL-R */ - | DATETIME {} - | ENUM {} - | FIXED_SYM {} - | GEOMETRYCOLLECTION {} - | GEOMETRY_SYM {} - | JSON_SYM {} - | LINESTRING {} - | MEDIUM_SYM {} - | MULTILINESTRING {} - | MULTIPOINT {} - | MULTIPOLYGON {} - | NATIONAL_SYM {} - | NCHAR_SYM {} - | NUMBER_SYM {} /* Oracle-R, PLSQL-R */ - | NVARCHAR_SYM {} - | POINT_SYM {} - | POLYGON {} - | RAW {} /* Oracle-R */ - | ROW_SYM {} - | SERIAL_SYM {} - | TEXT_SYM {} - | TIMESTAMP {} - | TIME_SYM {} /* Oracle-R */ - | VARCHAR2 {} /* Oracle-R, PLSQL-R */ - | YEAR_SYM {} +keyword_data_type: + BIT_SYM + | BOOLEAN_SYM + | BOOL_SYM + | CLOB + | DATE_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2 + | DATETIME + | ENUM + | FIXED_SYM + | GEOMETRYCOLLECTION + | GEOMETRY_SYM + | JSON_SYM + | LINESTRING + | MEDIUM_SYM + | MULTILINESTRING + | MULTIPOINT + | MULTIPOLYGON + | NATIONAL_SYM + | NCHAR_SYM + | NUMBER_SYM + | NVARCHAR_SYM + | POINT_SYM + | POLYGON + | RAW + | ROW_SYM + | SERIAL_SYM + | TEXT_SYM + | TIMESTAMP %prec PREC_BELOW_CONTRACTION_TOKEN2 + | TIME_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2 + | VARCHAR2 + | YEAR_SYM ; -keyword_sp_not_data_type: - ACTION {} - | ADDDATE_SYM {} - | ADMIN_SYM {} - | AFTER_SYM {} - | AGAINST {} - | AGGREGATE_SYM {} - | ALGORITHM_SYM {} - | ALWAYS_SYM {} - | ANY_SYM {} - | AT_SYM {} - | ATOMIC_SYM {} - | AUTHORS_SYM {} - | AUTO_INC {} - | AUTOEXTEND_SIZE_SYM {} - | AUTO_SYM {} - | AVG_ROW_LENGTH {} - | AVG_SYM {} - | BLOCK_SYM {} - | BODY_SYM {} - | BTREE_SYM {} - | CASCADED {} - | CATALOG_NAME_SYM {} - | CHAIN_SYM {} - | CHANGED {} - | CIPHER_SYM {} - | CLIENT_SYM {} - | CLASS_ORIGIN_SYM {} - | COALESCE {} - | CODE_SYM {} - | COLLATION_SYM {} - | COLUMN_NAME_SYM {} - | COLUMNS {} - | COMMITTED_SYM {} - | COMPACT_SYM {} - | COMPLETION_SYM {} - | COMPRESSED_SYM {} - | CONCURRENT {} - | CONNECTION_SYM {} - | CONSISTENT_SYM {} - | CONSTRAINT_CATALOG_SYM {} - | CONSTRAINT_SCHEMA_SYM {} - | CONSTRAINT_NAME_SYM {} - | CONTEXT_SYM {} - | CONTRIBUTORS_SYM {} - | CURRENT_POS_SYM {} - | CPU_SYM {} - | CUBE_SYM {} +/* + These keywords are fine for both SP variable names and SP labels. 
+*/ +keyword_sp_var_and_label: + ACTION + | ADDDATE_SYM + | ADMIN_SYM + | AFTER_SYM + | AGAINST + | AGGREGATE_SYM + | ALGORITHM_SYM + | ALWAYS_SYM + | ANY_SYM + | AT_SYM + | ATOMIC_SYM + | AUTHORS_SYM + | AUTO_INC + | AUTOEXTEND_SIZE_SYM + | AUTO_SYM + | AVG_ROW_LENGTH + | AVG_SYM + | BLOCK_SYM + | BODY_SYM + | BTREE_SYM + | CASCADED + | CATALOG_NAME_SYM + | CHAIN_SYM + | CHANGED + | CIPHER_SYM + | CLIENT_SYM + | CLASS_ORIGIN_SYM + | COALESCE + | CODE_SYM + | COLLATION_SYM + | COLUMN_NAME_SYM + | COLUMNS + | COMMITTED_SYM + | COMPACT_SYM + | COMPLETION_SYM + | COMPRESSED_SYM + | CONCURRENT + | CONNECTION_SYM + | CONSISTENT_SYM + | CONSTRAINT_CATALOG_SYM + | CONSTRAINT_SCHEMA_SYM + | CONSTRAINT_NAME_SYM + | CONTEXT_SYM + | CONTRIBUTORS_SYM + | CURRENT_POS_SYM + | CPU_SYM + | CUBE_SYM /* Although a reserved keyword in SQL:2003 (and :2008), not reserved in MySQL per WL#2111 specification. */ - | CURRENT_SYM {} - | CURSOR_NAME_SYM {} - | CYCLE_SYM {} - | DATA_SYM {} - | DATAFILE_SYM {} - | DATE_FORMAT_SYM {} - | DAY_SYM {} - | DECODE_SYM {} - | DEFINER_SYM {} - | DELAY_KEY_WRITE_SYM {} - | DES_KEY_FILE {} - | DIAGNOSTICS_SYM {} - | DIRECTORY_SYM {} - | DISABLE_SYM {} - | DISCARD {} - | DISK_SYM {} - | DUMPFILE {} - | DUPLICATE_SYM {} - | DYNAMIC_SYM {} - | ELSIF_SYM {} - | ENDS_SYM {} - | ENGINE_SYM {} - | ENGINES_SYM {} - | ERROR_SYM {} - | ERRORS {} - | ESCAPE_SYM {} - | EVENT_SYM {} - | EVENTS_SYM {} - | EVERY_SYM {} - | EXCEPTION_SYM {} - | EXCHANGE_SYM {} - | EXPANSION_SYM {} - | EXPORT_SYM {} - | EXTENDED_SYM {} - | EXTENT_SIZE_SYM {} - | FAULTS_SYM {} - | FAST_SYM {} - | FOUND_SYM {} - | ENABLE_SYM {} - | FULL {} - | FILE_SYM {} - | FIRST_SYM {} - | GENERAL {} - | GENERATED_SYM {} - | GET_FORMAT {} - | GRANTS {} - | GLOBAL_SYM {} - | GOTO_SYM {} - | HASH_SYM {} - | HARD_SYM {} - | HISTORY_SYM {} - | HOSTS_SYM {} - | HOUR_SYM {} - | ID_SYM {} - | IDENTIFIED_SYM {} - | IGNORE_SERVER_IDS_SYM {} - | INCREMENT_SYM {} - | IMMEDIATE_SYM {} /* SQL-2003-R */ - | INVOKER_SYM {} - | IMPORT {} - | INDEXES {} - | INITIAL_SIZE_SYM {} - | IO_SYM {} - | IPC_SYM {} - | ISOLATION {} - | ISOPEN_SYM {} - | ISSUER_SYM {} - | INSERT_METHOD {} - | INVISIBLE_SYM {} - | KEY_BLOCK_SIZE {} - | LAST_VALUE {} - | LAST_SYM {} - | LASTVAL_SYM {} - | LEAVES {} - | LESS_SYM {} - | LEVEL_SYM {} - | LIST_SYM {} - | LOCAL_SYM {} - | LOCKS_SYM {} - | LOGFILE_SYM {} - | LOGS_SYM {} - | MAX_ROWS {} - | MASTER_SYM {} - | MASTER_HEARTBEAT_PERIOD_SYM {} - | MASTER_GTID_POS_SYM {} - | MASTER_HOST_SYM {} - | MASTER_PORT_SYM {} - | MASTER_LOG_FILE_SYM {} - | MASTER_LOG_POS_SYM {} - | MASTER_USER_SYM {} - | MASTER_USE_GTID_SYM {} - | MASTER_PASSWORD_SYM {} - | MASTER_SERVER_ID_SYM {} - | MASTER_CONNECT_RETRY_SYM {} - | MASTER_DELAY_SYM {} - | MASTER_SSL_SYM {} - | MASTER_SSL_CA_SYM {} - | MASTER_SSL_CAPATH_SYM {} - | MASTER_SSL_CERT_SYM {} - | MASTER_SSL_CIPHER_SYM {} - | MASTER_SSL_CRL_SYM {} - | MASTER_SSL_CRLPATH_SYM {} - | MASTER_SSL_KEY_SYM {} - | MAX_CONNECTIONS_PER_HOUR {} - | MAX_QUERIES_PER_HOUR {} - | MAX_SIZE_SYM {} - | MAX_STATEMENT_TIME_SYM {} - | MAX_UPDATES_PER_HOUR {} - | MAX_USER_CONNECTIONS_SYM {} - | MEMORY_SYM {} - | MERGE_SYM {} - | MESSAGE_TEXT_SYM {} - | MICROSECOND_SYM {} - | MIGRATE_SYM {} - | MINUTE_SYM {} - | MINVALUE_SYM {} - | MIN_ROWS {} - | MODIFY_SYM {} - | MODE_SYM {} - | MONTH_SYM {} - | MUTEX_SYM {} - | MYSQL_SYM {} - | MYSQL_ERRNO_SYM {} - | NAME_SYM {} - | NAMES_SYM {} - | NEXT_SYM {} - | NEXTVAL_SYM {} - | NEW_SYM {} - | NOCACHE_SYM {} - | NOCYCLE_SYM {} - | NOMINVALUE_SYM {} - | NOMAXVALUE_SYM 
{} - | NO_WAIT_SYM {} - | NOWAIT_SYM {} - | NODEGROUP_SYM {} - | NONE_SYM {} - | NOTFOUND_SYM {} - | OF_SYM {} /* SQL-1999-R, Oracle-R */ - | OFFSET_SYM {} - | OLD_PASSWORD_SYM {} - | ONE_SYM {} - | ONLINE_SYM {} - | ONLY_SYM {} - | PACKAGE_SYM {} - | PACK_KEYS_SYM {} - | PAGE_SYM {} - | PARTIAL {} - | PARTITIONING_SYM {} - | PARTITIONS_SYM {} - | PASSWORD_SYM {} - | PERSISTENT_SYM {} - | PHASE_SYM {} - | PLUGIN_SYM {} - | PLUGINS_SYM {} - | PRESERVE_SYM {} - | PREV_SYM {} - | PREVIOUS_SYM {} - | PRIVILEGES {} - | PROCESS {} - | PROCESSLIST_SYM {} - | PROFILE_SYM {} - | PROFILES_SYM {} - | PROXY_SYM {} - | QUARTER_SYM {} - | QUERY_SYM {} - | QUICK {} - | RAISE_SYM {} - | READ_ONLY_SYM {} - | REBUILD_SYM {} - | RECOVER_SYM {} - | REDO_BUFFER_SIZE_SYM {} - | REDOFILE_SYM {} - | REDUNDANT_SYM {} - | RELAY {} - | RELAYLOG_SYM {} - | RELAY_LOG_FILE_SYM {} - | RELAY_LOG_POS_SYM {} - | RELAY_THREAD {} - | RELOAD {} - | REORGANIZE_SYM {} - | REPEATABLE_SYM {} - | REPLICATION {} - | RESOURCES {} - | RESTART_SYM {} - | RESUME_SYM {} - | RETURNED_SQLSTATE_SYM {} - | RETURNS_SYM {} - | REUSE_SYM {} /* Oracle-R */ - | REVERSE_SYM {} - | ROLE_SYM {} - | ROLLUP_SYM {} - | ROUTINE_SYM {} - | ROWCOUNT_SYM {} - | ROWTYPE_SYM {} - | ROW_COUNT_SYM {} - | ROW_FORMAT_SYM {} - | RTREE_SYM {} - | SCHEDULE_SYM {} - | SCHEMA_NAME_SYM {} - | SECOND_SYM {} - | SEQUENCE_SYM {} - | SERIALIZABLE_SYM {} - | SESSION_SYM {} - | SETVAL_SYM {} - | SIMPLE_SYM {} - | SHARE_SYM {} - | SLAVE_POS_SYM {} - | SLOW {} - | SNAPSHOT_SYM {} - | SOFT_SYM {} - | SOUNDS_SYM {} - | SOURCE_SYM {} - | SQL_CACHE_SYM {} - | SQL_BUFFER_RESULT {} - | SQL_NO_CACHE_SYM {} - | SQL_THREAD {} - | STARTS_SYM {} - | STATEMENT_SYM {} - | STATUS_SYM {} - | STORAGE_SYM {} - | STRING_SYM {} - | SUBCLASS_ORIGIN_SYM {} - | SUBDATE_SYM {} - | SUBJECT_SYM {} - | SUBPARTITION_SYM {} - | SUBPARTITIONS_SYM {} - | SUPER_SYM {} - | SUSPEND_SYM {} - | SWAPS_SYM {} - | SWITCHES_SYM {} - | SYSTEM {} - | SYSTEM_TIME_SYM {} - | TABLE_NAME_SYM {} - | TABLES {} - | TABLE_CHECKSUM_SYM {} - | TABLESPACE {} - | TEMPORARY {} - | TEMPTABLE_SYM {} - | THAN_SYM {} - | TRANSACTION_SYM {} - | TRANSACTIONAL_SYM {} - | TRIGGERS_SYM {} - | TRIM_ORACLE {} - | TIMESTAMP_ADD {} - | TIMESTAMP_DIFF {} - | TYPES_SYM {} - | TYPE_SYM {} - | UDF_RETURNS_SYM {} - | FUNCTION_SYM {} - | UNCOMMITTED_SYM {} - | UNDEFINED_SYM {} - | UNDO_BUFFER_SIZE_SYM {} - | UNDOFILE_SYM {} - | UNKNOWN_SYM {} - | UNTIL_SYM {} - | USER_SYM {} - | USE_FRM {} - | VARIABLES {} - | VERSIONING_SYM {} - | VIEW_SYM {} - | VIRTUAL_SYM {} - | VALUE_SYM {} - | WARNINGS {} - | WAIT_SYM {} - | WEEK_SYM {} - | WEIGHT_STRING_SYM {} - | WITHOUT {} - | WORK_SYM {} - | X509_SYM {} - | XML_SYM {} - | VIA_SYM {} + | CURRENT_SYM + | CURSOR_NAME_SYM + | CYCLE_SYM + | DATA_SYM + | DATAFILE_SYM + | DATE_FORMAT_SYM + | DAY_SYM + | DECODE_SYM + | DEFINER_SYM + | DELAY_KEY_WRITE_SYM + | DES_KEY_FILE + | DIAGNOSTICS_SYM + | DIRECTORY_SYM + | DISABLE_SYM + | DISCARD + | DISK_SYM + | DUMPFILE + | DUPLICATE_SYM + | DYNAMIC_SYM + | ELSIF_SYM + | ENDS_SYM + | ENGINE_SYM + | ENGINES_SYM + | ERROR_SYM + | ERRORS + | ESCAPE_SYM + | EVENT_SYM + | EVENTS_SYM + | EVERY_SYM + | EXCEPTION_SYM + | EXCHANGE_SYM + | EXPANSION_SYM + | EXPORT_SYM + | EXTENDED_SYM + | EXTENT_SIZE_SYM + | FAULTS_SYM + | FAST_SYM + | FOUND_SYM + | ENABLE_SYM + | FULL + | FILE_SYM + | FIRST_SYM + | GENERAL + | GENERATED_SYM + | GET_FORMAT + | GRANTS + | GOTO_SYM + | HASH_SYM + | HARD_SYM + | HISTORY_SYM + | HOSTS_SYM + | HOUR_SYM + | ID_SYM + | IDENTIFIED_SYM + | 
IGNORE_SERVER_IDS_SYM + | INCREMENT_SYM + | IMMEDIATE_SYM + | INVOKER_SYM + | IMPORT + | INDEXES + | INITIAL_SIZE_SYM + | IO_SYM + | IPC_SYM + | ISOLATION + | ISOPEN_SYM + | ISSUER_SYM + | INSERT_METHOD + | INVISIBLE_SYM + | KEY_BLOCK_SIZE + | LAST_VALUE + | LAST_SYM + | LASTVAL_SYM + | LEAVES + | LESS_SYM + | LEVEL_SYM + | LIST_SYM + | LOCKS_SYM + | LOGFILE_SYM + | LOGS_SYM + | MAX_ROWS + | MASTER_SYM + | MASTER_HEARTBEAT_PERIOD_SYM + | MASTER_GTID_POS_SYM + | MASTER_HOST_SYM + | MASTER_PORT_SYM + | MASTER_LOG_FILE_SYM + | MASTER_LOG_POS_SYM + | MASTER_USER_SYM + | MASTER_USE_GTID_SYM + | MASTER_PASSWORD_SYM + | MASTER_SERVER_ID_SYM + | MASTER_CONNECT_RETRY_SYM + | MASTER_DELAY_SYM + | MASTER_SSL_SYM + | MASTER_SSL_CA_SYM + | MASTER_SSL_CAPATH_SYM + | MASTER_SSL_CERT_SYM + | MASTER_SSL_CIPHER_SYM + | MASTER_SSL_CRL_SYM + | MASTER_SSL_CRLPATH_SYM + | MASTER_SSL_KEY_SYM + | MAX_CONNECTIONS_PER_HOUR + | MAX_QUERIES_PER_HOUR + | MAX_SIZE_SYM + | MAX_STATEMENT_TIME_SYM + | MAX_UPDATES_PER_HOUR + | MAX_USER_CONNECTIONS_SYM + | MEMORY_SYM + | MERGE_SYM + | MESSAGE_TEXT_SYM + | MICROSECOND_SYM + | MIGRATE_SYM + | MINUTE_SYM + | MINVALUE_SYM + | MIN_ROWS + | MODIFY_SYM + | MODE_SYM + | MONTH_SYM + | MUTEX_SYM + | MYSQL_SYM + | MYSQL_ERRNO_SYM + | NAME_SYM + | NAMES_SYM + | NEXT_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2 + | NEXTVAL_SYM + | NEW_SYM + | NOCACHE_SYM + | NOCYCLE_SYM + | NOMINVALUE_SYM + | NOMAXVALUE_SYM + | NO_WAIT_SYM + | NOWAIT_SYM + | NODEGROUP_SYM + | NONE_SYM + | NOTFOUND_SYM + | OF_SYM + | OFFSET_SYM + | OLD_PASSWORD_SYM + | ONE_SYM + | ONLINE_SYM + | ONLY_SYM + | PACKAGE_SYM + | PACK_KEYS_SYM + | PAGE_SYM + | PARTIAL + | PARTITIONING_SYM + | PARTITIONS_SYM + | PASSWORD_SYM + | PERSISTENT_SYM + | PHASE_SYM + | PLUGIN_SYM + | PLUGINS_SYM + | PRESERVE_SYM + | PREV_SYM + | PREVIOUS_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2 + | PRIVILEGES + | PROCESS + | PROCESSLIST_SYM + | PROFILE_SYM + | PROFILES_SYM + | PROXY_SYM + | QUARTER_SYM + | QUERY_SYM + | QUICK + | RAISE_SYM + | READ_ONLY_SYM + | REBUILD_SYM + | RECOVER_SYM + | REDO_BUFFER_SIZE_SYM + | REDOFILE_SYM + | REDUNDANT_SYM + | RELAY + | RELAYLOG_SYM + | RELAY_LOG_FILE_SYM + | RELAY_LOG_POS_SYM + | RELAY_THREAD + | RELOAD + | REORGANIZE_SYM + | REPEATABLE_SYM + | REPLICATION + | RESOURCES + | RESTART_SYM + | RESUME_SYM + | RETURNED_SQLSTATE_SYM + | RETURNS_SYM + | REUSE_SYM + | REVERSE_SYM + | ROLE_SYM + | ROLLUP_SYM + | ROUTINE_SYM + | ROWCOUNT_SYM + | ROWTYPE_SYM + | ROW_COUNT_SYM + | ROW_FORMAT_SYM + | RTREE_SYM + | SCHEDULE_SYM + | SCHEMA_NAME_SYM + | SECOND_SYM + | SEQUENCE_SYM + | SERIALIZABLE_SYM + | SETVAL_SYM + | SIMPLE_SYM + | SHARE_SYM + | SLAVE_POS_SYM + | SLOW + | SNAPSHOT_SYM + | SOFT_SYM + | SOUNDS_SYM + | SOURCE_SYM + | SQL_CACHE_SYM + | SQL_BUFFER_RESULT + | SQL_NO_CACHE_SYM + | SQL_THREAD + | STARTS_SYM + | STATEMENT_SYM + | STATUS_SYM + | STORAGE_SYM + | STRING_SYM + | SUBCLASS_ORIGIN_SYM + | SUBDATE_SYM + | SUBJECT_SYM + | SUBPARTITION_SYM + | SUBPARTITIONS_SYM + | SUPER_SYM + | SUSPEND_SYM + | SWAPS_SYM + | SWITCHES_SYM + | SYSTEM + | SYSTEM_TIME_SYM + | TABLE_NAME_SYM + | TABLES + | TABLE_CHECKSUM_SYM + | TABLESPACE + | TEMPORARY + | TEMPTABLE_SYM + | THAN_SYM + | TRANSACTION_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2 + | TRANSACTIONAL_SYM + | TRIGGERS_SYM + | TRIM_ORACLE + | TIMESTAMP_ADD + | TIMESTAMP_DIFF + | TYPES_SYM + | TYPE_SYM + | UDF_RETURNS_SYM + | FUNCTION_SYM + | UNCOMMITTED_SYM + | UNDEFINED_SYM + | UNDO_BUFFER_SIZE_SYM + | UNDOFILE_SYM + | UNKNOWN_SYM + | UNTIL_SYM + | USER_SYM + | USE_FRM + | 
VARIABLES + | VERSIONING_SYM + | VIEW_SYM + | VIRTUAL_SYM + | VALUE_SYM + | WARNINGS + | WAIT_SYM + | WEEK_SYM + | WEIGHT_STRING_SYM + | WITHOUT + | WORK_SYM + | X509_SYM + | XML_SYM + | VIA_SYM ; /* @@ -15788,7 +15893,7 @@ set: set_stmt_option_value_following_option_type_list { LEX *lex= Lex; - if (lex->table_or_sp_used()) + if (unlikely(lex->table_or_sp_used())) my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0), "SET STATEMENT")); lex->stmt_var_list= lex->var_list; lex->var_list.empty(); @@ -15811,7 +15916,7 @@ set_stmt_option_value_following_option_type_list: start_option_value_list: option_value_no_option_type { - if (sp_create_assignment_instr(thd, yychar == YYEMPTY)) + if (unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) MYSQL_YYABORT; } option_value_list_continued @@ -15821,7 +15926,7 @@ start_option_value_list: } transaction_characteristics { - if (sp_create_assignment_instr(thd, yychar == YYEMPTY)) + if (unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) MYSQL_YYABORT; } | option_type @@ -15836,14 +15941,14 @@ start_option_value_list: start_option_value_list_following_option_type: option_value_following_option_type { - if (sp_create_assignment_instr(thd, yychar == YYEMPTY)) - MYSQL_YYABORT; + if (unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) + MYSQL_YYABORT; } option_value_list_continued | TRANSACTION_SYM transaction_characteristics { - if (sp_create_assignment_instr(thd, yychar == YYEMPTY)) - MYSQL_YYABORT; + if (unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) + MYSQL_YYABORT; } ; @@ -15860,8 +15965,8 @@ option_value_list: } option_value { - if (sp_create_assignment_instr(thd, yychar == YYEMPTY)) - MYSQL_YYABORT; + if (unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) + MYSQL_YYABORT; } | option_value_list ',' { @@ -15869,8 +15974,8 @@ option_value_list: } option_value { - if (sp_create_assignment_instr(thd, yychar == YYEMPTY)) - MYSQL_YYABORT; + if (unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) + MYSQL_YYABORT; } ; @@ -15908,17 +16013,17 @@ opt_var_ident_type: option_value_following_option_type: ident equal set_expr_or_default { - if (Lex->set_system_variable(Lex->option_type, &$1, $3)) + if (unlikely(Lex->set_system_variable(Lex->option_type, &$1, $3))) MYSQL_YYABORT; } | ident '.' ident equal set_expr_or_default { - if (Lex->set_system_variable(thd, Lex->option_type, &$1, &$3, $5)) + if (unlikely(Lex->set_system_variable(thd, Lex->option_type, &$1, &$3, $5))) MYSQL_YYABORT; } | DEFAULT '.' ident equal set_expr_or_default { - if (Lex->set_default_system_variable(Lex->option_type, &$3, $5)) + if (unlikely(Lex->set_default_system_variable(Lex->option_type, &$3, $5))) MYSQL_YYABORT; } ; @@ -15927,37 +16032,37 @@ option_value_following_option_type: option_value_no_option_type: ident equal set_expr_or_default { - if (Lex->set_variable(&$1, $3)) + if (unlikely(Lex->set_variable(&$1, $3))) MYSQL_YYABORT; } | ident '.' ident equal set_expr_or_default { - if (Lex->set_variable(&$1, &$3, $5)) + if (unlikely(Lex->set_variable(&$1, &$3, $5))) MYSQL_YYABORT; } | DEFAULT '.' 
ident equal set_expr_or_default { - if (Lex->set_default_system_variable(Lex->option_type, &$3, $5)) + if (unlikely(Lex->set_default_system_variable(Lex->option_type, &$3, $5))) MYSQL_YYABORT; } | '@' ident_or_text equal expr { - if (Lex->set_user_variable(thd, &$2, $4)) + if (unlikely(Lex->set_user_variable(thd, &$2, $4))) MYSQL_YYABORT; } - | '@' '@' opt_var_ident_type ident equal set_expr_or_default + | '@' '@' opt_var_ident_type ident_sysvar_name equal set_expr_or_default { - if (Lex->set_system_variable($3, &$4, $6)) + if (unlikely(Lex->set_system_variable($3, &$4, $6))) MYSQL_YYABORT; } - | '@' '@' opt_var_ident_type ident '.' ident equal set_expr_or_default + | '@' '@' opt_var_ident_type ident_sysvar_name '.' ident equal set_expr_or_default { - if (Lex->set_system_variable(thd, $3, &$4, &$6, $8)) + if (unlikely(Lex->set_system_variable(thd, $3, &$4, &$6, $8))) MYSQL_YYABORT; } | '@' '@' opt_var_ident_type DEFAULT '.' ident equal set_expr_or_default { - if (Lex->set_default_system_variable($3, &$6, $8)) + if (unlikely(Lex->set_default_system_variable($3, &$6, $8))) MYSQL_YYABORT; } | charset old_or_new_charset_name_or_default @@ -15970,7 +16075,7 @@ option_value_no_option_type: set_var_collation_client(cs2, thd->variables.collation_database, cs2)); - if (var == NULL) + if (unlikely(var == NULL)) MYSQL_YYABORT; lex->var_list.push_back(var, thd->mem_root); } @@ -15979,7 +16084,7 @@ option_value_no_option_type: LEX *lex= Lex; sp_pcontext *spc= lex->spcont; LEX_CSTRING names= { STRING_WITH_LEN("names") }; - if (spc && spc->find_variable(&names, false)) + if (unlikely(spc && spc->find_variable(&names, false))) my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), names.str); else thd->parse_error(); @@ -15992,7 +16097,7 @@ option_value_no_option_type: CHARSET_INFO *cs3; cs2= $2 ? $2 : global_system_variables.character_set_client; cs3= $3 ? 
$3 : cs2; - if (!my_charset_same(cs2, cs3)) + if (unlikely(!my_charset_same(cs2, cs3))) { my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), cs3->name, cs2->csname); @@ -16000,23 +16105,24 @@ option_value_no_option_type: } set_var_collation_client *var; var= new (thd->mem_root) set_var_collation_client(cs3, cs3, cs3); - if (var == NULL) + if (unlikely(var == NULL) || + unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); } | DEFAULT ROLE_SYM grant_role { LEX *lex = Lex; LEX_USER *user; - if (!(user=(LEX_USER *) thd->calloc(sizeof(LEX_USER)))) + if (unlikely(!(user=(LEX_USER *) thd->calloc(sizeof(LEX_USER))))) MYSQL_YYABORT; user->user= current_user; set_var_default_role *var= (new (thd->mem_root) set_var_default_role(user, $3->user)); - if (var == NULL) + if (unlikely(var == NULL) || + unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); + thd->lex->autocommit= TRUE; if (lex->sphead) lex->sphead->m_flags|= sp_head::HAS_SET_AUTOCOMMIT_STMT; @@ -16026,9 +16132,9 @@ option_value_no_option_type: LEX *lex = Lex; set_var_default_role *var= (new (thd->mem_root) set_var_default_role($5, $3->user)); - if (var == NULL) + if (unlikely(var == NULL) || + unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); thd->lex->autocommit= TRUE; if (lex->sphead) lex->sphead->m_flags|= sp_head::HAS_SET_AUTOCOMMIT_STMT; @@ -16037,24 +16143,25 @@ option_value_no_option_type: { LEX *lex = Lex; set_var_role *var= new (thd->mem_root) set_var_role($2); - if (var == NULL) + if (unlikely(var == NULL) || + unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); } | PASSWORD_SYM opt_for_user text_or_password { LEX *lex = Lex; set_var_password *var= (new (thd->mem_root) set_var_password(lex->definer)); - if (var == NULL) + if (unlikely(var == NULL) || + unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); lex->autocommit= TRUE; if (lex->sphead) lex->sphead->m_flags|= sp_head::HAS_SET_AUTOCOMMIT_STMT; } ; + transaction_characteristics: transaction_access_mode | isolation_level @@ -16067,16 +16174,17 @@ transaction_access_mode: { LEX *lex=Lex; Item *item= new (thd->mem_root) Item_int(thd, (int32) $1); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; set_var *var= (new (thd->mem_root) set_var(thd, lex->option_type, find_sys_var(thd, "tx_read_only"), &null_clex_str, item)); - if (var == NULL) + if (unlikely(var == NULL)) + MYSQL_YYABORT; + if (unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); } ; @@ -16085,16 +16193,16 @@ isolation_level: { LEX *lex=Lex; Item *item= new (thd->mem_root) Item_int(thd, (int32) $3); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; set_var *var= (new (thd->mem_root) set_var(thd, lex->option_type, find_sys_var(thd, "tx_isolation"), &null_clex_str, item)); - if (var == NULL) + if (unlikely(var == NULL) || + unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); } ; @@ -16117,9 +16225,10 @@ opt_for_user: sp_pcontext *spc= lex->spcont; LEX_CSTRING pw= { STRING_WITH_LEN("password") }; - if (spc && spc->find_variable(&pw, false)) + if (unlikely(spc && spc->find_variable(&pw, false))) my_yyabort_error((ER_SP_BAD_VAR_SHADOW, MYF(0), pw.str)); 
- if (!(lex->definer= (LEX_USER*) thd->calloc(sizeof(LEX_USER)))) + if (unlikely(!(lex->definer= (LEX_USER*) + thd->calloc(sizeof(LEX_USER))))) MYSQL_YYABORT; lex->definer->user= current_user; lex->definer->plugin= empty_clex_str; @@ -16146,19 +16255,19 @@ set_expr_or_default: | ON { $$=new (thd->mem_root) Item_string_sys(thd, "ON", 2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ALL { $$=new (thd->mem_root) Item_string_sys(thd, "ALL", 3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | BINARY { $$=new (thd->mem_root) Item_string_sys(thd, "binary", 6); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -16170,7 +16279,7 @@ lock: { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "LOCK")); lex->sql_command= SQLCOM_LOCK_TABLES; } @@ -16183,14 +16292,14 @@ opt_lock_wait_timeout: {} | WAIT_SYM ulong_num { - if (set_statement_var_if_exists(thd, STRING_WITH_LEN("lock_wait_timeout"), $2) || - set_statement_var_if_exists(thd, STRING_WITH_LEN("innodb_lock_wait_timeout"), $2)) + if (unlikely(set_statement_var_if_exists(thd, STRING_WITH_LEN("lock_wait_timeout"), $2)) || + unlikely(set_statement_var_if_exists(thd, STRING_WITH_LEN("innodb_lock_wait_timeout"), $2))) MYSQL_YYABORT; } | NOWAIT_SYM { - if (set_statement_var_if_exists(thd, STRING_WITH_LEN("lock_wait_timeout"), 0) || - set_statement_var_if_exists(thd, STRING_WITH_LEN("innodb_lock_wait_timeout"), 0)) + if (unlikely(set_statement_var_if_exists(thd, STRING_WITH_LEN("lock_wait_timeout"), 0)) || + unlikely(set_statement_var_if_exists(thd, STRING_WITH_LEN("innodb_lock_wait_timeout"), 0))) MYSQL_YYABORT; } ; @@ -16210,12 +16319,13 @@ table_lock: { thr_lock_type lock_type= (thr_lock_type) $3; bool lock_for_write= (lock_type >= TL_WRITE_ALLOW_WRITE); - if (!Select->add_table_to_list(thd, $1, $2, 0, lock_type, + if (unlikely(!Select-> + add_table_to_list(thd, $1, $2, 0, lock_type, (lock_for_write ? lock_type == TL_WRITE_CONCURRENT_INSERT ? 
MDL_SHARED_WRITE : MDL_SHARED_NO_READ_WRITE : - MDL_SHARED_READ))) + MDL_SHARED_READ)))) MYSQL_YYABORT; } ; @@ -16237,7 +16347,7 @@ unlock: { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "UNLOCK")); lex->sql_command= SQLCOM_UNLOCK_TABLES; } @@ -16253,43 +16363,46 @@ handler: HANDLER_SYM table_ident OPEN_SYM opt_table_alias { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "HANDLER")); lex->sql_command = SQLCOM_HA_OPEN; - if (!lex->current_select->add_table_to_list(thd, $2, $4, 0)) + if (unlikely(!lex->current_select->add_table_to_list(thd, $2, $4, + 0))) MYSQL_YYABORT; } | HANDLER_SYM table_ident_nodb CLOSE_SYM { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "HANDLER")); lex->sql_command = SQLCOM_HA_CLOSE; - if (!lex->current_select->add_table_to_list(thd, $2, 0, 0)) + if (unlikely(!lex->current_select->add_table_to_list(thd, $2, 0, + 0))) MYSQL_YYABORT; } | HANDLER_SYM table_ident_nodb READ_SYM { LEX *lex=Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "HANDLER")); lex->expr_allows_subselect= FALSE; lex->sql_command = SQLCOM_HA_READ; lex->ha_rkey_mode= HA_READ_KEY_EXACT; /* Avoid purify warnings */ Item *one= new (thd->mem_root) Item_int(thd, (int32) 1); - if (one == NULL) + if (unlikely(one == NULL)) MYSQL_YYABORT; lex->current_select->select_limit= one; lex->current_select->offset_limit= 0; lex->limit_rows_examined= 0; - if (!lex->current_select->add_table_to_list(thd, $2, 0, 0)) + if (unlikely(!lex->current_select->add_table_to_list(thd, $2, 0, + 0))) MYSQL_YYABORT; } handler_read_or_scan opt_where_clause opt_limit_clause { Lex->expr_allows_subselect= TRUE; /* Stored functions are not supported for HANDLER READ. */ - if (Lex->uses_stored_routines()) + if (unlikely(Lex->uses_stored_routines())) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), "stored functions in HANDLER ... 
READ"); @@ -16318,7 +16431,7 @@ handler_rkey_function: LEX *lex=Lex; lex->ha_read_mode = RKEY; lex->ha_rkey_mode=$1; - if (!(lex->insert_list= new (thd->mem_root) List_item)) + if (unlikely(!(lex->insert_list= new (thd->mem_root) List_item))) MYSQL_YYABORT; } '(' values ')' @@ -16349,12 +16462,14 @@ revoke_command: } | grant_privileges ON FUNCTION_SYM grant_ident FROM user_and_role_list { - if (Lex->add_grant_command(thd, SQLCOM_REVOKE, TYPE_ENUM_FUNCTION)) + if (unlikely(Lex->add_grant_command(thd, SQLCOM_REVOKE, + TYPE_ENUM_FUNCTION))) MYSQL_YYABORT; } | grant_privileges ON PROCEDURE_SYM grant_ident FROM user_and_role_list { - if (Lex->add_grant_command(thd, SQLCOM_REVOKE, TYPE_ENUM_PROCEDURE)) + if (unlikely(Lex->add_grant_command(thd, SQLCOM_REVOKE, + TYPE_ENUM_PROCEDURE))) MYSQL_YYABORT; } | ALL opt_privileges ',' GRANT OPTION FROM user_and_role_list @@ -16371,7 +16486,7 @@ revoke_command: | admin_option_for_role FROM user_and_role_list { Lex->sql_command= SQLCOM_REVOKE_ROLE; - if (Lex->users_list.push_front($1, thd->mem_root)) + if (unlikely(Lex->users_list.push_front($1, thd->mem_root))) MYSQL_YYABORT; } ; @@ -16399,13 +16514,15 @@ grant_command: | grant_privileges ON FUNCTION_SYM grant_ident TO_SYM grant_list opt_require_clause opt_grant_options { - if (Lex->add_grant_command(thd, SQLCOM_GRANT, TYPE_ENUM_FUNCTION)) + if (unlikely(Lex->add_grant_command(thd, SQLCOM_GRANT, + TYPE_ENUM_FUNCTION))) MYSQL_YYABORT; } | grant_privileges ON PROCEDURE_SYM grant_ident TO_SYM grant_list opt_require_clause opt_grant_options { - if (Lex->add_grant_command(thd, SQLCOM_GRANT, TYPE_ENUM_PROCEDURE)) + if (unlikely(Lex->add_grant_command(thd, SQLCOM_GRANT, + TYPE_ENUM_PROCEDURE))) MYSQL_YYABORT; } | PROXY_SYM ON user TO_SYM grant_list opt_grant_option @@ -16420,7 +16537,7 @@ grant_command: LEX *lex= Lex; lex->sql_command= SQLCOM_GRANT_ROLE; /* The first role is the one that is granted */ - if (Lex->users_list.push_front($1, thd->mem_root)) + if (unlikely(Lex->users_list.push_front($1, thd->mem_root))) MYSQL_YYABORT; } @@ -16437,12 +16554,12 @@ opt_with_admin_option: role_list: grant_role { - if (Lex->users_list.push_back($1, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($1, thd->mem_root))) MYSQL_YYABORT; } | role_list ',' grant_role { - if (Lex->users_list.push_back($3, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($3, thd->mem_root))) MYSQL_YYABORT; } ; @@ -16450,7 +16567,7 @@ role_list: current_role: CURRENT_ROLE optional_braces { - if (!($$=(LEX_USER*) thd->calloc(sizeof(LEX_USER)))) + if (unlikely(!($$=(LEX_USER*) thd->calloc(sizeof(LEX_USER))))) MYSQL_YYABORT; $$->user= current_role; $$->reset_auth(); @@ -16464,17 +16581,17 @@ grant_role: /* trim end spaces (as they'll be lost in mysql.user anyway) */ $1.length= cs->cset->lengthsp(cs, $1.str, $1.length); ((char*) $1.str)[$1.length] = '\0'; - if ($1.length == 0) + if (unlikely($1.length == 0)) my_yyabort_error((ER_INVALID_ROLE, MYF(0), "")); - if (!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER)))) + if (unlikely(!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER))))) MYSQL_YYABORT; $$->user= $1; $$->host= empty_clex_str; $$->reset_auth(); - if (check_string_char_length(&$$->user, ER_USERNAME, - username_char_length, - cs, 0)) + if (unlikely(check_string_char_length(&$$->user, ER_USERNAME, + username_char_length, + cs, 0))) MYSQL_YYABORT; } | current_role @@ -16560,21 +16677,21 @@ require_list_element: SUBJECT_SYM TEXT_STRING { LEX *lex=Lex; - if (lex->x509_subject) + if (unlikely(lex->x509_subject)) 
+          if (unlikely(lex->x509_subject))
my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "SUBJECT")); lex->x509_subject=$2.str; } | ISSUER_SYM TEXT_STRING { LEX *lex=Lex; - if (lex->x509_issuer) + if (unlikely(lex->x509_issuer)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "ISSUER")); lex->x509_issuer=$2.str; } | CIPHER_SYM TEXT_STRING { LEX *lex=Lex; - if (lex->ssl_cipher) + if (unlikely(lex->ssl_cipher)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "CIPHER")); lex->ssl_cipher=$2.str; } @@ -16584,11 +16701,11 @@ grant_ident: '*' { LEX *lex= Lex; - if (lex->copy_db_to(&lex->current_select->db)) + if (unlikely(lex->copy_db_to(&lex->current_select->db))) MYSQL_YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = DB_ACLS & ~GRANT_ACL; - else if (lex->columns.elements) + else if (unlikely(lex->columns.elements)) my_yyabort_error((ER_ILLEGAL_GRANT_FOR_TABLE, MYF(0))); } | ident '.' '*' @@ -16597,7 +16714,7 @@ grant_ident: lex->current_select->db= $1; if (lex->grant == GLOBAL_ACLS) lex->grant = DB_ACLS & ~GRANT_ACL; - else if (lex->columns.elements) + else if (unlikely(lex->columns.elements)) my_yyabort_error((ER_ILLEGAL_GRANT_FOR_TABLE, MYF(0))); } | '*' '.' '*' @@ -16606,14 +16723,15 @@ grant_ident: lex->current_select->db= null_clex_str; if (lex->grant == GLOBAL_ACLS) lex->grant= GLOBAL_ACLS & ~GRANT_ACL; - else if (lex->columns.elements) + else if (unlikely(lex->columns.elements)) my_yyabort_error((ER_ILLEGAL_GRANT_FOR_TABLE, MYF(0))); } | table_ident { LEX *lex=Lex; - if (!lex->current_select->add_table_to_list(thd, $1,NULL, - TL_OPTION_UPDATING)) + if (unlikely(!lex->current_select-> + add_table_to_list(thd, $1,NULL, + TL_OPTION_UPDATING))) MYSQL_YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = TABLE_ACLS & ~GRANT_ACL; @@ -16623,12 +16741,12 @@ grant_ident: user_list: user { - if (Lex->users_list.push_back($1, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($1, thd->mem_root))) MYSQL_YYABORT; } | user_list ',' user { - if (Lex->users_list.push_back($3, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($3, thd->mem_root))) MYSQL_YYABORT; } ; @@ -16636,12 +16754,12 @@ user_list: grant_list: grant_user { - if (Lex->users_list.push_back($1, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($1, thd->mem_root))) MYSQL_YYABORT; } | grant_list ',' grant_user { - if (Lex->users_list.push_back($3, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($3, thd->mem_root))) MYSQL_YYABORT; } ; @@ -16649,12 +16767,12 @@ grant_list: user_and_role_list: user_or_role { - if (Lex->users_list.push_back($1, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($1, thd->mem_root))) MYSQL_YYABORT; } | user_and_role_list ',' user_or_role { - if (Lex->users_list.push_back($3, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($3, thd->mem_root))) MYSQL_YYABORT; } ; @@ -16667,7 +16785,7 @@ grant_user: { $$= $1; $1->pwtext= $4; - if (Lex->sql_command == SQLCOM_REVOKE) + if (unlikely(Lex->sql_command == SQLCOM_REVOKE)) MYSQL_YYABORT; } | user IDENTIFIED_SYM BY PASSWORD_SYM TEXT_STRING @@ -16709,7 +16827,7 @@ column_list_id: ident { String *new_str= new (thd->mem_root) String((const char*) $1.str,$1.length,system_charset_info); - if (new_str == NULL) + if (unlikely(new_str == NULL)) MYSQL_YYABORT; List_iterator iter(Lex->columns); class LEX_COLUMN *point; @@ -16727,7 +16845,7 @@ column_list_id: { LEX_COLUMN *col= (new (thd->mem_root) LEX_COLUMN(*new_str,lex->which_columns)); - if (col == NULL) + if (unlikely(col == NULL)) MYSQL_YYABORT; lex->columns.push_back(col, thd->mem_root); } @@ -16936,7 +17054,7 @@ union_clause: 
union_list: unit_type_decl union_option { - if (Lex->add_select_to_union_list((bool)$2, $1, TRUE)) + if (unlikely(Lex->add_select_to_union_list((bool)$2, $1, TRUE))) MYSQL_YYABORT; } union_list_part2 @@ -16952,7 +17070,7 @@ union_list: union_list_view: unit_type_decl union_option { - if (Lex->add_select_to_union_list((bool)$2, $1, TRUE)) + if (unlikely(Lex->add_select_to_union_list((bool)$2, $1, TRUE))) MYSQL_YYABORT; } query_expression_body_view @@ -16993,7 +17111,7 @@ order_or_limit: union_head_non_top: unit_type_decl union_option { - if (Lex->add_select_to_union_list((bool)$2, $1, FALSE)) + if (unlikely(Lex->add_select_to_union_list((bool)$2, $1, FALSE))) MYSQL_YYABORT; } ; @@ -17008,27 +17126,20 @@ simple_table: query_specification { $$= $1; } | table_value_constructor { $$= $1; } ; - + table_value_constructor: VALUES { - LEX *lex=Lex; - lex->field_list.empty(); - lex->many_values.empty(); - lex->insert_list=0; + Lex->tvc_start(); } values_list - { - LEX *lex=Lex; - $$= lex->current_select; - mysql_init_select(Lex); - if (!($$->tvc= - new (lex->thd->mem_root) table_value_constr(lex->many_values, $$, $$->options))) - MYSQL_YYABORT; - lex->many_values.empty(); + { + $$= Lex->current_select; + if (Lex->tvc_finalize()) + MYSQL_YYABORT; } ; - + /* Corresponds to the SQL Standard ::= @@ -17052,7 +17163,7 @@ query_term_union_not_ready: query_term_union_ready: simple_table opt_select_lock_type { $$= $1; } - | '(' select_paren_derived ')' { $$= $2; } + | '(' select_paren_derived ')' { $$= $2; } ; query_expression_body: @@ -17073,8 +17184,8 @@ subselect: subselect_start: { LEX *lex=Lex; - if (!lex->expr_allows_subselect || - lex->sql_command == (int)SQLCOM_PURGE) + if (unlikely(!lex->expr_allows_subselect || + lex->sql_command == (int)SQLCOM_PURGE)) { thd->parse_error(); MYSQL_YYABORT; @@ -17086,7 +17197,7 @@ subselect_start: (SELECT .. ) UNION ... becomes SELECT * FROM ((SELECT ...) UNION ...) */ - if (mysql_new_select(Lex, 1, NULL)) + if (unlikely(mysql_new_select(Lex, 1, NULL))) MYSQL_YYABORT; } ; @@ -17131,7 +17242,7 @@ query_expression_option: STRAIGHT_JOIN { Select->options|= SELECT_STRAIGHT_JOIN; } | HIGH_PRIORITY { - if (check_simple_select()) + if (unlikely(Lex->check_simple_select(&$1))) MYSQL_YYABORT; YYPS->m_lock_type= TL_READ_HIGH_PRIORITY; YYPS->m_mdl_type= MDL_SHARED_READ; @@ -17142,13 +17253,13 @@ query_expression_option: | SQL_BIG_RESULT { Select->options|= SELECT_BIG_RESULT; } | SQL_BUFFER_RESULT { - if (check_simple_select()) + if (unlikely(Lex->check_simple_select(&$1))) MYSQL_YYABORT; Select->options|= OPTION_BUFFER_RESULT; } | SQL_CALC_FOUND_ROWS { - if (check_simple_select()) + if (unlikely(Lex->check_simple_select(&$1))) MYSQL_YYABORT; Select->options|= OPTION_FOUND_ROWS; } @@ -17307,7 +17418,7 @@ trigger_tail: remember_name opt_if_not_exists { - if (Lex->add_create_options_with_check($2)) + if (unlikely(Lex->add_create_options_with_check($2))) MYSQL_YYABORT; } sp_name @@ -17320,14 +17431,9 @@ trigger_tail: } table_ident /* $10 */ FOR_SYM - remember_name /* $13 */ - { /* $14 */ - /* - FOR token is already passed through (see 'case FOR_SYM' in sql_lex.cc), - so we use _prev() to get it back. 
- */ - DBUG_ASSERT(YYLIP->lookahead_token >= 0); - Lex->raw_trg_on_table_name_end= YYLIP->get_tok_start_prev(); + remember_name /* $12 */ + { /* $13 */ + Lex->raw_trg_on_table_name_end= YYLIP->get_tok_start(); } EACH_SYM ROW_SYM @@ -17339,7 +17445,7 @@ trigger_tail: LEX *lex= thd->lex; Lex_input_stream *lip= YYLIP; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_RECURSIVE_CREATE, MYF(0), "TRIGGER")); lex->stmt_definition_begin= $1; @@ -17349,7 +17455,7 @@ trigger_tail: (*static_cast(&lex->trg_chistics))= ($17); lex->trg_chistics.ordering_clause_end= lip->get_cpp_ptr(); - if (!lex->make_sp_head(thd, $4, &sp_handler_trigger)) + if (unlikely(!lex->make_sp_head(thd, $4, &sp_handler_trigger))) MYSQL_YYABORT; lex->sphead->set_body_start(thd, lip->get_cpp_tok_start()); @@ -17363,7 +17469,7 @@ trigger_tail: sp->set_stmt_end(thd); sp->restore_thd_mem_root(thd); - if (sp->is_not_allowed_in_function("trigger")) + if (unlikely(sp->is_not_allowed_in_function("trigger"))) MYSQL_YYABORT; /* @@ -17371,11 +17477,11 @@ trigger_tail: sp_proc_stmt alternatives are not saving/restoring LEX, so lex->query_tables can be wiped out. */ - if (!lex->select_lex.add_table_to_list(thd, $10, - (LEX_CSTRING*) 0, - TL_OPTION_UPDATING, - TL_READ_NO_INSERT, - MDL_SHARED_NO_WRITE)) + if (unlikely(!lex->select_lex. + add_table_to_list(thd, $10, (LEX_CSTRING*) 0, + TL_OPTION_UPDATING, + TL_READ_NO_INSERT, + MDL_SHARED_NO_WRITE))) MYSQL_YYABORT; } ; @@ -17391,9 +17497,9 @@ udf_tail: RETURNS_SYM udf_type SONAME_SYM TEXT_STRING_sys { LEX *lex= thd->lex; - if (lex->add_create_options_with_check($1)) + if (unlikely(lex->add_create_options_with_check($1))) MYSQL_YYABORT; - if (is_native_function(thd, & $2)) + if (unlikely(is_native_function(thd, & $2))) my_yyabort_error((ER_NATIVE_FCT_NAME_COLLISION, MYF(0), $2.str)); lex->sql_command= SQLCOM_CREATE_FUNCTION; lex->udf.name= $2; @@ -17413,7 +17519,8 @@ sf_return_type: } type_with_opt_collate { - if (Lex->sphead->fill_field_definition(thd, Lex->last_field)) + if (unlikely(Lex->sphead->fill_field_definition(thd, + Lex->last_field))) MYSQL_YYABORT; } ; @@ -17423,8 +17530,8 @@ sf_tail: sp_name { Lex->sql_command= SQLCOM_CREATE_SPFUNCTION; - if (!Lex->make_sp_head_no_recursive(thd, $1, $2, - &sp_handler_function)) + if (unlikely(!Lex->make_sp_head_no_recursive(thd, $1, $2, + &sp_handler_function))) MYSQL_YYABORT; } sp_parenthesized_fdparam_list @@ -17439,7 +17546,7 @@ sf_tail: } sp_proc_stmt_in_returns_clause { - if (Lex->sp_body_finalize_function(thd)) + if (unlikely(Lex->sp_body_finalize_function(thd))) MYSQL_YYABORT; } ; @@ -17448,8 +17555,8 @@ sp_tail: opt_if_not_exists sp_name { Lex->sql_command= SQLCOM_CREATE_PROCEDURE; - if (!Lex->make_sp_head_no_recursive(thd, $1, $2, - &sp_handler_procedure)) + if (unlikely(!Lex->make_sp_head_no_recursive(thd, $1, $2, + &sp_handler_procedure))) MYSQL_YYABORT; } sp_parenthesized_pdparam_list @@ -17460,7 +17567,7 @@ sp_tail: } sp_proc_stmt { - if (Lex->sp_body_finalize_procedure(thd)) + if (unlikely(Lex->sp_body_finalize_procedure(thd))) MYSQL_YYABORT; } ; @@ -17499,9 +17606,9 @@ opt_format_xid: /* empty */ { $$= false; } | FORMAT_SYM '=' ident_or_text { - if (!my_strcasecmp(system_charset_info, $3.str, "SQL")) + if (lex_string_eq(&$3, STRING_WITH_LEN("SQL"))) $$= true; - else if (!my_strcasecmp(system_charset_info, $3.str, "RAW")) + else if (lex_string_eq(&$3, STRING_WITH_LEN("RAW"))) $$= false; else { @@ -17516,21 +17623,21 @@ xid: text_string { MYSQL_YYABORT_UNLESS($1->length() <= MAXGTRIDSIZE); - if (!(Lex->xid=(XID 
*)thd->alloc(sizeof(XID)))) + if (unlikely(!(Lex->xid=(XID *)thd->alloc(sizeof(XID))))) MYSQL_YYABORT; Lex->xid->set(1L, $1->ptr(), $1->length(), 0, 0); } | text_string ',' text_string { MYSQL_YYABORT_UNLESS($1->length() <= MAXGTRIDSIZE && $3->length() <= MAXBQUALSIZE); - if (!(Lex->xid=(XID *)thd->alloc(sizeof(XID)))) + if (unlikely(!(Lex->xid=(XID *)thd->alloc(sizeof(XID))))) MYSQL_YYABORT; Lex->xid->set(1L, $1->ptr(), $1->length(), $3->ptr(), $3->length()); } | text_string ',' text_string ',' ulong_num { MYSQL_YYABORT_UNLESS($1->length() <= MAXGTRIDSIZE && $3->length() <= MAXBQUALSIZE); - if (!(Lex->xid=(XID *)thd->alloc(sizeof(XID)))) + if (unlikely(!(Lex->xid=(XID *)thd->alloc(sizeof(XID))))) MYSQL_YYABORT; Lex->xid->set($5, $1->ptr(), $1->length(), $3->ptr(), $3->length()); } diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy index ec2aeeaefba..f0db33f67ae 100644 --- a/sql/sql_yacc_ora.yy +++ b/sql/sql_yacc_ora.yy @@ -37,7 +37,7 @@ #include "sql_priv.h" #include "sql_parse.h" /* comp_*_creator */ #include "sql_table.h" /* primary_key_name */ -#include "sql_partition.h" /* mem_alloc_error, partition_info, HASH_PARTITION */ +#include "sql_partition.h" /* partition_info, HASH_PARTITION */ #include "sql_acl.h" /* *_ACL */ #include "sql_class.h" /* Key_part_spec, enum_filetype, Diag_condition_item_name */ #include "slave.h" @@ -67,6 +67,7 @@ #include "lex_token.h" #include "sql_lex.h" #include "sql_sequence.h" +#include "my_base.h" /* this is to get the bison compilation windows warnings out */ #ifdef _MSC_VER @@ -79,7 +80,7 @@ int yylex(void *yylval, void *yythd); #define yyoverflow(A,B,C,D,E,F) \ { \ size_t val= *(F); \ - if (my_yyoverflow((B), (D), &val)) \ + if (unlikely(my_yyoverflow((B), (D), &val))) \ { \ yyerror(thd, (char*) (A)); \ return 2; \ @@ -98,7 +99,7 @@ int yylex(void *yylval, void *yythd); } while (0) #define MYSQL_YYABORT_UNLESS(A) \ - if (!(A)) \ + if (unlikely(!(A))) \ { \ thd->parse_error(); \ MYSQL_YYABORT; \ @@ -155,7 +156,7 @@ void ORAerror(THD *thd, const char *s) #define bincmp_collation(X,Y) \ do \ { \ - if (Lex->set_bincmp(X,Y)) \ + if (unlikely(Lex->set_bincmp(X,Y))) \ MYSQL_YYABORT; \ } while(0) @@ -169,9 +170,10 @@ void ORAerror(THD *thd, const char *s) /* structs */ LEX_CSTRING lex_str; - LEX_SYMBOL symbol; + Lex_ident_cli_st kwd; + Lex_ident_cli_st ident_cli; + Lex_ident_sys_st ident_sys; Lex_string_with_metadata_st lex_string_with_metadata; - Lex_string_with_pos_st lex_string_with_pos; Lex_spblock_st spblock; Lex_spblock_handlers_st spblock_handlers; Lex_length_and_dec_st Lex_length_and_dec; @@ -186,6 +188,7 @@ void ORAerror(THD *thd, const char *s) LEX_CSTRING name; uint offset; } sp_cursor_name_and_offset; + vers_history_point_t vers_history_point; /* pointers */ Create_field *create_field; @@ -267,6 +270,8 @@ void ORAerror(THD *thd, const char *s) enum Window_frame::Frame_exclusion frame_exclusion; enum trigger_order_type trigger_action_order_type; DDL_options_st object_ddl_options; + enum vers_sys_type_t vers_range_unit; + enum Column_definition::enum_column_versioning vers_column_versioning; } %{ @@ -277,10 +282,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %parse-param { THD *thd } %lex-param { THD *thd } /* - Currently there are 104 shift/reduce conflicts. + Currently there are 63 shift/reduce conflicts. We should not introduce new conflicts any more. */ -%expect 104 +%expect 63 /* Comments for TOKENS. 
@@ -300,679 +305,277 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); This makes the code grep-able, and helps maintenance. */ - + + +/* + Reserved keywords and operators +*/ %token ABORT_SYM /* INTERNAL (used in lex) */ %token ACCESSIBLE_SYM -%token ACTION /* SQL-2003-N */ %token ADD /* SQL-2003-R */ -%token ADMIN_SYM /* SQL-2003-N */ -%token ADDDATE_SYM /* MYSQL-FUNC */ -%token AFTER_SYM /* SQL-2003-N */ -%token AGAINST -%token AGGREGATE_SYM -%token ALGORITHM_SYM %token ALL /* SQL-2003-R */ %token ALTER /* SQL-2003-R */ -%token ALWAYS_SYM %token ANALYZE_SYM %token AND_AND_SYM /* OPERATOR */ %token AND_SYM /* SQL-2003-R */ -%token ANY_SYM /* SQL-2003-R */ %token AS /* SQL-2003-R */ %token ASC /* SQL-2003-N */ -%token ASCII_SYM /* MYSQL-FUNC */ %token ASENSITIVE_SYM /* FUTURE-USE */ -%token AT_SYM /* SQL-2003-R */ -%token ATOMIC_SYM /* SQL-2003-R */ -%token AUTHORS_SYM -%token AUTOEXTEND_SIZE_SYM -%token AUTO_INC -%token AUTO_SYM -%token AVG_ROW_LENGTH -%token AVG_SYM /* SQL-2003-N */ -%token BACKUP_SYM %token BEFORE_SYM /* SQL-2003-N */ -%token BEGIN_SYM /* SQL-2003-R */ %token BETWEEN_SYM /* SQL-2003-R */ %token BIGINT /* SQL-2003-R */ %token BINARY /* SQL-2003-R */ -%token BINLOG_SYM %token BIN_NUM %token BIT_AND /* MYSQL-FUNC */ %token BIT_OR /* MYSQL-FUNC */ -%token BIT_SYM /* MYSQL-FUNC */ %token BIT_XOR /* MYSQL-FUNC */ %token BLOB_SYM /* SQL-2003-R */ -%token BLOCK_SYM -%token BODY_SYM /* Oracle-R */ -%token BOOLEAN_SYM /* SQL-2003-R */ -%token BOOL_SYM %token BOTH /* SQL-2003-R */ -%token BTREE_SYM %token BY /* SQL-2003-R */ -%token BYTE_SYM -%token CACHE_SYM %token CALL_SYM /* SQL-2003-R */ %token CASCADE /* SQL-2003-N */ -%token CASCADED /* SQL-2003-R */ %token CASE_SYM /* SQL-2003-R */ %token CAST_SYM /* SQL-2003-R */ -%token CATALOG_NAME_SYM /* SQL-2003-N */ -%token CHAIN_SYM /* SQL-2003-N */ %token CHANGE -%token CHANGED -%token CHARSET %token CHAR_SYM /* SQL-2003-R */ -%token CHECKPOINT_SYM -%token CHECKSUM_SYM %token CHECK_SYM /* SQL-2003-R */ -%token CIPHER_SYM -%token CLASS_ORIGIN_SYM /* SQL-2003-N */ -%token CLIENT_SYM -%token CLOSE_SYM /* SQL-2003-R */ -%token CLOB /* SQL-2003-R */ -%token COALESCE /* SQL-2003-N */ -%token CODE_SYM %token COLLATE_SYM /* SQL-2003-R */ -%token COLLATION_SYM /* SQL-2003-N */ -%token COLUMNS -%token COLUMN_ADD_SYM -%token COLUMN_CHECK_SYM -%token COLUMN_CREATE_SYM -%token COLUMN_DELETE_SYM -%token COLUMN_GET_SYM -%token COLUMN_SYM /* SQL-2003-R */ -%token COLUMN_NAME_SYM /* SQL-2003-N */ -%token COMMENT_SYM -%token COMMITTED_SYM /* SQL-2003-N */ -%token COMMIT_SYM /* SQL-2003-R */ -%token COMPACT_SYM -%token COMPLETION_SYM -%token COMPRESSED_SYM -%token CONCURRENT %token CONDITION_SYM /* SQL-2003-R, SQL-2008-R */ -%token CONNECTION_SYM -%token CONSISTENT_SYM %token CONSTRAINT /* SQL-2003-R */ -%token CONSTRAINT_CATALOG_SYM /* SQL-2003-N */ -%token CONSTRAINT_NAME_SYM /* SQL-2003-N */ -%token CONSTRAINT_SCHEMA_SYM /* SQL-2003-N */ -%token CONTAINS_SYM /* SQL-2003-N */ -%token CONTEXT_SYM %token CONTINUE_SYM /* SQL-2003-R */ -%token CONTRIBUTORS_SYM %token CONVERT_SYM /* SQL-2003-N */ %token COUNT_SYM /* SQL-2003-N */ -%token CPU_SYM %token CREATE /* SQL-2003-R */ %token CROSS /* SQL-2003-R */ -%token CUBE_SYM /* SQL-2003-R */ %token CUME_DIST_SYM %token CURDATE /* MYSQL-FUNC */ -%token CURRENT_SYM /* SQL-2003-R */ %token CURRENT_USER /* SQL-2003-R */ %token CURRENT_ROLE /* SQL-2003-R */ -%token CURRENT_POS_SYM %token CURSOR_SYM /* SQL-2003-R */ -%token CURSOR_NAME_SYM /* SQL-2003-N */ %token CURTIME /* MYSQL-FUNC */ 
-%token CYCLE_SYM %token DATABASE %token DATABASES -%token DATAFILE_SYM -%token DATA_SYM /* SQL-2003-N */ -%token DATETIME %token DATE_ADD_INTERVAL /* MYSQL-FUNC */ -%token DATE_FORMAT_SYM /* MYSQL-FUNC */ %token DATE_SUB_INTERVAL /* MYSQL-FUNC */ -%token DATE_SYM /* SQL-2003-R */ %token DAY_HOUR_SYM %token DAY_MICROSECOND_SYM %token DAY_MINUTE_SYM %token DAY_SECOND_SYM -%token DAY_SYM /* SQL-2003-R */ -%token DEALLOCATE_SYM /* SQL-2003-R */ %token DECIMAL_NUM %token DECIMAL_SYM /* SQL-2003-R */ %token DECLARE_SYM /* SQL-2003-R */ -%token DECODE_SYM /* Oracle function, non-reserved */ %token DEFAULT /* SQL-2003-R */ -%token DEFINER_SYM -%token DELAYED_SYM -%token DELAY_KEY_WRITE_SYM %token DELETE_DOMAIN_ID_SYM %token DELETE_SYM /* SQL-2003-R */ %token DENSE_RANK_SYM %token DESC /* SQL-2003-N */ %token DESCRIBE /* SQL-2003-R */ -%token DES_KEY_FILE %token DETERMINISTIC_SYM /* SQL-2003-R */ -%token DIAGNOSTICS_SYM /* SQL-2003-N */ -%token DIRECTORY_SYM -%token DISABLE_SYM -%token DISCARD -%token DISK_SYM %token DISTINCT /* SQL-2003-R */ %token DIV_SYM %token DOUBLE_SYM /* SQL-2003-R */ %token DO_DOMAIN_IDS_SYM -%token DO_SYM %token DOT_DOT_SYM %token DROP /* SQL-2003-R */ %token DUAL_SYM -%token DUMPFILE -%token DUPLICATE_SYM -%token DYNAMIC_SYM /* SQL-2003-R */ %token EACH_SYM /* SQL-2003-R */ %token ELSE /* SQL-2003-R */ %token ELSEIF_SYM -%token ELSIF_SYM /* Oracle, reserved in PL/SQL*/ -%token ENABLE_SYM %token ENCLOSED -%token END /* SQL-2003-R */ -%token ENDS_SYM %token END_OF_INPUT /* INTERNAL */ -%token ENGINES_SYM -%token ENGINE_SYM -%token ENUM %token EQUAL_SYM /* OPERATOR */ -%token ERROR_SYM -%token ERRORS %token ESCAPED -%token ESCAPE_SYM /* SQL-2003-R */ -%token EVENTS_SYM -%token EVENT_SYM -%token EVERY_SYM /* SQL-2003-N */ -%token EXCHANGE_SYM -%token EXAMINED_SYM %token EXCEPT_SYM /* SQL-2003-R */ -%token EXCLUDE_SYM /* SQL-2011-N */ -%token EXECUTE_SYM /* SQL-2003-R */ -%token EXCEPTION_SYM /* SQL-2003-N, Oracle-PLSQL-R */ %token EXISTS /* SQL-2003-R */ -%token EXIT_SYM -%token EXPANSION_SYM -%token EXPORT_SYM -%token EXTENDED_SYM -%token EXTENT_SIZE_SYM %token EXTRACT_SYM /* SQL-2003-N */ %token FALSE_SYM /* SQL-2003-R */ -%token FAST_SYM -%token FAULTS_SYM %token FETCH_SYM /* SQL-2003-R */ -%token FILE_SYM %token FIRST_VALUE_SYM /* SQL-2011 */ -%token FIRST_SYM /* SQL-2003-N */ -%token FIXED_SYM %token FLOAT_NUM %token FLOAT_SYM /* SQL-2003-R */ -%token FLUSH_SYM -%token FOLLOWS_SYM /* MYSQL trigger*/ -%token FOLLOWING_SYM /* SQL-2011-N */ -%token FORCE_SYM %token FOREIGN /* SQL-2003-R */ %token FOR_SYM /* SQL-2003-R */ %token FOR_SYSTEM_TIME_SYM /* INTERNAL */ -%token FORMAT_SYM -%token FOUND_SYM /* SQL-2003-R */ %token FROM -%token FULL /* SQL-2003-R */ %token FULLTEXT_SYM -%token FUNCTION_SYM /* SQL-2003-R */ %token GE -%token GENERAL -%token GENERATED_SYM -%token GEOMETRYCOLLECTION -%token GEOMETRY_SYM -%token GET_FORMAT /* MYSQL-FUNC */ -%token GET_SYM /* SQL-2003-R */ -%token GLOBAL_SYM /* SQL-2003-R */ -%token GOTO_SYM /* Oracle, reserved in PL/SQL*/ %token GRANT /* SQL-2003-R */ -%token GRANTS %token GROUP_SYM /* SQL-2003-R */ %token GROUP_CONCAT_SYM %token LAG_SYM /* SQL-2011 */ %token LEAD_SYM /* SQL-2011 */ -%token HANDLER_SYM -%token HARD_SYM -%token HASH_SYM %token HAVING /* SQL-2003-R */ -%token HELP_SYM %token HEX_NUM %token HEX_STRING -%token HIGH_PRIORITY -%token HISTORY_SYM /* MYSQL */ -%token HOST_SYM -%token HOSTS_SYM %token HOUR_MICROSECOND_SYM %token HOUR_MINUTE_SYM %token HOUR_SECOND_SYM -%token HOUR_SYM /* SQL-2003-R */ -%token ID_SYM /* MYSQL */ 
%token IDENT -%token IDENTIFIED_SYM %token IDENT_QUOTED %token IF_SYM %token IGNORE_DOMAIN_IDS_SYM %token IGNORE_SYM -%token IGNORE_SERVER_IDS_SYM -%token IMMEDIATE_SYM /* SQL-2003-R */ -%token IMPORT -%token INCREMENT_SYM -%token INDEXES %token INDEX_SYM %token INFILE -%token INITIAL_SIZE_SYM %token INNER_SYM /* SQL-2003-R */ %token INOUT_SYM /* SQL-2003-R */ %token INSENSITIVE_SYM /* SQL-2003-R */ %token INSERT /* SQL-2003-R */ -%token INSERT_METHOD -%token INSTALL_SYM %token INTERSECT_SYM /* SQL-2003-R */ %token INTERVAL_SYM /* SQL-2003-R */ %token INTO /* SQL-2003-R */ %token INT_SYM /* SQL-2003-R */ -%token INVOKER_SYM %token IN_SYM /* SQL-2003-R */ -%token IO_SYM -%token IPC_SYM %token IS /* SQL-2003-R */ -%token ISOLATION /* SQL-2003-R */ -%token ISOPEN_SYM /* Oracle-N */ -%token ISSUER_SYM %token ITERATE_SYM -%token INVISIBLE_SYM %token JOIN_SYM /* SQL-2003-R */ -%token JSON_SYM %token KEYS -%token KEY_BLOCK_SIZE %token KEY_SYM /* SQL-2003-N */ %token KILL_SYM -%token LANGUAGE_SYM /* SQL-2003-R */ -%token LAST_SYM /* SQL-2003-N */ -%token LAST_VALUE -%token LASTVAL_SYM /* PostgreSQL sequence function */ %token LE /* OPERATOR */ %token LEADING /* SQL-2003-R */ -%token LEAVES %token LEAVE_SYM %token LEFT /* SQL-2003-R */ -%token LESS_SYM -%token LEVEL_SYM %token LEX_HOSTNAME %token LIKE /* SQL-2003-R */ %token LIMIT %token LINEAR_SYM %token LINES -%token LINESTRING -%token LIST_SYM %token LOAD -%token LOCAL_SYM /* SQL-2003-R */ %token LOCATOR_SYM /* SQL-2003-N */ -%token LOCKS_SYM %token LOCK_SYM -%token LOGFILE_SYM -%token LOGS_SYM %token LONGBLOB %token LONGTEXT %token LONG_NUM %token LONG_SYM %token LOOP_SYM %token LOW_PRIORITY -%token MASTER_CONNECT_RETRY_SYM -%token MASTER_DELAY_SYM -%token MASTER_GTID_POS_SYM -%token MASTER_HOST_SYM -%token MASTER_LOG_FILE_SYM -%token MASTER_LOG_POS_SYM -%token MASTER_PASSWORD_SYM -%token MASTER_PORT_SYM -%token MASTER_SERVER_ID_SYM -%token MASTER_SSL_CAPATH_SYM -%token MASTER_SSL_CA_SYM -%token MASTER_SSL_CERT_SYM -%token MASTER_SSL_CIPHER_SYM -%token MASTER_SSL_CRL_SYM -%token MASTER_SSL_CRLPATH_SYM -%token MASTER_SSL_KEY_SYM -%token MASTER_SSL_SYM %token MASTER_SSL_VERIFY_SERVER_CERT_SYM -%token MASTER_SYM -%token MASTER_USER_SYM -%token MASTER_USE_GTID_SYM -%token MASTER_HEARTBEAT_PERIOD_SYM %token MATCH /* SQL-2003-R */ -%token MAX_CONNECTIONS_PER_HOUR -%token MAX_QUERIES_PER_HOUR -%token MAX_ROWS -%token MAX_SIZE_SYM %token MAX_SYM /* SQL-2003-N */ -%token MAX_UPDATES_PER_HOUR -%token MAX_STATEMENT_TIME_SYM -%token MAX_USER_CONNECTIONS_SYM %token MAXVALUE_SYM /* SQL-2003-N */ %token MEDIAN_SYM %token MEDIUMBLOB %token MEDIUMINT %token MEDIUMTEXT -%token MEDIUM_SYM -%token MEMORY_SYM -%token MERGE_SYM /* SQL-2003-R */ -%token MESSAGE_TEXT_SYM /* SQL-2003-N */ -%token MICROSECOND_SYM /* MYSQL-FUNC */ -%token MIGRATE_SYM %token MINUTE_MICROSECOND_SYM %token MINUTE_SECOND_SYM -%token MINUTE_SYM /* SQL-2003-R */ -%token MINVALUE_SYM -%token MIN_ROWS %token MIN_SYM /* SQL-2003-N */ -%token MODE_SYM %token MODIFIES_SYM /* SQL-2003-R */ -%token MODIFY_SYM %token MOD_SYM /* SQL-2003-N */ -%token MONTH_SYM /* SQL-2003-R */ -%token MULTILINESTRING -%token MULTIPOINT -%token MULTIPOLYGON -%token MUTEX_SYM -%token MYSQL_SYM -%token MYSQL_ERRNO_SYM -%token NAMES_SYM /* SQL-2003-N */ -%token NAME_SYM /* SQL-2003-N */ -%token NATIONAL_SYM /* SQL-2003-R */ +%token MYSQL_CONCAT_SYM /* OPERATOR */ %token NATURAL /* SQL-2003-R */ %token NCHAR_STRING -%token NCHAR_SYM /* SQL-2003-R */ %token NE /* OPERATOR */ %token NEG -%token NEW_SYM /* SQL-2003-R */ 
-%token NEXT_SYM /* SQL-2003-N */ -%token NEXTVAL_SYM /* PostgreSQL sequence function */ -%token NOCACHE_SYM -%token NOCYCLE_SYM -%token NODEGROUP_SYM -%token NONE_SYM /* SQL-2003-R */ %token NOT2_SYM %token NOT_SYM /* SQL-2003-R */ -%token NOTFOUND_SYM /* Oracle-R */ %token NOW_SYM -%token NO_SYM /* SQL-2003-R */ -%token NOMAXVALUE_SYM -%token NOMINVALUE_SYM -%token NO_WAIT_SYM -%token NOWAIT_SYM %token NO_WRITE_TO_BINLOG %token NTILE_SYM %token NULL_SYM /* SQL-2003-R */ %token NUM -%token NUMBER_SYM /* SQL-2003-N */ %token NUMERIC_SYM /* SQL-2003-R */ %token NTH_VALUE_SYM /* SQL-2011 */ -%token NVARCHAR_SYM -%token OF_SYM /* SQL-1992-R, Oracle-R */ -%token OFFSET_SYM -%token OLD_PASSWORD_SYM %token ON /* SQL-2003-R */ -%token ONE_SYM -%token ONLY_SYM /* SQL-2003-R */ -%token ONLINE_SYM -%token OPEN_SYM /* SQL-2003-R */ %token OPTIMIZE -%token OPTIONS_SYM -%token OPTION /* SQL-2003-N */ %token OPTIONALLY +%token ORACLE_CONCAT_SYM /* INTERNAL */ %token OR2_SYM %token ORDER_SYM /* SQL-2003-R */ -%token OR_OR_SYM /* OPERATOR */ %token OR_SYM /* SQL-2003-R */ -%token OTHERS_SYM /* SQL-2011-N */ %token OUTER %token OUTFILE %token OUT_SYM /* SQL-2003-R */ %token OVER_SYM -%token OWNER_SYM -%token PACKAGE_SYM /* Oracle-R */ -%token PACK_KEYS_SYM -%token PAGE_SYM %token PAGE_CHECKSUM_SYM %token PARAM_MARKER -%token PARSER_SYM %token PARSE_VCOL_EXPR_SYM -%token PARTIAL /* SQL-2003-N */ %token PARTITION_SYM /* SQL-2003-R */ -%token PARTITIONS_SYM -%token PARTITIONING_SYM -%token PASSWORD_SYM %token PERCENT_RANK_SYM %token PERCENTILE_CONT_SYM %token PERCENTILE_DISC_SYM -%token PERIOD_SYM /* SQL-2011-R */ -%token PERSISTENT_SYM -%token PHASE_SYM -%token PLUGINS_SYM -%token PLUGIN_SYM -%token POINT_SYM -%token POLYGON -%token PORT_SYM %token POSITION_SYM /* SQL-2003-N */ -%token PRECEDES_SYM /* MYSQL */ -%token PRECEDING_SYM /* SQL-2011-N */ %token PRECISION /* SQL-2003-R */ -%token PREPARE_SYM /* SQL-2003-R */ -%token PRESERVE_SYM -%token PREV_SYM -%token PREVIOUS_SYM %token PRIMARY_SYM /* SQL-2003-R */ -%token PRIVILEGES /* SQL-2003-N */ %token PROCEDURE_SYM /* SQL-2003-R */ -%token PROCESS -%token PROCESSLIST_SYM -%token PROFILE_SYM -%token PROFILES_SYM -%token PROXY_SYM %token PURGE -%token QUARTER_SYM -%token QUERY_SYM -%token QUICK -%token RAISE_SYM /* Oracle-PLSQL-R */ %token RANGE_SYM /* SQL-2003-R */ %token RANK_SYM -%token RAW /* Oracle */ %token READS_SYM /* SQL-2003-R */ -%token READ_ONLY_SYM %token READ_SYM /* SQL-2003-N */ %token READ_WRITE_SYM %token REAL /* SQL-2003-R */ -%token REBUILD_SYM -%token RECOVER_SYM %token RECURSIVE_SYM -%token REDOFILE_SYM -%token REDO_BUFFER_SIZE_SYM -%token REDUNDANT_SYM +%token REF_SYSTEM_ID_SYM %token REFERENCES /* SQL-2003-R */ %token REGEXP -%token RELAY -%token RELAYLOG_SYM -%token RELAY_LOG_FILE_SYM -%token RELAY_LOG_POS_SYM -%token RELAY_THREAD %token RELEASE_SYM /* SQL-2003-R */ -%token RELOAD -%token REMOVE_SYM %token RENAME -%token REORGANIZE_SYM -%token REPAIR -%token REPEATABLE_SYM /* SQL-2003-N */ %token REPEAT_SYM /* MYSQL-FUNC */ %token REPLACE /* MYSQL-FUNC */ -%token REPLICATION %token REQUIRE_SYM -%token RESET_SYM -%token RESTART_SYM %token RESIGNAL_SYM /* SQL-2003-R */ -%token RESOURCES -%token RESTORE_SYM %token RESTRICT -%token RESUME_SYM -%token RETURNED_SQLSTATE_SYM /* SQL-2003-N */ %token RETURNING_SYM -%token RETURNS_SYM /* SQL-2003-R */ %token RETURN_SYM /* SQL-2003-R */ -%token REUSE_SYM /* Oracle-R */ -%token REVERSE_SYM %token REVOKE /* SQL-2003-R */ %token RIGHT /* SQL-2003-R */ -%token ROLE_SYM -%token ROLLBACK_SYM /* 
SQL-2003-R */ -%token ROLLUP_SYM /* SQL-2003-R */ -%token ROUTINE_SYM /* SQL-2003-N */ -%token ROWCOUNT_SYM /* Oracle-N */ -%token ROW_SYM /* SQL-2003-R */ %token ROWS_SYM /* SQL-2003-R */ -%token ROWTYPE_SYM /* Oracle-PLSQL-R */ -%token ROW_COUNT_SYM /* SQL-2003-N */ -%token ROW_FORMAT_SYM %token ROW_NUMBER_SYM -%token RTREE_SYM -%token SAVEPOINT_SYM /* SQL-2003-R */ -%token SCHEDULE_SYM -%token SCHEMA_NAME_SYM /* SQL-2003-N */ %token SECOND_MICROSECOND_SYM -%token SECOND_SYM /* SQL-2003-R */ -%token SECURITY_SYM /* SQL-2003-N */ %token SELECT_SYM /* SQL-2003-R */ %token SENSITIVE_SYM /* FUTURE-USE */ %token SEPARATOR_SYM -%token SEQUENCE_SYM -%token SERIALIZABLE_SYM /* SQL-2003-N */ -%token SERIAL_SYM -%token SESSION_SYM /* SQL-2003-N */ -%token SERVER_SYM %token SERVER_OPTIONS %token SET /* SQL-2003-R */ -%token SETVAL_SYM /* PostgreSQL sequence function */ %token SET_VAR -%token SHARE_SYM %token SHIFT_LEFT /* OPERATOR */ %token SHIFT_RIGHT /* OPERATOR */ %token SHOW -%token SHUTDOWN %token SIGNAL_SYM /* SQL-2003-R */ -%token SIGNED_SYM -%token SIMPLE_SYM /* SQL-2003-N */ -%token SLAVE -%token SLAVES -%token SLAVE_POS_SYM -%token SLOW %token SMALLINT /* SQL-2003-R */ -%token SNAPSHOT_SYM -%token SOCKET_SYM -%token SOFT_SYM -%token SONAME_SYM -%token SOUNDS_SYM -%token SOURCE_SYM %token SPATIAL_SYM %token SPECIFIC_SYM /* SQL-2003-R */ %token SQLEXCEPTION_SYM /* SQL-2003-R */ %token SQLSTATE_SYM /* SQL-2003-R */ %token SQLWARNING_SYM /* SQL-2003-R */ %token SQL_BIG_RESULT -%token SQL_BUFFER_RESULT -%token SQL_CACHE_SYM -%token SQL_CALC_FOUND_ROWS -%token SQL_NO_CACHE_SYM %token SQL_SMALL_RESULT %token SQL_SYM /* SQL-2003-R */ -%token SQL_THREAD -%token REF_SYSTEM_ID_SYM %token SSL_SYM %token STARTING -%token STARTS_SYM -%token START_SYM /* SQL-2003-R */ -%token STATEMENT_SYM %token STATS_AUTO_RECALC_SYM %token STATS_PERSISTENT_SYM %token STATS_SAMPLE_PAGES_SYM -%token STATUS_SYM %token STDDEV_SAMP_SYM /* SQL-2003-N */ %token STD_SYM -%token STOP_SYM -%token STORAGE_SYM -%token STORED_SYM %token STRAIGHT_JOIN -%token STRING_SYM -%token SUBCLASS_ORIGIN_SYM /* SQL-2003-N */ -%token SUBDATE_SYM -%token SUBJECT_SYM -%token SUBPARTITIONS_SYM -%token SUBPARTITION_SYM %token SUBSTRING /* SQL-2003-N */ %token SUM_SYM /* SQL-2003-N */ -%token SUPER_SYM -%token SUSPEND_SYM -%token SWAPS_SYM -%token SWITCHES_SYM %token SYSDATE -%token SYSTEM /* SQL-2011-R */ -%token SYSTEM_TIME_SYM /* SQL-2011-R */ -%token TABLES -%token TABLESPACE %token TABLE_REF_PRIORITY %token TABLE_SYM /* SQL-2003-R */ -%token TABLE_CHECKSUM_SYM -%token TABLE_NAME_SYM /* SQL-2003-N */ -%token TEMPORARY /* SQL-2003-N */ -%token TEMPTABLE_SYM %token TERMINATED %token TEXT_STRING -%token TEXT_SYM -%token THAN_SYM %token THEN_SYM /* SQL-2003-R */ -%token TIES_SYM /* SQL-2011-N */ -%token TIMESTAMP /* SQL-2003-R */ -%token TIMESTAMP_ADD -%token TIMESTAMP_DIFF -%token TIME_SYM /* SQL-2003-R */ %token TINYBLOB %token TINYINT %token TINYTEXT %token TO_SYM /* SQL-2003-R */ %token TRAILING /* SQL-2003-R */ -%token TRANSACTION_SYM -%token TRANSACTIONAL_SYM -%token TRIGGERS_SYM %token TRIGGER_SYM /* SQL-2003-R */ %token TRIM /* SQL-2003-N */ -%token TRIM_ORACLE %token TRUE_SYM /* SQL-2003-R */ -%token TRUNCATE_SYM -%token TYPES_SYM -%token TYPE_SYM /* SQL-2003-N */ -%token UDF_RETURNS_SYM %token ULONGLONG_NUM -%token UNBOUNDED_SYM /* SQL-2011-N */ -%token UNCOMMITTED_SYM /* SQL-2003-N */ -%token UNDEFINED_SYM %token UNDERSCORE_CHARSET -%token UNDOFILE_SYM -%token UNDO_BUFFER_SIZE_SYM %token UNDO_SYM /* FUTURE-USE */ -%token UNICODE_SYM 
-%token UNINSTALL_SYM %token UNION_SYM /* SQL-2003-R */ %token UNIQUE_SYM -%token UNKNOWN_SYM /* SQL-2003-R */ %token UNLOCK_SYM %token UNSIGNED -%token UNTIL_SYM %token UPDATE_SYM /* SQL-2003-R */ -%token UPGRADE_SYM %token USAGE /* SQL-2003-N */ -%token USER_SYM /* SQL-2003-R */ -%token USE_FRM %token USE_SYM %token USING /* SQL-2003-R */ %token UTC_DATE_SYM @@ -981,83 +584,598 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %token VALUES /* SQL-2003-R */ %token VALUES_IN_SYM %token VALUES_LESS_SYM -%token VALUE_SYM /* SQL-2003-R */ %token VARBINARY %token VARCHAR /* SQL-2003-R */ -%token VARCHAR2 /* Oracle */ -%token VARIABLES %token VARIANCE_SYM %token VARYING /* SQL-2003-R */ %token VAR_SAMP_SYM -%token VERSIONING_SYM /* SQL-2011-R */ -%token VIA_SYM -%token VIEW_SYM /* SQL-2003-N */ -%token VIRTUAL_SYM -%token WAIT_SYM -%token WARNINGS -%token WEEK_SYM -%token WEIGHT_STRING_SYM %token WHEN_SYM /* SQL-2003-R */ %token WHERE /* SQL-2003-R */ -%token WINDOW_SYM %token WHILE_SYM %token WITH /* SQL-2003-R */ -%token WITHIN -%token WITHOUT /* SQL-2003-R */ %token WITH_CUBE_SYM /* INTERNAL */ %token WITH_ROLLUP_SYM /* INTERNAL */ %token WITH_SYSTEM_SYM /* INTERNAL */ -%token WORK_SYM /* SQL-2003-N */ -%token WRAPPER_SYM -%token WRITE_SYM /* SQL-2003-N */ -%token X509_SYM -%token XA_SYM -%token XML_SYM %token XOR %token YEAR_MONTH_SYM -%token YEAR_SYM /* SQL-2003-R */ %token ZEROFILL -%token IMPOSSIBLE_ACTION /* To avoid warning for yyerrlab1 */ +%token IMPOSSIBLE_ACTION /* To avoid warning for yyerrlab1 */ + + +/* + Keywords that have different reserved status in std/oracle modes. +*/ +%token BODY_SYM /* Oracle-R */ +%token COMMENT_SYM +%token ELSIF_SYM /* Oracle, reserved in PL/SQL*/ +%token GOTO_SYM /* Oracle, reserved in PL/SQL*/ +%token OTHERS_SYM /* SQL-2011-N */ +%token PACKAGE_SYM /* Oracle-R */ +%token RAISE_SYM /* Oracle-PLSQL-R */ +%token ROWTYPE_SYM /* Oracle-PLSQL-R */ + +/* + Non-reserved keywords +*/ + +%token ACTION /* SQL-2003-N */ +%token ADMIN_SYM /* SQL-2003-N */ +%token ADDDATE_SYM /* MYSQL-FUNC */ +%token AFTER_SYM /* SQL-2003-N */ +%token AGAINST +%token AGGREGATE_SYM +%token ALGORITHM_SYM +%token ALWAYS_SYM +%token ANY_SYM /* SQL-2003-R */ +%token ASCII_SYM /* MYSQL-FUNC */ +%token AT_SYM /* SQL-2003-R */ +%token ATOMIC_SYM /* SQL-2003-R */ +%token AUTHORS_SYM +%token AUTOEXTEND_SIZE_SYM +%token AUTO_INC +%token AUTO_SYM +%token AVG_ROW_LENGTH +%token AVG_SYM /* SQL-2003-N */ +%token BACKUP_SYM +%token BEGIN_SYM /* SQL-2003-R, PLSQL-R */ +%token BINLOG_SYM +%token BIT_SYM /* MYSQL-FUNC */ +%token BLOCK_SYM +%token BOOL_SYM +%token BOOLEAN_SYM /* SQL-2003-R, PLSQL-R */ +%token BTREE_SYM +%token BYTE_SYM +%token CACHE_SYM +%token CASCADED /* SQL-2003-R */ +%token CATALOG_NAME_SYM /* SQL-2003-N */ +%token CHAIN_SYM /* SQL-2003-N */ +%token CHANGED +%token CHARSET +%token CHECKPOINT_SYM +%token CHECKSUM_SYM +%token CIPHER_SYM +%token CLASS_ORIGIN_SYM /* SQL-2003-N */ +%token CLIENT_SYM +%token CLOB /* SQL-2003-R */ +%token CLOSE_SYM /* SQL-2003-R */ +%token COALESCE /* SQL-2003-N */ +%token CODE_SYM +%token COLLATION_SYM /* SQL-2003-N */ +%token COLUMNS +%token COLUMN_ADD_SYM +%token COLUMN_CHECK_SYM +%token COLUMN_CREATE_SYM +%token COLUMN_DELETE_SYM +%token COLUMN_GET_SYM +%token COLUMN_SYM /* SQL-2003-R */ +%token COLUMN_NAME_SYM /* SQL-2003-N */ +%token COMMITTED_SYM /* SQL-2003-N */ +%token COMMIT_SYM /* SQL-2003-R */ +%token COMPACT_SYM +%token COMPLETION_SYM +%token COMPRESSED_SYM +%token CONCURRENT +%token CONNECTION_SYM +%token CONSISTENT_SYM 
+%token CONSTRAINT_CATALOG_SYM /* SQL-2003-N */ +%token CONSTRAINT_NAME_SYM /* SQL-2003-N */ +%token CONSTRAINT_SCHEMA_SYM /* SQL-2003-N */ +%token CONTAINS_SYM /* SQL-2003-N */ +%token CONTEXT_SYM +%token CONTRIBUTORS_SYM +%token CPU_SYM +%token CUBE_SYM /* SQL-2003-R */ +%token CURRENT_SYM /* SQL-2003-R */ +%token CURRENT_POS_SYM +%token CURSOR_NAME_SYM /* SQL-2003-N */ +%token CYCLE_SYM +%token DATAFILE_SYM +%token DATA_SYM /* SQL-2003-N */ +%token DATETIME +%token DATE_FORMAT_SYM /* MYSQL-FUNC */ +%token DATE_SYM /* SQL-2003-R, Oracle-R, PLSQL-R */ +%token DAY_SYM /* SQL-2003-R */ +%token DEALLOCATE_SYM /* SQL-2003-R */ +%token DECODE_SYM /* Oracle function, non-reserved */ +%token DEFINER_SYM +%token DELAYED_SYM +%token DELAY_KEY_WRITE_SYM +%token DES_KEY_FILE +%token DIAGNOSTICS_SYM /* SQL-2003-N */ +%token DIRECTORY_SYM +%token DISABLE_SYM +%token DISCARD +%token DISK_SYM +%token DO_SYM +%token DUMPFILE +%token DUPLICATE_SYM +%token DYNAMIC_SYM /* SQL-2003-R */ +%token ENABLE_SYM +%token END /* SQL-2003-R, PLSQL-R */ +%token ENDS_SYM +%token ENGINES_SYM +%token ENGINE_SYM +%token ENUM +%token ERROR_SYM +%token ERRORS +%token ESCAPE_SYM /* SQL-2003-R */ +%token EVENTS_SYM +%token EVENT_SYM +%token EVERY_SYM /* SQL-2003-N */ +%token EXCHANGE_SYM +%token EXAMINED_SYM +%token EXCLUDE_SYM /* SQL-2011-N */ +%token EXECUTE_SYM /* SQL-2003-R */ +%token EXCEPTION_SYM /* SQL-2003-N, Oracle-PLSQL-R */ +%token EXIT_SYM +%token EXPANSION_SYM +%token EXPORT_SYM +%token EXTENDED_SYM +%token EXTENT_SIZE_SYM +%token FAST_SYM +%token FAULTS_SYM +%token FILE_SYM +%token FIRST_SYM /* SQL-2003-N */ +%token FIXED_SYM +%token FLUSH_SYM +%token FOLLOWS_SYM /* MYSQL trigger*/ +%token FOLLOWING_SYM /* SQL-2011-N */ +%token FORCE_SYM +%token FORMAT_SYM +%token FOUND_SYM /* SQL-2003-R */ +%token FULL /* SQL-2003-R */ +%token FUNCTION_SYM /* SQL-2003-R, Oracle-PLSQL-R */ +%token GENERAL +%token GENERATED_SYM +%token GEOMETRYCOLLECTION +%token GEOMETRY_SYM +%token GET_FORMAT /* MYSQL-FUNC */ +%token GET_SYM /* SQL-2003-R */ +%token GLOBAL_SYM /* SQL-2003-R */ +%token GRANTS +%token HANDLER_SYM +%token HARD_SYM +%token HASH_SYM +%token HELP_SYM +%token HIGH_PRIORITY +%token HISTORY_SYM /* MYSQL */ +%token HOST_SYM +%token HOSTS_SYM +%token HOUR_SYM /* SQL-2003-R */ +%token ID_SYM /* MYSQL */ +%token IDENTIFIED_SYM +%token IGNORE_SERVER_IDS_SYM +%token IMMEDIATE_SYM /* SQL-2003-R */ +%token IMPORT +%token INCREMENT_SYM +%token INDEXES +%token INITIAL_SIZE_SYM +%token INSERT_METHOD +%token INSTALL_SYM +%token INVOKER_SYM +%token IO_SYM +%token IPC_SYM +%token ISOLATION /* SQL-2003-R */ +%token ISOPEN_SYM /* Oracle-N */ +%token ISSUER_SYM +%token INVISIBLE_SYM +%token JSON_SYM +%token KEY_BLOCK_SIZE +%token LANGUAGE_SYM /* SQL-2003-R */ +%token LAST_SYM /* SQL-2003-N */ +%token LAST_VALUE +%token LASTVAL_SYM /* PostgreSQL sequence function */ +%token LEAVES +%token LESS_SYM +%token LEVEL_SYM +%token LINESTRING +%token LIST_SYM +%token LOCAL_SYM /* SQL-2003-R */ +%token LOCKS_SYM +%token LOGFILE_SYM +%token LOGS_SYM +%token MASTER_CONNECT_RETRY_SYM +%token MASTER_DELAY_SYM +%token MASTER_GTID_POS_SYM +%token MASTER_HOST_SYM +%token MASTER_LOG_FILE_SYM +%token MASTER_LOG_POS_SYM +%token MASTER_PASSWORD_SYM +%token MASTER_PORT_SYM +%token MASTER_SERVER_ID_SYM +%token MASTER_SSL_CAPATH_SYM +%token MASTER_SSL_CA_SYM +%token MASTER_SSL_CERT_SYM +%token MASTER_SSL_CIPHER_SYM +%token MASTER_SSL_CRL_SYM +%token MASTER_SSL_CRLPATH_SYM +%token MASTER_SSL_KEY_SYM +%token MASTER_SSL_SYM +%token MASTER_SYM +%token MASTER_USER_SYM 
+%token MASTER_USE_GTID_SYM +%token MASTER_HEARTBEAT_PERIOD_SYM +%token MAX_CONNECTIONS_PER_HOUR +%token MAX_QUERIES_PER_HOUR +%token MAX_ROWS +%token MAX_SIZE_SYM +%token MAX_UPDATES_PER_HOUR +%token MAX_STATEMENT_TIME_SYM +%token MAX_USER_CONNECTIONS_SYM +%token MEDIUM_SYM +%token MEMORY_SYM +%token MERGE_SYM /* SQL-2003-R */ +%token MESSAGE_TEXT_SYM /* SQL-2003-N */ +%token MICROSECOND_SYM /* MYSQL-FUNC */ +%token MIGRATE_SYM +%token MINUTE_SYM /* SQL-2003-R */ +%token MINVALUE_SYM +%token MIN_ROWS +%token MODE_SYM +%token MODIFY_SYM +%token MONTH_SYM /* SQL-2003-R */ +%token MULTILINESTRING +%token MULTIPOINT +%token MULTIPOLYGON +%token MUTEX_SYM +%token MYSQL_SYM +%token MYSQL_ERRNO_SYM +%token NAMES_SYM /* SQL-2003-N */ +%token NAME_SYM /* SQL-2003-N */ +%token NATIONAL_SYM /* SQL-2003-R */ +%token NCHAR_SYM /* SQL-2003-R */ +%token NEW_SYM /* SQL-2003-R */ +%token NEXT_SYM /* SQL-2003-N */ +%token NEXTVAL_SYM /* PostgreSQL sequence function */ +%token NOCACHE_SYM +%token NOCYCLE_SYM +%token NODEGROUP_SYM +%token NONE_SYM /* SQL-2003-R */ +%token NOTFOUND_SYM /* Oracle-R */ +%token NO_SYM /* SQL-2003-R */ +%token NOMAXVALUE_SYM +%token NOMINVALUE_SYM +%token NO_WAIT_SYM +%token NOWAIT_SYM +%token NUMBER_SYM /* SQL-2003-N, Oracle-R, PLSQL-R */ +%token NVARCHAR_SYM +%token OF_SYM /* SQL-1992-R, Oracle-R */ +%token OFFSET_SYM +%token OLD_PASSWORD_SYM +%token ONE_SYM +%token ONLY_SYM /* SQL-2003-R */ +%token ONLINE_SYM +%token OPEN_SYM /* SQL-2003-R */ +%token OPTIONS_SYM +%token OPTION /* SQL-2003-N */ +%token OWNER_SYM +%token PACK_KEYS_SYM +%token PAGE_SYM +%token PARSER_SYM +%token PARTIAL /* SQL-2003-N */ +%token PARTITIONS_SYM +%token PARTITIONING_SYM +%token PASSWORD_SYM +%token PERIOD_SYM /* SQL-2011-R */ +%token PERSISTENT_SYM +%token PHASE_SYM +%token PLUGINS_SYM +%token PLUGIN_SYM +%token POINT_SYM +%token POLYGON +%token PORT_SYM +%token PRECEDES_SYM /* MYSQL */ +%token PRECEDING_SYM /* SQL-2011-N */ +%token PREPARE_SYM /* SQL-2003-R */ +%token PRESERVE_SYM +%token PREV_SYM +%token PREVIOUS_SYM +%token PRIVILEGES /* SQL-2003-N */ +%token PROCESS +%token PROCESSLIST_SYM +%token PROFILE_SYM +%token PROFILES_SYM +%token PROXY_SYM +%token QUARTER_SYM +%token QUERY_SYM +%token QUICK +%token RAW /* Oracle-R */ +%token READ_ONLY_SYM +%token REBUILD_SYM +%token RECOVER_SYM +%token REDOFILE_SYM +%token REDO_BUFFER_SIZE_SYM +%token REDUNDANT_SYM +%token RELAY +%token RELAYLOG_SYM +%token RELAY_LOG_FILE_SYM +%token RELAY_LOG_POS_SYM +%token RELAY_THREAD +%token RELOAD +%token REMOVE_SYM +%token REORGANIZE_SYM +%token REPAIR +%token REPEATABLE_SYM /* SQL-2003-N */ +%token REPLICATION +%token RESET_SYM +%token RESTART_SYM +%token RESOURCES +%token RESTORE_SYM +%token RESUME_SYM +%token RETURNED_SQLSTATE_SYM /* SQL-2003-N */ +%token RETURNS_SYM /* SQL-2003-R */ +%token REUSE_SYM /* Oracle-R */ +%token REVERSE_SYM +%token ROLE_SYM +%token ROLLBACK_SYM /* SQL-2003-R */ +%token ROLLUP_SYM /* SQL-2003-R */ +%token ROUTINE_SYM /* SQL-2003-N */ +%token ROWCOUNT_SYM /* Oracle-N */ +%token ROW_SYM /* SQL-2003-R */ +%token ROW_COUNT_SYM /* SQL-2003-N */ +%token ROW_FORMAT_SYM +%token RTREE_SYM +%token SAVEPOINT_SYM /* SQL-2003-R */ +%token SCHEDULE_SYM +%token SCHEMA_NAME_SYM /* SQL-2003-N */ +%token SECOND_SYM /* SQL-2003-R */ +%token SECURITY_SYM /* SQL-2003-N */ +%token SEQUENCE_SYM +%token SERIALIZABLE_SYM /* SQL-2003-N */ +%token SERIAL_SYM +%token SESSION_SYM /* SQL-2003-N */ +%token SERVER_SYM +%token SETVAL_SYM /* PostgreSQL sequence function */ +%token SHARE_SYM +%token SHUTDOWN +%token 
SIGNED_SYM +%token SIMPLE_SYM /* SQL-2003-N */ +%token SLAVE +%token SLAVES +%token SLAVE_POS_SYM +%token SLOW +%token SNAPSHOT_SYM +%token SOCKET_SYM +%token SOFT_SYM +%token SONAME_SYM +%token SOUNDS_SYM +%token SOURCE_SYM +%token SQL_BUFFER_RESULT +%token SQL_CACHE_SYM +%token SQL_CALC_FOUND_ROWS +%token SQL_NO_CACHE_SYM +%token SQL_THREAD +%token STARTS_SYM +%token START_SYM /* SQL-2003-R */ +%token STATEMENT_SYM +%token STATUS_SYM +%token STOP_SYM +%token STORAGE_SYM +%token STORED_SYM +%token STRING_SYM +%token SUBCLASS_ORIGIN_SYM /* SQL-2003-N */ +%token SUBDATE_SYM +%token SUBJECT_SYM +%token SUBPARTITIONS_SYM +%token SUBPARTITION_SYM +%token SUPER_SYM +%token SUSPEND_SYM +%token SWAPS_SYM +%token SWITCHES_SYM +%token SYSTEM /* SQL-2011-R */ +%token SYSTEM_TIME_SYM /* SQL-2011-R */ +%token TABLES +%token TABLESPACE +%token TABLE_CHECKSUM_SYM +%token TABLE_NAME_SYM /* SQL-2003-N */ +%token TEMPORARY /* SQL-2003-N */ +%token TEMPTABLE_SYM +%token TEXT_SYM +%token THAN_SYM +%token TIES_SYM /* SQL-2011-N */ +%token TIMESTAMP /* SQL-2003-R */ +%token TIMESTAMP_ADD +%token TIMESTAMP_DIFF +%token TIME_SYM /* SQL-2003-R, Oracle-R */ +%token TRANSACTION_SYM +%token TRANSACTIONAL_SYM +%token TRIGGERS_SYM +%token TRIM_ORACLE +%token TRUNCATE_SYM +%token TYPES_SYM +%token TYPE_SYM /* SQL-2003-N */ +%token UDF_RETURNS_SYM +%token UNBOUNDED_SYM /* SQL-2011-N */ +%token UNCOMMITTED_SYM /* SQL-2003-N */ +%token UNDEFINED_SYM +%token UNDOFILE_SYM +%token UNDO_BUFFER_SIZE_SYM +%token UNICODE_SYM +%token UNINSTALL_SYM +%token UNKNOWN_SYM /* SQL-2003-R */ +%token UNTIL_SYM +%token UPGRADE_SYM +%token USER_SYM /* SQL-2003-R */ +%token USE_FRM +%token VALUE_SYM /* SQL-2003-R */ +%token VARCHAR2 /* Oracle-R, PLSQL-R */ +%token VARIABLES +%token VERSIONING_SYM /* SQL-2011-R */ +%token VIA_SYM +%token VIEW_SYM /* SQL-2003-N */ +%token VIRTUAL_SYM +%token WAIT_SYM +%token WARNINGS +%token WEEK_SYM +%token WEIGHT_STRING_SYM +%token WINDOW_SYM /* SQL-2003-R */ +%token WITHIN +%token WITHOUT /* SQL-2003-R */ +%token WORK_SYM /* SQL-2003-N */ +%token WRAPPER_SYM +%token WRITE_SYM /* SQL-2003-N */ +%token X509_SYM +%token XA_SYM +%token XML_SYM +%token YEAR_SYM /* SQL-2003-R */ + %left JOIN_SYM INNER_SYM STRAIGHT_JOIN CROSS LEFT RIGHT /* A dummy token to force the priority of table_ref production in a join. */ %left TABLE_REF_PRIORITY %left SET_VAR -%left OR_OR_SYM OR_SYM OR2_SYM +%left OR_SYM OR2_SYM %left XOR %left AND_SYM AND_AND_SYM + +%left PREC_BELOW_NOT +%left NOT_SYM + %left BETWEEN_SYM CASE_SYM WHEN_SYM THEN_SYM ELSE %left '=' EQUAL_SYM GE '>' LE '<' NE IS LIKE REGEXP IN_SYM %left '|' %left '&' %left SHIFT_LEFT SHIFT_RIGHT -%left '-' '+' +%left '-' '+' ORACLE_CONCAT_SYM %left '*' '/' DIV_SYM MOD_SYM %left '^' -%left NEG '~' -%right NOT_SYM NOT2_SYM -%right BINARY COLLATE_SYM -%left INTERVAL_SYM +%left MYSQL_CONCAT_SYM +%left NEG '~' NOT2_SYM BINARY +%left COLLATE_SYM + +/* + Tokens that can change their meaning from identifier to something else + in certain context. 
+ + - TRANSACTION: identifier, history unit: + SELECT transaction FROM t1; + SELECT * FROM t1 FOR SYSTEM_TIME AS OF TRANSACTION @var; + + - TIMESTAMP: identifier, literal, history unit: + SELECT timestamp FROM t1; + SELECT TIMESTAMP '2001-01-01 10:20:30'; + SELECT * FROM t1 FOR SYSTEM_TIME AS OF TIMESTAMP CONCAT(@date,' ',@time); + + - PERIOD: identifier, period for system time: + SELECT period FROM t1; + ALTER TABLE DROP PERIOD FOR SYSTEM TIME; + + - SYSTEM: identifier, system versioning: + SELECT system FROM t1; + ALTER TABLE DROP SYSTEM VERSIONING; + + Note, we need here only tokens that cause shift/reduce conflicts + with keyword identifiers. For example: + opt_clause1: %empty | KEYWORD ... ; + clause2: opt_clause1 ident; + KEYWORD can appear both in opt_clause1 and in "ident" through the "keyword" + rule. So the parser reports a conflict on how to interpret KEYWORD: + - as a start of non-empty branch in opt_clause1, or + - as an identifier which follows the empty branch in opt_clause1. + + Example#1: + alter_list_item: + DROP opt_column opt_if_exists_table_element field_ident + | DROP SYSTEM VERSIONING_SYM + SYSTEM can be a keyword in field_ident, or can be a start of + SYSTEM VERSIONING. + + Example#2: + system_time_expr: AS OF_SYM history_point + history_point: opt_history_unit bit_expr + opt_history_unit: | TRANSACTION_SYM + TRANSACTION can be a non-empty history unit, or can be an identifier + in bit_expr. + + In the grammar below we use %prec to explicitly tell Bison to go + through the empty branch in the optional rule only when the lookahead + token does not belong to a small set of selected tokens. + + Tokens NEXT_SYM and PREVIOUS_SYM also change their meaning from + identifiers to sequence operations when followed by VALUE_SYM: + SELECT NEXT VALUE FOR s1, PREVIOUS VALUE FOR s1; + but we don't need to list them here as they do not seem to cause + conflicts (according to bison -v), as both meanings + (as identifier, and as a sequence operation) are parts of the same target + column_default_non_parenthesized_expr, and there are no optional + clauses between the start of column_default_non_parenthesized_expr + and NEXT_SYM / PREVIOUS_SYM. +*/ +%left PREC_BELOW_IDENTIFIER_OPT_SPECIAL_CASE +%left TRANSACTION_SYM TIMESTAMP PERIOD_SYM SYSTEM + + +/* + Tokens that can appear in a token contraction on the second place + and change the meaning of the previous token.
+ + - TEXT_STRING: changes the meaning of TIMESTAMP/TIME/DATE + from identifier to literal: + SELECT timestamp FROM t1; + SELECT TIMESTAMP'2001-01-01 00:00:00' FROM t1; + + - Parenthesis: changes the meaning of TIMESTAMP/TIME/DATE + from identifiers to CAST-alike functions: + SELECT timestamp FROM t1; + SELECT timestamp(1) FROM t1; + + - VALUE: changes NEXT and PREVIOUS from identifier to sequence operation: + SELECT next, previous FROM t1; + SELECT NEXT VALUE FOR s1, PREVIOUS VALUE FOR s1; + + - VERSIONING: changes SYSTEM from identifier to SYSTEM VERSIONING + SELECT system FROM t1; + ALTER TABLE t1 ADD SYSTEM VERSIONING; +*/ +%left PREC_BELOW_CONTRACTION_TOKEN2 +%left TEXT_STRING '(' VALUE_SYM VERSIONING_SYM %type - IDENT IDENT_QUOTED DECIMAL_NUM FLOAT_NUM NUM LONG_NUM + DECIMAL_NUM FLOAT_NUM NUM LONG_NUM HEX_NUM HEX_STRING LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident_or_text - IDENT_sys TEXT_STRING_sys TEXT_STRING_literal - opt_component key_cache_name - sp_opt_label BIN_NUM TEXT_STRING_filesystem ident_or_empty + TEXT_STRING_sys TEXT_STRING_literal + key_cache_name + sp_opt_label BIN_NUM TEXT_STRING_filesystem opt_constraint constraint opt_ident - ident_directly_assignable opt_package_routine_end_name - sp_decl_ident sp_block_label opt_place opt_db %type - label_ident label_declaration_oracle labels_declaration_oracle +%type + IDENT_sys + ident + label_ident + sp_decl_ident + ident_or_empty + ident_table_alias + ident_sysvar_name + ident_directly_assignable + %type TEXT_STRING NCHAR_STRING @@ -1065,8 +1183,26 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %type opt_table_alias -%type - ident ident_with_tok_start +%type + IDENT + IDENT_QUOTED + IDENT_cli + ident_cli + +%type + keyword_data_type + keyword_ident + keyword_label + keyword_sp_block_section + keyword_sp_decl + keyword_sp_head + keyword_sp_var_and_label + keyword_sp_var_not_label + keyword_sysvar_name + keyword_sysvar_type + keyword_table_alias + keyword_verb_clause + keyword_directly_assignable %type
table_ident table_ident_nodb references xid @@ -1082,6 +1218,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %type field_length opt_field_length opt_field_length_default_1 + opt_compression_method %type text_string hex_or_bin_String opt_gconcat_separator @@ -1118,7 +1255,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); optional_flush_tables_arguments opt_time_precision kill_type kill_option int_num opt_default_time_precision - case_stmt_body opt_bin_mod + case_stmt_body opt_bin_mod opt_for_system_time_clause opt_if_exists_table_element opt_if_not_exists_table_element opt_recursive opt_format_xid @@ -1142,7 +1279,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %type ulong_num real_ulong_num merge_insert_types - ws_nweights + ws_nweights opt_versioning_interval_start ws_level_flag_desc ws_level_flag_reverse ws_level_flags opt_ws_levels ws_level_list ws_level_list_item ws_level_number ws_level_range ws_level_list_or_range bool @@ -1164,11 +1301,13 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); variable variable_aux bool_pri predicate bit_expr parenthesized_expr table_wild simple_expr column_default_non_parenthesized_expr udf_expr + primary_expr string_factor_expr mysql_concatenation_expr + select_sublist_qualified_asterisk expr_or_default set_expr_or_default geometry_function signed_literal expr_or_literal opt_escape sp_opt_default - simple_ident_nospvar simple_ident_q simple_ident_q2 + simple_ident_nospvar field_or_var limit_option part_func_expr window_func_expr @@ -1179,6 +1318,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); inverse_distribution_function_def explicit_cursor_attr function_call_keyword + function_call_keyword_timestamp function_call_nonkeyword function_call_generic function_call_conflict kill_expr @@ -1233,7 +1373,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); table_primary_ident table_primary_derived select_derived derived_table_list select_derived_union + derived_simple_table derived_query_specification + derived_table_value_constructor %type date_time_type; %type interval @@ -1253,13 +1395,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); opt_field_length_default_sp_param_varchar opt_field_length_default_sp_param_char -%type keyword keyword_sp - keyword_directly_assignable - keyword_directly_not_assignable - sp_decl_ident_keyword - keyword_sp_data_type - keyword_sp_not_data_type - %type user grant_user grant_role user_or_role current_role admin_option_for_role user_maybe_role @@ -1277,11 +1412,13 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %type subselect get_select_lex get_select_lex_derived + simple_table query_specification query_term_union_not_ready query_term_union_ready query_expression_body select_paren_derived + table_value_constructor %type comp_op @@ -1352,12 +1489,16 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); keep_gcc_happy key_using_alg part_column_list + period_for_system_time server_def server_options_list server_option definer_opt no_definer definer get_diagnostics parse_vcol_expr vcol_opt_specifier vcol_opt_attribute vcol_opt_attribute_list vcol_attribute opt_serial_attribute opt_serial_attribute_list serial_attribute - explainable_command opt_lock_wait_timeout + explainable_command + opt_lock_wait_timeout + opt_delete_gtid_domain + asrow_attribute set_assign sf_tail_standalone sp_tail_standalone @@ -1440,12 +1581,11 @@ END_OF_INPUT %type 
opt_window_frame_exclusion; %type window_frame_start window_frame_bound; - %type '-' '+' '*' '/' '%' '(' ')' - ',' '!' '{' '}' '&' '|' AND_SYM OR_SYM OR_OR_SYM BETWEEN_SYM CASE_SYM + ',' '!' '{' '}' '&' '|' AND_SYM OR_SYM BETWEEN_SYM CASE_SYM THEN_SYM WHEN_SYM DIV_SYM MOD_SYM OR2_SYM AND_AND_SYM DELETE_SYM - ROLE_SYM + MYSQL_CONCAT_SYM ORACLE_CONCAT_SYM %type opt_with_clause with_clause @@ -1453,6 +1593,9 @@ END_OF_INPUT %type opt_with_column_list +%type opt_history_unit +%type history_point +%type with_or_without_system %% @@ -1480,8 +1623,8 @@ rule: <-- starts at col 1 query: END_OF_INPUT { - if (!thd->bootstrap && - (!(thd->lex->select_lex.options & OPTION_FOUND_COMMENT))) + if (likely(!thd->bootstrap) && + unlikely(!(thd->lex->select_lex.options & OPTION_FOUND_COMMENT))) my_yyabort_error((ER_EMPTY_QUERY, MYF(0))); thd->lex->sql_command= SQLCOM_EMPTY_QUERY; @@ -1609,7 +1752,7 @@ prepare: PREPARE_SYM ident FROM prepare_src { LEX *lex= thd->lex; - if (lex->table_or_sp_used()) + if (unlikely(lex->table_or_sp_used())) my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0), "PREPARE..FROM")); lex->sql_command= SQLCOM_PREPARE; @@ -1637,7 +1780,7 @@ execute: {} | EXECUTE_SYM IMMEDIATE_SYM prepare_src { - if (Lex->table_or_sp_used()) + if (unlikely(Lex->table_or_sp_used())) my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0), "EXECUTE IMMEDIATE")); Lex->sql_command= SQLCOM_EXECUTE_IMMEDIATE; @@ -1651,7 +1794,7 @@ execute_using: | USING { Lex->expr_allows_subselect= false; } execute_var_list { - if (Lex->table_or_sp_used()) + if (unlikely(Lex->table_or_sp_used())) my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0), "EXECUTE..USING")); Lex->expr_allows_subselect= true; @@ -1666,7 +1809,8 @@ execute_var_list: execute_var_ident: expr_or_default { - if (Lex->prepared_stmt_params.push_back($1, thd->mem_root)) + if (unlikely(Lex->prepared_stmt_params.push_back($1, + thd->mem_root))) MYSQL_YYABORT; } ; @@ -1676,7 +1820,7 @@ execute_var_ident: help: HELP_SYM { - if (Lex->sphead) + if (unlikely(Lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "HELP")); } ident_or_text @@ -1776,20 +1920,21 @@ master_def: | MASTER_HEARTBEAT_PERIOD_SYM '=' NUM_literal { Lex->mi.heartbeat_period= (float) $3->val_real(); - if (Lex->mi.heartbeat_period > SLAVE_MAX_HEARTBEAT_PERIOD || - Lex->mi.heartbeat_period < 0.0) + if (unlikely(Lex->mi.heartbeat_period > + SLAVE_MAX_HEARTBEAT_PERIOD) || + unlikely(Lex->mi.heartbeat_period < 0.0)) my_yyabort_error((ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE, MYF(0), SLAVE_MAX_HEARTBEAT_PERIOD)); - if (Lex->mi.heartbeat_period > slave_net_timeout) + if (unlikely(Lex->mi.heartbeat_period > slave_net_timeout)) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX, ER_THD(thd, ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX)); } - if (Lex->mi.heartbeat_period < 0.001) + if (unlikely(Lex->mi.heartbeat_period < 0.001)) { - if (Lex->mi.heartbeat_period != 0.0) + if (unlikely(Lex->mi.heartbeat_period != 0.0)) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN, @@ -1887,19 +2032,19 @@ master_file_def: } | MASTER_USE_GTID_SYM '=' CURRENT_POS_SYM { - if (Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED) + if (unlikely(Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MASTER_use_gtid")); Lex->mi.use_gtid_opt= LEX_MASTER_INFO::LEX_GTID_CURRENT_POS; } | MASTER_USE_GTID_SYM '=' SLAVE_POS_SYM { - if (Lex->mi.use_gtid_opt != 
LEX_MASTER_INFO::LEX_GTID_UNCHANGED) + if (unlikely(Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MASTER_use_gtid")); Lex->mi.use_gtid_opt= LEX_MASTER_INFO::LEX_GTID_SLAVE_POS; } | MASTER_USE_GTID_SYM '=' NO_SYM { - if (Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED) + if (unlikely(Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MASTER_use_gtid")); Lex->mi.use_gtid_opt= LEX_MASTER_INFO::LEX_GTID_NO; } @@ -1919,7 +2064,7 @@ connection_name: { Lex->mi.connection_name= $1; #ifdef HAVE_REPLICATION - if (check_master_connection_name(&$1)) + if (unlikely(check_master_connection_name(&$1))) my_yyabort_error((ER_WRONG_ARGUMENTS, MYF(0), "MASTER_CONNECTION_NAME")); #endif } @@ -1932,11 +2077,13 @@ create: { LEX *lex= thd->lex; lex->create_info.init(); - if (lex->set_command_with_check(SQLCOM_CREATE_TABLE, $2, $1 | $4)) + if (unlikely(lex->set_command_with_check(SQLCOM_CREATE_TABLE, $2, + $1 | $4))) MYSQL_YYABORT; - if (!lex->select_lex.add_table_to_list(thd, $5, NULL, - TL_OPTION_UPDATING, - TL_WRITE, MDL_EXCLUSIVE)) + if (unlikely(!lex->select_lex.add_table_to_list(thd, $5, NULL, + TL_OPTION_UPDATING, + TL_WRITE, + MDL_EXCLUSIVE))) MYSQL_YYABORT; lex->alter_info.reset(); /* @@ -1968,12 +2115,14 @@ create: { LEX *lex= thd->lex; lex->create_info.init(); - if (lex->set_command_with_check(SQLCOM_CREATE_SEQUENCE, $2, $1 | $4)) + if (unlikely(lex->set_command_with_check(SQLCOM_CREATE_SEQUENCE, $2, + $1 | $4))) MYSQL_YYABORT; - if (!lex->select_lex.add_table_to_list(thd, $5, NULL, - TL_OPTION_UPDATING, - TL_WRITE, MDL_EXCLUSIVE)) + if (unlikely(!lex->select_lex.add_table_to_list(thd, $5, NULL, + TL_OPTION_UPDATING, + TL_WRITE, + MDL_EXCLUSIVE))) MYSQL_YYABORT; /* @@ -1985,15 +2134,15 @@ create: lex->query_tables->open_strategy= TABLE_LIST::OPEN_STUB; lex->name= null_clex_str; lex->create_last_non_select_table= lex->last_table(); - if (!(lex->create_info.seq_create_info= new (thd->mem_root) - sequence_definition())) + if (unlikely(!(lex->create_info.seq_create_info= + new (thd->mem_root) sequence_definition()))) MYSQL_YYABORT; } opt_sequence opt_create_table_options { LEX *lex= thd->lex; - if (lex->create_info.seq_create_info->check_and_adjust(1)) + if (unlikely(lex->create_info.seq_create_info->check_and_adjust(1))) { my_error(ER_SEQUENCE_INVALID_DATA, MYF(0), lex->select_lex.table_list.first->db.str, @@ -2002,7 +2151,8 @@ create: } /* No fields specified, generate them */ - if (prepare_sequence_fields(thd, &lex->alter_info.create_list)) + if (unlikely(prepare_sequence_fields(thd, + &lex->alter_info.create_list))) MYSQL_YYABORT; /* CREATE SEQUENCE always creates a sequence */ @@ -2010,8 +2160,9 @@ create: Lex->create_info.sequence= 1; lex->current_select= &lex->select_lex; - if ((lex->create_info.used_fields & HA_CREATE_USED_ENGINE) && - !lex->create_info.db_type) + if (unlikely((lex->create_info.used_fields & + HA_CREATE_USED_ENGINE) && + !lex->create_info.db_type)) { lex->create_info.use_default_db_type(thd); push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, @@ -2026,9 +2177,9 @@ create: opt_key_algorithm_clause ON table_ident { - if (Lex->add_create_index_prepare($8)) + if (unlikely(Lex->add_create_index_prepare($8))) MYSQL_YYABORT; - if (Lex->add_create_index($2, &$5, $6, $1 | $4)) + if (unlikely(Lex->add_create_index($2, &$5, $6, $1 | $4))) MYSQL_YYABORT; } '(' key_list ')' opt_lock_wait_timeout normal_key_options @@ -2036,9 +2187,10 @@ create: | create_or_replace 
fulltext INDEX_SYM opt_if_not_exists ident ON table_ident { - if (Lex->add_create_index_prepare($7)) + if (unlikely(Lex->add_create_index_prepare($7))) MYSQL_YYABORT; - if (Lex->add_create_index($2, &$5, HA_KEY_ALG_UNDEF, $1 | $4)) + if (unlikely(Lex->add_create_index($2, &$5, HA_KEY_ALG_UNDEF, + $1 | $4))) MYSQL_YYABORT; } '(' key_list ')' opt_lock_wait_timeout fulltext_key_options @@ -2046,9 +2198,10 @@ create: | create_or_replace spatial INDEX_SYM opt_if_not_exists ident ON table_ident { - if (Lex->add_create_index_prepare($7)) + if (unlikely(Lex->add_create_index_prepare($7))) MYSQL_YYABORT; - if (Lex->add_create_index($2, &$5, HA_KEY_ALG_UNDEF, $1 | $4)) + if (unlikely(Lex->add_create_index($2, &$5, HA_KEY_ALG_UNDEF, + $1 | $4))) MYSQL_YYABORT; } '(' key_list ')' opt_lock_wait_timeout spatial_key_options @@ -2061,15 +2214,17 @@ create: opt_create_database_options { LEX *lex=Lex; - if (lex->set_command_with_check(SQLCOM_CREATE_DB, 0, $1 | $3)) + if (unlikely(lex->set_command_with_check(SQLCOM_CREATE_DB, 0, + $1 | $3))) MYSQL_YYABORT; lex->name= $4; } | create_or_replace definer_opt opt_view_suid VIEW_SYM opt_if_not_exists table_ident { - if (Lex->add_create_view(thd, $1 | $5, - DTYPE_ALGORITHM_UNDEFINED, $3, $6)) + if (unlikely(Lex->add_create_view(thd, $1 | $5, + DTYPE_ALGORITHM_UNDEFINED, $3, + $6))) MYSQL_YYABORT; } view_list_opt AS view_select @@ -2077,7 +2232,7 @@ create: | create_or_replace view_algorithm definer_opt opt_view_suid VIEW_SYM opt_if_not_exists table_ident { - if (Lex->add_create_view(thd, $1 | $6, $2, $4, $7)) + if (unlikely(Lex->add_create_view(thd, $1 | $6, $2, $4, $7))) MYSQL_YYABORT; } view_list_opt AS view_select @@ -2112,13 +2267,15 @@ create: | create_or_replace USER_SYM opt_if_not_exists clear_privileges grant_list opt_require_clause opt_resource_options { - if (Lex->set_command_with_check(SQLCOM_CREATE_USER, $1 | $3)) + if (unlikely(Lex->set_command_with_check(SQLCOM_CREATE_USER, + $1 | $3))) MYSQL_YYABORT; } | create_or_replace ROLE_SYM opt_if_not_exists clear_privileges role_list opt_with_admin { - if (Lex->set_command_with_check(SQLCOM_CREATE_ROLE, $1 | $3)) + if (unlikely(Lex->set_command_with_check(SQLCOM_CREATE_ROLE, + $1 | $3))) MYSQL_YYABORT; } | CREATE LOGFILE_SYM GROUP_SYM logfile_group_info @@ -2138,17 +2295,18 @@ create: remember_name { sp_package *pkg; - if (!(pkg= Lex->create_package_start(thd, - SQLCOM_CREATE_PACKAGE, - &sp_handler_package_spec, - $5, $1 | $4))) + if (unlikely(!(pkg= Lex-> + create_package_start(thd, + SQLCOM_CREATE_PACKAGE, + &sp_handler_package_spec, + $5, $1 | $4)))) MYSQL_YYABORT; pkg->set_chistics(Lex->sp_chistics); } opt_package_specification_element_list END remember_end_opt opt_sp_name { - if (Lex->create_package_finalize(thd, $5, $13, $8, $12)) + if (unlikely(Lex->create_package_finalize(thd, $5, $13, $8, $12))) MYSQL_YYABORT; } | create_or_replace definer_opt PACKAGE_SYM BODY_SYM @@ -2157,28 +2315,29 @@ create: remember_name { sp_package *pkg; - if (!(pkg= Lex->create_package_start(thd, - SQLCOM_CREATE_PACKAGE_BODY, - &sp_handler_package_body, - $6, $1 | $5))) + if (unlikely(!(pkg= Lex-> + create_package_start(thd, + SQLCOM_CREATE_PACKAGE_BODY, + &sp_handler_package_body, + $6, $1 | $5)))) MYSQL_YYABORT; pkg->set_chistics(Lex->sp_chistics); Lex->sp_block_init(thd); } package_implementation_declare_section { - if (Lex->sp_block_with_exceptions_finalize_declarations(thd)) + if (unlikely(Lex->sp_block_with_exceptions_finalize_declarations(thd))) MYSQL_YYABORT; } package_implementation_executable_section { $11.hndlrs+= 
$13.hndlrs; - if (Lex->sp_block_finalize(thd, $11)) + if (unlikely(Lex->sp_block_finalize(thd, $11))) MYSQL_YYABORT; } remember_end_opt opt_sp_name { - if (Lex->create_package_finalize(thd, $6, $16, $9, $15)) + if (unlikely(Lex->create_package_finalize(thd, $6, $16, $9, $15))) MYSQL_YYABORT; } ; @@ -2186,7 +2345,7 @@ create: package_implementation_executable_section: END { - if (Lex->sp_block_with_exceptions_add_empty(thd)) + if (unlikely(Lex->sp_block_with_exceptions_add_empty(thd))) MYSQL_YYABORT; $$.init(0); } @@ -2221,7 +2380,8 @@ package_implementation_declare_section_list2: package_routine_lex: { - if (!($$= new (thd->mem_root) sp_lex_local(thd, thd->lex))) + if (unlikely(!($$= new (thd->mem_root) + sp_lex_local(thd, thd->lex)))) MYSQL_YYABORT; thd->m_parser_state->m_yacc.reset_before_substatement(); } @@ -2234,11 +2394,11 @@ package_specification_function: DBUG_ASSERT($1->sphead->get_package()); $2->sql_command= SQLCOM_CREATE_FUNCTION; sp_name *spname= $1->make_sp_name_package_routine(thd, &$3); - if (!spname) + if (unlikely(!spname)) MYSQL_YYABORT; thd->lex= $2; - if (!$2->make_sp_head_no_recursive(thd, spname, - &sp_handler_package_function)) + if (unlikely(!$2->make_sp_head_no_recursive(thd, spname, + &sp_handler_package_function))) MYSQL_YYABORT; $1->sphead->get_package()->m_current_routine= $2; (void) is_native_function_with_warn(thd, &$3); @@ -2260,11 +2420,11 @@ package_specification_procedure: DBUG_ASSERT($1->sphead->get_package()); $2->sql_command= SQLCOM_CREATE_PROCEDURE; sp_name *spname= $1->make_sp_name_package_routine(thd, &$3); - if (!spname) + if (unlikely(!spname)) MYSQL_YYABORT; thd->lex= $2; - if (!$2->make_sp_head_no_recursive(thd, spname, - &sp_handler_package_procedure)) + if (unlikely(!$2->make_sp_head_no_recursive(thd, spname, + &sp_handler_package_procedure))) MYSQL_YYABORT; $1->sphead->get_package()->m_current_routine= $2; } @@ -2285,7 +2445,7 @@ package_implementation_routine_definition: package_implementation_function_body ';' { sp_package *pkg= Lex->get_sp_package(); - if (pkg->add_routine_implementation($2)) + if (unlikely(pkg->add_routine_implementation($2))) MYSQL_YYABORT; pkg->m_current_routine= NULL; $$.init(); @@ -2294,7 +2454,7 @@ package_implementation_routine_definition: package_implementation_procedure_body ';' { sp_package *pkg= Lex->get_sp_package(); - if (pkg->add_routine_implementation($2)) + if (unlikely(pkg->add_routine_implementation($2))) MYSQL_YYABORT; pkg->m_current_routine= NULL; $$.init(); @@ -2314,13 +2474,13 @@ package_implementation_function_body: } sp_body opt_package_routine_end_name { - if (Lex->sphead->m_flags & sp_head::HAS_AGGREGATE_INSTR) + if (unlikely(Lex->sphead->m_flags & sp_head::HAS_AGGREGATE_INSTR)) { my_yyabort_error((ER_NOT_AGGREGATE_FUNCTION, MYF(0))); } Lex->sphead->set_chistics_agg_type(NOT_AGGREGATE); - if (thd->lex->sp_body_finalize_function(thd) || - thd->lex->sphead->check_package_routine_end_name($5)) + if (unlikely(thd->lex->sp_body_finalize_function(thd) || + thd->lex->sphead->check_package_routine_end_name($5))) MYSQL_YYABORT; thd->lex= $2; } @@ -2337,8 +2497,8 @@ package_implementation_procedure_body: } sp_body opt_package_routine_end_name { - if (thd->lex->sp_body_finalize_procedure(thd) || - thd->lex->sphead->check_package_routine_end_name($5)) + if (unlikely(thd->lex->sp_body_finalize_procedure(thd) || + thd->lex->sphead->check_package_routine_end_name($5))) MYSQL_YYABORT; thd->lex= $2; } @@ -2363,14 +2523,14 @@ package_specification_element: FUNCTION_SYM package_specification_function ';' { 
sp_package *pkg= Lex->get_sp_package(); - if (pkg->add_routine_declaration($2)) + if (unlikely(pkg->add_routine_declaration($2))) MYSQL_YYABORT; pkg->m_current_routine= NULL; } | PROCEDURE_SYM package_specification_procedure ';' { sp_package *pkg= Lex->get_sp_package(); - if (pkg->add_routine_declaration($2)) + if (unlikely(pkg->add_routine_declaration($2))) MYSQL_YYABORT; pkg->m_current_routine= NULL; } @@ -2399,105 +2559,105 @@ sequence_def: } | NO_SYM MINVALUE_SYM { - if (Lex->create_info.seq_create_info->used_fields & seq_field_used_min_value) + if (unlikely(Lex->create_info.seq_create_info->used_fields & seq_field_used_min_value)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MINVALUE")); Lex->create_info.seq_create_info->used_fields|= seq_field_used_min_value; } | NOMINVALUE_SYM { - if (Lex->create_info.seq_create_info->used_fields & seq_field_used_min_value) + if (unlikely(Lex->create_info.seq_create_info->used_fields & seq_field_used_min_value)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MINVALUE")); Lex->create_info.seq_create_info->used_fields|= seq_field_used_min_value; } | MAXVALUE_SYM opt_equal longlong_num { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_max_value) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_max_value)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MAXVALUE")); Lex->create_info.seq_create_info->max_value= $3; Lex->create_info.seq_create_info->used_fields|= seq_field_used_max_value; } | NO_SYM MAXVALUE_SYM { - if (Lex->create_info.seq_create_info->used_fields & seq_field_used_max_value) + if (unlikely(Lex->create_info.seq_create_info->used_fields & seq_field_used_max_value)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MAXVALUE")); Lex->create_info.seq_create_info->used_fields|= seq_field_used_max_value; } | NOMAXVALUE_SYM { - if (Lex->create_info.seq_create_info->used_fields & seq_field_used_max_value) + if (unlikely(Lex->create_info.seq_create_info->used_fields & seq_field_used_max_value)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MAXVALUE")); Lex->create_info.seq_create_info->used_fields|= seq_field_used_max_value; } | START_SYM opt_with longlong_num { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_start) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_start)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "START")); Lex->create_info.seq_create_info->start= $3; Lex->create_info.seq_create_info->used_fields|= seq_field_used_start; } | INCREMENT_SYM opt_by longlong_num { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_increment) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_increment)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "INCREMENT")); Lex->create_info.seq_create_info->increment= $3; Lex->create_info.seq_create_info->used_fields|= seq_field_used_increment; } | CACHE_SYM opt_equal longlong_num { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_cache) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_cache)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "CACHE")); Lex->create_info.seq_create_info->cache= $3; Lex->create_info.seq_create_info->used_fields|= seq_field_used_cache; } | NOCACHE_SYM { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_cache) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_cache)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "CACHE")); 
Lex->create_info.seq_create_info->cache= 0; Lex->create_info.seq_create_info->used_fields|= seq_field_used_cache; } | CYCLE_SYM { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_cycle) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_cycle)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "CYCLE")); Lex->create_info.seq_create_info->cycle= 1; Lex->create_info.seq_create_info->used_fields|= seq_field_used_cycle; } | NOCYCLE_SYM { - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_cycle) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_cycle)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "CYCLE")); Lex->create_info.seq_create_info->cycle= 0; Lex->create_info.seq_create_info->used_fields|= seq_field_used_cycle; } | RESTART_SYM { - if (Lex->sql_command != SQLCOM_ALTER_SEQUENCE) + if (unlikely(Lex->sql_command != SQLCOM_ALTER_SEQUENCE)) { thd->parse_error(ER_SYNTAX_ERROR, "RESTART"); YYABORT; } - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_restart) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_restart)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "RESTART")); Lex->create_info.seq_create_info->used_fields|= seq_field_used_restart; } | RESTART_SYM opt_with longlong_num { - if (Lex->sql_command != SQLCOM_ALTER_SEQUENCE) + if (unlikely(Lex->sql_command != SQLCOM_ALTER_SEQUENCE)) { thd->parse_error(ER_SYNTAX_ERROR, "RESTART"); YYABORT; } - if (Lex->create_info.seq_create_info->used_fields & - seq_field_used_restart) + if (unlikely(Lex->create_info.seq_create_info->used_fields & + seq_field_used_restart)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "RESTART")); Lex->create_info.seq_create_info->restart= $3; Lex->create_info.seq_create_info->used_fields|= seq_field_used_restart | seq_field_used_restart_value; @@ -2507,7 +2667,7 @@ sequence_def: server_def: SERVER_SYM opt_if_not_exists ident_or_text { - if (Lex->add_create_options_with_check($2)) + if (unlikely(Lex->add_create_options_with_check($2))) MYSQL_YYABORT; Lex->server_options.reset($3); } @@ -2566,9 +2726,10 @@ event_tail: LEX *lex=Lex; lex->stmt_definition_begin= $1; - if (lex->add_create_options_with_check($2)) + if (unlikely(lex->add_create_options_with_check($2))) MYSQL_YYABORT; - if (!(lex->event_parse_data= Event_parse_data::new_instance(thd))) + if (unlikely(!(lex->event_parse_data= + Event_parse_data::new_instance(thd)))) MYSQL_YYABORT; lex->event_parse_data->identifier= $3; lex->event_parse_data->on_completion= @@ -2631,7 +2792,7 @@ ev_starts: /* empty */ { Item *item= new (thd->mem_root) Item_func_now_local(thd, 0); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; Lex->event_parse_data->item_starts= item; } @@ -2695,11 +2856,12 @@ ev_sql_stmt: (the nested ALTER EVENT can have anything but DO clause) - CREATE PROCEDURE ... BEGIN DROP EVENT ... END| */ - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_EVENT_RECURSION_FORBIDDEN, MYF(0))); - if (!lex->make_sp_head(thd, lex->event_parse_data->identifier, - &sp_handler_procedure)) + if (unlikely(!lex->make_sp_head(thd, + lex->event_parse_data->identifier, + &sp_handler_procedure))) MYSQL_YYABORT; lex->sphead->set_body_start(thd, lip->get_cpp_ptr()); @@ -2734,12 +2896,12 @@ clear_privileges: sp_name: ident '.' 
ident { - if (!($$= Lex->make_sp_name(thd, &$1, &$3))) + if (unlikely(!($$= Lex->make_sp_name(thd, &$1, &$3)))) MYSQL_YYABORT; } | ident { - if (!($$= Lex->make_sp_name(thd, &$1))) + if (unlikely(!($$= Lex->make_sp_name(thd, &$1)))) MYSQL_YYABORT; } ; @@ -2813,7 +2975,7 @@ sp_suid: call: CALL_SYM sp_name { - if (Lex->call_statement_start(thd, $2)) + if (unlikely(Lex->call_statement_start(thd, $2))) MYSQL_YYABORT; } opt_sp_cparam_list {} @@ -2855,7 +3017,7 @@ sp_fdparams: sp_param_name: ident { - if (!($$= Lex->sp_param_init(&$1))) + if (unlikely(!($$= Lex->sp_param_init(&$1)))) MYSQL_YYABORT; } ; @@ -2863,32 +3025,32 @@ sp_param_name: sp_param_name_and_type: sp_param_name sp_param_type_with_opt_collate { - if (Lex->sp_param_fill_definition($$= $1)) + if (unlikely(Lex->sp_param_fill_definition($$= $1))) MYSQL_YYABORT; } | sp_param_name sp_decl_ident '.' ident '%' TYPE_SYM { - if (Lex->sphead->spvar_fill_type_reference(thd, $$= $1, $2, $4)) + if (unlikely(Lex->sphead->spvar_fill_type_reference(thd, $$= $1, $2, $4))) MYSQL_YYABORT; } | sp_param_name sp_decl_ident '.' ident '.' ident '%' TYPE_SYM { - if (Lex->sphead->spvar_fill_type_reference(thd, $$= $1, $2, $4, $6)) + if (unlikely(Lex->sphead->spvar_fill_type_reference(thd, $$= $1, $2, $4, $6))) MYSQL_YYABORT; } | sp_param_name sp_decl_ident '%' ROWTYPE_SYM { - if (Lex->sphead->spvar_fill_table_rowtype_reference(thd, $$= $1, $2)) + if (unlikely(Lex->sphead->spvar_fill_table_rowtype_reference(thd, $$= $1, $2))) MYSQL_YYABORT; } | sp_param_name sp_decl_ident '.' ident '%' ROWTYPE_SYM { - if (Lex->sphead->spvar_fill_table_rowtype_reference(thd, $$= $1, $2, $4)) + if (unlikely(Lex->sphead->spvar_fill_table_rowtype_reference(thd, $$= $1, $2, $4))) MYSQL_YYABORT; } | sp_param_name ROW_SYM row_type_body { - if (Lex->sphead->spvar_fill_row(thd, $$= $1, $3)) + if (unlikely(Lex->sphead->spvar_fill_row(thd, $$= $1, $3))) MYSQL_YYABORT; } ; @@ -2908,37 +3070,37 @@ sp_pdparam: sp_param_name sp_opt_inout sp_param_type_with_opt_collate { $1->mode= $2; - if (Lex->sp_param_fill_definition($1)) + if (unlikely(Lex->sp_param_fill_definition($1))) MYSQL_YYABORT; } | sp_param_name sp_opt_inout sp_decl_ident '.' ident '%' TYPE_SYM { $1->mode= $2; - if (Lex->sphead->spvar_fill_type_reference(thd, $1, $3, $5)) + if (unlikely(Lex->sphead->spvar_fill_type_reference(thd, $1, $3, $5))) MYSQL_YYABORT; } | sp_param_name sp_opt_inout sp_decl_ident '.' ident '.' ident '%' TYPE_SYM { $1->mode= $2; - if (Lex->sphead->spvar_fill_type_reference(thd, $1, $3, $5, $7)) + if (unlikely(Lex->sphead->spvar_fill_type_reference(thd, $1, $3, $5, $7))) MYSQL_YYABORT; } | sp_param_name sp_opt_inout sp_decl_ident '%' ROWTYPE_SYM { $1->mode= $2; - if (Lex->sphead->spvar_fill_table_rowtype_reference(thd, $1, $3)) + if (unlikely(Lex->sphead->spvar_fill_table_rowtype_reference(thd, $1, $3))) MYSQL_YYABORT; } | sp_param_name sp_opt_inout sp_decl_ident '.' 
ident '%' ROWTYPE_SYM { $1->mode= $2; - if (Lex->sphead->spvar_fill_table_rowtype_reference(thd, $1, $3, $5)) + if (unlikely(Lex->sphead->spvar_fill_table_rowtype_reference(thd, $1, $3, $5))) MYSQL_YYABORT; } | sp_param_name sp_opt_inout ROW_SYM row_type_body { $1->mode= $2; - if (Lex->sphead->spvar_fill_row(thd, $1, $4)) + if (unlikely(Lex->sphead->spvar_fill_row(thd, $1, $4))) MYSQL_YYABORT; } ; @@ -3009,7 +3171,7 @@ sp_proc_stmts1_implicit_block: } sp_proc_stmts1 { - if (Lex->sp_block_finalize(thd)) + if (unlikely(Lex->sp_block_finalize(thd))) MYSQL_YYABORT; } ; @@ -3025,7 +3187,7 @@ opt_sp_decl_body_list: sp_decl_body_list: sp_decl_non_handler_list { - if (Lex->sphead->sp_add_instr_cpush_for_cursors(thd, Lex->spcont)) + if (unlikely(Lex->sphead->sp_add_instr_cpush_for_cursors(thd, Lex->spcont))) MYSQL_YYABORT; } opt_sp_decl_handler_list @@ -3059,18 +3221,20 @@ opt_sp_decl_handler_list: optionally_qualified_column_ident: sp_decl_ident { - if (!($$= new (thd->mem_root) Qualified_column_ident(&$1))) + if (unlikely(!($$= new (thd->mem_root) + Qualified_column_ident(&$1)))) MYSQL_YYABORT; } | sp_decl_ident '.' ident { - if (!($$= new (thd->mem_root) Qualified_column_ident(&$1, &$3))) + if (unlikely(!($$= new (thd->mem_root) + Qualified_column_ident(&$1, &$3)))) MYSQL_YYABORT; } | sp_decl_ident '.' ident '.' ident { - if (!($$= new (thd->mem_root) Qualified_column_ident(thd, - &$1, &$3, &$5))) + if (unlikely(!($$= new (thd->mem_root) + Qualified_column_ident(thd, &$1, &$3, &$5)))) MYSQL_YYABORT; } ; @@ -3078,10 +3242,10 @@ optionally_qualified_column_ident: row_field_name: ident_directly_assignable { - if (check_string_char_length(&$1, 0, NAME_CHAR_LEN, - system_charset_info, 1)) + if (unlikely(check_string_char_length(&$1, 0, NAME_CHAR_LEN, + system_charset_info, 1))) my_yyabort_error((ER_TOO_LONG_IDENT, MYF(0), $1.str)); - if (!($$= new (thd->mem_root) Spvar_definition())) + if (unlikely(!($$= new (thd->mem_root) Spvar_definition()))) MYSQL_YYABORT; Lex->init_last_field($$, &$1, thd->variables.collation_database); } @@ -3094,17 +3258,18 @@ row_field_definition: row_field_definition_list: row_field_definition { - if (!($$= new (thd->mem_root) Row_definition_list())) + if (unlikely(!($$= new (thd->mem_root) Row_definition_list())) || + unlikely($$->push_back($1, thd->mem_root))) MYSQL_YYABORT; - $$->push_back($1, thd->mem_root); } | row_field_definition_list ',' row_field_definition { uint unused; - if ($1->find_row_field_by_name(&$3->field_name, &unused)) + if (unlikely($1->find_row_field_by_name(&$3->field_name, &unused))) my_yyabort_error((ER_DUP_FIELDNAME, MYF(0), $3->field_name.str)); $$= $1; - $$->push_back($3, thd->mem_root); + if (unlikely($$->push_back($3, thd->mem_root))) + MYSQL_YYABORT; } ; @@ -3124,8 +3289,9 @@ sp_decl_vars: type_with_opt_collate sp_opt_default { - if (Lex->sp_variable_declarations_finalize(thd, $1, - &Lex->last_field[0], $3)) + if (unlikely(Lex->sp_variable_declarations_finalize(thd, $1, + &Lex->last_field[0], + $3))) MYSQL_YYABORT; $$.init_using_vars($1); } @@ -3133,7 +3299,7 @@ sp_decl_vars: optionally_qualified_column_ident '%' TYPE_SYM sp_opt_default { - if (Lex->sp_variable_declarations_with_ref_finalize(thd, $1, $2, $5)) + if (unlikely(Lex->sp_variable_declarations_with_ref_finalize(thd, $1, $2, $5))) MYSQL_YYABORT; $$.init_using_vars($1); } @@ -3141,7 +3307,7 @@ sp_decl_vars: optionally_qualified_column_ident '%' ROWTYPE_SYM sp_opt_default { - if (Lex->sp_variable_declarations_rowtype_finalize(thd, $1, $2, $5)) + if 
(unlikely(Lex->sp_variable_declarations_rowtype_finalize(thd, $1, $2, $5))) MYSQL_YYABORT; $$.init_using_vars($1); } @@ -3149,7 +3315,7 @@ sp_decl_vars: ROW_SYM row_type_body sp_opt_default { - if (Lex->sp_variable_declarations_row_finalize(thd, $1, $3, $4)) + if (unlikely(Lex->sp_variable_declarations_row_finalize(thd, $1, $3, $4))) MYSQL_YYABORT; $$.init_using_vars($1); } @@ -3159,7 +3325,7 @@ sp_decl_non_handler: sp_decl_vars | ident_directly_assignable CONDITION_SYM FOR_SYM sp_cond { - if (Lex->spcont->declare_condition(thd, &$1, $4)) + if (unlikely(Lex->spcont->declare_condition(thd, &$1, $4))) MYSQL_YYABORT; $$.vars= $$.hndlrs= $$.curs= 0; $$.conds= 1; @@ -3168,8 +3334,8 @@ sp_decl_non_handler: { sp_condition_value *spcond= new (thd->mem_root) sp_condition_value_user_defined(); - if (!spcond || - Lex->spcont->declare_condition(thd, &$1, spcond)) + if (unlikely(!spcond) || + unlikely(Lex->spcont->declare_condition(thd, &$1, spcond))) MYSQL_YYABORT; $$.vars= $$.hndlrs= $$.curs= 0; $$.conds= 1; @@ -3182,9 +3348,9 @@ sp_decl_non_handler: IS sp_cursor_stmt { sp_pcontext *param_ctx= Lex->spcont; - if (Lex->sp_block_finalize(thd)) + if (unlikely(Lex->sp_block_finalize(thd))) MYSQL_YYABORT; - if (Lex->sp_declare_cursor(thd, &$2, $6, param_ctx, false)) + if (unlikely(Lex->sp_declare_cursor(thd, &$2, $6, param_ctx, false))) MYSQL_YYABORT; $$.vars= $$.conds= $$.hndlrs= 0; $$.curs= 1; @@ -3194,12 +3360,12 @@ sp_decl_non_handler: sp_decl_handler: sp_handler_type HANDLER_SYM FOR_SYM { - if (Lex->sp_handler_declaration_init(thd, $1)) + if (unlikely(Lex->sp_handler_declaration_init(thd, $1))) MYSQL_YYABORT; } sp_hcond_list sp_proc_stmt { - if (Lex->sp_handler_declaration_finalize(thd, $1)) + if (unlikely(Lex->sp_handler_declaration_finalize(thd, $1))) MYSQL_YYABORT; $$.vars= $$.conds= $$.curs= 0; $$.hndlrs= 1; @@ -3215,7 +3381,8 @@ opt_parenthesized_cursor_formal_parameters: sp_cursor_stmt_lex: { DBUG_ASSERT(thd->lex->sphead); - if (!($$= new (thd->mem_root) sp_lex_cursor(thd, thd->lex))) + if (unlikely(!($$= new (thd->mem_root) + sp_lex_cursor(thd, thd->lex)))) MYSQL_YYABORT; } ; @@ -3229,8 +3396,8 @@ sp_cursor_stmt: select { DBUG_ASSERT(Lex == $1); - if ($1->stmt_finalize(thd) || - $1->sphead->restore_lex(thd)) + if (unlikely($1->stmt_finalize(thd)) || + unlikely($1->sphead->restore_lex(thd))) MYSQL_YYABORT; $$= $1; } @@ -3256,7 +3423,7 @@ sp_hcond_element: sp_head *sp= lex->sphead; sp_pcontext *ctx= lex->spcont->parent_context(); - if (ctx->check_duplicate_handler($1)) + if (unlikely(ctx->check_duplicate_handler($1))) my_yyabort_error((ER_SP_DUP_HANDLER, MYF(0))); sp_instr_hpush_jump *i= (sp_instr_hpush_jump *)sp->last_instruction(); @@ -3267,10 +3434,10 @@ sp_hcond_element: sp_cond: ulong_num { /* mysql errno */ - if ($1 == 0) + if (unlikely($1 == 0)) my_yyabort_error((ER_WRONG_VALUE, MYF(0), "CONDITION", "0")); $$= new (thd->mem_root) sp_condition_value($1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | sqlstate @@ -3287,10 +3454,11 @@ sqlstate: allowed to SIGNAL, or declare a handler for the completion condition. 
*/ - if (!is_sqlstate_valid(&$3) || is_sqlstate_completion($3.str)) + if (unlikely(!is_sqlstate_valid(&$3) || + is_sqlstate_completion($3.str))) my_yyabort_error((ER_SP_BAD_SQLSTATE, MYF(0), $3.str)); $$= new (thd->mem_root) sp_condition_value($3.str); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -3308,25 +3476,25 @@ sp_hcond: | ident /* CONDITION name */ { $$= Lex->spcont->find_declared_or_predefined_condition(&$1); - if ($$ == NULL) + if (unlikely($$ == NULL)) my_yyabort_error((ER_SP_COND_MISMATCH, MYF(0), $1.str)); } | SQLWARNING_SYM /* SQLSTATEs 01??? */ { $$= new (thd->mem_root) sp_condition_value(sp_condition_value::WARNING); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | not FOUND_SYM /* SQLSTATEs 02??? */ { $$= new (thd->mem_root) sp_condition_value(sp_condition_value::NOT_FOUND); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | OTHERS_SYM /* All other SQLSTATEs */ { $$= new (thd->mem_root) sp_condition_value(sp_condition_value::EXCEPTION); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -3335,12 +3503,12 @@ sp_hcond: raise_stmt: RAISE_SYM opt_set_signal_information { - if (Lex->add_resignal_statement(thd, NULL)) + if (unlikely(Lex->add_resignal_statement(thd, NULL))) MYSQL_YYABORT; } | RAISE_SYM signal_value opt_set_signal_information { - if (Lex->add_signal_statement(thd, $2)) + if (unlikely(Lex->add_signal_statement(thd, $2))) MYSQL_YYABORT; } ; @@ -3348,7 +3516,7 @@ raise_stmt: signal_stmt: SIGNAL_SYM signal_value opt_set_signal_information { - if (Lex->add_signal_statement(thd, $2)) + if (unlikely(Lex->add_signal_statement(thd, $2))) MYSQL_YYABORT; } ; @@ -3360,12 +3528,12 @@ signal_value: sp_condition_value *cond; /* SIGNAL foo cannot be used outside of stored programs */ - if (lex->spcont == NULL) + if (unlikely(lex->spcont == NULL)) my_yyabort_error((ER_SP_COND_MISMATCH, MYF(0), $1.str)); cond= lex->spcont->find_declared_or_predefined_condition(&$1); - if (cond == NULL) + if (unlikely(cond == NULL)) my_yyabort_error((ER_SP_COND_MISMATCH, MYF(0), $1.str)); - if (!cond->has_sql_state()) + if (unlikely(!cond->has_sql_state())) my_yyabort_error((ER_SIGNAL_BAD_CONDITION_TYPE, MYF(0))); $$= cond; } @@ -3403,7 +3571,7 @@ signal_information_item_list: Set_signal_information *info; info= &thd->m_parser_state->m_yacc.m_set_signal_info; int index= (int) $3; - if (info->m_item[index] != NULL) + if (unlikely(info->m_item[index] != NULL)) my_yyabort_error((ER_DUP_SIGNAL_SET, MYF(0), Diag_condition_item_names[index].str)); info->m_item[index]= $5; @@ -3421,7 +3589,7 @@ signal_allowed_expr: if ($1->type() == Item::FUNC_ITEM) { Item_func *item= (Item_func*) $1; - if (item->functype() == Item_func::SUSERVAR_FUNC) + if (unlikely(item->functype() == Item_func::SUSERVAR_FUNC)) { /* Don't allow the following syntax: @@ -3469,7 +3637,7 @@ signal_condition_information_item_name: resignal_stmt: RESIGNAL_SYM opt_signal_value opt_set_signal_information { - if (Lex->add_resignal_statement(thd, $2)) + if (unlikely(Lex->add_resignal_statement(thd, $2))) MYSQL_YYABORT; } ; @@ -3484,7 +3652,7 @@ get_diagnostics: Lex->sql_command= SQLCOM_GET_DIAGNOSTICS; Lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_get_diagnostics(info); - if (Lex->m_sql_cmd == NULL) + if (unlikely(Lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -3500,13 +3668,13 @@ diagnostics_information: statement_information { $$= new (thd->mem_root) Statement_information($1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CONDITION_SYM condition_number 
condition_information { $$= new (thd->mem_root) Condition_information($2, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -3515,12 +3683,13 @@ statement_information: statement_information_item { $$= new (thd->mem_root) List<Statement_information_item>; - if ($$ == NULL || $$->push_back($1, thd->mem_root)) + if (unlikely($$ == NULL) || + unlikely($$->push_back($1, thd->mem_root))) MYSQL_YYABORT; } | statement_information ',' statement_information_item { - if ($1->push_back($3, thd->mem_root)) + if (unlikely($1->push_back($3, thd->mem_root))) MYSQL_YYABORT; $$= $1; } @@ -3530,24 +3699,20 @@ statement_information_item: simple_target_specification '=' statement_information_item_name { $$= new (thd->mem_root) Statement_information_item($3, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } simple_target_specification: - ident + ident_cli { - Lex_input_stream *lip= &thd->m_parser_state->m_lip; - $$= thd->lex->create_item_for_sp_var(&$1, NULL, - lip->get_tok_start(), - lip->get_ptr()); - if ($$ == NULL) + if (unlikely(!($$= thd->lex->create_item_for_sp_var(&$1, NULL)))) MYSQL_YYABORT; } | '@' ident_or_text { $$= new (thd->mem_root) Item_func_get_user_var(thd, &$2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -3572,12 +3737,13 @@ condition_information: condition_information_item { $$= new (thd->mem_root) List<Condition_information_item>; - if ($$ == NULL || $$->push_back($1, thd->mem_root)) + if (unlikely($$ == NULL) || + unlikely($$->push_back($1, thd->mem_root))) MYSQL_YYABORT; } | condition_information ',' condition_information_item { - if ($1->push_back($3, thd->mem_root)) + if (unlikely($1->push_back($3, thd->mem_root))) MYSQL_YYABORT; $$= $1; } @@ -3587,7 +3753,7 @@ condition_information_item: simple_target_specification '=' condition_information_item_name { $$= new (thd->mem_root) Condition_information_item($3, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } @@ -3622,21 +3788,13 @@ condition_information_item_name: sp_decl_ident: IDENT_sys - | sp_decl_ident_keyword + | keyword_sp_decl { - $$.str= thd->strmake($1.str, $1.length); - if ($$.str == NULL) + if (unlikely($$.copy_ident_cli(thd, &$1))) MYSQL_YYABORT; - $$.length= $1.length; } ; -sp_decl_ident_keyword: - keyword_directly_assignable - | keyword_sp_not_data_type - ; - - sp_decl_idents: sp_decl_ident { @@ -3645,7 +3803,7 @@ sp_decl_idents: LEX *lex= Lex; sp_pcontext *spc= lex->spcont; - if (spc->find_variable(&$1, TRUE)) + if (unlikely(spc->find_variable(&$1, TRUE))) my_yyabort_error((ER_SP_DUP_VAR, MYF(0), $1.str)); spc->add_variable(thd, &$1); $$= 1; @@ -3657,7 +3815,7 @@ sp_decl_idents: LEX *lex= Lex; sp_pcontext *spc= lex->spcont; - if (spc->find_variable(&$3, TRUE)) + if (unlikely(spc->find_variable(&$3, TRUE))) my_yyabort_error((ER_SP_DUP_VAR, MYF(0), $3.str)); spc->add_variable(thd, &$3); $$= $1 + 1; @@ -3705,7 +3863,7 @@ sp_proc_stmt_compound_ok: sp_proc_stmt_if: IF_SYM { - if (Lex->maybe_start_compound_statement(thd)) + if (unlikely(Lex->maybe_start_compound_statement(thd))) MYSQL_YYABORT; Lex->sphead->new_cont_backpatch(NULL); } @@ -3718,13 +3876,13 @@ sp_statement: | ident_directly_assignable { // Direct procedure call (without the CALL keyword) - if (Lex->call_statement_start(thd, &$1)) + if (unlikely(Lex->call_statement_start(thd, &$1))) MYSQL_YYABORT; } opt_sp_cparam_list | ident_directly_assignable '.' 
ident { - if (Lex->call_statement_start(thd, &$1, &$3)) + if (unlikely(Lex->call_statement_start(thd, &$1, &$3))) MYSQL_YYABORT; } opt_sp_cparam_list @@ -3746,7 +3904,7 @@ sp_proc_stmt_statement: sp->m_flags|= sp_get_flags_for_command(lex); /* "USE db" doesn't work in a procedure */ - if (lex->sql_command == SQLCOM_CHANGE_DB) + if (unlikely(lex->sql_command == SQLCOM_CHANGE_DB)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "USE")); /* Don't add an instruction for SET statements, since all @@ -3759,7 +3917,7 @@ sp_proc_stmt_statement: { sp_instr_stmt *i=new (thd->mem_root) sp_instr_stmt(sp->instructions(), lex->spcont, lex); - if (i == NULL) + if (unlikely(i == NULL)) MYSQL_YYABORT; /* @@ -3771,13 +3929,13 @@ sp_proc_stmt_statement: i->m_query.length= lip->get_ptr() - sp->m_tmp_query; else i->m_query.length= lip->get_tok_start() - sp->m_tmp_query;; - if (!(i->m_query.str= strmake_root(thd->mem_root, - sp->m_tmp_query, - i->m_query.length)) || - sp->add_instr(i)) + if (unlikely(!(i->m_query.str= strmake_root(thd->mem_root, + sp->m_tmp_query, + i->m_query.length))) || + unlikely(sp->add_instr(i))) MYSQL_YYABORT; } - if (sp->restore_lex(thd)) + if (unlikely(sp->restore_lex(thd))) MYSQL_YYABORT; } ; @@ -3789,16 +3947,17 @@ sp_proc_stmt_return: { LEX *lex= Lex; sp_head *sp= lex->sphead; - if (sp->m_handler->add_instr_freturn(thd, sp, lex->spcont, - $3, lex) || - sp->restore_lex(thd)) + if (unlikely(sp->m_handler->add_instr_freturn(thd, sp, lex->spcont, + $3, lex)) || + unlikely(sp->restore_lex(thd))) MYSQL_YYABORT; } | RETURN_SYM { LEX *lex= Lex; sp_head *sp= lex->sphead; - if (sp->m_handler->add_instr_preturn(thd, sp, lex->spcont)) + if (unlikely(sp->m_handler->add_instr_preturn(thd, sp, + lex->spcont))) MYSQL_YYABORT; } ; @@ -3810,24 +3969,24 @@ reset_lex_expr: sp_proc_stmt_exit: EXIT_SYM { - if (Lex->sp_exit_statement(thd, NULL)) + if (unlikely(Lex->sp_exit_statement(thd, NULL))) MYSQL_YYABORT; } | EXIT_SYM label_ident { - if (Lex->sp_exit_statement(thd, &$2, NULL)) + if (unlikely(Lex->sp_exit_statement(thd, &$2, NULL))) MYSQL_YYABORT; } | EXIT_SYM WHEN_SYM reset_lex_expr { - if (Lex->sp_exit_statement(thd, $3) || - Lex->sphead->restore_lex(thd)) + if (unlikely(Lex->sp_exit_statement(thd, $3)) || + unlikely(Lex->sphead->restore_lex(thd))) MYSQL_YYABORT; } | EXIT_SYM label_ident WHEN_SYM reset_lex_expr { - if (Lex->sp_exit_statement(thd, &$2, $4) || - Lex->sphead->restore_lex(thd)) + if (unlikely(Lex->sp_exit_statement(thd, &$2, $4)) || + unlikely(Lex->sphead->restore_lex(thd))) MYSQL_YYABORT; } ; @@ -3835,24 +3994,24 @@ sp_proc_stmt_exit: sp_proc_stmt_continue: CONTINUE_SYM { - if (Lex->sp_continue_statement(thd, NULL)) + if (unlikely(Lex->sp_continue_statement(thd, NULL))) MYSQL_YYABORT; } | CONTINUE_SYM label_ident { - if (Lex->sp_continue_statement(thd, &$2, NULL)) + if (unlikely(Lex->sp_continue_statement(thd, &$2, NULL))) MYSQL_YYABORT; } | CONTINUE_SYM WHEN_SYM reset_lex_expr { - if (Lex->sp_continue_statement(thd, $3) || - Lex->sphead->restore_lex(thd)) + if (unlikely(Lex->sp_continue_statement(thd, $3)) || + unlikely(Lex->sphead->restore_lex(thd))) MYSQL_YYABORT; } | CONTINUE_SYM label_ident WHEN_SYM reset_lex_expr { - if (Lex->sp_continue_statement(thd, &$2, $4) || - Lex->sphead->restore_lex(thd)) + if (unlikely(Lex->sp_continue_statement(thd, &$2, $4)) || + unlikely(Lex->sphead->restore_lex(thd))) MYSQL_YYABORT; } ; @@ -3861,7 +4020,7 @@ sp_proc_stmt_continue: sp_proc_stmt_leave: LEAVE_SYM label_ident { - if (Lex->sp_leave_statement(thd, &$2)) + if 
(unlikely(Lex->sp_leave_statement(thd, &$2))) MYSQL_YYABORT; } ; @@ -3869,7 +4028,7 @@ sp_proc_stmt_leave: sp_proc_stmt_iterate: ITERATE_SYM label_ident { - if (Lex->sp_iterate_statement(thd, &$2)) + if (unlikely(Lex->sp_iterate_statement(thd, &$2))) MYSQL_YYABORT; } ; @@ -3877,7 +4036,7 @@ sp_proc_stmt_iterate: sp_proc_stmt_goto: GOTO_SYM label_ident { - if (Lex->sp_goto_statement(thd, &$2)) + if (unlikely(Lex->sp_goto_statement(thd, &$2))) MYSQL_YYABORT; } ; @@ -3892,7 +4051,8 @@ remember_lex: assignment_source_lex: { DBUG_ASSERT(Lex->sphead); - if (!($$= new (thd->mem_root) sp_assignment_lex(thd, thd->lex))) + if (unlikely(!($$= new (thd->mem_root) + sp_assignment_lex(thd, thd->lex)))) MYSQL_YYABORT; } ; @@ -3910,7 +4070,7 @@ assignment_source_expr: $$->sp_lex_in_use= true; $$->set_item_and_free_list($3, thd->free_list); thd->free_list= NULL; - if ($$->sphead->restore_lex(thd)) + if (unlikely($$->sphead->restore_lex(thd))) MYSQL_YYABORT; } ; @@ -3926,7 +4086,7 @@ for_loop_bound_expr: $$= $1; $$->sp_lex_in_use= true; $$->set_item_and_free_list($3, NULL); - if ($$->sphead->restore_lex(thd)) + if (unlikely($$->sphead->restore_lex(thd))) MYSQL_YYABORT; } ; @@ -3934,7 +4094,7 @@ for_loop_bound_expr: cursor_actual_parameters: assignment_source_expr { - if (!($$= new (thd->mem_root) List<sp_assignment_lex>)) + if (unlikely(!($$= new (thd->mem_root) List<sp_assignment_lex>))) MYSQL_YYABORT; $$->push_back($1, thd->mem_root); } @@ -3953,7 +4113,7 @@ opt_parenthesized_cursor_actual_parameters: sp_proc_stmt_open: OPEN_SYM ident opt_parenthesized_cursor_actual_parameters { - if (Lex->sp_open_cursor(thd, &$2, $3)) + if (unlikely(Lex->sp_open_cursor(thd, &$2, $3))) MYSQL_YYABORT; } ; @@ -3961,17 +4121,17 @@ sp_proc_stmt_open: sp_proc_stmt_fetch_head: FETCH_SYM ident INTO { - if (Lex->sp_add_cfetch(thd, &$2)) + if (unlikely(Lex->sp_add_cfetch(thd, &$2))) MYSQL_YYABORT; } | FETCH_SYM FROM ident INTO { - if (Lex->sp_add_cfetch(thd, &$3)) + if (unlikely(Lex->sp_add_cfetch(thd, &$3))) MYSQL_YYABORT; } | FETCH_SYM NEXT_SYM FROM ident INTO { - if (Lex->sp_add_cfetch(thd, &$4)) + if (unlikely(Lex->sp_add_cfetch(thd, &$4))) MYSQL_YYABORT; } ; @@ -3988,12 +4148,12 @@ sp_proc_stmt_close: uint offset; sp_instr_cclose *i; - if (! 
lex->spcont->find_cursor(&$2, &offset, false)) + if (unlikely(!lex->spcont->find_cursor(&$2, &offset, false))) my_yyabort_error((ER_SP_CURSOR_MISMATCH, MYF(0), $2.str)); i= new (thd->mem_root) sp_instr_cclose(sp->instructions(), lex->spcont, offset); - if (i == NULL || - sp->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(sp->add_instr(i))) MYSQL_YYABORT; } ; @@ -4006,7 +4166,7 @@ sp_fetch_list: sp_pcontext *spc= lex->spcont; sp_variable *spv; - if (!spc || !(spv = spc->find_variable(&$1, false))) + if (unlikely(!spc || !(spv = spc->find_variable(&$1, false)))) my_yyabort_error((ER_SP_UNDECLARED_VAR, MYF(0), $1.str)); /* An SP local variable */ @@ -4020,7 +4180,7 @@ sp_fetch_list: sp_pcontext *spc= lex->spcont; sp_variable *spv; - if (!spc || !(spv = spc->find_variable(&$3, false))) + if (unlikely(!spc || !(spv = spc->find_variable(&$3, false)))) my_yyabort_error((ER_SP_UNDECLARED_VAR, MYF(0), $3.str)); /* An SP local variable */ @@ -4039,12 +4199,12 @@ sp_if: uint ip= sp->instructions(); sp_instr_jump_if_not *i= new (thd->mem_root) sp_instr_jump_if_not(ip, ctx, $2, lex); - if (i == NULL || - sp->push_backpatch(thd, i, ctx->push_label(thd, &empty_clex_str, 0)) || - sp->add_cont_backpatch(i) || - sp->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(sp->push_backpatch(thd, i, ctx->push_label(thd, &empty_clex_str, 0))) || + unlikely(sp->add_cont_backpatch(i)) || + unlikely(sp->add_instr(i))) MYSQL_YYABORT; - if (sp->restore_lex(thd)) + if (unlikely(sp->restore_lex(thd))) MYSQL_YYABORT; } sp_proc_stmts1_implicit_block @@ -4053,8 +4213,8 @@ sp_if: sp_pcontext *ctx= Lex->spcont; uint ip= sp->instructions(); sp_instr_jump *i= new (thd->mem_root) sp_instr_jump(ip, ctx); - if (i == NULL || - sp->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(sp->add_instr(i))) MYSQL_YYABORT; sp->backpatch(ctx->pop_label()); sp->push_backpatch(thd, i, ctx->push_label(thd, &empty_clex_str, 0)); @@ -4076,7 +4236,7 @@ sp_elseifs: case_stmt_specification: CASE_SYM { - if (Lex->maybe_start_compound_statement(thd)) + if (unlikely(Lex->maybe_start_compound_statement(thd))) MYSQL_YYABORT; /** @@ -4147,10 +4307,9 @@ case_stmt_body: { Lex->sphead->reset_lex(thd); /* For expr $2 */ } expr { - if (Lex->case_stmt_action_expr($2)) + if (unlikely(Lex->case_stmt_action_expr($2))) MYSQL_YYABORT; - - if (Lex->sphead->restore_lex(thd)) + if (unlikely(Lex->sphead->restore_lex(thd))) MYSQL_YYABORT; } simple_when_clause_list @@ -4179,16 +4338,16 @@ simple_when_clause: /* Simple case: = */ LEX *lex= Lex; - if (lex->case_stmt_action_when($3, true)) + if (unlikely(lex->case_stmt_action_when($3, true))) MYSQL_YYABORT; /* For expr $3 */ - if (lex->sphead->restore_lex(thd)) + if (unlikely(lex->sphead->restore_lex(thd))) MYSQL_YYABORT; } THEN_SYM sp_proc_stmts1_implicit_block { - if (Lex->case_stmt_action_then()) + if (unlikely(Lex->case_stmt_action_then())) MYSQL_YYABORT; } ; @@ -4201,16 +4360,16 @@ searched_when_clause: expr { LEX *lex= Lex; - if (lex->case_stmt_action_when($3, false)) + if (unlikely(lex->case_stmt_action_when($3, false))) MYSQL_YYABORT; /* For expr $3 */ - if (lex->sphead->restore_lex(thd)) + if (unlikely(lex->sphead->restore_lex(thd))) MYSQL_YYABORT; } THEN_SYM sp_proc_stmts1_implicit_block { - if (Lex->case_stmt_action_then()) + if (unlikely(Lex->case_stmt_action_then())) MYSQL_YYABORT; } ; @@ -4223,8 +4382,8 @@ else_clause_opt: uint ip= sp->instructions(); sp_instr_error *i= new (thd->mem_root) sp_instr_error(ip, lex->spcont, ER_SP_CASE_NOT_FOUND); - if (i == NULL || - sp->add_instr(i)) + if (unlikely(i 
== NULL) || + unlikely(sp->add_instr(i))) MYSQL_YYABORT; } | ELSE sp_proc_stmts1_implicit_block @@ -4238,7 +4397,7 @@ sp_opt_label: sp_block_label: labels_declaration_oracle { - if (Lex->spcont->block_label_declare(&$1)) + if (unlikely(Lex->spcont->block_label_declare(&$1))) MYSQL_YYABORT; $$= $1; } @@ -4249,14 +4408,14 @@ sp_labeled_block: BEGIN_SYM { Lex->sp_block_init(thd, &$1); - if (Lex->sp_block_with_exceptions_finalize_declarations(thd)) + if (unlikely(Lex->sp_block_with_exceptions_finalize_declarations(thd))) MYSQL_YYABORT; } sp_block_statements_and_exceptions END sp_opt_label { - if (Lex->sp_block_finalize(thd, Lex_spblock($4), &$6)) + if (unlikely(Lex->sp_block_finalize(thd, Lex_spblock($4), &$6))) MYSQL_YYABORT; } | sp_block_label @@ -4266,7 +4425,7 @@ sp_labeled_block: } sp_decl_body_list { - if (Lex->sp_block_with_exceptions_finalize_declarations(thd)) + if (unlikely(Lex->sp_block_with_exceptions_finalize_declarations(thd))) MYSQL_YYABORT; } BEGIN_SYM @@ -4275,7 +4434,7 @@ sp_labeled_block: sp_opt_label { $4.hndlrs+= $7.hndlrs; - if (Lex->sp_block_finalize(thd, $4, &$9)) + if (unlikely(Lex->sp_block_finalize(thd, $4, &$9))) MYSQL_YYABORT; } ; @@ -4288,27 +4447,27 @@ opt_not_atomic: sp_unlabeled_block: BEGIN_SYM opt_not_atomic { - if (Lex->maybe_start_compound_statement(thd)) + if (unlikely(Lex->maybe_start_compound_statement(thd))) MYSQL_YYABORT; Lex->sp_block_init(thd); - if (Lex->sp_block_with_exceptions_finalize_declarations(thd)) + if (unlikely(Lex->sp_block_with_exceptions_finalize_declarations(thd))) MYSQL_YYABORT; } sp_block_statements_and_exceptions END { - if (Lex->sp_block_finalize(thd, Lex_spblock($4))) + if (unlikely(Lex->sp_block_finalize(thd, Lex_spblock($4)))) MYSQL_YYABORT; } | DECLARE_SYM { - if (Lex->maybe_start_compound_statement(thd)) + if (unlikely(Lex->maybe_start_compound_statement(thd))) MYSQL_YYABORT; Lex->sp_block_init(thd); } sp_decl_body_list { - if (Lex->sp_block_with_exceptions_finalize_declarations(thd)) + if (unlikely(Lex->sp_block_with_exceptions_finalize_declarations(thd))) MYSQL_YYABORT; } BEGIN_SYM @@ -4316,7 +4475,7 @@ sp_unlabeled_block: END { $3.hndlrs+= $6.hndlrs; - if (Lex->sp_block_finalize(thd, $3)) + if (unlikely(Lex->sp_block_finalize(thd, $3))) MYSQL_YYABORT; } ; @@ -4331,14 +4490,14 @@ sp_body: } opt_sp_decl_body_list { - if (Lex->sp_block_with_exceptions_finalize_declarations(thd)) + if (unlikely(Lex->sp_block_with_exceptions_finalize_declarations(thd))) MYSQL_YYABORT; } BEGIN_SYM sp_block_statements_and_exceptions { $2.hndlrs+= $5.hndlrs; - if (Lex->sp_block_finalize(thd, $2)) + if (unlikely(Lex->sp_block_finalize(thd, $2))) MYSQL_YYABORT; } END @@ -4348,13 +4507,12 @@ sp_block_statements_and_exceptions: sp_instr_addr sp_proc_stmts { - if (Lex->sp_block_with_exceptions_finalize_executable_section(thd, - $1)) + if (unlikely(Lex->sp_block_with_exceptions_finalize_executable_section(thd, $1))) MYSQL_YYABORT; } opt_exception_clause { - if (Lex->sp_block_with_exceptions_finalize_exceptions(thd, $1, $4)) + if (unlikely(Lex->sp_block_with_exceptions_finalize_exceptions(thd, $1, $4))) MYSQL_YYABORT; $$.init($4); } @@ -4373,14 +4531,14 @@ exception_handlers: exception_handler: WHEN_SYM { - if (Lex->sp_handler_declaration_init(thd, sp_handler::EXIT)) + if (unlikely(Lex->sp_handler_declaration_init(thd, sp_handler::EXIT))) MYSQL_YYABORT; } sp_hcond_list THEN_SYM sp_proc_stmts1_implicit_block { - if (Lex->sp_handler_declaration_finalize(thd, sp_handler::EXIT)) + if (unlikely(Lex->sp_handler_declaration_finalize(thd, sp_handler::EXIT))) 
MYSQL_YYABORT; } ; @@ -4394,7 +4552,7 @@ opt_sp_for_loop_direction: sp_for_loop_index_and_bounds: ident_directly_assignable sp_for_loop_bounds { - if (Lex->sp_for_loop_declarations(thd, &$$, &$1, $2)) + if (unlikely(Lex->sp_for_loop_declarations(thd, &$$, &$1, $2))) MYSQL_YYABORT; } ; @@ -4417,7 +4575,8 @@ sp_for_loop_bounds: } | IN_SYM opt_sp_for_loop_direction '(' sp_cursor_stmt ')' { - if (Lex->sp_for_loop_implicit_cursor_statement(thd, &$$, $4)) + if (unlikely(Lex->sp_for_loop_implicit_cursor_statement(thd, &$$, + $4))) MYSQL_YYABORT; } ; @@ -4430,8 +4589,8 @@ loop_body: sp_label *lab= lex->spcont->last_label(); /* Jumping back */ sp_instr_jump *i= new (thd->mem_root) sp_instr_jump(ip, lex->spcont, lab->ip); - if (i == NULL || - lex->sphead->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(lex->sphead->add_instr(i))) MYSQL_YYABORT; } ; @@ -4440,14 +4599,14 @@ while_body: expr LOOP_SYM { LEX *lex= Lex; - if (lex->sp_while_loop_expression(thd, $1)) + if (unlikely(lex->sp_while_loop_expression(thd, $1))) MYSQL_YYABORT; - if (lex->sphead->restore_lex(thd)) + if (unlikely(lex->sphead->restore_lex(thd))) MYSQL_YYABORT; } sp_proc_stmts1 END LOOP_SYM { - if (Lex->sp_while_loop_finalize(thd)) + if (unlikely(Lex->sp_while_loop_finalize(thd))) MYSQL_YYABORT; } ; @@ -4462,10 +4621,10 @@ repeat_body: sp_label *lab= lex->spcont->last_label(); /* Jumping back */ sp_instr_jump_if_not *i= new (thd->mem_root) sp_instr_jump_if_not(ip, lex->spcont, $4, lab->ip, lex); - if (i == NULL || - lex->sphead->add_instr(i)) + if (unlikely(i == NULL) || + unlikely(lex->sphead->add_instr(i))) MYSQL_YYABORT; - if (lex->sphead->restore_lex(thd)) + if (unlikely(lex->sphead->restore_lex(thd))) MYSQL_YYABORT; /* We can shortcut the cont_backpatch here */ i->m_cont_dest= ip+1; @@ -4475,7 +4634,7 @@ repeat_body: pop_sp_loop_label: sp_opt_label { - if (Lex->sp_pop_loop_label(thd, &$1)) + if (unlikely(Lex->sp_pop_loop_label(thd, &$1))) MYSQL_YYABORT; } ; @@ -4483,14 +4642,14 @@ pop_sp_loop_label: sp_labeled_control: labels_declaration_oracle LOOP_SYM { - if (Lex->sp_push_loop_label(thd, &$1)) + if (unlikely(Lex->sp_push_loop_label(thd, &$1))) MYSQL_YYABORT; } loop_body pop_sp_loop_label { } | labels_declaration_oracle WHILE_SYM { - if (Lex->sp_push_loop_label(thd, &$1)) + if (unlikely(Lex->sp_push_loop_label(thd, &$1))) MYSQL_YYABORT; Lex->sphead->reset_lex(thd); } @@ -4503,28 +4662,28 @@ sp_labeled_control: } sp_for_loop_index_and_bounds { - if (Lex->sp_push_loop_label(thd, &$1)) // The inner WHILE block + if (unlikely(Lex->sp_push_loop_label(thd, &$1))) // The inner WHILE block MYSQL_YYABORT; - if (Lex->sp_for_loop_condition_test(thd, $4)) + if (unlikely(Lex->sp_for_loop_condition_test(thd, $4))) MYSQL_YYABORT; } LOOP_SYM sp_proc_stmts1 END LOOP_SYM { - if (Lex->sp_for_loop_finalize(thd, $4)) + if (unlikely(Lex->sp_for_loop_finalize(thd, $4))) MYSQL_YYABORT; } pop_sp_loop_label // The inner WHILE block { Lex_spblock tmp; tmp.curs= MY_TEST($4.m_implicit_cursor); - if (Lex->sp_block_finalize(thd, tmp)) // The outer DECLARE..BEGIN..END + if (unlikely(Lex->sp_block_finalize(thd, tmp))) // The outer DECLARE..BEGIN..END MYSQL_YYABORT; } | labels_declaration_oracle REPEAT_SYM { - if (Lex->sp_push_loop_label(thd, &$1)) + if (unlikely(Lex->sp_push_loop_label(thd, &$1))) MYSQL_YYABORT; } repeat_body pop_sp_loop_label @@ -4534,7 +4693,7 @@ sp_labeled_control: sp_unlabeled_control: LOOP_SYM { - if (Lex->sp_push_loop_empty_label(thd)) + if (unlikely(Lex->sp_push_loop_empty_label(thd))) MYSQL_YYABORT; } loop_body @@ -4543,7 +4702,7 
@@ sp_unlabeled_control: } | WHILE_SYM { - if (Lex->sp_push_loop_empty_label(thd)) + if (unlikely(Lex->sp_push_loop_empty_label(thd))) MYSQL_YYABORT; Lex->sphead->reset_lex(thd); } @@ -4554,15 +4713,15 @@ sp_unlabeled_control: | FOR_SYM { // See "The FOR LOOP statement" comments in sql_lex.cc - if (Lex->maybe_start_compound_statement(thd)) + if (unlikely(Lex->maybe_start_compound_statement(thd))) MYSQL_YYABORT; Lex->sp_block_init(thd); // The outer DECLARE..BEGIN..END block } sp_for_loop_index_and_bounds { - if (Lex->sp_push_loop_empty_label(thd)) // The inner WHILE block + if (unlikely(Lex->sp_push_loop_empty_label(thd))) // The inner WHILE block MYSQL_YYABORT; - if (Lex->sp_for_loop_condition_test(thd, $3)) + if (unlikely(Lex->sp_for_loop_condition_test(thd, $3))) MYSQL_YYABORT; } LOOP_SYM @@ -4571,15 +4730,15 @@ sp_unlabeled_control: { Lex_spblock tmp; tmp.curs= MY_TEST($3.m_implicit_cursor); - if (Lex->sp_for_loop_finalize(thd, $3)) + if (unlikely(Lex->sp_for_loop_finalize(thd, $3))) MYSQL_YYABORT; Lex->sp_pop_loop_empty_label(thd); // The inner WHILE block - if (Lex->sp_block_finalize(thd, tmp)) // The outer DECLARE..BEGIN..END + if (unlikely(Lex->sp_block_finalize(thd, tmp))) // The outer DECLARE..BEGIN..END MYSQL_YYABORT; } | REPEAT_SYM { - if (Lex->sp_push_loop_empty_label(thd)) + if (unlikely(Lex->sp_push_loop_empty_label(thd))) MYSQL_YYABORT; } repeat_body @@ -4798,7 +4957,7 @@ tablespace_name: LEX *lex= Lex; lex->alter_tablespace_info= (new (thd->mem_root) st_alter_tablespace()); - if (lex->alter_tablespace_info == NULL) + if (unlikely(lex->alter_tablespace_info == NULL)) MYSQL_YYABORT; lex->alter_tablespace_info->tablespace_name= $1.str; lex->sql_command= SQLCOM_ALTER_TABLESPACE; @@ -4811,7 +4970,7 @@ logfile_group_name: LEX *lex= Lex; lex->alter_tablespace_info= (new (thd->mem_root) st_alter_tablespace()); - if (lex->alter_tablespace_info == NULL) + if (unlikely(lex->alter_tablespace_info == NULL)) MYSQL_YYABORT; lex->alter_tablespace_info->logfile_group_name= $1.str; lex->sql_command= SQLCOM_ALTER_TABLESPACE; @@ -4888,7 +5047,7 @@ opt_ts_nodegroup: NODEGROUP_SYM opt_equal real_ulong_num { LEX *lex= Lex; - if (lex->alter_tablespace_info->nodegroup_id != UNDEF_NODEGROUP) + if (unlikely(lex->alter_tablespace_info->nodegroup_id != UNDEF_NODEGROUP)) my_yyabort_error((ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"NODEGROUP")); lex->alter_tablespace_info->nodegroup_id= $3; } @@ -4898,7 +5057,7 @@ opt_ts_comment: COMMENT_SYM opt_equal TEXT_STRING_sys { LEX *lex= Lex; - if (lex->alter_tablespace_info->ts_comment != NULL) + if (unlikely(lex->alter_tablespace_info->ts_comment != NULL)) my_yyabort_error((ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"COMMENT")); lex->alter_tablespace_info->ts_comment= $3.str; } @@ -4908,7 +5067,7 @@ opt_ts_engine: opt_storage ENGINE_SYM opt_equal storage_engines { LEX *lex= Lex; - if (lex->alter_tablespace_info->storage_engine != NULL) + if (unlikely(lex->alter_tablespace_info->storage_engine != NULL)) my_yyabort_error((ER_FILEGROUP_OPTION_ONLY_ONCE, MYF(0), "STORAGE ENGINE")); lex->alter_tablespace_info->storage_engine= $4; @@ -4929,7 +5088,7 @@ ts_wait: | NO_WAIT_SYM { LEX *lex= Lex; - if (!(lex->alter_tablespace_info->wait_until_completed)) + if (unlikely(!(lex->alter_tablespace_info->wait_until_completed))) my_yyabort_error((ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"NO_WAIT")); lex->alter_tablespace_info->wait_until_completed= FALSE; } @@ -4939,40 +5098,8 @@ size_number: real_ulonglong_num { $$= $1;} | IDENT_sys { - ulonglong number; - uint text_shift_number= 0; - 
longlong prefix_number; - const char *start_ptr= $1.str; - size_t str_len= $1.length; - const char *end_ptr= start_ptr + str_len; - int error; - prefix_number= my_strtoll10(start_ptr, (char**) &end_ptr, &error); - if ((start_ptr + str_len - 1) == end_ptr) - { - switch (end_ptr[0]) - { - case 'g': - case 'G': - text_shift_number+=10; - /* fall through */ - case 'm': - case 'M': - text_shift_number+=10; - /* fall through */ - case 'k': - case 'K': - text_shift_number+=10; - break; - default: - my_yyabort_error((ER_WRONG_SIZE_NUMBER, MYF(0))); - } - if (prefix_number >> 31) - my_yyabort_error((ER_SIZE_OVERFLOW_ERROR, MYF(0))); - number= prefix_number << text_shift_number; - } - else - my_yyabort_error((ER_WRONG_SIZE_NUMBER, MYF(0))); - $$= number; + if ($1.to_size_number(&$$)) + MYSQL_YYABORT; } ; @@ -5001,7 +5128,7 @@ create_body: Lex->create_info.add(DDL_options_st::OPT_LIKE); TABLE_LIST *src_table= Lex->select_lex.add_table_to_list(thd, $1, NULL, 0, TL_READ, MDL_SHARED_READ); - if (! src_table) + if (unlikely(! src_table)) MYSQL_YYABORT; /* CREATE TABLE ... LIKE is not allowed for views. */ src_table->required_type= TABLE_TYPE_NORMAL; @@ -5015,7 +5142,7 @@ create_like: opt_create_select: /* empty */ {} - | opt_duplicate opt_as create_select_query_expression + | opt_duplicate opt_as create_select_query_expression opt_versioning_option ; create_select_query_expression: @@ -5085,11 +5212,8 @@ partitioning: { LEX *lex= Lex; lex->part_info= new (thd->mem_root) partition_info(); - if (!lex->part_info) - { - mem_alloc_error(sizeof(partition_info)); + if (unlikely(!lex->part_info)) MYSQL_YYABORT; - } if (lex->sql_command == SQLCOM_ALTER_TABLE) { lex->alter_info.partition_flags|= ALTER_PARTITION_INFO; @@ -5103,7 +5227,7 @@ have_partitioning: { #ifdef WITH_PARTITION_STORAGE_ENGINE LEX_CSTRING partition_name={STRING_WITH_LEN("partition")}; - if (!plugin_is_ready(&partition_name, MYSQL_STORAGE_ENGINE_PLUGIN)) + if (unlikely(!plugin_is_ready(&partition_name, MYSQL_STORAGE_ENGINE_PLUGIN))) my_yyabort_error((ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-partition")); #else @@ -5116,12 +5240,12 @@ have_partitioning: partition_entry: PARTITION_SYM { - LEX *lex= Lex; - if (!lex->part_info) + if (unlikely(!Lex->part_info)) { thd->parse_error(ER_PARTITION_ENTRY_ERROR); MYSQL_YYABORT; } + DBUG_ASSERT(Lex->part_info->table); /* We enter here when opening the frm file to translate partition info string into part_info data structure. 
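/*
  The unlikely() wrappers added throughout these sql_yacc.yy hunks are
  branch-prediction hints around parser error paths.  As a rough sketch
  (assuming the usual GCC/Clang __builtin_expect idiom; the exact MariaDB
  definitions live in the server's own headers and may differ in detail),
  such macros are conventionally defined as:

    #define likely(x)   __builtin_expect(((x) != 0), 1)
    #define unlikely(x) __builtin_expect(((x) != 0), 0)

  Marking the MYSQL_YYABORT error branches as unlikely() lets the compiler
  lay out the generated parser so the error-free path stays contiguous,
  which is why most hunks in this file are a mechanical rewrap of an
  existing condition rather than a behaviour change.
*/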
@@ -5162,6 +5286,12 @@ part_type_def: } | LIST_SYM part_column_list { Lex->part_info->part_type= LIST_PARTITION; } + | SYSTEM_TIME_SYM + { + if (unlikely(Lex->part_info->vers_init_info(thd))) + MYSQL_YYABORT; + } + opt_versioning_rotation ; opt_linear: @@ -5204,12 +5334,10 @@ part_field_item: { partition_info *part_info= Lex->part_info; part_info->num_columns++; - if (part_info->part_field_list.push_back($1.str, thd->mem_root)) - { - mem_alloc_error(1); + if (unlikely(part_info->part_field_list.push_back($1.str, + thd->mem_root))) MYSQL_YYABORT; - } - if (part_info->num_columns > MAX_REF_PARTS) + if (unlikely(part_info->num_columns > MAX_REF_PARTS)) my_yyabort_error((ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0), "list of partition fields")); } @@ -5229,8 +5357,8 @@ part_func: '(' remember_name part_func_expr remember_end ')' { partition_info *part_info= Lex->part_info; - if (part_info->set_part_expr(thd, $2 + 1, $3, $4, FALSE)) - { MYSQL_YYABORT; } + if (unlikely(part_info->set_part_expr(thd, $2 + 1, $3, $4, FALSE))) + MYSQL_YYABORT; part_info->num_columns= 1; part_info->column_list= FALSE; } @@ -5239,8 +5367,8 @@ part_func: sub_part_func: '(' remember_name part_func_expr remember_end ')' { - if (Lex->part_info->set_part_expr(thd, $2 + 1, $3, $4, TRUE)) - { MYSQL_YYABORT; } + if (unlikely(Lex->part_info->set_part_expr(thd, $2 + 1, $3, $4, TRUE))) + MYSQL_YYABORT; } ; @@ -5251,7 +5379,7 @@ opt_num_parts: { uint num_parts= $2; partition_info *part_info= Lex->part_info; - if (num_parts == 0) + if (unlikely(num_parts == 0)) my_yyabort_error((ER_NO_PARTS_ERROR, MYF(0), "partitions")); part_info->num_parts= num_parts; @@ -5283,12 +5411,11 @@ sub_part_field_item: ident { partition_info *part_info= Lex->part_info; - if (part_info->subpart_field_list.push_back($1.str, thd->mem_root)) - { - mem_alloc_error(1); + if (unlikely(part_info->subpart_field_list.push_back($1.str, + thd->mem_root))) MYSQL_YYABORT; - } - if (part_info->subpart_field_list.elements > MAX_REF_PARTS) + + if (unlikely(part_info->subpart_field_list.elements > MAX_REF_PARTS)) my_yyabort_error((ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0), "list of subpartition fields")); } @@ -5297,7 +5424,7 @@ sub_part_field_item: part_func_expr: bit_expr { - if (!Lex->safe_to_cache_query) + if (unlikely(!Lex->safe_to_cache_query)) { thd->parse_error(ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR); MYSQL_YYABORT; @@ -5312,7 +5439,7 @@ opt_num_subparts: { uint num_parts= $2; LEX *lex= Lex; - if (num_parts == 0) + if (unlikely(num_parts == 0)) my_yyabort_error((ER_NO_PARTS_ERROR, MYF(0), "subpartitions")); lex->part_info->num_subparts= num_parts; lex->part_info->use_default_num_subpartitions= FALSE; @@ -5323,10 +5450,10 @@ part_defs: /* empty */ { partition_info *part_info= Lex->part_info; - if (part_info->part_type == RANGE_PARTITION) + if (unlikely(part_info->part_type == RANGE_PARTITION)) my_yyabort_error((ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), "RANGE")); - if (part_info->part_type == LIST_PARTITION) + if (unlikely(part_info->part_type == LIST_PARTITION)) my_yyabort_error((ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), "LIST")); } @@ -5336,8 +5463,8 @@ part_defs: uint count_curr_parts= part_info->partitions.elements; if (part_info->num_parts != 0) { - if (part_info->num_parts != - count_curr_parts) + if (unlikely(part_info->num_parts != + count_curr_parts)) { thd->parse_error(ER_PARTITION_WRONG_NO_PART_ERROR); MYSQL_YYABORT; @@ -5362,13 +5489,12 @@ part_definition: partition_info *part_info= Lex->part_info; partition_element *p_elem= new 
(thd->mem_root) partition_element(); - if (!p_elem || - part_info->partitions.push_back(p_elem, thd->mem_root)) - { - mem_alloc_error(sizeof(partition_element)); + if (unlikely(!p_elem) || + unlikely(part_info->partitions.push_back(p_elem, thd->mem_root))) MYSQL_YYABORT; - } + p_elem->part_state= PART_NORMAL; + p_elem->id= part_info->partitions.elements - 1; part_info->curr_part_elem= p_elem; part_info->current_partition= p_elem; part_info->use_default_partitions= FALSE; @@ -5386,7 +5512,7 @@ part_name: { partition_info *part_info= Lex->part_info; partition_element *p_elem= part_info->curr_part_elem; - if (check_ident_length(&$1)) + if (unlikely(check_ident_length(&$1))) MYSQL_YYABORT; p_elem->partition_name= $1.str; } @@ -5399,12 +5525,12 @@ opt_part_values: partition_info *part_info= lex->part_info; if (! lex->is_partition_management()) { - if (part_info->part_type == RANGE_PARTITION) - my_yyabort_error((ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), - "RANGE", "LESS THAN")); - if (part_info->part_type == LIST_PARTITION) - my_yyabort_error((ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), - "LIST", "IN")); + if (unlikely(part_info->error_if_requires_values())) + MYSQL_YYABORT; + if (unlikely(part_info->part_type == VERSIONING_PARTITION)) + my_yyabort_error((ER_VERS_WRONG_PARTS, MYF(0), + lex->create_last_non_select_table-> + table_name.str)); } else part_info->part_type= HASH_PARTITION; @@ -5415,7 +5541,7 @@ opt_part_values: partition_info *part_info= lex->part_info; if (! lex->is_partition_management()) { - if (part_info->part_type != RANGE_PARTITION) + if (unlikely(part_info->part_type != RANGE_PARTITION)) my_yyabort_error((ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), "RANGE", "LESS THAN")); } @@ -5429,7 +5555,7 @@ opt_part_values: partition_info *part_info= lex->part_info; if (! lex->is_partition_management()) { - if (part_info->part_type != LIST_PARTITION) + if (unlikely(part_info->part_type != LIST_PARTITION)) my_yyabort_error((ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), "LIST", "IN")); } @@ -5437,26 +5563,32 @@ opt_part_values: part_info->part_type= LIST_PARTITION; } part_values_in {} + | CURRENT_SYM + { + if (Lex->part_values_current(thd)) + MYSQL_YYABORT; + } + | HISTORY_SYM + { + if (Lex->part_values_history(thd)) + MYSQL_YYABORT; + } | DEFAULT { LEX *lex= Lex; partition_info *part_info= lex->part_info; if (! 
lex->is_partition_management()) { - if (part_info->part_type != LIST_PARTITION) + if (unlikely(part_info->part_type != LIST_PARTITION)) my_yyabort_error((ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), "LIST", "DEFAULT")); } else part_info->part_type= LIST_PARTITION; - if (part_info->init_column_part(thd)) - { + if (unlikely(part_info->init_column_part(thd))) MYSQL_YYABORT; - } - if (part_info->add_max_value(thd)) - { + if (unlikely(part_info->add_max_value(thd))) MYSQL_YYABORT; - } } ; @@ -5465,8 +5597,8 @@ part_func_max: { partition_info *part_info= Lex->part_info; - if (part_info->num_columns && - part_info->num_columns != 1U) + if (unlikely(part_info->num_columns && + part_info->num_columns != 1U)) { part_info->print_debug("Kilroy II", NULL); thd->parse_error(ER_PARTITION_COLUMN_LIST_ERROR); @@ -5474,14 +5606,10 @@ part_func_max: } else part_info->num_columns= 1U; - if (part_info->init_column_part(thd)) - { + if (unlikely(part_info->init_column_part(thd))) MYSQL_YYABORT; - } - if (part_info->add_max_value(thd)) - { + if (unlikely(part_info->add_max_value(thd))) MYSQL_YYABORT; - } } | part_value_item {} ; @@ -5495,9 +5623,9 @@ part_values_in: if (part_info->num_columns != 1U) { - if (!lex->is_partition_management() || - part_info->num_columns == 0 || - part_info->num_columns > MAX_REF_PARTS) + if (unlikely(!lex->is_partition_management() || + part_info->num_columns == 0 || + part_info->num_columns > MAX_REF_PARTS)) { part_info->print_debug("Kilroy III", NULL); thd->parse_error(ER_PARTITION_COLUMN_LIST_ERROR); @@ -5510,16 +5638,14 @@ part_values_in: we ADD or REORGANIZE partitions. Also can only happen for LIST partitions. */ - if (part_info->reorganize_into_single_field_col_val(thd)) - { + if (unlikely(part_info->reorganize_into_single_field_col_val(thd))) MYSQL_YYABORT; - } } } | '(' part_value_list ')' { partition_info *part_info= Lex->part_info; - if (part_info->num_columns < 2U) + if (unlikely(part_info->num_columns < 2U)) { thd->parse_error(ER_ROW_SINGLE_PARTITION_FIELD_ERROR); MYSQL_YYABORT; @@ -5538,12 +5664,10 @@ part_value_item: partition_info *part_info= Lex->part_info; part_info->print_debug("( part_value_item", NULL); /* Initialisation code needed for each list of value expressions */ - if (!(part_info->part_type == LIST_PARTITION && - part_info->num_columns == 1U) && - part_info->init_column_part(thd)) - { + if (unlikely(!(part_info->part_type == LIST_PARTITION && + part_info->num_columns == 1U) && + part_info->init_column_part(thd))) MYSQL_YYABORT; - } } part_value_item_list {} ')' @@ -5552,7 +5676,7 @@ part_value_item: part_info->print_debug(") part_value_item", NULL); if (part_info->num_columns == 0) part_info->num_columns= part_info->curr_list_object; - if (part_info->num_columns != part_info->curr_list_object) + if (unlikely(part_info->num_columns != part_info->curr_list_object)) { /* All value items lists must be of equal length, in some cases @@ -5578,15 +5702,13 @@ part_value_expr_item: MAXVALUE_SYM { partition_info *part_info= Lex->part_info; - if (part_info->part_type == LIST_PARTITION) + if (unlikely(part_info->part_type == LIST_PARTITION)) { thd->parse_error(ER_MAXVALUE_IN_VALUES_IN); MYSQL_YYABORT; } - if (part_info->add_max_value(thd)) - { + if (unlikely(part_info->add_max_value(thd))) MYSQL_YYABORT; - } } | bit_expr { @@ -5594,15 +5716,13 @@ part_value_expr_item: partition_info *part_info= lex->part_info; Item *part_expr= $1; - if (!lex->safe_to_cache_query) + if (unlikely(!lex->safe_to_cache_query)) { thd->parse_error(ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR); 
MYSQL_YYABORT; } - if (part_info->add_column_list_value(thd, part_expr)) - { + if (unlikely(part_info->add_column_list_value(thd, part_expr))) MYSQL_YYABORT; - } } ; @@ -5611,8 +5731,8 @@ opt_sub_partition: /* empty */ { partition_info *part_info= Lex->part_info; - if (part_info->num_subparts != 0 && - !part_info->use_default_subpartitions) + if (unlikely(part_info->num_subparts != 0 && + !part_info->use_default_subpartitions)) { /* We come here when we have defined subpartitions on the first @@ -5627,8 +5747,8 @@ opt_sub_partition: partition_info *part_info= Lex->part_info; if (part_info->num_subparts != 0) { - if (part_info->num_subparts != - part_info->count_curr_subparts) + if (unlikely(part_info->num_subparts != + part_info->count_curr_subparts)) { thd->parse_error(ER_PARTITION_WRONG_NO_SUBPART_ERROR); MYSQL_YYABORT; @@ -5636,7 +5756,7 @@ opt_sub_partition: } else if (part_info->count_curr_subparts > 0) { - if (part_info->partitions.elements > 1) + if (unlikely(part_info->partitions.elements > 1)) { thd->parse_error(ER_PARTITION_WRONG_NO_SUBPART_ERROR); MYSQL_YYABORT; @@ -5659,8 +5779,8 @@ sub_part_definition: partition_element *curr_part= part_info->current_partition; partition_element *sub_p_elem= new (thd->mem_root) partition_element(curr_part); - if (part_info->use_default_subpartitions && - part_info->partitions.elements >= 2) + if (unlikely(part_info->use_default_subpartitions && + part_info->partitions.elements >= 2)) { /* create table t1 (a int) @@ -5676,12 +5796,11 @@ sub_part_definition: thd->parse_error(ER_PARTITION_WRONG_NO_SUBPART_ERROR); MYSQL_YYABORT; } - if (!sub_p_elem || - curr_part->subpartitions.push_back(sub_p_elem, thd->mem_root)) - { - mem_alloc_error(sizeof(partition_element)); + if (unlikely(!sub_p_elem) || + unlikely(curr_part->subpartitions.push_back(sub_p_elem, thd->mem_root))) MYSQL_YYABORT; - } + + sub_p_elem->id= curr_part->subpartitions.elements - 1; part_info->curr_part_elem= sub_p_elem; part_info->use_default_subpartitions= FALSE; part_info->use_default_num_subpartitions= FALSE; @@ -5693,7 +5812,7 @@ sub_part_definition: sub_name: ident_or_text { - if (check_ident_length(&$1)) + if (unlikely(check_ident_length(&$1))) MYSQL_YYABORT; Lex->part_info->curr_part_elem->partition_name= $1.str; } @@ -5738,6 +5857,50 @@ opt_part_option: { Lex->part_info->curr_part_elem->part_comment= $3.str; } ; +opt_versioning_rotation: + /* empty */ {} + | INTERVAL_SYM expr interval opt_versioning_interval_start + { + partition_info *part_info= Lex->part_info; + if (unlikely(part_info->vers_set_interval($2, $3, $4))) + { + my_error(ER_PART_WRONG_VALUE, MYF(0), + Lex->create_last_non_select_table->table_name.str, + "INTERVAL"); + MYSQL_YYABORT; + } + } + | LIMIT ulonglong_num + { + partition_info *part_info= Lex->part_info; + if (unlikely(part_info->vers_set_limit($2))) + { + my_error(ER_PART_WRONG_VALUE, MYF(0), + Lex->create_last_non_select_table->table_name.str, + "LIMIT"); + MYSQL_YYABORT; + } + } + ; + + +opt_versioning_interval_start: + /* empty */ + { + $$= thd->query_start(); + } + | STARTS_SYM ulong_num + { + /* only allowed from mysql_unpack_partition() */ + if (unlikely(!Lex->part_info->table)) + { + thd->parse_error(ER_SYNTAX_ERROR, $1.pos()); + MYSQL_YYABORT; + } + $$= (ulong)$2; + } + ; + /* End of partition parser part */ @@ -5969,7 +6132,7 @@ create_table_option: larger values. 65535 pages, 16kb each means to sample 1GB, which is impractical. If at some point this needs to be extended, then we can store the higher bits from stats_sample_pages in .frm too. 
*/ - if ($3 == 0 || $3 > 0xffff) + if (unlikely($3 == 0 || $3 > 0xffff)) { thd->parse_error(); MYSQL_YYABORT; @@ -6074,43 +6237,70 @@ create_table_option: } | IDENT_sys equal TEXT_STRING_sys { - if ($3.length > ENGINE_OPTION_MAX_LENGTH) + if (unlikely($3.length > ENGINE_OPTION_MAX_LENGTH)) my_yyabort_error((ER_VALUE_TOO_LONG, MYF(0), $1.str)); - new (thd->mem_root) - engine_option_value($1, $3, true, &Lex->create_info.option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, $3, true, + &Lex->create_info.option_list, + &Lex->option_list_last); } | IDENT_sys equal ident { - if ($3.length > ENGINE_OPTION_MAX_LENGTH) + if (unlikely($3.length > ENGINE_OPTION_MAX_LENGTH)) my_yyabort_error((ER_VALUE_TOO_LONG, MYF(0), $1.str)); - new (thd->mem_root) - engine_option_value($1, $3, false, &Lex->create_info.option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, $3, false, + &Lex->create_info.option_list, + &Lex->option_list_last); } | IDENT_sys equal real_ulonglong_num { - new (thd->mem_root) - engine_option_value($1, $3, &Lex->create_info.option_list, - &Lex->option_list_last, thd->mem_root); + (void) new (thd->mem_root) + engine_option_value($1, $3, &Lex->create_info.option_list, + &Lex->option_list_last, thd->mem_root); } | IDENT_sys equal DEFAULT { - new (thd->mem_root) - engine_option_value($1, &Lex->create_info.option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, &Lex->create_info.option_list, + &Lex->option_list_last); } | SEQUENCE_SYM opt_equal choice { Lex->create_info.used_fields|= HA_CREATE_USED_SEQUENCE; Lex->create_info.sequence= ($3 == HA_CHOICE_YES); + } + | versioning_option + ; + +opt_versioning_option: + /* empty */ + | versioning_option + ; + +versioning_option: + WITH_SYSTEM_SYM VERSIONING_SYM + { + if (unlikely(Lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)) + { + if (DBUG_EVALUATE_IF("sysvers_force", 0, 1)) + { + my_error(ER_VERS_TEMPORARY, MYF(0)); + MYSQL_YYABORT; + } + } + else + { + Lex->alter_info.flags|= ALTER_ADD_SYSTEM_VERSIONING; + Lex->create_info.options|= HA_VERSIONED_TABLE; + } } ; default_charset: opt_default charset opt_equal charset_name_or_default { - if (Lex->create_info.add_table_option_default_charset($4)) + if (unlikely(Lex->create_info.add_table_option_default_charset($4))) MYSQL_YYABORT; } ; @@ -6119,13 +6309,11 @@ default_collation: opt_default COLLATE_SYM opt_equal collation_name_or_default { HA_CREATE_INFO *cinfo= &Lex->create_info; - if ((cinfo->used_fields & HA_CREATE_USED_DEFAULT_CHARSET) && - cinfo->default_table_charset && $4 && - !($4= merge_charset_and_collation(cinfo->default_table_charset, - $4))) - { + if (unlikely((cinfo->used_fields & HA_CREATE_USED_DEFAULT_CHARSET) && + cinfo->default_table_charset && $4 && + !($4= merge_charset_and_collation(cinfo->default_table_charset, + $4)))) MYSQL_YYABORT; - } Lex->create_info.default_table_charset= $4; Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET; @@ -6138,7 +6326,7 @@ storage_engines: plugin_ref plugin= ha_resolve_by_name(thd, &$1, thd->lex->create_info.tmp_table()); - if (plugin) + if (likely(plugin)) $$= plugin_hton(plugin); else { @@ -6157,7 +6345,7 @@ known_storage_engines: ident_or_text { plugin_ref plugin; - if ((plugin= ha_resolve_by_name(thd, &$1, false))) + if (likely((plugin= ha_resolve_by_name(thd, &$1, false)))) $$= plugin_hton(plugin); else my_yyabort_error((ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str)); @@ -6204,6 +6392,7 @@ field_list_item: 
column_def { } | key_def | constraint_def + | period_for_system_time ; column_def: @@ -6217,28 +6406,28 @@ key_def: key_or_index opt_if_not_exists opt_ident opt_USING_key_algorithm { Lex->option_list= NULL; - if (Lex->add_key(Key::MULTIPLE, &$3, $4, $2)) + if (unlikely(Lex->add_key(Key::MULTIPLE, &$3, $4, $2))) MYSQL_YYABORT; } '(' key_list ')' normal_key_options { } | key_or_index opt_if_not_exists ident TYPE_SYM btree_or_rtree { Lex->option_list= NULL; - if (Lex->add_key(Key::MULTIPLE, &$3, $5, $2)) + if (unlikely(Lex->add_key(Key::MULTIPLE, &$3, $5, $2))) MYSQL_YYABORT; } '(' key_list ')' normal_key_options { } | fulltext opt_key_or_index opt_if_not_exists opt_ident { Lex->option_list= NULL; - if (Lex->add_key($1, &$4, HA_KEY_ALG_UNDEF, $3)) + if (unlikely(Lex->add_key($1, &$4, HA_KEY_ALG_UNDEF, $3))) MYSQL_YYABORT; } '(' key_list ')' fulltext_key_options { } | spatial opt_key_or_index opt_if_not_exists opt_ident { Lex->option_list= NULL; - if (Lex->add_key($1, &$4, HA_KEY_ALG_UNDEF, $3)) + if (unlikely(Lex->add_key($1, &$4, HA_KEY_ALG_UNDEF, $3))) MYSQL_YYABORT; } '(' key_list ')' spatial_key_options { } @@ -6247,7 +6436,7 @@ key_def: opt_USING_key_algorithm { Lex->option_list= NULL; - if (Lex->add_key($2, $4.str ? &$4 : &$1, $5, $3)) + if (unlikely(Lex->add_key($2, $4.str ? &$4 : &$1, $5, $3))) MYSQL_YYABORT; } '(' key_list ')' normal_key_options { } @@ -6255,16 +6444,17 @@ key_def: TYPE_SYM btree_or_rtree { Lex->option_list= NULL; - if (Lex->add_key($2, $4.str ? &$4 : &$1, $6, $3)) + if (unlikely(Lex->add_key($2, $4.str ? &$4 : &$1, $6, $3))) MYSQL_YYABORT; } '(' key_list ')' normal_key_options { } | opt_constraint FOREIGN KEY_SYM opt_if_not_exists opt_ident { - if (Lex->check_add_key($4) || - !(Lex->last_key= (new (thd->mem_root) - Key(Key::MULTIPLE, $1.str ? &$1 : &$5, - HA_KEY_ALG_UNDEF, true, $4)))) + if (unlikely(Lex->check_add_key($4)) || + unlikely(!(Lex->last_key= (new (thd->mem_root) + Key(Key::MULTIPLE, + $1.str ? &$1 : &$5, + HA_KEY_ALG_UNDEF, true, $4))))) MYSQL_YYABORT; Lex->option_list= NULL; } @@ -6281,7 +6471,7 @@ key_def: lex->fk_update_opt, lex->fk_match_option, $4)); - if (key == NULL) + if (unlikely(key == NULL)) MYSQL_YYABORT; /* handle_if_exists_options() expectes the two keys in this order: @@ -6303,6 +6493,15 @@ constraint_def: } ; +period_for_system_time: + // If FOR_SYM is followed by SYSTEM_TIME_SYM then they are merged to: FOR_SYSTEM_TIME_SYM . 
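+  // Illustrative note (not part of this patch): the production below is meant
+  // to accept the column pair naming the row-start/row-end columns of a
+  // system-versioned table, as in (column names hypothetical):
+  //   CREATE TABLE t (x INT,
+  //                   row_start TIMESTAMP(6) GENERATED ALWAYS AS ROW START,
+  //                   row_end   TIMESTAMP(6) GENERATED ALWAYS AS ROW END,
+  //                   PERIOD FOR SYSTEM_TIME(row_start, row_end))
+  //   WITH SYSTEM VERSIONING;
+  // The grammar itself accepts any pair of identifiers here; validation of the
+  // named columns happens later, outside the parser.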
+ PERIOD_SYM FOR_SYSTEM_TIME_SYM '(' ident ',' ident ')' + { + Vers_parse_info &info= Lex->vers_get_info(); + info.set_system_time($4, $6); + } + ; + opt_check_constraint: /* empty */ { $$= (Virtual_column_info*) 0; } | check_constraint { $$= $1;} @@ -6311,12 +6510,9 @@ opt_check_constraint: check_constraint: CHECK_SYM '(' expr ')' { - Virtual_column_info *v= - add_virtual_expression(thd, $3); - if (!v) - { + Virtual_column_info *v= add_virtual_expression(thd, $3); + if (unlikely(!v)) MYSQL_YYABORT; - } $$= v; } ; @@ -6336,11 +6532,11 @@ field_spec: LEX *lex=Lex; Create_field *f= new (thd->mem_root) Create_field(); - if (check_string_char_length(&$1, 0, NAME_CHAR_LEN, - system_charset_info, 1)) + if (unlikely(check_string_char_length(&$1, 0, NAME_CHAR_LEN, + system_charset_info, 1))) my_yyabort_error((ER_TOO_LONG_IDENT, MYF(0), $1.str)); - if (!f) + if (unlikely(!f)) MYSQL_YYABORT; lex->init_last_field(f, &$1, NULL); @@ -6353,7 +6549,7 @@ field_spec: $$->check_constraint= $4; - if ($$->check(thd)) + if (unlikely($$->check(thd))) MYSQL_YYABORT; lex->alter_info.create_list.push_back($$, thd->mem_root); @@ -6388,6 +6584,15 @@ opt_serial_attribute_list: | serial_attribute ; +opt_asrow_attribute: + /* empty */ {} + | opt_asrow_attribute_list {} + ; + +opt_asrow_attribute_list: + opt_asrow_attribute_list asrow_attribute {} + | asrow_attribute + ; field_def: opt_attribute @@ -6397,6 +6602,16 @@ field_def: Lex->last_field->flags&= ~NOT_NULL_FLAG; // undo automatic NOT NULL for timestamps } vcol_opt_specifier vcol_opt_attribute + | opt_generated_always AS ROW_SYM START_SYM opt_asrow_attribute + { + if (Lex->last_field_generated_always_as_row_start()) + MYSQL_YYABORT; + } + | opt_generated_always AS ROW_SYM END opt_asrow_attribute + { + if (Lex->last_field_generated_always_as_row_end()) + MYSQL_YYABORT; + } ; opt_generated_always: @@ -6466,7 +6681,7 @@ parse_vcol_expr: expr { Virtual_column_info *v= add_virtual_expression(thd, $3); - if (!v) + if (unlikely(!v)) MYSQL_YYABORT; Lex->last_field->vcol_info= v; } @@ -6476,7 +6691,7 @@ parenthesized_expr: subselect { $$= new (thd->mem_root) Item_singlerow_subselect(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | expr @@ -6484,7 +6699,7 @@ parenthesized_expr: { $3->push_front($1, thd->mem_root); $$= new (thd->mem_root) Item_row(thd, *$3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -6494,10 +6709,8 @@ virtual_column_func: { Virtual_column_info *v= add_virtual_expression(thd, $2); - if (!v) - { + if (unlikely(!v)) MYSQL_YYABORT; - } $$= v; } ; @@ -6508,7 +6721,7 @@ column_default_expr: virtual_column_func | expr_or_literal { - if (!($$= add_virtual_expression(thd, $1))) + if (unlikely(!($$= add_virtual_expression(thd, $1)))) MYSQL_YYABORT; } ; @@ -6541,7 +6754,7 @@ field_type_numeric: { int err; ulonglong tmp_length= my_strtoll10($2.length(), NULL, &err); - if (err || tmp_length > PRECISION_FOR_DOUBLE) + if (unlikely(err || tmp_length > PRECISION_FOR_DOUBLE)) my_yyabort_error((ER_WRONG_FIELD_SPEC, MYF(0), Lex->last_field->field_name.str)); if (tmp_length > PRECISION_FOR_FLOAT) @@ -6925,29 +7138,42 @@ attribute: | ON UPDATE_SYM NOW_SYM opt_default_time_precision { Item *item= new (thd->mem_root) Item_func_now_local(thd, $4); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; Lex->last_field->on_update= item; } | AUTO_INC { Lex->last_field->flags|= AUTO_INCREMENT_FLAG | NOT_NULL_FLAG; } | SERIAL_SYM DEFAULT VALUE_SYM - { + { LEX *lex=Lex; lex->last_field->flags|= AUTO_INCREMENT_FLAG | 
NOT_NULL_FLAG | UNIQUE_KEY_FLAG; lex->alter_info.flags|= ALTER_ADD_INDEX; } | COLLATE_SYM collation_name { - if (Lex->charset && !my_charset_same(Lex->charset,$2)) + if (unlikely(Lex->charset && !my_charset_same(Lex->charset,$2))) my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0), $2->name,Lex->charset->csname)); Lex->last_field->charset= $2; } + | COMPRESSED_SYM opt_compression_method + { + if (unlikely(Lex->last_field->set_compressed($2))) + MYSQL_YYABORT; + } | serial_attribute ; -serial_attribute: - not NULL_SYM { Lex->last_field->flags|= NOT_NULL_FLAG; } +opt_compression_method: + /* empty */ { $$= NULL; } + | equal ident { $$= $2.str; } + ; + +asrow_attribute: + not NULL_SYM + { + Lex->last_field->flags|= NOT_NULL_FLAG; + } | opt_primary KEY_SYM { LEX *lex=Lex; @@ -6955,32 +7181,59 @@ serial_attribute: lex->alter_info.flags|= ALTER_ADD_INDEX; } | vcol_attribute + ; + +serial_attribute: + asrow_attribute | IDENT_sys equal TEXT_STRING_sys { - if ($3.length > ENGINE_OPTION_MAX_LENGTH) + if (unlikely($3.length > ENGINE_OPTION_MAX_LENGTH)) my_yyabort_error((ER_VALUE_TOO_LONG, MYF(0), $1.str)); - new (thd->mem_root) - engine_option_value($1, $3, true, &Lex->last_field->option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, $3, true, + &Lex->last_field->option_list, + &Lex->option_list_last); } | IDENT_sys equal ident { - if ($3.length > ENGINE_OPTION_MAX_LENGTH) + if (unlikely($3.length > ENGINE_OPTION_MAX_LENGTH)) my_yyabort_error((ER_VALUE_TOO_LONG, MYF(0), $1.str)); - new (thd->mem_root) - engine_option_value($1, $3, false, &Lex->last_field->option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, $3, false, + &Lex->last_field->option_list, + &Lex->option_list_last); } | IDENT_sys equal real_ulonglong_num { - new (thd->mem_root) - engine_option_value($1, $3, &Lex->last_field->option_list, - &Lex->option_list_last, thd->mem_root); + (void) new (thd->mem_root) + engine_option_value($1, $3, &Lex->last_field->option_list, + &Lex->option_list_last, thd->mem_root); } | IDENT_sys equal DEFAULT { - new (thd->mem_root) - engine_option_value($1, &Lex->last_field->option_list, &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, &Lex->last_field->option_list, + &Lex->option_list_last); + } + | with_or_without_system VERSIONING_SYM + { + Lex->last_field->versioning= $1; + Lex->create_info.options|= HA_VERSIONED_TABLE; + } + ; + +with_or_without_system: + WITH_SYSTEM_SYM + { + Lex->alter_info.flags|= ALTER_COLUMN_UNVERSIONED; + Lex->create_info.vers_info.versioned_fields= true; + $$= Column_definition::WITH_VERSIONING; + } + | WITHOUT SYSTEM + { + Lex->alter_info.flags|= ALTER_COLUMN_UNVERSIONED; + Lex->create_info.vers_info.unversioned_fields= true; + $$= Column_definition::WITHOUT_VERSIONING; } ; @@ -6992,7 +7245,7 @@ type_with_opt_collate: if ($2) { - if (!(Lex->charset= merge_charset_and_collation(Lex->charset, $2))) + if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2)))) MYSQL_YYABORT; } Lex->last_field->set_attributes($1, Lex->charset); @@ -7005,7 +7258,7 @@ sp_param_type_with_opt_collate: $$= $1; if ($2) { - if (!(Lex->charset= merge_charset_and_collation(Lex->charset, $2))) + if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2)))) MYSQL_YYABORT; } Lex->last_field->set_attributes($1, Lex->charset); @@ -7020,7 +7273,7 @@ charset: charset_name: ident_or_text { - if (!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0)))) + if 
(unlikely(!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0))))) my_yyabort_error((ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str)); } | BINARY { $$= &my_charset_bin; } @@ -7039,8 +7292,9 @@ opt_load_data_charset: old_or_new_charset_name: ident_or_text { - if (!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0))) && - !($$=get_old_charset_by_name($1.str))) + if (unlikely(!($$=get_charset_by_csname($1.str, + MY_CS_PRIMARY,MYF(0))) && + !($$=get_old_charset_by_name($1.str)))) my_yyabort_error((ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str)); } | BINARY { $$= &my_charset_bin; } @@ -7054,7 +7308,7 @@ old_or_new_charset_name_or_default: collation_name: ident_or_text { - if (!($$= mysqld_collation_get_by_name($1.str))) + if (unlikely(!($$= mysqld_collation_get_by_name($1.str)))) MYSQL_YYABORT; } ; @@ -7079,7 +7333,7 @@ charset_or_alias: | ASCII_SYM { $$= &my_charset_latin1; } | UNICODE_SYM { - if (!($$= get_charset_by_csname("ucs2", MY_CS_PRIMARY,MYF(0)))) + if (unlikely(!($$= get_charset_by_csname("ucs2", MY_CS_PRIMARY,MYF(0))))) my_yyabort_error((ER_UNKNOWN_CHARACTER_SET, MYF(0), "ucs2")); } ; @@ -7100,7 +7354,7 @@ opt_bin_mod: ws_nweights: '(' real_ulong_num { - if ($2 == 0) + if (unlikely($2 == 0)) { thd->parse_error(); MYSQL_YYABORT; @@ -7191,14 +7445,14 @@ ref_list: ref_list ',' ident { Key_part_spec *key= new (thd->mem_root) Key_part_spec(&$3, 0); - if (key == NULL) + if (unlikely(key == NULL)) MYSQL_YYABORT; Lex->ref_list.push_back(key, thd->mem_root); } | ident { Key_part_spec *key= new (thd->mem_root) Key_part_spec(&$1, 0); - if (key == NULL) + if (unlikely(key == NULL)) MYSQL_YYABORT; LEX *lex= Lex; lex->ref_list.empty(); @@ -7352,37 +7606,38 @@ key_using_alg: all_key_opt: KEY_BLOCK_SIZE opt_equal ulong_num { - Lex->create_info.used_fields|= HA_USES_BLOCK_SIZE; - Lex->create_info.key_block_size= $3; - } + Lex->last_key->key_create_info.block_size= $3; + Lex->last_key->key_create_info.flags|= HA_USES_BLOCK_SIZE; + } | COMMENT_SYM TEXT_STRING_sys { Lex->last_key->key_create_info.comment= $2; } | IDENT_sys equal TEXT_STRING_sys { - if ($3.length > ENGINE_OPTION_MAX_LENGTH) + if (unlikely($3.length > ENGINE_OPTION_MAX_LENGTH)) my_yyabort_error((ER_VALUE_TOO_LONG, MYF(0), $1.str)); - new (thd->mem_root) - engine_option_value($1, $3, true, &Lex->option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, $3, true, &Lex->option_list, + &Lex->option_list_last); } | IDENT_sys equal ident { - if ($3.length > ENGINE_OPTION_MAX_LENGTH) + if (unlikely($3.length > ENGINE_OPTION_MAX_LENGTH)) my_yyabort_error((ER_VALUE_TOO_LONG, MYF(0), $1.str)); - new (thd->mem_root) - engine_option_value($1, $3, false, &Lex->option_list, - &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, $3, false, &Lex->option_list, + &Lex->option_list_last); } | IDENT_sys equal real_ulonglong_num { - new (thd->mem_root) - engine_option_value($1, $3, &Lex->option_list, - &Lex->option_list_last, thd->mem_root); + (void) new (thd->mem_root) + engine_option_value($1, $3, &Lex->option_list, + &Lex->option_list_last, thd->mem_root); } | IDENT_sys equal DEFAULT { - new (thd->mem_root) - engine_option_value($1, &Lex->option_list, &Lex->option_list_last); + (void) new (thd->mem_root) + engine_option_value($1, &Lex->option_list, + &Lex->option_list_last); } ; @@ -7399,7 +7654,7 @@ fulltext_key_opt: all_key_opt | WITH PARSER_SYM IDENT_sys { - if (plugin_is_ready(&$3, MYSQL_FTPARSER_PLUGIN)) + if (likely(plugin_is_ready(&$3, MYSQL_FTPARSER_PLUGIN))) 
Lex->last_key->key_create_info.parser_name= $3; else my_yyabort_error((ER_FUNCTION_NOT_DEFINED, MYF(0), $3.str)); @@ -7427,16 +7682,16 @@ key_part: ident { $$= new (thd->mem_root) Key_part_spec(&$1, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ident '(' NUM ')' { int key_part_len= atoi($3.str); - if (!key_part_len) + if (unlikely(!key_part_len)) my_yyabort_error((ER_KEY_PART_0, MYF(0), $1.str)); $$= new (thd->mem_root) Key_part_spec(&$1, (uint) key_part_len); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -7446,11 +7701,6 @@ opt_ident: | field_ident { $$= $1; } ; -opt_component: - /* empty */ { $$= null_clex_str; } - | '.' ident { $$= $2; } - ; - string_list: text_string { Lex->last_field->interval_list.push_back($1, thd->mem_root); } @@ -7478,21 +7728,21 @@ alter: } alter_options TABLE_SYM table_ident opt_lock_wait_timeout { - if (!Lex->select_lex.add_table_to_list(thd, $5, NULL, - TL_OPTION_UPDATING, - TL_READ_NO_INSERT, - MDL_SHARED_UPGRADABLE)) + if (unlikely(!Lex->select_lex.add_table_to_list(thd, $5, NULL, + TL_OPTION_UPDATING, + TL_READ_NO_INSERT, + MDL_SHARED_UPGRADABLE))) MYSQL_YYABORT; Lex->select_lex.db= (Lex->select_lex.table_list.first)->db; Lex->create_last_non_select_table= Lex->last_table(); } alter_commands { - if (!Lex->m_sql_cmd) + if (likely(!Lex->m_sql_cmd)) { /* Create a generic ALTER TABLE statment. */ Lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table(); - if (Lex->m_sql_cmd == NULL) + if (unlikely(Lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } } @@ -7506,13 +7756,14 @@ alter: LEX *lex=Lex; lex->sql_command=SQLCOM_ALTER_DB; lex->name= $3; - if (lex->name.str == NULL && lex->copy_db_to(&lex->name)) + if (lex->name.str == NULL && + unlikely(lex->copy_db_to(&lex->name))) MYSQL_YYABORT; } | ALTER DATABASE ident UPGRADE_SYM DATA_SYM DIRECTORY_SYM NAME_SYM { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "DATABASE")); lex->sql_command= SQLCOM_ALTER_DB_UPGRADE; lex->name= $3; @@ -7521,7 +7772,7 @@ alter: { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE")); lex->sp_chistics.init(); } @@ -7536,7 +7787,7 @@ alter: { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "FUNCTION")); lex->sp_chistics.init(); } @@ -7549,7 +7800,7 @@ alter: } | ALTER view_algorithm definer_opt opt_view_suid VIEW_SYM table_ident { - if (Lex->add_alter_view(thd, $2, $4, $6)) + if (unlikely(Lex->add_alter_view(thd, $2, $4, $6))) MYSQL_YYABORT; } view_list_opt AS view_select @@ -7561,7 +7812,7 @@ alter: with the ALTER EVENT below. */ { - if (Lex->add_alter_view(thd, VIEW_ALGORITHM_INHERIT, $3, $5)) + if (unlikely(Lex->add_alter_view(thd, VIEW_ALGORITHM_INHERIT, $3, $5))) MYSQL_YYABORT; } view_list_opt AS view_select @@ -7576,7 +7827,7 @@ alter: Event_parse_data. 
*/ - if (!(Lex->event_parse_data= Event_parse_data::new_instance(thd))) + if (unlikely(!(Lex->event_parse_data= Event_parse_data::new_instance(thd)))) MYSQL_YYABORT; Lex->event_parse_data->identifier= $5; @@ -7589,7 +7840,7 @@ alter: opt_ev_comment opt_ev_sql_stmt { - if (!($7 || $8 || $9 || $10 || $11)) + if (unlikely(!($7 || $8 || $9 || $10 || $11))) { thd->parse_error(); MYSQL_YYABORT; @@ -7647,18 +7898,19 @@ alter: table_ident { LEX *lex= Lex; - if (!(lex->create_info.seq_create_info= new (thd->mem_root) - sequence_definition()) || - !lex->select_lex.add_table_to_list(thd, $5, NULL, - TL_OPTION_SEQUENCE, - TL_WRITE, MDL_EXCLUSIVE)) + if (unlikely(!(lex->create_info.seq_create_info= + new (thd->mem_root) sequence_definition())) || + unlikely(!lex->select_lex.add_table_to_list(thd, $5, NULL, + TL_OPTION_SEQUENCE, + TL_WRITE, + MDL_EXCLUSIVE))) MYSQL_YYABORT; } sequence_defs { /* Create a generic ALTER SEQUENCE statment. */ Lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_sequence($3); - if (Lex->m_sql_cmd == NULL) + if (unlikely(Lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -7689,8 +7941,8 @@ opt_ev_sql_stmt: ; ident_or_empty: - /* empty */ { $$= null_clex_str; } - | ident { $$= $1; } + /* empty */ { $$= Lex_ident_sys(); } + | ident ; alter_commands: @@ -7700,7 +7952,7 @@ alter_commands: Lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_discard_import_tablespace( Sql_cmd_discard_import_tablespace::DISCARD_TABLESPACE); - if (Lex->m_sql_cmd == NULL) + if (unlikely(Lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } | IMPORT TABLESPACE @@ -7708,7 +7960,7 @@ alter_commands: Lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_discard_import_tablespace( Sql_cmd_discard_import_tablespace::IMPORT_TABLESPACE); - if (Lex->m_sql_cmd == NULL) + if (unlikely(Lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } | alter_list @@ -7747,7 +7999,7 @@ alter_commands: DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table_optimize_partition(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } opt_no_write_to_binlog @@ -7760,7 +8012,7 @@ alter_commands: DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table_analyze_partition(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } | CHECK_SYM PARTITION_SYM all_or_alt_part_name_list @@ -7770,7 +8022,7 @@ alter_commands: DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table_check_partition(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } opt_mi_check_type @@ -7783,7 +8035,7 @@ alter_commands: DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table_repair_partition(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } opt_mi_repair_type @@ -7801,7 +8053,7 @@ alter_commands: DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table_truncate_partition(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } | reorg_partition_rule @@ -7811,21 +8063,19 @@ alter_commands: LEX *lex= thd->lex; lex->select_lex.db= $6->db; if (lex->select_lex.db.str == NULL && - lex->copy_db_to(&lex->select_lex.db)) - { + unlikely(lex->copy_db_to(&lex->select_lex.db))) MYSQL_YYABORT; - } lex->name= $6->table; lex->alter_info.partition_flags|= ALTER_PARTITION_EXCHANGE; - if (!lex->select_lex.add_table_to_list(thd, $6, NULL, - TL_OPTION_UPDATING, - TL_READ_NO_INSERT, - MDL_SHARED_NO_WRITE)) + if 
(unlikely(!lex->select_lex.add_table_to_list(thd, $6, NULL, + TL_OPTION_UPDATING, + TL_READ_NO_INSERT, + MDL_SHARED_NO_WRITE))) MYSQL_YYABORT; DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table_exchange_partition(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -7851,11 +8101,9 @@ add_partition_rule: { LEX *lex= Lex; lex->part_info= new (thd->mem_root) partition_info(); - if (!lex->part_info) - { - mem_alloc_error(sizeof(partition_info)); + if (unlikely(!lex->part_info)) MYSQL_YYABORT; - } + lex->alter_info.partition_flags|= ALTER_PARTITION_ADD; DBUG_ASSERT(!Lex->create_info.if_not_exists()); lex->create_info.set($3); @@ -7883,11 +8131,9 @@ reorg_partition_rule: { LEX *lex= Lex; lex->part_info= new (thd->mem_root) partition_info(); - if (!lex->part_info) - { - mem_alloc_error(sizeof(partition_info)); + if (unlikely(!lex->part_info)) MYSQL_YYABORT; - } + lex->no_write_to_binlog= $3; } reorg_parts_rule @@ -7917,12 +8163,9 @@ alt_part_name_list: alt_part_name_item: ident { - if (Lex->alter_info.partition_names.push_back($1.str, - thd->mem_root)) - { - mem_alloc_error(1); + if (unlikely(Lex->alter_info.partition_names.push_back($1.str, + thd->mem_root))) MYSQL_YYABORT; - } } ; @@ -7952,6 +8195,10 @@ alter_list_item: Lex->create_last_non_select_table= Lex->last_table(); Lex->alter_info.flags|= ALTER_ADD_INDEX; } + | ADD period_for_system_time + { + Lex->alter_info.flags|= ALTER_ADD_PERIOD; + } | add_column '(' create_field_list ')' { LEX *lex=Lex; @@ -7989,7 +8236,7 @@ alter_list_item: LEX *lex=Lex; Alter_drop *ad= (new (thd->mem_root) Alter_drop(Alter_drop::COLUMN, $4.str, $3)); - if (ad == NULL) + if (unlikely(ad == NULL)) MYSQL_YYABORT; lex->alter_info.drop_list.push_back(ad, thd->mem_root); lex->alter_info.flags|= ALTER_PARSER_DROP_COLUMN; @@ -8000,7 +8247,7 @@ alter_list_item: Alter_drop *ad= (new (thd->mem_root) Alter_drop(Alter_drop::CHECK_CONSTRAINT, $4.str, $3)); - if (ad == NULL) + if (unlikely(ad == NULL)) MYSQL_YYABORT; lex->alter_info.drop_list.push_back(ad, thd->mem_root); lex->alter_info.flags|= ALTER_DROP_CHECK_CONSTRAINT; @@ -8010,7 +8257,7 @@ alter_list_item: LEX *lex=Lex; Alter_drop *ad= (new (thd->mem_root) Alter_drop(Alter_drop::FOREIGN_KEY, $5.str, $4)); - if (ad == NULL) + if (unlikely(ad == NULL)) MYSQL_YYABORT; lex->alter_info.drop_list.push_back(ad, thd->mem_root); lex->alter_info.flags|= ALTER_DROP_FOREIGN_KEY; @@ -8021,7 +8268,7 @@ alter_list_item: Alter_drop *ad= (new (thd->mem_root) Alter_drop(Alter_drop::KEY, primary_key_name, FALSE)); - if (ad == NULL) + if (unlikely(ad == NULL)) MYSQL_YYABORT; lex->alter_info.drop_list.push_back(ad, thd->mem_root); lex->alter_info.flags|= ALTER_DROP_INDEX; @@ -8031,7 +8278,7 @@ alter_list_item: LEX *lex=Lex; Alter_drop *ad= (new (thd->mem_root) Alter_drop(Alter_drop::KEY, $4.str, $3)); - if (ad == NULL) + if (unlikely(ad == NULL)) MYSQL_YYABORT; lex->alter_info.drop_list.push_back(ad, thd->mem_root); lex->alter_info.flags|= ALTER_DROP_INDEX; @@ -8050,12 +8297,13 @@ alter_list_item: } | ALTER opt_column opt_if_exists_table_element field_ident SET DEFAULT column_default_expr { - if (Lex->add_alter_list($4.str, $7, $3)) + if (unlikely(Lex->add_alter_list($4.str, $7, $3))) MYSQL_YYABORT; } | ALTER opt_column opt_if_exists_table_element field_ident DROP DEFAULT { - if (Lex->add_alter_list($4.str, (Virtual_column_info*) 0, $3)) + if (unlikely(Lex->add_alter_list($4.str, (Virtual_column_info*) 0, + $3))) MYSQL_YYABORT; } | RENAME opt_to table_ident @@ 
-8063,12 +8311,11 @@ alter_list_item: LEX *lex=Lex; lex->select_lex.db= $3->db; if (lex->select_lex.db.str == NULL && - lex->copy_db_to(&lex->select_lex.db)) - { + unlikely(lex->copy_db_to(&lex->select_lex.db))) MYSQL_YYABORT; - } - if (check_table_name($3->table.str,$3->table.length, FALSE) || - ($3->db.str && check_db_name((LEX_STRING*) &$3->db))) + if (unlikely(check_table_name($3->table.str,$3->table.length, + FALSE)) || + ($3->db.str && unlikely(check_db_name((LEX_STRING*) &$3->db)))) my_yyabort_error((ER_WRONG_TABLE_NAME, MYF(0), $3->table.str)); lex->name= $3->table; lex->alter_info.flags|= ALTER_RENAME; @@ -8080,10 +8327,10 @@ alter_list_item: $4= thd->variables.collation_database; } $5= $5 ? $5 : $4; - if (!my_charset_same($4,$5)) + if (unlikely(!my_charset_same($4,$5))) my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0), $5->name, $4->csname)); - if (Lex->create_info.add_alter_list_item_convert_to_charset($5)) + if (unlikely(Lex->create_info.add_alter_list_item_convert_to_charset($5))) MYSQL_YYABORT; Lex->alter_info.flags|= ALTER_OPTIONS; } @@ -8108,6 +8355,19 @@ alter_list_item: } | alter_algorithm_option | alter_lock_option + | ADD SYSTEM VERSIONING_SYM + { + Lex->alter_info.flags|= ALTER_ADD_SYSTEM_VERSIONING; + Lex->create_info.options|= HA_VERSIONED_TABLE; + } + | DROP SYSTEM VERSIONING_SYM + { + Lex->alter_info.flags|= ALTER_DROP_SYSTEM_VERSIONING; + } + | DROP PERIOD_SYM FOR_SYSTEM_TIME_SYM + { + Lex->alter_info.flags|= ALTER_DROP_PERIOD; + } ; opt_index_lock_algorithm: @@ -8125,7 +8385,7 @@ alter_algorithm_option: } | ALGORITHM_SYM opt_equal ident { - if (Lex->alter_info.set_requested_algorithm(&$3)) + if (unlikely(Lex->alter_info.set_requested_algorithm(&$3))) my_yyabort_error((ER_UNKNOWN_ALTER_ALGORITHM, MYF(0), $3.str)); } ; @@ -8138,13 +8398,13 @@ alter_lock_option: } | LOCK_SYM opt_equal ident { - if (Lex->alter_info.set_requested_lock(&$3)) + if (unlikely(Lex->alter_info.set_requested_lock(&$3))) my_yyabort_error((ER_UNKNOWN_ALTER_LOCK, MYF(0), $3.str)); } ; opt_column: - /* empty */ {} + /* empty */ {} %prec PREC_BELOW_IDENTIFIER_OPT_SPECIAL_CASE | COLUMN_SYM {} ; @@ -8244,8 +8504,8 @@ start: LEX *lex= Lex; lex->sql_command= SQLCOM_BEGIN; /* READ ONLY and READ WRITE are mutually exclusive. 
*/ - if (($3 & MYSQL_START_TRANS_OPT_READ_WRITE) && - ($3 & MYSQL_START_TRANS_OPT_READ_ONLY)) + if (unlikely(($3 & MYSQL_START_TRANS_OPT_READ_WRITE) && + ($3 & MYSQL_START_TRANS_OPT_READ_ONLY))) { thd->parse_error(); MYSQL_YYABORT; @@ -8313,10 +8573,10 @@ slave_until: | UNTIL_SYM slave_until_opts { LEX *lex=Lex; - if (((lex->mi.log_file_name || lex->mi.pos) && - (lex->mi.relay_log_name || lex->mi.relay_log_pos)) || - !((lex->mi.log_file_name && lex->mi.pos) || - (lex->mi.relay_log_name && lex->mi.relay_log_pos))) + if (unlikely(((lex->mi.log_file_name || lex->mi.pos) && + (lex->mi.relay_log_name || lex->mi.relay_log_pos)) || + !((lex->mi.log_file_name && lex->mi.pos) || + (lex->mi.relay_log_name && lex->mi.relay_log_pos)))) my_yyabort_error((ER_BAD_SLAVE_UNTIL_COND, MYF(0))); } | UNTIL_SYM MASTER_GTID_POS_SYM '=' TEXT_STRING_sys @@ -8371,7 +8631,7 @@ repair: LEX* lex= thd->lex; DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_repair_table(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -8413,7 +8673,7 @@ analyze: LEX* lex= thd->lex; DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_analyze_table(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -8448,7 +8708,7 @@ persistent_column_stat_spec: { LEX* lex= thd->lex; lex->column_list= new (thd->mem_root) List; - if (lex->column_list == NULL) + if (unlikely(lex->column_list == NULL)) MYSQL_YYABORT; } table_column_list @@ -8461,7 +8721,7 @@ persistent_index_stat_spec: { LEX* lex= thd->lex; lex->index_list= new (thd->mem_root) List; - if (lex->index_list == NULL) + if (unlikely(lex->index_list == NULL)) MYSQL_YYABORT; } table_index_list @@ -8535,11 +8795,11 @@ check: CHECK_SYM check_view_or_table { LEX* lex= thd->lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "CHECK")); DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_check_table(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -8584,7 +8844,7 @@ optimize: LEX* lex= thd->lex; DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_optimize_table(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } ; @@ -8611,14 +8871,14 @@ rename: rename_list: user TO_SYM user { - if (Lex->users_list.push_back($1, thd->mem_root) || - Lex->users_list.push_back($3, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($1, thd->mem_root) || + Lex->users_list.push_back($3, thd->mem_root))) MYSQL_YYABORT; } | rename_list ',' user TO_SYM user { - if (Lex->users_list.push_back($3, thd->mem_root) || - Lex->users_list.push_back($5, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($3, thd->mem_root) || + Lex->users_list.push_back($5, thd->mem_root))) MYSQL_YYABORT; } ; @@ -8633,10 +8893,12 @@ table_to_table: { LEX *lex=Lex; SELECT_LEX *sl= lex->current_select; - if (!sl->add_table_to_list(thd, $1,NULL,TL_OPTION_UPDATING, - TL_IGNORE, MDL_EXCLUSIVE) || - !sl->add_table_to_list(thd, $4, NULL, TL_OPTION_UPDATING, - TL_IGNORE, MDL_EXCLUSIVE)) + if (unlikely(!sl->add_table_to_list(thd, $1,NULL, + TL_OPTION_UPDATING, + TL_IGNORE, MDL_EXCLUSIVE)) || + unlikely(!sl->add_table_to_list(thd, $4, NULL, + TL_OPTION_UPDATING, + TL_IGNORE, MDL_EXCLUSIVE))) MYSQL_YYABORT; } ; @@ -8667,9 +8929,10 @@ keycache_list: assign_to_keycache: table_ident cache_keys_spec { - if (!Select->add_table_to_list(thd, $1, NULL, 0, 
TL_READ, - MDL_SHARED_READ, - Select->pop_index_hints())) + if (unlikely(!Select->add_table_to_list(thd, $1, NULL, 0, TL_READ, + MDL_SHARED_READ, + Select-> + pop_index_hints()))) MYSQL_YYABORT; } ; @@ -8677,9 +8940,10 @@ assign_to_keycache: assign_to_keycache_parts: table_ident adm_partition cache_keys_spec { - if (!Select->add_table_to_list(thd, $1, NULL, 0, TL_READ, - MDL_SHARED_READ, - Select->pop_index_hints())) + if (unlikely(!Select->add_table_to_list(thd, $1, NULL, 0, TL_READ, + MDL_SHARED_READ, + Select-> + pop_index_hints()))) MYSQL_YYABORT; } ; @@ -8713,9 +8977,10 @@ preload_list: preload_keys: table_ident cache_keys_spec opt_ignore_leaves { - if (!Select->add_table_to_list(thd, $1, NULL, $3, TL_READ, - MDL_SHARED_READ, - Select->pop_index_hints())) + if (unlikely(!Select->add_table_to_list(thd, $1, NULL, $3, TL_READ, + MDL_SHARED_READ, + Select-> + pop_index_hints()))) MYSQL_YYABORT; } ; @@ -8723,9 +8988,10 @@ preload_keys: preload_keys_parts: table_ident adm_partition cache_keys_spec opt_ignore_leaves { - if (!Select->add_table_to_list(thd, $1, NULL, $4, TL_READ, - MDL_SHARED_READ, - Select->pop_index_hints())) + if (unlikely(!Select->add_table_to_list(thd, $1, NULL, $4, TL_READ, + MDL_SHARED_READ, + Select-> + pop_index_hints()))) MYSQL_YYABORT; } ; @@ -8774,6 +9040,9 @@ select: select_init: SELECT_SYM select_options_and_item_list select_init3 + | table_value_constructor + | table_value_constructor union_list + | table_value_constructor union_order_or_limit | '(' select_paren ')' | '(' select_paren ')' union_list | '(' select_paren ')' union_order_or_limit @@ -8781,12 +9050,23 @@ select_init: union_list_part2: SELECT_SYM select_options_and_item_list select_init3_union_query_term + | table_value_constructor + | table_value_constructor union_list + | table_value_constructor union_order_or_limit | '(' select_paren_union_query_term ')' | '(' select_paren_union_query_term ')' union_list | '(' select_paren_union_query_term ')' union_order_or_limit ; select_paren: + { + Lex->current_select->set_braces(true); + } + table_value_constructor + { + DBUG_ASSERT(Lex->current_select->braces); + } + | { /* In order to correctly parse UNION's global ORDER BY we need to @@ -8836,6 +9116,15 @@ select_paren_view: /* The equivalent of select_paren for nested queries. 
*/ select_paren_derived: + { + Lex->current_select->set_braces(true); + } + table_value_constructor + { + DBUG_ASSERT(Lex->current_select->braces); + $$= Lex->current_select->master_unit()->first_select(); + } + | { Lex->current_select->set_braces(true); } @@ -8992,11 +9281,76 @@ select_options: /* empty*/ | select_option_list { - if (Select->options & SELECT_DISTINCT && Select->options & SELECT_ALL) + if (unlikely((Select->options & SELECT_DISTINCT) && + (Select->options & SELECT_ALL))) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "ALL", "DISTINCT")); } ; +opt_history_unit: + /* empty*/ %prec PREC_BELOW_IDENTIFIER_OPT_SPECIAL_CASE + { + $$= VERS_UNDEFINED; + } + | TRANSACTION_SYM + { + $$= VERS_TRX_ID; + } + | TIMESTAMP + { + $$= VERS_TIMESTAMP; + } + ; + +history_point: + TIMESTAMP TEXT_STRING + { + Item *item; + if (!(item= create_temporal_literal(thd, $2.str, $2.length, YYCSCL, + MYSQL_TYPE_DATETIME, true))) + MYSQL_YYABORT; + $$= Vers_history_point(VERS_TIMESTAMP, item); + } + | function_call_keyword_timestamp + { + $$= Vers_history_point(VERS_TIMESTAMP, $1); + } + | opt_history_unit bit_expr + { + $$= Vers_history_point($1, $2); + } + ; + +opt_for_system_time_clause: + /* empty */ + { + $$= false; + } + | FOR_SYSTEM_TIME_SYM system_time_expr + { + $$= true; + } + ; + +system_time_expr: + AS OF_SYM history_point + { + Lex->vers_conditions.init(SYSTEM_TIME_AS_OF, $3); + } + | ALL + { + Lex->vers_conditions.init(SYSTEM_TIME_ALL); + } + | FROM history_point TO_SYM history_point + { + Lex->vers_conditions.init(SYSTEM_TIME_FROM_TO, $2, $4); + } + | BETWEEN_SYM history_point AND_SYM history_point + { + Lex->vers_conditions.init(SYSTEM_TIME_BETWEEN, $2, $4); + } + ; + select_option_list: select_option_list select_option | select_option @@ -9010,11 +9364,11 @@ select_option: Allow this flag only on the first top-level SELECT statement, if SQL_CACHE wasn't specified, and only once per query. */ - if (Lex->current_select != &Lex->select_lex) + if (unlikely(Lex->current_select != &Lex->select_lex)) my_yyabort_error((ER_CANT_USE_OPTION_HERE, MYF(0), "SQL_NO_CACHE")); - if (Lex->select_lex.sql_cache == SELECT_LEX::SQL_CACHE) + if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_CACHE)) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "SQL_CACHE", "SQL_NO_CACHE")); - if (Lex->select_lex.sql_cache == SELECT_LEX::SQL_NO_CACHE) + if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_NO_CACHE)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "SQL_NO_CACHE")); Lex->safe_to_cache_query=0; @@ -9027,11 +9381,11 @@ select_option: Allow this flag only on the first top-level SELECT statement, if SQL_NO_CACHE wasn't specified, and only once per query. 
*/ - if (Lex->current_select != &Lex->select_lex) + if (unlikely(Lex->current_select != &Lex->select_lex)) my_yyabort_error((ER_CANT_USE_OPTION_HERE, MYF(0), "SQL_CACHE")); - if (Lex->select_lex.sql_cache == SELECT_LEX::SQL_NO_CACHE) + if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_NO_CACHE)) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "SQL_NO_CACHE", "SQL_CACHE")); - if (Lex->select_lex.sql_cache == SELECT_LEX::SQL_CACHE) + if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_CACHE)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "SQL_CACHE")); Lex->safe_to_cache_query=1; @@ -9067,30 +9421,30 @@ select_item_list: Item *item= new (thd->mem_root) Item_field(thd, &thd->lex->current_select->context, NULL, NULL, &star_clex_str); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; - if (add_item_to_list(thd, item)) + if (unlikely(add_item_to_list(thd, item))) MYSQL_YYABORT; (thd->lex->current_select->with_wild)++; } ; select_item: - remember_name table_wild remember_end + remember_name select_sublist_qualified_asterisk remember_end { - if (add_item_to_list(thd, $2)) + if (unlikely(add_item_to_list(thd, $2))) MYSQL_YYABORT; } | remember_name expr remember_end select_alias { DBUG_ASSERT($1 < $3); - if (add_item_to_list(thd, $2)) + if (unlikely(add_item_to_list(thd, $2))) MYSQL_YYABORT; if ($4.str) { - if (Lex->sql_command == SQLCOM_CREATE_VIEW && - check_column_name($4.str)) + if (unlikely(Lex->sql_command == SQLCOM_CREATE_VIEW && + check_column_name($4.str))) my_yyabort_error((ER_WRONG_COLUMN_NAME, MYF(0), $4.str)); $2->is_autogenerated_name= FALSE; $2->set_name(thd, $4.str, $4.length, system_charset_info); @@ -9210,7 +9564,7 @@ expr: { /* X OR Y */ $$= new (thd->mem_root) Item_cond_or(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } } @@ -9218,7 +9572,7 @@ expr: { /* XOR is a proprietary extension */ $$= new (thd->mem_root) Item_func_xor(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | expr and expr %prec AND_SYM @@ -9260,84 +9614,84 @@ expr: { /* X AND Y */ $$= new (thd->mem_root) Item_cond_and(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } } | NOT_SYM expr %prec NOT_SYM { $$= negate_expression(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS TRUE_SYM %prec IS { $$= new (thd->mem_root) Item_func_istrue(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS not TRUE_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnottrue(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS FALSE_SYM %prec IS { $$= new (thd->mem_root) Item_func_isfalse(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS not FALSE_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnotfalse(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS UNKNOWN_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnull(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS not UNKNOWN_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnotnull(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | bool_pri + | bool_pri %prec PREC_BELOW_NOT ; bool_pri: bool_pri IS NULL_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnull(thd, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri IS not NULL_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnotnull(thd, $1); - if ($$ == NULL) + if 
(unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri EQUAL_SYM predicate %prec EQUAL_SYM { $$= new (thd->mem_root) Item_func_equal(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri comp_op predicate %prec '=' { $$= (*$2)(0)->create(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bool_pri comp_op all_or_any '(' subselect ')' %prec '=' { $$= all_any_subquery_creator(thd, $1, $2, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | predicate @@ -9347,22 +9701,22 @@ predicate: bit_expr IN_SYM '(' subselect ')' { $$= new (thd->mem_root) Item_in_subselect(thd, $1, $4); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr not IN_SYM '(' subselect ')' { Item *item= new (thd->mem_root) Item_in_subselect(thd, $1, $5); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= negate_expression(thd, item); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr IN_SYM '(' expr ')' { $$= handle_sql2003_note184_exception(thd, $1, true, $4); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr IN_SYM '(' expr ',' expr_list ')' @@ -9370,13 +9724,13 @@ predicate: $6->push_front($4, thd->mem_root); $6->push_front($1, thd->mem_root); $$= new (thd->mem_root) Item_func_in(thd, *$6); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr not IN_SYM '(' expr ')' { $$= handle_sql2003_note184_exception(thd, $1, false, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr not IN_SYM '(' expr ',' expr_list ')' @@ -9384,21 +9738,21 @@ predicate: $7->push_front($5, thd->mem_root); $7->push_front($1, thd->mem_root); Item_func_in *item= new (thd->mem_root) Item_func_in(thd, *$7); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= item->neg_transformer(thd); } | bit_expr BETWEEN_SYM bit_expr AND_SYM predicate { $$= new (thd->mem_root) Item_func_between(thd, $1, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr not BETWEEN_SYM bit_expr AND_SYM predicate { Item_func_between *item; item= new (thd->mem_root) Item_func_between(thd, $1, $4, $6); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= item->neg_transformer(thd); } @@ -9406,125 +9760,151 @@ predicate: { Item *item1= new (thd->mem_root) Item_func_soundex(thd, $1); Item *item4= new (thd->mem_root) Item_func_soundex(thd, $4); - if ((item1 == NULL) || (item4 == NULL)) + if (unlikely(item1 == NULL) || unlikely(item4 == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_func_eq(thd, item1, item4); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | bit_expr LIKE simple_expr opt_escape + | bit_expr LIKE mysql_concatenation_expr opt_escape { $$= new (thd->mem_root) Item_func_like(thd, $1, $3, $4, Lex->escape_used); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | bit_expr not LIKE simple_expr opt_escape + | bit_expr not LIKE mysql_concatenation_expr opt_escape { Item *item= new (thd->mem_root) Item_func_like(thd, $1, $4, $5, Lex->escape_used); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= item->neg_transformer(thd); } | bit_expr REGEXP bit_expr { $$= new (thd->mem_root) Item_func_regex(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr not REGEXP bit_expr { Item *item= new (thd->mem_root) Item_func_regex(thd, $1, $4); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= negate_expression(thd, item); - 
if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | bit_expr + | bit_expr %prec PREC_BELOW_NOT ; bit_expr: bit_expr '|' bit_expr %prec '|' { $$= new (thd->mem_root) Item_func_bit_or(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '&' bit_expr %prec '&' { $$= new (thd->mem_root) Item_func_bit_and(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr SHIFT_LEFT bit_expr %prec SHIFT_LEFT { $$= new (thd->mem_root) Item_func_shift_left(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr SHIFT_RIGHT bit_expr %prec SHIFT_RIGHT { $$= new (thd->mem_root) Item_func_shift_right(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + } + | bit_expr ORACLE_CONCAT_SYM bit_expr + { + $$= new (thd->mem_root) Item_func_concat_operator_oracle(thd, + $1, $3); + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '+' bit_expr %prec '+' { $$= new (thd->mem_root) Item_func_plus(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '-' bit_expr %prec '-' { $$= new (thd->mem_root) Item_func_minus(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '+' INTERVAL_SYM expr interval %prec '+' { $$= new (thd->mem_root) Item_date_add_interval(thd, $1, $4, $5, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '-' INTERVAL_SYM expr interval %prec '-' { $$= new (thd->mem_root) Item_date_add_interval(thd, $1, $4, $5, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + } + | INTERVAL_SYM expr interval '+' expr + /* we cannot put interval before - */ + { + $$= new (thd->mem_root) Item_date_add_interval(thd, $5, $2, $3, 0); + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + } + | '+' INTERVAL_SYM expr interval '+' expr %prec NEG + { + $$= new (thd->mem_root) Item_date_add_interval(thd, $6, $3, $4, 0); + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + } + | '-' INTERVAL_SYM expr interval '+' expr %prec NEG + { + $$= new (thd->mem_root) Item_date_add_interval(thd, $6, $3, $4, 1); + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '*' bit_expr %prec '*' { $$= new (thd->mem_root) Item_func_mul(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '/' bit_expr %prec '/' { $$= new (thd->mem_root) Item_func_div(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr DIV_SYM bit_expr %prec DIV_SYM { $$= new (thd->mem_root) Item_func_int_div(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr MOD_SYM bit_expr %prec MOD_SYM { $$= new (thd->mem_root) Item_func_mod(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | bit_expr '^' bit_expr { $$= new (thd->mem_root) Item_func_bit_xor(thd, $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | simple_expr + | mysql_concatenation_expr %prec '^' ; or: @@ -9611,7 +9991,7 @@ dyncall_create_element: LEX *lex= Lex; $$= (DYNCALL_CREATE_DEF *) alloc_root(thd->mem_root, sizeof(DYNCALL_CREATE_DEF)); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; $$->key= $1; $$->value= $3; @@ -9631,7 +10011,7 @@ dyncall_create_list: dyncall_create_element { $$= new (thd->mem_root) List; - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; $$->push_back($1, thd->mem_root); } @@ -9647,8 +10027,8 @@ sp_cursor_name_and_offset: { LEX *lex= Lex; $$.name= $1; - if (!lex->spcont || - 
!lex->spcont->find_cursor(&$1, &$$.offset, false)) + if (unlikely(!lex->spcont || + !lex->spcont->find_cursor(&$1, &$$.offset, false))) my_yyabort_error((ER_SP_CURSOR_MISMATCH, MYF(0), $1.str)); } ; @@ -9656,26 +10036,26 @@ sp_cursor_name_and_offset: explicit_cursor_attr: sp_cursor_name_and_offset '%' ISOPEN_SYM { - if (!($$= new (thd->mem_root) - Item_func_cursor_isopen(thd, &$1.name, $1.offset))) + if (unlikely(!($$= new (thd->mem_root) + Item_func_cursor_isopen(thd, &$1.name, $1.offset)))) MYSQL_YYABORT; } | sp_cursor_name_and_offset '%' FOUND_SYM { - if (!($$= new (thd->mem_root) - Item_func_cursor_found(thd, &$1.name, $1.offset))) + if (unlikely(!($$= new (thd->mem_root) + Item_func_cursor_found(thd, &$1.name, $1.offset)))) MYSQL_YYABORT; } | sp_cursor_name_and_offset '%' NOTFOUND_SYM { - if (!($$= new (thd->mem_root) - Item_func_cursor_notfound(thd, &$1.name, $1.offset))) + if (unlikely(!($$= new (thd->mem_root) + Item_func_cursor_notfound(thd, &$1.name, $1.offset)))) MYSQL_YYABORT; } | sp_cursor_name_and_offset '%' ROWCOUNT_SYM { - if (!($$= new (thd->mem_root) - Item_func_cursor_rowcount(thd, &$1.name, $1.offset))) + if (unlikely(!($$= new (thd->mem_root) + Item_func_cursor_rowcount(thd, &$1.name, $1.offset)))) MYSQL_YYABORT; } ; @@ -9735,18 +10115,18 @@ column_default_non_parenthesized_expr: { $5->push_front($3, thd->mem_root); $$= new (thd->mem_root) Item_row(thd, *$5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | EXISTS '(' subselect ')' { $$= new (thd->mem_root) Item_exists_subselect(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | '{' ident expr '}' { - if (!($$= $3->make_odbc_literal(thd, &$2))) + if (unlikely(!($$= $3->make_odbc_literal(thd, &$2)))) MYSQL_YYABORT; } | MATCH ident_list_arg AGAINST '(' bit_expr fulltext_options ')' @@ -9754,64 +10134,64 @@ column_default_non_parenthesized_expr: $2->push_front($5, thd->mem_root); Item_func_match *i1= new (thd->mem_root) Item_func_match(thd, *$2, $6); - if (i1 == NULL) + if (unlikely(i1 == NULL)) MYSQL_YYABORT; Select->add_ftfunc_to_list(thd, i1); $$= i1; } | CAST_SYM '(' expr AS cast_type ')' { - if (!($$= $5.create_typecast_item(thd, $3, Lex->charset))) + if (unlikely(!($$= $5.create_typecast_item(thd, $3, Lex->charset)))) MYSQL_YYABORT; } | CASE_SYM when_list_opt_else END { - if (!($$= new(thd->mem_root) Item_func_case_searched(thd, *$2))) + if (unlikely(!($$= new(thd->mem_root) Item_func_case_searched(thd, *$2)))) MYSQL_YYABORT; } | CASE_SYM expr when_list_opt_else END { $3->push_front($2, thd->mem_root); - if (!($$= new (thd->mem_root) Item_func_case_simple(thd, *$3))) + if (unlikely(!($$= new (thd->mem_root) Item_func_case_simple(thd, *$3)))) MYSQL_YYABORT; } | CONVERT_SYM '(' expr ',' cast_type ')' { - if (!($$= $5.create_typecast_item(thd, $3, Lex->charset))) + if (unlikely(!($$= $5.create_typecast_item(thd, $3, Lex->charset)))) MYSQL_YYABORT; } | CONVERT_SYM '(' expr USING charset_name ')' { $$= new (thd->mem_root) Item_func_conv_charset(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DATE_FORMAT_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_date_format(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DATE_FORMAT_SYM '(' expr ',' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_date_format(thd, $3, $5, $7); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DECODE_SYM '(' expr ',' decode_when_list ')' { $5->push_front($3, thd->mem_root); - if (!($$= new (thd->mem_root) 
Item_func_decode_oracle(thd, *$5))) + if (unlikely(!($$= new (thd->mem_root) Item_func_decode_oracle(thd, *$5)))) MYSQL_YYABORT; } | DEFAULT '(' simple_ident ')' { Item_splocal *il= $3->get_item_splocal(); - if (il) + if (unlikely(il)) my_yyabort_error((ER_WRONG_COLUMN_NAME, MYF(0), il->my_name()->str)); $$= new (thd->mem_root) Item_default_value(thd, Lex->current_context(), $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->default_used= TRUE; } @@ -9819,66 +10199,67 @@ column_default_non_parenthesized_expr: { $$= new (thd->mem_root) Item_insert_value(thd, Lex->current_context(), $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | NEXT_SYM VALUE_SYM FOR_SYM table_ident { - if (!($$= Lex->create_item_func_nextval(thd, $4))) + if (unlikely(!($$= Lex->create_item_func_nextval(thd, $4)))) MYSQL_YYABORT; } | NEXTVAL_SYM '(' table_ident ')' { - if (!($$= Lex->create_item_func_nextval(thd, $3))) + if (unlikely(!($$= Lex->create_item_func_nextval(thd, $3)))) MYSQL_YYABORT; } | PREVIOUS_SYM VALUE_SYM FOR_SYM table_ident { - if (!($$= Lex->create_item_func_lastval(thd, $4))) + if (unlikely(!($$= Lex->create_item_func_lastval(thd, $4)))) MYSQL_YYABORT; } | LASTVAL_SYM '(' table_ident ')' { - if (!($$= Lex->create_item_func_lastval(thd, $3))) + if (unlikely(!($$= Lex->create_item_func_lastval(thd, $3)))) MYSQL_YYABORT; } | SETVAL_SYM '(' table_ident ',' longlong_num ')' { - if (!($$= Lex->create_item_func_setval(thd, $3, $5, 0, 1))) + if (unlikely(!($$= Lex->create_item_func_setval(thd, $3, $5, 0, 1)))) MYSQL_YYABORT; } | SETVAL_SYM '(' table_ident ',' longlong_num ',' bool ')' { - if (!($$= Lex->create_item_func_setval(thd, $3, $5, 0, $7))) + if (unlikely(!($$= Lex->create_item_func_setval(thd, $3, $5, 0, $7)))) MYSQL_YYABORT; } | SETVAL_SYM '(' table_ident ',' longlong_num ',' bool ',' ulonglong_num ')' { - if (!($$= Lex->create_item_func_setval(thd, $3, $5, $9, $7))) + if (unlikely(!($$= Lex->create_item_func_setval(thd, $3, $5, $9, $7)))) + MYSQL_YYABORT; + } + ; + +primary_expr: + column_default_non_parenthesized_expr + | explicit_cursor_attr + | '(' parenthesized_expr ')' { $$= $2; } + ; + +string_factor_expr: + primary_expr + | string_factor_expr COLLATE_SYM collation_name + { + if (unlikely(!($$= new (thd->mem_root) Item_func_set_collation(thd, $1, $3)))) MYSQL_YYABORT; } ; simple_expr: - column_default_non_parenthesized_expr - | explicit_cursor_attr - | simple_expr COLLATE_SYM collation_name %prec NEG - { - if (!($$= new (thd->mem_root) Item_func_set_collation(thd, $1, $3))) - MYSQL_YYABORT; - } - | '(' parenthesized_expr ')' { $$= $2; } - | BINARY simple_expr %prec NEG + string_factor_expr %prec NEG + | BINARY simple_expr { Type_cast_attributes at(&my_charset_bin); - if (!($$= type_handler_long_blob.create_typecast_item(thd, $2, at))) - MYSQL_YYABORT; - } - | simple_expr OR_OR_SYM simple_expr - { - $$= new (thd->mem_root) Item_func_concat_operator_oracle(thd, - $1, $3); - if ($$ == NULL) + if (unlikely(!($$= type_handler_long_blob.create_typecast_item(thd, $2, at)))) MYSQL_YYABORT; } | '+' simple_expr %prec NEG @@ -9888,30 +10269,48 @@ simple_expr: | '-' simple_expr %prec NEG { $$= $2->neg(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | '~' simple_expr %prec NEG { $$= new (thd->mem_root) Item_func_bit_neg(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | not2 simple_expr %prec NEG { $$= negate_expression(thd, $2); - if ($$ == NULL) - MYSQL_YYABORT; - } - | INTERVAL_SYM expr interval '+' expr %prec 
INTERVAL_SYM - /* we cannot put interval before - */ - { - $$= new (thd->mem_root) Item_date_add_interval(thd, $5, $2, $3, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; +mysql_concatenation_expr: + simple_expr + | mysql_concatenation_expr MYSQL_CONCAT_SYM simple_expr + { + $$= new (thd->mem_root) Item_func_concat(thd, $1, $3); + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + } + ; + +function_call_keyword_timestamp: + TIMESTAMP '(' expr ')' + { + $$= new (thd->mem_root) Item_datetime_typecast(thd, $3, + AUTO_SEC_PART_DIGITS); + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + } + | TIMESTAMP '(' expr ',' expr ')' + { + $$= new (thd->mem_root) Item_func_add_time(thd, $3, $5, 1, 0); + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + } + ; /* Function call syntax using official SQL 2003 keywords. Because the function name is an official token, @@ -9922,20 +10321,20 @@ function_call_keyword: CHAR_SYM '(' expr_list ')' { $$= new (thd->mem_root) Item_func_char(thd, *$3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CHAR_SYM '(' expr_list USING charset_name ')' { $$= new (thd->mem_root) Item_func_char(thd, *$3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CURRENT_USER optional_braces { $$= new (thd->mem_root) Item_func_current_user(thd, Lex->current_context()); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); Lex->safe_to_cache_query= 0; @@ -9944,7 +10343,7 @@ function_call_keyword: { $$= new (thd->mem_root) Item_func_current_role(thd, Lex->current_context()); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); Lex->safe_to_cache_query= 0; @@ -9952,86 +10351,87 @@ function_call_keyword: | DATE_SYM '(' expr ')' { $$= new (thd->mem_root) Item_date_typecast(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DAY_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_dayofmonth(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | HOUR_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_hour(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | INSERT '(' expr ',' expr ',' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_insert(thd, $3, $5, $7, $9); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | INTERVAL_SYM '(' expr ',' expr ')' %prec INTERVAL_SYM + | INTERVAL_SYM '(' expr ',' expr ')' { List *list= new (thd->mem_root) List; - if (list == NULL) + if (unlikely(list == NULL)) + MYSQL_YYABORT; + if (unlikely(list->push_front($5, thd->mem_root)) || + unlikely(list->push_front($3, thd->mem_root))) MYSQL_YYABORT; - list->push_front($5, thd->mem_root); - list->push_front($3, thd->mem_root); Item_row *item= new (thd->mem_root) Item_row(thd, *list); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_func_interval(thd, item); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | INTERVAL_SYM '(' expr ',' expr ',' expr_list ')' %prec INTERVAL_SYM + | INTERVAL_SYM '(' expr ',' expr ',' expr_list ')' { $7->push_front($5, thd->mem_root); $7->push_front($3, thd->mem_root); Item_row *item= new (thd->mem_root) Item_row(thd, *$7); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_func_interval(thd, item); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LEFT '(' expr ',' expr ')' { $$= new (thd->mem_root) 
Item_func_left(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MINUTE_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_minute(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MONTH_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_month(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | RIGHT '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_right(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SECOND_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_second(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SQL_SYM '%' ROWCOUNT_SYM { $$= new (thd->mem_root) Item_func_oracle_sql_rowcount(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); Lex->safe_to_cache_query= 0; @@ -10040,31 +10440,22 @@ function_call_keyword: { $$= new (thd->mem_root) Item_time_typecast(thd, $3, AUTO_SEC_PART_DIGITS); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | TIMESTAMP '(' expr ')' + | function_call_keyword_timestamp { - $$= new (thd->mem_root) Item_datetime_typecast(thd, $3, - AUTO_SEC_PART_DIGITS); - if ($$ == NULL) - MYSQL_YYABORT; - } - | TIMESTAMP '(' expr ',' expr ')' - { - $$= new (thd->mem_root) Item_func_add_time(thd, $3, $5, 1, 0); - if ($$ == NULL) - MYSQL_YYABORT; + $$= $1; } | TRIM '(' trim_operands ')' { - if (!($$= $3.make_item_func_trim(thd))) + if (unlikely(!($$= $3.make_item_func_trim(thd)))) MYSQL_YYABORT; } | USER_SYM '(' ')' { $$= new (thd->mem_root) Item_func_user(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); Lex->safe_to_cache_query=0; @@ -10072,7 +10463,7 @@ function_call_keyword: | YEAR_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_year(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -10094,99 +10485,97 @@ function_call_nonkeyword: { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $5, INTERVAL_DAY, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ADDDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')' { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $6, $7, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CURDATE optional_braces { $$= new (thd->mem_root) Item_func_curdate_local(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } | CURTIME opt_time_precision { $$= new (thd->mem_root) Item_func_curtime_local(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } | DATE_ADD_INTERVAL '(' expr ',' INTERVAL_SYM expr interval ')' - %prec INTERVAL_SYM { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $6, $7, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DATE_SUB_INTERVAL '(' expr ',' INTERVAL_SYM expr interval ')' - %prec INTERVAL_SYM { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $6, $7, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | EXTRACT_SYM '(' interval FROM expr ')' { $$=new (thd->mem_root) Item_extract(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | GET_FORMAT '(' date_time_type ',' expr ')' { $$= new (thd->mem_root) Item_func_get_format(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | NOW_SYM opt_time_precision { $$= new (thd->mem_root) 
Item_func_now_local(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } | POSITION_SYM '(' bit_expr IN_SYM expr ')' { $$= new (thd->mem_root) Item_func_locate(thd, $5, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SUBDATE_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $5, INTERVAL_DAY, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SUBDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')' { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $6, $7, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SUBSTRING '(' expr ',' expr ',' expr ')' { - if (!($$= Lex->make_item_func_substr(thd, $3, $5, $7))) + if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5, $7)))) MYSQL_YYABORT; } | SUBSTRING '(' expr ',' expr ')' { - if (!($$= Lex->make_item_func_substr(thd, $3, $5))) + if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5)))) MYSQL_YYABORT; } | SUBSTRING '(' expr FROM expr FOR_SYM expr ')' { - if (!($$= Lex->make_item_func_substr(thd, $3, $5, $7))) + if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5, $7)))) MYSQL_YYABORT; } | SUBSTRING '(' expr FROM expr ')' { - if (!($$= Lex->make_item_func_substr(thd, $3, $5))) + if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5)))) MYSQL_YYABORT; } | SYSDATE opt_time_precision @@ -10203,45 +10592,45 @@ function_call_nonkeyword: $$= new (thd->mem_root) Item_func_sysdate_local(thd, $2); else $$= new (thd->mem_root) Item_func_now_local(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } | TIMESTAMP_ADD '(' interval_time_stamp ',' expr ',' expr ')' { $$= new (thd->mem_root) Item_date_add_interval(thd, $7, $5, $3, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | TIMESTAMP_DIFF '(' interval_time_stamp ',' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_timestamp_diff(thd, $5, $7, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | TRIM_ORACLE '(' trim_operands ')' { - if (!($$= $3.make_item_func_trim_oracle(thd))) + if (unlikely(!($$= $3.make_item_func_trim_oracle(thd)))) MYSQL_YYABORT; } | UTC_DATE_SYM optional_braces { $$= new (thd->mem_root) Item_func_curdate_utc(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } | UTC_TIME_SYM opt_time_precision { $$= new (thd->mem_root) Item_func_curtime_utc(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } | UTC_TIMESTAMP_SYM opt_time_precision { $$= new (thd->mem_root) Item_func_now_utc(thd, $2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } @@ -10249,28 +10638,28 @@ function_call_nonkeyword: COLUMN_ADD_SYM '(' expr ',' dyncall_create_list ')' { $$= create_func_dyncol_add(thd, $3, *$5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COLUMN_DELETE_SYM '(' expr ',' expr_list ')' { $$= create_func_dyncol_delete(thd, $3, *$5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COLUMN_CHECK_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_dyncol_check(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COLUMN_CREATE_SYM '(' dyncall_create_list ')' { $$= create_func_dyncol_create(thd, *$3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | @@ -10280,7 +10669,7 @@ function_call_nonkeyword: $$= create_func_dyncol_get(thd, $3, $5, $7.type_handler(), 
$7.length(), $7.dec(), lex->charset); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -10294,50 +10683,50 @@ function_call_conflict: ASCII_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_ascii(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CHARSET '(' expr ')' { $$= new (thd->mem_root) Item_func_charset(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COALESCE '(' expr_list ')' { $$= new (thd->mem_root) Item_func_coalesce(thd, *$3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COLLATION_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_collation(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DATABASE '(' ')' { $$= new (thd->mem_root) Item_func_database(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->safe_to_cache_query=0; } | IF_SYM '(' expr ',' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_if(thd, $3, $5, $7); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | FORMAT_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_format(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | FORMAT_SYM '(' expr ',' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_format(thd, $3, $5, $7); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } /* LAST_VALUE here conflicts with the definition for window functions. @@ -10346,75 +10735,75 @@ function_call_conflict: | LAST_VALUE '(' expr ')' { List *list= new (thd->mem_root) List; - if (list == NULL) + if (unlikely(list == NULL)) MYSQL_YYABORT; list->push_back($3, thd->mem_root); $$= new (thd->mem_root) Item_func_last_value(thd, *list); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LAST_VALUE '(' expr_list ',' expr ')' { $3->push_back($5, thd->mem_root); $$= new (thd->mem_root) Item_func_last_value(thd, *$3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MICROSECOND_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_microsecond(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MOD_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_mod(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | OLD_PASSWORD_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_password(thd, $3, Item_func_password::OLD); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | PASSWORD_SYM '(' expr ')' { Item* i1; i1= new (thd->mem_root) Item_func_password(thd, $3); - if (i1 == NULL) + if (unlikely(i1 == NULL)) MYSQL_YYABORT; $$= i1; } | QUARTER_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_quarter(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | REPEAT_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_repeat(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | REPLACE '(' expr ',' expr ',' expr ')' { - if (!($$= Lex->make_item_func_replace(thd, $3, $5, $7))) + if (unlikely(!($$= Lex->make_item_func_replace(thd, $3, $5, $7)))) MYSQL_YYABORT; } | REVERSE_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_reverse(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ROW_COUNT_SYM '(' ')' { $$= new (thd->mem_root) Item_func_row_count(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); Lex->safe_to_cache_query= 0; @@ -10422,25 +10811,25 @@ function_call_conflict: | TRUNCATE_SYM '(' expr ',' 
expr ')' { $$= new (thd->mem_root) Item_func_round(thd, $3, $5, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | WEEK_SYM '(' expr ')' { $$= new (thd->mem_root) Item_func_week(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | WEEK_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_func_week(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | WEIGHT_STRING_SYM '(' expr opt_ws_levels ')' { $$= new (thd->mem_root) Item_func_weight_string(thd, $3, 0, 0, $4); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | WEIGHT_STRING_SYM '(' expr AS CHAR_SYM ws_nweights opt_ws_levels ')' @@ -10448,26 +10837,26 @@ function_call_conflict: $$= new (thd->mem_root) Item_func_weight_string(thd, $3, 0, $6, $7 | MY_STRXFRM_PAD_WITH_SPACE); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | WEIGHT_STRING_SYM '(' expr AS BINARY ws_nweights ')' { Item *item= new (thd->mem_root) Item_char_typecast(thd, $3, $6, &my_charset_bin); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_func_weight_string(thd, item, 0, $6, MY_STRXFRM_PAD_WITH_SPACE); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | WEIGHT_STRING_SYM '(' expr ',' ulong_num ',' ulong_num ',' ulong_num ')' { $$= new (thd->mem_root) Item_func_weight_string(thd, $3, $5, $7, $9); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | geometry_function @@ -10475,7 +10864,7 @@ function_call_conflict: #ifdef HAVE_SPATIAL $$= $1; /* $1 may be NULL, GEOM_NEW not tested for out of memory */ - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; #else my_yyabort_error((ER_FEATURE_DISABLED, MYF(0), sym_group_geom.name, @@ -10537,6 +10926,11 @@ geometry_function: Geometry::wkb_polygon, Geometry::wkb_linestring)); } + | WITHIN '(' expr ',' expr ')' + { + $$= GEOM_NEW(thd, Item_func_spatial_precise_rel(thd, $3, $5, + Item_func::SP_WITHIN_FUNC)); + } ; /* @@ -10558,7 +10952,7 @@ function_call_generic: (udf= find_udf($1.str, $1.length)) && udf->type == UDFTYPE_AGGREGATE) { - if (lex->current_select->inc_in_sum_expr()) + if (unlikely(lex->current_select->inc_in_sum_expr())) { thd->parse_error(); MYSQL_YYABORT; @@ -10573,10 +10967,8 @@ function_call_generic: Create_func *builder; Item *item= NULL; - if (check_routine_name(&$1)) - { + if (unlikely(check_routine_name(&$1))) MYSQL_YYABORT; - } /* Implementation note: @@ -10616,45 +11008,13 @@ function_call_generic: } } - if (! ($$= item)) - { + if (unlikely(! ($$= item))) MYSQL_YYABORT; - } } - | ident '.' ident '(' opt_expr_list ')' + | ident_cli '.' ident_cli '(' opt_expr_list ')' { - Create_qfunc *builder; - Item *item= NULL; - - /* - The following in practice calls: - Create_sp_func::create() - and builds a stored function. - - However, it's important to maintain the interface between the - parser and the implementation in item_create.cc clean, - since this will change with WL#2128 (SQL PATH): - - INFORMATION_SCHEMA.version() is the SQL 99 syntax for the native - function version(), - - MySQL.version() is the SQL 2003 syntax for the native function - version() (a vendor can specify any schema). 
- */ - - if (!$1.str || check_db_name((LEX_STRING*) &$1)) - my_yyabort_error((ER_WRONG_DB_NAME, MYF(0), $1.str)); - if (check_routine_name(&$3)) - { + if (unlikely(!($$= Lex->make_item_func_call_generic(thd, &$1, &$3, $5)))) MYSQL_YYABORT; - } - - builder= find_qualified_function_builder(thd); - DBUG_ASSERT(builder); - item= builder->create_with_db(thd, &$1, &$3, true, $5); - - if (! ($$= item)) - { - MYSQL_YYABORT; - } } ; @@ -10684,7 +11044,7 @@ udf_expr_list: udf_expr { $$= new (thd->mem_root) List; - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; $$->push_back($1, thd->mem_root); } @@ -10726,46 +11086,46 @@ sum_expr: AVG_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_avg(thd, $3, FALSE); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | AVG_SYM '(' DISTINCT in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_avg(thd, $4, TRUE); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | BIT_AND '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_and(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | BIT_OR '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_or(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | BIT_XOR '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_xor(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COUNT_SYM '(' opt_all '*' ')' { Item *item= new (thd->mem_root) Item_int(thd, (int32) 0L, 1); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_sum_count(thd, item); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COUNT_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_count(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | COUNT_SYM '(' DISTINCT @@ -10775,13 +11135,13 @@ sum_expr: ')' { $$= new (thd->mem_root) Item_sum_count(thd, *$5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MIN_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_min(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } /* @@ -10792,55 +11152,55 @@ sum_expr: | MIN_SYM '(' DISTINCT in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_min(thd, $4); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MAX_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_max(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | MAX_SYM '(' DISTINCT in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_max(thd, $4); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | STD_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_std(thd, $3, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | VARIANCE_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_variance(thd, $3, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | STDDEV_SAMP_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_std(thd, $3, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | VAR_SAMP_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_variance(thd, $3, 1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SUM_SYM '(' in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_sum(thd, $3, FALSE); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SUM_SYM '(' DISTINCT in_sum_expr ')' { $$= new (thd->mem_root) Item_sum_sum(thd, $4, TRUE); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | GROUP_CONCAT_SYM '(' opt_distinct @@ 
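/*
  The qualified-call rule above (ident_cli '.' ident_cli '(' opt_expr_list ')')
  illustrates a pattern repeated in this patch: inline parser actions
  (db-name/routine-name checks, builder lookup, item construction) move into a
  LEX helper and the grammar keeps only the NULL-check-and-abort. A sketch of
  the calling convention the action relies on (hypothetical simplified
  signature, for illustration only):

      // Returns the constructed Item, or NULL after raising the error,
      // so the action reduces to: if (unlikely(!($$= ...))) MYSQL_YYABORT;
      Item *LEX::make_item_func_call_generic(THD *thd,
                                             Lex_ident_cli_st *db,
                                             Lex_ident_cli_st *name,
                                             List<Item> *args);
*/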
-10857,7 +11217,7 @@ sum_expr: sel->gorder_list, $7, $8, sel->select_limit, sel->offset_limit); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; sel->select_limit= NULL; sel->offset_limit= NULL; @@ -10871,25 +11231,25 @@ window_func_expr: window_func OVER_SYM window_name { $$= new (thd->mem_root) Item_window_func(thd, (Item_sum *) $1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; - if (Select->add_window_func((Item_window_func *) $$)) + if (unlikely(Select->add_window_func((Item_window_func *) $$))) MYSQL_YYABORT; } | window_func OVER_SYM window_spec { LEX *lex= Lex; - if (Select->add_window_spec(thd, lex->win_ref, - Select->group_list, - Select->order_list, - lex->win_frame)) + if (unlikely(Select->add_window_spec(thd, lex->win_ref, + Select->group_list, + Select->order_list, + lex->win_frame))) MYSQL_YYABORT; $$= new (thd->mem_root) Item_window_func(thd, (Item_sum *) $1, thd->lex->win_spec); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; - if (Select->add_window_func((Item_window_func *) $$)) + if (unlikely(Select->add_window_func((Item_window_func *) $$))) MYSQL_YYABORT; } ; @@ -10907,63 +11267,63 @@ simple_window_func: ROW_NUMBER_SYM '(' ')' { $$= new (thd->mem_root) Item_sum_row_number(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | RANK_SYM '(' ')' { $$= new (thd->mem_root) Item_sum_rank(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DENSE_RANK_SYM '(' ')' { $$= new (thd->mem_root) Item_sum_dense_rank(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | PERCENT_RANK_SYM '(' ')' { $$= new (thd->mem_root) Item_sum_percent_rank(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CUME_DIST_SYM '(' ')' { $$= new (thd->mem_root) Item_sum_cume_dist(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | NTILE_SYM '(' expr ')' { $$= new (thd->mem_root) Item_sum_ntile(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | FIRST_VALUE_SYM '(' expr ')' { $$= new (thd->mem_root) Item_sum_first_value(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LAST_VALUE '(' expr ')' { $$= new (thd->mem_root) Item_sum_last_value(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | NTH_VALUE_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_sum_nth_value(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | @@ -10971,17 +11331,17 @@ simple_window_func: { /* No second argument defaults to 1. */ Item* item_offset= new (thd->mem_root) Item_uint(thd, 1); - if (item_offset == NULL) + if (unlikely(item_offset == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_sum_lead(thd, $3, item_offset); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LEAD_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_sum_lead(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | @@ -10989,35 +11349,38 @@ simple_window_func: { /* No second argument defaults to 1. 
*/ Item* item_offset= new (thd->mem_root) Item_uint(thd, 1); - if (item_offset == NULL) + if (unlikely(item_offset == NULL)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_sum_lag(thd, $3, item_offset); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LAG_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_sum_lag(thd, $3, $5); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; + + + inverse_distribution_function: percentile_function OVER_SYM '(' opt_window_partition_clause ')' { LEX *lex= Lex; - if (Select->add_window_spec(thd, lex->win_ref, - Select->group_list, - Select->order_list, - NULL)) + if (unlikely(Select->add_window_spec(thd, lex->win_ref, + Select->group_list, + Select->order_list, + NULL))) MYSQL_YYABORT; $$= new (thd->mem_root) Item_window_func(thd, (Item_sum *) $1, thd->lex->win_spec); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; - if (Select->add_window_func((Item_window_func *) $$)) + if (unlikely(Select->add_window_func((Item_window_func *) $$))) MYSQL_YYABORT; } ; @@ -11033,14 +11396,14 @@ percentile_function: { Item *args= new (thd->mem_root) Item_decimal(thd, "0.5", 3, thd->charset()); - if (($$ == NULL) || (thd->is_error())) - { + if (unlikely(args == NULL) || unlikely(thd->is_error())) + MYSQL_YYABORT; + Select->prepare_add_window_spec(thd); + if (unlikely(add_order_to_list(thd, $3,FALSE))) MYSQL_YYABORT; - } - if (add_order_to_list(thd, $3,FALSE)) MYSQL_YYABORT; $$= new (thd->mem_root) Item_sum_percentile_cont(thd, args); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -11049,27 +11412,31 @@ inverse_distribution_function_def: PERCENTILE_CONT_SYM '(' expr ')' { $$= new (thd->mem_root) Item_sum_percentile_cont(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | PERCENTILE_DISC_SYM '(' expr ')' { $$= new (thd->mem_root) Item_sum_percentile_disc(thd, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; order_by_single_element_list: ORDER_SYM BY order_ident order_dir - { if (add_order_to_list(thd, $3,(bool) $4)) MYSQL_YYABORT; } + { + if (unlikely(add_order_to_list(thd, $3,(bool) $4))) + MYSQL_YYABORT; + } ; + window_name: ident { $$= (LEX_CSTRING *) thd->memdup(&$1, sizeof(LEX_CSTRING)); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -11077,7 +11444,7 @@ window_name: variable: '@' { - if (! Lex->parsing_options.allows_variable) + if (unlikely(! Lex->parsing_options.allows_variable)) my_yyabort_error((ER_VIEW_SELECT_VARIABLE, MYF(0))); } variable_aux @@ -11091,7 +11458,7 @@ variable_aux: { Item_func_set_user_var *item; $$= item= new (thd->mem_root) Item_func_set_user_var(thd, &$1, $3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; LEX *lex= Lex; lex->uncacheable(UNCACHEABLE_SIDEEFFECT); @@ -11100,23 +11467,20 @@ variable_aux: | ident_or_text { $$= new (thd->mem_root) Item_func_get_user_var(thd, &$1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; LEX *lex= Lex; lex->uncacheable(UNCACHEABLE_SIDEEFFECT); } - | '@' opt_var_ident_type ident_or_text opt_component + | '@' opt_var_ident_type ident_sysvar_name { - /* disallow "SELECT @@global.global.variable" */ - if ($3.str && $4.str && check_reserved_words(&$3)) - { - thd->parse_error(); + if (unlikely(!($$= Lex->make_item_sysvar(thd, $2, &$3)))) MYSQL_YYABORT; - } - if (!($$= get_system_var(thd, $2, &$3, &$4))) + } + | '@' opt_var_ident_type ident_sysvar_name '.' 
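/*
  System-variable references (@@var, @@global.var and the structured
  @@global.ident.ident form handled just below) are likewise routed through a
  single helper, Lex->make_item_sysvar(), replacing the old inline
  get_system_var() call plus check_reserved_words() and binlog-unsafety
  marking; that bookkeeping is presumed to move inside the helper. The
  accepted SQL shapes are unchanged, e.g. (examples only):

      SELECT @@max_connections;
      SELECT @@global.max_connections;
*/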
ident + { + if (unlikely(!($$= Lex->make_item_sysvar(thd, $2, &$3, &$5)))) MYSQL_YYABORT; - if (!((Item_func_get_system_var*) $$)->is_written_to_binlog()) - Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_VARIABLE); } ; @@ -11129,7 +11493,7 @@ opt_gconcat_separator: /* empty */ { $$= new (thd->mem_root) String(",", 1, &my_charset_latin1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | SEPARATOR_SYM text_string { $$ = $2; } @@ -11142,9 +11506,15 @@ opt_gorder_clause: gorder_list: gorder_list ',' order_ident order_dir - { if (add_gorder_to_list(thd, $3,(bool) $4)) MYSQL_YYABORT; } + { + if (unlikely(add_gorder_to_list(thd, $3,(bool) $4))) + MYSQL_YYABORT; + } | order_ident order_dir - { if (add_gorder_to_list(thd, $1,(bool) $2)) MYSQL_YYABORT; } + { + if (unlikely(add_gorder_to_list(thd, $1,(bool) $2))) + MYSQL_YYABORT; + } ; opt_glimit_clause: @@ -11187,11 +11557,13 @@ glimit_options: } ; + + in_sum_expr: opt_all { LEX *lex= Lex; - if (lex->current_select->inc_in_sum_expr()) + if (unlikely(lex->current_select->inc_in_sum_expr())) { thd->parse_error(); MYSQL_YYABORT; @@ -11253,9 +11625,9 @@ expr_list: expr { $$= new (thd->mem_root) List; - if ($$ == NULL) + if (unlikely($$ == NULL) || + unlikely($$->push_back($1, thd->mem_root))) MYSQL_YYABORT; - $$->push_back($1, thd->mem_root); } | expr_list ',' expr { @@ -11273,9 +11645,9 @@ ident_list: simple_ident { $$= new (thd->mem_root) List; - if ($$ == NULL) + if (unlikely($$ == NULL) || + unlikely($$->push_back($1, thd->mem_root))) MYSQL_YYABORT; - $$->push_back($1, thd->mem_root); } | ident_list ',' simple_ident { @@ -11288,25 +11660,27 @@ when_list: WHEN_SYM expr THEN_SYM expr { $$= new (thd->mem_root) List; - if ($$ == NULL) + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + if (unlikely($$->push_back($2, thd->mem_root) || + $$->push_back($4, thd->mem_root))) MYSQL_YYABORT; - $$->push_back($2, thd->mem_root); - $$->push_back($4, thd->mem_root); } | when_list WHEN_SYM expr THEN_SYM expr { - $1->push_back($3, thd->mem_root); - $1->push_back($5, thd->mem_root); + if (unlikely($1->push_back($3, thd->mem_root) || + $1->push_back($5, thd->mem_root))) + MYSQL_YYABORT; $$= $1; } ; - when_list_opt_else: when_list | when_list ELSE expr { - $1->push_back($3, thd->mem_root); + if (unlikely($1->push_back($3, thd->mem_root))) + MYSQL_YYABORT; $$= $1; } ; @@ -11315,15 +11689,17 @@ decode_when_list: expr ',' expr { $$= new (thd->mem_root) List; - if ($$ == NULL) + if (unlikely($$ == NULL) || + unlikely($$->push_back($1, thd->mem_root)) || + unlikely($$->push_back($3, thd->mem_root))) MYSQL_YYABORT; - $$->push_back($1, thd->mem_root); - $$->push_back($3, thd->mem_root); + } | decode_when_list ',' expr { $$= $1; - $$->push_back($3, thd->mem_root); + if (unlikely($$->push_back($3, thd->mem_root))) + MYSQL_YYABORT; } ; @@ -11335,7 +11711,7 @@ table_ref: | join_table { LEX *lex= Lex; - if (!($$= lex->current_select->nest_last_join(thd))) + if (unlikely(!($$= lex->current_select->nest_last_join(thd)))) { thd->parse_error(); MYSQL_YYABORT; @@ -11390,7 +11766,7 @@ join_table: { MYSQL_YYABORT_UNLESS($1 && $3); /* Change the current name resolution context to a local context. 
*/ - if (push_new_name_resolution_context(thd, $1, $3)) + if (unlikely(push_new_name_resolution_context(thd, $1, $3))) MYSQL_YYABORT; Select->parsing_place= IN_ON; } @@ -11407,7 +11783,7 @@ join_table: MYSQL_YYABORT_UNLESS($1 && $3); } '(' using_list ')' - { + { $3->straight=$2; add_join_natural($1,$3,$7,Select); $$=$3; @@ -11425,7 +11801,7 @@ join_table: { MYSQL_YYABORT_UNLESS($1 && $5); /* Change the current name resolution context to a local context. */ - if (push_new_name_resolution_context(thd, $1, $5)) + if (unlikely(push_new_name_resolution_context(thd, $1, $5))) MYSQL_YYABORT; Select->parsing_place= IN_ON; } @@ -11461,14 +11837,14 @@ join_table: { MYSQL_YYABORT_UNLESS($1 && $5); /* Change the current name resolution context to a local context. */ - if (push_new_name_resolution_context(thd, $1, $5)) + if (unlikely(push_new_name_resolution_context(thd, $1, $5))) MYSQL_YYABORT; Select->parsing_place= IN_ON; } expr { LEX *lex= Lex; - if (!($$= lex->current_select->convert_right_join())) + if (unlikely(!($$= lex->current_select->convert_right_join()))) MYSQL_YYABORT; add_join_on(thd, $$, $8); Lex->pop_context(); @@ -11481,7 +11857,7 @@ join_table: USING '(' using_list ')' { LEX *lex= Lex; - if (!($$= lex->current_select->convert_right_join())) + if (unlikely(!($$= lex->current_select->convert_right_join()))) MYSQL_YYABORT; add_join_natural($$,$5,$9,Select); } @@ -11490,7 +11866,7 @@ join_table: MYSQL_YYABORT_UNLESS($1 && $6); add_join_natural($6,$1,NULL,Select); LEX *lex= Lex; - if (!($$= lex->current_select->convert_right_join())) + if (unlikely(!($$= lex->current_select->convert_right_join()))) MYSQL_YYABORT; } ; @@ -11538,19 +11914,23 @@ table_factor: table_primary_ident: { + DBUG_ASSERT(Select); SELECT_LEX *sel= Select; sel->table_join_options= 0; } - table_ident opt_use_partition opt_table_alias opt_key_definition + table_ident opt_use_partition opt_for_system_time_clause opt_table_alias opt_key_definition { - if (!($$= Select->add_table_to_list(thd, $2, $4, - Select->get_table_join_options(), - YYPS->m_lock_type, - YYPS->m_mdl_type, - Select->pop_index_hints(), - $3))) + if (unlikely(!($$= Select->add_table_to_list(thd, $2, $5, + Select->get_table_join_options(), + YYPS->m_lock_type, + YYPS->m_mdl_type, + Select-> + pop_index_hints(), + $3)))) MYSQL_YYABORT; Select->add_joined_table($$); + if ($4) + $$->vers_conditions= Lex->vers_conditions; } ; @@ -11573,11 +11953,11 @@ table_primary_ident: */ table_primary_derived: - '(' get_select_lex select_derived_union ')' opt_table_alias + '(' get_select_lex select_derived_union ')' opt_for_system_time_clause opt_table_alias { /* Use $2 instead of Lex->current_select as derived table will alter value of Lex->current_select. 
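/*
  table_primary_ident now accepts an opt_for_system_time_clause between the
  partition clause and the alias, attaching Lex->vers_conditions to the
  resulting table reference. This is the system-versioning read syntax; an
  illustrative query shape, assuming a versioned table t (example only):

      SELECT * FROM t FOR SYSTEM_TIME AS OF TIMESTAMP '2020-01-01 00:00:00';

  The same optional clause is threaded through the derived-table rules below,
  which is why their positional parameters ($5 -> $6, $8 -> $9) shift by one.
*/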
*/ - if (!($3 || $5) && $2->embedding && + if (!($3 || $6) && $2->embedding && !$2->embedding->nested_join->join_list.elements) { /* we have a derived table ($3 == NULL) but no alias, @@ -11597,18 +11977,18 @@ table_primary_derived: SELECT_LEX_UNIT *unit= sel->master_unit(); lex->current_select= sel= unit->outer_select(); Table_ident *ti= new (thd->mem_root) Table_ident(unit); - if (ti == NULL) + if (unlikely(ti == NULL)) MYSQL_YYABORT; - if (!($$= sel->add_table_to_list(thd, - ti, $5, 0, - TL_READ, MDL_SHARED_READ))) - + if (unlikely(!($$= sel->add_table_to_list(thd, + ti, $6, 0, + TL_READ, + MDL_SHARED_READ)))) MYSQL_YYABORT; sel->add_joined_table($$); lex->pop_context(); lex->nest_level--; } - else if ($5 != NULL) + else if (unlikely($6 != NULL)) { /* Tables with or without joins within parentheses cannot @@ -11632,25 +12012,33 @@ table_primary_derived: if ($$ && $$->derived && !$$->derived->first_select()->next_select()) $$->select_lex->add_where_field($$->derived->first_select()); + if ($5) + { + MYSQL_YYABORT_UNLESS(!$3); + $$->vers_conditions= Lex->vers_conditions; + } } /* Represents derived table with WITH clause */ | '(' get_select_lex subselect_start with_clause query_expression_body - subselect_end ')' opt_table_alias + subselect_end ')' opt_for_system_time_clause opt_table_alias { LEX *lex=Lex; SELECT_LEX *sel= $2; SELECT_LEX_UNIT *unit= $5->master_unit(); Table_ident *ti= new (thd->mem_root) Table_ident(unit); - if (ti == NULL) + if (unlikely(ti == NULL)) MYSQL_YYABORT; $5->set_with_clause($4); lex->current_select= sel; - if (!($$= sel->add_table_to_list(lex->thd, - ti, $8, 0, - TL_READ, MDL_SHARED_READ))) + if (unlikely(!($$= sel->add_table_to_list(lex->thd, + ti, $9, 0, + TL_READ, + MDL_SHARED_READ)))) MYSQL_YYABORT; sel->add_joined_table($$); + if ($8) + $$->vers_conditions= Lex->vers_conditions; } ; @@ -11677,7 +12065,7 @@ select_derived_union: select_derived | select_derived union_order_or_limit { - if ($1) + if (unlikely($1)) { thd->parse_error(); MYSQL_YYABORT; @@ -11685,16 +12073,16 @@ select_derived_union: } | select_derived union_head_non_top { - if ($1) + if (unlikely($1)) { thd->parse_error(); MYSQL_YYABORT; } } union_list_derived_part2 - | derived_query_specification opt_select_lock_type - | derived_query_specification order_or_limit opt_select_lock_type - | derived_query_specification opt_select_lock_type union_list_derived + | derived_simple_table opt_select_lock_type + | derived_simple_table order_or_limit opt_select_lock_type + | derived_simple_table opt_select_lock_type union_list_derived ; union_list_derived_part2: @@ -11739,9 +12127,9 @@ select_derived: /* for normal joins, $2 != NULL and end_nested_join() != NULL, for derived tables, both must equal NULL */ - if (!($$= $1->end_nested_join(lex->thd)) && $2) + if (unlikely(!($$= $1->end_nested_join(lex->thd)) && $2)) MYSQL_YYABORT; - if (!$2 && $$) + if (unlikely(!$2 && $$)) { thd->parse_error(); MYSQL_YYABORT; @@ -11749,6 +12137,10 @@ select_derived: } ; +derived_simple_table: + derived_query_specification { $$= $1; } + | derived_table_value_constructor { $$= $1; } + ; /* Similar to query_specification, but for derived tables. 
Example: the inner parenthesized SELECT in this query: @@ -11763,18 +12155,32 @@ derived_query_specification: } ; +derived_table_value_constructor: + VALUES + { + Lex->tvc_start(); + } + values_list + { + if (Lex->tvc_finalize_derived()) + MYSQL_YYABORT; + $$= NULL; + } + ; + + select_derived2: { LEX *lex= Lex; lex->derived_tables|= DERIVED_SUBQUERY; - if (!lex->expr_allows_subselect || - lex->sql_command == (int)SQLCOM_PURGE) + if (unlikely(!lex->expr_allows_subselect || + lex->sql_command == (int)SQLCOM_PURGE)) { thd->parse_error(); MYSQL_YYABORT; } if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE || - mysql_new_select(lex, 1, NULL)) + unlikely(mysql_new_select(lex, 1, NULL))) MYSQL_YYABORT; mysql_init_select(lex); lex->current_select->linkage= DERIVED_TABLE_TYPE; @@ -11795,7 +12201,7 @@ get_select_lex_derived: get_select_lex { LEX *lex= Lex; - if ($1->init_nested_join(lex->thd)) + if (unlikely($1->init_nested_join(lex->thd))) MYSQL_YYABORT; } ; @@ -11879,12 +12285,12 @@ key_usage_list: using_list: ident { - if (!($$= new (thd->mem_root) List)) + if (unlikely(!($$= new (thd->mem_root) List))) MYSQL_YYABORT; String *s= new (thd->mem_root) String((const char *) $1.str, $1.length, system_charset_info); - if (s == NULL) + if (unlikely(unlikely(s == NULL))) MYSQL_YYABORT; $$->push_back(s, thd->mem_root); } @@ -11893,9 +12299,10 @@ using_list: String *s= new (thd->mem_root) String((const char *) $3.str, $3.length, system_charset_info); - if (s == NULL) + if (unlikely(unlikely(s == NULL))) + MYSQL_YYABORT; + if (unlikely($1->push_back(s, thd->mem_root))) MYSQL_YYABORT; - $1->push_back(s, thd->mem_root); $$= $1; } ; @@ -11942,10 +12349,10 @@ table_alias: opt_table_alias: /* empty */ { $$=0; } - | table_alias ident + | table_alias ident_table_alias { $$= (LEX_CSTRING*) thd->memdup(&$2,sizeof(LEX_STRING)); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -11999,7 +12406,7 @@ opt_escape: $$= ((thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) ? new (thd->mem_root) Item_string_ascii(thd, "", 0) : new (thd->mem_root) Item_string_ascii(thd, "\\", 1)); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -12015,9 +12422,15 @@ opt_group_clause: group_list: group_list ',' order_ident order_dir - { if (add_group_to_list(thd, $3,(bool) $4)) MYSQL_YYABORT; } + { + if (unlikely(add_group_to_list(thd, $3,(bool) $4))) + MYSQL_YYABORT; + } | order_ident order_dir - { if (add_group_to_list(thd, $1,(bool) $2)) MYSQL_YYABORT; } + { + if (unlikely(add_group_to_list(thd, $1,(bool) $2))) + MYSQL_YYABORT; + } ; olap_opt: @@ -12032,7 +12445,7 @@ olap_opt: SQL-2003: GROUP BY ... CUBE(col1, col2, col3) */ LEX *lex=Lex; - if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) + if (unlikely(lex->current_select->linkage == GLOBAL_OPTIONS_TYPE)) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "WITH CUBE", "global union parameters")); lex->current_select->olap= CUBE_TYPE; @@ -12049,7 +12462,7 @@ olap_opt: SQL-2003: GROUP BY ... 
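/*
  derived_simple_table splits into the pre-existing derived_query_specification
  and a new derived_table_value_constructor, so a VALUES list can stand where a
  parenthesized SELECT could; Lex->tvc_start() / Lex->tvc_finalize_derived()
  build the table value constructor. Illustrative query shape (example only):

      SELECT * FROM (VALUES (1, 'a'), (2, 'b')) AS v;
*/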
ROLLUP(col1, col2, col3) */ LEX *lex= Lex; - if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) + if (unlikely(lex->current_select->linkage == GLOBAL_OPTIONS_TYPE)) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "WITH ROLLUP", "global union parameters")); lex->current_select->olap= ROLLUP_TYPE; @@ -12077,10 +12490,10 @@ window_def: window_name AS window_spec { LEX *lex= Lex; - if (Select->add_window_def(thd, $1, lex->win_ref, - Select->group_list, - Select->order_list, - lex->win_frame)) + if (unlikely(Select->add_window_def(thd, $1, lex->win_ref, + Select->group_list, + Select->order_list, + lex->win_frame))) MYSQL_YYABORT; } ; @@ -12098,7 +12511,7 @@ opt_window_ref: | ident { thd->lex->win_ref= (LEX_CSTRING *) thd->memdup(&$1, sizeof(LEX_CSTRING)); - if (thd->lex->win_ref == NULL) + if (unlikely(thd->lex->win_ref == NULL)) MYSQL_YYABORT; } @@ -12122,7 +12535,7 @@ opt_window_frame_clause: lex->frame_top_bound, lex->frame_bottom_bound, $3); - if (lex->win_frame == NULL) + if (unlikely(lex->win_frame == NULL)) MYSQL_YYABORT; } ; @@ -12140,7 +12553,7 @@ window_frame_extent: lex->frame_bottom_bound= new (thd->mem_root) Window_frame_bound(Window_frame_bound::CURRENT, NULL); - if (lex->frame_bottom_bound == NULL) + if (unlikely(lex->frame_bottom_bound == NULL)) MYSQL_YYABORT; } | BETWEEN_SYM window_frame_bound AND_SYM window_frame_bound @@ -12156,21 +12569,21 @@ window_frame_start: { $$= new (thd->mem_root) Window_frame_bound(Window_frame_bound::PRECEDING, NULL); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | CURRENT_SYM ROW_SYM { $$= new (thd->mem_root) Window_frame_bound(Window_frame_bound::CURRENT, NULL); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | literal PRECEDING_SYM { $$= new (thd->mem_root) Window_frame_bound(Window_frame_bound::PRECEDING, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -12181,14 +12594,14 @@ window_frame_bound: { $$= new (thd->mem_root) Window_frame_bound(Window_frame_bound::FOLLOWING, NULL); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | literal FOLLOWING_SYM { $$= new (thd->mem_root) Window_frame_bound(Window_frame_bound::FOLLOWING, $1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -12222,7 +12635,7 @@ alter_order_item: simple_ident_nospvar order_dir { bool ascending= ($2 == 1) ? true : false; - if (add_order_to_list(thd, $1, ascending)) + if (unlikely(add_order_to_list(thd, $1, ascending))) MYSQL_YYABORT; } ; @@ -12242,9 +12655,9 @@ order_clause: LEX *lex=Lex; SELECT_LEX *sel= lex->current_select; SELECT_LEX_UNIT *unit= sel-> master_unit(); - if (sel->linkage != GLOBAL_OPTIONS_TYPE && - sel->olap != UNSPECIFIED_OLAP_TYPE && - (sel->linkage != UNION_TYPE || sel->braces)) + if (unlikely(sel->linkage != GLOBAL_OPTIONS_TYPE && + sel->olap != UNSPECIFIED_OLAP_TYPE && + (sel->linkage != UNION_TYPE || sel->braces))) { my_error(ER_WRONG_USAGE, MYF(0), "CUBE/ROLLUP", "ORDER BY"); @@ -12258,14 +12671,14 @@ order_clause: executed in the same way as the query SELECT ... ORDER BY order_list unless the SELECT construct contains ORDER BY or LIMIT clauses. - Otherwise we create a fake SELECT_LEX if it has not been created - yet. + Otherwise we create a fake SELECT_LEX if it has not been + created yet. 
*/ SELECT_LEX *first_sl= unit->first_select(); - if (!unit->is_unit_op() && - (first_sl->order_list.elements || - first_sl->select_limit) && - unit->add_fake_select_lex(thd)) + if (unlikely(!unit->is_unit_op() && + (first_sl->order_list.elements || + first_sl->select_limit) && + unit->add_fake_select_lex(thd))) MYSQL_YYABORT; } if (sel->master_unit()->is_unit_op() && !sel->braces) @@ -12288,9 +12701,15 @@ order_clause: order_list: order_list ',' order_ident order_dir - { if (add_order_to_list(thd, $3,(bool) $4)) MYSQL_YYABORT; } + { + if (unlikely(add_order_to_list(thd, $3,(bool) $4))) + MYSQL_YYABORT; + } | order_ident order_dir - { if (add_order_to_list(thd, $1,(bool) $2)) MYSQL_YYABORT; } + { + if (unlikely(add_order_to_list(thd, $1,(bool) $2))) + MYSQL_YYABORT; + } ; order_dir: @@ -12361,42 +12780,36 @@ limit_options: ; limit_option: - ident_with_tok_start - { - LEX *lex= thd->lex; - Lex_input_stream *lip= & thd->m_parser_state->m_lip; - if (!($$= lex->create_item_limit(thd, &$1, - $1.m_pos, lip->get_tok_end()))) - MYSQL_YYABORT; - } - | ident_with_tok_start '.' ident - { - LEX *lex= thd->lex; - Lex_input_stream *lip= & thd->m_parser_state->m_lip; - if (!($$= lex->create_item_limit(thd, &$1, &$3, - $1.m_pos, lip->get_ptr()))) - MYSQL_YYABORT; - } + ident_cli + { + if (unlikely(!($$= Lex->create_item_limit(thd, &$1)))) + MYSQL_YYABORT; + } + | ident_cli '.' ident_cli + { + if (unlikely(!($$= Lex->create_item_limit(thd, &$1, &$3)))) + MYSQL_YYABORT; + } | param_marker - { - $1->limit_clause_param= TRUE; - } + { + $1->limit_clause_param= TRUE; + } | ULONGLONG_NUM { $$= new (thd->mem_root) Item_uint(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LONG_NUM { $$= new (thd->mem_root) Item_uint(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | NUM { $$= new (thd->mem_root) Item_uint(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -12508,9 +12921,9 @@ procedure_clause: Item_field *item= new (thd->mem_root) Item_field(thd, &lex->current_select->context, NULL, NULL, &$2); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; - if (add_proc_to_list(thd, item)) + if (unlikely(add_proc_to_list(thd, item))) MYSQL_YYABORT; Lex->uncacheable(UNCACHEABLE_SIDEEFFECT); @@ -12542,7 +12955,7 @@ procedure_list2: procedure_item: remember_name expr remember_end { - if (add_proc_to_list(thd, $2)) + if (unlikely(add_proc_to_list(thd, $2))) MYSQL_YYABORT; if (!$2->name.str || $2->name.str == item_empty_name) $2->set_name(thd, $1, (uint) ($3 - $1), thd->charset()); @@ -12553,7 +12966,8 @@ select_var_list_init: { LEX *lex=Lex; if (!lex->describe && - (!(lex->result= new (thd->mem_root) select_dumpvar(thd)))) + unlikely((!(lex->result= new (thd->mem_root) + select_dumpvar(thd))))) MYSQL_YYABORT; } select_var_list @@ -12569,7 +12983,7 @@ select_var_ident: select_outvar { if (Lex->result) { - if ($1 == NULL) + if (unlikely($1 == NULL)) MYSQL_YYABORT; ((select_dumpvar *)Lex->result)->var_list.push_back($1, thd->mem_root); } @@ -12591,12 +13005,12 @@ select_outvar: } | ident_or_text { - if (!($$= Lex->create_outvar(thd, &$1)) && Lex->result) + if (unlikely(!($$= Lex->create_outvar(thd, &$1)) && Lex->result)) MYSQL_YYABORT; } | ident '.' 
ident { - if (!($$= Lex->create_outvar(thd, &$1, &$3)) && Lex->result) + if (unlikely(!($$= Lex->create_outvar(thd, &$1, &$3)) && Lex->result)) MYSQL_YYABORT; } ; @@ -12610,10 +13024,11 @@ into_destination: { LEX *lex= Lex; lex->uncacheable(UNCACHEABLE_SIDEEFFECT); - if (!(lex->exchange= - new (thd->mem_root) sql_exchange($2.str, 0)) || - !(lex->result= - new (thd->mem_root) select_export(thd, lex->exchange))) + if (unlikely(!(lex->exchange= + new (thd->mem_root) sql_exchange($2.str, 0))) || + unlikely(!(lex->result= + new (thd->mem_root) + select_export(thd, lex->exchange)))) MYSQL_YYABORT; } opt_load_data_charset @@ -12625,10 +13040,12 @@ into_destination: if (!lex->describe) { lex->uncacheable(UNCACHEABLE_SIDEEFFECT); - if (!(lex->exchange= new (thd->mem_root) sql_exchange($2.str,1))) + if (unlikely(!(lex->exchange= + new (thd->mem_root) sql_exchange($2.str,1)))) MYSQL_YYABORT; - if (!(lex->result= - new (thd->mem_root) select_dump(thd, lex->exchange))) + if (unlikely(!(lex->result= + new (thd->mem_root) + select_dump(thd, lex->exchange)))) MYSQL_YYABORT; } } @@ -12674,16 +13091,16 @@ drop: LEX *lex=Lex; Alter_drop *ad= (new (thd->mem_root) Alter_drop(Alter_drop::KEY, $4.str, $3)); - if (ad == NULL) + if (unlikely(ad == NULL)) MYSQL_YYABORT; lex->sql_command= SQLCOM_DROP_INDEX; lex->alter_info.reset(); lex->alter_info.flags= ALTER_DROP_INDEX; lex->alter_info.drop_list.push_back(ad, thd->mem_root); - if (!lex->current_select->add_table_to_list(thd, $6, NULL, - TL_OPTION_UPDATING, - TL_READ_NO_INSERT, - MDL_SHARED_UPGRADABLE)) + if (unlikely(!lex->current_select-> + add_table_to_list(thd, $6, NULL, TL_OPTION_UPDATING, + TL_READ_NO_INSERT, + MDL_SHARED_UPGRADABLE))) MYSQL_YYABORT; } | DROP DATABASE opt_if_exists ident @@ -12696,7 +13113,7 @@ drop: { LEX *lex= Lex; lex->set_command(SQLCOM_DROP_PACKAGE, $3); - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "PACKAGE")); lex->spname= $4; } @@ -12704,7 +13121,7 @@ drop: { LEX *lex= Lex; lex->set_command(SQLCOM_DROP_PACKAGE_BODY, $4); - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "PACKAGE BODY")); lex->spname= $5; } @@ -12712,13 +13129,13 @@ drop: { LEX *lex= thd->lex; sp_name *spname; - if ($4.str && check_db_name((LEX_STRING*) &$4)) - my_yyabort_error((ER_WRONG_DB_NAME, MYF(0), $4.str)); - if (lex->sphead) + if (unlikely($4.str && check_db_name((LEX_STRING*) &$4))) + my_yyabort_error((ER_WRONG_DB_NAME, MYF(0), $4.str)); + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "FUNCTION")); lex->set_command(SQLCOM_DROP_FUNCTION, $3); spname= new (thd->mem_root) sp_name(&$4, &$6, true); - if (spname == NULL) + if (unlikely(spname == NULL)) MYSQL_YYABORT; lex->spname= spname; } @@ -12727,20 +13144,20 @@ drop: LEX *lex= thd->lex; LEX_CSTRING db= {0, 0}; sp_name *spname; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "FUNCTION")); - if (thd->db.str && lex->copy_db_to(&db)) + if (thd->db.str && unlikely(lex->copy_db_to(&db))) MYSQL_YYABORT; lex->set_command(SQLCOM_DROP_FUNCTION, $3); spname= new (thd->mem_root) sp_name(&db, &$4, false); - if (spname == NULL) + if (unlikely(spname == NULL)) MYSQL_YYABORT; lex->spname= spname; } | DROP PROCEDURE_SYM opt_if_exists sp_name { LEX *lex=Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE")); lex->set_command(SQLCOM_DROP_PROCEDURE, $3); lex->spname= $4; @@ -12809,10 +13226,10 @@ table_list: table_name: 
table_ident { - if (!Select->add_table_to_list(thd, $1, NULL, - TL_OPTION_UPDATING, - YYPS->m_lock_type, - YYPS->m_mdl_type)) + if (unlikely(!Select->add_table_to_list(thd, $1, NULL, + TL_OPTION_UPDATING, + YYPS->m_lock_type, + YYPS->m_mdl_type))) MYSQL_YYABORT; } ; @@ -12820,12 +13237,12 @@ table_name: table_name_with_opt_use_partition: table_ident opt_use_partition { - if (!Select->add_table_to_list(thd, $1, NULL, - TL_OPTION_UPDATING, - YYPS->m_lock_type, - YYPS->m_mdl_type, - NULL, - $2)) + if (unlikely(!Select->add_table_to_list(thd, $1, NULL, + TL_OPTION_UPDATING, + YYPS->m_lock_type, + YYPS->m_mdl_type, + NULL, + $2))) MYSQL_YYABORT; } ; @@ -12838,10 +13255,12 @@ table_alias_ref_list: table_alias_ref: table_ident_opt_wild { - if (!Select->add_table_to_list(thd, $1, NULL, - TL_OPTION_UPDATING | TL_OPTION_ALIAS, + if (unlikely(!Select-> + add_table_to_list(thd, $1, NULL, + (TL_OPTION_UPDATING | + TL_OPTION_ALIAS), YYPS->m_lock_type, - YYPS->m_mdl_type)) + YYPS->m_mdl_type))) MYSQL_YYABORT; } ; @@ -12926,10 +13345,9 @@ insert_lock_option: | LOW_PRIORITY { $$= TL_WRITE_LOW_PRIORITY; } | DELAYED_SYM { - Lex->keyword_delayed_begin_offset= (uint)(YYLIP->get_tok_start() - - thd->query()); - Lex->keyword_delayed_end_offset= Lex->keyword_delayed_begin_offset + - YYLIP->yyLength() + 1; + // QQ: why was +1? + Lex->keyword_delayed_begin_offset= (uint)($1.pos() - thd->query()); + Lex->keyword_delayed_end_offset= (uint)($1.end() - thd->query()); $$= TL_WRITE_DELAYED; } | HIGH_PRIORITY { $$= TL_WRITE; } @@ -12939,10 +13357,8 @@ replace_lock_option: opt_low_priority { $$= $1; } | DELAYED_SYM { - Lex->keyword_delayed_begin_offset= (uint)(YYLIP->get_tok_start() - - thd->query()); - Lex->keyword_delayed_end_offset= Lex->keyword_delayed_begin_offset + - YYLIP->yyLength() + 1; + Lex->keyword_delayed_begin_offset= (uint)($1.pos() - thd->query()); + Lex->keyword_delayed_end_offset= (uint)($1.end() - thd->query()); $$= TL_WRITE_DELAYED; } ; @@ -12968,8 +13384,9 @@ insert_field_spec: | SET { LEX *lex=Lex; - if (!(lex->insert_list= new (thd->mem_root) List_item) || - lex->many_values.push_back(lex->insert_list, thd->mem_root)) + if (unlikely(!(lex->insert_list= new (thd->mem_root) List_item)) || + unlikely(lex->many_values.push_back(lex->insert_list, + thd->mem_root))) MYSQL_YYABORT; } ident_eq_list @@ -13001,8 +13418,8 @@ ident_eq_value: simple_ident_nospvar equal expr_or_default { LEX *lex=Lex; - if (lex->field_list.push_back($1, thd->mem_root) || - lex->insert_list->push_back($3, thd->mem_root)) + if (unlikely(lex->field_list.push_back($1, thd->mem_root)) || + unlikely(lex->insert_list->push_back($3, thd->mem_root))) MYSQL_YYABORT; } ; @@ -13030,13 +13447,14 @@ opt_by: no_braces: '(' { - if (!(Lex->insert_list= new (thd->mem_root) List_item)) - MYSQL_YYABORT; + if (unlikely(!(Lex->insert_list= new (thd->mem_root) List_item))) + MYSQL_YYABORT; } opt_values ')' { LEX *lex=Lex; - if (lex->many_values.push_back(lex->insert_list, thd->mem_root)) + if (unlikely(lex->many_values.push_back(lex->insert_list, + thd->mem_root))) MYSQL_YYABORT; } ; @@ -13049,12 +13467,12 @@ opt_values: values: values ',' expr_or_default { - if (Lex->insert_list->push_back($3, thd->mem_root)) + if (unlikely(Lex->insert_list->push_back($3, thd->mem_root))) MYSQL_YYABORT; } | expr_or_default { - if (Lex->insert_list->push_back($1, thd->mem_root)) + if (unlikely(Lex->insert_list->push_back($1, thd->mem_root))) MYSQL_YYABORT; } ; @@ -13064,13 +13482,13 @@ expr_or_default: | DEFAULT { $$= new (thd->mem_root) Item_default_value(thd, 
Lex->current_context()); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | IGNORE_SYM { $$= new (thd->mem_root) Item_ignore_value(thd, Lex->current_context()); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -13104,7 +13522,7 @@ update: LEX *lex= Lex; if (lex->select_lex.table_list.elements > 1) lex->sql_command= SQLCOM_UPDATE_MULTI; - else if (lex->select_lex.get_table_list()->derived) + else if (unlikely(lex->select_lex.get_table_list()->derived)) { /* it is single table update and it is update of derived table */ my_error(ER_NON_UPDATABLE_TABLE, MYF(0), @@ -13129,7 +13547,8 @@ update_list: update_elem: simple_ident_nospvar equal expr_or_default { - if (add_item_to_list(thd, $1) || add_value_to_list(thd, $3)) + if (unlikely(add_item_to_list(thd, $1)) || + unlikely(add_value_to_list(thd, $3))) MYSQL_YYABORT; } ; @@ -13143,8 +13562,8 @@ insert_update_elem: simple_ident_nospvar equal expr_or_default { LEX *lex= Lex; - if (lex->update_list.push_back($1, thd->mem_root) || - lex->value_list.push_back($3, thd->mem_root)) + if (unlikely(lex->update_list.push_back($1, thd->mem_root)) || + unlikely(lex->value_list.push_back($3, thd->mem_root))) MYSQL_YYABORT; } ; @@ -13168,23 +13587,48 @@ delete: lex->ignore= 0; lex->select_lex.init_order(); } - opt_delete_options single_multi + delete_part2 + ; + +opt_delete_system_time: + /* empty */ + { + Lex->vers_conditions.init(SYSTEM_TIME_ALL); + } + | BEFORE_SYM SYSTEM_TIME_SYM history_point + { + Lex->vers_conditions.init(SYSTEM_TIME_BEFORE, $3); + } + ; + +delete_part2: + opt_delete_options single_multi {} + | HISTORY_SYM delete_single_table opt_delete_system_time + { + Lex->last_table()->vers_conditions= Lex->vers_conditions; + } ; -single_multi: +delete_single_table: FROM table_ident opt_use_partition { - if (!Select->add_table_to_list(thd, $2, NULL, TL_OPTION_UPDATING, + if (unlikely(!Select-> + add_table_to_list(thd, $2, NULL, TL_OPTION_UPDATING, YYPS->m_lock_type, YYPS->m_mdl_type, NULL, - $3)) + $3))) MYSQL_YYABORT; YYPS->m_lock_type= TL_READ_DEFAULT; YYPS->m_mdl_type= MDL_SHARED_READ; } - opt_where_clause opt_order_clause - delete_limit_clause {} + ; + +single_multi: + delete_single_table + opt_where_clause + opt_order_clause + delete_limit_clause opt_select_expressions {} | table_wild_list { @@ -13194,7 +13638,7 @@ single_multi: } FROM join_table_list opt_where_clause { - if (multi_delete_set_locks_and_link_aux_tables(Lex)) + if (unlikely(multi_delete_set_locks_and_link_aux_tables(Lex))) MYSQL_YYABORT; } | FROM table_alias_ref_list @@ -13205,7 +13649,7 @@ single_multi: } USING join_table_list opt_where_clause { - if (multi_delete_set_locks_and_link_aux_tables(Lex)) + if (unlikely(multi_delete_set_locks_and_link_aux_tables(Lex))) MYSQL_YYABORT; } ; @@ -13224,27 +13668,31 @@ table_wild_one: ident opt_wild { Table_ident *ti= new (thd->mem_root) Table_ident(&$1); - if (ti == NULL) + if (unlikely(ti == NULL)) MYSQL_YYABORT; - if (!Select->add_table_to_list(thd, + if (unlikely(!Select-> + add_table_to_list(thd, ti, NULL, - TL_OPTION_UPDATING | TL_OPTION_ALIAS, + (TL_OPTION_UPDATING | + TL_OPTION_ALIAS), YYPS->m_lock_type, - YYPS->m_mdl_type)) + YYPS->m_mdl_type))) MYSQL_YYABORT; } | ident '.' 
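/*
  delete_part2 adds the DELETE HISTORY form for system-versioned tables: the
  history rows selected by opt_delete_system_time (all history by default, or
  only rows before a given point via BEFORE SYSTEM_TIME) are recorded as
  vers_conditions on the target table. Illustrative statement shape
  (example only):

      DELETE HISTORY FROM t BEFORE SYSTEM_TIME '2020-01-01 00:00:00';
*/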
ident opt_wild { Table_ident *ti= new (thd->mem_root) Table_ident(thd, &$1, &$3, 0); - if (ti == NULL) + if (unlikely(ti == NULL)) MYSQL_YYABORT; - if (!Select->add_table_to_list(thd, + if (unlikely(!Select-> + add_table_to_list(thd, ti, NULL, - TL_OPTION_UPDATING | TL_OPTION_ALIAS, + (TL_OPTION_UPDATING | + TL_OPTION_ALIAS), YYPS->m_lock_type, - YYPS->m_mdl_type)) + YYPS->m_mdl_type))) MYSQL_YYABORT; } ; @@ -13266,7 +13714,7 @@ opt_delete_option: ; truncate: - TRUNCATE_SYM opt_table_sym + TRUNCATE_SYM { LEX* lex= Lex; lex->sql_command= SQLCOM_TRUNCATE; @@ -13277,12 +13725,12 @@ truncate: YYPS->m_lock_type= TL_WRITE; YYPS->m_mdl_type= MDL_EXCLUSIVE; } - table_name opt_lock_wait_timeout + opt_table_sym table_name opt_lock_wait_timeout { LEX* lex= thd->lex; DBUG_ASSERT(!lex->m_sql_cmd); lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_truncate_table(); - if (lex->m_sql_cmd == NULL) + if (unlikely(lex->m_sql_cmd == NULL)) MYSQL_YYABORT; } opt_truncate_table_storage_clause { } @@ -13380,7 +13828,7 @@ show_param: { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_DATABASES; - if (prepare_schema_table(thd, lex, 0, SCH_SCHEMATA)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_SCHEMATA))) MYSQL_YYABORT; } | opt_full TABLES opt_db wild_and_where @@ -13388,7 +13836,7 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_TABLES; lex->select_lex.db= $3; - if (prepare_schema_table(thd, lex, 0, SCH_TABLE_NAMES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_TABLE_NAMES))) MYSQL_YYABORT; } | opt_full TRIGGERS_SYM opt_db wild_and_where @@ -13396,7 +13844,7 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_TRIGGERS; lex->select_lex.db= $3; - if (prepare_schema_table(thd, lex, 0, SCH_TRIGGERS)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_TRIGGERS))) MYSQL_YYABORT; } | EVENTS_SYM opt_db wild_and_where @@ -13404,7 +13852,7 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_EVENTS; lex->select_lex.db= $2; - if (prepare_schema_table(thd, lex, 0, SCH_EVENTS)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_EVENTS))) MYSQL_YYABORT; } | TABLE_SYM STATUS_SYM opt_db wild_and_where @@ -13412,7 +13860,7 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_TABLE_STATUS; lex->select_lex.db= $3; - if (prepare_schema_table(thd, lex, 0, SCH_TABLES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_TABLES))) MYSQL_YYABORT; } | OPEN_SYM TABLES opt_db wild_and_where @@ -13420,27 +13868,27 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_OPEN_TABLES; lex->select_lex.db= $3; - if (prepare_schema_table(thd, lex, 0, SCH_OPEN_TABLES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_OPEN_TABLES))) MYSQL_YYABORT; } | PLUGINS_SYM { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_PLUGINS; - if (prepare_schema_table(thd, lex, 0, SCH_PLUGINS)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_PLUGINS))) MYSQL_YYABORT; } | PLUGINS_SYM SONAME_SYM TEXT_STRING_sys { Lex->ident= $3; Lex->sql_command= SQLCOM_SHOW_PLUGINS; - if (prepare_schema_table(thd, Lex, 0, SCH_ALL_PLUGINS)) + if (unlikely(prepare_schema_table(thd, Lex, 0, SCH_ALL_PLUGINS))) MYSQL_YYABORT; } | PLUGINS_SYM SONAME_SYM wild_and_where { Lex->sql_command= SQLCOM_SHOW_PLUGINS; - if (prepare_schema_table(thd, Lex, 0, SCH_ALL_PLUGINS)) + if (unlikely(prepare_schema_table(thd, Lex, 0, SCH_ALL_PLUGINS))) MYSQL_YYABORT; } | ENGINE_SYM known_storage_engines show_engine_param @@ -13453,7 +13901,7 @@ show_param: lex->sql_command= SQLCOM_SHOW_FIELDS; if ($5.str) $4->change_db(&$5); - if (prepare_schema_table(thd, lex, 
$4, SCH_COLUMNS)) + if (unlikely(prepare_schema_table(thd, lex, $4, SCH_COLUMNS))) MYSQL_YYABORT; } | master_or_binary LOGS_SYM @@ -13481,14 +13929,14 @@ show_param: lex->sql_command= SQLCOM_SHOW_KEYS; if ($4.str) $3->change_db(&$4); - if (prepare_schema_table(thd, lex, $3, SCH_STATISTICS)) + if (unlikely(prepare_schema_table(thd, lex, $3, SCH_STATISTICS))) MYSQL_YYABORT; } | opt_storage ENGINES_SYM { LEX *lex=Lex; lex->sql_command= SQLCOM_SHOW_STORAGE_ENGINES; - if (prepare_schema_table(thd, lex, 0, SCH_ENGINES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_ENGINES))) MYSQL_YYABORT; } | AUTHORS_SYM @@ -13526,7 +13974,7 @@ show_param: { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_PROFILE; - if (prepare_schema_table(thd, lex, NULL, SCH_PROFILES) != 0) + if (unlikely(prepare_schema_table(thd, lex, NULL, SCH_PROFILES))) MYSQL_YYABORT; } | opt_var_type STATUS_SYM wild_and_where @@ -13534,7 +13982,7 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_STATUS; lex->option_type= $1; - if (prepare_schema_table(thd, lex, 0, SCH_SESSION_STATUS)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_SESSION_STATUS))) MYSQL_YYABORT; } | opt_full PROCESSLIST_SYM @@ -13544,27 +13992,28 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_VARIABLES; lex->option_type= $1; - if (prepare_schema_table(thd, lex, 0, SCH_SESSION_VARIABLES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_SESSION_VARIABLES))) MYSQL_YYABORT; } | charset wild_and_where { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_CHARSETS; - if (prepare_schema_table(thd, lex, 0, SCH_CHARSETS)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_CHARSETS))) MYSQL_YYABORT; } | COLLATION_SYM wild_and_where { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_COLLATIONS; - if (prepare_schema_table(thd, lex, 0, SCH_COLLATIONS)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_COLLATIONS))) MYSQL_YYABORT; } | GRANTS { Lex->sql_command= SQLCOM_SHOW_GRANTS; - if (!(Lex->grant_user= (LEX_USER*)thd->alloc(sizeof(LEX_USER)))) + if (unlikely(!(Lex->grant_user= + (LEX_USER*)thd->alloc(sizeof(LEX_USER))))) MYSQL_YYABORT; Lex->grant_user->user= current_user_and_current_role; } @@ -13583,7 +14032,7 @@ show_param: { LEX *lex= Lex; lex->sql_command = SQLCOM_SHOW_CREATE; - if (!lex->select_lex.add_table_to_list(thd, $3, NULL,0)) + if (unlikely(!lex->select_lex.add_table_to_list(thd, $3, NULL,0))) MYSQL_YYABORT; lex->create_info.storage_media= HA_SM_DEFAULT; } @@ -13591,7 +14040,7 @@ show_param: { LEX *lex= Lex; lex->sql_command = SQLCOM_SHOW_CREATE; - if (!lex->select_lex.add_table_to_list(thd, $3, NULL, 0)) + if (unlikely(!lex->select_lex.add_table_to_list(thd, $3, NULL, 0))) MYSQL_YYABORT; lex->table_type= TABLE_TYPE_VIEW; } @@ -13599,7 +14048,7 @@ show_param: { LEX *lex= Lex; lex->sql_command = SQLCOM_SHOW_CREATE; - if (!lex->select_lex.add_table_to_list(thd, $3, NULL, 0)) + if (unlikely(!lex->select_lex.add_table_to_list(thd, $3, NULL, 0))) MYSQL_YYABORT; lex->table_type= TABLE_TYPE_SEQUENCE; } @@ -13659,7 +14108,8 @@ show_param: | CREATE USER_SYM { Lex->sql_command= SQLCOM_SHOW_CREATE_USER; - if (!(Lex->grant_user= (LEX_USER*)thd->alloc(sizeof(LEX_USER)))) + if (unlikely(!(Lex->grant_user= + (LEX_USER*)thd->alloc(sizeof(LEX_USER))))) MYSQL_YYABORT; Lex->grant_user->user= current_user; } @@ -13672,28 +14122,28 @@ show_param: { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_STATUS_PROC; - if (prepare_schema_table(thd, lex, 0, SCH_PROCEDURES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_PROCEDURES))) MYSQL_YYABORT; } | 
FUNCTION_SYM STATUS_SYM wild_and_where { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_STATUS_FUNC; - if (prepare_schema_table(thd, lex, 0, SCH_PROCEDURES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_PROCEDURES))) MYSQL_YYABORT; } | PACKAGE_SYM STATUS_SYM wild_and_where { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_STATUS_PACKAGE; - if (prepare_schema_table(thd, lex, 0, SCH_PROCEDURES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_PROCEDURES))) MYSQL_YYABORT; } | PACKAGE_SYM BODY_SYM STATUS_SYM wild_and_where { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_STATUS_PACKAGE_BODY; - if (prepare_schema_table(thd, lex, 0, SCH_PROCEDURES)) + if (unlikely(prepare_schema_table(thd, lex, 0, SCH_PROCEDURES))) MYSQL_YYABORT; } | PROCEDURE_SYM CODE_SYM sp_name @@ -13719,7 +14169,7 @@ show_param: | describe_command FOR_SYM expr { Lex->sql_command= SQLCOM_SHOW_EXPLAIN; - if (prepare_schema_table(thd, Lex, 0, SCH_EXPLAIN)) + if (unlikely(prepare_schema_table(thd, Lex, 0, SCH_EXPLAIN))) MYSQL_YYABORT; add_value_to_list(thd, $3); } @@ -13729,17 +14179,17 @@ show_param: bool in_plugin; lex->sql_command= SQLCOM_SHOW_GENERIC; ST_SCHEMA_TABLE *table= find_schema_table(thd, &$1, &in_plugin); - if (!table || !table->old_format || !in_plugin) + if (unlikely(!table || !table->old_format || !in_plugin)) { thd->parse_error(ER_SYNTAX_ERROR, $2); MYSQL_YYABORT; } - if (lex->wild && table->idx_field1 < 0) + if (unlikely(lex->wild && table->idx_field1 < 0)) { thd->parse_error(ER_SYNTAX_ERROR, $3); MYSQL_YYABORT; } - if (make_schema_select(thd, Lex->current_select, table)) + if (unlikely(make_schema_select(thd, Lex->current_select, table))) MYSQL_YYABORT; } ; @@ -13794,7 +14244,7 @@ wild_and_where: { Lex->wild= new (thd->mem_root) String($3.str, $3.length, system_charset_info); - if (Lex->wild == NULL) + if (unlikely(Lex->wild == NULL)) MYSQL_YYABORT; $$= $2; } @@ -13817,7 +14267,7 @@ describe: lex->sql_command= SQLCOM_SHOW_FIELDS; lex->select_lex.db= null_clex_str; lex->verbose= 0; - if (prepare_schema_table(thd, lex, $2, SCH_COLUMNS)) + if (unlikely(prepare_schema_table(thd, lex, $2, SCH_COLUMNS))) MYSQL_YYABORT; } opt_describe_column @@ -13863,9 +14313,9 @@ opt_format_json: /* empty */ {} | FORMAT_SYM '=' ident_or_text { - if (!my_strcasecmp(system_charset_info, $3.str, "JSON")) + if (lex_string_eq(&$3, STRING_WITH_LEN("JSON"))) Lex->explain_json= true; - else if (!my_strcasecmp(system_charset_info, $3.str, "TRADITIONAL")) + else if (lex_string_eq(&$3, STRING_WITH_LEN("TRADITIONAL"))) DBUG_ASSERT(Lex->explain_json==false); else my_yyabort_error((ER_UNKNOWN_EXPLAIN_FORMAT, MYF(0), "EXPLAIN", @@ -13881,7 +14331,7 @@ opt_describe_column: Lex->wild= new (thd->mem_root) String((const char*) $1.str, $1.length, system_charset_info); - if (Lex->wild == NULL) + if (unlikely(Lex->wild == NULL)) MYSQL_YYABORT; } ; @@ -13938,8 +14388,9 @@ flush_lock: { Lex->type|= REFRESH_READ_LOCK | $4; } | FOR_SYM { - if (Lex->query_tables == NULL) // Table list can't be empty + if (unlikely(Lex->query_tables == NULL)) { + // Table list can't be empty thd->parse_error(ER_NO_TABLES_USED); MYSQL_YYABORT; } @@ -13962,12 +14413,12 @@ flush_option: { Lex->type|= REFRESH_GENERAL_LOG; } | SLOW LOGS_SYM { Lex->type|= REFRESH_SLOW_LOG; } - | BINARY LOGS_SYM + | BINARY LOGS_SYM opt_delete_gtid_domain { Lex->type|= REFRESH_BINARY_LOG; } | RELAY LOGS_SYM optional_connection_name { LEX *lex= Lex; - if (lex->type & REFRESH_RELAY_LOG) + if (unlikely(lex->type & REFRESH_RELAY_LOG)) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "FLUSH", "RELAY 
LOGS")); lex->type|= REFRESH_RELAY_LOG; lex->relay_log_connection_name= lex->mi.connection_name; @@ -13988,7 +14439,7 @@ flush_option: | SLAVE optional_connection_name { LEX *lex= Lex; - if (lex->type & REFRESH_SLAVE) + if (unlikely(lex->type & REFRESH_SLAVE)) my_yyabort_error((ER_WRONG_USAGE, MYF(0), "FLUSH","SLAVE")); lex->type|= REFRESH_SLAVE; lex->reset_slave_info.all= false; @@ -14003,14 +14454,15 @@ flush_option: { Lex->type|= REFRESH_GENERIC; ST_SCHEMA_TABLE *table= find_schema_table(thd, &$1); - if (!table || !table->reset_table) + if (unlikely(!table || !table->reset_table)) { thd->parse_error(ER_SYNTAX_ERROR, $2); MYSQL_YYABORT; } - Lex->view_list.push_back((LEX_CSTRING*) - thd->memdup(&$1, sizeof(LEX_CSTRING)), - thd->mem_root); + if (unlikely(Lex->view_list.push_back((LEX_CSTRING*) + thd->memdup(&$1, sizeof(LEX_CSTRING)), + thd->mem_root))) + MYSQL_YYABORT; } ; @@ -14019,6 +14471,24 @@ opt_table_list: | table_list {} ; +opt_delete_gtid_domain: + /* empty */ {} + | DELETE_DOMAIN_ID_SYM '=' '(' delete_domain_id_list ')' + {} + ; +delete_domain_id_list: + /* Empty */ + | delete_domain_id + | delete_domain_id_list ',' delete_domain_id + ; + +delete_domain_id: + ulong_num + { + insert_dynamic(&Lex->delete_gtid_domain, (uchar*) &($1)); + } + ; + optional_flush_tables_arguments: /* empty */ {$$= 0;} | AND_SYM DISABLE_SYM CHECKPOINT_SYM {$$= REFRESH_CHECKPOINT; } @@ -14161,7 +14631,7 @@ load: { LEX *lex= thd->lex; - if (lex->sphead) + if (unlikely(lex->sphead)) { my_error(ER_SP_BADSTATEMENT, MYF(0), $2 == FILETYPE_CSV ? "LOAD DATA" : "LOAD XML"); @@ -14175,14 +14645,17 @@ load: lex->local_file= $5; lex->duplicates= DUP_ERROR; lex->ignore= 0; - if (!(lex->exchange= new (thd->mem_root) sql_exchange($7.str, 0, $2))) + if (unlikely(!(lex->exchange= new (thd->mem_root) + sql_exchange($7.str, 0, $2)))) MYSQL_YYABORT; } opt_duplicate INTO TABLE_SYM table_ident opt_use_partition { LEX *lex=Lex; - if (!Select->add_table_to_list(thd, $12, NULL, TL_OPTION_UPDATING, - $4, MDL_SHARED_WRITE, NULL, $13)) + if (unlikely(!Select->add_table_to_list(thd, $12, NULL, + TL_OPTION_UPDATING, + $4, MDL_SHARED_WRITE, + NULL, $13))) MYSQL_YYABORT; lex->field_list.empty(); lex->update_list.empty(); @@ -14320,7 +14793,7 @@ field_or_var: | '@' ident_or_text { $$= new (thd->mem_root) Item_user_var_as_out_param(thd, &$2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -14339,8 +14812,8 @@ load_data_set_elem: simple_ident_nospvar equal remember_name expr_or_default remember_end { LEX *lex= Lex; - if (lex->update_list.push_back($1, thd->mem_root) || - lex->value_list.push_back($4, thd->mem_root)) + if (unlikely(lex->update_list.push_back($1, thd->mem_root)) || + unlikely(lex->value_list.push_back($4, thd->mem_root))) MYSQL_YYABORT; $4->set_name_no_truncate(thd, $3, (uint) ($5 - $3), thd->charset()); } @@ -14351,22 +14824,22 @@ load_data_set_elem: text_literal: TEXT_STRING { - if (!($$= thd->make_string_literal($1))) + if (unlikely(!($$= thd->make_string_literal($1)))) MYSQL_YYABORT; } | NCHAR_STRING { - if (!($$= thd->make_string_literal_nchar($1))) + if (unlikely(!($$= thd->make_string_literal_nchar($1)))) MYSQL_YYABORT; } | UNDERSCORE_CHARSET TEXT_STRING { - if (!($$= thd->make_string_literal_charset($2, $1))) + if (unlikely(!($$= thd->make_string_literal_charset($2, $1)))) MYSQL_YYABORT; } | text_literal TEXT_STRING_literal { - if (!($$= $1->make_string_literal_concat(thd, &$2))) + if (unlikely(!($$= $1->make_string_literal_concat(thd, &$2)))) MYSQL_YYABORT; } ; @@ -14377,7 +14850,7 @@ 
text_string: $$= new (thd->mem_root) String($1.str, $1.length, thd->variables.collation_connection); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | hex_or_bin_String { $$= $1; } @@ -14389,7 +14862,7 @@ hex_or_bin_String: { Item *tmp= new (thd->mem_root) Item_hex_hybrid(thd, $1.str, $1.length); - if (tmp == NULL) + if (unlikely(tmp == NULL)) MYSQL_YYABORT; /* it is OK only emulate fix_fields, because we need only @@ -14402,7 +14875,7 @@ hex_or_bin_String: { Item *tmp= new (thd->mem_root) Item_hex_string(thd, $1.str, $1.length); - if (tmp == NULL) + if (unlikely(tmp == NULL)) MYSQL_YYABORT; tmp->quick_fix_field(); $$= tmp->val_str((String*) 0); @@ -14411,7 +14884,7 @@ hex_or_bin_String: { Item *tmp= new (thd->mem_root) Item_bin_string(thd, $1.str, $1.length); - if (tmp == NULL) + if (unlikely(tmp == NULL)) MYSQL_YYABORT; /* it is OK only emulate fix_fields, because we need only @@ -14425,21 +14898,21 @@ hex_or_bin_String: param_marker: PARAM_MARKER { - if (!($$= Lex->add_placeholder(thd, &param_clex_str, - YYLIP->get_tok_start(), - YYLIP->get_tok_start() + 1))) + if (unlikely(!($$= Lex->add_placeholder(thd, &param_clex_str, + YYLIP->get_tok_start(), + YYLIP->get_tok_start() + 1)))) MYSQL_YYABORT; } - | colon_with_pos ident + | colon_with_pos ident_cli { - if (!($$= Lex->add_placeholder(thd, &null_clex_str, - $1, YYLIP->get_tok_end()))) + if (unlikely(!($$= Lex->add_placeholder(thd, &null_clex_str, + $1, $2.end())))) MYSQL_YYABORT; } | colon_with_pos NUM { - if (!($$= Lex->add_placeholder(thd, &null_clex_str, - $1, YYLIP->get_ptr()))) + if (unlikely(!($$= Lex->add_placeholder(thd, &null_clex_str, + $1, YYLIP->get_ptr())))) MYSQL_YYABORT; } ; @@ -14467,38 +14940,38 @@ literal: */ YYLIP->reduce_digest_token(TOK_GENERIC_VALUE, NULL_SYM); $$= new (thd->mem_root) Item_null(thd); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; YYLIP->next_state= MY_LEX_OPERATOR_OR_IDENT; } | FALSE_SYM { $$= new (thd->mem_root) Item_bool(thd, (char*) "FALSE",0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | TRUE_SYM { $$= new (thd->mem_root) Item_bool(thd, (char*) "TRUE",1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | HEX_NUM { $$= new (thd->mem_root) Item_hex_hybrid(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | HEX_STRING { $$= new (thd->mem_root) Item_hex_string(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | BIN_NUM { $$= new (thd->mem_root) Item_bin_string(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | UNDERSCORE_CHARSET hex_or_bin_String @@ -14511,7 +14984,8 @@ literal: item_str= new (thd->mem_root) Item_string_with_introducer(thd, NULL, $2->ptr(), $2->length(), $1); - if (!item_str || !item_str->check_well_formed_result(true)) + if (unlikely(!item_str || + !item_str->check_well_formed_result(true))) MYSQL_YYABORT; $$= item_str; @@ -14526,7 +15000,7 @@ NUM_literal: Item_int(thd, $1.str, (longlong) my_strtoll10($1.str, NULL, &error), $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | LONG_NUM @@ -14536,31 +15010,27 @@ NUM_literal: Item_int(thd, $1.str, (longlong) my_strtoll10($1.str, NULL, &error), $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ULONGLONG_NUM { $$= new (thd->mem_root) Item_uint(thd, $1.str, $1.length); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | DECIMAL_NUM { $$= new (thd->mem_root) Item_decimal(thd, $1.str, $1.length,
thd->charset()); - if (($$ == NULL) || (thd->is_error())) - { + if (unlikely($$ == NULL) || unlikely(thd->is_error())) MYSQL_YYABORT; - } } | FLOAT_NUM { $$= new (thd->mem_root) Item_float(thd, $1.str, $1.length); - if (($$ == NULL) || (thd->is_error())) - { + if (unlikely($$ == NULL) || unlikely(thd->is_error())) MYSQL_YYABORT; - } } ; @@ -14568,20 +15038,26 @@ NUM_literal: temporal_literal: DATE_SYM TEXT_STRING { - if (!($$= create_temporal_literal(thd, $2.str, $2.length, YYCSCL, - MYSQL_TYPE_DATE, true))) + if (unlikely(!($$= create_temporal_literal(thd, $2.str, $2.length, + YYCSCL, + MYSQL_TYPE_DATE, + true)))) MYSQL_YYABORT; } | TIME_SYM TEXT_STRING { - if (!($$= create_temporal_literal(thd, $2.str, $2.length, YYCSCL, - MYSQL_TYPE_TIME, true))) + if (unlikely(!($$= create_temporal_literal(thd, $2.str, $2.length, + YYCSCL, + MYSQL_TYPE_TIME, + true)))) MYSQL_YYABORT; } | TIMESTAMP TEXT_STRING { - if (!($$= create_temporal_literal(thd, $2.str, $2.length, YYCSCL, - MYSQL_TYPE_DATETIME, true))) + if (unlikely(!($$= create_temporal_literal(thd, $2.str, $2.length, + YYCSCL, + MYSQL_TYPE_DATETIME, + true)))) MYSQL_YYABORT; } ; @@ -14601,7 +15077,7 @@ with_clause: { With_clause *with_clause= new With_clause($2, Lex->curr_with_clause); - if (with_clause == NULL) + if (unlikely(with_clause == NULL)) MYSQL_YYABORT; Lex->derived_tables|= DERIVED_WITH; Lex->curr_with_clause= with_clause; @@ -14632,16 +15108,17 @@ with_list_element: opt_with_column_list { $2= new List (Lex->with_column_list); - if ($2 == NULL) + if (unlikely($2 == NULL)) MYSQL_YYABORT; Lex->with_column_list.empty(); } AS '(' remember_name subselect remember_end ')' { With_element *elem= new With_element($1, *$2, $7->master_unit()); - if (elem == NULL || Lex->curr_with_clause->add_with_element(elem)) + if (unlikely(elem == NULL) || + unlikely(Lex->curr_with_clause->add_with_element(elem))) MYSQL_YYABORT; - if (elem->set_unparsed_spec(thd, $6+1, $8)) + if (unlikely(elem->set_unparsed_spec(thd, $6+1, $8))) MYSQL_YYABORT; } ; @@ -14673,7 +15150,7 @@ query_name: ident { $$= (LEX_CSTRING *) thd->memdup(&$1, sizeof(LEX_CSTRING)); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -14692,24 +15169,26 @@ insert_ident: table_wild: ident '.' '*' { - SELECT_LEX *sel= Select; - $$= new (thd->mem_root) Item_field(thd, Lex->current_context(), - NullS, $1.str, &star_clex_str); - if ($$ == NULL) + if (unlikely(!($$= Lex->create_item_qualified_asterisk(thd, &$1)))) MYSQL_YYABORT; - sel->with_wild++; } | ident '.' ident '.' '*' { - SELECT_LEX *sel= Select; - const char* schema= thd->client_capabilities & CLIENT_NO_SCHEMA ? - NullS : $1.str; - $$= new (thd->mem_root) Item_field(thd, Lex->current_context(), - schema, - $3.str, &star_clex_str); - if ($$ == NULL) + if (unlikely(!($$= Lex->create_item_qualified_asterisk(thd, &$1, &$3)))) + MYSQL_YYABORT; + } + ; + +select_sublist_qualified_asterisk: + ident_cli '.' '*' + { + if (unlikely(!($$= Lex->create_item_qualified_asterisk(thd, &$1)))) + MYSQL_YYABORT; + } + | ident_cli '.' ident_cli '.' '*' + { + if (unlikely(!($$= Lex->create_item_qualified_asterisk(thd, &$1, &$3)))) MYSQL_YYABORT; - sel->with_wild++; } ; @@ -14717,21 +15196,32 @@ order_ident: expr { $$=$1; } ; + simple_ident: - ident + ident_cli { - Lex_input_stream *lip= YYLIP; - if (!($$= Lex->create_item_ident(thd, &$1, - lip->get_tok_start_prev(), - lip->get_tok_end()))) + if (unlikely(!($$= Lex->create_item_ident(thd, &$1)))) MYSQL_YYABORT; } - | simple_ident_q2 - | ident '.' ident + | ident_cli '.' 
ident_cli { - LEX *lex= thd->lex; - if (!($$= lex->create_item_ident(thd, &$1, &$3, - $1.m_pos, YYLIP->get_tok_end()))) + if (unlikely(!($$= Lex->create_item_ident(thd, &$1, &$3)))) + MYSQL_YYABORT; + } + | '.' ident_cli '.' ident_cli + { + Lex_ident_cli empty($2.pos(), 0); + if (unlikely(!($$= Lex->create_item_ident(thd, &empty, &$2, &$4)))) + MYSQL_YYABORT; + } + | ident_cli '.' ident_cli '.' ident_cli + { + if (unlikely(!($$= Lex->create_item_ident(thd, &$1, &$3, &$5)))) + MYSQL_YYABORT; + } + | colon_with_pos ident_cli '.' ident_cli + { + if (unlikely(!($$= Lex->make_item_colon_ident_ident(thd, &$2, &$4)))) MYSQL_YYABORT; } ; @@ -14739,47 +15229,28 @@ simple_ident: simple_ident_nospvar: ident { - if (!($$= Lex->create_item_ident_nosp(thd, &$1))) + if (unlikely(!($$= Lex->create_item_ident_nosp(thd, &$1)))) MYSQL_YYABORT; } - | simple_ident_q { $$= $1; } - ; - -simple_ident_q: - ident '.' ident + | ident '.' ident { - if (!($$= Lex->create_item_ident_nospvar(thd, &$1, &$3))) + if (unlikely(!($$= Lex->create_item_ident_nospvar(thd, &$1, &$3)))) MYSQL_YYABORT; } - | simple_ident_q2 - ; - -simple_ident_q2: - colon_with_pos ident '.' ident + | colon_with_pos ident_cli '.' ident_cli { - LEX *lex= Lex; - if (lex->is_trigger_new_or_old_reference(&$2)) - { - bool new_row= ($2.str[0]=='N' || $2.str[0]=='n'); - if (!($$= lex->create_and_link_Item_trigger_field(thd, - &$4, - new_row))) - MYSQL_YYABORT; - } - else - { - thd->parse_error(); + if (unlikely(!($$= Lex->make_item_colon_ident_ident(thd, &$2, &$4)))) MYSQL_YYABORT; - } } | '.' ident '.' ident { - if (!($$= Lex->create_item_ident(thd, &null_clex_str, &$2, &$4))) + Lex_ident_sys none; + if (unlikely(!($$= Lex->create_item_ident(thd, &none, &$2, &$4)))) MYSQL_YYABORT; } | ident '.' ident '.' ident { - if (!($$= Lex->create_item_ident(thd, &$1, &$3, &$5))) + if (unlikely(!($$= Lex->create_item_ident(thd, &$1, &$3, &$5)))) MYSQL_YYABORT; } ; @@ -14789,17 +15260,19 @@ field_ident: | ident '.' ident '.' ident { TABLE_LIST *table= Select->table_list.first; - if (my_strcasecmp(table_alias_charset, $1.str, table->db.str)) + if (unlikely(my_strcasecmp(table_alias_charset, $1.str, + table->db.str))) my_yyabort_error((ER_WRONG_DB_NAME, MYF(0), $1.str)); - if (my_strcasecmp(table_alias_charset, $3.str, - table->table_name.str)) + if (unlikely(my_strcasecmp(table_alias_charset, $3.str, + table->table_name.str))) my_yyabort_error((ER_WRONG_TABLE_NAME, MYF(0), $3.str)); $$=$5; } | ident '.' ident { TABLE_LIST *table= Select->table_list.first; - if (my_strcasecmp(table_alias_charset, $1.str, table->alias.str)) + if (unlikely(my_strcasecmp(table_alias_charset, $1.str, + table->alias.str))) my_yyabort_error((ER_WRONG_TABLE_NAME, MYF(0), $1.str)); $$=$3; } @@ -14810,20 +15283,20 @@ table_ident: ident { $$= new (thd->mem_root) Table_ident(&$1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ident '.' ident { $$= new (thd->mem_root) Table_ident(thd, &$1, &$3, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | '.' ident { /* For Delphi */ $$= new (thd->mem_root) Table_ident(&$2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -14832,13 +15305,13 @@ table_ident_opt_wild: ident opt_wild { $$= new (thd->mem_root) Table_ident(&$1); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ident '.' 
ident opt_wild { $$= new (thd->mem_root) Table_ident(thd, &$1, &$3, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -14848,150 +15321,104 @@ table_ident_nodb: { LEX_CSTRING db={(char*) any_db,3}; $$= new (thd->mem_root) Table_ident(thd, &db, &$1, 0); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; -IDENT_sys: - IDENT { $$= $1; } +IDENT_cli: + IDENT | IDENT_QUOTED + ; + +ident_cli: + IDENT + | IDENT_QUOTED + | keyword_ident { $$= $1; } + ; + +IDENT_sys: + IDENT_cli { - if (thd->charset_is_system_charset) - { - CHARSET_INFO *cs= system_charset_info; - size_t wlen= Well_formed_prefix(cs, $1.str, $1.length).length(); - if (wlen < $1.length) - { - ErrConvString err($1.str, $1.length, &my_charset_bin); - my_error(ER_INVALID_CHARACTER_STRING, MYF(0), - cs->csname, err.ptr()); - MYSQL_YYABORT; - } - $$= $1; - } - else - { - LEX_STRING to; - if (thd->convert_with_error(system_charset_info, &to, - thd->charset(), $1.str, $1.length)) - MYSQL_YYABORT; - $$.str= to.str; - $$.length= to.length; - } + if (unlikely(thd->to_ident_sys_alloc(&$$, &$1))) + MYSQL_YYABORT; } ; TEXT_STRING_sys: TEXT_STRING { - if (thd->charset_is_system_charset) - $$= $1; - else - { - LEX_STRING to; - if (thd->convert_string(&to, system_charset_info, - $1.str, $1.length, thd->charset())) - MYSQL_YYABORT; - $$.str= to.str; - $$.length= to.length; - } + if (thd->make_text_string_sys(&$$, &$1)) + MYSQL_YYABORT; } ; TEXT_STRING_literal: TEXT_STRING { - if (thd->charset_is_collation_connection) - $$= $1; - else - { - LEX_STRING to; - if (thd->convert_string(&to, thd->variables.collation_connection, - $1.str, $1.length, thd->charset())) - MYSQL_YYABORT; - $$.str= to.str; - $$.length= to.length; - } + if (thd->make_text_string_connection(&$$, &$1)) + MYSQL_YYABORT; } ; TEXT_STRING_filesystem: TEXT_STRING { - if (thd->charset_is_character_set_filesystem) - $$= $1; - else - { - LEX_STRING to; - if (thd->convert_string(&to, - thd->variables.character_set_filesystem, - $1.str, $1.length, thd->charset())) - MYSQL_YYABORT; - $$.str= to.str; - $$.length= to.length; - } + if (thd->make_text_string_filesystem(&$$, &$1)) + MYSQL_YYABORT; } ; +ident_table_alias: + IDENT_sys + | keyword_table_alias + { + if (unlikely($$.copy_keyword(thd, &$1))) + MYSQL_YYABORT; + } + ; + + +ident_sysvar_name: + IDENT_sys + | keyword_sysvar_name + { + if (unlikely($$.copy_keyword(thd, &$1))) + MYSQL_YYABORT; + } + | TEXT_STRING_sys + { + if (unlikely($$.copy_sys(thd, &$1))) + MYSQL_YYABORT; + } + ; + + ident: IDENT_sys + | keyword_ident { - (LEX_CSTRING &)$$= $1; - $$.m_pos= (char *) YYLIP->get_tok_start_prev(); - } - | keyword - { - $$.str= thd->strmake($1.str, $1.length); - if ($$.str == NULL) + if (unlikely($$.copy_keyword(thd, &$1))) MYSQL_YYABORT; - $$.length= $1.length; - $$.m_pos= (char *) YYLIP->get_tok_start_prev(); - } - ; - -ident_with_tok_start: - IDENT_sys - { - (LEX_CSTRING &)$$= $1; - $$.m_pos= (char *) YYLIP->get_tok_start(); - } - | keyword - { - if (!($$.str= thd->strmake($1.str, $1.length))) - MYSQL_YYABORT; - $$.length= $1.length; - $$.m_pos= (char *) YYLIP->get_tok_start(); } ; ident_directly_assignable: - IDENT_sys { $$=$1; } + IDENT_sys | keyword_directly_assignable { - $$.str= thd->strmake($1.str, $1.length); - if ($$.str == NULL) + if (unlikely($$.copy_keyword(thd, &$1))) MYSQL_YYABORT; - $$.length= $1.length; - } - | keyword_sp - { - $$.str= thd->strmake($1.str, $1.length); - if ($$.str == NULL) - MYSQL_YYABORT; - $$.length= $1.length; } ; label_ident: - IDENT_sys { $$=$1; } - | keyword_sp + 
IDENT_sys + | keyword_label { - $$.str= thd->strmake($1.str, $1.length); - if ($$.str == NULL) + if (unlikely($$.copy_keyword(thd, &$1))) MYSQL_YYABORT; - $$.length= $1.length; } ; @@ -15003,7 +15430,7 @@ labels_declaration_oracle: label_declaration_oracle: SHIFT_LEFT label_ident SHIFT_RIGHT { - if (Lex->sp_push_goto_label(thd, &$2)) + if (unlikely(Lex->sp_push_goto_label(thd, &$2))) MYSQL_YYABORT; $$= $2; } @@ -15018,28 +15445,28 @@ ident_or_text: user_maybe_role: ident_or_text { - if (!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER)))) + if (unlikely(!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER))))) MYSQL_YYABORT; $$->user = $1; $$->host= null_clex_str; // User or Role, see get_current_user() $$->reset_auth(); - if (check_string_char_length(&$$->user, ER_USERNAME, - username_char_length, - system_charset_info, 0)) + if (unlikely(check_string_char_length(&$$->user, ER_USERNAME, + username_char_length, + system_charset_info, 0))) MYSQL_YYABORT; } | ident_or_text '@' ident_or_text { - if (!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER)))) + if (unlikely(!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER))))) MYSQL_YYABORT; $$->user = $1; $$->host=$3; $$->reset_auth(); - if (check_string_char_length(&$$->user, ER_USERNAME, - username_char_length, - system_charset_info, 0) || - check_host_name(&$$->host)) + if (unlikely(check_string_char_length(&$$->user, ER_USERNAME, + username_char_length, + system_charset_info, 0)) || + unlikely(check_host_name(&$$->host))) MYSQL_YYABORT; if ($$->host.str[0]) { @@ -15061,7 +15488,7 @@ user_maybe_role: } | CURRENT_USER optional_braces { - if (!($$=(LEX_USER*)thd->calloc(sizeof(LEX_USER)))) + if (unlikely(!($$=(LEX_USER*)thd->calloc(sizeof(LEX_USER))))) MYSQL_YYABORT; $$->user= current_user; $$->plugin= empty_clex_str; @@ -15079,74 +15506,143 @@ user: user_maybe_role } ; -/* Keyword that we allow for identifiers (except SP labels) */ -keyword: - keyword_sp {} - | keyword_directly_assignable {} - | keyword_directly_not_assignable {} +/* Keywords which we allow as table aliases. */ +keyword_table_alias: + keyword_data_type + | keyword_sp_block_section + | keyword_sp_head + | keyword_sp_var_and_label + | keyword_sp_var_not_label + | keyword_sysvar_type + | keyword_verb_clause + | FUNCTION_SYM ; +/* Keyword that we allow for identifiers (except SP labels) */ +keyword_ident: + keyword_data_type + | keyword_sp_block_section + | keyword_sp_head + | keyword_sp_var_and_label + | keyword_sp_var_not_label + | keyword_sysvar_type + | keyword_verb_clause + | FUNCTION_SYM + | WINDOW_SYM + ; + +/* + Keywords that we allow for labels in SPs. + Should not include keywords that start a statement or SP characteristics. +*/ +keyword_label: + keyword_data_type + | keyword_sp_var_and_label + | keyword_sysvar_type + | FUNCTION_SYM + ; + +keyword_sysvar_name: + keyword_data_type + | keyword_sp_block_section + | keyword_sp_head + | keyword_sp_var_and_label + | keyword_sp_var_not_label + | keyword_verb_clause + | FUNCTION_SYM + | WINDOW_SYM + ; + +keyword_sp_decl: + keyword_sp_head + | keyword_sp_var_and_label + | keyword_sp_var_not_label + | keyword_sysvar_type + | keyword_verb_clause + | WINDOW_SYM + ; + +keyword_directly_assignable: + keyword_data_type + | keyword_sp_var_and_label + | keyword_sp_var_not_label + | keyword_sysvar_type + | FUNCTION_SYM + | WINDOW_SYM + ; /* Keywords that we allow in Oracle-style direct assignments: xxx := 10; + but do not allow in labels in the default sql_mode: + label: + stmt1; + stmt2; + TODO: check if some of them can migrate to keyword_sp_var_and_label. 
*/ -keyword_directly_assignable: - ASCII_SYM {} - | BACKUP_SYM {} - | BINLOG_SYM {} - | BYTE_SYM {} - | CACHE_SYM {} - | CHECKSUM_SYM {} - | CHECKPOINT_SYM {} - | COLUMN_ADD_SYM {} - | COLUMN_CHECK_SYM {} - | COLUMN_CREATE_SYM {} - | COLUMN_DELETE_SYM {} - | COLUMN_GET_SYM {} - | DEALLOCATE_SYM {} - | EXAMINED_SYM {} - | EXCLUDE_SYM {} - | EXECUTE_SYM {} - | FLUSH_SYM {} - | FOLLOWING_SYM {} - | FORMAT_SYM {} - | GET_SYM {} - | HELP_SYM {} - | HOST_SYM {} - | INSTALL_SYM {} - | OPTION {} - | OPTIONS_SYM {} - | OWNER_SYM {} - | PARSER_SYM {} - | PORT_SYM {} - | PRECEDING_SYM {} - | PREPARE_SYM {} - | REMOVE_SYM {} - | RESET_SYM {} - | RESTORE_SYM {} - | SECURITY_SYM {} - | SERVER_SYM {} - | SIGNED_SYM {} - | SOCKET_SYM {} - | SLAVE {} - | SLAVES {} - | SONAME_SYM {} - | START_SYM {} - | STOP_SYM {} - | STORED_SYM {} - | TIES_SYM {} - | UNICODE_SYM {} - | UNINSTALL_SYM {} - | UNBOUNDED_SYM {} - | WITHIN {} - | WRAPPER_SYM {} - | XA_SYM {} - | UPGRADE_SYM {} +keyword_sp_var_not_label: + ASCII_SYM + | BACKUP_SYM + | BINLOG_SYM + | BYTE_SYM + | CACHE_SYM + | CHECKSUM_SYM + | CHECKPOINT_SYM + | COLUMN_ADD_SYM + | COLUMN_CHECK_SYM + | COLUMN_CREATE_SYM + | COLUMN_DELETE_SYM + | COLUMN_GET_SYM + | DEALLOCATE_SYM + | EXAMINED_SYM + | EXCLUDE_SYM + | EXECUTE_SYM + | FLUSH_SYM + | FOLLOWING_SYM + | FORMAT_SYM + | GET_SYM + | HELP_SYM + | HISTORY_SYM + | HOST_SYM + | INSTALL_SYM + | OPTION + | OPTIONS_SYM + | OWNER_SYM + | PARSER_SYM + | PERIOD_SYM + | PORT_SYM + | PRECEDING_SYM + | PREPARE_SYM + | REMOVE_SYM + | RESET_SYM + | RESTORE_SYM + | SECURITY_SYM + | SERVER_SYM + | SIGNED_SYM + | SOCKET_SYM + | SLAVE + | SLAVES + | SONAME_SYM + | START_SYM + | STOP_SYM + | STORED_SYM + | SYSTEM + | SYSTEM_TIME_SYM + | TIES_SYM + | UNICODE_SYM + | UNINSTALL_SYM + | UNBOUNDED_SYM + | VERSIONING_SYM + | WITHIN + | WITHOUT + | WRAPPER_SYM + | XA_SYM + | UPGRADE_SYM ; /* - Keywords that are allowed as identifiers (e.g. table, column names), + Keywords that can start optional clauses in SP or trigger declarations + Allowed as identifiers (e.g. table, column names), but: - not allowed as SP label names - not allowed as variable names in Oracle-style assignments: @@ -15181,401 +15677,402 @@ keyword_directly_assignable: CREATE TRIGGER .. FOR EACH ROW follows:= 10; CREATE TRIGGER .. FOR EACH ROW FOLLOWS tr1 a:= 10; */ -keyword_directly_not_assignable: - CONTAINS_SYM { /* SP characteristic */ } - | LANGUAGE_SYM { /* SP characteristic */ } - | NO_SYM { /* SP characteristic */ } - | CHARSET { /* SET CHARSET utf8; */ } - | FOLLOWS_SYM { /* Conflicts with assignment in FOR EACH */} - | PRECEDES_SYM { /* Conflicts with assignment in FOR EACH */} - | keyword_sp_verb_clause { } +keyword_sp_head: + CONTAINS_SYM /* SP characteristic */ + | LANGUAGE_SYM /* SP characteristic */ + | NO_SYM /* SP characteristic */ + | CHARSET /* SET CHARSET utf8; */ + | FOLLOWS_SYM /* Conflicts with assignment in FOR EACH */ + | PRECEDES_SYM /* Conflicts with assignment in FOR EACH */ ; /* - * Keywords that we allow for labels in SPs. - * Anything that's the beginning of a statement or characteristics - * must be in keyword above, otherwise we get (harmful) shift/reduce - * conflicts. - */ -keyword_sp: - keyword_sp_data_type - | keyword_sp_not_data_type - | FUNCTION_SYM { /* Oracle-PLSQL-R */} - ; - - -/* - Keywords that start a statement or an SP block section. + Keywords that start a statement. Generally allowed as identifiers (e.g. 
table, column names) - not allowed as SP label names - not allowed as variable names in Oracle-style assignments: xxx:=10 */ -keyword_sp_verb_clause: - BEGIN_SYM { /* Compound. Reserved in Oracle */ } - | CLOSE_SYM { /* Verb clause. Reserved in Oracle */ } - | COMMIT_SYM { /* Verb clause. Reserved in Oracle */ } - | EXCEPTION_SYM { /* EXCEPTION section in SP blocks */ } - | DO_SYM { /* Verb clause */ } - | END { /* Compound. Reserved in Oracle */ } - | HANDLER_SYM { /* Verb clause */ } - | OPEN_SYM { /* Verb clause. Reserved in Oracle */ } - | REPAIR { /* Verb clause */ } - | ROLLBACK_SYM { /* Verb clause. Reserved in Oracle */ } - | SAVEPOINT_SYM { /* Verb clause. Reserved in Oracle */ } - | SHUTDOWN { /* Verb clause */ } - | TRUNCATE_SYM { /* Verb clause. Reserved in Oracle */ } - ; +keyword_verb_clause: + CLOSE_SYM /* Verb clause. Reserved in Oracle */ + | COMMIT_SYM /* Verb clause. Reserved in Oracle */ + | DO_SYM /* Verb clause */ + | HANDLER_SYM /* Verb clause */ + | OPEN_SYM /* Verb clause. Reserved in Oracle */ + | REPAIR /* Verb clause */ + | ROLLBACK_SYM /* Verb clause. Reserved in Oracle */ + | SAVEPOINT_SYM /* Verb clause. Reserved in Oracle */ + | SHUTDOWN /* Verb clause */ + | TRUNCATE_SYM /* Verb clause. Reserved in Oracle */ + ; + +/* + Keywords that start an SP block section. +*/ +keyword_sp_block_section: + BEGIN_SYM + | EXCEPTION_SYM + | END + ; + +keyword_sysvar_type: + GLOBAL_SYM + | LOCAL_SYM + | SESSION_SYM + ; /* These keywords are generally allowed as identifiers, but not allowed as non-delimited SP variable names in sql_mode=ORACLE. */ -keyword_sp_data_type: - BIT_SYM {} - | BOOLEAN_SYM {} /* PLSQL-R */ - | BOOL_SYM {} - | CLOB {} - | DATE_SYM {} /* Oracle-R, PLSQL-R */ - | DATETIME {} - | ENUM {} - | FIXED_SYM {} - | GEOMETRYCOLLECTION {} - | GEOMETRY_SYM {} - | JSON_SYM {} - | LINESTRING {} - | MEDIUM_SYM {} - | MULTILINESTRING {} - | MULTIPOINT {} - | MULTIPOLYGON {} - | NATIONAL_SYM {} - | NCHAR_SYM {} - | NUMBER_SYM {} /* Oracle-R, PLSQL-R */ - | NVARCHAR_SYM {} - | POINT_SYM {} - | POLYGON {} - | RAW {} /* Oracle-R */ - | ROW_SYM {} - | SERIAL_SYM {} - | TEXT_SYM {} - | TIMESTAMP {} - | TIME_SYM {} /* Oracle-R */ - | VARCHAR2 {} /* Oracle-R, PLSQL-R */ - | YEAR_SYM {} +keyword_data_type: + BIT_SYM + | BOOLEAN_SYM + | BOOL_SYM + | CLOB + | DATE_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2 + | DATETIME + | ENUM + | FIXED_SYM + | GEOMETRYCOLLECTION + | GEOMETRY_SYM + | JSON_SYM + | LINESTRING + | MEDIUM_SYM + | MULTILINESTRING + | MULTIPOINT + | MULTIPOLYGON + | NATIONAL_SYM + | NCHAR_SYM + | NUMBER_SYM + | NVARCHAR_SYM + | POINT_SYM + | POLYGON + | RAW + | ROW_SYM + | SERIAL_SYM + | TEXT_SYM + | TIMESTAMP %prec PREC_BELOW_CONTRACTION_TOKEN2 + | TIME_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2 + | VARCHAR2 + | YEAR_SYM ; -keyword_sp_not_data_type: - ACTION {} - | ADDDATE_SYM {} - | ADMIN_SYM {} - | AFTER_SYM {} - | AGAINST {} - | AGGREGATE_SYM {} - | ALGORITHM_SYM {} - | ALWAYS_SYM {} - | ANY_SYM {} - | AT_SYM {} - | ATOMIC_SYM {} - | AUTHORS_SYM {} - | AUTO_INC {} - | AUTOEXTEND_SIZE_SYM {} - | AUTO_SYM {} - | AVG_ROW_LENGTH {} - | AVG_SYM {} - | BLOCK_SYM {} - | BTREE_SYM {} - | CASCADED {} - | CATALOG_NAME_SYM {} - | CHAIN_SYM {} - | CHANGED {} - | CIPHER_SYM {} - | CLIENT_SYM {} - | CLASS_ORIGIN_SYM {} - | COALESCE {} - | CODE_SYM {} - | COLLATION_SYM {} - | COLUMN_NAME_SYM {} - | COLUMNS {} - | COMMITTED_SYM {} - | COMPACT_SYM {} - | COMPLETION_SYM {} - | COMPRESSED_SYM {} - | CONCURRENT {} - | CONNECTION_SYM {} - | CONSISTENT_SYM {} - | CONSTRAINT_CATALOG_SYM {} - 
| CONSTRAINT_SCHEMA_SYM {} - | CONSTRAINT_NAME_SYM {} - | CONTEXT_SYM {} - | CONTRIBUTORS_SYM {} - | CURRENT_POS_SYM {} - | CPU_SYM {} - | CUBE_SYM {} +/* + These keywords are fine for both SP variable names and SP labels. +*/ +keyword_sp_var_and_label: + ACTION + | ADDDATE_SYM + | ADMIN_SYM + | AFTER_SYM + | AGAINST + | AGGREGATE_SYM + | ALGORITHM_SYM + | ALWAYS_SYM + | ANY_SYM + | AT_SYM + | ATOMIC_SYM + | AUTHORS_SYM + | AUTO_INC + | AUTOEXTEND_SIZE_SYM + | AUTO_SYM + | AVG_ROW_LENGTH + | AVG_SYM + | BLOCK_SYM + | BTREE_SYM + | CASCADED + | CATALOG_NAME_SYM + | CHAIN_SYM + | CHANGED + | CIPHER_SYM + | CLIENT_SYM + | CLASS_ORIGIN_SYM + | COALESCE + | CODE_SYM + | COLLATION_SYM + | COLUMN_NAME_SYM + | COLUMNS + | COMMITTED_SYM + | COMPACT_SYM + | COMPLETION_SYM + | COMPRESSED_SYM + | CONCURRENT + | CONNECTION_SYM + | CONSISTENT_SYM + | CONSTRAINT_CATALOG_SYM + | CONSTRAINT_SCHEMA_SYM + | CONSTRAINT_NAME_SYM + | CONTEXT_SYM + | CONTRIBUTORS_SYM + | CURRENT_POS_SYM + | CPU_SYM + | CUBE_SYM /* Although a reserved keyword in SQL:2003 (and :2008), not reserved in MySQL per WL#2111 specification. */ - | CURRENT_SYM {} - | CURSOR_NAME_SYM {} - | CYCLE_SYM {} - | DATA_SYM {} - | DATAFILE_SYM {} - | DATE_FORMAT_SYM {} - | DAY_SYM {} - | DECODE_SYM {} - | DEFINER_SYM {} - | DELAY_KEY_WRITE_SYM {} - | DES_KEY_FILE {} - | DIAGNOSTICS_SYM {} - | DIRECTORY_SYM {} - | DISABLE_SYM {} - | DISCARD {} - | DISK_SYM {} - | DUMPFILE {} - | DUPLICATE_SYM {} - | DYNAMIC_SYM {} - | ENDS_SYM {} - | ENGINE_SYM {} - | ENGINES_SYM {} - | ERROR_SYM {} - | ERRORS {} - | ESCAPE_SYM {} - | EVENT_SYM {} - | EVENTS_SYM {} - | EVERY_SYM {} - | EXCHANGE_SYM {} - | EXPANSION_SYM {} - | EXPORT_SYM {} - | EXTENDED_SYM {} - | EXTENT_SIZE_SYM {} - | FAULTS_SYM {} - | FAST_SYM {} - | FOUND_SYM {} - | ENABLE_SYM {} - | FULL {} - | FILE_SYM {} - | FIRST_SYM {} - | GENERAL {} - | GENERATED_SYM {} - | GET_FORMAT {} - | GRANTS {} - | GLOBAL_SYM {} - | HASH_SYM {} - | HARD_SYM {} - | INVISIBLE_SYM {} - | HOSTS_SYM {} - | HOUR_SYM {} - | ID_SYM {} - | IDENTIFIED_SYM {} - | IGNORE_SERVER_IDS_SYM {} - | INCREMENT_SYM {} - | IMMEDIATE_SYM {} /* SQL-2003-R */ - | INVOKER_SYM {} - | IMPORT {} - | INDEXES {} - | INITIAL_SIZE_SYM {} - | IO_SYM {} - | IPC_SYM {} - | ISOLATION {} - | ISOPEN_SYM {} - | ISSUER_SYM {} - | INSERT_METHOD {} - | KEY_BLOCK_SIZE {} - | LAST_VALUE {} - | LAST_SYM {} - | LASTVAL_SYM {} - | LEAVES {} - | LESS_SYM {} - | LEVEL_SYM {} - | LIST_SYM {} - | LOCAL_SYM {} - | LOCKS_SYM {} - | LOGFILE_SYM {} - | LOGS_SYM {} - | MAX_ROWS {} - | MASTER_SYM {} - | MASTER_HEARTBEAT_PERIOD_SYM {} - | MASTER_GTID_POS_SYM {} - | MASTER_HOST_SYM {} - | MASTER_PORT_SYM {} - | MASTER_LOG_FILE_SYM {} - | MASTER_LOG_POS_SYM {} - | MASTER_USER_SYM {} - | MASTER_USE_GTID_SYM {} - | MASTER_PASSWORD_SYM {} - | MASTER_SERVER_ID_SYM {} - | MASTER_CONNECT_RETRY_SYM {} - | MASTER_DELAY_SYM {} - | MASTER_SSL_SYM {} - | MASTER_SSL_CA_SYM {} - | MASTER_SSL_CAPATH_SYM {} - | MASTER_SSL_CERT_SYM {} - | MASTER_SSL_CIPHER_SYM {} - | MASTER_SSL_CRL_SYM {} - | MASTER_SSL_CRLPATH_SYM {} - | MASTER_SSL_KEY_SYM {} - | MAX_CONNECTIONS_PER_HOUR {} - | MAX_QUERIES_PER_HOUR {} - | MAX_SIZE_SYM {} - | MAX_STATEMENT_TIME_SYM {} - | MAX_UPDATES_PER_HOUR {} - | MAX_USER_CONNECTIONS_SYM {} - | MEMORY_SYM {} - | MERGE_SYM {} - | MESSAGE_TEXT_SYM {} - | MICROSECOND_SYM {} - | MIGRATE_SYM {} - | MINUTE_SYM {} - | MINVALUE_SYM {} - | MIN_ROWS {} - | MODIFY_SYM {} - | MODE_SYM {} - | MONTH_SYM {} - | MUTEX_SYM {} - | MYSQL_SYM {} - | MYSQL_ERRNO_SYM {} - | NAME_SYM {} - | 
NAMES_SYM {} - | NEXT_SYM {} - | NEXTVAL_SYM {} - | NEW_SYM {} - | NOCACHE_SYM {} - | NOCYCLE_SYM {} - | NOMINVALUE_SYM {} - | NOMAXVALUE_SYM {} - | NO_WAIT_SYM {} - | NOWAIT_SYM {} - | NODEGROUP_SYM {} - | NONE_SYM {} - | NOTFOUND_SYM {} - | OF_SYM {} /* SQL-1999-R, Oracle-R */ - | OFFSET_SYM {} - | OLD_PASSWORD_SYM {} - | ONE_SYM {} - | ONLINE_SYM {} - | ONLY_SYM {} - | PACK_KEYS_SYM {} - | PAGE_SYM {} - | PARTIAL {} - | PARTITIONING_SYM {} - | PARTITIONS_SYM {} - | PASSWORD_SYM {} - | PERSISTENT_SYM {} - | PHASE_SYM {} - | PLUGIN_SYM {} - | PLUGINS_SYM {} - | PRESERVE_SYM {} - | PREV_SYM {} - | PREVIOUS_SYM {} - | PRIVILEGES {} - | PROCESS {} - | PROCESSLIST_SYM {} - | PROFILE_SYM {} - | PROFILES_SYM {} - | PROXY_SYM {} - | QUARTER_SYM {} - | QUERY_SYM {} - | QUICK {} - | READ_ONLY_SYM {} - | REBUILD_SYM {} - | RECOVER_SYM {} - | REDO_BUFFER_SIZE_SYM {} - | REDOFILE_SYM {} - | REDUNDANT_SYM {} - | RELAY {} - | RELAYLOG_SYM {} - | RELAY_LOG_FILE_SYM {} - | RELAY_LOG_POS_SYM {} - | RELAY_THREAD {} - | RELOAD {} - | REORGANIZE_SYM {} - | REPEATABLE_SYM {} - | REPLICATION {} - | RESOURCES {} - | RESTART_SYM {} - | RESUME_SYM {} - | RETURNED_SQLSTATE_SYM {} - | RETURNS_SYM {} - | REUSE_SYM {} - | REVERSE_SYM {} - | ROLE_SYM {} - | ROLLUP_SYM {} - | ROUTINE_SYM {} - | ROWCOUNT_SYM {} - | ROW_COUNT_SYM {} - | ROW_FORMAT_SYM {} - | RTREE_SYM {} - | SCHEDULE_SYM {} - | SCHEMA_NAME_SYM {} - | SECOND_SYM {} - | SEQUENCE_SYM {} - | SERIALIZABLE_SYM {} - | SESSION_SYM {} - | SETVAL_SYM {} - | SIMPLE_SYM {} - | SHARE_SYM {} - | SLAVE_POS_SYM {} - | SLOW {} - | SNAPSHOT_SYM {} - | SOFT_SYM {} - | SOUNDS_SYM {} - | SOURCE_SYM {} - | SQL_CACHE_SYM {} - | SQL_BUFFER_RESULT {} - | SQL_NO_CACHE_SYM {} - | SQL_THREAD {} - | STARTS_SYM {} - | STATEMENT_SYM {} - | STATUS_SYM {} - | STORAGE_SYM {} - | STRING_SYM {} - | SUBCLASS_ORIGIN_SYM {} - | SUBDATE_SYM {} - | SUBJECT_SYM {} - | SUBPARTITION_SYM {} - | SUBPARTITIONS_SYM {} - | SUPER_SYM {} - | SUSPEND_SYM {} - | SWAPS_SYM {} - | SWITCHES_SYM {} - | TABLE_NAME_SYM {} - | TABLES {} - | TABLE_CHECKSUM_SYM {} - | TABLESPACE {} - | TEMPORARY {} - | TEMPTABLE_SYM {} - | THAN_SYM {} - | TRANSACTION_SYM {} - | TRANSACTIONAL_SYM {} - | TRIGGERS_SYM {} - | TRIM_ORACLE {} - | TIMESTAMP_ADD {} - | TIMESTAMP_DIFF {} - | TYPES_SYM {} - | TYPE_SYM {} - | UDF_RETURNS_SYM {} - | UNCOMMITTED_SYM {} - | UNDEFINED_SYM {} - | UNDO_BUFFER_SIZE_SYM {} - | UNDOFILE_SYM {} - | UNKNOWN_SYM {} - | UNTIL_SYM {} - | USER_SYM {} - | USE_FRM {} - | VARIABLES {} - | VIEW_SYM {} - | VIRTUAL_SYM {} - | VALUE_SYM {} - | WARNINGS {} - | WAIT_SYM {} - | WEEK_SYM {} - | WEIGHT_STRING_SYM {} - | WORK_SYM {} - | X509_SYM {} - | XML_SYM {} - | VIA_SYM {} + | CURRENT_SYM + | CURSOR_NAME_SYM + | CYCLE_SYM + | DATA_SYM + | DATAFILE_SYM + | DATE_FORMAT_SYM + | DAY_SYM + | DECODE_SYM + | DEFINER_SYM + | DELAY_KEY_WRITE_SYM + | DES_KEY_FILE + | DIAGNOSTICS_SYM + | DIRECTORY_SYM + | DISABLE_SYM + | DISCARD + | DISK_SYM + | DUMPFILE + | DUPLICATE_SYM + | DYNAMIC_SYM + | ENDS_SYM + | ENGINE_SYM + | ENGINES_SYM + | ERROR_SYM + | ERRORS + | ESCAPE_SYM + | EVENT_SYM + | EVENTS_SYM + | EVERY_SYM + + | EXCHANGE_SYM + | EXPANSION_SYM + | EXPORT_SYM + | EXTENDED_SYM + | EXTENT_SIZE_SYM + | FAULTS_SYM + | FAST_SYM + | FOUND_SYM + | ENABLE_SYM + | FULL + | FILE_SYM + | FIRST_SYM + | GENERAL + | GENERATED_SYM + | GET_FORMAT + | GRANTS + | HASH_SYM + | HARD_SYM + | HOSTS_SYM + | HOUR_SYM + | ID_SYM + | IDENTIFIED_SYM + | IGNORE_SERVER_IDS_SYM + | INCREMENT_SYM + | IMMEDIATE_SYM + | INVOKER_SYM + | IMPORT + | INDEXES 
+ | INITIAL_SIZE_SYM + | IO_SYM + | IPC_SYM + | ISOLATION + | ISOPEN_SYM + | ISSUER_SYM + | INSERT_METHOD + | INVISIBLE_SYM + | KEY_BLOCK_SIZE + | LAST_VALUE + | LAST_SYM + | LASTVAL_SYM + | LEAVES + | LESS_SYM + | LEVEL_SYM + | LIST_SYM + | LOCKS_SYM + | LOGFILE_SYM + | LOGS_SYM + | MAX_ROWS + | MASTER_SYM + | MASTER_HEARTBEAT_PERIOD_SYM + | MASTER_GTID_POS_SYM + | MASTER_HOST_SYM + | MASTER_PORT_SYM + | MASTER_LOG_FILE_SYM + | MASTER_LOG_POS_SYM + | MASTER_USER_SYM + | MASTER_USE_GTID_SYM + | MASTER_PASSWORD_SYM + | MASTER_SERVER_ID_SYM + | MASTER_CONNECT_RETRY_SYM + | MASTER_DELAY_SYM + | MASTER_SSL_SYM + | MASTER_SSL_CA_SYM + | MASTER_SSL_CAPATH_SYM + | MASTER_SSL_CERT_SYM + | MASTER_SSL_CIPHER_SYM + | MASTER_SSL_CRL_SYM + | MASTER_SSL_CRLPATH_SYM + | MASTER_SSL_KEY_SYM + | MAX_CONNECTIONS_PER_HOUR + | MAX_QUERIES_PER_HOUR + | MAX_SIZE_SYM + | MAX_STATEMENT_TIME_SYM + | MAX_UPDATES_PER_HOUR + | MAX_USER_CONNECTIONS_SYM + | MEMORY_SYM + | MERGE_SYM + | MESSAGE_TEXT_SYM + | MICROSECOND_SYM + | MIGRATE_SYM + | MINUTE_SYM + | MINVALUE_SYM + | MIN_ROWS + | MODIFY_SYM + | MODE_SYM + | MONTH_SYM + | MUTEX_SYM + | MYSQL_SYM + | MYSQL_ERRNO_SYM + | NAME_SYM + | NAMES_SYM + | NEXT_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2 + | NEXTVAL_SYM + | NEW_SYM + | NOCACHE_SYM + | NOCYCLE_SYM + | NOMINVALUE_SYM + | NOMAXVALUE_SYM + | NO_WAIT_SYM + | NOWAIT_SYM + | NODEGROUP_SYM + | NONE_SYM + | NOTFOUND_SYM + | OF_SYM + | OFFSET_SYM + | OLD_PASSWORD_SYM + | ONE_SYM + | ONLINE_SYM + | ONLY_SYM + + | PACK_KEYS_SYM + | PAGE_SYM + | PARTIAL + | PARTITIONING_SYM + | PARTITIONS_SYM + | PASSWORD_SYM + | PERSISTENT_SYM + | PHASE_SYM + | PLUGIN_SYM + | PLUGINS_SYM + | PRESERVE_SYM + | PREV_SYM + | PREVIOUS_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2 + | PRIVILEGES + | PROCESS + | PROCESSLIST_SYM + | PROFILE_SYM + | PROFILES_SYM + | PROXY_SYM + | QUARTER_SYM + | QUERY_SYM + | QUICK + + | READ_ONLY_SYM + | REBUILD_SYM + | RECOVER_SYM + | REDO_BUFFER_SIZE_SYM + | REDOFILE_SYM + | REDUNDANT_SYM + | RELAY + | RELAYLOG_SYM + | RELAY_LOG_FILE_SYM + | RELAY_LOG_POS_SYM + | RELAY_THREAD + | RELOAD + | REORGANIZE_SYM + | REPEATABLE_SYM + | REPLICATION + | RESOURCES + | RESTART_SYM + | RESUME_SYM + | RETURNED_SQLSTATE_SYM + | RETURNS_SYM + | REUSE_SYM + | REVERSE_SYM + | ROLE_SYM + | ROLLUP_SYM + | ROUTINE_SYM + | ROWCOUNT_SYM + | ROW_COUNT_SYM + | ROW_FORMAT_SYM + | RTREE_SYM + | SCHEDULE_SYM + | SCHEMA_NAME_SYM + | SECOND_SYM + | SEQUENCE_SYM + | SERIALIZABLE_SYM + | SETVAL_SYM + | SIMPLE_SYM + | SHARE_SYM + | SLAVE_POS_SYM + | SLOW + | SNAPSHOT_SYM + | SOFT_SYM + | SOUNDS_SYM + | SOURCE_SYM + | SQL_CACHE_SYM + | SQL_BUFFER_RESULT + | SQL_NO_CACHE_SYM + | SQL_THREAD + | STARTS_SYM + | STATEMENT_SYM + | STATUS_SYM + | STORAGE_SYM + | STRING_SYM + | SUBCLASS_ORIGIN_SYM + | SUBDATE_SYM + | SUBJECT_SYM + | SUBPARTITION_SYM + | SUBPARTITIONS_SYM + | SUPER_SYM + | SUSPEND_SYM + | SWAPS_SYM + | SWITCHES_SYM + | TABLE_NAME_SYM + | TABLES + | TABLE_CHECKSUM_SYM + | TABLESPACE + | TEMPORARY + | TEMPTABLE_SYM + | THAN_SYM + | TRANSACTION_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2 + | TRANSACTIONAL_SYM + | TRIGGERS_SYM + | TRIM_ORACLE + | TIMESTAMP_ADD + | TIMESTAMP_DIFF + | TYPES_SYM + | TYPE_SYM + | UDF_RETURNS_SYM + | UNCOMMITTED_SYM + | UNDEFINED_SYM + | UNDO_BUFFER_SIZE_SYM + | UNDOFILE_SYM + | UNKNOWN_SYM + | UNTIL_SYM + | USER_SYM + | USE_FRM + | VARIABLES + | VIEW_SYM + | VIRTUAL_SYM + | VALUE_SYM + | WARNINGS + | WAIT_SYM + | WEEK_SYM + | WEIGHT_STRING_SYM + | WORK_SYM + | X509_SYM + | XML_SYM + | VIA_SYM ; /* @@ -15602,7 +16099,7 @@ 
set: set_stmt_option_value_following_option_type_list { LEX *lex= Lex; - if (lex->table_or_sp_used()) + if (unlikely(lex->table_or_sp_used())) my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0), "SET STATEMENT")); lex->stmt_var_list= lex->var_list; lex->var_list.empty(); @@ -15621,8 +16118,8 @@ set_assign: } set_expr_or_default { - if (Lex->set_variable(&$1, $4) || - sp_create_assignment_instr(thd, yychar == YYEMPTY)) + if (unlikely(Lex->set_variable(&$1, $4)) || + unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) MYSQL_YYABORT; } | ident_directly_assignable '.' ident SET_VAR @@ -15636,14 +16133,14 @@ set_assign: { LEX *lex= Lex; DBUG_ASSERT(lex->var_list.is_empty()); - if (lex->set_variable(&$1, &$3, $6) || - lex->sphead->restore_lex(thd)) + if (unlikely(lex->set_variable(&$1, &$3, $6)) || + unlikely(lex->sphead->restore_lex(thd))) MYSQL_YYABORT; } | colon_with_pos ident '.' ident SET_VAR { LEX *lex= Lex; - if (!lex->is_trigger_new_or_old_reference(&$2)) + if (unlikely(!lex->is_trigger_new_or_old_reference(&$2))) { thd->parse_error(ER_SYNTAX_ERROR, $1); MYSQL_YYABORT; @@ -15655,8 +16152,8 @@ set_assign: set_expr_or_default { LEX_CSTRING tmp= { $2.str, $2.length }; - if (Lex->set_trigger_field(&tmp, &$4, $7) || - sp_create_assignment_instr(thd, yychar == YYEMPTY)) + if (unlikely(Lex->set_trigger_field(&tmp, &$4, $7)) || + unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) MYSQL_YYABORT; } ; @@ -15675,7 +16172,7 @@ set_stmt_option_value_following_option_type_list: start_option_value_list: option_value_no_option_type { - if (sp_create_assignment_instr(thd, yychar == YYEMPTY)) + if (unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) MYSQL_YYABORT; } option_value_list_continued @@ -15685,7 +16182,7 @@ start_option_value_list: } transaction_characteristics { - if (sp_create_assignment_instr(thd, yychar == YYEMPTY)) + if (unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) MYSQL_YYABORT; } | option_type @@ -15700,14 +16197,14 @@ start_option_value_list: start_option_value_list_following_option_type: option_value_following_option_type { - if (sp_create_assignment_instr(thd, yychar == YYEMPTY)) - MYSQL_YYABORT; + if (unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) + MYSQL_YYABORT; } option_value_list_continued | TRANSACTION_SYM transaction_characteristics { - if (sp_create_assignment_instr(thd, yychar == YYEMPTY)) - MYSQL_YYABORT; + if (unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) + MYSQL_YYABORT; } ; @@ -15724,8 +16221,8 @@ option_value_list: } option_value { - if (sp_create_assignment_instr(thd, yychar == YYEMPTY)) - MYSQL_YYABORT; + if (unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) + MYSQL_YYABORT; } | option_value_list ',' { @@ -15733,8 +16230,8 @@ option_value_list: } option_value { - if (sp_create_assignment_instr(thd, yychar == YYEMPTY)) - MYSQL_YYABORT; + if (unlikely(sp_create_assignment_instr(thd, yychar == YYEMPTY))) + MYSQL_YYABORT; } ; @@ -15772,17 +16269,17 @@ opt_var_ident_type: option_value_following_option_type: ident equal set_expr_or_default { - if (Lex->set_system_variable(Lex->option_type, &$1, $3)) + if (unlikely(Lex->set_system_variable(Lex->option_type, &$1, $3))) MYSQL_YYABORT; } | ident '.' ident equal set_expr_or_default { - if (Lex->set_system_variable(thd, Lex->option_type, &$1, &$3, $5)) + if (unlikely(Lex->set_system_variable(thd, Lex->option_type, &$1, &$3, $5))) MYSQL_YYABORT; } | DEFAULT '.' 
ident equal set_expr_or_default { - if (Lex->set_default_system_variable(Lex->option_type, &$3, $5)) + if (unlikely(Lex->set_default_system_variable(Lex->option_type, &$3, $5))) MYSQL_YYABORT; } ; @@ -15791,37 +16288,37 @@ option_value_following_option_type: option_value_no_option_type: ident equal set_expr_or_default { - if (Lex->set_variable(&$1, $3)) + if (unlikely(Lex->set_variable(&$1, $3))) MYSQL_YYABORT; } | ident '.' ident equal set_expr_or_default { - if (Lex->set_variable(&$1, &$3, $5)) + if (unlikely(Lex->set_variable(&$1, &$3, $5))) MYSQL_YYABORT; } | DEFAULT '.' ident equal set_expr_or_default { - if (Lex->set_default_system_variable(Lex->option_type, &$3, $5)) + if (unlikely(Lex->set_default_system_variable(Lex->option_type, &$3, $5))) MYSQL_YYABORT; } | '@' ident_or_text equal expr { - if (Lex->set_user_variable(thd, &$2, $4)) + if (unlikely(Lex->set_user_variable(thd, &$2, $4))) MYSQL_YYABORT; } - | '@' '@' opt_var_ident_type ident equal set_expr_or_default + | '@' '@' opt_var_ident_type ident_sysvar_name equal set_expr_or_default { - if (Lex->set_system_variable($3, &$4, $6)) + if (unlikely(Lex->set_system_variable($3, &$4, $6))) MYSQL_YYABORT; } - | '@' '@' opt_var_ident_type ident '.' ident equal set_expr_or_default + | '@' '@' opt_var_ident_type ident_sysvar_name '.' ident equal set_expr_or_default { - if (Lex->set_system_variable(thd, $3, &$4, &$6, $8)) + if (unlikely(Lex->set_system_variable(thd, $3, &$4, &$6, $8))) MYSQL_YYABORT; } | '@' '@' opt_var_ident_type DEFAULT '.' ident equal set_expr_or_default { - if (Lex->set_default_system_variable($3, &$6, $8)) + if (unlikely(Lex->set_default_system_variable($3, &$6, $8))) MYSQL_YYABORT; } | charset old_or_new_charset_name_or_default @@ -15834,7 +16331,7 @@ option_value_no_option_type: set_var_collation_client(cs2, thd->variables.collation_database, cs2)); - if (var == NULL) + if (unlikely(var == NULL)) MYSQL_YYABORT; lex->var_list.push_back(var, thd->mem_root); } @@ -15843,7 +16340,7 @@ option_value_no_option_type: LEX *lex= Lex; sp_pcontext *spc= lex->spcont; LEX_CSTRING names= { STRING_WITH_LEN("names") }; - if (spc && spc->find_variable(&names, false)) + if (unlikely(spc && spc->find_variable(&names, false))) my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), names.str); else thd->parse_error(); @@ -15856,7 +16353,7 @@ option_value_no_option_type: CHARSET_INFO *cs3; cs2= $2 ? $2 : global_system_variables.character_set_client; cs3= $3 ? 
$3 : cs2; - if (!my_charset_same(cs2, cs3)) + if (unlikely(!my_charset_same(cs2, cs3))) { my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), cs3->name, cs2->csname); @@ -15864,23 +16361,24 @@ option_value_no_option_type: } set_var_collation_client *var; var= new (thd->mem_root) set_var_collation_client(cs3, cs3, cs3); - if (var == NULL) + if (unlikely(var == NULL) || + unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); } | DEFAULT ROLE_SYM grant_role { LEX *lex = Lex; LEX_USER *user; - if (!(user=(LEX_USER *) thd->calloc(sizeof(LEX_USER)))) + if (unlikely(!(user=(LEX_USER *) thd->calloc(sizeof(LEX_USER))))) MYSQL_YYABORT; user->user= current_user; set_var_default_role *var= (new (thd->mem_root) set_var_default_role(user, $3->user)); - if (var == NULL) + if (unlikely(var == NULL) || + unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); + thd->lex->autocommit= TRUE; if (lex->sphead) lex->sphead->m_flags|= sp_head::HAS_SET_AUTOCOMMIT_STMT; @@ -15890,9 +16388,9 @@ option_value_no_option_type: LEX *lex = Lex; set_var_default_role *var= (new (thd->mem_root) set_var_default_role($5, $3->user)); - if (var == NULL) + if (unlikely(var == NULL) || + unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); thd->lex->autocommit= TRUE; if (lex->sphead) lex->sphead->m_flags|= sp_head::HAS_SET_AUTOCOMMIT_STMT; @@ -15901,18 +16399,18 @@ option_value_no_option_type: { LEX *lex = Lex; set_var_role *var= new (thd->mem_root) set_var_role($2); - if (var == NULL) + if (unlikely(var == NULL) || + unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); } | PASSWORD_SYM opt_for_user text_or_password { LEX *lex = Lex; set_var_password *var= (new (thd->mem_root) set_var_password(lex->definer)); - if (var == NULL) + if (unlikely(var == NULL) || + unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); lex->autocommit= TRUE; if (lex->sphead) lex->sphead->m_flags|= sp_head::HAS_SET_AUTOCOMMIT_STMT; @@ -15932,16 +16430,17 @@ transaction_access_mode: { LEX *lex=Lex; Item *item= new (thd->mem_root) Item_int(thd, (int32) $1); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; set_var *var= (new (thd->mem_root) set_var(thd, lex->option_type, find_sys_var(thd, "tx_read_only"), &null_clex_str, item)); - if (var == NULL) + if (unlikely(var == NULL)) + MYSQL_YYABORT; + if (unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); } ; @@ -15950,16 +16449,16 @@ isolation_level: { LEX *lex=Lex; Item *item= new (thd->mem_root) Item_int(thd, (int32) $3); - if (item == NULL) + if (unlikely(item == NULL)) MYSQL_YYABORT; set_var *var= (new (thd->mem_root) set_var(thd, lex->option_type, find_sys_var(thd, "tx_isolation"), &null_clex_str, item)); - if (var == NULL) + if (unlikely(var == NULL) || + unlikely(lex->var_list.push_back(var, thd->mem_root))) MYSQL_YYABORT; - lex->var_list.push_back(var, thd->mem_root); } ; @@ -15982,9 +16481,10 @@ opt_for_user: sp_pcontext *spc= lex->spcont; LEX_CSTRING pw= { STRING_WITH_LEN("password") }; - if (spc && spc->find_variable(&pw, false)) + if (unlikely(spc && spc->find_variable(&pw, false))) my_yyabort_error((ER_SP_BAD_VAR_SHADOW, MYF(0), pw.str)); - if (!(lex->definer= (LEX_USER*) thd->calloc(sizeof(LEX_USER)))) + if 
(unlikely(!(lex->definer= (LEX_USER*) + thd->calloc(sizeof(LEX_USER))))) MYSQL_YYABORT; lex->definer->user= current_user; lex->definer->plugin= empty_clex_str; @@ -16011,19 +16511,19 @@ set_expr_or_default: | ON { $$=new (thd->mem_root) Item_string_sys(thd, "ON", 2); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | ALL { $$=new (thd->mem_root) Item_string_sys(thd, "ALL", 3); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } | BINARY { $$=new (thd->mem_root) Item_string_sys(thd, "binary", 6); - if ($$ == NULL) + if (unlikely($$ == NULL)) MYSQL_YYABORT; } ; @@ -16035,7 +16535,7 @@ lock: { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "LOCK")); lex->sql_command= SQLCOM_LOCK_TABLES; } @@ -16048,14 +16548,14 @@ opt_lock_wait_timeout: {} | WAIT_SYM ulong_num { - if (set_statement_var_if_exists(thd, STRING_WITH_LEN("lock_wait_timeout"), $2) || - set_statement_var_if_exists(thd, STRING_WITH_LEN("innodb_lock_wait_timeout"), $2)) + if (unlikely(set_statement_var_if_exists(thd, STRING_WITH_LEN("lock_wait_timeout"), $2)) || + unlikely(set_statement_var_if_exists(thd, STRING_WITH_LEN("innodb_lock_wait_timeout"), $2))) MYSQL_YYABORT; } | NOWAIT_SYM { - if (set_statement_var_if_exists(thd, STRING_WITH_LEN("lock_wait_timeout"), 0) || - set_statement_var_if_exists(thd, STRING_WITH_LEN("innodb_lock_wait_timeout"), 0)) + if (unlikely(set_statement_var_if_exists(thd, STRING_WITH_LEN("lock_wait_timeout"), 0)) || + unlikely(set_statement_var_if_exists(thd, STRING_WITH_LEN("innodb_lock_wait_timeout"), 0))) MYSQL_YYABORT; } ; @@ -16075,12 +16575,13 @@ table_lock: { thr_lock_type lock_type= (thr_lock_type) $3; bool lock_for_write= (lock_type >= TL_WRITE_ALLOW_WRITE); - if (!Select->add_table_to_list(thd, $1, $2, 0, lock_type, + if (unlikely(!Select-> + add_table_to_list(thd, $1, $2, 0, lock_type, (lock_for_write ? lock_type == TL_WRITE_CONCURRENT_INSERT ? 
MDL_SHARED_WRITE : MDL_SHARED_NO_READ_WRITE : - MDL_SHARED_READ))) + MDL_SHARED_READ)))) MYSQL_YYABORT; } ; @@ -16102,7 +16603,7 @@ unlock: { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "UNLOCK")); lex->sql_command= SQLCOM_UNLOCK_TABLES; } @@ -16118,43 +16619,46 @@ handler: HANDLER_SYM table_ident OPEN_SYM opt_table_alias { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "HANDLER")); lex->sql_command = SQLCOM_HA_OPEN; - if (!lex->current_select->add_table_to_list(thd, $2, $4, 0)) + if (unlikely(!lex->current_select->add_table_to_list(thd, $2, $4, + 0))) MYSQL_YYABORT; } | HANDLER_SYM table_ident_nodb CLOSE_SYM { LEX *lex= Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "HANDLER")); lex->sql_command = SQLCOM_HA_CLOSE; - if (!lex->current_select->add_table_to_list(thd, $2, 0, 0)) + if (unlikely(!lex->current_select->add_table_to_list(thd, $2, 0, + 0))) MYSQL_YYABORT; } | HANDLER_SYM table_ident_nodb READ_SYM { LEX *lex=Lex; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "HANDLER")); lex->expr_allows_subselect= FALSE; lex->sql_command = SQLCOM_HA_READ; lex->ha_rkey_mode= HA_READ_KEY_EXACT; /* Avoid purify warnings */ Item *one= new (thd->mem_root) Item_int(thd, (int32) 1); - if (one == NULL) + if (unlikely(one == NULL)) MYSQL_YYABORT; lex->current_select->select_limit= one; lex->current_select->offset_limit= 0; lex->limit_rows_examined= 0; - if (!lex->current_select->add_table_to_list(thd, $2, 0, 0)) + if (unlikely(!lex->current_select->add_table_to_list(thd, $2, 0, + 0))) MYSQL_YYABORT; } handler_read_or_scan opt_where_clause opt_limit_clause { Lex->expr_allows_subselect= TRUE; /* Stored functions are not supported for HANDLER READ. */ - if (Lex->uses_stored_routines()) + if (unlikely(Lex->uses_stored_routines())) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), "stored functions in HANDLER ... 
READ"); @@ -16183,7 +16687,7 @@ handler_rkey_function: LEX *lex=Lex; lex->ha_read_mode = RKEY; lex->ha_rkey_mode=$1; - if (!(lex->insert_list= new (thd->mem_root) List_item)) + if (unlikely(!(lex->insert_list= new (thd->mem_root) List_item))) MYSQL_YYABORT; } '(' values ')' @@ -16214,24 +16718,26 @@ revoke_command: } | grant_privileges ON FUNCTION_SYM grant_ident FROM user_and_role_list { - if (Lex->add_grant_command(thd, SQLCOM_REVOKE, TYPE_ENUM_FUNCTION)) + if (unlikely(Lex->add_grant_command(thd, SQLCOM_REVOKE, + TYPE_ENUM_FUNCTION))) MYSQL_YYABORT; } | grant_privileges ON PROCEDURE_SYM grant_ident FROM user_and_role_list { - if (Lex->add_grant_command(thd, SQLCOM_REVOKE, TYPE_ENUM_PROCEDURE)) + if (unlikely(Lex->add_grant_command(thd, SQLCOM_REVOKE, + TYPE_ENUM_PROCEDURE))) MYSQL_YYABORT; } | grant_privileges ON PACKAGE_SYM grant_ident FROM user_and_role_list { - if (Lex->add_grant_command(thd, SQLCOM_REVOKE, - TYPE_ENUM_PACKAGE)) + if (unlikely(Lex->add_grant_command(thd, SQLCOM_REVOKE, + TYPE_ENUM_PACKAGE))) MYSQL_YYABORT; } | grant_privileges ON PACKAGE_SYM BODY_SYM grant_ident FROM user_and_role_list { - if (Lex->add_grant_command(thd, SQLCOM_REVOKE, - TYPE_ENUM_PACKAGE_BODY)) + if (unlikely(Lex->add_grant_command(thd, SQLCOM_REVOKE, + TYPE_ENUM_PACKAGE_BODY))) MYSQL_YYABORT; } | ALL opt_privileges ',' GRANT OPTION FROM user_and_role_list @@ -16248,7 +16754,7 @@ revoke_command: | admin_option_for_role FROM user_and_role_list { Lex->sql_command= SQLCOM_REVOKE_ROLE; - if (Lex->users_list.push_front($1, thd->mem_root)) + if (unlikely(Lex->users_list.push_front($1, thd->mem_root))) MYSQL_YYABORT; } ; @@ -16276,27 +16782,29 @@ grant_command: | grant_privileges ON FUNCTION_SYM grant_ident TO_SYM grant_list opt_require_clause opt_grant_options { - if (Lex->add_grant_command(thd, SQLCOM_GRANT, TYPE_ENUM_FUNCTION)) + if (unlikely(Lex->add_grant_command(thd, SQLCOM_GRANT, + TYPE_ENUM_FUNCTION))) MYSQL_YYABORT; } | grant_privileges ON PROCEDURE_SYM grant_ident TO_SYM grant_list opt_require_clause opt_grant_options { - if (Lex->add_grant_command(thd, SQLCOM_GRANT, TYPE_ENUM_PROCEDURE)) + if (unlikely(Lex->add_grant_command(thd, SQLCOM_GRANT, + TYPE_ENUM_PROCEDURE))) MYSQL_YYABORT; } | grant_privileges ON PACKAGE_SYM grant_ident TO_SYM grant_list opt_require_clause opt_grant_options { - if (Lex->add_grant_command(thd, SQLCOM_GRANT, - TYPE_ENUM_PACKAGE)) + if (unlikely(Lex->add_grant_command(thd, SQLCOM_GRANT, + TYPE_ENUM_PACKAGE))) MYSQL_YYABORT; } | grant_privileges ON PACKAGE_SYM BODY_SYM grant_ident TO_SYM grant_list opt_require_clause opt_grant_options { - if (Lex->add_grant_command(thd, SQLCOM_GRANT, - TYPE_ENUM_PACKAGE_BODY)) + if (unlikely(Lex->add_grant_command(thd, SQLCOM_GRANT, + TYPE_ENUM_PACKAGE_BODY))) MYSQL_YYABORT; } | PROXY_SYM ON user TO_SYM grant_list opt_grant_option @@ -16311,7 +16819,7 @@ grant_command: LEX *lex= Lex; lex->sql_command= SQLCOM_GRANT_ROLE; /* The first role is the one that is granted */ - if (Lex->users_list.push_front($1, thd->mem_root)) + if (unlikely(Lex->users_list.push_front($1, thd->mem_root))) MYSQL_YYABORT; } @@ -16328,12 +16836,12 @@ opt_with_admin_option: role_list: grant_role { - if (Lex->users_list.push_back($1, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($1, thd->mem_root))) MYSQL_YYABORT; } | role_list ',' grant_role { - if (Lex->users_list.push_back($3, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($3, thd->mem_root))) MYSQL_YYABORT; } ; @@ -16341,7 +16849,7 @@ role_list: current_role: CURRENT_ROLE optional_braces { - if 
(!($$=(LEX_USER*) thd->calloc(sizeof(LEX_USER)))) + if (unlikely(!($$=(LEX_USER*) thd->calloc(sizeof(LEX_USER))))) MYSQL_YYABORT; $$->user= current_role; $$->reset_auth(); @@ -16355,17 +16863,17 @@ grant_role: /* trim end spaces (as they'll be lost in mysql.user anyway) */ $1.length= cs->cset->lengthsp(cs, $1.str, $1.length); ((char*) $1.str)[$1.length] = '\0'; - if ($1.length == 0) + if (unlikely($1.length == 0)) my_yyabort_error((ER_INVALID_ROLE, MYF(0), "")); - if (!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER)))) + if (unlikely(!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER))))) MYSQL_YYABORT; $$->user= $1; $$->host= empty_clex_str; $$->reset_auth(); - if (check_string_char_length(&$$->user, ER_USERNAME, - username_char_length, - cs, 0)) + if (unlikely(check_string_char_length(&$$->user, ER_USERNAME, + username_char_length, + cs, 0))) MYSQL_YYABORT; } | current_role @@ -16434,6 +16942,7 @@ object_privilege: | EVENT_SYM { Lex->grant |= EVENT_ACL;} | TRIGGER_SYM { Lex->grant |= TRIGGER_ACL; } | CREATE TABLESPACE { Lex->grant |= CREATE_TABLESPACE_ACL; } + | DELETE_SYM HISTORY_SYM { Lex->grant |= DELETE_HISTORY_ACL; } ; opt_and: @@ -16450,21 +16959,21 @@ require_list_element: SUBJECT_SYM TEXT_STRING { LEX *lex=Lex; - if (lex->x509_subject) + if (unlikely(lex->x509_subject)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "SUBJECT")); lex->x509_subject=$2.str; } | ISSUER_SYM TEXT_STRING { LEX *lex=Lex; - if (lex->x509_issuer) + if (unlikely(lex->x509_issuer)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "ISSUER")); lex->x509_issuer=$2.str; } | CIPHER_SYM TEXT_STRING { LEX *lex=Lex; - if (lex->ssl_cipher) + if (unlikely(lex->ssl_cipher)) my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "CIPHER")); lex->ssl_cipher=$2.str; } @@ -16474,11 +16983,11 @@ grant_ident: '*' { LEX *lex= Lex; - if (lex->copy_db_to(&lex->current_select->db)) + if (unlikely(lex->copy_db_to(&lex->current_select->db))) MYSQL_YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = DB_ACLS & ~GRANT_ACL; - else if (lex->columns.elements) + else if (unlikely(lex->columns.elements)) my_yyabort_error((ER_ILLEGAL_GRANT_FOR_TABLE, MYF(0))); } | ident '.' '*' @@ -16487,7 +16996,7 @@ grant_ident: lex->current_select->db= $1; if (lex->grant == GLOBAL_ACLS) lex->grant = DB_ACLS & ~GRANT_ACL; - else if (lex->columns.elements) + else if (unlikely(lex->columns.elements)) my_yyabort_error((ER_ILLEGAL_GRANT_FOR_TABLE, MYF(0))); } | '*' '.' 
'*' @@ -16496,14 +17005,15 @@ grant_ident: lex->current_select->db= null_clex_str; if (lex->grant == GLOBAL_ACLS) lex->grant= GLOBAL_ACLS & ~GRANT_ACL; - else if (lex->columns.elements) + else if (unlikely(lex->columns.elements)) my_yyabort_error((ER_ILLEGAL_GRANT_FOR_TABLE, MYF(0))); } | table_ident { LEX *lex=Lex; - if (!lex->current_select->add_table_to_list(thd, $1,NULL, - TL_OPTION_UPDATING)) + if (unlikely(!lex->current_select-> + add_table_to_list(thd, $1,NULL, + TL_OPTION_UPDATING))) MYSQL_YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = TABLE_ACLS & ~GRANT_ACL; @@ -16513,12 +17023,12 @@ grant_ident: user_list: user { - if (Lex->users_list.push_back($1, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($1, thd->mem_root))) MYSQL_YYABORT; } | user_list ',' user { - if (Lex->users_list.push_back($3, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($3, thd->mem_root))) MYSQL_YYABORT; } ; @@ -16526,12 +17036,12 @@ user_list: grant_list: grant_user { - if (Lex->users_list.push_back($1, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($1, thd->mem_root))) MYSQL_YYABORT; } | grant_list ',' grant_user { - if (Lex->users_list.push_back($3, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($3, thd->mem_root))) MYSQL_YYABORT; } ; @@ -16539,12 +17049,12 @@ grant_list: user_and_role_list: user_or_role { - if (Lex->users_list.push_back($1, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($1, thd->mem_root))) MYSQL_YYABORT; } | user_and_role_list ',' user_or_role { - if (Lex->users_list.push_back($3, thd->mem_root)) + if (unlikely(Lex->users_list.push_back($3, thd->mem_root))) MYSQL_YYABORT; } ; @@ -16557,7 +17067,7 @@ grant_user: { $$= $1; $1->pwtext= $4; - if (Lex->sql_command == SQLCOM_REVOKE) + if (unlikely(Lex->sql_command == SQLCOM_REVOKE)) MYSQL_YYABORT; } | user IDENTIFIED_SYM BY PASSWORD_SYM TEXT_STRING @@ -16599,7 +17109,7 @@ column_list_id: ident { String *new_str= new (thd->mem_root) String((const char*) $1.str,$1.length,system_charset_info); - if (new_str == NULL) + if (unlikely(new_str == NULL)) MYSQL_YYABORT; List_iterator iter(Lex->columns); class LEX_COLUMN *point; @@ -16617,7 +17127,7 @@ column_list_id: { LEX_COLUMN *col= (new (thd->mem_root) LEX_COLUMN(*new_str,lex->which_columns)); - if (col == NULL) + if (unlikely(col == NULL)) MYSQL_YYABORT; lex->columns.push_back(col, thd->mem_root); } @@ -16816,7 +17326,7 @@ union_clause: union_list: unit_type_decl union_option { - if (Lex->add_select_to_union_list((bool)$2, $1, TRUE)) + if (unlikely(Lex->add_select_to_union_list((bool)$2, $1, TRUE))) MYSQL_YYABORT; } union_list_part2 @@ -16832,7 +17342,7 @@ union_list: union_list_view: unit_type_decl union_option { - if (Lex->add_select_to_union_list((bool)$2, $1, TRUE)) + if (unlikely(Lex->add_select_to_union_list((bool)$2, $1, TRUE))) MYSQL_YYABORT; } query_expression_body_view @@ -16873,7 +17383,7 @@ order_or_limit: union_head_non_top: unit_type_decl union_option { - if (Lex->add_select_to_union_list((bool)$2, $1, FALSE)) + if (unlikely(Lex->add_select_to_union_list((bool)$2, $1, FALSE))) MYSQL_YYABORT; } ; @@ -16884,6 +17394,24 @@ union_option: | ALL { $$=0; } ; +simple_table: + query_specification { $$= $1; } + | table_value_constructor { $$= $1; } + ; + +table_value_constructor: + VALUES + { + Lex->tvc_start(); + } + values_list + { + $$= Lex->current_select; + if (Lex->tvc_finalize()) + MYSQL_YYABORT; + } + ; + /* Corresponds to the SQL Standard ::= @@ -16901,13 +17429,13 @@ query_specification: ; query_term_union_not_ready: - 
query_specification order_or_limit opt_select_lock_type { $$= $1; } + simple_table order_or_limit opt_select_lock_type { $$= $1; } | '(' select_paren_derived ')' union_order_or_limit { $$= $2; } ; query_term_union_ready: - query_specification opt_select_lock_type { $$= $1; } - | '(' select_paren_derived ')' { $$= $2; } + simple_table opt_select_lock_type { $$= $1; } + | '(' select_paren_derived ')' { $$= $2; } ; query_expression_body: @@ -16928,8 +17456,8 @@ subselect: subselect_start: { LEX *lex=Lex; - if (!lex->expr_allows_subselect || - lex->sql_command == (int)SQLCOM_PURGE) + if (unlikely(!lex->expr_allows_subselect || + lex->sql_command == (int)SQLCOM_PURGE)) { thd->parse_error(); MYSQL_YYABORT; @@ -16941,7 +17469,7 @@ subselect_start: (SELECT .. ) UNION ... becomes SELECT * FROM ((SELECT ...) UNION ...) */ - if (mysql_new_select(Lex, 1, NULL)) + if (unlikely(mysql_new_select(Lex, 1, NULL))) MYSQL_YYABORT; } ; @@ -16986,7 +17514,7 @@ query_expression_option: STRAIGHT_JOIN { Select->options|= SELECT_STRAIGHT_JOIN; } | HIGH_PRIORITY { - if (check_simple_select()) + if (unlikely(Lex->check_simple_select(&$1))) MYSQL_YYABORT; YYPS->m_lock_type= TL_READ_HIGH_PRIORITY; YYPS->m_mdl_type= MDL_SHARED_READ; @@ -16998,13 +17526,13 @@ query_expression_option: | SQL_BIG_RESULT { Select->options|= SELECT_BIG_RESULT; } | SQL_BUFFER_RESULT { - if (check_simple_select()) + if (unlikely(Lex->check_simple_select(&$1))) MYSQL_YYABORT; Select->options|= OPTION_BUFFER_RESULT; } | SQL_CALC_FOUND_ROWS { - if (check_simple_select()) + if (unlikely(Lex->check_simple_select(&$1))) MYSQL_YYABORT; Select->options|= OPTION_FOUND_ROWS; } @@ -17116,6 +17644,9 @@ view_select: */ query_expression_body_view: SELECT_SYM select_options_and_item_list select_init3_view + | table_value_constructor + | table_value_constructor union_order_or_limit + | table_value_constructor union_list_view | '(' select_paren_view ')' | '(' select_paren_view ')' union_order_or_limit | '(' select_paren_view ')' union_list_view @@ -17160,7 +17691,7 @@ trigger_tail: remember_name opt_if_not_exists { - if (Lex->add_create_options_with_check($2)) + if (unlikely(Lex->add_create_options_with_check($2))) MYSQL_YYABORT; } sp_name @@ -17187,7 +17718,7 @@ trigger_tail: LEX *lex= thd->lex; Lex_input_stream *lip= YYLIP; - if (lex->sphead) + if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_NO_RECURSIVE_CREATE, MYF(0), "TRIGGER")); lex->stmt_definition_begin= $1; @@ -17197,7 +17728,7 @@ trigger_tail: (*static_cast(&lex->trg_chistics))= ($17); lex->trg_chistics.ordering_clause_end= lip->get_cpp_ptr(); - if (!lex->make_sp_head(thd, $4, &sp_handler_trigger)) + if (unlikely(!lex->make_sp_head(thd, $4, &sp_handler_trigger))) MYSQL_YYABORT; lex->sphead->set_body_start(thd, lip->get_cpp_tok_start()); @@ -17206,14 +17737,14 @@ trigger_tail: { /* $20 */ LEX *lex= Lex; sp_head *sp= lex->sphead; - if (sp->check_unresolved_goto()) + if (unlikely(sp->check_unresolved_goto())) MYSQL_YYABORT; lex->sql_command= SQLCOM_CREATE_TRIGGER; sp->set_stmt_end(thd); sp->restore_thd_mem_root(thd); - if (sp->is_not_allowed_in_function("trigger")) + if (unlikely(sp->is_not_allowed_in_function("trigger"))) MYSQL_YYABORT; /* @@ -17221,11 +17752,11 @@ trigger_tail: sp_proc_stmt alternatives are not saving/restoring LEX, so lex->query_tables can be wiped out. */ - if (!lex->select_lex.add_table_to_list(thd, $10, - (LEX_CSTRING*) 0, - TL_OPTION_UPDATING, - TL_READ_NO_INSERT, - MDL_SHARED_NO_WRITE)) + if (unlikely(!lex->select_lex. 
+ add_table_to_list(thd, $10, (LEX_CSTRING*) 0, + TL_OPTION_UPDATING, + TL_READ_NO_INSERT, + MDL_SHARED_NO_WRITE))) MYSQL_YYABORT; } ; @@ -17241,9 +17772,9 @@ udf_tail: RETURNS_SYM udf_type SONAME_SYM TEXT_STRING_sys { LEX *lex= thd->lex; - if (lex->add_create_options_with_check($1)) + if (unlikely(lex->add_create_options_with_check($1))) MYSQL_YYABORT; - if (is_native_function(thd, & $2)) + if (unlikely(is_native_function(thd, & $2))) my_yyabort_error((ER_NATIVE_FCT_NAME_COLLISION, MYF(0), $2.str)); lex->sql_command= SQLCOM_CREATE_FUNCTION; lex->udf.name= $2; @@ -17263,7 +17794,8 @@ sf_return_type: } sp_param_type_with_opt_collate { - if (Lex->sphead->fill_field_definition(thd, Lex->last_field)) + if (unlikely(Lex->sphead->fill_field_definition(thd, + Lex->last_field))) MYSQL_YYABORT; } ; @@ -17273,8 +17805,8 @@ sf_tail: sp_name { Lex->sql_command= SQLCOM_CREATE_SPFUNCTION; - if (!Lex->make_sp_head_no_recursive(thd, $1, $2, - &sp_handler_function)) + if (unlikely(!Lex->make_sp_head_no_recursive(thd, $1, $2, + &sp_handler_function))) MYSQL_YYABORT; } opt_sp_parenthesized_fdparam_list @@ -17290,9 +17822,9 @@ sf_tail: sp_tail_is sp_body { - if (Lex->sp_body_finalize_function(thd)) + if (unlikely(Lex->sp_body_finalize_function(thd))) MYSQL_YYABORT; - if (Lex->sphead->m_flags & sp_head::HAS_AGGREGATE_INSTR) + if (unlikely(Lex->sphead->m_flags & sp_head::HAS_AGGREGATE_INSTR)) { my_yyabort_error((ER_NOT_AGGREGATE_FUNCTION, MYF(0))); } @@ -17304,8 +17836,8 @@ sp_tail: opt_if_not_exists sp_name { Lex->sql_command= SQLCOM_CREATE_PROCEDURE; - if (!Lex->make_sp_head_no_recursive(thd, $1, $2, - &sp_handler_procedure)) + if (unlikely(!Lex->make_sp_head_no_recursive(thd, $1, $2, + &sp_handler_procedure))) MYSQL_YYABORT; } opt_sp_parenthesized_pdparam_list @@ -17317,7 +17849,7 @@ sp_tail: sp_tail_is sp_body { - if (Lex->sp_body_finalize_procedure(thd)) + if (unlikely(Lex->sp_body_finalize_procedure(thd))) MYSQL_YYABORT; } ; @@ -17325,7 +17857,7 @@ sp_tail: sf_tail_standalone: sf_tail opt_sp_name { - if ($2 && !$2->eq(Lex->sphead)) + if (unlikely($2 && !$2->eq(Lex->sphead))) my_yyabort_error((ER_END_IDENTIFIER_DOES_NOT_MATCH, MYF(0), ErrConvDQName($2).ptr(), ErrConvDQName(Lex->sphead).ptr())); @@ -17335,7 +17867,7 @@ sf_tail_standalone: sp_tail_standalone: sp_tail opt_sp_name { - if ($2 && !$2->eq(Lex->sphead)) + if (unlikely($2 && !$2->eq(Lex->sphead))) my_yyabort_error((ER_END_IDENTIFIER_DOES_NOT_MATCH, MYF(0), ErrConvDQName($2).ptr(), ErrConvDQName(Lex->sphead).ptr())); @@ -17386,37 +17918,38 @@ opt_format_xid: /* empty */ { $$= false; } | FORMAT_SYM '=' ident_or_text { - if (!my_strcasecmp(system_charset_info, $3.str, "SQL")) + if (lex_string_eq(&$3, STRING_WITH_LEN("SQL"))) $$= true; - else if (!my_strcasecmp(system_charset_info, $3.str, "RAW")) + else if (lex_string_eq(&$3, STRING_WITH_LEN("RAW"))) $$= false; else { - my_yyabort_error((ER_UNKNOWN_EXPLAIN_FORMAT, MYF(0), "XA RECOVER", $3.str)); + my_yyabort_error((ER_UNKNOWN_EXPLAIN_FORMAT, MYF(0), + "XA RECOVER", $3.str)); $$= false; } - } - ; + } + ; xid: text_string { MYSQL_YYABORT_UNLESS($1->length() <= MAXGTRIDSIZE); - if (!(Lex->xid=(XID *)thd->alloc(sizeof(XID)))) + if (unlikely(!(Lex->xid=(XID *)thd->alloc(sizeof(XID))))) MYSQL_YYABORT; Lex->xid->set(1L, $1->ptr(), $1->length(), 0, 0); } | text_string ',' text_string { MYSQL_YYABORT_UNLESS($1->length() <= MAXGTRIDSIZE && $3->length() <= MAXBQUALSIZE); - if (!(Lex->xid=(XID *)thd->alloc(sizeof(XID)))) + if (unlikely(!(Lex->xid=(XID *)thd->alloc(sizeof(XID))))) MYSQL_YYABORT; Lex->xid->set(1L, 
$1->ptr(), $1->length(), $3->ptr(), $3->length()); } | text_string ',' text_string ',' ulong_num { MYSQL_YYABORT_UNLESS($1->length() <= MAXGTRIDSIZE && $3->length() <= MAXBQUALSIZE); - if (!(Lex->xid=(XID *)thd->alloc(sizeof(XID)))) + if (unlikely(!(Lex->xid=(XID *)thd->alloc(sizeof(XID))))) MYSQL_YYABORT; Lex->xid->set($5, $1->ptr(), $1->length(), $3->ptr(), $3->length()); } diff --git a/sql/strfunc.cc b/sql/strfunc.cc index f701c4a09ed..f457f6b29be 100644 --- a/sql/strfunc.cc +++ b/sql/strfunc.cc @@ -79,8 +79,9 @@ ulonglong find_set(TYPELIB *lib, const char *str, size_t length, CHARSET_INFO *c var_len= (uint) (pos - start); uint find= cs ? find_type2(lib, start, var_len, cs) : find_type(lib, start, var_len, (bool) 0); - if (!find && *err_len == 0) // report the first error with length > 0 + if (unlikely(!find && *err_len == 0)) { + // report the first error with length > 0 *err_pos= (char*) start; *err_len= var_len; *set_warning= 1; diff --git a/sql/structs.h b/sql/structs.h index 01d99517fed..d530dd73b7c 100644 --- a/sql/structs.h +++ b/sql/structs.h @@ -754,12 +754,6 @@ public: }; -struct Lex_string_with_pos_st: public LEX_CSTRING -{ - const char *m_pos; -}; - - class Load_data_param { protected: diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 0331343ccef..658bb0e2d1c 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -400,7 +400,7 @@ static const char *vers_alter_history_keywords[]= {"ERROR", "KEEP", NullS}; static Sys_var_enum Sys_vers_alter_history( "system_versioning_alter_history", "Versioning ALTER TABLE mode. " "ERROR: Fail ALTER with error; " /* TODO: fail only when history non-empty */ - "KEEP: Keep historical system rows and subject them to ALTER; ", + "KEEP: Keep historical system rows and subject them to ALTER", SESSION_VAR(vers_alter_history), CMD_LINE(REQUIRED_ARG), vers_alter_history_keywords, DEFAULT(VERS_ALTER_HISTORY_ERROR)); @@ -443,13 +443,13 @@ static bool error_if_in_trans_or_substatement(THD *thd, int in_substatement_error, int in_transaction_error) { - if (thd->in_sub_stmt) + if (unlikely(thd->in_sub_stmt)) { my_error(in_substatement_error, MYF(0)); return true; } - if (thd->in_active_multi_stmt_transaction()) + if (unlikely(thd->in_active_multi_stmt_transaction())) { my_error(in_transaction_error, MYF(0)); return true; @@ -470,6 +470,8 @@ static bool check_has_super(sys_var *self, THD *thd, set_var *var) #endif return false; } + + static bool binlog_format_check(sys_var *self, THD *thd, set_var *var) { if (check_has_super(self, thd, var)) @@ -529,9 +531,9 @@ static bool binlog_format_check(sys_var *self, THD *thd, set_var *var) return true; } - if (error_if_in_trans_or_substatement(thd, - ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT, - ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT)) + if (unlikely(error_if_in_trans_or_substatement(thd, + ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT, + ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT))) return true; return false; @@ -566,9 +568,9 @@ static bool binlog_direct_check(sys_var *self, THD *thd, set_var *var) if (var->type == OPT_GLOBAL) return false; - if (error_if_in_trans_or_substatement(thd, - ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT, - ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT)) + if (unlikely(error_if_in_trans_or_substatement(thd, + ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT, + ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT))) return true; return false; @@ -1606,9 +1608,9 @@ static bool check_gtid_seq_no(sys_var *self, THD *thd, set_var *var) if 
(check_has_super(self, thd, var)) return true; - if (error_if_in_trans_or_substatement(thd, - ER_STORED_FUNCTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO, - ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO)) + if (unlikely(error_if_in_trans_or_substatement(thd, + ER_STORED_FUNCTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO, + ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO))) return true; domain_id= thd->variables.gtid_domain_id; @@ -2369,9 +2371,19 @@ static Sys_var_mybool Sys_old_mode( "old", "Use compatible behavior from previous MariaDB version. See also --old-mode", SESSION_VAR(old_mode), CMD_LINE(OPT_ARG), DEFAULT(FALSE)); -static Sys_var_mybool Sys_old_alter_table( - "old_alter_table", "Use old, non-optimized alter table", - SESSION_VAR(old_alter_table), CMD_LINE(OPT_ARG), DEFAULT(FALSE)); +static const char *alter_algorithm_modes[]= {"DEFAULT", "COPY", "INPLACE", +"NOCOPY", "INSTANT", NULL}; + +static Sys_var_enum Sys_alter_algorithm( + "alter_algorithm", "Specify the alter table algorithm", + SESSION_VAR(alter_algorithm), CMD_LINE(OPT_ARG), + alter_algorithm_modes, DEFAULT(0)); + +static Sys_var_enum Sys_old_alter_table( + "old_alter_table", "Alias for alter_algorithm. " + "Deprecated. Use --alter-algorithm instead.", + SESSION_VAR(alter_algorithm), CMD_LINE(OPT_ARG), + alter_algorithm_modes, DEFAULT(0)); static bool check_old_passwords(sys_var *self, THD *thd, set_var *var) { @@ -2442,8 +2454,7 @@ static Sys_var_ulong Sys_optimizer_search_depth( "optimization, but may produce very bad query plans. If set to 0, " "the system will automatically pick a reasonable value.", SESSION_VAR(optimizer_search_depth), CMD_LINE(REQUIRED_ARG), - VALID_RANGE(0, MAX_TABLES+1), DEFAULT(MAX_TABLES+1), BLOCK_SIZE(1), - NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(0)); + VALID_RANGE(0, MAX_TABLES+1), DEFAULT(MAX_TABLES+1), BLOCK_SIZE(1)); /* this is used in the sigsegv handler */ export const char *optimizer_switch_names[]= @@ -4036,15 +4047,15 @@ static bool check_sql_log_bin(sys_var *self, THD *thd, set_var *var) if (check_has_super(self, thd, var)) return TRUE; - if (var->type == OPT_GLOBAL) + if (unlikely(var->type == OPT_GLOBAL)) { my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0), self->name.str, "SESSION"); return TRUE; } - if (error_if_in_trans_or_substatement(thd, - ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN, - ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN)) + if (unlikely(error_if_in_trans_or_substatement(thd, + ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN, + ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN))) return TRUE; return FALSE; @@ -4177,9 +4188,9 @@ static bool check_skip_replication(sys_var *self, THD *thd, set_var *var) Rows_log_event without Table_map_log_event or transactional updates without the COMMIT). 
*/ - if (error_if_in_trans_or_substatement(thd, - ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION, - ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION)) + if (unlikely(error_if_in_trans_or_substatement(thd, + ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION, + ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION))) return 1; return 0; @@ -4202,11 +4213,24 @@ static Sys_var_harows Sys_select_limit( SESSION_VAR(select_limit), NO_CMD_LINE, VALID_RANGE(0, HA_POS_ERROR), DEFAULT(HA_POS_ERROR), BLOCK_SIZE(1)); +static const char *secure_timestamp_levels[]= {"NO", "SUPER", "REPLICATION", "YES", 0}; +static bool check_timestamp(sys_var *self, THD *thd, set_var *var) +{ + if (opt_secure_timestamp == SECTIME_NO) + return false; + if (opt_secure_timestamp == SECTIME_SUPER) + return check_has_super(self, thd, var); + char buf[1024]; + strxnmov(buf, sizeof(buf), "--secure-timestamp=", + secure_timestamp_levels[opt_secure_timestamp], NULL); + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), buf); + return true; +} static Sys_var_timestamp Sys_timestamp( "timestamp", "Set the time for this client", sys_var::ONLY_SESSION, NO_CMD_LINE, VALID_RANGE(0, TIMESTAMP_MAX_VALUE), - NO_MUTEX_GUARD, IN_BINLOG); + NO_MUTEX_GUARD, IN_BINLOG, ON_CHECK(check_timestamp)); static bool update_last_insert_id(THD *thd, set_var *var) { @@ -5402,9 +5426,7 @@ static const char *wsrep_OSU_method_names[]= { "TOI", "RSU", NullS }; static Sys_var_enum Sys_wsrep_OSU_method( "wsrep_OSU_method", "Method for Online Schema Upgrade", SESSION_VAR(wsrep_OSU_method), CMD_LINE(OPT_ARG), - wsrep_OSU_method_names, DEFAULT(WSREP_OSU_TOI), - NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), - ON_UPDATE(0)); + wsrep_OSU_method_names, DEFAULT(WSREP_OSU_TOI)); static PolyLock_mutex PLock_wsrep_desync(&LOCK_wsrep_desync); static Sys_var_mybool Sys_wsrep_desync ( @@ -5872,9 +5894,7 @@ static Sys_var_enum Sys_binlog_row_image( "before image, and only changed columns are logged in the after image. " "(Default: FULL).", SESSION_VAR(binlog_row_image), CMD_LINE(REQUIRED_ARG), - binlog_row_image_names, DEFAULT(BINLOG_ROW_IMAGE_FULL), - NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL), - ON_UPDATE(NULL)); + binlog_row_image_names, DEFAULT(BINLOG_ROW_IMAGE_FULL)); static bool check_pseudo_slave_mode(sys_var *self, THD *thd, set_var *var) { @@ -6046,3 +6066,13 @@ static Sys_var_uint Sys_in_subquery_conversion_threshold( SESSION_VAR(in_subquery_conversion_threshold), CMD_LINE(OPT_ARG), VALID_RANGE(0, UINT_MAX), DEFAULT(IN_SUBQUERY_CONVERSION_THRESHOLD), BLOCK_SIZE(1)); #endif + +static Sys_var_enum Sys_secure_timestamp( + "secure_timestamp", "Restricts direct setting of a session " + "timestamp. 
Possible levels are: YES - timestamp cannot deviate from " + "the system clock, REPLICATION - replication thread can adjust " + "timestamp to match the master's, SUPER - a user with this " + "privilege and a replication thread can adjust timestamp, NO - " + "historical behavior, anyone can modify session timestamp", + READ_ONLY GLOBAL_VAR(opt_secure_timestamp), CMD_LINE(REQUIRED_ARG), + secure_timestamp_levels, DEFAULT(SECTIME_NO)); diff --git a/sql/sys_vars.ic b/sql/sys_vars.ic index e04e09e9bc6..498204deb92 100644 --- a/sql/sys_vars.ic +++ b/sql/sys_vars.ic @@ -325,7 +325,7 @@ public: Class specific constructor arguments: char* values[] - 0-terminated list of strings of valid values - Backing store: uint + Backing store: ulong @note Do *not* use "enum FOO" variables as a backing store, there is no @@ -1271,7 +1271,7 @@ public: default_value, res->ptr(), res->length(), &error, &error_len); - if (error) + if (unlikely(error)) { ErrConvString err(error, error_len, res->charset()); my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name.str, err.ptr()); @@ -1370,7 +1370,7 @@ public: That is even while empty (zero-length) values are considered errors by find_set(), these errors are ignored here */ - if (error_len) + if (unlikely(error_len)) { ErrConvString err(error, error_len, res->charset()); my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name.str, err.ptr()); @@ -1465,7 +1465,7 @@ public: plugin= ha_resolve_by_name(thd, &pname, false); else plugin= my_plugin_lock_by_name(thd, &pname, plugin_type); - if (!plugin) + if (unlikely(!plugin)) { // historically different error code if (plugin_type == MYSQL_STORAGE_ENGINE_PLUGIN) @@ -1892,10 +1892,11 @@ public: const char *comment, int flag_args, CMD_LINE getopt, double min_val, double max_val, - PolyLock *lock, enum binlog_status_enum binlog_status_arg) + PolyLock *lock, enum binlog_status_enum binlog_status_arg, + on_check_function on_check_func=0) : Sys_var_double(name_arg, comment, flag_args, 0, sizeof(double), getopt, min_val, - max_val, 0, lock, binlog_status_arg) + max_val, 0, lock, binlog_status_arg, on_check_func) { SYSVAR_ASSERT(scope() == ONLY_SESSION); SYSVAR_ASSERT(getopt.id < 0); // NO_CMD_LINE, because the offset is fake diff --git a/sql/table.cc b/sql/table.cc index 93684cb5221..11e1b9f3b7a 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1,5 +1,5 @@ -/* Copyright (c) 2000, 2015, Oracle and/or its affiliates. - Copyright (c) 2008, 2016, MariaDB +/* Copyright (c) 2000, 2017, Oracle and/or its affiliates. 
+ Copyright (c) 2008, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -677,7 +677,7 @@ err: mysql_file_close(file, MYF(MY_WME)); err_not_open: - if (share->error && !error_given) + if (unlikely(share->error && !error_given)) { share->open_errno= my_errno; open_table_error(share, share->error, share->open_errno); @@ -1856,6 +1856,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, vers_can_native= plugin_hton(se_plugin)->flags & HTON_NATIVE_SYS_VERSIONING; row_start_field= row_start; row_end_field= row_end; + status_var_increment(thd->status_var.feature_system_versioning); } // if (system_period == NULL) for (i=0 ; i < share->fields; i++, strpos+=field_pack_length, field_ptr++) @@ -2804,11 +2805,14 @@ int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write, thd->reset_db(&db); lex_start(thd); - if ((error= parse_sql(thd, & parser_state, NULL) || - sql_unusable_for_discovery(thd, hton, sql_copy))) + if (unlikely((error= parse_sql(thd, & parser_state, NULL) || + sql_unusable_for_discovery(thd, hton, sql_copy)))) goto ret; thd->lex->create_info.db_type= hton; +#ifdef WITH_PARTITION_STORAGE_ENGINE + thd->work_part_info= 0; // For partitioning +#endif if (tabledef_version.str) thd->lex->create_info.tabledef_version= tabledef_version; @@ -2837,7 +2841,7 @@ ret: reenable_binlog(thd); thd->variables.sql_mode= saved_mode; thd->variables.character_set_client= old_cs; - if (thd->is_error() || error) + if (unlikely(thd->is_error() || error)) { thd->clear_error(); my_error(ER_SQL_DISCOVER_ERROR, MYF(0), @@ -2997,14 +3001,14 @@ static bool fix_and_check_vcol_expr(THD *thd, TABLE *table, res.errors= 0; int error= func_expr->walk(&Item::check_vcol_func_processor, 0, &res); - if (error || (res.errors & VCOL_IMPOSSIBLE)) + if (unlikely(error || (res.errors & VCOL_IMPOSSIBLE))) { // this can only happen if the frm was corrupted my_error(ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(0), res.name, vcol->get_vcol_type_name(), vcol->name.str); DBUG_RETURN(1); } - else if (res.errors & VCOL_AUTO_INC) + else if (unlikely(res.errors & VCOL_AUTO_INC)) { /* An auto_increment field may not be used in an expression for @@ -3085,7 +3089,7 @@ unpack_vcol_info_from_frm(THD *thd, MEM_ROOT *mem_root, TABLE *table, lex.last_field= &vcol_storage; error= parse_sql(thd, &parser_state, NULL); - if (error) + if (unlikely(error)) goto end; if (lex.current_select->table_list.first[0].next_global) @@ -3354,7 +3358,8 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, if (share->table_check_constraints || share->field_check_constraints) outparam->check_constraints= check_constraint_ptr; - if (parse_vcol_defs(thd, &outparam->mem_root, outparam, &error_reported)) + if (unlikely(parse_vcol_defs(thd, &outparam->mem_root, outparam, + &error_reported))) { error= OPEN_FRM_CORRUPTED; goto err; @@ -3506,7 +3511,8 @@ partititon_err: /* Set a flag if the table is crashed and it can be auto. 
repaired */ share->crashed= (outparam->file->auto_repair(ha_err) && !(ha_open_flags & HA_OPEN_FOR_REPAIR)); - outparam->file->print_error(ha_err, MYF(0)); + if (!thd->is_error()) + outparam->file->print_error(ha_err, MYF(0)); error_reported= TRUE; if (ha_err == HA_ERR_TABLE_DEF_CHANGED) @@ -3579,7 +3585,7 @@ partititon_err: table TABLE object to free */ -int closefrm(register TABLE *table) +int closefrm(TABLE *table) { int error=0; DBUG_ENTER("closefrm"); @@ -3616,7 +3622,7 @@ int closefrm(register TABLE *table) /* Deallocate temporary blob storage */ -void free_blobs(register TABLE *table) +void free_blobs(TABLE *table) { uint *ptr, *end; for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ; @@ -3810,17 +3816,6 @@ void append_unescaped(String *res, const char *pos, size_t length) for (; pos != end ; pos++) { -#if defined(USE_MB) && MYSQL_VERSION_ID < 40100 - uint mblen; - if (use_mb(default_charset_info) && - (mblen= my_ismbchar(default_charset_info, pos, end))) - { - res->append(pos, mblen); - pos+= mblen; - continue; - } -#endif - switch (*pos) { case 0: /* Must be escaped for 'mysql' */ res->append('\\'); @@ -4235,7 +4230,7 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def) /* Whether the table definition has already been validated. */ if (table->s->table_field_def_cache == table_def) - DBUG_RETURN(FALSE); + goto end; if (table->s->fields != table_def->count) { @@ -4268,6 +4263,8 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def) is backward compatible. */ } + else + { StringBuffer<1024> sql_type(system_charset_info); sql_type.extra_allocation(256); // Allocate min 256 characters at once for (i=0 ; i < table_def->count; i++, field_def++) @@ -4353,6 +4350,7 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def) error= TRUE; } } + } if (table_def->primary_key_parts) { @@ -4394,9 +4392,19 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def) } } - if (! error) + if (likely(! error)) table->s->table_field_def_cache= table_def; +end: + + if (has_keys && !error && !table->key_info) + { + report_error(0, "Incorrect definition of table %s.%s: " + "indexes are missing", + table->s->db.str, table->alias.c_ptr()); + error= TRUE; + } + DBUG_RETURN(error); } @@ -5258,14 +5266,25 @@ int TABLE_LIST::view_check_option(THD *thd, bool ignore_failure) int TABLE::verify_constraints(bool ignore_failure) { + /* + We have to check is_error() first as we are checking it for each + constraint to catch fatal warnings. + */ + if (in_use->is_error()) + return (VIEW_CHECK_ERROR); + /* go trough check option clauses for fields and table */ if (check_constraints && !(in_use->variables.option_bits & OPTION_NO_CHECK_CONSTRAINT_CHECKS)) { for (Virtual_column_info **chk= check_constraints ; *chk ; chk++) { - /* yes! NULL is ok, see 4.23.3.4 Table check constraints, part 2, SQL:2016 */ - if ((*chk)->expr->val_int() == 0 && !(*chk)->expr->null_value) + /* + yes! NULL is ok. + see 4.23.3.4 Table check constraints, part 2, SQL:2016 + */ + if (((*chk)->expr->val_int() == 0 && !(*chk)->expr->null_value) || + in_use->is_error()) { my_error(ER_CONSTRAINT_FAILED, MYF(ignore_failure ? ME_JUST_WARNING : 0), (*chk)->name.str, @@ -5274,7 +5293,11 @@ int TABLE::verify_constraints(bool ignore_failure) } } } - return(VIEW_CHECK_OK); + /* + We have to check in_use() as checking constraints may have generated + warnings that should be treated as errors + */ + return(!in_use->is_error() ? 
VIEW_CHECK_OK : VIEW_CHECK_ERROR); } /* @@ -6702,6 +6725,12 @@ void TABLE::mark_columns_per_binlog_row_image() DBUG_ASSERT(FALSE); } } + /* + We have to ensure that all virtual columns that are part of read set + are calculated. + */ + if (vcol_set) + bitmap_union(vcol_set, read_set); file->column_bitmaps_signal(); } @@ -6743,7 +6772,8 @@ bool TABLE::mark_virtual_col(Field *field) /* @brief Mark virtual columns for update/insert commands - @param insert_fl <-> virtual columns are marked for insert command + @param insert_fl true if virtual columns are marked for insert command + For the moment this is not used, may be used in future. @details The function marks virtual columns used in a update/insert commands @@ -6768,7 +6798,8 @@ bool TABLE::mark_virtual_col(Field *field) be added to read_set either. */ -bool TABLE::mark_virtual_columns_for_write(bool insert_fl) +bool TABLE::mark_virtual_columns_for_write(bool insert_fl + __attribute__((unused))) { Field **vfield_ptr, *tmp_vfield; bool bitmap_updated= false; @@ -6778,35 +6809,13 @@ bool TABLE::mark_virtual_columns_for_write(bool insert_fl) { tmp_vfield= *vfield_ptr; if (bitmap_is_set(write_set, tmp_vfield->field_index)) - bitmap_updated= mark_virtual_col(tmp_vfield); + bitmap_updated|= mark_virtual_col(tmp_vfield); else if (tmp_vfield->vcol_info->stored_in_db || - (tmp_vfield->flags & PART_KEY_FLAG)) + (tmp_vfield->flags & (PART_KEY_FLAG | FIELD_IN_PART_FUNC_FLAG))) { - if (insert_fl) - { - bitmap_set_bit(write_set, tmp_vfield->field_index); - mark_virtual_col(tmp_vfield); - bitmap_updated= true; - } - else - { - MY_BITMAP *save_read_set= read_set, *save_vcol_set= vcol_set; - Item *vcol_item= tmp_vfield->vcol_info->expr; - DBUG_ASSERT(vcol_item); - bitmap_clear_all(&tmp_set); - read_set= vcol_set= &tmp_set; - vcol_item->walk(&Item::register_field_in_read_map, 1, 0); - read_set= save_read_set; - vcol_set= save_vcol_set; - if (bitmap_is_overlapping(&tmp_set, write_set)) - { - bitmap_set_bit(write_set, tmp_vfield->field_index); - bitmap_set_bit(vcol_set, tmp_vfield->field_index); - bitmap_union(read_set, &tmp_set); - bitmap_union(vcol_set, &tmp_set); - bitmap_updated= true; - } - } + bitmap_set_bit(write_set, tmp_vfield->field_index); + mark_virtual_col(tmp_vfield); + bitmap_updated= true; } } if (bitmap_updated) @@ -7452,8 +7461,8 @@ bool TABLE_LIST::process_index_hints(TABLE *tbl) } /* - TODO: get rid of tbl->force_index (on if any FORCE INDEX is specified) and - create tbl->force_index_join instead. + TODO: get rid of tbl->force_index (on if any FORCE INDEX is specified) + and create tbl->force_index_join instead. Then use the correct force_index_XX instead of the global one. 
*/ if (!index_join[INDEX_HINT_FORCE].is_clear_all() || @@ -7483,21 +7492,27 @@ bool TABLE_LIST::process_index_hints(TABLE *tbl) } -size_t max_row_length(TABLE *table, const uchar *data) +size_t max_row_length(TABLE *table, MY_BITMAP const *cols, const uchar *data) { TABLE_SHARE *table_s= table->s; size_t length= table_s->reclength + 2 * table_s->fields; uint *const beg= table_s->blob_field; uint *const end= beg + table_s->blob_fields; + my_ptrdiff_t const rec_offset= (my_ptrdiff_t) (data - table->record[0]); + DBUG_ENTER("max_row_length"); for (uint *ptr= beg ; ptr != end ; ++ptr) { - Field_blob* const blob= (Field_blob*) table->field[*ptr]; - length+= blob->get_length((const uchar*) - (data + blob->offset(table->record[0]))) + - HA_KEY_BLOB_LENGTH; + Field * const field= table->field[*ptr]; + if (bitmap_is_set(cols, field->field_index) && + !field->is_null(rec_offset)) + { + Field_blob * const blob= (Field_blob*) field; + length+= blob->get_length(rec_offset) + 8; /* max blob store length */ + } } - return length; + DBUG_PRINT("exit", ("length: %lld", (longlong) length)); + DBUG_RETURN(length); } @@ -7612,7 +7627,7 @@ int TABLE::update_virtual_fields(handler *h, enum_vcol_update_mode update_mode) Query_arena backup_arena; Turn_errors_to_warnings_handler Suppress_errors; int error; - bool handler_pushed= 0; + bool handler_pushed= 0, update_all_columns= 1; DBUG_ASSERT(vfield); if (h->keyread_enabled()) @@ -7629,6 +7644,16 @@ int TABLE::update_virtual_fields(handler *h, enum_vcol_update_mode update_mode) in_use->push_internal_handler(&Suppress_errors); handler_pushed= 1; } + else if (update_mode == VCOL_UPDATE_FOR_REPLACE && + in_use->is_current_stmt_binlog_format_row() && + in_use->variables.binlog_row_image != BINLOG_ROW_IMAGE_MINIMAL) + { + /* + If we are doing a replace with not minimal binary logging, we have to + calculate all virtual columns. 
+ */ + update_all_columns= 1; + } /* Iterate over virtual fields in the table */ for (vfield_ptr= vfield; *vfield_ptr; vfield_ptr++) @@ -7641,8 +7666,8 @@ int TABLE::update_virtual_fields(handler *h, enum_vcol_update_mode update_mode) bool update= 0, swap_values= 0; switch (update_mode) { case VCOL_UPDATE_FOR_READ: - update= !vcol_info->stored_in_db - && bitmap_is_set(vcol_set, vf->field_index); + update= (!vcol_info->stored_in_db && + bitmap_is_set(vcol_set, vf->field_index)); swap_values= 1; break; case VCOL_UPDATE_FOR_DELETE: @@ -7650,8 +7675,9 @@ int TABLE::update_virtual_fields(handler *h, enum_vcol_update_mode update_mode) update= bitmap_is_set(vcol_set, vf->field_index); break; case VCOL_UPDATE_FOR_REPLACE: - update= !vcol_info->stored_in_db && (vf->flags & PART_KEY_FLAG) - && bitmap_is_set(vcol_set, vf->field_index); + update= ((!vcol_info->stored_in_db && (vf->flags & PART_KEY_FLAG) && + bitmap_is_set(vcol_set, vf->field_index)) || + update_all_columns); if (update && (vf->flags & BLOB_FLAG)) { /* @@ -7668,8 +7694,8 @@ int TABLE::update_virtual_fields(handler *h, enum_vcol_update_mode update_mode) case VCOL_UPDATE_INDEXED: case VCOL_UPDATE_INDEXED_FOR_UPDATE: /* Read indexed fields that was not updated in VCOL_UPDATE_FOR_READ */ - update= !vcol_info->stored_in_db && (vf->flags & PART_KEY_FLAG) && - bitmap_is_set(vcol_set, vf->field_index); + update= (!vcol_info->stored_in_db && (vf->flags & PART_KEY_FLAG) && + !bitmap_is_set(vcol_set, vf->field_index)); swap_values= 1; break; } @@ -7789,8 +7815,8 @@ void TABLE::vers_update_fields() { if (!vers_write) return; - if (vers_start_field()->store_timestamp(in_use->systime(), - in_use->systime_sec_part())) + if (vers_start_field()->store_timestamp(in_use->query_start(), + in_use->query_start_sec_part())) DBUG_ASSERT(0); } else @@ -7805,8 +7831,8 @@ void TABLE::vers_update_fields() void TABLE::vers_update_end() { - if (vers_end_field()->store_timestamp(in_use->systime(), - in_use->systime_sec_part())) + if (vers_end_field()->store_timestamp(in_use->query_start(), + in_use->query_start_sec_part())) DBUG_ASSERT(0); } @@ -7957,7 +7983,7 @@ bool TABLE::insert_all_rows_into_tmp_table(THD *thd, tmp_table->file->ha_disable_indexes(HA_KEY_SWITCH_ALL); file->ha_index_or_rnd_end(); - if (file->ha_rnd_init_with_error(1)) + if (unlikely(file->ha_rnd_init_with_error(1))) DBUG_RETURN(1); if (tmp_table->no_rows) @@ -7969,10 +7995,10 @@ bool TABLE::insert_all_rows_into_tmp_table(THD *thd, tmp_table->file->ha_start_bulk_insert(file->stats.records); } - while (!file->ha_rnd_next(tmp_table->record[0])) + while (likely(!file->ha_rnd_next(tmp_table->record[0]))) { write_err= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]); - if (write_err) + if (unlikely(write_err)) { bool is_duplicate; if (tmp_table->file->is_fatal_error(write_err, HA_CHECK_DUP) && @@ -7983,11 +8009,8 @@ bool TABLE::insert_all_rows_into_tmp_table(THD *thd, DBUG_RETURN(1); } - if (thd->check_killed()) - { - thd->send_kill_message(); + if (unlikely(thd->check_killed())) goto err_killed; - } } if (!tmp_table->no_rows && tmp_table->file->ha_end_bulk_insert()) goto err; @@ -8154,7 +8177,21 @@ bool TABLE_LIST::init_derived(THD *thd, bool init_view) (first_table && first_table->is_multitable())) set_multitable(); - unit->derived= this; + if (!unit->derived) + unit->derived= this; + else if (!is_with_table_recursive_reference() && unit->derived != this) + { + if (unit->derived->is_with_table_recursive_reference()) + unit->derived= this; + else if 
(vers_conditions.eq(unit->derived->vers_conditions)) + vers_conditions.empty(); + else + { + my_error(ER_CONFLICTING_FOR_SYSTEM_TIME, MYF(0)); + return TRUE; + } + } + if (init_view && !view) { /* This is all what we can do for a derived table for now. */ @@ -8281,7 +8318,10 @@ bool TABLE_LIST::change_refs_to_fields() if (!used_items.elements) return FALSE; - materialized_items= (Item**)thd->calloc(sizeof(void*) * table->s->fields); + Item **materialized_items= + (Item **)thd->calloc(sizeof(void *) * table->s->fields); + if (!materialized_items) + return TRUE; while ((ref= (Item_direct_ref*)li++)) { @@ -8440,17 +8480,15 @@ bool TR_table::update(ulonglong start_id, ulonglong end_id) if (!table && open()) return true; - timeval start_time= {thd->systime(), long(thd->systime_sec_part())}; - thd->set_start_time(); - timeval end_time= {thd->systime(), long(thd->systime_sec_part())}; + store(FLD_BEGIN_TS, thd->transaction_time()); + timeval end_time= {thd->query_start(), long(thd->query_start_sec_part())}; store(FLD_TRX_ID, start_id); store(FLD_COMMIT_ID, end_id); - store(FLD_BEGIN_TS, start_time); store(FLD_COMMIT_TS, end_time); store_iso_level(thd->tx_isolation); int error= table->file->ha_write_row(table->record[0]); - if (error) + if (unlikely(error)) table->file->print_error(error, MYF(0)); return error; } @@ -8469,10 +8507,10 @@ bool TR_table::query(ulonglong trx_id) Item *field= newx Item_field(thd, &slex.context, (*this)[FLD_TRX_ID]); Item *value= newx Item_int(thd, trx_id); COND *conds= newx Item_func_eq(thd, field, value); - if ((error= setup_conds(thd, this, dummy, &conds))) + if (unlikely((error= setup_conds(thd, this, dummy, &conds)))) return false; select= make_select(table, 0, 0, conds, NULL, 0, &error); - if (error || !select) + if (unlikely(error || !select)) return false; // FIXME: (performance) force index 'transaction_id' error= init_read_record(&info, thd, table, select, NULL, @@ -8503,11 +8541,11 @@ bool TR_table::query(MYSQL_TIME &commit_time, bool backwards) conds= newx Item_func_ge(thd, field, value); else conds= newx Item_func_le(thd, field, value); - if ((error= setup_conds(thd, this, dummy, &conds))) + if (unlikely((error= setup_conds(thd, this, dummy, &conds)))) return false; // FIXME: (performance) force index 'commit_timestamp' select= make_select(table, 0, 0, conds, NULL, 0, &error); - if (error || !select) + if (unlikely(error || !select)) return false; error= init_read_record(&info, thd, table, select, NULL, 1 /* use_record_cache */, true /* print_error */, @@ -8567,6 +8605,12 @@ bool TR_table::query_sees(bool &result, ulonglong trx_id1, ulonglong trx_id0, return false; } + if (trx_id0 == ULONGLONG_MAX || trx_id1 == 0) + { + result= false; + return false; + } + if (!commit_id1) { if (!query(trx_id1)) @@ -8702,28 +8746,53 @@ bool TR_table::check(bool error) return false; } -void vers_select_conds_t::resolve_units(bool timestamps_only) +bool vers_select_conds_t::resolve_units(THD *thd) { DBUG_ASSERT(type != SYSTEM_TIME_UNSPECIFIED); DBUG_ASSERT(start.item); - start.resolve_unit(timestamps_only); - end.resolve_unit(timestamps_only); + return start.resolve_unit(thd) || + end.resolve_unit(thd); } -void Vers_history_point::resolve_unit(bool timestamps_only) +bool vers_select_conds_t::eq(const vers_select_conds_t &conds) const { - if (item && unit == VERS_UNDEFINED) - { - if (item->type() == Item::FIELD_ITEM || timestamps_only) - unit= VERS_TIMESTAMP; - else if (item->result_type() == INT_RESULT || - item->result_type() == REAL_RESULT) - unit= VERS_TRX_ID; - else - 
unit= VERS_TIMESTAMP; + if (type != conds.type) + return false; + switch (type) { + case SYSTEM_TIME_UNSPECIFIED: + case SYSTEM_TIME_ALL: + return true; + case SYSTEM_TIME_BEFORE: + DBUG_ASSERT(0); + case SYSTEM_TIME_AS_OF: + return start.eq(conds.start); + case SYSTEM_TIME_FROM_TO: + case SYSTEM_TIME_BETWEEN: + return start.eq(conds.start) && end.eq(conds.end); } + DBUG_ASSERT(0); + return false; } + +bool Vers_history_point::resolve_unit(THD *thd) +{ + if (!item) + return false; + if (!item->fixed && item->fix_fields(thd, &item)) + return true; + return item->this_item()->type_handler_for_system_time()-> + Vers_history_point_resolve_unit(thd, this); +} + + +void Vers_history_point::bad_expression_data_type_error(const char *type) const +{ + my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), + type, "FOR SYSTEM_TIME"); +} + + void Vers_history_point::fix_item() { if (item && item->decimals == 0 && item->type() == Item::FUNC_ITEM && @@ -8731,8 +8800,14 @@ void Vers_history_point::fix_item() item->decimals= 6; } + +bool Vers_history_point::eq(const vers_history_point_t &point) const +{ + return unit == point.unit && item->eq(point.item, false); +} + void Vers_history_point::print(String *str, enum_query_type query_type, - const char *prefix, size_t plen) + const char *prefix, size_t plen) const { const static LEX_CSTRING unit_type[]= { diff --git a/sql/table.h b/sql/table.h index 1e9dad75b70..46a2ec6c49d 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1,8 +1,7 @@ #ifndef TABLE_INCLUDED #define TABLE_INCLUDED -/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2009, 2014, SkySQL Ab. - Copyright (c) 2016, 2017, MariaDB Corporation. +/* Copyright (c) 2000, 2017, Oracle and/or its affiliates. + Copyright (c) 2009, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -487,10 +486,11 @@ typedef struct st_table_field_def class Table_check_intact { protected: + bool has_keys; virtual void report_error(uint code, const char *fmt, ...)= 0; public: - Table_check_intact() {} + Table_check_intact(bool keys= false) : has_keys(keys) {} virtual ~Table_check_intact() {} /** Checks whether a table is intact. 
*/ @@ -505,6 +505,8 @@ class Table_check_intact_log_error : public Table_check_intact { protected: void report_error(uint, const char *fmt, ...); +public: + Table_check_intact_log_error() : Table_check_intact(true) {} }; @@ -1825,8 +1827,22 @@ public: fix_item(); } void empty() { unit= VERS_UNDEFINED; item= NULL; } - void print(String *str, enum_query_type, const char *prefix, size_t plen); - void resolve_unit(bool timestamps_only); + void print(String *str, enum_query_type, const char *prefix, size_t plen) const; + bool resolve_unit(THD *thd); + bool resolve_unit_trx_id(THD *thd) + { + if (unit == VERS_UNDEFINED) + unit= VERS_TRX_ID; + return false; + } + bool resolve_unit_timestamp(THD *thd) + { + if (unit == VERS_UNDEFINED) + unit= VERS_TIMESTAMP; + return false; + } + void bad_expression_data_type_error(const char *type) const; + bool eq(const vers_history_point_t &point) const; }; struct vers_select_conds_t @@ -1855,27 +1871,20 @@ struct vers_select_conds_t end= _end; } - void print(String *str, enum_query_type query_type); + void print(String *str, enum_query_type query_type) const; bool init_from_sysvar(THD *thd); - bool operator== (vers_system_time_t b) - { - return type == b; - } - bool operator!= (vers_system_time_t b) - { - return type != b; - } - operator bool() const + bool is_set() const { return type != SYSTEM_TIME_UNSPECIFIED; } - void resolve_units(bool timestamps_only); + bool resolve_units(THD *thd); bool user_defined() const { return !from_query && type != SYSTEM_TIME_UNSPECIFIED; } + bool eq(const vers_select_conds_t &conds) const; }; /* @@ -2259,7 +2268,7 @@ struct TABLE_LIST /* TABLE_TYPE_UNKNOWN if any type is acceptable */ Table_type required_type; handlerton *db_type; /* table_type for handler */ - char timestamp_buffer[20]; /* buffer for timestamp (19+1) */ + char timestamp_buffer[MAX_DATETIME_WIDTH + 1]; /* This TABLE_LIST object is just placeholder for prelocking, it will be used for implicit LOCK TABLES only and won't be used in real statement. @@ -2291,8 +2300,6 @@ struct TABLE_LIST /* TODO: replace with derived_type */ bool merged; bool merged_for_insert; - /* TRUE <=> don't prepare this derived table/view as it should be merged.*/ - bool skip_prepare_derived; bool sequence; /* Part of NEXTVAL/CURVAL/LASTVAL */ /* @@ -2302,7 +2309,6 @@ struct TABLE_LIST List used_items; /* Sublist (tail) of persistent used_items */ List persistent_used_items; - Item **materialized_items; /* View creation context. 
*/ @@ -2871,7 +2877,7 @@ enum get_table_share_flags { GTS_FORCE_DISCOVERY = 16 }; -size_t max_row_length(TABLE *table, const uchar *data); +size_t max_row_length(TABLE *table, MY_BITMAP const *cols, const uchar *data); void init_mdl_requests(TABLE_LIST *table_list); diff --git a/sql/table_cache.cc b/sql/table_cache.cc index e524e0995e2..2c5d25c0308 100644 --- a/sql/table_cache.cc +++ b/sql/table_cache.cc @@ -747,7 +747,7 @@ TDC_element *tdc_lock_share(THD *thd, const char *db, const char *table_name) char key[MAX_DBKEY_LENGTH]; DBUG_ENTER("tdc_lock_share"); - if (fix_thd_pins(thd)) + if (unlikely(fix_thd_pins(thd))) DBUG_RETURN((TDC_element*) MY_ERRPTR); element= (TDC_element *) lf_hash_search(&tdc_hash, thd->tdc_hash_pins, @@ -756,7 +756,7 @@ TDC_element *tdc_lock_share(THD *thd, const char *db, const char *table_name) if (element) { mysql_mutex_lock(&element->LOCK_table_share); - if (!element->share || element->share->error) + if (unlikely(!element->share || element->share->error)) { mysql_mutex_unlock(&element->LOCK_table_share); element= 0; @@ -838,7 +838,7 @@ retry: /* note that tdc_acquire_share() *always* uses discovery */ open_table_def(thd, share, flags | GTS_USE_DISCOVERY); - if (share->error) + if (checked_unlikely(share->error)) { free_table_share(share); lf_hash_delete(&tdc_hash, thd->tdc_hash_pins, key, key_length); @@ -894,7 +894,7 @@ retry: We found an existing table definition. Return it if we didn't get an error when reading the table definition from file. */ - if (share->error) + if (unlikely(share->error)) { open_table_error(share, share->error, share->open_errno); goto err; diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc index 908d28f6629..f23ec7a1acc 100644 --- a/sql/temporary_tables.cc +++ b/sql/temporary_tables.cc @@ -112,12 +112,14 @@ TABLE *THD::create_and_open_tmp_table(handlerton *hton, @param db [IN] Database name @param table_name [IN] Table name + @param state [IN] State of temp table to open @return Success Pointer to first used table instance. Failure NULL */ TABLE *THD::find_temporary_table(const char *db, - const char *table_name) + const char *table_name, + Temporary_table_state state) { DBUG_ENTER("THD::find_temporary_table"); @@ -134,7 +136,7 @@ TABLE *THD::find_temporary_table(const char *db, key_length= create_tmp_table_def_key(key, db, table_name); locked= lock_temporary_tables(); - table = find_temporary_table(key, key_length, TMP_TABLE_IN_USE); + table= find_temporary_table(key, key_length, state); if (locked) { DBUG_ASSERT(m_tmp_tables_locked); @@ -153,16 +155,12 @@ TABLE *THD::find_temporary_table(const char *db, @return Success Pointer to first used table instance. 
Failure NULL */ -TABLE *THD::find_temporary_table(const TABLE_LIST *tl) +TABLE *THD::find_temporary_table(const TABLE_LIST *tl, + Temporary_table_state state) { DBUG_ENTER("THD::find_temporary_table"); - - if (!has_temporary_tables()) - { - DBUG_RETURN(NULL); - } - - TABLE *table= find_temporary_table(tl->get_db_name(), tl->get_table_name()); + TABLE *table= find_temporary_table(tl->get_db_name(), tl->get_table_name(), + state); DBUG_RETURN(table); } @@ -1398,7 +1396,8 @@ bool THD::log_events_and_free_tmp_shares() get_stmt_da()->set_overwrite_status(true); transaction.stmt.mark_dropped_temp_table(); - if ((error= (mysql_bin_log.write(&qinfo) || error))) + bool error2= mysql_bin_log.write(&qinfo); + if (unlikely(error|= error2)) { /* If we're here following THD::cleanup, thence the connection diff --git a/sql/thr_malloc.cc b/sql/thr_malloc.cc index e0385c33fd8..fd33ee4c32b 100644 --- a/sql/thr_malloc.cc +++ b/sql/thr_malloc.cc @@ -27,7 +27,7 @@ extern "C" { void sql_alloc_error_handler(void) { THD *thd= current_thd; - if (thd) + if (likely(thd)) { if (! thd->is_error()) { diff --git a/sql/threadpool_common.cc b/sql/threadpool_common.cc index a0d1532e31a..24ab972776c 100644 --- a/sql/threadpool_common.cc +++ b/sql/threadpool_common.cc @@ -171,7 +171,7 @@ void tp_callback(TP_connection *c) c->state = TP_STATE_RUNNING; - if (!thd) + if (unlikely(!thd)) { /* No THD, need to login first. */ DBUG_ASSERT(c->connect); diff --git a/sql/threadpool_win.cc b/sql/threadpool_win.cc index 012f7c5a439..0cc683c631d 100644 --- a/sql/threadpool_win.cc +++ b/sql/threadpool_win.cc @@ -256,7 +256,7 @@ int TP_connection_win::start_io() If skip_completion_port_on_success is set, we need to handle it right here, because completion callback would not be executed by the pool. */ - if(skip_completion_port_on_success) + if (skip_completion_port_on_success) { CancelThreadpoolIo(io); io_completion_callback(callback_instance, this, &overlapped, last_error, @@ -265,7 +265,7 @@ int TP_connection_win::start_io() return 0; } - if(last_error == ERROR_IO_PENDING) + if (last_error == ERROR_IO_PENDING) { return 0; } diff --git a/sql/transaction.cc b/sql/transaction.cc index d8d435e826a..1c2820200d1 100644 --- a/sql/transaction.cc +++ b/sql/transaction.cc @@ -203,6 +203,7 @@ bool trans_begin(THD *thd, uint flags) thd->transaction.all.reset(); thd->has_waiter= false; thd->waiting_on_group_commit= false; + thd->transaction.start_time.reset(thd); if (res) DBUG_RETURN(TRUE); @@ -656,7 +657,7 @@ bool trans_savepoint(THD *thd, LEX_CSTRING name) we'll lose a little bit of memory in transaction mem_root, but it'll be free'd when transaction ends anyway */ - if (ha_savepoint(thd, newsv)) + if (unlikely(ha_savepoint(thd, newsv))) DBUG_RETURN(TRUE); newsv->prev= thd->transaction.savepoints; diff --git a/sql/tztime.cc b/sql/tztime.cc index 277709ad9e9..61fbee1731a 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1768,7 +1768,8 @@ end_with_setting_default_tz: most of them once more, but this is OK for system tables open for READ. 
*/ - if (!(global_system_variables.time_zone= my_tz_find(thd, &tmp_tzname2))) + if (unlikely(!(global_system_variables.time_zone= + my_tz_find(thd, &tmp_tzname2)))) { sql_print_error("Fatal error: Illegal or unknown default time zone '%s'", default_tzname); @@ -1783,7 +1784,7 @@ end_with_close: end_with_cleanup: /* if there were error free time zone describing structs */ - if (return_val) + if (unlikely(return_val)) my_tz_free(); end: delete thd; diff --git a/sql/uniques.cc b/sql/uniques.cc index 8ed1ceda6a1..6bc870133ff 100644 --- a/sql/uniques.cc +++ b/sql/uniques.cc @@ -209,7 +209,7 @@ static double get_merge_many_buffs_cost(uint *buffer, uint last_n_elems, int elem_size, uint compare_factor) { - register int i; + int i; double total_cost= 0.0; uint *buff_elems= buffer; /* #s of elements in each of merged sequences */ @@ -509,7 +509,7 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size, key_length); /* if piece_size is aligned reuse_freed_buffer will always hit */ uint piece_size= max_key_count_per_piece * key_length; - uint bytes_read; /* to hold return value of read_to_buffer */ + ulong bytes_read; /* to hold return value of read_to_buffer */ BUFFPEK *top; int res= 1; uint cnt_ofs= key_length - (with_counters ? sizeof(element_count) : 0); @@ -525,7 +525,7 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size, top->base= merge_buffer + (top - begin) * piece_size; top->max_keys= max_key_count_per_piece; bytes_read= read_to_buffer(file, top, key_length); - if (bytes_read == (uint) (-1)) + if (unlikely(bytes_read == (ulong) -1)) goto end; DBUG_ASSERT(bytes_read); queue_insert(&queue, (uchar *) top); @@ -554,9 +554,9 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size, memcpy(save_key_buff, old_key, key_length); old_key= save_key_buff; bytes_read= read_to_buffer(file, top, key_length); - if (bytes_read == (uint) (-1)) + if (unlikely(bytes_read == (ulong) -1)) goto end; - else if (bytes_read > 0) /* top->key, top->mem_count are reset */ + else if (bytes_read) /* top->key, top->mem_count are reset */ queue_replace_top(&queue); /* in read_to_buffer */ else { @@ -602,7 +602,7 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size, } while (--top->mem_count); bytes_read= read_to_buffer(file, top, key_length); - if (bytes_read == (uint) (-1)) + if (unlikely(bytes_read == (ulong) -1)) goto end; } while (bytes_read); diff --git a/sql/uniques.h b/sql/uniques.h index efc79953bb6..654b3692aaa 100644 --- a/sql/uniques.h +++ b/sql/uniques.h @@ -77,7 +77,7 @@ public: inline static int get_cost_calc_buff_size(size_t nkeys, uint key_size, size_t max_in_memory_size) { - register size_t max_elems_in_tree= + size_t max_elems_in_tree= max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size); return (int) (sizeof(uint)*(1 + nkeys/max_elems_in_tree)); } diff --git a/sql/unireg.cc b/sql/unireg.cc index 26f02c8983a..796101e0efb 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -118,7 +118,7 @@ vers_get_field(HA_CREATE_INFO *create_info, List &create_fields, b List_iterator it(create_fields); Create_field *sql_field = NULL; - const LString_i row_field= row_start ? create_info->vers_info.as_row.start + const Lex_ident row_field= row_start ? 
create_info->vers_info.as_row.start : create_info->vers_info.as_row.end; DBUG_ASSERT(row_field); @@ -194,7 +194,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table, error= pack_vcols(&vcols, create_fields, create_info->check_constraint_list); thd->variables.sql_mode= save_sql_mode; - if (error) + if (unlikely(error)) DBUG_RETURN(frm); if (vcols.length()) @@ -202,7 +202,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table, error= pack_header(thd, forminfo, create_fields, create_info, (ulong)data_offset, db_file); - if (error) + if (unlikely(error)) DBUG_RETURN(frm); reclength= uint2korr(forminfo+266); diff --git a/sql/vers_string.h b/sql/vers_string.h index 5460838510e..75abd40d5fa 100644 --- a/sql/vers_string.h +++ b/sql/vers_string.h @@ -17,125 +17,85 @@ #ifndef VERS_STRING_INCLUDED #define VERS_STRING_INCLUDED -struct Compare_strncmp +/* + LEX_CSTRING with comparison semantics. +*/ + +// db and table names: case sensitive (or insensitive) in table_alias_charset +struct Compare_table_names { int operator()(const LEX_CSTRING& a, const LEX_CSTRING& b) const { - return strncmp(a.str, b.str, a.length); - } - static CHARSET_INFO* charset() - { - return system_charset_info; + DBUG_ASSERT(a.str[a.length] == 0); + DBUG_ASSERT(b.str[b.length] == 0); + return my_strnncoll(table_alias_charset, + (uchar*)a.str, a.length, + (uchar*)b.str, b.length); } }; -template -struct Compare_my_strcasecmp +// column names and other identifiers: case insensitive in system_charset_info +struct Compare_identifiers { int operator()(const LEX_CSTRING& a, const LEX_CSTRING& b) const { - DBUG_ASSERT(a.str[a.length] == 0 && b.str[b.length] == 0); - return my_strcasecmp(CS, a.str, b.str); - } - static CHARSET_INFO* charset() - { - return CS; + DBUG_ASSERT(a.str[a.length] == 0); + DBUG_ASSERT(b.str[b.length] == 0); + return my_strcasecmp(system_charset_info, a.str, b.str); } }; -typedef Compare_my_strcasecmp Compare_fs; -typedef Compare_my_strcasecmp Compare_t; - -template -struct LEX_STRING_u : public Storage +class Lex_cstring : public LEX_CSTRING { - LEX_STRING_u() + public: + Lex_cstring() { - Storage::str= NULL; - Storage::length= 0; + str= NULL; + length= 0; } - LEX_STRING_u(const char *_str, size_t _len, CHARSET_INFO *) + Lex_cstring(const char *_str, size_t _len) { - Storage::str= _str; - Storage::length= _len; + str= _str; + length= _len; } - uint32 length() const + void set(const char *_str, size_t _len) { - return (uint32)Storage::length; - } - const char *ptr() const - { - return Storage::str; - } - void set(const char *_str, size_t _len, CHARSET_INFO *) - { - Storage::str= _str; - Storage::length= _len; - } - const LEX_CSTRING& lex_cstring() const - { - return *this; - } - const LEX_STRING& lex_string() const - { - return *(LEX_STRING *)this; + str= _str; + length= _len; } }; -template > -struct XString : public Storage +template +struct Lex_cstring_with_compare : public Lex_cstring { public: - XString() {} - XString(const char *_str, size_t _len) : - Storage(_str, _len, Compare::charset()) + Lex_cstring_with_compare() {} + Lex_cstring_with_compare(const char *_str, size_t _len) : + Lex_cstring(_str, _len) + { } + Lex_cstring_with_compare(const LEX_STRING src) : + Lex_cstring(src.str, src.length) + { } + Lex_cstring_with_compare(const LEX_CSTRING src) : Lex_cstring(src.str, src.length) + { } + Lex_cstring_with_compare(const char *_str) : Lex_cstring(_str, strlen(_str)) + { } + bool streq(const Lex_cstring_with_compare& b) const { - } - XString(const LEX_STRING src) : - 
Storage(src.str, src.length, Compare::charset()) - { - } - XString(const LEX_CSTRING src) : - Storage(src.str, src.length, Compare::charset()) - { - } - XString(const char *_str) : - Storage(_str, strlen(_str), Compare::charset()) - { - } - bool streq(const XString& b) const - { - return Storage::length() == b.length() && 0 == Compare()(this->lex_cstring(), b.lex_cstring()); + return Lex_cstring::length == b.length && 0 == Compare()(*this, b); } operator const char* () const { - return Storage::ptr(); - } - operator LEX_CSTRING& () const - { - return this->lex_cstring(); - } - operator LEX_STRING () const - { - LEX_STRING res; - res.str= const_cast(this->ptr()); - res.length= this->length(); - return res; + return str; } operator bool () const { - return Storage::ptr() != NULL; + return str != NULL; } }; -typedef XString<> LString; -typedef XString LString_fs; -typedef XString > LString_i; - -typedef XString SString; -typedef XString SString_fs; -typedef XString SString_t; - +typedef Lex_cstring_with_compare Lex_ident; +typedef Lex_cstring_with_compare Lex_table_name; #define XSTRING_WITH_LEN(X) (X).ptr(), (X).length() #define DB_WITH_LEN(X) (X).db.str, (X).db.length diff --git a/sql/wsrep_applier.cc b/sql/wsrep_applier.cc index a5c39887cbd..f2d90def5ef 100644 --- a/sql/wsrep_applier.cc +++ b/sql/wsrep_applier.cc @@ -98,11 +98,11 @@ static wsrep_cb_status_t wsrep_apply_events(THD* thd, DBUG_RETURN(WSREP_CB_FAILURE); } - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); thd->wsrep_query_state= QUERY_EXEC; if (thd->wsrep_conflict_state!= REPLAYING) thd->wsrep_conflict_state= NO_CONFLICT; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); if (!buf_len) WSREP_DEBUG("empty rbr buffer to apply: %lld", (long long) wsrep_thd_trx_seqno(thd)); @@ -197,9 +197,9 @@ static wsrep_cb_status_t wsrep_apply_events(THD* thd, } error: - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); thd->wsrep_query_state= QUERY_IDLE; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); assert(thd->wsrep_exec_mode== REPL_RECV); diff --git a/sql/wsrep_binlog.cc b/sql/wsrep_binlog.cc index 4efd6703d03..cafd41b2653 100644 --- a/sql/wsrep_binlog.cc +++ b/sql/wsrep_binlog.cc @@ -425,7 +425,7 @@ void wsrep_dump_rbr_direct(THD* thd, IO_CACHE* cache) break; } } while ((bytes_in_cache= my_b_fill(cache))); - if(cache->error == -1) + if (cache->error == -1) { WSREP_ERROR("RBR inconsistent"); goto cleanup; diff --git a/sql/wsrep_hton.cc b/sql/wsrep_hton.cc index 8044b7a3548..3ffd5d7706b 100644 --- a/sql/wsrep_hton.cc +++ b/sql/wsrep_hton.cc @@ -240,12 +240,12 @@ static int wsrep_rollback(handlerton *hton, THD *thd, bool all) DBUG_RETURN(0); } - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); switch (thd->wsrep_exec_mode) { case TOTAL_ORDER: case REPL_RECV: - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); WSREP_DEBUG("Avoiding wsrep rollback for failed DDL: %s", thd->query()); DBUG_RETURN(0); default: break; @@ -262,7 +262,7 @@ static int wsrep_rollback(handlerton *hton, THD *thd, bool all) } wsrep_cleanup_transaction(thd); } - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); DBUG_RETURN(0); } @@ -275,7 +275,7 @@ int wsrep_commit(handlerton *hton, THD *thd, bool all) DBUG_RETURN(0); } - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); if ((all || !thd_test_options(thd, 
OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) && (thd->variables.wsrep_on && thd->wsrep_conflict_state != MUST_REPLAY)) { @@ -306,7 +306,7 @@ int wsrep_commit(handlerton *hton, THD *thd, bool all) wsrep_cleanup_transaction(thd); } } - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); DBUG_RETURN(0); } @@ -334,20 +334,20 @@ wsrep_run_wsrep_commit(THD *thd, bool all) if (thd->wsrep_exec_mode == REPL_RECV) { - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); if (thd->wsrep_conflict_state == MUST_ABORT) { if (wsrep_debug) WSREP_INFO("WSREP: must abort for BF"); DBUG_PRINT("wsrep", ("BF apply commit fail")); thd->wsrep_conflict_state = NO_CONFLICT; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); // // TODO: test all calls of the rollback. // rollback must happen automagically innobase_rollback(hton, thd, 1); // DBUG_RETURN(WSREP_TRX_ERROR); } - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); } if (thd->wsrep_exec_mode != LOCAL_STATE) DBUG_RETURN(WSREP_TRX_OK); @@ -359,11 +359,11 @@ wsrep_run_wsrep_commit(THD *thd, bool all) DBUG_PRINT("wsrep", ("replicating commit")); - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); if (thd->wsrep_conflict_state == MUST_ABORT) { DBUG_PRINT("wsrep", ("replicate commit fail")); thd->wsrep_conflict_state = ABORTED; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); if (wsrep_debug) { WSREP_INFO("innobase_commit, abort %s", (thd->query()) ? thd->query() : "void"); @@ -385,7 +385,7 @@ wsrep_run_wsrep_commit(THD *thd, bool all) { mysql_mutex_unlock(&LOCK_wsrep_replaying); - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); mysql_mutex_lock(&thd->mysys_var->mutex); thd_proc_info(thd, "WSREP waiting on replaying"); @@ -413,7 +413,7 @@ wsrep_run_wsrep_commit(THD *thd, bool all) thd->mysys_var->current_cond= 0; mysql_mutex_unlock(&thd->mysys_var->mutex); - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); mysql_mutex_lock(&LOCK_wsrep_replaying); } mysql_mutex_unlock(&LOCK_wsrep_replaying); @@ -421,14 +421,14 @@ wsrep_run_wsrep_commit(THD *thd, bool all) if (thd->wsrep_conflict_state == MUST_ABORT) { DBUG_PRINT("wsrep", ("replicate commit fail")); thd->wsrep_conflict_state = ABORTED; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); WSREP_DEBUG("innobase_commit abort after replaying wait %s", (thd->query()) ? 
thd->query() : "void"); DBUG_RETURN(WSREP_TRX_CERT_FAIL); } thd->wsrep_query_state = QUERY_COMMITTING; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); cache = get_trans_log(thd); rcode = 0; @@ -500,10 +500,10 @@ wsrep_run_wsrep_commit(THD *thd, bool all) WSREP_DEBUG("thd: %lld seqno: %lld BF aborted by provider, will replay", (longlong) thd->thread_id, (longlong) thd->wsrep_trx_meta.gtid.seqno); - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); thd->wsrep_conflict_state = MUST_REPLAY; DBUG_ASSERT(wsrep_thd_trx_seqno(thd) > 0); - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); mysql_mutex_lock(&LOCK_wsrep_replaying); wsrep_replaying++; WSREP_DEBUG("replaying increased: %d, thd: %lld", @@ -517,7 +517,7 @@ wsrep_run_wsrep_commit(THD *thd, bool all) DBUG_RETURN(WSREP_TRX_ERROR); } - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); DEBUG_SYNC(thd, "wsrep_after_replication"); @@ -574,26 +574,26 @@ wsrep_run_wsrep_commit(THD *thd, bool all) WSREP_LOG_CONFLICT(NULL, thd, FALSE); } } - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); DBUG_RETURN(WSREP_TRX_CERT_FAIL); case WSREP_SIZE_EXCEEDED: WSREP_ERROR("transaction size exceeded"); - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); DBUG_RETURN(WSREP_TRX_SIZE_EXCEEDED); case WSREP_CONN_FAIL: WSREP_ERROR("connection failure"); - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); DBUG_RETURN(WSREP_TRX_ERROR); default: WSREP_ERROR("unknown connection failure"); - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); DBUG_RETURN(WSREP_TRX_ERROR); } thd->wsrep_query_state= QUERY_EXEC; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); DBUG_RETURN(WSREP_TRX_OK); } diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index def52cb2675..0c051d384c9 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -142,7 +142,7 @@ ulong wsrep_running_threads = 0; // # of currently running wsrep threads ulong my_bind_addr; #ifdef HAVE_PSI_INTERFACE -PSI_mutex_key key_LOCK_wsrep_rollback, key_LOCK_wsrep_thd, +PSI_mutex_key key_LOCK_wsrep_rollback, key_LOCK_wsrep_replaying, key_LOCK_wsrep_ready, key_LOCK_wsrep_sst, key_LOCK_wsrep_sst_thread, key_LOCK_wsrep_sst_init, key_LOCK_wsrep_slave_threads, key_LOCK_wsrep_desync, @@ -162,7 +162,6 @@ static PSI_mutex_info wsrep_mutexes[]= { &key_LOCK_wsrep_sst_init, "LOCK_wsrep_sst_init", PSI_FLAG_GLOBAL}, { &key_LOCK_wsrep_sst, "LOCK_wsrep_sst", PSI_FLAG_GLOBAL}, { &key_LOCK_wsrep_rollback, "LOCK_wsrep_rollback", PSI_FLAG_GLOBAL}, - { &key_LOCK_wsrep_thd, "THD::LOCK_wsrep_thd", 0}, { &key_LOCK_wsrep_replaying, "LOCK_wsrep_replaying", PSI_FLAG_GLOBAL}, { &key_LOCK_wsrep_slave_threads, "LOCK_wsrep_slave_threads", PSI_FLAG_GLOBAL}, { &key_LOCK_wsrep_desync, "LOCK_wsrep_desync", PSI_FLAG_GLOBAL}, @@ -1688,16 +1687,16 @@ int wsrep_to_isolation_begin(THD *thd, const char *db_, const char *table_, if (thd->wsrep_exec_mode == REPL_RECV) return 0; - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); if (thd->wsrep_conflict_state == MUST_ABORT) { WSREP_INFO("thread: %lld schema: %s query: %s has been aborted due to multi-master conflict", (longlong) thd->thread_id, thd->get_db(), thd->query()); - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); return 
WSREP_TRX_FAIL; } - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); DBUG_ASSERT(thd->wsrep_exec_mode == LOCAL_STATE); DBUG_ASSERT(thd->wsrep_trx_meta.gtid.seqno == WSREP_SEQNO_UNDEFINED); @@ -1812,7 +1811,7 @@ bool wsrep_grant_mdl_exception(MDL_context *requestor_ctx, const char* schema= key->db_name(); int schema_len= key->db_name_length(); - mysql_mutex_lock(&request_thd->LOCK_wsrep_thd); + mysql_mutex_lock(&request_thd->LOCK_thd_data); /* We consider granting MDL exceptions only for appliers (BF THD) and ones @@ -1836,19 +1835,19 @@ bool wsrep_grant_mdl_exception(MDL_context *requestor_ctx, if (request_thd->wsrep_exec_mode == TOTAL_ORDER || request_thd->wsrep_exec_mode == REPL_RECV) { - mysql_mutex_unlock(&request_thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&request_thd->LOCK_thd_data); WSREP_MDL_LOG(DEBUG, "MDL conflict ", schema, schema_len, request_thd, granted_thd); ticket->wsrep_report(wsrep_debug); - mysql_mutex_lock(&granted_thd->LOCK_wsrep_thd); + mysql_mutex_lock(&granted_thd->LOCK_thd_data); if (granted_thd->wsrep_exec_mode == TOTAL_ORDER || granted_thd->wsrep_exec_mode == REPL_RECV) { WSREP_MDL_LOG(INFO, "MDL BF-BF conflict", schema, schema_len, request_thd, granted_thd); ticket->wsrep_report(true); - mysql_mutex_unlock(&granted_thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); ret= true; } else if (granted_thd->lex->sql_command == SQLCOM_FLUSH || @@ -1856,7 +1855,7 @@ bool wsrep_grant_mdl_exception(MDL_context *requestor_ctx, { WSREP_DEBUG("BF thread waiting for FLUSH"); ticket->wsrep_report(wsrep_debug); - mysql_mutex_unlock(&granted_thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); ret= false; } else @@ -1881,14 +1880,14 @@ bool wsrep_grant_mdl_exception(MDL_context *requestor_ctx, ticket->wsrep_report(true); } - mysql_mutex_unlock(&granted_thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); wsrep_abort_thd((void *) request_thd, (void *) granted_thd, 1); ret= false; } } else { - mysql_mutex_unlock(&request_thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&request_thd->LOCK_thd_data); } return ret; @@ -2045,9 +2044,9 @@ static inline bool is_replaying_connection(THD *thd) { bool ret; - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); ret= (thd->wsrep_conflict_state == REPLAYING) ? true : false; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); return ret; } @@ -2057,9 +2056,9 @@ static inline bool is_committing_connection(THD *thd) { bool ret; - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); ret= (thd->wsrep_query_state == QUERY_COMMITTING) ? 
true : false; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); return ret; } @@ -2440,13 +2439,13 @@ wsrep_ws_handle_t* wsrep_thd_ws_handle(THD *thd) void wsrep_thd_LOCK(THD *thd) { - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); } void wsrep_thd_UNLOCK(THD *thd) { - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); } diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h index c6b1f42ce14..5a13575d904 100644 --- a/sql/wsrep_mysqld.h +++ b/sql/wsrep_mysqld.h @@ -266,8 +266,6 @@ extern my_bool wsrep_preordered_opt; extern handlerton *wsrep_hton; #ifdef HAVE_PSI_INTERFACE -extern PSI_mutex_key key_LOCK_wsrep_thd; -extern PSI_cond_key key_COND_wsrep_thd; extern PSI_mutex_key key_LOCK_wsrep_ready; extern PSI_mutex_key key_COND_wsrep_ready; extern PSI_mutex_key key_LOCK_wsrep_sst; diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc index e8dda53c95f..ce6d9688cb3 100644 --- a/sql/wsrep_thd.cc +++ b/sql/wsrep_thd.cc @@ -47,7 +47,7 @@ int wsrep_show_bf_aborts (THD *thd, SHOW_VAR *var, char *buff, return 0; } -/* must have (&thd->LOCK_wsrep_thd) */ +/* must have (&thd->LOCK_thd_data) */ void wsrep_client_rollback(THD *thd) { WSREP_DEBUG("client rollback due to BF abort for (%lld), query: %s", @@ -56,7 +56,7 @@ void wsrep_client_rollback(THD *thd) WSREP_ATOMIC_ADD_LONG(&wsrep_bf_aborts_counter, 1); thd->wsrep_conflict_state= ABORTING; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); trans_rollback(thd); if (thd->locked_tables_mode && thd->lock) @@ -86,7 +86,7 @@ void wsrep_client_rollback(THD *thd) (longlong) thd->thread_id); thd->clear_binlog_table_maps(); } - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); thd->wsrep_conflict_state= ABORTED; } @@ -232,7 +232,7 @@ void wsrep_replay_transaction(THD *thd) thd->get_stmt_da()->reset_diagnostics_area(); thd->wsrep_conflict_state= REPLAYING; - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); thd->reset_for_next_command(); thd->reset_killed(); @@ -272,7 +272,7 @@ void wsrep_replay_transaction(THD *thd) if (thd->wsrep_conflict_state!= REPLAYING) WSREP_WARN("lost replaying mode: %d", thd->wsrep_conflict_state ); - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); switch (rcode) { @@ -328,7 +328,7 @@ void wsrep_replay_transaction(THD *thd) /* we're now in inconsistent state, must abort */ /* http://bazaar.launchpad.net/~codership/codership-mysql/5.6/revision/3962#sql/wsrep_thd.cc */ - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); unireg_abort(1); break; @@ -495,30 +495,30 @@ static void wsrep_rollback_process(THD *thd) */ mysql_mutex_unlock(&LOCK_wsrep_rollback); - mysql_mutex_lock(&aborting->LOCK_wsrep_thd); + mysql_mutex_lock(&aborting->LOCK_thd_data); if (aborting->wsrep_conflict_state== ABORTED) { WSREP_DEBUG("WSREP, thd already aborted: %llu state: %d", (long long)aborting->real_id, aborting->wsrep_conflict_state); - mysql_mutex_unlock(&aborting->LOCK_wsrep_thd); + mysql_mutex_unlock(&aborting->LOCK_thd_data); mysql_mutex_lock(&LOCK_wsrep_rollback); continue; } aborting->wsrep_conflict_state= ABORTING; - mysql_mutex_unlock(&aborting->LOCK_wsrep_thd); + mysql_mutex_unlock(&aborting->LOCK_thd_data); set_current_thd(aborting); aborting->store_globals(); - mysql_mutex_lock(&aborting->LOCK_wsrep_thd); + mysql_mutex_lock(&aborting->LOCK_thd_data); wsrep_client_rollback(aborting); 
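The recurring edit across wsrep_applier.cc, wsrep_hton.cc, wsrep_mysqld.cc and wsrep_thd.cc above is mechanical: every acquisition of the per-THD mutex formerly called LOCK_wsrep_thd is redirected to the existing THD::LOCK_thd_data, and the dedicated PSI key for it is dropped, so the wsrep query/conflict state is guarded by the same mutex as the rest of the per-connection data. The following is a minimal standalone sketch of that locking discipline, mirroring the shape of helpers such as wsrep_thd_conflict_state(thd, sync); it uses plain std::mutex and illustrative type/field names, not MariaDB's:

#include <mutex>
#include <cstdio>

enum wsrep_conflict_state { NO_CONFLICT, MUST_ABORT, ABORTING, ABORTED, REPLAYING };

struct Thd {
  std::mutex lock_thd_data;                 // one mutex guarding per-connection state
  wsrep_conflict_state conflict_state = NO_CONFLICT;
};

// Read the conflict state, optionally taking the shared per-THD mutex,
// exactly as the patched code now does with LOCK_thd_data.
wsrep_conflict_state thd_conflict_state(Thd* thd, bool sync) {
  wsrep_conflict_state state = NO_CONFLICT;
  if (thd) {
    if (sync) thd->lock_thd_data.lock();
    state = thd->conflict_state;
    if (sync) thd->lock_thd_data.unlock();
  }
  return state;
}

int main() {
  Thd thd;
  {
    std::lock_guard<std::mutex> guard(thd.lock_thd_data);
    thd.conflict_state = MUST_ABORT;        // writers hold the same mutex
  }
  std::printf("state=%d\n", thd_conflict_state(&thd, true));
}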
WSREP_DEBUG("WSREP rollbacker aborted thd: (%lld %lld)", (longlong) aborting->thread_id, (longlong) aborting->real_id); - mysql_mutex_unlock(&aborting->LOCK_wsrep_thd); + mysql_mutex_unlock(&aborting->LOCK_thd_data); set_current_thd(thd); thd->store_globals(); @@ -558,10 +558,10 @@ enum wsrep_conflict_state wsrep_thd_conflict_state(THD *thd, my_bool sync) enum wsrep_conflict_state state = NO_CONFLICT; if (thd) { - if (sync) mysql_mutex_lock(&thd->LOCK_wsrep_thd); + if (sync) mysql_mutex_lock(&thd->LOCK_thd_data); state = thd->wsrep_conflict_state; - if (sync) mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + if (sync) mysql_mutex_unlock(&thd->LOCK_thd_data); } return state; } @@ -585,12 +585,12 @@ my_bool wsrep_thd_is_BF(THD *thd, my_bool sync) if (wsrep_thd_is_wsrep(thd)) { if (sync) - mysql_mutex_lock(&thd->LOCK_wsrep_thd); + mysql_mutex_lock(&thd->LOCK_thd_data); status = ((thd->wsrep_exec_mode == REPL_RECV) || (thd->wsrep_exec_mode == TOTAL_ORDER)); if (sync) - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + mysql_mutex_unlock(&thd->LOCK_thd_data); } } return status; @@ -603,12 +603,12 @@ my_bool wsrep_thd_is_BF_or_commit(void *thd_ptr, my_bool sync) if (thd_ptr) { THD* thd = (THD*)thd_ptr; - if (sync) mysql_mutex_lock(&thd->LOCK_wsrep_thd); + if (sync) mysql_mutex_lock(&thd->LOCK_thd_data); status = ((thd->wsrep_exec_mode == REPL_RECV) || (thd->wsrep_exec_mode == TOTAL_ORDER) || (thd->wsrep_exec_mode == LOCAL_COMMIT)); - if (sync) mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + if (sync) mysql_mutex_unlock(&thd->LOCK_thd_data); } return status; } @@ -620,10 +620,10 @@ my_bool wsrep_thd_is_local(void *thd_ptr, my_bool sync) if (thd_ptr) { THD* thd = (THD*)thd_ptr; - if (sync) mysql_mutex_lock(&thd->LOCK_wsrep_thd); + if (sync) mysql_mutex_lock(&thd->LOCK_thd_data); status = (thd->wsrep_exec_mode == LOCAL_STATE); - if (sync) mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + if (sync) mysql_mutex_unlock(&thd->LOCK_thd_data); } return status; } diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index 08d5220cfca..42d2c219ebf 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -898,18 +898,19 @@ int ha_archive::real_write_row(uchar *buf, azio_stream *writer) the bytes required for the length in the header. 
*/ -uint32 ha_archive::max_row_length(const uchar *buf) +uint32 ha_archive::max_row_length(const uchar *record) { uint32 length= (uint32)(table->s->reclength + table->s->fields*2); length+= ARCHIVE_ROW_HEADER_SIZE; + my_ptrdiff_t const rec_offset= record - table->record[0]; uint *ptr, *end; for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ; ptr != end ; ptr++) { - if (!table->field[*ptr]->is_null()) - length += 2 + ((Field_blob*)table->field[*ptr])->get_length(); + if (!table->field[*ptr]->is_null(rec_offset)) + length += 2 + ((Field_blob*)table->field[*ptr])->get_length(rec_offset); } return length; @@ -919,10 +920,9 @@ uint32 ha_archive::max_row_length(const uchar *buf) unsigned int ha_archive::pack_row(uchar *record, azio_stream *writer) { uchar *ptr; - + my_ptrdiff_t const rec_offset= record - table->record[0]; DBUG_ENTER("ha_archive::pack_row"); - if (fix_rec_buff(max_row_length(record))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */ @@ -936,7 +936,7 @@ unsigned int ha_archive::pack_row(uchar *record, azio_stream *writer) for (Field **field=table->field ; *field ; field++) { - if (!((*field)->is_null())) + if (!((*field)->is_null(rec_offset))) ptr= (*field)->pack(ptr, record + (*field)->offset(record)); } diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index 7c222eb3c80..490ca3a5fba 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -427,7 +427,7 @@ int TXTFAM::DeleteSortedRows(PGLOBAL g) for (i = 0; i < Posar->GetNval(); i++) { if ((irc = InitDelete(g, Posar->GetIntValue(ix[i]), - Sosar->GetIntValue(ix[i])) == RC_FX)) + Sosar->GetIntValue(ix[i]))) == RC_FX) goto err; // Now delete the sorted rows diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 1bb689000f8..0a483b22cf3 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -108,13 +108,8 @@ #define MYSQL_SERVER 1 #define DONT_DEFINE_VOID #include -#include "sql_class.h" -#include "create_options.h" -#include "mysql_com.h" -#include "field.h" #include "sql_parse.h" #include "sql_base.h" -#include #include "sql_partition.h" #undef OFFSET @@ -238,8 +233,6 @@ uint GetWorkSize(void); void SetWorkSize(uint); extern "C" const char *msglang(void); -static char *strz(PGLOBAL g, LEX_STRING &ls); - static void PopUser(PCONNECT xp); static PCONNECT GetUser(THD *thd, PCONNECT xp); static PGLOBAL GetPlug(THD *thd, PCONNECT& lxp); @@ -5571,7 +5564,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, } // endif p } else if (ttp != TAB_ODBC || !(fnc & (FNC_TABLE | FNC_COL))) - tab = (char *) table_s->table_name.str; // Default value + tab = (char*)table_s->table_name.str; // Default value } // endif tab @@ -6882,12 +6875,12 @@ bool ha_connect::NoFieldOptionChange(TABLE *tab) @retval HA_ALTER_ERROR Unexpected error. @retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported, must use copy. @retval HA_ALTER_INPLACE_EXCLUSIVE_LOCK Supported, but requires X lock. - @retval HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE + @retval HA_ALTER_INPLACE_COPY_LOCK Supported, but requires SNW lock during main phase. Prepare phase requires X lock. @retval HA_ALTER_INPLACE_SHARED_LOCK Supported, but requires SNW lock. - @retval HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE + @retval HA_ALTER_INPLACE_COPY_NO_LOCK Supported, concurrent reads/writes allowed. However, prepare phase requires X lock. 
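The ha_archive.cc hunk above stops assuming that the row being packed is table->record[0]: it derives rec_offset = record - table->record[0] and passes it to is_null() and get_length(), so blob null flags and lengths are read from the buffer that was actually handed in. Below is a minimal sketch of that re-basing idea with a toy field descriptor instead of the real Field/TABLE classes; all names and the row layout are illustrative:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Toy field descriptor: a null-flag byte and a length byte, located by offsets
// into some row buffer. The real Field objects are anchored at table->record[0];
// adding a row offset re-bases the access, which is what is_null(rec_offset)
// and get_length(rec_offset) do in the hunk above.
struct ToyBlobField {
  std::size_t null_byte;
  std::size_t len_byte;

  bool is_null(const std::uint8_t* base_row, std::ptrdiff_t rec_offset) const {
    return base_row[null_byte + rec_offset] != 0;
  }
  unsigned length(const std::uint8_t* base_row, std::ptrdiff_t rec_offset) const {
    return base_row[len_byte + rec_offset];
  }
};

int main() {
  std::uint8_t rows[16] = {};            // one allocation holding both row images,
  std::uint8_t* record0 = rows;          // like TABLE::record[0] / record[1]
  std::uint8_t* record1 = rows + 8;
  record0[1] = 5;                        // current row: blob not null, length 5
  record1[0] = 1;                        // old row: blob is null

  ToyBlobField blob{0, 1};
  std::ptrdiff_t rec_offset = record1 - record0;   // re-base onto the old row
  std::printf("old row blob null=%d\n", (int) blob.is_null(record0, rec_offset));
  std::printf("current row blob length=%u\n", blob.length(record0, 0));
}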
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index ca51712af0c..3cc541c608f 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -41,8 +41,6 @@ static PJSON JsonNew(PGLOBAL g, JTYP type); static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp = NULL); static PJSNX JsnxNew(PGLOBAL g, PJSON jsp, int type, int len = 64); -void json_array_deinit(UDF_INIT* initid); - static uint JsonGrpSize = 10; /*********************************************************************************/ @@ -1622,7 +1620,7 @@ static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n, if (AllocSarea(g, ml)) { char errmsg[MAX_STR]; - snprintf(errmsg, sizeof(errmsg)-1, MSG(WORK_AREA), g->Message); + snprintf(errmsg, sizeof(errmsg) - 1, MSG(WORK_AREA), g->Message); strcpy(g->Message, errmsg); return true; } // endif SareaAlloc diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc index dd04d667e58..5aef6d9c660 100644 --- a/storage/connect/mycat.cc +++ b/storage/connect/mycat.cc @@ -18,7 +18,7 @@ /* ------------- */ /* Version 1.6 */ /* */ -/* Author: Olivier Bertrand 2012 - 2017 */ +/* Author: Olivier Bertrand 2012 - 2018 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -31,10 +31,7 @@ #define DONT_DEFINE_VOID #include -#if defined(__WIN__) -//#include -//#include -#elif defined(UNIX) +#if defined(UNIX) #include #include #endif diff --git a/storage/connect/mysql-test/connect/r/grant.result b/storage/connect/mysql-test/connect/r/grant.result index 118d75408be..c43ad3a5b79 100644 --- a/storage/connect/mysql-test/connect/r/grant.result +++ b/storage/connect/mysql-test/connect/r/grant.result @@ -26,7 +26,7 @@ fname VARCHAR(256) NOT NULL, ftype CHAR(4) NOT NULL, size DOUBLE(12,0) NOT NULL flag=5 ) ENGINE=CONNECT TABLE_TYPE=DIR FILE_NAME='*.*'; -SELECT fname, ftype, size FROM t1 WHERE size>0; +SELECT fname, ftype, size FROM t1 WHERE size>0 AND ftype!='.opt'; fname ftype size t1 .frm 1081 connection user; diff --git a/storage/connect/mysql-test/connect/t/grant.test b/storage/connect/mysql-test/connect/t/grant.test index 738f156d8a4..c4a91904e73 100644 --- a/storage/connect/mysql-test/connect/t/grant.test +++ b/storage/connect/mysql-test/connect/t/grant.test @@ -28,7 +28,7 @@ CREATE TABLE t1 ( ) ENGINE=CONNECT TABLE_TYPE=DIR FILE_NAME='*.*'; # "size>0" to skip directory names on Windows --replace_result $MYSQLD_DATADIR DATADIR/ -SELECT fname, ftype, size FROM t1 WHERE size>0; +SELECT fname, ftype, size FROM t1 WHERE size>0 AND ftype!='.opt'; --connection user SELECT user(); diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp index ac102d03208..887527e38ab 100644 --- a/storage/connect/plugutil.cpp +++ b/storage/connect/plugutil.cpp @@ -559,7 +559,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) if (trace(1)) htrc("PlugSubAlloc: %s\n", g->Message); - abort(); + throw 1234; } /* endif size OS32 code */ /*********************************************************************/ diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp index 7be6c0e8328..139e4199ed9 100644 --- a/storage/connect/tabext.cpp +++ b/storage/connect/tabext.cpp @@ -286,7 +286,7 @@ bool TDBEXT::MakeSrcdef(PGLOBAL g) char *catp = strstr(Srcdef, "%s"); if (catp) { - char *fil1= 0, *fil2; + char *fil1 = 0, *fil2; PCSZ ph = ((EXTDEF*)To_Def)->Phpos; if (!ph) diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp index adbfb2168ae..275b5edaeae 100644 --- a/storage/connect/tabjdbc.cpp +++ 
b/storage/connect/tabjdbc.cpp @@ -572,7 +572,7 @@ bool TDBJDBC::OpenDB(PGLOBAL g) if (Memory < 3) { // Method will depend on cursor type - if ((Rbuf = Jcp->Rewind(Query->GetStr())) < 0) + if ((Rbuf = Query ? Jcp->Rewind(Query->GetStr()) : 0) < 0) if (Mode != MODE_READX) { Jcp->Close(); return true; diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 6151d924a9f..9e4f5ab987d 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -195,7 +195,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL); if (!(tdp->Database = SetPath(g, db))) - return 0ULL; + return 0; tdp->Objname = GetStringTableOption(g, topt, "Object", NULL); tdp->Base = GetIntegerTableOption(g, topt, "Base", 0) ? 1 : 0; @@ -243,14 +243,14 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) tjsp = new(g) TDBJSON(tdp, new(g) MAPFAM(tdp)); if (tjsp->MakeDocument(g)) - return 0ULL; + return 0; jsp = (tjsp->GetDoc()) ? tjsp->GetDoc()->GetValue(0) : NULL; } else { if (!(tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0))) if (!mgo) { sprintf(g->Message, "LRECL must be specified for pretty=%d", tdp->Pretty); - return 0ULL; + return 0; } else tdp->Lrecl = 8192; // Should be enough @@ -269,14 +269,14 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) tjnp = new(g) TDBJSN(tdp, new(g) CMGFAM(tdp)); #else sprintf(g->Message, "Mongo %s Driver not available", "C"); - return 0ULL; + return 0; #endif } else if (tdp->Driver && toupper(*tdp->Driver) == 'J') { #if defined(JAVA_SUPPORT) tjnp = new(g) TDBJSN(tdp, new(g) JMGFAM(tdp)); #else sprintf(g->Message, "Mongo %s Driver not available", "Java"); - return 0ULL; + return 0; #endif } else { // Driver not specified #if defined(CMGO_SUPPORT) @@ -285,7 +285,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) tjnp = new(g) TDBJSN(tdp, new(g) JMGFAM(tdp)); #else sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO"); - return 0ULL; + return 0; #endif } // endif Driver @@ -304,7 +304,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) tjnp->SetG(G); if (tjnp->OpenDB(g)) - return 0ULL; + return 0; switch (tjnp->ReadDB(g)) { case RC_EF: diff --git a/storage/federatedx/federatedx_io_mysql.cc b/storage/federatedx/federatedx_io_mysql.cc index a83e54513d3..cc4d8ca7c70 100644 --- a/storage/federatedx/federatedx_io_mysql.cc +++ b/storage/federatedx/federatedx_io_mysql.cc @@ -453,7 +453,8 @@ int federatedx_io_mysql::actual_query(const char *buffer, size_t length) mysql.reconnect= 1; } - error= mysql_real_query(&mysql, buffer, (ulong)length); + if (!(error= mysql_real_query(&mysql, STRING_WITH_LEN("set time_zone='+00:00'")))) + error= mysql_real_query(&mysql, buffer, (ulong)length); DBUG_RETURN(error); } diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc index 8c21906fe84..57cf66f4b18 100644 --- a/storage/federatedx/ha_federatedx.cc +++ b/storage/federatedx/ha_federatedx.cc @@ -318,6 +318,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
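The FederatedX hunks that follow make TIMESTAMP handling explicit: actual_query() first issues set time_zone='+00:00' on the remote connection, a lazily initialized Time_zone* UTC is looked up via my_tz_find() in get_share(), and every row or key conversion temporarily switches table->in_use->variables.time_zone to UTC and restores it afterwards, so timestamp values cross the wire in one well-defined zone. A small standalone sketch of that save/switch/restore pattern is given here as an RAII guard (the MariaDB code does the three steps by hand, and the names below are illustrative):

#include <cstdio>

struct TimeZone { const char* name; };

static TimeZone utc{"+00:00"};
static TimeZone session_default{"+03:00"};

struct SessionVars { TimeZone* time_zone = &session_default; };

// Scoped switch: save the session time zone, install UTC, restore on exit.
// The patched code performs the same steps around convert_row_to_internal_format(),
// create_where_from_key(), write_row(), update_row() and delete_row().
class ScopedTimeZone {
  SessionVars& vars_;
  TimeZone* saved_;
public:
  ScopedTimeZone(SessionVars& vars, TimeZone* tz)
    : vars_(vars), saved_(vars.time_zone) { vars_.time_zone = tz; }
  ~ScopedTimeZone() { vars_.time_zone = saved_; }
};

int main() {
  SessionVars session;
  {
    ScopedTimeZone guard(session, &utc);
    std::printf("while converting rows: %s\n", session.time_zone->name);
  }
  std::printf("after: %s\n", session.time_zone->name);
}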
#include "sql_servers.h" #include "sql_analyse.h" // append_escaped() #include "sql_show.h" // append_identifier() +#include "tztime.h" // my_tz_find() #ifdef I_AM_PARANOID #define MIN_PORT 1023 @@ -340,6 +341,8 @@ static const uint sizeof_trailing_comma= sizeof(", ") - 1; static const uint sizeof_trailing_and= sizeof(" AND ") - 1; static const uint sizeof_trailing_where= sizeof(" WHERE ") - 1; +static Time_zone *UTC= 0; + /* Static declaration for handerton */ static handler *federatedx_create_handler(handlerton *hton, TABLE_SHARE *table, @@ -860,8 +863,10 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record, Field **field; int column= 0; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + Time_zone *saved_time_zone= table->in_use->variables.time_zone; DBUG_ENTER("ha_federatedx::convert_row_to_internal_format"); + table->in_use->variables.time_zone= UTC; lengths= io->fetch_lengths(result); for (field= table->field; *field; field++, column++) @@ -885,6 +890,7 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record, } (*field)->move_field_offset(-old_ptr); } + table->in_use->variables.time_zone= saved_time_zone; dbug_tmp_restore_column_map(table->write_set, old_map); DBUG_RETURN(0); } @@ -1213,6 +1219,7 @@ bool ha_federatedx::create_where_from_key(String *to, char tmpbuff[FEDERATEDX_QUERY_BUFFER_SIZE]; String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info); const key_range *ranges[2]= { start_key, end_key }; + Time_zone *saved_time_zone= table->in_use->variables.time_zone; my_bitmap_map *old_map; DBUG_ENTER("ha_federatedx::create_where_from_key"); @@ -1220,6 +1227,7 @@ bool ha_federatedx::create_where_from_key(String *to, if (start_key == NULL && end_key == NULL) DBUG_RETURN(1); + table->in_use->variables.time_zone= UTC; old_map= dbug_tmp_use_all_columns(table, table->write_set); for (uint i= 0; i <= 1; i++) { @@ -1397,6 +1405,7 @@ prepare_for_next_key_part: } } dbug_tmp_restore_column_map(table->write_set, old_map); + table->in_use->variables.time_zone= saved_time_zone; if (both_not_null) if (tmp.append(STRING_WITH_LEN(") "))) @@ -1412,6 +1421,7 @@ prepare_for_next_key_part: err: dbug_tmp_restore_column_map(table->write_set, old_map); + table->in_use->variables.time_zone= saved_time_zone; DBUG_RETURN(1); } @@ -1582,6 +1592,12 @@ static FEDERATEDX_SHARE *get_share(const char *table_name, TABLE *table) mysql_mutex_lock(&federatedx_mutex); + if (unlikely(!UTC)) + { + String tz_00_name(STRING_WITH_LEN("+00:00"), &my_charset_bin); + UTC= my_tz_find(current_thd, &tz_00_name); + } + tmp_share.share_key= table_name; tmp_share.share_key_length= (int)strlen(table_name); if (parse_url(&mem_root, &tmp_share, table->s, 0)) @@ -1978,9 +1994,11 @@ int ha_federatedx::write_row(uchar *buf) String insert_field_value_string(insert_field_value_buffer, sizeof(insert_field_value_buffer), &my_charset_bin); + Time_zone *saved_time_zone= table->in_use->variables.time_zone; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); DBUG_ENTER("ha_federatedx::write_row"); + table->in_use->variables.time_zone= UTC; values_string.length(0); insert_field_value_string.length(0); @@ -2033,6 +2051,7 @@ int ha_federatedx::write_row(uchar *buf) } } dbug_tmp_restore_column_map(table->read_set, old_map); + table->in_use->variables.time_zone= saved_time_zone; /* if there were no fields, we don't want to add a closing paren @@ -2340,6 +2359,8 @@ int ha_federatedx::update_row(const uchar *old_data, const uchar *new_data) field=oldvalue */ + Time_zone 
*saved_time_zone= table->in_use->variables.time_zone; + table->in_use->variables.time_zone= UTC; for (Field **field= table->field; *field; field++) { if (bitmap_is_set(table->write_set, (*field)->field_index)) @@ -2391,6 +2412,7 @@ int ha_federatedx::update_row(const uchar *old_data, const uchar *new_data) where_string.append(STRING_WITH_LEN(" AND ")); } } + table->in_use->variables.time_zone= saved_time_zone; /* Remove last ', '. This works as there must be at least on updated field */ update_string.length(update_string.length() - sizeof_trailing_comma); @@ -2451,6 +2473,8 @@ int ha_federatedx::delete_row(const uchar *buf) share->table_name_length, ident_quote_char); delete_string.append(STRING_WITH_LEN(" WHERE ")); + Time_zone *saved_time_zone= table->in_use->variables.time_zone; + table->in_use->variables.time_zone= UTC; for (Field **field= table->field; *field; field++) { Field *cur_field= *field; @@ -2478,6 +2502,7 @@ int ha_federatedx::delete_row(const uchar *buf) delete_string.append(STRING_WITH_LEN(" AND ")); } } + table->in_use->variables.time_zone= saved_time_zone; // Remove trailing AND delete_string.length(delete_string.length() - sizeof_trailing_and); @@ -3216,7 +3241,10 @@ int ha_federatedx::delete_all_rows() query.length(0); query.set_charset(system_charset_info); - query.append(STRING_WITH_LEN("TRUNCATE ")); + if (thd->lex->sql_command == SQLCOM_TRUNCATE) + query.append(STRING_WITH_LEN("TRUNCATE ")); + else + query.append(STRING_WITH_LEN("DELETE FROM ")); append_ident(&query, share->table_name, share->table_name_length, ident_quote_char); @@ -3583,6 +3611,8 @@ int ha_federatedx::discover_assisted(handlerton *hton, THD* thd, MYSQL mysql; char buf[1024]; String query(buf, sizeof(buf), cs); + static LEX_CSTRING cut_clause={STRING_WITH_LEN(" WITH SYSTEM VERSIONING")}; + int cut_offset; MYSQL_RES *res; MYSQL_ROW rdata; ulong *rlen; @@ -3593,8 +3623,7 @@ int ha_federatedx::discover_assisted(handlerton *hton, THD* thd, mysql_init(&mysql); mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, cs->csname); - mysql_options(&mysql, MYSQL_OPT_USE_THREAD_SPECIFIC_MEMORY, - (char*) &my_true); + mysql_options(&mysql, MYSQL_OPT_USE_THREAD_SPECIFIC_MEMORY, (char*)&my_true); if (!mysql_real_connect(&mysql, tmp_share.hostname, tmp_share.username, tmp_share.password, tmp_share.database, @@ -3618,6 +3647,10 @@ int ha_federatedx::discover_assisted(handlerton *hton, THD* thd, goto err2; query.copy(rdata[1], rlen[1], cs); + cut_offset= (int)query.length() - (int)cut_clause.length; + if (cut_offset > 0 && !memcmp(query.ptr() + cut_offset, + cut_clause.str, cut_clause.length)) + query.length(cut_offset); query.append(STRING_WITH_LEN(" CONNECTION='"), cs); query.append_for_single_quote(table_s->connect_string.str, table_s->connect_string.length); diff --git a/storage/heap/heapdef.h b/storage/heap/heapdef.h index a20fe6836a2..4fcdffb66b1 100644 --- a/storage/heap/heapdef.h +++ b/storage/heap/heapdef.h @@ -96,7 +96,7 @@ extern uint hp_rb_key_length(HP_KEYDEF *keydef, const uchar *key); extern uint hp_rb_null_key_length(HP_KEYDEF *keydef, const uchar *key); extern uint hp_rb_var_key_length(HP_KEYDEF *keydef, const uchar *key); extern my_bool hp_if_null_in_key(HP_KEYDEF *keyinfo, const uchar *record); -extern int hp_close(register HP_INFO *info); +extern int hp_close(HP_INFO *info); extern void hp_clear(HP_SHARE *info); extern void hp_clear_keys(HP_SHARE *info); extern uint hp_rb_pack_key(HP_KEYDEF *keydef, uchar *key, const uchar *old, diff --git a/storage/heap/hp_hash.c b/storage/heap/hp_hash.c index 
35a3cd20ee8..847483ba9bf 100644 --- a/storage/heap/hp_hash.c +++ b/storage/heap/hp_hash.c @@ -754,7 +754,6 @@ uint hp_rb_make_key(HP_KEYDEF *keydef, uchar *key, uchar *pos= (uchar*) rec + seg->start; DBUG_ASSERT(seg->type != HA_KEYTYPE_BIT); -#ifdef HAVE_ISNAN if (seg->type == HA_KEYTYPE_FLOAT) { float nr; @@ -778,7 +777,6 @@ uint hp_rb_make_key(HP_KEYDEF *keydef, uchar *key, continue; } } -#endif pos+= length; while (length--) { diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt index e588fae47e5..e0bc7006770 100644 --- a/storage/innobase/CMakeLists.txt +++ b/storage/innobase/CMakeLists.txt @@ -153,7 +153,7 @@ MYSQL_ADD_PLUGIN(innobase ${INNOBASE_SOURCES} STORAGE_ENGINE DEFAULT RECOMPILE_FOR_EMBEDDED LINK_LIBRARIES ${ZLIB_LIBRARY} - ${CRC32_VPMSUM_LIBRARY} + ${CRC32_LIBRARY} ${NUMA_LIBRARY} ${LIBSYSTEMD} ${LINKER_SCRIPT}) @@ -179,6 +179,7 @@ IF(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") mtr/mtr0mtr.cc row/row0merge.cc row/row0mysql.cc + row/row0trunc.cc srv/srv0srv.cc COMPILE_FLAGS "-O0" ) diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index 07cec1844a6..31e9a4cad1c 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -207,7 +207,7 @@ btr_root_fseg_validate( ut_a(mach_read_from_4(seg_header + FSEG_HDR_SPACE) == space); ut_a(offset >= FIL_PAGE_DATA); - ut_a(offset <= UNIV_PAGE_SIZE - FIL_PAGE_DATA_END); + ut_a(offset <= srv_page_size - FIL_PAGE_DATA_END); return(TRUE); } #endif /* UNIV_BTR_DEBUG */ @@ -336,7 +336,7 @@ btr_root_fseg_adjust_on_import( ulint offset = mach_read_from_2(seg_header + FSEG_HDR_OFFSET); if (offset < FIL_PAGE_DATA - || offset > UNIV_PAGE_SIZE - FIL_PAGE_DATA_END) { + || offset > srv_page_size - FIL_PAGE_DATA_END) { return(FALSE); @@ -435,7 +435,7 @@ btr_page_create( { page_t* page = buf_block_get_frame(block); - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); if (page_zip) { page_create_zip(block, index, level, 0, NULL, mtr); @@ -712,7 +712,7 @@ btr_page_free_for_ibuf( { page_t* root; - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); root = btr_root_get(index, mtr); flst_add_first(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, @@ -738,7 +738,7 @@ btr_page_free_low( fseg_header_t* seg_header; page_t* root; - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); /* The page gets invalid for optimistic searches: increment the frame modify clock */ @@ -755,7 +755,7 @@ btr_page_free_low( // TODO(jonaso): scrub only what is actually needed page_t* page = buf_block_get_frame(block); memset(page + PAGE_HEADER, 0, - UNIV_PAGE_SIZE - PAGE_HEADER); + srv_page_size - PAGE_HEADER); #ifdef UNIV_DEBUG_SCRUBBING fprintf(stderr, "btr_page_free_low: scrub blob page %lu/%lu\n", @@ -853,7 +853,7 @@ btr_page_free_low( /* The page was marked free in the allocation bitmap, but it should remain buffer-fixed until mtr_commit(mtr) or until it is explicitly freed from the mini-transaction. */ - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); /* TODO: Discard any operations on the page from the redo log and remove the block from the flush list and the buffer pool. 
This would free up buffer pool earlier and reduce writes to @@ -1257,7 +1257,7 @@ btr_create( /* Not enough space for new segment, free root segment before return. */ btr_free_root(block, mtr); - if (!dict_table_is_temporary(index->table)) { + if (!index->table->is_temporary()) { btr_free_root_invalidate(block, mtr); } @@ -1332,7 +1332,7 @@ btr_create( Note: Insert Buffering is disabled for temporary tables given that most temporary tables are smaller in size and short-lived. */ if (!(type & DICT_CLUSTERED) - && (index == NULL || !dict_table_is_temporary(index->table))) { + && (index == NULL || !index->table->is_temporary())) { ibuf_reset_free_bits(block); } @@ -1496,7 +1496,7 @@ ib_uint64_t btr_read_autoinc_with_fallback(const dict_table_t* table, unsigned col_no) { ut_ad(table->persistent_autoinc); - ut_ad(!dict_table_is_temporary(table)); + ut_ad(!table->is_temporary()); dict_index_t* index = dict_table_get_first_index(table); @@ -1599,7 +1599,7 @@ btr_page_reorganize_low( bool log_compressed; bool is_spatial; - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); btr_assert_not_corrupted(block, index); #ifdef UNIV_ZIP_DEBUG ut_a(!page_zip || page_zip_validate(page_zip, page, index)); @@ -1654,7 +1654,7 @@ btr_page_reorganize_low( During redo log apply, dict_index_is_sec_or_ibuf() always holds, even for clustered indexes. */ - ut_ad(recovery || dict_table_is_temporary(index->table) + ut_ad(recovery || index->table->is_temporary() || !page_is_leaf(temp_page) || !dict_index_is_sec_or_ibuf(index) || page_get_max_trx_id(page) != 0); @@ -1684,18 +1684,18 @@ btr_page_reorganize_low( ut_a(!memcmp(PAGE_HEADER + PAGE_N_RECS + page, PAGE_HEADER + PAGE_N_RECS + temp_page, PAGE_DATA - (PAGE_HEADER + PAGE_N_RECS))); - ut_a(!memcmp(UNIV_PAGE_SIZE - FIL_PAGE_DATA_END + page, - UNIV_PAGE_SIZE - FIL_PAGE_DATA_END + temp_page, + ut_a(!memcmp(srv_page_size - FIL_PAGE_DATA_END + page, + srv_page_size - FIL_PAGE_DATA_END + temp_page, FIL_PAGE_DATA_END)); #endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */ memcpy(PAGE_HEADER + page, PAGE_HEADER + temp_page, PAGE_N_RECS - PAGE_N_DIR_SLOTS); memcpy(PAGE_DATA + page, PAGE_DATA + temp_page, - UNIV_PAGE_SIZE - PAGE_DATA - FIL_PAGE_DATA_END); + srv_page_size - PAGE_DATA - FIL_PAGE_DATA_END); #if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG - ut_a(!memcmp(page, temp_page, UNIV_PAGE_SIZE)); + ut_a(!memcmp(page, temp_page, srv_page_size)); #endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */ goto func_exit; @@ -1905,7 +1905,7 @@ btr_page_empty( { page_t* page = buf_block_get_frame(block); - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); ut_ad(page_zip == buf_block_get_page_zip(block)); #ifdef UNIV_ZIP_DEBUG ut_a(!page_zip || page_zip_validate(page_zip, page, index)); @@ -1995,8 +1995,7 @@ btr_root_raise_and_insert( ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK)); - ut_ad(mtr_is_block_fix( - mtr, root_block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, root_block, MTR_MEMO_PAGE_X_FIX)); /* Allocate a new page to the tree. 
Root splitting is done by first moving the root records to the new page, emptying the root, putting @@ -2103,7 +2102,7 @@ btr_root_raise_and_insert( rtr_page_cal_mbr(index, new_block, &new_mbr, *heap); node_ptr = rtr_index_build_node_ptr( - index, &new_mbr, rec, new_page_no, *heap, level); + index, &new_mbr, rec, new_page_no, *heap); } else { node_ptr = dict_index_build_node_ptr( index, rec, new_page_no, *heap, level); @@ -2153,7 +2152,7 @@ btr_root_raise_and_insert( /* We play safe and reset the free bits for the new page */ if (!dict_index_is_clust(index) - && !dict_table_is_temporary(index->table)) { + && !index->table->is_temporary()) { ibuf_reset_free_bits(new_block); } @@ -2325,7 +2324,7 @@ btr_page_get_split_rec( /* free_space is now the free space of a created new page */ total_data = page_get_data_size(page) + insert_size; - total_n_recs = page_get_n_recs(page) + 1; + total_n_recs = ulint(page_get_n_recs(page)) + 1; ut_ad(total_n_recs >= 2); total_space = total_data + page_dir_calc_reserved_space(total_n_recs); @@ -2436,7 +2435,7 @@ btr_page_insert_fits( /* free_space is now the free space of a created new page */ total_data = page_get_data_size(page) + insert_size; - total_n_recs = page_get_n_recs(page) + 1; + total_n_recs = ulint(page_get_n_recs(page)) + 1; /* We determine which records (from rec to end_rec, not including end_rec) will end up on the other half page from tuple when it is @@ -2608,9 +2607,8 @@ btr_attach_half_pages( buf_block_t* prev_block = NULL; buf_block_t* next_block = NULL; - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); - ut_ad(mtr_is_block_fix( - mtr, new_block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains(mtr, new_block, MTR_MEMO_PAGE_X_FIX)); /* Create a memory heap where the data tuple is stored */ heap = mem_heap_create(1024); @@ -2794,8 +2792,7 @@ btr_insert_into_right_sibling( ut_ad(mtr_memo_contains_flagged( mtr, dict_index_get_lock(cursor->index), MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK)); - ut_ad(mtr_is_block_fix( - mtr, block, MTR_MEMO_PAGE_X_FIX, cursor->index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); ut_ad(heap); if (next_page_no == FIL_NULL || !page_rec_is_supremum( @@ -2842,7 +2839,7 @@ btr_insert_into_right_sibling( if (is_leaf && next_block->page.size.is_compressed() && !dict_index_is_clust(cursor->index) - && !dict_table_is_temporary(cursor->index->table)) { + && !cursor->index->table->is_temporary()) { /* Reset the IBUF_BITMAP_FREE bits, because page_cur_tuple_insert() will have attempted page reorganize before failing. */ @@ -2884,7 +2881,7 @@ btr_insert_into_right_sibling( if (is_leaf && !dict_index_is_clust(cursor->index) - && !dict_table_is_temporary(cursor->index->table)) { + && !cursor->index->table->is_temporary()) { /* Update the free bits of the B-tree page in the insert buffer bitmap. 
*/ @@ -2977,8 +2974,7 @@ func_start: page = buf_block_get_frame(block); page_zip = buf_block_get_page_zip(block); - ut_ad(mtr_is_block_fix( - mtr, block, MTR_MEMO_PAGE_X_FIX, cursor->index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); ut_ad(!page_is_empty(page)); /* try to insert to the next page if possible before split */ @@ -3297,7 +3293,7 @@ insert_empty: insert_failed: /* We play safe and reset the free bits for new_page */ if (!dict_index_is_clust(cursor->index) - && !dict_table_is_temporary(cursor->index->table)) { + && !cursor->index->table->is_temporary()) { ibuf_reset_free_bits(new_block); ibuf_reset_free_bits(block); } @@ -3315,7 +3311,7 @@ func_exit: left and right pages in the same mtr */ if (!dict_index_is_clust(cursor->index) - && !dict_table_is_temporary(cursor->index->table) + && !cursor->index->table->is_temporary() && page_is_leaf(page)) { ibuf_update_free_bits_for_two_pages_low( @@ -3350,7 +3346,7 @@ btr_level_list_remove_func( { ut_ad(page != NULL); ut_ad(mtr != NULL); - ut_ad(mtr_is_page_fix(mtr, page, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX)); ut_ad(space == page_get_space_id(page)); /* Get the previous and next page numbers of page */ @@ -3484,7 +3480,7 @@ btr_node_ptr_delete( ibool compressed; dberr_t err; - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); /* Delete node pointer on father page */ btr_page_get_father(index, block, mtr, &cursor); @@ -3526,7 +3522,7 @@ btr_lift_page_up( buf_block_t* block_orig = block; ut_ad(!page_has_siblings(page)); - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); page_level = btr_page_get_level(page); root_page_no = dict_index_get_page(index); @@ -3536,7 +3532,8 @@ btr_lift_page_up( ulint* offsets = NULL; mem_heap_t* heap = mem_heap_create( sizeof(*offsets) - * (REC_OFFS_HEADER_SIZE + 1 + 1 + index->n_fields)); + * (REC_OFFS_HEADER_SIZE + 1 + 1 + + unsigned(index->n_fields))); buf_block_t* b; if (dict_index_is_spatial(index)) { @@ -3593,8 +3590,8 @@ btr_lift_page_up( page_level = btr_page_get_level(page); ut_ad(!page_has_siblings(page)); - ut_ad(mtr_is_block_fix( - mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains( + mtr, block, MTR_MEMO_PAGE_X_FIX)); father_block = blocks[0]; father_page_zip = buf_block_get_page_zip(father_block); @@ -3687,7 +3684,7 @@ btr_lift_page_up( /* We play it safe and reset the free bits for the father */ if (!dict_index_is_clust(index) - && !dict_table_is_temporary(index->table)) { + && !index->table->is_temporary()) { ibuf_reset_free_bits(father_block); } ut_ad(page_validate(father_page, index)); @@ -3753,7 +3750,7 @@ btr_compress( } #endif /* UNIV_DEBUG */ - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); const page_size_t page_size(index->table->space->flags); @@ -3898,8 +3895,7 @@ retry: /* Check if parent entry needs to be updated */ mbr_changed = rtr_merge_mbr_changed( &cursor2, &father_cursor, - offsets2, offsets, &new_mbr, - merge_block, block, index); + offsets2, offsets, &new_mbr); } rec_t* orig_pred = page_copy_rec_list_start( @@ -3944,8 +3940,7 @@ retry: merge_page, &new_mbr, NULL, mtr); #endif } else { - rtr_node_ptr_delete( - index, &father_cursor, block, mtr); + rtr_node_ptr_delete(&father_cursor, mtr); } /* No GAP lock needs to be 
worrying about */ @@ -4007,9 +4002,7 @@ retry: #ifdef UNIV_BTR_DEBUG memcpy(fil_page_prev, merge_page + FIL_PAGE_PREV, 4); #endif /* UNIV_BTR_DEBUG */ -#if FIL_NULL != 0xffffffff -# error "FIL_NULL != 0xffffffff" -#endif + compile_time_assert(FIL_NULL == 0xffffffffU); memset(merge_page + FIL_PAGE_PREV, 0xff, 4); } @@ -4093,9 +4086,7 @@ retry: rtr_merge_and_update_mbr(&father_cursor, &cursor2, offsets, offsets2, - merge_page, - merge_block, - block, index, mtr); + merge_page, mtr); } else { /* Otherwise, we will keep the node ptr of merge page and delete the father node ptr. @@ -4104,9 +4095,7 @@ retry: rtr_merge_and_update_mbr(&cursor2, &father_cursor, offsets2, offsets, - merge_page, - merge_block, - block, index, mtr); + merge_page, mtr); } lock_mutex_enter(); lock_prdt_page_free_from_discard( @@ -4135,7 +4124,7 @@ retry: } if (!dict_index_is_clust(index) - && !dict_table_is_temporary(index->table) + && !index->table->is_temporary() && page_is_leaf(merge_page)) { /* Update the free bits of the B-tree page in the insert buffer bitmap. This has to be done in a @@ -4172,7 +4161,7 @@ retry: write the bits accurately in a separate mini-transaction. */ ibuf_update_free_bits_if_full(merge_block, - UNIV_PAGE_SIZE, + srv_page_size, ULINT_UNDEFINED); } } @@ -4259,8 +4248,7 @@ btr_discard_only_page_on_level( ut_a(page_level == btr_page_get_level(page)); ut_a(!page_has_siblings(page)); - ut_ad(mtr_is_block_fix( - mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); btr_search_drop_page_hash_index(block); if (dict_index_is_spatial(index)) { @@ -4351,7 +4339,7 @@ btr_discard_page( ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK)); - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); MONITOR_INC(MONITOR_INDEX_DISCARD); @@ -4431,7 +4419,7 @@ btr_discard_page( node ptr, so, we need to get father node ptr first and then delete it. */ rtr_page_get_father(index, block, mtr, cursor, &father_cursor); - rtr_node_ptr_delete(index, &father_cursor, block, mtr); + rtr_node_ptr_delete(&father_cursor, mtr); } else { btr_node_ptr_delete(index, block, mtr); } @@ -4533,7 +4521,7 @@ btr_print_recursive( ulint i = 0; mtr_t mtr2; - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_SX_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_SX_FIX)); ib::info() << "NODE ON LEVEL " << btr_page_get_level(page) << " page " << block->page.id; @@ -4627,7 +4615,7 @@ btr_check_node_ptr( btr_cur_t cursor; page_t* page = buf_block_get_frame(block); - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); if (dict_index_get_page(index) == block->page.id.page_no()) { diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc index 87316ec7638..9486f37f2f4 100644 --- a/storage/innobase/btr/btr0bulk.cc +++ b/storage/innobase/btr/btr0bulk.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -31,7 +31,7 @@ Created 03/11/2014 Shaohua Wang #include "ibuf0ibuf.h" /** Innodb B-tree index fill factor for bulk load. */ -long innobase_fill_factor; +uint innobase_fill_factor; /** Initialize members, allocate page if needed and start mtr. Note: we commit all mtrs on failure. @@ -120,7 +120,7 @@ PageBulk::init() } if (dict_index_is_sec_or_ibuf(m_index) - && !dict_table_is_temporary(m_index->table) + && !m_index->table->is_temporary() && page_is_leaf(new_page)) { page_update_max_trx_id(new_block, NULL, m_trx_id, mtr); } @@ -140,16 +140,16 @@ PageBulk::init() m_reserved_space = dict_index_get_space_reserve(); } else { m_reserved_space = - UNIV_PAGE_SIZE * (100 - innobase_fill_factor) / 100; + srv_page_size * (100 - innobase_fill_factor) / 100; } m_padding_space = - UNIV_PAGE_SIZE - dict_index_zip_pad_optimal_page_size(m_index); + srv_page_size - dict_index_zip_pad_optimal_page_size(m_index); m_heap_top = page_header_get_ptr(new_page, PAGE_HEAP_TOP); m_rec_no = page_header_get_field(new_page, PAGE_N_RECS); ut_d(m_total_data = 0); - page_header_set_field(m_page, NULL, PAGE_HEAP_TOP, UNIV_PAGE_SIZE - 1); + page_header_set_field(m_page, NULL, PAGE_HEAP_TOP, srv_page_size - 1); return(DB_SUCCESS); } @@ -212,7 +212,7 @@ PageBulk::insert( - page_dir_calc_reserved_space(m_rec_no); ut_ad(m_free_space >= rec_size + slot_size); - ut_ad(m_heap_top + rec_size < m_page + UNIV_PAGE_SIZE); + ut_ad(m_heap_top + rec_size < m_page + srv_page_size); m_free_space -= rec_size + slot_size; m_heap_top += rec_size; @@ -234,7 +234,7 @@ PageBulk::finish() /* To pass the debug tests we have to set these dummy values in the debug version */ - page_dir_set_n_slots(m_page, NULL, UNIV_PAGE_SIZE / 2); + page_dir_set_n_slots(m_page, NULL, srv_page_size / 2); #endif ulint count = 0; @@ -309,7 +309,7 @@ PageBulk::commit( /* Set no free space left and no buffered changes in ibuf. */ if (!dict_index_is_clust(m_index) - && !dict_table_is_temporary(m_index->table) + && !m_index->table->is_temporary() && page_is_leaf(m_page)) { ibuf_set_bitmap_for_bulk_load( m_block, innobase_fill_factor == 100); @@ -462,15 +462,14 @@ PageBulk::copyOut( page_rec_is_leaf(split_rec), ULINT_UNDEFINED, &m_heap); - m_free_space += rec_get_end(last_rec, offsets) - - m_heap_top + m_free_space += ulint(rec_get_end(last_rec, offsets) - m_heap_top) + page_dir_calc_reserved_space(m_rec_no) - page_dir_calc_reserved_space(n); - ut_ad(m_free_space > 0); + ut_ad(lint(m_free_space) > 0); m_rec_no = n; #ifdef UNIV_DEBUG - m_total_data -= rec_get_end(last_rec, offsets) - m_heap_top; + m_total_data -= ulint(rec_get_end(last_rec, offsets) - m_heap_top); #endif /* UNIV_DEBUG */ } @@ -726,7 +725,7 @@ BtrBulk::pageCommit( void BtrBulk::logFreeCheck() { - if (log_sys->check_flush_or_checkpoint) { + if (log_sys.check_flush_or_checkpoint) { release(); log_free_check(); @@ -918,7 +917,7 @@ BtrBulk::finish(dberr_t err) { ulint last_page_no = FIL_NULL; - ut_ad(!dict_table_is_temporary(m_index->table)); + ut_ad(!m_index->table->is_temporary()); if (m_page_bulks->size() == 0) { /* The table is empty. 
The root page of the index tree diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 982a80edcea..11dc5b43059 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -75,18 +75,14 @@ enum btr_op_t { BTR_DELMARK_OP /*!< Mark a record for deletion */ }; -/** Modification types for the B-tree operation. */ +/** Modification types for the B-tree operation. + Note that the order must be DELETE, BOTH, INSERT !! + */ enum btr_intention_t { BTR_INTENTION_DELETE, BTR_INTENTION_BOTH, BTR_INTENTION_INSERT }; -#if BTR_INTENTION_DELETE > BTR_INTENTION_BOTH -#error "BTR_INTENTION_DELETE > BTR_INTENTION_BOTH" -#endif -#if BTR_INTENTION_BOTH > BTR_INTENTION_INSERT -#error "BTR_INTENTION_BOTH > BTR_INTENTION_INSERT" -#endif /** For the index->lock scalability improvement, only possibility of clear performance regression observed was caused by grown huge history list length. @@ -122,7 +118,7 @@ uint btr_cur_limit_optimistic_insert_debug; /** In the optimistic insert, if the insert does not fit, but this much space can be released by page reorganize, then it is reorganized */ -#define BTR_CUR_PAGE_REORGANIZE_LIMIT (UNIV_PAGE_SIZE / 32) +#define BTR_CUR_PAGE_REORGANIZE_LIMIT (srv_page_size / 32) /** The structure of a BLOB part header */ /* @{ */ @@ -209,16 +205,6 @@ btr_rec_free_externally_stored_fields( /*==================== B-TREE SEARCH =========================*/ -#if MTR_MEMO_PAGE_S_FIX != RW_S_LATCH -#error "MTR_MEMO_PAGE_S_FIX != RW_S_LATCH" -#endif -#if MTR_MEMO_PAGE_X_FIX != RW_X_LATCH -#error "MTR_MEMO_PAGE_X_FIX != RW_X_LATCH" -#endif -#if MTR_MEMO_PAGE_SX_FIX != RW_SX_LATCH -#error "MTR_MEMO_PAGE_SX_FIX != RW_SX_LATCH" -#endif - /** Latches the leaf page or pages requested. @param[in] block leaf page where the search converged @param[in] page_id page id of the leaf @@ -243,6 +229,10 @@ btr_cur_latch_leaves( bool spatial; btr_latch_leaves_t latch_leaves = {{NULL, NULL, NULL}, {0, 0, 0}}; + compile_time_assert(int(MTR_MEMO_PAGE_S_FIX) == int(RW_S_LATCH)); + compile_time_assert(int(MTR_MEMO_PAGE_X_FIX) == int(RW_X_LATCH)); + compile_time_assert(int(MTR_MEMO_PAGE_SX_FIX) == int(RW_SX_LATCH)); + spatial = dict_index_is_spatial(cursor->index) && cursor->rtr_info; ut_ad(buf_page_in_file(&block->page)); @@ -574,7 +564,8 @@ btr_cur_instant_root_init(dict_index_t* index, const page_t* page) index root pages of ROW_FORMAT=COMPACT or ROW_FORMAT=DYNAMIC when instant ADD COLUMN is not used. */ ut_ad(!page_is_comp(page) || !page_get_instant(page)); - index->n_core_null_bytes = UT_BITS_IN_BYTES(index->n_nullable); + index->n_core_null_bytes = UT_BITS_IN_BYTES( + unsigned(index->n_nullable)); return false; case FIL_PAGE_TYPE_INSTANT: break; @@ -593,7 +584,7 @@ btr_cur_instant_root_init(dict_index_t* index, const page_t* page) ut_ad(!index->is_dummy); ut_d(index->is_dummy = true); index->n_core_null_bytes = n == index->n_fields - ? UT_BITS_IN_BYTES(index->n_nullable) + ? 
UT_BITS_IN_BYTES(unsigned(index->n_nullable)) : UT_BITS_IN_BYTES(index->get_n_nullable(n)); ut_d(index->is_dummy = false); return false; @@ -720,7 +711,7 @@ btr_cur_get_and_clear_intention( /* both or unknown */ intention = BTR_INTENTION_BOTH; } - *latch_mode &= ~(BTR_LATCH_FOR_INSERT | BTR_LATCH_FOR_DELETE); + *latch_mode &= ulint(~(BTR_LATCH_FOR_INSERT | BTR_LATCH_FOR_DELETE)); return(intention); } @@ -867,7 +858,7 @@ btr_cur_will_modify_tree( page_size.physical()) < rec_size * 2 + page_get_data_size(page) + page_dir_calc_reserved_space( - page_get_n_recs(page) + 2) + 1) { + ulint(page_get_n_recs(page)) + 2) + 1) { return(true); } } @@ -960,7 +951,7 @@ btr_cur_search_to_nth_level_func( page_cur_mode_t search_mode = PAGE_CUR_UNSUPP; ulint buf_mode; ulint estimate; - ulint node_ptr_max_size = UNIV_PAGE_SIZE / 2; + ulint node_ptr_max_size = srv_page_size / 2; page_cur_t* page_cursor; btr_op_t btr_op; ulint root_height = 0; /* remove warning */ @@ -1061,7 +1052,7 @@ btr_cur_search_to_nth_level_func( /* Operations on the clustered index cannot be buffered. */ ut_ad(btr_op == BTR_NO_OP || !dict_index_is_clust(index)); /* Operations on the temporary table(indexes) cannot be buffered. */ - ut_ad(btr_op == BTR_NO_OP || !dict_table_is_temporary(index->table)); + ut_ad(btr_op == BTR_NO_OP || !index->table->is_temporary()); /* Operation on the spatial index cannot be buffered. */ ut_ad(btr_op == BTR_NO_OP || !dict_index_is_spatial(index)); @@ -1514,7 +1505,7 @@ retry_page_get: root_height = height; cursor->tree_height = root_height + 1; - if (UNIV_UNLIKELY(dict_index_is_spatial(index))) { + if (dict_index_is_spatial(index)) { ut_ad(cursor->rtr_info); node_seq_t seq_no = rtr_get_current_ssn_id(index); @@ -1727,7 +1718,7 @@ retry_page_get: } lock_prdt_lock(block, &prdt, index, LOCK_S, - LOCK_PREDICATE, cursor->thr, mtr); + LOCK_PREDICATE, cursor->thr); if (rw_latch == RW_NO_LATCH && height != 0) { rw_lock_s_unlock(&(block->lock)); @@ -2245,7 +2236,7 @@ btr_cur_open_at_index_side_func( mtr_t* mtr) /*!< in/out: mini-transaction */ { page_cur_t* page_cursor; - ulint node_ptr_max_size = UNIV_PAGE_SIZE / 2; + ulint node_ptr_max_size = srv_page_size / 2; ulint height; ulint root_height = 0; /* remove warning */ rec_t* node_ptr; @@ -2265,14 +2256,14 @@ btr_cur_open_at_index_side_func( rec_offs_init(offsets_); estimate = latch_mode & BTR_ESTIMATE; - latch_mode &= ~BTR_ESTIMATE; + latch_mode &= ulint(~BTR_ESTIMATE); ut_ad(level != ULINT_UNDEFINED); bool s_latch_by_caller; s_latch_by_caller = latch_mode & BTR_ALREADY_S_LATCHED; - latch_mode &= ~BTR_ALREADY_S_LATCHED; + latch_mode &= ulint(~BTR_ALREADY_S_LATCHED); lock_intention = btr_cur_get_and_clear_intention(&latch_mode); @@ -2616,7 +2607,7 @@ btr_cur_open_at_rnd_pos_func( mtr_t* mtr) /*!< in: mtr */ { page_cur_t* page_cursor; - ulint node_ptr_max_size = UNIV_PAGE_SIZE / 2; + ulint node_ptr_max_size = srv_page_size / 2; ulint height; rec_t* node_ptr; ulint savepoint; @@ -2928,9 +2919,8 @@ btr_cur_insert_if_possible( ut_ad(dtuple_check_typed(tuple)); - ut_ad(mtr_is_block_fix( - mtr, btr_cur_get_block(cursor), - MTR_MEMO_PAGE_X_FIX, cursor->index->table)); + ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor), + MTR_MEMO_PAGE_X_FIX)); page_cursor = btr_cur_get_page_cur(cursor); /* Now, try the insert */ @@ -3120,7 +3110,7 @@ btr_cur_optimistic_insert( page = buf_block_get_frame(block); index = cursor->index; - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); 
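A recurring change in this file, and in many hunks further down, is replacing the compile-time UNIV_PAGE_SIZE with the runtime srv_page_size, and rewriting multiplications and divisions by the page size as shifts by srv_page_size_shift. Because the page size is always a power of two, the two forms are exact replacements; a small standalone sketch with illustrative values for a 16KiB page:

#include <cstdio>

int main()
{
    /* Illustrative values: innodb_page_size=16k gives
       srv_page_size=16384 and srv_page_size_shift=14. */
    const unsigned long page_size  = 16384;
    const unsigned      page_shift = 14;

    const unsigned long bytes = 5UL * 1024 * 1024; /* 5 MiB, arbitrary */

    /* Division by the page size and a right shift give the same page count,
       and the multiplication/left-shift pair round-trips the same way. */
    std::printf("%d %d\n",
                (bytes >> page_shift) == bytes / page_size,
                ((bytes >> page_shift) << page_shift)
                    == (bytes / page_size) * page_size);
    return 0;
}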
ut_ad(!dict_index_is_online_ddl(index) || dict_index_is_clust(index) || (flags & BTR_CREATE_FLAG)); @@ -3273,7 +3263,7 @@ fail_err: if (*rec) { } else if (page_size.is_compressed()) { - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); /* Reset the IBUF_BITMAP_FREE bits, because page_cur_tuple_insert() will have attempted page reorganize before failing. */ @@ -3334,7 +3324,7 @@ fail_err: if (leaf && !dict_index_is_clust(index) - && !dict_table_is_temporary(index->table)) { + && !index->table->is_temporary()) { /* Update the free bits of the B-tree page in the insert buffer bitmap. */ @@ -3412,9 +3402,8 @@ btr_cur_pessimistic_insert( ut_ad(mtr_memo_contains_flagged( mtr, dict_index_get_lock(btr_cur_get_index(cursor)), MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK)); - ut_ad(mtr_is_block_fix( - mtr, btr_cur_get_block(cursor), - MTR_MEMO_PAGE_X_FIX, cursor->index->table)); + ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor), + MTR_MEMO_PAGE_X_FIX)); ut_ad(!dict_index_is_online_ddl(index) || dict_index_is_clust(index) || (flags & BTR_CREATE_FLAG)); @@ -3488,7 +3477,7 @@ btr_cur_pessimistic_insert( || dict_index_is_spatial(index)); if (!(flags & BTR_NO_LOCKING_FLAG)) { - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); if (dict_index_is_spatial(index)) { /* Do nothing */ } else { @@ -3524,7 +3513,7 @@ btr_cur_pessimistic_insert( if (entry->info_bits & REC_INFO_MIN_REC_FLAG) { ut_ad(entry->info_bits == REC_INFO_DEFAULT_ROW); ut_ad(index->is_instant()); - ut_ad((flags & ~BTR_KEEP_IBUF_BITMAP) + ut_ad((flags & ulint(~BTR_KEEP_IBUF_BITMAP)) == BTR_NO_LOCKING_FLAG); } else { btr_search_update_hash_on_insert( @@ -3710,7 +3699,7 @@ btr_cur_parse_update_in_place( rec_offset = mach_read_from_2(ptr); ptr += 2; - ut_a(rec_offset <= UNIV_PAGE_SIZE); + ut_a(rec_offset <= srv_page_size); heap = mem_heap_create(256); @@ -3830,7 +3819,7 @@ out_of_space: /* Out of space: reset the free bits. */ if (!dict_index_is_clust(index) - && !dict_table_is_temporary(index->table) + && !index->table->is_temporary() && page_is_leaf(page)) { ibuf_reset_free_bits(page_cur_get_block(cursor)); } @@ -3899,7 +3888,7 @@ btr_cur_update_in_place( /* Check that enough space is available on the compressed page. */ if (page_zip) { - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); if (!btr_cur_update_alloc_zip( page_zip, btr_cur_get_page_cur(cursor), @@ -3993,7 +3982,7 @@ func_exit: && !dict_index_is_clust(index) && page_is_leaf(buf_block_get_frame(block))) { /* Update the free bits in the insert buffer. */ - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); ibuf_update_free_bits_zip(block, mtr); } @@ -4108,7 +4097,7 @@ btr_cur_optimistic_update( index = cursor->index; ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG)); ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table)); - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); /* This is intended only for leaf page updates */ ut_ad(page_is_leaf(page)); /* The insert buffer tree should never be updated in place. 
*/ @@ -4197,7 +4186,7 @@ any_extern: #endif /* UNIV_ZIP_DEBUG */ if (page_zip) { - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); if (page_zip_rec_needs_ext(new_rec_size, page_is_comp(page), dict_index_get_n_fields(index), @@ -4337,9 +4326,9 @@ func_exit: && !dict_index_is_clust(index)) { /* Update the free bits in the insert buffer. */ if (page_zip) { - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); ibuf_update_free_bits_zip(block, mtr); - } else if (!dict_table_is_temporary(index->table)) { + } else if (!index->table->is_temporary()) { ibuf_update_free_bits_low(block, max_ins_size, mtr); } } @@ -4459,11 +4448,11 @@ btr_cur_pessimistic_update( ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK)); - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); #ifdef UNIV_ZIP_DEBUG ut_a(!page_zip || page_zip_validate(page_zip, page, index)); #endif /* UNIV_ZIP_DEBUG */ - ut_ad(!page_zip || !dict_table_is_temporary(index->table)); + ut_ad(!page_zip || !index->table->is_temporary()); /* The insert buffer tree should never be updated in place. */ ut_ad(!dict_index_is_ibuf(index)); ut_ad(trx_id > 0 @@ -4495,7 +4484,7 @@ btr_cur_pessimistic_update( && optim_err != DB_ZIP_OVERFLOW && !dict_index_is_clust(index) && page_is_leaf(page)) { - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); ibuf_update_free_bits_zip(block, mtr); } @@ -4696,9 +4685,9 @@ btr_cur_pessimistic_update( This is the same block which was skipped by BTR_KEEP_IBUF_BITMAP. */ if (page_zip) { - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); ibuf_update_free_bits_zip(block, mtr); - } else if (!dict_table_is_temporary(index->table)) { + } else if (!index->table->is_temporary()) { ibuf_update_free_bits_low(block, max_ins_size, mtr); } @@ -4730,7 +4719,7 @@ btr_cur_pessimistic_update( This is the same block which was skipped by BTR_KEEP_IBUF_BITMAP. */ if (!dict_index_is_clust(index) - && !dict_table_is_temporary(index->table) + && !index->table->is_temporary() && page_is_leaf(page)) { ibuf_reset_free_bits(block); } @@ -4782,7 +4771,7 @@ btr_cur_pessimistic_update( max_trx_id is ignored for temp tables because it not required for MVCC. */ if (dict_index_is_sec_or_ibuf(index) - && !dict_table_is_temporary(index->table)) { + && !index->table->is_temporary()) { /* Update PAGE_MAX_TRX_ID in the index page header. It was not updated by btr_cur_pessimistic_insert() because of BTR_NO_LOCKING_FLAG. */ @@ -4938,7 +4927,7 @@ btr_cur_parse_del_mark_set_clust_rec( offset = mach_read_from_2(ptr); ptr += 2; - ut_a(offset <= UNIV_PAGE_SIZE); + ut_a(offset <= srv_page_size); /* In delete-marked records, DB_TRX_ID must always refer to an existing undo log record. 
*/ @@ -5132,7 +5121,7 @@ btr_cur_parse_del_mark_set_sec_rec( offset = mach_read_from_2(ptr); ptr += 2; - ut_a(offset <= UNIV_PAGE_SIZE); + ut_a(offset <= srv_page_size); if (page) { rec = page + offset; @@ -5243,9 +5232,8 @@ btr_cur_compress_if_useful( ut_ad(mtr_memo_contains_flagged( mtr, dict_index_get_lock(btr_cur_get_index(cursor)), MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK)); - ut_ad(mtr_is_block_fix( - mtr, btr_cur_get_block(cursor), - MTR_MEMO_PAGE_X_FIX, cursor->index->table)); + ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor), + MTR_MEMO_PAGE_X_FIX)); if (dict_index_is_spatial(cursor->index)) { const page_t* page = btr_cur_get_page(cursor); @@ -5297,8 +5285,8 @@ btr_cur_optimistic_delete_func( ut_ad(flags == 0 || flags == BTR_CREATE_FLAG); ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor), MTR_MEMO_PAGE_X_FIX)); - ut_ad(mtr_is_block_fix(mtr, btr_cur_get_block(cursor), - MTR_MEMO_PAGE_X_FIX, cursor->index->table)); + ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor), + MTR_MEMO_PAGE_X_FIX)); ut_ad(mtr->is_named_space(cursor->index->table->space)); /* This is intended only for leaf page deletions */ @@ -5416,7 +5404,7 @@ btr_cur_optimistic_delete_func( into non-leaf pages, into clustered indexes, or into the change buffer. */ if (!dict_index_is_clust(cursor->index) - && !dict_table_is_temporary(cursor->index->table) + && !cursor->index->table->is_temporary() && !dict_index_is_ibuf(cursor->index)) { ibuf_update_free_bits_low(block, max_ins, mtr); } @@ -5489,7 +5477,7 @@ btr_cur_pessimistic_delete( ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK)); - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); ut_ad(mtr->is_named_space(index->table->space)); if (!has_reserved_extents) { @@ -5759,31 +5747,28 @@ the number of pages between slot1->page and slot2->page (which is n_rows_on_prev_level). In this case we set is_n_rows_exact to FALSE. @return number of rows, not including the borders (exact or estimated) */ static -int64_t +ha_rows btr_estimate_n_rows_in_range_on_level( /*==================================*/ dict_index_t* index, /*!< in: index */ btr_path_t* slot1, /*!< in: left border */ btr_path_t* slot2, /*!< in: right border */ - int64_t n_rows_on_prev_level, /*!< in: number of rows + ha_rows n_rows_on_prev_level, /*!< in: number of rows on the previous level for the same descend paths; used to determine the number of pages on this level */ - ibool* is_n_rows_exact) /*!< out: TRUE if the returned + bool* is_n_rows_exact) /*!< out: TRUE if the returned value is exact i.e. not an estimation */ { - int64_t n_rows; - ulint n_pages_read; + ha_rows n_rows = 0; + uint n_pages_read = 0; ulint level; - n_rows = 0; - n_pages_read = 0; - /* Assume by default that we will scan all pages between slot1->page_no and slot2->page_no. 
*/ - *is_n_rows_exact = TRUE; + *is_n_rows_exact = true; /* Add records from slot1->page_no which are to the right of the record which serves as a left border of the range, if any @@ -5899,7 +5884,7 @@ btr_estimate_n_rows_in_range_on_level( inexact: - *is_n_rows_exact = FALSE; + *is_n_rows_exact = false; /* We did interrupt before reaching slot2->page */ @@ -5907,8 +5892,7 @@ inexact: /* The number of pages on this level is n_rows_on_prev_level, multiply it by the average number of recs per page so far */ - n_rows = n_rows_on_prev_level - * n_rows / n_pages_read; + n_rows = n_rows_on_prev_level * n_rows / n_pages_read; } else { /* The tree changed before we could even start with slot1->page_no */ @@ -5927,7 +5911,7 @@ static const unsigned rows_in_range_max_retries = 4; /** We pretend that a range has that many records if the tree keeps changing for rows_in_range_max_retries retries while we try to estimate the records in a given range. */ -static const int64_t rows_in_range_arbitrary_ret_val = 10; +static const ha_rows rows_in_range_arbitrary_ret_val = 10; /** Estimates the number of rows in a given index range. @param[in] index index @@ -5944,7 +5928,7 @@ rows_in_range_arbitrary_ret_val as a result (if nth_attempt >= rows_in_range_max_retries and the tree is modified between the two dives). */ static -int64_t +ha_rows btr_estimate_n_rows_in_range_low( dict_index_t* index, const dtuple_t* tuple1, @@ -5958,14 +5942,14 @@ btr_estimate_n_rows_in_range_low( btr_cur_t cursor; btr_path_t* slot1; btr_path_t* slot2; - ibool diverged; - ibool diverged_lot; + bool diverged; + bool diverged_lot; ulint divergence_level; - int64_t n_rows; - ibool is_n_rows_exact; + ha_rows n_rows; + bool is_n_rows_exact; ulint i; mtr_t mtr; - int64_t table_n_rows; + ha_rows table_n_rows; table_n_rows = dict_table_get_n_rows(index->table); @@ -6101,16 +6085,16 @@ btr_estimate_n_rows_in_range_low( /* We have the path information for the range in path1 and path2 */ n_rows = 0; - is_n_rows_exact = TRUE; + is_n_rows_exact = true; /* This becomes true when the two paths do not pass through the same pages anymore. */ - diverged = FALSE; + diverged = false; /* This becomes true when the paths are not the same or adjacent any more. This means that they pass through the same or neighboring-on-the-same-level pages only. */ - diverged_lot = FALSE; + diverged_lot = false; /* This is the level where paths diverged a lot. */ divergence_level = 1000000; @@ -6233,15 +6217,12 @@ btr_estimate_n_rows_in_range_low( return(rows_in_range_arbitrary_ret_val); } - const int64_t ret = - btr_estimate_n_rows_in_range_low( - index, tuple1, mode1, - tuple2, mode2, nth_attempt + 1); - - return(ret); + return btr_estimate_n_rows_in_range_low( + index, tuple1, mode1, + tuple2, mode2, nth_attempt + 1); } - diverged = TRUE; + diverged = true; if (slot1->nth_rec < slot2->nth_rec) { /* We do not count the borders (nor the left @@ -6254,7 +6235,7 @@ btr_estimate_n_rows_in_range_low( and slot2, so on the level below the slots will point to non-adjacent pages. 
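The inexact branch shown above extrapolates from the pages actually read: n_rows = n_rows_on_prev_level * n_rows / n_pages_read, i.e. the average number of records per page seen so far, scaled to the number of pages expected on this level. A tiny standalone sketch with made-up numbers, only to illustrate the scaling:

#include <cstdio>

typedef unsigned long long ha_rows_t; /* stand-in for ha_rows, illustrative only */

int main()
{
    /* Suppose 8 pages were expected on this level (n_rows_on_prev_level),
       but the scan stopped after 3 pages that held 120 records in total. */
    ha_rows_t n_rows_on_prev_level = 8;
    ha_rows_t n_rows = 120;
    unsigned  n_pages_read = 3;

    /* Average records per page so far, scaled to the expected page count. */
    n_rows = n_rows_on_prev_level * n_rows / n_pages_read;

    std::printf("estimated rows: %llu\n", n_rows); /* 8 * 120 / 3 = 320 */
    return 0;
}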
*/ - diverged_lot = TRUE; + diverged_lot = true; divergence_level = i; } } else { @@ -6276,7 +6257,7 @@ btr_estimate_n_rows_in_range_low( if (slot1->nth_rec < slot1->n_recs || slot2->nth_rec > 1) { - diverged_lot = TRUE; + diverged_lot = true; divergence_level = i; n_rows = 0; @@ -6306,7 +6287,7 @@ btr_estimate_n_rows_in_range_low( @param[in] tuple2 range end, may also be empty tuple @param[in] mode2 search mode for range end @return estimated number of rows */ -int64_t +ha_rows btr_estimate_n_rows_in_range( dict_index_t* index, const dtuple_t* tuple1, @@ -6314,10 +6295,8 @@ btr_estimate_n_rows_in_range( const dtuple_t* tuple2, page_cur_mode_t mode2) { - const int64_t ret = btr_estimate_n_rows_in_range_low( - index, tuple1, mode1, tuple2, mode2, 1 /* first attempt */); - - return(ret); + return btr_estimate_n_rows_in_range_low( + index, tuple1, mode1, tuple2, mode2, 1); } /*******************************************************************//** @@ -6710,12 +6689,12 @@ btr_rec_get_externally_stored_len( btr_rec_get_field_ref(rec, offsets, i) + BTR_EXTERN_LEN + 4); - total_extern_len += ut_calc_align(extern_len, - UNIV_PAGE_SIZE); + total_extern_len += ut_calc_align( + extern_len, ulint(srv_page_size)); } } - return(total_extern_len / UNIV_PAGE_SIZE); + return total_extern_len >> srv_page_size_shift; } /*******************************************************************//** @@ -6893,10 +6872,10 @@ btr_push_update_extern_fields( uf->orig_len); /* Copy the locally stored prefix. */ memcpy(buf, data, - uf->orig_len + unsigned(uf->orig_len) - BTR_EXTERN_FIELD_REF_SIZE); /* Copy the BLOB pointer. */ - memcpy(buf + uf->orig_len + memcpy(buf + unsigned(uf->orig_len) - BTR_EXTERN_FIELD_REF_SIZE, data + len - BTR_EXTERN_FIELD_REF_SIZE, BTR_EXTERN_FIELD_REF_SIZE); @@ -6940,7 +6919,6 @@ static void btr_blob_free( /*==========*/ - dict_index_t* index, /*!< in: index */ buf_block_t* block, /*!< in: buffer block */ ibool all, /*!< in: TRUE=remove also the compressed page if there is one */ @@ -6950,7 +6928,7 @@ btr_blob_free( ulint space = block->page.id.space(); ulint page_no = block->page.id.page_no(); - ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); mtr_commit(mtr); @@ -7139,8 +7117,7 @@ btr_store_big_rec_extern_fields( ut_ad(rec_offs_any_extern(offsets)); ut_ad(mtr_memo_contains_flagged(btr_mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK)); - ut_ad(mtr_is_block_fix( - btr_mtr, rec_block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains(btr_mtr, rec_block, MTR_MEMO_PAGE_X_FIX)); ut_ad(buf_block_get_frame(rec_block) == page_align(rec)); ut_a(dict_index_is_clust(index)); @@ -7166,7 +7143,7 @@ btr_store_big_rec_extern_fields( heap = mem_heap_create(250000); page_zip_set_alloc(&c_stream, heap); - err = deflateInit2(&c_stream, page_zip_level, + err = deflateInit2(&c_stream, int(page_zip_level), Z_DEFLATED, 15, 7, Z_DEFAULT_STRATEGY); ut_a(err == Z_OK); } @@ -7454,7 +7431,7 @@ next_zip_page: /* Commit mtr and release the uncompressed page frame to save memory. 
*/ - btr_blob_free(index, block, FALSE, &mtr); + btr_blob_free(block, FALSE, &mtr); if (err == Z_STREAM_END) { break; @@ -7636,11 +7613,11 @@ btr_free_externally_stored_field( ulint next_page_no; mtr_t mtr; - ut_ad(dict_index_is_clust(index)); + ut_ad(index->is_primary()); ut_ad(mtr_memo_contains_flagged(local_mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK)); - ut_ad(mtr_is_page_fix( - local_mtr, field_ref, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains_page(local_mtr, field_ref, + MTR_MEMO_PAGE_X_FIX)); ut_ad(!rec || rec_offs_validate(rec, index, offsets)); ut_ad(!rec || field_ref == btr_rec_get_field_ref(rec, offsets, i)); ut_ad(local_mtr->is_named_space( @@ -7679,7 +7656,7 @@ btr_free_externally_stored_field( mtr.set_spaces(*local_mtr); mtr.set_log_mode(local_mtr->get_log_mode()); - ut_ad(!dict_table_is_temporary(index->table) + ut_ad(!index->table->is_temporary() || local_mtr->get_log_mode() == MTR_LOG_NO_REDO); const page_t* p = page_align(field_ref); @@ -7782,7 +7759,7 @@ btr_free_externally_stored_field( } /* Commit mtr and release the BLOB block to save memory. */ - btr_blob_free(index, ext_block, TRUE, &mtr); + btr_blob_free(ext_block, TRUE, &mtr); } } @@ -7807,8 +7784,8 @@ btr_rec_free_externally_stored_fields( ulint i; ut_ad(rec_offs_validate(rec, index, offsets)); - ut_ad(mtr_is_page_fix(mtr, rec, MTR_MEMO_PAGE_X_FIX, index->table)); - ut_ad(dict_index_is_clust(index)); + ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)); + ut_ad(index->is_primary()); ut_ad(page_rec_is_leaf(rec)); /* Free possible externally stored fields in the record */ @@ -7846,7 +7823,7 @@ btr_rec_free_updated_extern_fields( ulint i; ut_ad(rec_offs_validate(rec, index, offsets)); - ut_ad(mtr_is_page_fix(mtr, rec, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)); /* Free possible externally stored fields in the record */ diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc index e56df37107a..678668a7809 100644 --- a/storage/innobase/btr/btr0defragment.cc +++ b/storage/innobase/btr/btr0defragment.cc @@ -456,7 +456,7 @@ btr_defragment_merge_pages( // Estimate how many records can be moved from the from_page to // the to_page. if (page_size.is_compressed()) { - ulint page_diff = UNIV_PAGE_SIZE - *max_data_size; + ulint page_diff = srv_page_size - *max_data_size; max_ins_size_to_use = (max_ins_size_to_use > page_diff) ? max_ins_size_to_use - page_diff : 0; } @@ -529,7 +529,7 @@ btr_defragment_merge_pages( } else { ibuf_update_free_bits_if_full( to_block, - UNIV_PAGE_SIZE, + srv_page_size, ULINT_UNDEFINED); } } @@ -669,7 +669,7 @@ btr_defragment_n_pages( // For compressed pages, we take compression failures into account. if (page_size.is_compressed()) { ulint size = 0; - int i = 0; + uint i = 0; // We estimate the optimal data size of the index use samples of // data size. These samples are taken when pages failed to // compress due to insertion on the page. 
We use the average @@ -683,7 +683,7 @@ btr_defragment_n_pages( size += index->stat_defrag_data_size_sample[i]; } if (i != 0) { - size = size / i; + size /= i; optimal_page_size = ut_min(optimal_page_size, size); } max_data_size = optimal_page_size; diff --git a/storage/innobase/btr/btr0pcur.cc b/storage/innobase/btr/btr0pcur.cc index c29f96b1125..46f563a7379 100644 --- a/storage/innobase/btr/btr0pcur.cc +++ b/storage/innobase/btr/btr0pcur.cc @@ -127,6 +127,8 @@ btr_pcur_store_position( mtr, dict_index_get_lock(index), MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK))); + cursor->old_stored = true; + if (page_is_empty(page)) { /* It must be an empty index tree; NOTE that in this case we do not store the modify_clock, but always do a search @@ -136,10 +138,7 @@ btr_pcur_store_position( ut_ad(page_is_leaf(page)); ut_ad(page_get_page_no(page) == index->page); - cursor->old_stored = true; - if (page_rec_is_supremum_low(offs)) { - cursor->rel_pos = BTR_PCUR_AFTER_LAST_IN_TREE; } else { cursor->rel_pos = BTR_PCUR_BEFORE_FIRST_IN_TREE; @@ -149,21 +148,25 @@ btr_pcur_store_position( } if (page_rec_is_supremum_low(offs)) { - rec = page_rec_get_prev(rec); + ut_ad(!page_rec_is_infimum(rec)); + ut_ad(!rec_is_default_row(rec, index)); + cursor->rel_pos = BTR_PCUR_AFTER; - } else if (page_rec_is_infimum_low(offs)) { - rec = page_rec_get_next(rec); + if (rec_is_default_row(rec, index)) { + rec = page_rec_get_next(rec); + ut_ad(!page_rec_is_supremum(rec)); + } + cursor->rel_pos = BTR_PCUR_BEFORE; } else { cursor->rel_pos = BTR_PCUR_ON; } - cursor->old_stored = true; cursor->old_rec = dict_index_copy_rec_order_prefix( index, rec, &cursor->old_n_fields, &cursor->old_rec_buf, &cursor->buf_size); @@ -490,7 +493,7 @@ btr_pcur_move_backward_from_page( ut_ad(cursor->latch_mode != BTR_NO_LATCHES); ut_ad(btr_pcur_is_before_first_on_page(cursor)); - ut_ad(!btr_pcur_is_before_first_in_tree(cursor, mtr)); + ut_ad(!btr_pcur_is_before_first_in_tree(cursor)); latch_mode = cursor->latch_mode; @@ -562,7 +565,7 @@ btr_pcur_move_to_prev( if (btr_pcur_is_before_first_on_page(cursor)) { - if (btr_pcur_is_before_first_in_tree(cursor, mtr)) { + if (btr_pcur_is_before_first_in_tree(cursor)) { return(FALSE); } diff --git a/storage/innobase/btr/btr0scrub.cc b/storage/innobase/btr/btr0scrub.cc index df3435828be..22e997b60aa 100644 --- a/storage/innobase/btr/btr0scrub.cc +++ b/storage/innobase/btr/btr0scrub.cc @@ -133,7 +133,7 @@ btr_scrub_lock_dict_func(ulint space_id, bool lock_to_close_table, if (lock_to_close_table) { } else if (fil_space_t* space = fil_space_acquire(space_id)) { bool stopping = space->is_stopping(); - fil_space_release(space); + space->release(); if (stopping) { return false; } @@ -209,7 +209,7 @@ btr_scrub_table_close_for_thread( btr_scrub_table_close(scrub_data->current_table); mutex_exit(&dict_sys->mutex); } - fil_space_release(space); + space->release(); } scrub_data->current_table = NULL; @@ -668,7 +668,7 @@ btr_scrub_free_page( * it will be found by scrubbing thread again */ memset(buf_block_get_frame(block) + PAGE_HEADER, 0, - UNIV_PAGE_SIZE - PAGE_HEADER); + srv_page_size - PAGE_HEADER); mach_write_to_2(buf_block_get_frame(block) + FIL_PAGE_TYPE, FIL_PAGE_TYPE_ALLOCATED); diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc index 53272c80834..038e262cfad 100644 --- a/storage/innobase/btr/btr0sea.cc +++ b/storage/innobase/btr/btr0sea.cc @@ -224,8 +224,7 @@ btr_search_check_free_space_in_heap(const dict_index_t* index) /** Creates and initializes the adaptive search system at a database start. 
@param[in] hash_size hash table size. */ -void -btr_search_sys_create(ulint hash_size) +void btr_search_sys_create(ulint hash_size) { /* Search System is divided into n parts. Each part controls access to distinct set of hash buckets from @@ -266,8 +265,7 @@ btr_search_sys_create(ulint hash_size) /** Resize hash index hash table. @param[in] hash_size hash index hash table size */ -void -btr_search_sys_resize(ulint hash_size) +void btr_search_sys_resize(ulint hash_size) { /* Step-1: Lock all search latches in exclusive mode. */ btr_search_x_lock_all(); @@ -303,10 +301,14 @@ btr_search_sys_resize(ulint hash_size) } /** Frees the adaptive search system at a database shutdown. */ -void -btr_search_sys_free() +void btr_search_sys_free() { - ut_ad(btr_search_sys != NULL && btr_search_latches != NULL); + if (!btr_search_sys) { + ut_ad(!btr_search_latches); + return; + } + + ut_ad(btr_search_latches); /* Step-1: Release the hash tables. */ for (ulint i = 0; i < btr_ahi_parts; ++i) { @@ -351,9 +353,7 @@ btr_search_disable_ref_count( /** Disable the adaptive hash search system and empty the index. @param[in] need_mutex need to acquire dict_sys->mutex */ -void -btr_search_disable( - bool need_mutex) +void btr_search_disable(bool need_mutex) { dict_table_t* table; @@ -406,8 +406,7 @@ btr_search_disable( } /** Enable the adaptive hash search system. */ -void -btr_search_enable() +void btr_search_enable() { buf_pool_mutex_enter_all(); if (srv_buf_pool_old_size != srv_buf_pool_size) { @@ -432,7 +431,7 @@ btr_search_info_get_ref_count( { ulint ret = 0; - if (!btr_search_enabled || !index->table->space) { + if (!btr_search_enabled) { return(ret); } @@ -566,14 +565,10 @@ block->n_hash_helps, n_fields, n_bytes, left_side are NOT protected by any semaphore, to save CPU time! Do not assume the fields are consistent. @return TRUE if building a (new) hash index on the block is recommended @param[in,out] info search info -@param[in,out] block buffer block -@param[in] cursor cursor */ +@param[in,out] block buffer block */ static bool -btr_search_update_block_hash_info( - btr_search_t* info, - buf_block_t* block, - const btr_cur_t* cursor) +btr_search_update_block_hash_info(btr_search_t* info, buf_block_t* block) { ut_ad(!btr_search_own_any(RW_LOCK_S)); ut_ad(!btr_search_own_any(RW_LOCK_X)); @@ -1080,8 +1075,7 @@ fail: block->buf_fix_count == 0 or it is an index page which has already been removed from the buf_pool->page_hash i.e.: it is in state BUF_BLOCK_REMOVE_HASH */ -void -btr_search_drop_page_hash_index(buf_block_t* block) +void btr_search_drop_page_hash_index(buf_block_t* block) { ulint n_fields; ulint n_bytes; @@ -1151,7 +1145,7 @@ retry: #endif ut_ad(btr_search_enabled); - ut_ad(block->page.id.space() == index->table->space->id); + ut_ad(block->page.id.space() == index->table->space_id); ut_a(index_id == index->id); ut_a(!dict_index_is_ibuf(index)); #ifdef UNIV_DEBUG @@ -1277,9 +1271,8 @@ cleanup: ut_free(folds); } -/** Drop any adaptive hash index entries that may point to an index -page that may be in the buffer pool, when a page is evicted from the -buffer pool or freed in a file segment. +/** Drop possible adaptive hash index entries when a page is evicted +from the buffer pool or freed in a file, or the index is being dropped. 
@param[in] page_id page id */ void btr_search_drop_page_hash_when_freed(const page_id_t& page_id) { @@ -1406,6 +1399,13 @@ btr_search_build_page_hash_index( return; } + rec = page_rec_get_next_const(page_get_infimum_rec(page)); + + if (rec_is_default_row(rec, index)) { + rec = page_rec_get_next_const(rec); + if (!--n_recs) return; + } + /* Calculate and cache fold values and corresponding records into an array for fast insertion to the hash index */ @@ -1417,12 +1417,6 @@ btr_search_build_page_hash_index( ut_a(index->id == btr_page_get_index_id(page)); - rec = page_rec_get_next_const(page_get_infimum_rec(page)); - - if (rec_is_default_row(rec, index)) { - rec = page_rec_get_next_const(rec); - } - offsets = rec_get_offsets( rec, index, offsets, true, btr_search_get_n_fields(n_fields, n_bytes), @@ -1549,8 +1543,7 @@ btr_search_info_update_slow(btr_search_t* info, btr_cur_t* cursor) btr_search_info_update_hash(info, cursor); - bool build_index = btr_search_update_block_hash_info( - info, block, cursor); + bool build_index = btr_search_update_block_hash_info(info, block); if (build_index || (cursor->flag == BTR_CUR_HASH_FAIL)) { @@ -1651,8 +1644,7 @@ btr_search_move_or_delete_hash_entries( /** Updates the page hash index when a single record is deleted from a page. @param[in] cursor cursor which was positioned on the record to delete using btr_cur_search_, the record is not yet deleted.*/ -void -btr_search_update_hash_on_delete(btr_cur_t* cursor) +void btr_search_update_hash_on_delete(btr_cur_t* cursor) { hash_table_t* table; buf_block_t* block; @@ -2063,7 +2055,7 @@ btr_search_hash_table_validate(ulint hash_table_id) ut_a(!dict_index_is_ibuf(block->index)); ut_ad(block->page.id.space() - == block->index->table->space->id); + == block->index->table->space_id); page_index_id = btr_page_get_index_id(block->frame); diff --git a/storage/innobase/buf/buf0buddy.cc b/storage/innobase/buf/buf0buddy.cc index a1457956767..27a45654966 100644 --- a/storage/innobase/buf/buf0buddy.cc +++ b/storage/innobase/buf/buf0buddy.cc @@ -73,10 +73,6 @@ list. This value is stamped at BUF_BUDDY_STAMP_OFFSET offset */ value by the consumer of the block */ #define BUF_BUDDY_STAMP_NONFREE 0XFFFFFFFFUL -#if BUF_BUDDY_STAMP_FREE >= BUF_BUDDY_STAMP_NONFREE -# error "BUF_BUDDY_STAMP_FREE >= BUF_BUDDY_STAMP_NONFREE" -#endif - /** Return type of buf_buddy_is_free() */ enum buf_buddy_state_t { BUF_BUDDY_STATE_FREE, /*!< If the buddy to completely free */ @@ -114,6 +110,7 @@ buf_buddy_stamp_is_free( /*====================*/ const buf_buddy_free_t* buf) /*!< in: block to check */ { + compile_time_assert(BUF_BUDDY_STAMP_FREE < BUF_BUDDY_STAMP_NONFREE); return(mach_read_from_4(buf->stamp.bytes + BUF_BUDDY_STAMP_OFFSET) == BUF_BUDDY_STAMP_FREE); } @@ -138,13 +135,12 @@ buf_buddy_stamp_free( Stamps a buddy nonfree. @param[in,out] buf block to stamp @param[in] i block size */ -#define buf_buddy_stamp_nonfree(buf, i) do { \ - buf_buddy_mem_invalid(buf, i); \ - memset(buf->stamp.bytes + BUF_BUDDY_STAMP_OFFSET, 0xff, 4); \ -} while (0) -#if BUF_BUDDY_STAMP_NONFREE != 0xffffffff -# error "BUF_BUDDY_STAMP_NONFREE != 0xffffffff" -#endif +static inline void buf_buddy_stamp_nonfree(buf_buddy_free_t* buf, ulint i) +{ + buf_buddy_mem_invalid(buf, i); + compile_time_assert(BUF_BUDDY_STAMP_NONFREE == 0xffffffffU); + memset(buf->stamp.bytes + BUF_BUDDY_STAMP_OFFSET, 0xff, 4); +} /**********************************************************************//** Get the offset of the buddy of a compressed page frame. 
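The buf0buddy.cc hunks that follow keep the classic binary-buddy pairing: for a block of power-of-two size, buf_buddy_get() returns the neighbouring block whose address differs only in that size bit. A minimal sketch of the pairing rule, using offsets as stand-ins for frame addresses (not the InnoDB function itself):

#include <cstdio>

/* Toggling the size bit is equivalent to:
   (offset & size) ? offset - size : offset + size */
static unsigned long buddy_of(unsigned long offset, unsigned long size)
{
    return offset ^ size;
}

int main()
{
    std::printf("%lu\n", buddy_of(0, 1024));    /* 1024: right-hand buddy */
    std::printf("%lu\n", buddy_of(1024, 1024)); /* 0: left-hand buddy */
    std::printf("%lu\n", buddy_of(4096, 2048)); /* 6144 */
    return 0;
}

The pairing is symmetric, which is what lets buf_buddy_free() recombine a freed block with its buddy into the next larger size class.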
@@ -160,7 +156,7 @@ buf_buddy_get( ut_ad(size >= BUF_BUDDY_LOW); ut_ad(BUF_BUDDY_LOW <= UNIV_ZIP_SIZE_MIN); ut_ad(size < BUF_BUDDY_HIGH); - ut_ad(BUF_BUDDY_HIGH == UNIV_PAGE_SIZE); + ut_ad(BUF_BUDDY_HIGH == srv_page_size); ut_ad(!ut_align_offset(page, size)); if (((ulint) page) & size) { @@ -376,7 +372,7 @@ buf_buddy_alloc_zip( } /**********************************************************************//** -Deallocate a buffer frame of UNIV_PAGE_SIZE. */ +Deallocate a buffer frame of srv_page_size. */ static void buf_buddy_block_free( @@ -390,7 +386,7 @@ buf_buddy_block_free( ut_ad(buf_pool_mutex_own(buf_pool)); ut_ad(!mutex_own(&buf_pool->zip_mutex)); - ut_a(!ut_align_offset(buf, UNIV_PAGE_SIZE)); + ut_a(!ut_align_offset(buf, srv_page_size)); HASH_SEARCH(hash, buf_pool->zip_hash, fold, buf_page_t*, bpage, ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_MEMORY @@ -403,8 +399,8 @@ buf_buddy_block_free( ut_d(bpage->in_zip_hash = FALSE); HASH_DELETE(buf_page_t, hash, buf_pool->zip_hash, fold, bpage); - ut_d(memset(buf, 0, UNIV_PAGE_SIZE)); - UNIV_MEM_INVALID(buf, UNIV_PAGE_SIZE); + ut_d(memset(buf, 0, srv_page_size)); + UNIV_MEM_INVALID(buf, srv_page_size); block = (buf_block_t*) bpage; buf_page_mutex_enter(block); @@ -432,7 +428,7 @@ buf_buddy_block_register( buf_block_set_state(block, BUF_BLOCK_MEMORY); ut_a(block->frame); - ut_a(!ut_align_offset(block->frame, UNIV_PAGE_SIZE)); + ut_a(!ut_align_offset(block->frame, srv_page_size)); ut_ad(!block->page.in_page_hash); ut_ad(!block->page.in_zip_hash); @@ -489,8 +485,8 @@ buf_buddy_alloc_low( buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */ ulint i, /*!< in: index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */ - ibool* lru) /*!< in: pointer to a variable that - will be assigned TRUE if storage was + bool* lru) /*!< in: pointer to a variable that + will be assigned true if storage was allocated from the LRU list and buf_pool->mutex was temporarily released */ @@ -522,7 +518,7 @@ buf_buddy_alloc_low( /* Try replacing an uncompressed page in the buffer pool. */ buf_pool_mutex_exit(buf_pool); block = buf_LRU_get_free_block(buf_pool); - *lru = TRUE; + *lru = true; buf_pool_mutex_enter(buf_pool); alloc_big: @@ -765,7 +761,7 @@ func_exit: @param[in] buf_pool buffer pool instance @param[in] buf block to be reallocated, must be pointed to by the buffer pool -@param[in] size block size, up to UNIV_PAGE_SIZE +@param[in] size block size, up to srv_page_size @retval false if failed because of no free blocks. */ bool buf_buddy_realloc( diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 03edb023bdc..064a0eff506 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2013, 2018, MariaDB Corporation. @@ -288,8 +288,8 @@ reachable via buf_pool->chunks[]. The chains of free memory blocks (buf_pool->zip_free[]) are used by the buddy allocator (buf0buddy.cc) to keep track of currently unused -memory blocks of size sizeof(buf_page_t)..UNIV_PAGE_SIZE / 2. These -blocks are inside the UNIV_PAGE_SIZE-sized memory blocks of type +memory blocks of size sizeof(buf_page_t)..srv_page_size / 2. 
These +blocks are inside the srv_page_size-sized memory blocks of type BUF_BLOCK_MEMORY that the buddy allocator requests from the buffer pool. The buddy allocator is solely used for allocating control blocks for compressed pages (buf_page_t) and compressed page frames. @@ -542,7 +542,8 @@ buf_get_total_list_size_in_bytes( for statistics purpose */ buf_pools_list_size->LRU_bytes += buf_pool->stat.LRU_bytes; buf_pools_list_size->unzip_LRU_bytes += - UT_LIST_GET_LEN(buf_pool->unzip_LRU) * UNIV_PAGE_SIZE; + UT_LIST_GET_LEN(buf_pool->unzip_LRU) + << srv_page_size_shift; buf_pools_list_size->flush_list_bytes += buf_pool->stat.flush_list_bytes; } @@ -854,7 +855,7 @@ buf_page_is_corrupted( ib::info() << "Log sequence number at the start " << mach_read_from_4(read_buf + FIL_PAGE_LSN + 4) << " and the end " - << mach_read_from_4(read_buf + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM + 4) + << mach_read_from_4(read_buf + srv_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM + 4) << " do not match"; #endif /* UNIV_INNOCHECKSUM */ return(true); @@ -909,9 +910,7 @@ buf_page_is_corrupted( checksum_field2 = mach_read_from_4( read_buf + page_size.logical() - FIL_PAGE_END_LSN_OLD_CHKSUM); -#if FIL_PAGE_LSN % 8 -#error "FIL_PAGE_LSN must be 64 bit aligned" -#endif + compile_time_assert(!(FIL_PAGE_LSN % 8)); /* declare empty pages non-corrupted */ if (checksum_field1 == 0 @@ -1188,15 +1187,14 @@ buf_madvise_do_dump() { int ret= 0; buf_pool_t* buf_pool; - ulint n; buf_chunk_t* chunk; - /* mirrors allocation in log_sys_init() */ - if (log_sys->buf) - { - ret+= madvise(log_sys->first_in_use ? log_sys->buf - : log_sys->buf - log_sys->buf_size, - log_sys->buf_size, + /* mirrors allocation in log_t::create() */ + if (log_sys.buf) { + ret+= madvise(log_sys.first_in_use + ? log_sys.buf + : log_sys.buf - srv_log_buffer_size, + srv_log_buffer_size * 2, MADV_DODUMP); } /* mirrors recv_sys_init() */ @@ -1207,7 +1205,7 @@ buf_madvise_do_dump() buf_pool_mutex_enter_all(); - for (int i= 0; i < srv_buf_pool_instances; i++) + for (ulong i= 0; i < srv_buf_pool_instances; i++) { buf_pool = buf_pool_from_array(i); chunk = buf_pool->chunks; @@ -1462,7 +1460,7 @@ buf_block_init( buf_block_t* block, /*!< in: pointer to control block */ byte* frame) /*!< in: pointer to buffer frame */ { - UNIV_MEM_DESC(frame, UNIV_PAGE_SIZE); + UNIV_MEM_DESC(frame, srv_page_size); /* This function should only be executed at database startup or by buf_pool_resize(). Either way, adaptive hash index must not exist. */ @@ -1545,10 +1543,12 @@ buf_chunk_init( /* Round down to a multiple of page size, although it already should be. */ - mem_size = ut_2pow_round(mem_size, UNIV_PAGE_SIZE); + mem_size = ut_2pow_round(mem_size, ulint(srv_page_size)); /* Reserve space for the block descriptors. */ - mem_size += ut_2pow_round((mem_size / UNIV_PAGE_SIZE) * (sizeof *block) - + (UNIV_PAGE_SIZE - 1), UNIV_PAGE_SIZE); + mem_size += ut_2pow_round((mem_size >> srv_page_size_shift) + * (sizeof *block) + + (srv_page_size - 1), + ulint(srv_page_size)); DBUG_EXECUTE_IF("ib_buf_chunk_init_fails", return(NULL);); @@ -1582,12 +1582,12 @@ buf_chunk_init( chunk->blocks = (buf_block_t*) chunk->mem; /* Align a pointer to the first frame. Note that when - os_large_page_size is smaller than UNIV_PAGE_SIZE, + os_large_page_size is smaller than srv_page_size, we may allocate one fewer block than requested. When it is bigger, we may allocate more blocks than requested. 
*/ - frame = (byte*) ut_align(chunk->mem, UNIV_PAGE_SIZE); - chunk->size = chunk->mem_pfx.m_size / UNIV_PAGE_SIZE + frame = (byte*) ut_align(chunk->mem, srv_page_size); + chunk->size = (chunk->mem_pfx.m_size >> srv_page_size_shift) - (frame != chunk->mem); /* Subtract the space needed for block descriptors. */ @@ -1595,7 +1595,7 @@ buf_chunk_init( ulint size = chunk->size; while (frame < (byte*) (chunk->blocks + size)) { - frame += UNIV_PAGE_SIZE; + frame += srv_page_size; size--; } @@ -1611,7 +1611,7 @@ buf_chunk_init( for (i = chunk->size; i--; ) { buf_block_init(buf_pool, block, frame); - UNIV_MEM_INVALID(block->frame, UNIV_PAGE_SIZE); + UNIV_MEM_INVALID(block->frame, srv_page_size); /* Add the block to the free list */ UT_LIST_ADD_LAST(buf_pool->free, &block->page); @@ -1620,7 +1620,7 @@ buf_chunk_init( ut_ad(buf_pool_from_block(block) == buf_pool); block++; - frame += UNIV_PAGE_SIZE; + frame += srv_page_size; } buf_pool_register_chunk(chunk); @@ -1864,7 +1864,8 @@ buf_pool_init_instance( ut_min(BUF_READ_AHEAD_PAGES, ut_2_power_up(buf_pool->curr_size / BUF_READ_AHEAD_PORTION)); - buf_pool->curr_pool_size = buf_pool->curr_size * UNIV_PAGE_SIZE; + buf_pool->curr_pool_size = buf_pool->curr_size + << srv_page_size_shift; buf_pool->old_size = buf_pool->curr_size; buf_pool->n_chunks_new = buf_pool->n_chunks; @@ -2128,7 +2129,7 @@ buf_page_realloc( if (buf_page_can_relocate(&block->page)) { mutex_enter(&new_block->mutex); - memcpy(new_block->frame, block->frame, UNIV_PAGE_SIZE); + memcpy(new_block->frame, block->frame, srv_page_size); memcpy(&new_block->page, &block->page, sizeof block->page); /* relocate LRU list */ @@ -2192,7 +2193,7 @@ buf_page_realloc( buf_block_modify_clock_inc(block); memset(block->frame + FIL_PAGE_OFFSET, 0xff, 4); memset(block->frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, 0xff, 4); - UNIV_MEM_INVALID(block->frame, UNIV_PAGE_SIZE); + UNIV_MEM_INVALID(block->frame, srv_page_size); buf_block_set_state(block, BUF_BLOCK_REMOVE_HASH); block->page.id.reset(); @@ -2315,7 +2316,7 @@ buf_frame_will_withdrawn( while (chunk < echunk) { if (ptr >= chunk->blocks->frame && ptr < (chunk->blocks + chunk->size - 1)->frame - + UNIV_PAGE_SIZE) { + + srv_page_size) { return(true); } ++chunk; @@ -2653,7 +2654,7 @@ buf_pool_resize() ut_ad(srv_buf_pool_chunk_unit > 0); new_instance_size = srv_buf_pool_size / srv_buf_pool_instances; - new_instance_size /= UNIV_PAGE_SIZE; + new_instance_size >>= srv_page_size_shift; buf_resize_status("Resizing buffer pool from " ULINTPF " to " ULINTPF " (unit=" ULINTPF ").", @@ -2672,7 +2673,8 @@ buf_pool_resize() buf_pool->curr_size = new_instance_size; - buf_pool->n_chunks_new = new_instance_size * UNIV_PAGE_SIZE + buf_pool->n_chunks_new = + (new_instance_size << srv_page_size_shift) / srv_buf_pool_chunk_unit; buf_pool_mutex_exit(buf_pool); @@ -2911,6 +2913,9 @@ withdraw_retry: = buf_pool->n_chunks; warning = true; buf_pool->chunks_old = NULL; + for (ulint j = 0; j < buf_pool->n_chunks_new; j++) { + buf_pool_register_chunk(&(buf_pool->chunks[j])); + } goto calc_buf_pool_size; } @@ -3005,7 +3010,7 @@ calc_buf_pool_size: ut_2_power_up(buf_pool->curr_size / BUF_READ_AHEAD_PORTION)); buf_pool->curr_pool_size - = buf_pool->curr_size * UNIV_PAGE_SIZE; + = buf_pool->curr_size << srv_page_size_shift; curr_size += buf_pool->curr_pool_size; buf_pool->old_size = buf_pool->curr_size; } @@ -3057,7 +3062,8 @@ calc_buf_pool_size: buf_resize_status("Resizing also other hash tables."); /* normalize lock_sys */ - srv_lock_table_size = 5 * (srv_buf_pool_size / UNIV_PAGE_SIZE); + 
srv_lock_table_size = 5 + * (srv_buf_pool_size >> srv_page_size_shift); lock_sys.resize(srv_lock_table_size); /* normalize btr_search_sys */ @@ -3970,7 +3976,7 @@ buf_zip_decompress( if (page_zip_decompress(&block->page.zip, block->frame, TRUE)) { if (space) { - fil_space_release_for_io(space); + space->release_for_io(); } return(TRUE); } @@ -3989,7 +3995,7 @@ buf_zip_decompress( /* Copy to uncompressed storage. */ memcpy(block->frame, frame, block->page.size.physical()); if (space) { - fil_space_release_for_io(space); + space->release_for_io(); } return(TRUE); @@ -4014,7 +4020,7 @@ err_exit: dict_set_corrupted_by_space(space); } - fil_space_release_for_io(space); + space->release_for_io(); } return(FALSE); @@ -4045,16 +4051,16 @@ buf_block_from_ahi(const byte* ptr) chunk = (--it)->second; } - ulint offs = ptr - chunk->blocks->frame; + ulint offs = ulint(ptr - chunk->blocks->frame); - offs >>= UNIV_PAGE_SIZE_SHIFT; + offs >>= srv_page_size_shift; ut_a(offs < chunk->size); buf_block_t* block = &chunk->blocks[offs]; /* The function buf_chunk_init() invokes buf_block_init() so that - block[n].frame == block->frame + n * UNIV_PAGE_SIZE. Check it. */ + block[n].frame == block->frame + n * srv_page_size. Check it. */ ut_ad(block->frame == page_align(ptr)); /* Read the state of the block without holding a mutex. A state transition from BUF_BLOCK_FILE_PAGE to @@ -4387,6 +4393,11 @@ loop: ibuf_inside(mtr)); retries = 0; + } else if (mode == BUF_GET_POSSIBLY_FREED) { + if (err) { + *err = local_err; + } + return NULL; } else if (retries < BUF_PAGE_READ_MAX_RETRIES) { ++retries; @@ -4418,7 +4429,7 @@ loop: = fil_space_acquire_for_io( page_id.space())) { bool set = dict_set_corrupted_by_space(space); - fil_space_release_for_io(space); + space->release_for_io(); if (set) { return NULL; } @@ -5253,7 +5264,7 @@ buf_page_init( /* Silence valid Valgrind warnings about uninitialized data being written to data files. There are some unused bytes on some pages that InnoDB does not initialize. */ - UNIV_MEM_VALID(block->frame, UNIV_PAGE_SIZE); + UNIV_MEM_VALID(block->frame, srv_page_size); } #endif /* UNIV_DEBUG_VALGRIND */ @@ -5337,7 +5348,7 @@ buf_page_init_for_read( buf_page_t* watch_page; rw_lock_t* hash_lock; mtr_t mtr; - ibool lru = FALSE; + bool lru = false; void* data; buf_pool_t* buf_pool = buf_pool_get(page_id); @@ -5628,7 +5639,7 @@ buf_page_create( if (page_size.is_compressed()) { void* data; - ibool lru; + bool lru; /* Prevent race conditions during buf_buddy_alloc(), which may release and reacquire buf_pool->mutex, @@ -5864,7 +5875,7 @@ static dberr_t buf_page_check_corrupt(buf_page_t* bpage, fil_space_t* space) { - ut_ad(space->n_pending_ios > 0); + ut_ad(space->pending_io()); byte* dst_frame = (bpage->zip.data) ? 
bpage->zip.data : ((buf_block_t*) bpage)->frame; @@ -5987,7 +5998,7 @@ buf_page_io_complete(buf_page_t* bpage, bool dblwr, bool evict) my_atomic_addlint(&buf_pool->n_pend_unzip, 1); ibool ok = buf_zip_decompress((buf_block_t*) bpage, FALSE); - my_atomic_addlint(&buf_pool->n_pend_unzip, -1); + my_atomic_addlint(&buf_pool->n_pend_unzip, ulint(-1)); if (!ok) { ib::info() << "Page " @@ -6044,7 +6055,7 @@ database_corrupted: buf_mark_space_corrupt(bpage, space); ib::info() << "Simulated IMPORT " "corruption"; - fil_space_release_for_io(space); + space->release_for_io(); return(err); } err = DB_SUCCESS; @@ -6086,7 +6097,7 @@ database_corrupted: } buf_mark_space_corrupt(bpage, space); - fil_space_release_for_io(space); + space->release_for_io(); return(err); } } @@ -6129,7 +6140,7 @@ database_corrupted: } - fil_space_release_for_io(space); + space->release_for_io(); } else { /* io_type == BUF_IO_WRITE */ if (bpage->slot) { @@ -7340,25 +7351,25 @@ buf_pool_reserve_tmp_slot( /* Allocate temporary memory for encryption/decryption */ if (free_slot->crypt_buf == NULL) { - free_slot->crypt_buf = static_cast(aligned_malloc(UNIV_PAGE_SIZE, UNIV_PAGE_SIZE)); - memset(free_slot->crypt_buf, 0, UNIV_PAGE_SIZE); + free_slot->crypt_buf = static_cast(aligned_malloc(srv_page_size, srv_page_size)); + memset(free_slot->crypt_buf, 0, srv_page_size); } /* For page compressed tables allocate temporary memory for compression/decompression */ if (compressed && free_slot->comp_buf == NULL) { - ulint size = UNIV_PAGE_SIZE; + ulint size = srv_page_size; /* Both snappy and lzo compression methods require that output buffer used for compression is bigger than input buffer. Increase the allocated buffer size accordingly. */ -#if HAVE_SNAPPY +#if defined(HAVE_SNAPPY) size = snappy_max_compressed_length(size); #endif -#if HAVE_LZO +#if defined(HAVE_LZO) size += LZO1X_1_15_MEM_COMPRESS; #endif - free_slot->comp_buf = static_cast(aligned_malloc(size, UNIV_PAGE_SIZE)); + free_slot->comp_buf = static_cast(aligned_malloc(size, srv_page_size)); memset(free_slot->comp_buf, 0, size); } @@ -7380,7 +7391,7 @@ buf_page_encrypt_before_write( byte* src_frame) { ut_ad(space->id == bpage->id.space()); - bpage->real_size = UNIV_PAGE_SIZE; + bpage->real_size = srv_page_size; fil_page_type_validate(src_frame); @@ -7481,7 +7492,7 @@ static bool buf_page_decrypt_after_read(buf_page_t* bpage, fil_space_t* space) { - ut_ad(space->n_pending_ios > 0); + ut_ad(space->pending_io()); ut_ad(space->id == bpage->id.space()); bool compressed = bpage->size.is_compressed(); @@ -7575,8 +7586,8 @@ buf_page_decrypt_after_read(buf_page_t* bpage, fil_space_t* space) } } - ut_ad(space->n_pending_ios > 0); - return (success); + ut_ad(space->pending_io()); + return success; } /** diff --git a/storage/innobase/buf/buf0checksum.cc b/storage/innobase/buf/buf0checksum.cc index 4b56cc81e98..732a58ed85a 100644 --- a/storage/innobase/buf/buf0checksum.cc +++ b/storage/innobase/buf/buf0checksum.cc @@ -49,7 +49,7 @@ when it is written to a file and also checked for a match when reading from the file. When reading we allow both normal CRC32 and CRC-legacy-big-endian variants. Note that we must be careful to calculate the same value on 32-bit and 64-bit architectures. 
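As the buf0checksum.cc hunk below shows, buf_calc_page_crc32() excludes the checksum fields themselves by hashing two byte ranges of the page and combining the results (c1 ^ c2). A rough standalone sketch of the same two-range idea, using the standard InnoDB field offsets for a 16KiB page and zlib's crc32() as a stand-in for the server's optimised routine:

#include <zlib.h>
#include <cstdio>
#include <cstring>

int main()
{
    enum {
        PAGE_SIZE_                 = 16384,
        FIL_PAGE_OFFSET_           = 4,  /* start of range 1 */
        FIL_PAGE_FILE_FLUSH_LSN_   = 26, /* end of range 1 */
        FIL_PAGE_DATA_             = 38, /* start of range 2 */
        FIL_PAGE_END_LSN_OLD_CHKSUM_ = 8 /* trailer bytes excluded */
    };

    static unsigned char page[PAGE_SIZE_];
    std::memset(page, 0xab, sizeof page); /* dummy page contents */

    /* Range 1: header bytes between the stored checksum and the flush LSN. */
    uLong c1 = crc32(0L, page + FIL_PAGE_OFFSET_,
                     FIL_PAGE_FILE_FLUSH_LSN_ - FIL_PAGE_OFFSET_);
    /* Range 2: page body up to the old-style checksum in the trailer. */
    uLong c2 = crc32(0L, page + FIL_PAGE_DATA_,
                     PAGE_SIZE_ - FIL_PAGE_DATA_ - FIL_PAGE_END_LSN_OLD_CHKSUM_);

    std::printf("page checksum: %08lx\n", (unsigned long) (c1 ^ c2));
    return 0;
}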
-@param[in] page buffer page (UNIV_PAGE_SIZE bytes) +@param[in] page buffer page (srv_page_size bytes) @param[in] use_legacy_big_endian if true then use big endian byteorder when converting byte strings to integers @return checksum */ @@ -76,7 +76,7 @@ buf_calc_page_crc32( const uint32_t c2 = crc32_func( page + FIL_PAGE_DATA, - UNIV_PAGE_SIZE - FIL_PAGE_DATA - FIL_PAGE_END_LSN_OLD_CHKSUM); + srv_page_size - FIL_PAGE_DATA - FIL_PAGE_END_LSN_OLD_CHKSUM); return(c1 ^ c2); } @@ -103,7 +103,7 @@ buf_calc_page_new_checksum(const byte* page) FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION - FIL_PAGE_OFFSET) + ut_fold_binary(page + FIL_PAGE_DATA, - UNIV_PAGE_SIZE - FIL_PAGE_DATA + srv_page_size - FIL_PAGE_DATA - FIL_PAGE_END_LSN_OLD_CHKSUM); return(static_cast(checksum)); } diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc index 505aebc7217..e219589c4a2 100644 --- a/storage/innobase/buf/buf0dblwr.cc +++ b/storage/innobase/buf/buf0dblwr.cc @@ -149,11 +149,11 @@ buf_dblwr_init( ut_zalloc_nokey(buf_size * sizeof(bool))); buf_dblwr->write_buf_unaligned = static_cast( - ut_malloc_nokey((1 + buf_size) * UNIV_PAGE_SIZE)); + ut_malloc_nokey((1 + buf_size) << srv_page_size_shift)); buf_dblwr->write_buf = static_cast( ut_align(buf_dblwr->write_buf_unaligned, - UNIV_PAGE_SIZE)); + srv_page_size)); buf_dblwr->buf_block_arr = static_cast( ut_zalloc_nokey(buf_size * sizeof(void*))); @@ -214,7 +214,8 @@ too_small: << "Cannot create doublewrite buffer: " "the first file in innodb_data_file_path" " must be at least " - << (3 * (FSP_EXTENT_SIZE * UNIV_PAGE_SIZE) >> 20) + << (3 * (FSP_EXTENT_SIZE + >> (20U - srv_page_size_shift))) << "M."; mtr.commit(); return(false); @@ -363,10 +364,10 @@ buf_dblwr_init_or_load_pages( /* We do the file i/o past the buffer pool */ unaligned_read_buf = static_cast( - ut_malloc_nokey(3 * UNIV_PAGE_SIZE)); + ut_malloc_nokey(3U << srv_page_size_shift)); read_buf = static_cast( - ut_align(unaligned_read_buf, UNIV_PAGE_SIZE)); + ut_align(unaligned_read_buf, srv_page_size)); /* Read the trx sys header to check if we are using the doublewrite buffer */ @@ -376,8 +377,8 @@ buf_dblwr_init_or_load_pages( err = os_file_read( read_request, - file, read_buf, TRX_SYS_PAGE_NO * UNIV_PAGE_SIZE, - UNIV_PAGE_SIZE); + file, read_buf, TRX_SYS_PAGE_NO << srv_page_size_shift, + srv_page_size); if (err != DB_SUCCESS) { @@ -425,8 +426,8 @@ buf_dblwr_init_or_load_pages( /* Read the pages from the doublewrite buffer to memory */ err = os_file_read( read_request, - file, buf, block1 * UNIV_PAGE_SIZE, - TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE); + file, buf, block1 << srv_page_size_shift, + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE << srv_page_size_shift); if (err != DB_SUCCESS) { @@ -442,9 +443,9 @@ buf_dblwr_init_or_load_pages( err = os_file_read( read_request, file, - buf + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE, - block2 * UNIV_PAGE_SIZE, - TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE); + buf + (TRX_SYS_DOUBLEWRITE_BLOCK_SIZE << srv_page_size_shift), + block2 << srv_page_size_shift, + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE << srv_page_size_shift); if (err != DB_SUCCESS) { @@ -484,8 +485,8 @@ buf_dblwr_init_or_load_pages( err = os_file_write( write_request, path, file, page, - source_page_no * UNIV_PAGE_SIZE, - UNIV_PAGE_SIZE); + source_page_no << srv_page_size_shift, + srv_page_size); if (err != DB_SUCCESS) { ib::error() @@ -503,7 +504,7 @@ buf_dblwr_init_or_load_pages( recv_dblwr.add(page); } - page += univ_page_size.physical(); + page += srv_page_size; } if (reset_space_ids) { @@ 
-529,10 +530,10 @@ buf_dblwr_process() } unaligned_read_buf = static_cast( - ut_malloc_nokey(2 * UNIV_PAGE_SIZE)); + ut_malloc_nokey(2U << srv_page_size_shift)); read_buf = static_cast( - ut_align(unaligned_read_buf, UNIV_PAGE_SIZE)); + ut_align(unaligned_read_buf, srv_page_size)); for (recv_dblwr_t::list::iterator i = recv_dblwr.pages.begin(); i != recv_dblwr.pages.end(); @@ -791,14 +792,14 @@ buf_dblwr_check_page_lsn( } if (memcmp(page + (FIL_PAGE_LSN + 4), - page + (UNIV_PAGE_SIZE + page + (srv_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM + 4), 4)) { const ulint lsn1 = mach_read_from_4( page + FIL_PAGE_LSN + 4); const ulint lsn2 = mach_read_from_4( - page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM + page + srv_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM + 4); ib::error() << "The page to be written seems corrupt!" @@ -1001,7 +1002,7 @@ try_again: for (ulint len2 = 0, i = 0; i < buf_dblwr->first_free; - len2 += UNIV_PAGE_SIZE, i++) { + len2 += srv_page_size, i++) { const buf_block_t* block; @@ -1024,8 +1025,8 @@ try_again: } /* Write out the first block of the doublewrite buffer */ - len = ut_min(TRX_SYS_DOUBLEWRITE_BLOCK_SIZE, - buf_dblwr->first_free) * UNIV_PAGE_SIZE; + len = std::min(TRX_SYS_DOUBLEWRITE_BLOCK_SIZE, + buf_dblwr->first_free) << srv_page_size_shift; fil_io(IORequestWrite, true, page_id_t(TRX_SYS_SPACE, buf_dblwr->block1), univ_page_size, @@ -1038,10 +1039,10 @@ try_again: /* Write out the second block of the doublewrite buffer. */ len = (buf_dblwr->first_free - TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) - * UNIV_PAGE_SIZE; + << srv_page_size_shift; write_buf = buf_dblwr->write_buf - + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE; + + (TRX_SYS_DOUBLEWRITE_BLOCK_SIZE << srv_page_size_shift); fil_io(IORequestWrite, true, page_id_t(TRX_SYS_SPACE, buf_dblwr->block2), univ_page_size, @@ -1123,7 +1124,7 @@ try_again: } byte* p = buf_dblwr->write_buf - + univ_page_size.physical() * buf_dblwr->first_free; + + srv_page_size * buf_dblwr->first_free; /* We request frame here to get correct buffer in case of encryption and/or page compression */ @@ -1136,7 +1137,7 @@ try_again: memcpy(p, frame, bpage->size.physical()); memset(p + bpage->size.physical(), 0x0, - univ_page_size.physical() - bpage->size.physical()); + srv_page_size - bpage->size.physical()); } else { ut_a(buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE); @@ -1266,20 +1267,20 @@ retry: void * frame = buf_page_get_frame(bpage); if (bpage->size.is_compressed()) { - memcpy(buf_dblwr->write_buf + univ_page_size.physical() * i, + memcpy(buf_dblwr->write_buf + srv_page_size * i, frame, bpage->size.physical()); - memset(buf_dblwr->write_buf + univ_page_size.physical() * i + memset(buf_dblwr->write_buf + srv_page_size * i + bpage->size.physical(), 0x0, - univ_page_size.physical() - bpage->size.physical()); + srv_page_size - bpage->size.physical()); fil_io(IORequestWrite, true, page_id_t(TRX_SYS_SPACE, offset), univ_page_size, 0, - univ_page_size.physical(), - (void *)(buf_dblwr->write_buf + univ_page_size.physical() * i), + srv_page_size, + (void *)(buf_dblwr->write_buf + srv_page_size * i), NULL); } else { /* It is a regular page. 
Write it directly to the @@ -1289,7 +1290,7 @@ retry: page_id_t(TRX_SYS_SPACE, offset), univ_page_size, 0, - univ_page_size.physical(), + srv_page_size, (void*) frame, NULL); } diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc index 3cdf0160f25..a0c4baad64d 100644 --- a/storage/innobase/buf/buf0dump.cc +++ b/storage/innobase/buf/buf0dump.cc @@ -698,7 +698,7 @@ buf_load() if (this_space_id != cur_space_id) { if (space != NULL) { - fil_space_release(space); + space->release(); } cur_space_id = this_space_id; @@ -730,7 +730,7 @@ buf_load() if (buf_load_abort_flag) { if (space != NULL) { - fil_space_release(space); + space->release(); } buf_load_abort_flag = FALSE; ut_free(dump); @@ -761,7 +761,7 @@ buf_load() } if (space != NULL) { - fil_space_release(space); + space->release(); } ut_free(dump); diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 4bd99ccd7d8..59762152b41 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -902,7 +902,7 @@ buf_flush_init_for_writing( /* Write the newest modification lsn to the page header and trailer */ mach_write_to_8(page + FIL_PAGE_LSN, newest_lsn); - mach_write_to_8(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM, + mach_write_to_8(page + srv_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM, newest_lsn); if (block && srv_page_size == 16384) { @@ -967,7 +967,7 @@ buf_flush_init_for_writing( } } - uint32_t checksum; + uint32_t checksum= 0; switch (srv_checksum_algorithm_t(srv_checksum_algorithm)) { case SRV_CHECKSUM_ALGORITHM_INNODB: @@ -998,7 +998,7 @@ buf_flush_init_for_writing( new enum is added and not handled here */ } - mach_write_to_4(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM, + mach_write_to_4(page + srv_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM, checksum); } @@ -1138,7 +1138,7 @@ buf_flush_write_block_low( ut_ad(err == DB_SUCCESS); } - fil_space_release_for_io(space); + space->release_for_io(); /* Increment the counter of I/O operations used for selecting LRU policy. */ @@ -2439,7 +2439,7 @@ page_cleaner_flush_pages_recommendation( cur_lsn = log_get_lsn_nowait(); - /* log_get_lsn_nowait tries to get log_sys->mutex with + /* log_get_lsn_nowait tries to get log_sys.mutex with mutex_enter_nowait, if this does not succeed function returns 0, do not use that value to update stats. */ if (cur_lsn == 0) { @@ -2778,8 +2778,8 @@ pc_flush_slot(void) { ulint lru_tm = 0; ulint list_tm = 0; - int lru_pass = 0; - int list_pass = 0; + ulint lru_pass = 0; + ulint list_pass = 0; mutex_enter(&page_cleaner.mutex); @@ -2983,17 +2983,10 @@ buf_flush_page_cleaner_disabled_loop(void) } /** Disables page cleaner threads (coordinator and workers). -It's used by: SET GLOBAL innodb_page_cleaner_disabled_debug = 1 (0). 
-@param[in] thd thread handle -@param[in] var pointer to system variable -@param[out] var_ptr where the formal string goes @param[in] save immediate result from check function */ -void -buf_flush_page_cleaner_disabled_debug_update( - THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save) +void buf_flush_page_cleaner_disabled_debug_update(THD*, + st_mysql_sys_var*, void*, + const void* save) { if (!page_cleaner.is_running) { return; diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index 7cc61294b8f..252f37b4495 100644 --- a/storage/innobase/buf/buf0lru.cc +++ b/storage/innobase/buf/buf0lru.cc @@ -59,9 +59,6 @@ static const ulint BUF_LRU_OLD_TOLERANCE = 20; (that is, when there are more than BUF_LRU_OLD_MIN_LEN blocks). @see buf_LRU_old_adjust_len */ #define BUF_LRU_NON_OLD_MIN_LEN 5 -#if BUF_LRU_NON_OLD_MIN_LEN >= BUF_LRU_OLD_MIN_LEN -# error "BUF_LRU_NON_OLD_MIN_LEN >= BUF_LRU_OLD_MIN_LEN" -#endif /** When dropping the search hash index entries before deleting an ibd file, we build a local array of pages belonging to that tablespace @@ -228,24 +225,17 @@ particular space id. @param[in] count number of entries in array */ static void -buf_LRU_drop_page_hash_batch( - ulint space_id, - const ulint* arr, - ulint count) +buf_LRU_drop_page_hash_batch(ulint space_id, const ulint* arr, ulint count) { ut_ad(count <= BUF_LRU_DROP_SEARCH_SIZE); - for (ulint i = 0; i < count; ++i, ++arr) { + for (const ulint* const end = arr + count; arr != end; ) { /* While our only caller buf_LRU_drop_page_hash_for_tablespace() is being executed for DROP TABLE or similar, - the table cannot be evicted from the buffer pool. - Note: this should not be executed for DROP TABLESPACE, - because DROP TABLESPACE would be refused if tables existed - in the tablespace, and a previous DROP TABLE would have - already removed the AHI entries. */ + the table cannot be evicted from the buffer pool. */ btr_search_drop_page_hash_when_freed( - page_id_t(space_id, *arr)); + page_id_t(space_id, *arr++)); } } @@ -362,6 +352,28 @@ next_page: buf_LRU_drop_page_hash_batch(id, page_arr, num_entries); ut_free(page_arr); } + +/** Drop the adaptive hash index for a tablespace. +@param[in,out] table table */ +void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table) +{ + for (dict_index_t* index = dict_table_get_first_index(table); + index != NULL; + index = dict_table_get_next_index(index)) { + if (btr_search_info_get_ref_count(btr_search_get_info(index), + index)) { + goto drop_ahi; + } + } + + return; +drop_ahi: + ulint id = table->space_id; + for (ulint i = 0; i < srv_buf_pool_instances; i++) { + buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i), + id); + } +} #endif /* BTR_CUR_HASH_ADAPT */ /******************************************************************//** @@ -685,26 +697,13 @@ buf_flush_dirty_pages( @param[in] id tablespace identifier @param[in] observer flush observer, or NULL if nothing is to be written */ -void -buf_LRU_flush_or_remove_pages( - ulint id, - FlushObserver* observer -#ifdef BTR_CUR_HASH_ADAPT - , bool drop_ahi /*!< whether to drop the adaptive hash index */ -#endif /* BTR_CUR_HASH_ADAPT */ - ) +void buf_LRU_flush_or_remove_pages(ulint id, FlushObserver* observer) { /* Pages in the system tablespace must never be discarded. 
*/ ut_ad(id || observer); for (ulint i = 0; i < srv_buf_pool_instances; i++) { - buf_pool_t* buf_pool = buf_pool_from_array(i); -#ifdef BTR_CUR_HASH_ADAPT - if (drop_ahi) { - buf_LRU_drop_page_hash_for_tablespace(buf_pool, id); - } -#endif /* BTR_CUR_HASH_ADAPT */ - buf_flush_dirty_pages(buf_pool, id, observer); + buf_flush_dirty_pages(buf_pool_from_array(i), id, observer); } if (observer && !observer->is_interrupted()) { @@ -951,7 +950,7 @@ buf_LRU_get_free_only( assert_block_ahi_empty(block); buf_block_set_state(block, BUF_BLOCK_READY_FOR_USE); - UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE); + UNIV_MEM_ALLOC(block->frame, srv_page_size); ut_ad(buf_pool_from_block(block) == buf_pool); @@ -998,7 +997,7 @@ buf_LRU_check_size_of_non_data_objects( " Check that your transactions do not set too many" " row locks, or review if" " innodb_buffer_pool_size=" - << (buf_pool->curr_size >> (20 - UNIV_PAGE_SIZE_SHIFT)) + << (buf_pool->curr_size >> (20U - srv_page_size_shift)) << "M could be bigger."; } else if (!recv_recovery_is_on() && buf_pool->curr_size == buf_pool->old_size @@ -1021,7 +1020,7 @@ buf_LRU_check_size_of_non_data_objects( " set too many row locks." " innodb_buffer_pool_size=" << (buf_pool->curr_size >> - (20 - UNIV_PAGE_SIZE_SHIFT)) << "M." + (20U - srv_page_size_shift)) << "M." " Starting the InnoDB Monitor to print" " diagnostics."; @@ -1208,9 +1207,11 @@ buf_LRU_old_adjust_len( ut_ad(buf_pool_mutex_own(buf_pool)); ut_ad(buf_pool->LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN); ut_ad(buf_pool->LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX); -#if BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5) -# error "BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)" -#endif + compile_time_assert(BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN + > BUF_LRU_OLD_RATIO_DIV + * (BUF_LRU_OLD_TOLERANCE + 5)); + compile_time_assert(BUF_LRU_NON_OLD_MIN_LEN < BUF_LRU_OLD_MIN_LEN); + #ifdef UNIV_LRU_DEBUG /* buf_pool->LRU_old must be the first item in the LRU list whose "old" flag is set. 
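/* The #if/#error blocks that guarded constant relationships (here the LRU
length limits, later DATA_ROW_ID/DATA_TRX_ID/DATA_ROLL_PTR and
DICT_MAX_FIXED_COL_LEN) become compile_time_assert() calls in this patch.
compile_time_assert is an InnoDB macro; the static_assert-based stand-in below
only illustrates the idea, using the DATA_* values the patch itself asserts. */
#define compile_time_assert_demo(cond) static_assert(cond, #cond)

constexpr int DATA_ROW_ID_DEMO   = 0;   /* stand-ins for the real constants */
constexpr int DATA_TRX_ID_DEMO   = 1;
constexpr int DATA_ROLL_PTR_DEMO = 2;

compile_time_assert_demo(DATA_ROW_ID_DEMO == 0);
compile_time_assert_demo(DATA_TRX_ID_DEMO == 1);
compile_time_assert_demo(DATA_ROLL_PTR_DEMO == 2);

int main() { return 0; }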
*/ @@ -1758,10 +1759,10 @@ func_exit: order to avoid bogus Valgrind warnings.*/ UNIV_MEM_VALID(((buf_block_t*) bpage)->frame, - UNIV_PAGE_SIZE); + srv_page_size); btr_search_drop_page_hash_index((buf_block_t*) bpage); UNIV_MEM_INVALID(((buf_block_t*) bpage)->frame, - UNIV_PAGE_SIZE); + srv_page_size); if (b != NULL) { @@ -1827,10 +1828,10 @@ buf_LRU_block_free_non_file_page( buf_block_set_state(block, BUF_BLOCK_NOT_USED); - UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE); + UNIV_MEM_ALLOC(block->frame, srv_page_size); #ifdef UNIV_DEBUG /* Wipe contents of page to reveal possible stale pointers to it */ - memset(block->frame, '\0', UNIV_PAGE_SIZE); + memset(block->frame, '\0', srv_page_size); #else /* Wipe page_no and space_id */ memset(block->frame + FIL_PAGE_OFFSET, 0xfe, 4); @@ -1871,7 +1872,7 @@ buf_LRU_block_free_non_file_page( ut_d(block->page.in_free_list = TRUE); } - UNIV_MEM_FREE(block->frame, UNIV_PAGE_SIZE); + UNIV_MEM_FREE(block->frame, srv_page_size); } /******************************************************************//** @@ -1920,7 +1921,7 @@ buf_LRU_block_remove_hashed( case BUF_BLOCK_FILE_PAGE: UNIV_MEM_ASSERT_W(bpage, sizeof(buf_block_t)); UNIV_MEM_ASSERT_W(((buf_block_t*) bpage)->frame, - UNIV_PAGE_SIZE); + srv_page_size); buf_block_modify_clock_inc((buf_block_t*) bpage); if (bpage->zip.data) { const page_t* page = ((buf_block_t*) bpage)->frame; @@ -1949,11 +1950,11 @@ buf_LRU_block_remove_hashed( break; case FIL_PAGE_INDEX: case FIL_PAGE_RTREE: -#ifdef UNIV_ZIP_DEBUG +#if defined UNIV_ZIP_DEBUG && defined BTR_CUR_HASH_ADAPT ut_a(page_zip_validate( &bpage->zip, page, ((buf_block_t*) bpage)->index)); -#endif /* UNIV_ZIP_DEBUG */ +#endif /* UNIV_ZIP_DEBUG && BTR_CUR_HASH_ADAPT */ break; default: ib::error() << "The compressed page to be" @@ -2069,7 +2070,7 @@ buf_LRU_block_remove_hashed( memset(((buf_block_t*) bpage)->frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, 0xff, 4); UNIV_MEM_INVALID(((buf_block_t*) bpage)->frame, - UNIV_PAGE_SIZE); + srv_page_size); buf_page_set_state(bpage, BUF_BLOCK_REMOVE_HASH); /* Question: If we release bpage and hash mutex here diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index 598da3ff706..00b2ac378db 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -184,7 +184,7 @@ buf_read_page_low( type = 0; sync = true; } - fil_space_release(space); + space->release(); }); IORequest request(type | IORequest::READ); @@ -314,7 +314,7 @@ buf_read_ahead_random( if (high > space->size) { high = space->size; } - fil_space_release(space); + space->release(); } else { return(0); } @@ -337,7 +337,7 @@ buf_read_ahead_random( if (fil_space_t* space = fil_space_acquire( page_id.space())) { bool skip = !strcmp(space->name, "test/t1"); - fil_space_release(space); + space->release(); if (skip) { high = space->size; buf_pool_mutex_exit(buf_pool); @@ -610,7 +610,7 @@ buf_read_ahead_linear( if (fil_space_t* space = fil_space_acquire(page_id.space())) { space_size = space->size; - fil_space_release(space); + space->release(); if (high > space_size) { /* The area is not whole */ @@ -838,7 +838,7 @@ buf_read_ibuf_merge_pages( in the arrays */ { #ifdef UNIV_IBUF_DEBUG - ut_a(n_stored < UNIV_PAGE_SIZE); + ut_a(n_stored < srv_page_size); #endif for (ulint i = 0; i < n_stored; i++) { diff --git a/storage/innobase/data/data0data.cc b/storage/innobase/data/data0data.cc index 6601edfec9d..809a60d6431 100644 --- a/storage/innobase/data/data0data.cc +++ b/storage/innobase/data/data0data.cc @@ -1,7 +1,7 @@ 
/***************************************************************************** Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -346,7 +346,7 @@ dfield_print_also_hex( val = mach_read_from_1(data); if (!(prtype & DATA_UNSIGNED)) { - val &= ~0x80; + val &= ~0x80U; fprintf(stderr, "%ld", (long) val); } else { fprintf(stderr, "%lu", (ulong) val); @@ -357,7 +357,7 @@ dfield_print_also_hex( val = mach_read_from_2(data); if (!(prtype & DATA_UNSIGNED)) { - val &= ~0x8000; + val &= ~0x8000U; fprintf(stderr, "%ld", (long) val); } else { fprintf(stderr, "%lu", (ulong) val); @@ -368,7 +368,7 @@ dfield_print_also_hex( val = mach_read_from_3(data); if (!(prtype & DATA_UNSIGNED)) { - val &= ~0x800000; + val &= ~0x800000U; fprintf(stderr, "%ld", (long) val); } else { fprintf(stderr, "%lu", (ulong) val); diff --git a/storage/innobase/dict/dict0boot.cc b/storage/innobase/dict/dict0boot.cc index 047cc3abe14..cd4f6cab91c 100644 --- a/storage/innobase/dict/dict0boot.cc +++ b/storage/innobase/dict/dict0boot.cc @@ -368,7 +368,7 @@ dict_boot(void) ut_a(index); ut_ad(!table->is_instant()); table->indexes.start->n_core_null_bytes = UT_BITS_IN_BYTES( - table->indexes.start->n_nullable); + unsigned(table->indexes.start->n_nullable)); /*-------------------------*/ index = dict_mem_index_create(table, "ID_IND", DICT_UNIQUE, 1); @@ -410,7 +410,7 @@ dict_boot(void) ut_a(index); ut_ad(!table->is_instant()); table->indexes.start->n_core_null_bytes = UT_BITS_IN_BYTES( - table->indexes.start->n_nullable); + unsigned(table->indexes.start->n_nullable)); /*-------------------------*/ table = dict_mem_table_create("SYS_INDEXES", fil_system.sys_space, @@ -453,7 +453,7 @@ dict_boot(void) ut_a(index); ut_ad(!table->is_instant()); table->indexes.start->n_core_null_bytes = UT_BITS_IN_BYTES( - table->indexes.start->n_nullable); + unsigned(table->indexes.start->n_nullable)); /*-------------------------*/ table = dict_mem_table_create("SYS_FIELDS", fil_system.sys_space, @@ -482,7 +482,7 @@ dict_boot(void) ut_a(index); ut_ad(!table->is_instant()); table->indexes.start->n_core_null_bytes = UT_BITS_IN_BYTES( - table->indexes.start->n_nullable); + unsigned(table->indexes.start->n_nullable)); mtr_commit(&mtr); diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc index fa9eeeae620..801d96fd410 100644 --- a/storage/innobase/dict/dict0crea.cc +++ b/storage/innobase/dict/dict0crea.cc @@ -101,13 +101,11 @@ dict_create_sys_tables_tuple( /* If there is any virtual column, encode it in N_COLS */ mach_write_to_4(ptr, dict_table_encode_n_col( - static_cast(table->n_cols - - DATA_N_SYS_COLS), - static_cast(table->n_v_def)) - | ((table->flags & DICT_TF_COMPACT) << 31)); + ulint(table->n_cols - DATA_N_SYS_COLS), + ulint(table->n_v_def)) + | (ulint(table->flags & DICT_TF_COMPACT) << 31)); dfield_set_data(dfield, ptr, 4); - /* 5: TYPE (table flags) -----------------------------*/ dfield = dtuple_get_nth_field( entry, DICT_COL__SYS_TABLES__TYPE); @@ -194,7 +192,7 @@ dict_create_sys_columns_tuple( v_col_no = column->ind; } else { column = dict_table_get_nth_col(table, i); - ut_ad(!dict_col_is_virtual(column)); + ut_ad(!column->is_virtual()); } sys_columns = dict_sys->sys_columns; @@ -833,7 +831,7 @@ dict_create_index_tree_step( dberr_t err = DB_SUCCESS; 
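/* Sketch of the flag packing in the SYS_TABLES.N_COLS hunk just above: the
column counts and the DICT_TF_COMPACT flag share one 32-bit value, and the
patch casts to ulint before shifting so that the << 31 happens in an unsigned
type.  The bit layout below is illustrative only, not the real
dict_table_encode_n_col() encoding. */
#include <cassert>
#include <cstdint>

static std::uint32_t pack_n_cols(std::uint32_t n_cols, bool compact)
{
        /* keep the format flag in the most significant bit */
        return n_cols | (std::uint32_t(compact) << 31);
}

static std::uint32_t unpack_n_cols(std::uint32_t v)
{
        return v & ~(std::uint32_t(1) << 31);
}

static bool unpack_compact(std::uint32_t v) { return v >> 31; }

int main()
{
        const std::uint32_t v = pack_n_cols(17, true);
        assert(unpack_n_cols(v) == 17);
        assert(unpack_compact(v));
        return 0;
}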
- if (!index->is_readable() || dict_table_is_discarded(index->table)) { + if (!index->is_readable()) { node->page_no = FIL_NULL; } else { index->set_modified(mtr); @@ -875,11 +873,7 @@ dict_create_index_tree_in_mem( mtr_t mtr; ut_ad(mutex_own(&dict_sys->mutex)); - - if (index->type == DICT_FTS) { - /* FTS index does not need an index tree */ - return(DB_SUCCESS); - } + ut_ad(!(index->type & DICT_FTS)); mtr_start(&mtr); mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO); @@ -887,7 +881,7 @@ dict_create_index_tree_in_mem( /* Currently this function is being used by temp-tables only. Import/Discard of temp-table is blocked and so this assert. */ ut_ad(index->is_readable()); - ut_ad(!dict_table_is_discarded(index->table)); + ut_ad(!(index->table->flags2 & DICT_TF2_DISCARDED)); index->page = btr_create(index->type, index->table->space, index->id, index, NULL, &mtr); @@ -1350,9 +1344,10 @@ dict_create_index_step( == ((dict_index_is_clust(node->index) && node->table->supports_instant()) ? dict_index_t::NO_CORE_NULL_BYTES - : UT_BITS_IN_BYTES(node->index->n_nullable))); + : UT_BITS_IN_BYTES( + unsigned(node->index->n_nullable)))); node->index->n_core_null_bytes = UT_BITS_IN_BYTES( - node->index->n_nullable); + unsigned(node->index->n_nullable)); node->state = INDEX_CREATE_INDEX_TREE; } @@ -1931,7 +1926,8 @@ dict_create_add_foreign_to_dictionary( foreign->referenced_table_name); pars_info_add_int4_literal(info, "n_cols", - foreign->n_fields + (foreign->type << 24)); + ulint(foreign->n_fields) + | (ulint(foreign->type) << 24)); DBUG_PRINT("dict_create_add_foreign_to_dictionary", ("'%s', '%s', '%s', %d", foreign->id, name, diff --git a/storage/innobase/dict/dict0defrag_bg.cc b/storage/innobase/dict/dict0defrag_bg.cc index 7b9a0373c48..949bbbc0d74 100644 --- a/storage/innobase/dict/dict0defrag_bg.cc +++ b/storage/innobase/dict/dict0defrag_bg.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2016, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2016, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -232,7 +232,7 @@ dict_stats_process_entry_from_defrag_pool() ? dict_table_find_index_on_id(table, index_id) : NULL; - if (!index || dict_index_is_corrupted(index)) { + if (!index || index->is_corrupted()) { if (table) { dict_table_close(table, TRUE, FALSE); } diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index 08d489a63c4..e7acead269c 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. Copyright (c) 2013, 2018, MariaDB Corporation. @@ -251,7 +251,7 @@ dict_get_db_name_len( const char* s; s = strchr(name, '/'); ut_a(s); - return(s - name); + return ulint(s - name); } /** Reserve the dictionary system mutex. */ @@ -434,7 +434,8 @@ dict_table_try_drop_aborted( ut_ad(table->id == table_id); } - if (table && table->get_ref_count() == ref_count && table->drop_aborted) { + if (table && table->get_ref_count() == ref_count && table->drop_aborted + && !UT_LIST_GET_FIRST(table->locks)) { /* Silence a debug assertion in row_merge_drop_indexes(). 
*/ ut_d(table->acquire()); row_merge_drop_indexes(trx, table, TRUE); @@ -619,12 +620,12 @@ const char* dict_col_t::name(const dict_table_t& table) const const char *s; if (is_virtual()) { - col_nr = reinterpret_cast(this) - - table.v_cols; + col_nr = size_t(reinterpret_cast(this) + - table.v_cols); ut_ad(col_nr < table.n_v_def); s = table.v_col_names; } else { - col_nr = this - table.cols; + col_nr = size_t(this - table.cols); ut_ad(col_nr < table.n_def); s = table.col_names; } @@ -1242,28 +1243,19 @@ dict_table_add_system_columns( DATA_ROW_ID | DATA_NOT_NULL, DATA_ROW_ID_LEN); -#if DATA_ROW_ID != 0 -#error "DATA_ROW_ID != 0" -#endif + compile_time_assert(DATA_ROW_ID == 0); dict_mem_table_add_col(table, heap, "DB_TRX_ID", DATA_SYS, DATA_TRX_ID | DATA_NOT_NULL, DATA_TRX_ID_LEN); -#if DATA_TRX_ID != 1 -#error "DATA_TRX_ID != 1" -#endif - + compile_time_assert(DATA_TRX_ID == 1); dict_mem_table_add_col(table, heap, "DB_ROLL_PTR", DATA_SYS, DATA_ROLL_PTR | DATA_NOT_NULL, DATA_ROLL_PTR_LEN); -#if DATA_ROLL_PTR != 2 -#error "DATA_ROLL_PTR != 2" -#endif + compile_time_assert(DATA_ROLL_PTR == 2); /* This check reminds that if a new system column is added to the program, it should be dealt with here */ -#if DATA_N_SYS_COLS != 3 -#error "DATA_N_SYS_COLS != 3" -#endif + compile_time_assert(DATA_N_SYS_COLS == 3); } /** Add the table definition to the data dictionary cache */ @@ -1436,6 +1428,13 @@ dict_make_room_in_cache( if (dict_table_can_be_evicted(table)) { + DBUG_EXECUTE_IF("crash_if_fts_table_is_evicted", + { + if (table->fts && + dict_table_has_fts_index(table)) { + ut_ad(0); + } + };); dict_table_remove_from_cache_low(table, TRUE); ++n_evicted; @@ -1599,12 +1598,12 @@ dict_table_rename_in_cache( /* If the table is stored in a single-table tablespace, rename the .ibd file and rebuild the .isl file if needed. */ - if (dict_table_is_discarded(table)) { + if (!table->space) { bool exists; char* filepath; ut_ad(dict_table_is_file_per_table(table)); - ut_ad(!dict_table_is_temporary(table)); + ut_ad(!table->is_temporary()); /* Make sure the data_dir_path is set. */ dict_get_and_save_data_dir_path(table, true); @@ -1624,11 +1623,7 @@ dict_table_rename_in_cache( return(DB_OUT_OF_MEMORY); } - fil_delete_tablespace(table->space->id -#ifdef BTR_CUR_HASH_ADAPT - , true -#endif /* BTR_CUR_HASH_ADAPT */ - ); + fil_delete_tablespace(table->space_id); /* Delete any temp file hanging around. */ if (os_file_status(filepath, &exists, &ftype) @@ -1645,7 +1640,7 @@ dict_table_rename_in_cache( const char* old_path = UT_LIST_GET_FIRST(table->space->chain) ->name; - ut_ad(!dict_table_is_temporary(table)); + ut_ad(!table->is_temporary()); if (DICT_TF_HAS_DATA_DIR(table->flags)) { new_path = os_file_make_new_pathname( @@ -2077,19 +2072,13 @@ dict_col_name_is_reserved( /*======================*/ const char* name) /*!< in: column name */ { - /* This check reminds that if a new system column is added to - the program, it should be dealt with here. */ -#if DATA_N_SYS_COLS != 3 -#error "DATA_N_SYS_COLS != 3" -#endif - static const char* reserved_names[] = { "DB_ROW_ID", "DB_TRX_ID", "DB_ROLL_PTR" }; - ulint i; + compile_time_assert(UT_ARR_SIZE(reserved_names) == DATA_N_SYS_COLS); - for (i = 0; i < UT_ARR_SIZE(reserved_names); i++) { + for (ulint i = 0; i < UT_ARR_SIZE(reserved_names); i++) { if (innobase_strcasecmp(name, reserved_names[i]) == 0) { return(TRUE); @@ -2117,13 +2106,13 @@ dict_index_node_ptr_max_size( /* This is universal index for change buffer. The max size of the entry is about max key length * 2. 
(index key + primary key to be inserted to the index) - (The max key length is UNIV_PAGE_SIZE / 16 * 3 at + (The max key length is srv_page_size / 16 * 3 at ha_innobase::max_supported_key_length(), considering MAX_KEY_LENGTH = 3072 at MySQL imposes the 3500 historical InnoDB value for 16K page size case.) For the universal index, node_ptr contains most of the entry. And 512 is enough to contain ibuf columns and meta-data */ - return(UNIV_PAGE_SIZE / 8 * 3 + 512); + return(srv_page_size / 8 * 3 + 512); } comp = dict_table_is_comp(index->table); @@ -2136,11 +2125,11 @@ dict_index_node_ptr_max_size( if (comp) { /* Include the "null" flags in the maximum possible record size. */ - rec_max_size += UT_BITS_IN_BYTES(index->n_nullable); + rec_max_size += UT_BITS_IN_BYTES(unsigned(index->n_nullable)); } else { /* For each column, include a 2-byte offset and a "null" flag. */ - rec_max_size += 2 * index->n_fields; + rec_max_size += 2 * unsigned(index->n_fields); } /* Compute the maximum possible record size. */ @@ -2224,10 +2213,10 @@ dict_index_too_big_for_tree( const page_size_t page_size(dict_tf_get_page_size(table->flags)); if (page_size.is_compressed() - && page_size.physical() < univ_page_size.physical()) { + && page_size.physical() < srv_page_size) { /* On a compressed page, two records must fit in the uncompressed page modification log. On compressed pages - with size.physical() == univ_page_size.physical(), + with size.physical() == srv_page_size, this limit will never be reached. */ ut_ad(comp); /* The maximum allowed record size is the size of @@ -2263,14 +2252,15 @@ dict_index_too_big_for_tree( if (comp) { /* Include the "null" flags in the maximum possible record size. */ - rec_max_size += UT_BITS_IN_BYTES(new_index->n_nullable); + rec_max_size += UT_BITS_IN_BYTES( + unsigned(new_index->n_nullable)); } else { /* For each column, include a 2-byte offset and a "null" flag. The 1-byte format is only used in short records that do not contain externally stored columns. Such records could never exceed the page limit, even when using the 2-byte format. */ - rec_max_size += 2 * new_index->n_fields; + rec_max_size += 2 * unsigned(new_index->n_fields); } /* Compute the maximum possible record size. */ @@ -2364,6 +2354,44 @@ add_field_size: return(FALSE); } +/** Clears the virtual column's index list before index is +being freed. +@param[in] index Index being freed */ +void dict_index_remove_from_v_col_list(dict_index_t* index) +{ + /* Index is not completely formed */ + if (!index->cached) { + return; + } + if (dict_index_has_virtual(index)) { + const dict_col_t* col; + const dict_v_col_t* vcol; + + for (ulint i = 0; i < dict_index_get_n_fields(index); i++) { + col = dict_index_get_nth_col(index, i); + if (col->is_virtual()) { + vcol = reinterpret_cast( + col); + /* This could be NULL, when we do add + virtual column, add index together. We do not + need to track this virtual column's index */ + if (vcol->v_indexes == NULL) { + continue; + } + dict_v_idx_list::iterator it; + for (it = vcol->v_indexes->begin(); + it != vcol->v_indexes->end(); ++it) { + dict_v_idx_t v_index = *it; + if (v_index.index == index) { + vcol->v_indexes->erase(it); + break; + } + } + } + } + } +} + /** Adds an index to the dictionary cache, with possible indexing newly added column. @param[in] index index; NOTE! The index memory @@ -2418,7 +2446,7 @@ dict_index_add_to_cache( ? 
dict_index_build_internal_fts(index) : dict_index_build_internal_non_clust(index); new_index->n_core_null_bytes = UT_BITS_IN_BYTES( - new_index->n_nullable); + unsigned(new_index->n_nullable)); } /* Set the n_fields value in new_index to the actual defined @@ -2561,28 +2589,13 @@ dict_index_remove_from_cache_low( zero. See also: dict_table_can_be_evicted() */ do { - ulint ref_count = btr_search_info_get_ref_count(info, index); - - if (ref_count == 0) { + if (!btr_search_info_get_ref_count(info, index)) { break; } - /* Sleep for 10ms before trying again. */ - os_thread_sleep(10000); - ++retries; + buf_LRU_drop_page_hash_for_tablespace(table); - if (retries % 500 == 0) { - /* No luck after 5 seconds of wait. */ - ib::error() << "Waited for " << retries / 100 - << " secs for hash index" - " ref_count (" << ref_count << ") to drop to 0." - " index: " << index->name - << " table: " << table->name; - } - - /* To avoid a hang here we commit suicide if the - ref_count doesn't drop to zero in 600 seconds. */ - ut_a(retries < 60000); + ut_a(++retries < 10000); } while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict); #endif /* BTR_CUR_HASH_ADAPT */ @@ -2605,7 +2618,7 @@ dict_index_remove_from_cache_low( for (ulint i = 0; i < dict_index_get_n_fields(index); i++) { col = dict_index_get_nth_col(index, i); - if (dict_col_is_virtual(col)) { + if (col->is_virtual()) { vcol = reinterpret_cast( col); @@ -2758,7 +2771,7 @@ dict_index_add_col( dict_field_t* field; const char* col_name; - if (dict_col_is_virtual(col)) { + if (col->is_virtual()) { dict_v_col_t* v_col = reinterpret_cast(col); /* When v_col->v_indexes==NULL, @@ -2780,7 +2793,7 @@ dict_index_add_col( dict_mem_index_add_field(index, col_name, prefix_len); - field = dict_index_get_nth_field(index, index->n_def - 1); + field = dict_index_get_nth_field(index, unsigned(index->n_def) - 1); field->col = col; field->fixed_len = static_cast( @@ -2798,12 +2811,11 @@ dict_index_add_col( if (field->fixed_len > DICT_MAX_FIXED_COL_LEN) { field->fixed_len = 0; } -#if DICT_MAX_FIXED_COL_LEN != 768 + /* The comparison limit above must be constant. If it were changed, the disk format of some fixed-length columns would change, which would be a disaster. 
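/* Sketch of the retry-loop change in dict_index_remove_from_cache_low()
above: instead of sleeping 10 ms at a time, warning every few seconds and
giving up after 600 seconds, the caller now actively drops the adaptive hash
index entries itself and retries a bounded number of times
(ut_a(++retries < 10000)).  The counter and drop function below are stand-ins
for the btr_search_info reference count and
buf_LRU_drop_page_hash_for_tablespace(). */
#include <cassert>

static unsigned ahi_ref_count = 3;      /* pretend three pages are still hashed */

static void drop_page_hash_for_tablespace()
{
        if (ahi_ref_count > 0) {
                --ahi_ref_count;        /* dropping entries releases references */
        }
}

static void wait_until_unreferenced()
{
        unsigned retries = 0;
        while (ahi_ref_count != 0) {
                drop_page_hash_for_tablespace();  /* do the work, not just wait */
                ++retries;
                assert(retries < 10000);          /* bounded, like ut_a() above */
        }
}

int main()
{
        wait_until_unreferenced();
        assert(ahi_ref_count == 0);
        return 0;
}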
*/ -# error "DICT_MAX_FIXED_COL_LEN != 768" -#endif + compile_time_assert(DICT_MAX_FIXED_COL_LEN == 768); if (!(col->prtype & DATA_NOT_NULL)) { index->n_nullable++; @@ -2969,7 +2981,8 @@ dict_index_build_internal_clust( /* Create a new index object with certainly enough fields */ new_index = dict_mem_index_create(index->table, index->name, index->type, - index->n_fields + table->n_cols); + unsigned(index->n_fields + + table->n_cols)); /* Copy other relevant data from the old index struct to the new struct: it inherits the values */ @@ -2988,7 +3001,7 @@ dict_index_build_internal_clust( new_index->n_uniq = new_index->n_def; } else { /* Also the row id is needed to identify the entry */ - new_index->n_uniq = 1 + new_index->n_def; + new_index->n_uniq = 1 + unsigned(new_index->n_def); } new_index->trx_id_offset = 0; @@ -2997,15 +3010,9 @@ dict_index_build_internal_clust( trx_id_pos = new_index->n_def; -#if DATA_ROW_ID != 0 -# error "DATA_ROW_ID != 0" -#endif -#if DATA_TRX_ID != 1 -# error "DATA_TRX_ID != 1" -#endif -#if DATA_ROLL_PTR != 2 -# error "DATA_ROLL_PTR != 2" -#endif + compile_time_assert(DATA_ROW_ID == 0); + compile_time_assert(DATA_TRX_ID == 1); + compile_time_assert(DATA_ROLL_PTR == 2); if (!dict_index_is_unique(index)) { dict_index_add_col(new_index, table, @@ -3095,7 +3102,7 @@ dict_index_build_internal_clust( new_index->n_core_null_bytes = table->supports_instant() ? dict_index_t::NO_CORE_NULL_BYTES - : UT_BITS_IN_BYTES(new_index->n_nullable); + : UT_BITS_IN_BYTES(unsigned(new_index->n_nullable)); new_index->cached = TRUE; return(new_index); @@ -3134,7 +3141,7 @@ dict_index_build_internal_non_clust( /* Create a new index */ new_index = dict_mem_index_create( index->table, index->name, index->type, - index->n_fields + 1 + clust_index->n_uniq); + ulint(index->n_fields + 1 + clust_index->n_uniq)); /* Copy other relevant data from the old index struct to the new struct: it inherits the values */ @@ -3155,7 +3162,7 @@ dict_index_build_internal_non_clust( field = dict_index_get_nth_field(new_index, i); - if (dict_col_is_virtual(field->col)) { + if (field->col->is_virtual()) { continue; } @@ -3790,7 +3797,7 @@ dict_scan_id( ptr++; } - len = ptr - s; + len = ulint(ptr - s); } if (heap == NULL) { @@ -3811,7 +3818,7 @@ dict_scan_id( } } *d++ = 0; - len = d - str; + len = ulint(d - str); ut_ad(*s == quote); ut_ad(s + 1 == ptr); } else { @@ -4030,7 +4037,7 @@ dict_scan_table_name( for (s = scan_name; *s; s++) { if (*s == '.') { database_name = scan_name; - database_name_len = s - scan_name; + database_name_len = ulint(s - scan_name); scan_name = ++s; break;/* to do: multiple dots? */ } @@ -4342,7 +4349,7 @@ dict_foreign_push_index_error( const char* col_name; field = dict_index_get_nth_field(err_index, err_col); - col_name = dict_col_is_virtual(field->col) + col_name = field->col->is_virtual() ? "(null)" : dict_table_get_col_name( table, dict_col_get_no(field->col)); @@ -4609,6 +4616,11 @@ loop: /**********************************************************/ /* The following call adds the foreign key constraints to the data dictionary system tables on disk */ + trx->op_info = "adding foreign keys"; + + trx_start_if_not_started_xa(trx, true); + + trx_set_dict_operation(trx, TRX_DICT_OP_TABLE); error = dict_create_add_foreigns_to_dictionary( local_fk_set, table, trx); @@ -4823,23 +4835,6 @@ col_loop1: return(DB_CANNOT_ADD_CONSTRAINT); } - /* Don't allow foreign keys on partitioned tables yet. 
*/ - ptr1 = dict_scan_to(ptr, "PARTITION"); - if (ptr1) { - ptr1 = dict_accept(cs, ptr1, "PARTITION", &success); - if (success && my_isspace(cs, *ptr1)) { - ptr2 = dict_accept(cs, ptr1, "BY", &success); - if (success) { - my_error(ER_FOREIGN_KEY_ON_PARTITIONED,MYF(0)); - return(DB_CANNOT_ADD_CONSTRAINT); - } - } - } - if (dict_table_is_partition(table)) { - my_error(ER_FOREIGN_KEY_ON_PARTITIONED,MYF(0)); - return(DB_CANNOT_ADD_CONSTRAINT); - } - /* Let us create a constraint struct */ foreign = dict_mem_foreign_create(); @@ -5594,19 +5589,19 @@ dict_index_copy_rec_order_prefix( UNIV_PREFETCH_R(rec); if (dict_index_is_ibuf(index)) { - ut_a(!dict_table_is_comp(index->table)); + ut_ad(!dict_table_is_comp(index->table)); n = rec_get_n_fields_old(rec); } else { if (page_rec_is_leaf(rec)) { n = dict_index_get_n_unique_in_tree(index); + } else if (dict_index_is_spatial(index)) { + ut_ad(dict_index_get_n_unique_in_tree_nonleaf(index) + == DICT_INDEX_SPATIAL_NODEPTR_SIZE); + /* For R-tree, we have to compare + the child page numbers as well. */ + n = DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1; } else { - n = dict_index_get_n_unique_in_tree_nonleaf(index); - /* For internal node of R-tree, since we need to - compare the page no field, so, we need to copy this - field as well. */ - if (dict_index_is_spatial(index)) { - n++; - } + n = dict_index_get_n_unique_in_tree(index); } } @@ -6700,11 +6695,18 @@ void dict_close(void) /*============*/ { - ulint i; + if (dict_sys == NULL) { + /* This should only happen if a failure occurred + during redo log processing. */ + return; + } + + /* Acquire only because it's a pre-condition. */ + mutex_enter(&dict_sys->mutex); /* Free the hash elements. We don't remove them from the table because we are going to destroy the table anyway. */ - for (i = 0; i < hash_get_n_cells(dict_sys->table_hash); i++) { + for (ulint i = 0; i < hash_get_n_cells(dict_sys->table_id_hash); i++) { dict_table_t* table; table = static_cast( @@ -6716,12 +6718,7 @@ dict_close(void) table = static_cast( HASH_GET_NEXT(name_hash, prev_table)); ut_ad(prev_table->magic_n == DICT_TABLE_MAGIC_N); - /* Acquire only because it's a pre-condition. */ - mutex_enter(&dict_sys->mutex); - dict_table_remove_from_cache(prev_table); - - mutex_exit(&dict_sys->mutex); } } @@ -6731,6 +6728,7 @@ dict_close(void) therefore we don't delete the individual elements. */ hash_table_free(dict_sys->table_id_hash); + mutex_exit(&dict_sys->mutex); mutex_free(&dict_sys->mutex); rw_lock_free(dict_operation_lock); @@ -6740,6 +6738,11 @@ dict_close(void) mutex_free(&dict_foreign_err_mutex); + if (dict_foreign_err_file) { + fclose(dict_foreign_err_file); + dict_foreign_err_file = NULL; + } + ut_free(dict_sys); dict_sys = NULL; @@ -6899,7 +6902,7 @@ dict_foreign_qualify_index( return(false); } - if (dict_col_is_virtual(field->col)) { + if (field->col->is_virtual()) { for (ulint j = 0; j < table->n_v_def; j++) { col_name = dict_table_get_v_col_name(table, j); if (innobase_strcasecmp(field->name,col_name) == 0) { @@ -6978,7 +6981,7 @@ dict_index_zip_pad_update( /* Only do increment if it won't increase padding beyond max pad size. */ if (info->pad + ZIP_PAD_INCR - < (UNIV_PAGE_SIZE * zip_pad_max) / 100) { + < (srv_page_size * zip_pad_max) / 100) { /* Use atomics even though we have the mutex. This is to ensure that we are able to read info->pad atomically. */ @@ -7004,7 +7007,7 @@ dict_index_zip_pad_update( /* Use atomics even though we have the mutex. This is to ensure that we are able to read info->pad atomically. 
*/ - my_atomic_addlint(&info->pad, -ZIP_PAD_INCR); + my_atomic_addlint(&info->pad, ulint(-ZIP_PAD_INCR)); info->n_rounds = 0; @@ -7074,17 +7077,17 @@ dict_index_zip_pad_optimal_page_size( if (!zip_failure_threshold_pct) { /* Disabled by user. */ - return(UNIV_PAGE_SIZE); + return(srv_page_size); } pad = my_atomic_loadlint(&index->zip_pad.pad); - ut_ad(pad < UNIV_PAGE_SIZE); - sz = UNIV_PAGE_SIZE - pad; + ut_ad(pad < srv_page_size); + sz = srv_page_size - pad; /* Min size allowed by user. */ ut_ad(zip_pad_max < 100); - min_sz = (UNIV_PAGE_SIZE * (100 - zip_pad_max)) / 100; + min_sz = (srv_page_size * (100 - zip_pad_max)) / 100; return(ut_max(sz, min_sz)); } diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc index 2e7058ebdd3..1f1a6c0bc48 100644 --- a/storage/innobase/dict/dict0load.cc +++ b/storage/innobase/dict/dict0load.cc @@ -144,7 +144,6 @@ dict_load_column_low( /** Load a virtual column "mapping" (to base columns) information from a SYS_VIRTUAL record @param[in,out] table table -@param[in,out] heap memory heap @param[in,out] column mapped base column's dict_column_t @param[in,out] table_id table id @param[in,out] pos virtual column position @@ -156,7 +155,6 @@ static const char* dict_load_virtual_low( dict_table_t* table, - mem_heap_t* heap, dict_col_t** column, table_id_t* table_id, ulint* pos, @@ -467,7 +465,6 @@ dict_process_sys_columns_rec( /** This function parses a SYS_VIRTUAL record and extracts virtual column information -@param[in,out] heap heap memory @param[in] rec current SYS_COLUMNS rec @param[in,out] table_id table id @param[in,out] pos virtual column position @@ -475,7 +472,6 @@ information @return error message, or NULL on success */ const char* dict_process_sys_virtual_rec( - mem_heap_t* heap, const rec_t* rec, table_id_t* table_id, ulint* pos, @@ -484,7 +480,7 @@ dict_process_sys_virtual_rec( const char* err_msg; /* Parse the record, and get "dict_col_t" struct filled */ - err_msg = dict_load_virtual_low(NULL, heap, NULL, table_id, + err_msg = dict_load_virtual_low(NULL, NULL, table_id, pos, base_pos, rec); return(err_msg); @@ -1138,7 +1134,7 @@ dict_sys_tables_type_valid(ulint type, bool not_redundant) if (!not_redundant) { /* SYS_TABLES.TYPE must be 1 or 1|DICT_TF_MASK_NO_ROLLBACK for ROW_FORMAT=REDUNDANT. */ - return !(type & ~(1 | DICT_TF_MASK_NO_ROLLBACK)); + return !(type & ~(1U | DICT_TF_MASK_NO_ROLLBACK)); } if (type >= 1U << DICT_TF_POS_UNUSED) { @@ -1417,12 +1413,13 @@ dict_check_sys_tables( continue; } - /* If the table is not a predefined tablespace then it must - be in a file-per-table tablespace. - Note that flags2 is not available for REDUNDANT tables, - so don't check those. */ - ut_ad(!DICT_TF_GET_COMPACT(flags) - || flags2 & DICT_TF2_USE_FILE_PER_TABLE); + /* For tables or partitions using .ibd files, the flag + DICT_TF2_USE_FILE_PER_TABLE was not set in MIX_LEN + before MySQL 5.6.5. The flag should not have been + introduced in persistent storage. MariaDB will keep + setting the flag when writing SYS_TABLES entries for + newly created or rebuilt tables or partitions, but + will otherwise ignore the flag. */ /* Now that we have the proper name for this tablespace, look to see if it is already in the tablespace cache. 
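/* Sketch of the cast in the dict_index_zip_pad_update() hunk above:
my_atomic_addlint() adds an unsigned ulint, so subtracting ZIP_PAD_INCR is
written as adding ulint(-ZIP_PAD_INCR); unsigned wrap-around makes that the
same as a subtraction.  std::atomic<unsigned long> stands in for the
my_atomic API, and 128 is only a demo value for ZIP_PAD_INCR. */
#include <atomic>
#include <cassert>

int main()
{
        const unsigned long ZIP_PAD_INCR_DEMO = 128;
        std::atomic<unsigned long> pad(1024);

        pad.fetch_add(ZIP_PAD_INCR_DEMO);        /* pad grows by the increment */
        pad.fetch_add(0UL - ZIP_PAD_INCR_DEMO);  /* wraps around: same as -= */

        assert(pad.load() == 1024);
        return 0;
}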
*/ @@ -1692,7 +1689,6 @@ static const char* dict_load_virtual_del = "delete-marked record in SYS_VIRTUAL" /** Load a virtual column "mapping" (to base columns) information from a SYS_VIRTUAL record @param[in,out] table table -@param[in,out] heap memory heap @param[in,out] column mapped base column's dict_column_t @param[in,out] table_id table id @param[in,out] pos virtual column position @@ -1704,7 +1700,6 @@ static const char* dict_load_virtual_low( dict_table_t* table, - mem_heap_t* heap, dict_col_t** column, table_id_t* table_id, ulint* pos, @@ -1964,7 +1959,7 @@ dict_load_virtual_one_col( ut_a(btr_pcur_is_on_user_rec(&pcur)); - err_msg = dict_load_virtual_low(table, heap, + err_msg = dict_load_virtual_low(table, &v_col->base_col[i - skipped], NULL, &pos, NULL, rec); @@ -2031,7 +2026,7 @@ dict_load_field_low( ulint len; unsigned pos_and_prefix_len; unsigned prefix_len; - ibool first_field; + bool first_field; ulint position; /* Either index or sys_field is supplied, not both */ @@ -2443,8 +2438,9 @@ dict_load_indexes( && static_cast(*field) == static_cast(*TEMP_INDEX_PREFIX_STR)) { /* Skip indexes whose name starts with - TEMP_INDEX_PREFIX, because they will - be dropped during crash recovery. */ + TEMP_INDEX_PREFIX_STR, because they will + be dropped by row_merge_drop_temp_indexes() + during crash recovery. */ goto next_rec; } } @@ -2486,10 +2482,10 @@ dict_load_indexes( } ut_ad(index); + ut_ad(!dict_index_is_online_ddl(index)); /* Check whether the index is corrupted */ - if (dict_index_is_corrupted(index)) { - + if (index->is_corrupted()) { ib::error() << "Index " << index->name << " of table " << table->name << " is corrupted"; @@ -3007,10 +3003,7 @@ err_exit: table = NULL; goto func_exit; } else { - dict_index_t* clust_index; - clust_index = dict_table_get_first_index(table); - - if (dict_index_is_corrupted(clust_index)) { + if (table->indexes.start->is_corrupted()) { table->corrupted = true; } } @@ -3061,14 +3054,11 @@ err_exit: if (!srv_force_recovery || !index - || !dict_index_is_clust(index)) { - + || !index->is_primary()) { dict_table_remove_from_cache(table); table = NULL; - - } else if (dict_index_is_corrupted(index) + } else if (index->is_corrupted() && table->is_readable()) { - /* It is possible we force to load a corrupted clustered index if srv_load_corrupted is set. Mark the table as corrupted in this case */ diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc index d5d734acf66..e6c5050d949 100644 --- a/storage/innobase/dict/dict0mem.cc +++ b/storage/innobase/dict/dict0mem.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. Copyright (c) 2013, 2018, MariaDB Corporation. @@ -166,7 +166,7 @@ dict_mem_table_create( table->space_id = space ? 
space->id : ULINT_UNDEFINED; table->n_t_cols = unsigned(n_cols + DATA_N_SYS_COLS); table->n_v_cols = (unsigned int) (n_v_cols); - table->n_cols = table->n_t_cols - table->n_v_cols; + table->n_cols = unsigned(table->n_t_cols - table->n_v_cols); table->cols = static_cast( mem_heap_alloc(heap, table->n_cols * sizeof(dict_col_t))); @@ -276,7 +276,7 @@ dict_add_col_name( s += strlen(s) + 1; } - old_len = s - col_names; + old_len = unsigned(s - col_names); } else { old_len = 0; } @@ -435,7 +435,7 @@ dict_mem_table_add_s_col( dict_table_t* table, ulint num_base) { - ulint i = table->n_def - 1; + unsigned i = unsigned(table->n_def) - 1; dict_col_t* col = dict_table_get_nth_col(table, i); dict_s_col_t s_col; @@ -495,13 +495,13 @@ dict_mem_table_col_rename_low( /* We need to adjust all affected index->field pointers, as in dict_index_add_col(). First, copy table->col_names. */ - ulint prefix_len = s - t_col_names; + ulint prefix_len = ulint(s - t_col_names); for (; i < n_col; i++) { s += strlen(s) + 1; } - ulint full_len = s - t_col_names; + ulint full_len = ulint(s - t_col_names); char* col_names; if (to_len > from_len) { @@ -534,12 +534,12 @@ dict_mem_table_col_rename_low( /* if is_virtual and that in field->col does not match, continue */ if ((!is_virtual) != - (!dict_col_is_virtual(field->col))) { + (!field->col->is_virtual())) { continue; } ulint name_ofs - = field->name - t_col_names; + = ulint(field->name - t_col_names); if (name_ofs <= prefix_len) { field->name = col_names + name_ofs; } else { @@ -1045,7 +1045,7 @@ dict_mem_index_add_field( index->n_def++; - field = dict_index_get_nth_field(index, index->n_def - 1); + field = dict_index_get_nth_field(index, unsigned(index->n_def) - 1); field->name = name; field->prefix_len = (unsigned int) prefix_len; @@ -1079,6 +1079,7 @@ dict_mem_index_free( UT_DELETE(index->rtr_track->rtr_active); } + dict_index_remove_from_v_col_list(index); mem_heap_free(index->heap); } @@ -1104,7 +1105,7 @@ dict_mem_create_temporary_tablename( char* name; const char* dbend = strchr(dbtab, '/'); ut_ad(dbend); - size_t dblen = dbend - dbtab + 1; + size_t dblen = size_t(dbend - dbtab) + 1; /* Increment a randomly initialized number for each temp file. */ my_atomic_add32((int32*) &dict_temp_file_num, 1); @@ -1248,8 +1249,9 @@ void dict_table_t::instant_add_column(const dict_table_t& table) const char* end = table.col_names; for (unsigned i = table.n_cols; i--; ) end += strlen(end) + 1; - col_names = static_cast(mem_heap_dup(heap, table.col_names, - end - table.col_names)); + col_names = static_cast( + mem_heap_dup(heap, table.col_names, + ulint(end - table.col_names))); const dict_col_t* const old_cols = cols; const dict_col_t* const old_cols_end = cols + n_cols; cols = static_cast(mem_heap_dup(heap, table.cols, @@ -1258,7 +1260,7 @@ void dict_table_t::instant_add_column(const dict_table_t& table) /* Preserve the default values of previously instantly added columns. 
*/ - for (unsigned i = n_cols - DATA_N_SYS_COLS; i--; ) { + for (unsigned i = unsigned(n_cols) - DATA_N_SYS_COLS; i--; ) { cols[i].def_val = old_cols[i].def_val; } @@ -1276,7 +1278,7 @@ void dict_table_t::instant_add_column(const dict_table_t& table) } const unsigned old_n_cols = n_cols; - const unsigned n_add = table.n_cols - n_cols; + const unsigned n_add = unsigned(table.n_cols - n_cols); n_t_def += n_add; n_t_cols += n_add; @@ -1349,14 +1351,17 @@ dict_table_t::rollback_instant( for (unsigned i = index->n_fields - n_remove; i < index->n_fields; i++) { - index->n_nullable -= index->fields[i].col->is_nullable(); + if (index->fields[i].col->is_nullable()) { + index->n_nullable--; + } } index->n_fields -= n_remove; index->n_def = index->n_fields; if (index->n_core_fields > index->n_fields) { index->n_core_fields = index->n_fields; - index->n_core_null_bytes = UT_BITS_IN_BYTES(index->n_nullable); + index->n_core_null_bytes + = UT_BITS_IN_BYTES(unsigned(index->n_nullable)); } const dict_col_t* const new_cols = cols; @@ -1423,7 +1428,9 @@ void dict_table_t::rollback_instant(unsigned n) DBUG_ASSERT(!memcmp(sys, system, sizeof system)); for (unsigned i = index->n_fields - n_remove; i < index->n_fields; i++) { - index->n_nullable -= index->fields[i].col->is_nullable(); + if (index->fields[i].col->is_nullable()) { + index->n_nullable--; + } } index->n_fields -= n_remove; index->n_def = index->n_fields; diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index 88460978ade..51dcdc9e140 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -158,9 +158,8 @@ dict_stats_should_ignore_index( /*===========================*/ const dict_index_t* index) /*!< in: index */ { - return((index->type & DICT_FTS) - || dict_index_is_corrupted(index) - || dict_index_is_spatial(index) + return((index->type & (DICT_FTS | DICT_SPATIAL)) + || index->is_corrupted() || index->to_be_dropped || !index->is_committed()); } @@ -919,7 +918,7 @@ dict_stats_update_transient( index = dict_table_get_first_index(table); - if (dict_table_is_discarded(table)) { + if (!table->space) { /* Nothing to do. */ dict_stats_empty_table(table, true); return; @@ -1039,10 +1038,10 @@ dict_stats_analyze_index_level( memset(n_diff, 0x0, n_uniq * sizeof(n_diff[0])); /* Allocate space for the offsets header (the allocation size at - offsets[0] and the REC_OFFS_HEADER_SIZE bytes), and n_fields + 1, + offsets[0] and the REC_OFFS_HEADER_SIZE bytes), and n_uniq + 1, so that this will never be less than the size calculated in rec_get_offsets_func(). 
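/* Sketch of the explicit conversions added throughout this patch
(ulint(s - name), size_t(this - table.cols),
UT_BITS_IN_BYTES(unsigned(n_nullable))): pointer subtraction yields the signed
ptrdiff_t and bit-fields promote to int, so the code now spells out the
unsigned type it actually wants instead of relying on implicit narrowing.
Plain standard types stand in for ulint, and the byte-rounding helper below is
only UT_BITS_IN_BYTES-style, not the real macro. */
#include <cassert>
#include <cstddef>
#include <cstring>

static std::size_t db_name_len(const char* name)
{
        const char* s = std::strchr(name, '/');
        assert(s != 0);
        return std::size_t(s - name);            /* explicit: ptrdiff_t -> size_t */
}

struct index_stub {
        unsigned n_nullable : 10;                /* bit-field, promotes to int */
};

static unsigned null_bytes(const index_stub& i)
{
        return (unsigned(i.n_nullable) + 7) / 8; /* bits rounded up to bytes */
}

int main()
{
        assert(db_name_len("test/t1") == 4);
        index_stub i;
        i.n_nullable = 9;
        assert(null_bytes(i) == 2);
        return 0;
}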
*/ - i = (REC_OFFS_HEADER_SIZE + 1 + 1) + index->n_fields; + i = (REC_OFFS_HEADER_SIZE + 1 + 1) + n_uniq; heap = mem_heap_create((2 * sizeof *rec_offsets) * i); rec_offsets = static_cast( @@ -1158,8 +1157,7 @@ dict_stats_analyze_index_level( n_uniq, &heap); prev_rec = rec_copy_prefix_to_buf( - prev_rec, index, - rec_offs_n_fields(prev_rec_offsets), + prev_rec, index, n_uniq, &prev_rec_buf, &prev_rec_buf_size); prev_rec_is_copied = true; @@ -1232,7 +1230,7 @@ dict_stats_analyze_index_level( btr_pcur_move_to_next_user_rec() will release the latch on the page that prev_rec is on */ prev_rec = rec_copy_prefix_to_buf( - rec, index, rec_offs_n_fields(rec_offsets), + rec, index, n_uniq, &prev_rec_buf, &prev_rec_buf_size); prev_rec_is_copied = true; @@ -2231,7 +2229,7 @@ dict_stats_update_persistent( index = dict_table_get_first_index(table); if (index == NULL - || dict_index_is_corrupted(index) + || index->is_corrupted() || (index->type | DICT_UNIQUE) != (DICT_CLUSTERED | DICT_UNIQUE)) { /* Table definition is corrupt */ @@ -2332,7 +2330,7 @@ dict_stats_save_index_stat( pars_info_add_str_literal(pinfo, "table_name", table_utf8); pars_info_add_str_literal(pinfo, "index_name", index->name); UNIV_MEM_ASSERT_RW_ABORT(&last_update, 4); - pars_info_add_int4_literal(pinfo, "last_update", (lint)last_update); + pars_info_add_int4_literal(pinfo, "last_update", uint32(last_update)); UNIV_MEM_ASSERT_RW_ABORT(stat_name, strlen(stat_name)); pars_info_add_str_literal(pinfo, "stat_name", stat_name); UNIV_MEM_ASSERT_RW_ABORT(&stat_value, 8); @@ -2464,7 +2462,7 @@ dict_stats_save( pars_info_add_str_literal(pinfo, "database_name", db_utf8); pars_info_add_str_literal(pinfo, "table_name", table_utf8); - pars_info_add_int4_literal(pinfo, "last_update", (lint)now); + pars_info_add_int4_literal(pinfo, "last_update", uint32(now)); pars_info_add_ull_literal(pinfo, "n_rows", table->stat_n_rows); pars_info_add_ull_literal(pinfo, "clustered_index_size", table->stat_clustered_index_size); @@ -2910,7 +2908,7 @@ dict_stats_fetch_index_stats_step( /* extract 12 from "n_diff_pfx12..." into n_pfx note that stat_name does not have a terminating '\0' */ - n_pfx = (num_ptr[0] - '0') * 10 + (num_ptr[1] - '0'); + n_pfx = ulong(num_ptr[0] - '0') * 10 + ulong(num_ptr[1] - '0'); ulint n_uniq = index->n_uniq; diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc index 6c9a17c8a7c..6fde5654dd1 100644 --- a/storage/innobase/dict/dict0stats_bg.cc +++ b/storage/innobase/dict/dict0stats_bg.cc @@ -110,6 +110,7 @@ dict_stats_recalc_pool_deinit() recalc_pool->clear(); UT_DELETE(recalc_pool); + recalc_pool = NULL; } /*****************************************************************//** @@ -307,6 +308,10 @@ dict_stats_thread_deinit() ut_a(!srv_read_only_mode); ut_ad(!srv_dict_stats_thread_active); + if (recalc_pool == NULL) { + return; + } + dict_stats_recalc_pool_deinit(); dict_defrag_pool_deinit(); @@ -349,7 +354,7 @@ dict_stats_process_entry_from_recalc_pool() return; } - ut_ad(!dict_table_is_temporary(table)); + ut_ad(!table->is_temporary()); if (!fil_table_accessible(table)) { dict_table_close(table, TRUE, FALSE); @@ -394,16 +399,9 @@ dict_stats_process_entry_from_recalc_pool() #ifdef UNIV_DEBUG /** Disables dict stats thread. It's used by: SET GLOBAL innodb_dict_stats_disabled_debug = 1 (0). 
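/* Sketch of the shutdown hardening above: dict_stats_recalc_pool_deinit()
now resets recalc_pool to NULL and dict_stats_thread_deinit() returns early if
the pool was never created, so a failed or partial startup can still shut down
cleanly.  The raw pointer below is a stand-in for the real recalc_pool
vector. */
static int* recalc_pool_stub = 0;                /* not yet initialized */

static void pool_init()
{
        recalc_pool_stub = new int[8]();
}

static void pool_deinit()
{
        if (recalc_pool_stub == 0) {
                return;                          /* startup never got this far */
        }
        delete[] recalc_pool_stub;
        recalc_pool_stub = 0;                    /* a second deinit stays a no-op */
}

int main()
{
        pool_deinit();                           /* safe: nothing was initialized */
        pool_init();
        pool_deinit();
        pool_deinit();                           /* still safe */
        return 0;
}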
-@param[in] thd thread handle -@param[in] var pointer to system variable -@param[out] var_ptr where the formal string goes @param[in] save immediate result from check function */ -void -dict_stats_disabled_debug_update( - THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save) +void dict_stats_disabled_debug_update(THD*, st_mysql_sys_var*, void*, + const void* save) { /* This method is protected by mutex, as every SET GLOBAL .. */ ut_ad(dict_stats_disabled_event != NULL); diff --git a/storage/innobase/eval/eval0eval.cc b/storage/innobase/eval/eval0eval.cc index fa0b265b3db..6cc63b3a004 100644 --- a/storage/innobase/eval/eval0eval.cc +++ b/storage/innobase/eval/eval0eval.cc @@ -585,7 +585,7 @@ eval_instr( /* We have already matched j characters */ if (j == len2) { - int_val = i + 1; + int_val = lint(i) + 1; goto match_found; } @@ -781,7 +781,7 @@ eval_predefined( } /* allocate the string */ - data = eval_node_ensure_val_buf(func_node, int_len + 1); + data = eval_node_ensure_val_buf(func_node, ulint(int_len) + 1); /* add terminating NUL character */ data[int_len] = 0; @@ -804,7 +804,7 @@ eval_predefined( } } - dfield_set_len(que_node_get_val(func_node), int_len); + dfield_set_len(que_node_get_val(func_node), ulint(int_len)); return; @@ -833,12 +833,11 @@ eval_func( { que_node_t* arg; ulint fclass; - ulint func; ut_ad(que_node_get_type(func_node) == QUE_NODE_FUNC); fclass = func_node->fclass; - func = func_node->func; + const int func = func_node->func; arg = func_node->args; diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc index 6b6faa370cf..b826d7ada19 100644 --- a/storage/innobase/fil/fil0crypt.cc +++ b/storage/innobase/fil/fil0crypt.cc @@ -469,7 +469,6 @@ byte* fil_parse_write_crypt_data( byte* ptr, const byte* end_ptr, - const buf_block_t* block, dberr_t* err) { /* check that redo log entry is complete */ @@ -525,7 +524,7 @@ fil_parse_write_crypt_data( /* update fil_space memory cache with crypt_data */ if (fil_space_t* space = fil_space_acquire_silent(space_id)) { crypt_data = fil_space_set_crypt_data(space, crypt_data); - fil_space_release(space); + space->release(); /* Check is used key found from encryption plugin */ if (crypt_data->should_encrypt() && !crypt_data->is_key_found()) { @@ -658,14 +657,14 @@ fil_space_encrypt( fil_space_crypt_t* crypt_data = space->crypt_data; const page_size_t page_size(space->flags); - ut_ad(space->n_pending_ios > 0); + ut_ad(space->pending_io()); byte* tmp = fil_encrypt_buf(crypt_data, space->id, offset, lsn, src_frame, page_size, dst_frame); #ifdef UNIV_DEBUG if (tmp) { /* Verify that encrypted buffer is not corrupted */ - byte* tmp_mem = (byte *)malloc(UNIV_PAGE_SIZE); + byte* tmp_mem = (byte *)malloc(srv_page_size); dberr_t err = DB_SUCCESS; byte* src = src_frame; bool page_compressed_encrypted = (mach_read_from_2(tmp+FIL_PAGE_TYPE) == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED); @@ -673,9 +672,9 @@ fil_space_encrypt( byte* uncomp_mem = NULL; if (page_compressed_encrypted) { - comp_mem = (byte *)malloc(UNIV_PAGE_SIZE); - uncomp_mem = (byte *)malloc(UNIV_PAGE_SIZE); - memcpy(comp_mem, src_frame, UNIV_PAGE_SIZE); + comp_mem = (byte *)malloc(srv_page_size); + uncomp_mem = (byte *)malloc(srv_page_size); + memcpy(comp_mem, src_frame, srv_page_size); fil_decompress_page(uncomp_mem, comp_mem, srv_page_size, NULL); src = uncomp_mem; @@ -686,7 +685,7 @@ fil_space_encrypt( /* Need to decompress the page if it was also compressed */ if (page_compressed_encrypted) { - memcpy(comp_mem, tmp_mem, UNIV_PAGE_SIZE); + 
memcpy(comp_mem, tmp_mem, srv_page_size); fil_decompress_page(tmp_mem, comp_mem, srv_page_size, NULL); } @@ -830,7 +829,7 @@ fil_space_decrypt( *decrypted = false; ut_ad(space->crypt_data != NULL && space->crypt_data->is_encrypted()); - ut_ad(space->n_pending_ios > 0); + ut_ad(space->pending_io()); bool encrypted = fil_space_decrypt(space->crypt_data, tmp_frame, page_size, src_frame, &err); @@ -1173,7 +1172,7 @@ fil_crypt_space_needs_rotation( return false; } - ut_ad(space->n_pending_ops > 0); + ut_ad(space->referenced()); fil_space_crypt_t *crypt_data = space->crypt_data; @@ -1470,7 +1469,7 @@ fil_crypt_find_space_to_rotate( if (state->should_shutdown()) { if (state->space) { - fil_space_release(state->space); + state->space->release(); state->space = NULL; } return false; @@ -1479,7 +1478,7 @@ fil_crypt_find_space_to_rotate( if (state->first) { state->first = false; if (state->space) { - fil_space_release(state->space); + state->space->release(); } state->space = NULL; } @@ -1586,7 +1585,7 @@ fil_crypt_find_page_to_rotate( ulint batch = srv_alloc_time * state->allocated_iops; fil_space_t* space = state->space; - ut_ad(!space || space->n_pending_ops > 0); + ut_ad(!space || space->referenced()); /* If space is marked to be dropped stop rotation. */ if (!space || space->is_stopping()) { @@ -1644,7 +1643,7 @@ fil_crypt_get_page_throttle_func( fil_space_t* space = state->space; const page_size_t page_size = page_size_t(space->flags); const page_id_t page_id(space->id, offset); - ut_ad(space->n_pending_ops > 0); + ut_ad(space->referenced()); /* Before reading from tablespace we need to make sure that the tablespace is not about to be dropped or truncated. */ @@ -1727,7 +1726,7 @@ btr_scrub_get_block_and_allocation_status( buf_block_t *block = NULL; fil_space_t* space = state->space; - ut_ad(space->n_pending_ops > 0); + ut_ad(space->referenced()); mtr_start(&local_mtr); @@ -1779,7 +1778,7 @@ fil_crypt_rotate_page( ulint sleeptime_ms = 0; fil_space_crypt_t *crypt_data = space->crypt_data; - ut_ad(space->n_pending_ops > 0); + ut_ad(space->referenced()); ut_ad(offset > 0); /* In fil_crypt_thread where key rotation is done we have @@ -1957,7 +1956,7 @@ fil_crypt_rotate_pages( ulint end = std::min(state->offset + state->batch, state->space->free_limit); - ut_ad(state->space->n_pending_ops > 0); + ut_ad(state->space->referenced()); for (; state->offset < end; state->offset++) { @@ -1974,6 +1973,12 @@ fil_crypt_rotate_pages( continue; } + /* If space is marked as stopping, stop rotating + pages. 
*/ + if (state->space->is_stopping()) { + break; + } + fil_crypt_rotate_page(key_state, state); } } @@ -1990,7 +1995,7 @@ fil_crypt_flush_space( fil_space_t* space = state->space; fil_space_crypt_t *crypt_data = space->crypt_data; - ut_ad(space->n_pending_ops > 0); + ut_ad(space->referenced()); /* flush tablespace pages so that there are no pages left with old key */ lsn_t end_lsn = crypt_data->rotate_state.end_lsn; @@ -2022,6 +2027,10 @@ fil_crypt_flush_space( crypt_data->type = CRYPT_SCHEME_UNENCRYPTED; } + if (space->is_stopping()) { + return; + } + /* update page 0 */ mtr_t mtr; mtr.start(); @@ -2041,18 +2050,13 @@ fil_crypt_flush_space( /*********************************************************************** Complete rotating a space -@param[in,out] key_state Key state @param[in,out] state Rotation state */ -static -void -fil_crypt_complete_rotate_space( - const key_state_t* key_state, - rotate_thread_t* state) +static void fil_crypt_complete_rotate_space(rotate_thread_t* state) { fil_space_crypt_t *crypt_data = state->space->crypt_data; ut_ad(crypt_data); - ut_ad(state->space->n_pending_ops > 0); + ut_ad(state->space->referenced()); /* Space might already be dropped */ if (!state->space->is_stopping()) { @@ -2210,9 +2214,8 @@ DECLARE_THREAD(fil_crypt_thread)( /* If space is marked as stopping, release space and stop rotation. */ if (thr.space->is_stopping()) { - fil_crypt_complete_rotate_space( - &new_state, &thr); - fil_space_release(thr.space); + fil_crypt_complete_rotate_space(&thr); + thr.space->release(); thr.space = NULL; break; } @@ -2223,7 +2226,7 @@ DECLARE_THREAD(fil_crypt_thread)( /* complete rotation */ if (thr.space) { - fil_crypt_complete_rotate_space(&new_state, &thr); + fil_crypt_complete_rotate_space(&thr); } /* force key state refresh */ @@ -2239,7 +2242,7 @@ DECLARE_THREAD(fil_crypt_thread)( /* release current space if shutting down */ if (thr.space) { - fil_space_release(thr.space); + thr.space->release(); thr.space = NULL; } @@ -2440,7 +2443,7 @@ fil_space_crypt_get_status( { memset(status, 0, sizeof(*status)); - ut_ad(space->n_pending_ops > 0); + ut_ad(space->referenced()); /* If there is no crypt data and we have not yet read page 0 for this tablespace, we need to read it before @@ -2505,7 +2508,7 @@ fil_space_get_scrub_status( { memset(status, 0, sizeof(*status)); - ut_ad(space->n_pending_ops > 0); + ut_ad(space->referenced()); fil_space_crypt_t* crypt_data = space->crypt_data; status->space = space->id; @@ -2639,7 +2642,7 @@ fil_space_verify_crypt_checksum( checksum2 = checksum1; } else { checksum2 = mach_read_from_4( - page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM); + page + srv_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM); valid = buf_page_is_checksum_valid_crc32( page, checksum1, checksum2, false /* FIXME: also try the original crc32 that was diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 6cf30856117..07482354683 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -354,7 +354,7 @@ The caller should hold an InnoDB table lock or a MDL that prevents the tablespace from being dropped during the operation, or the caller should be in single-threaded crash recovery mode (no user connections that could drop tablespaces). -If this is not the case, fil_space_acquire() and fil_space_release() +If this is not the case, fil_space_acquire() and fil_space_t::release() should be used instead. 
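/* Sketch of the refactoring visible in the fil0crypt.cc hunks above:
fil_space_release(space) and direct reads of space->n_pending_ops become
member calls (space->release(), space->referenced(), space->pending_io()),
and the corresponding free functions are removed from fil0fil.cc further
below.  The toy class here only illustrates the reference-counting interface;
the real fil_space_t carries much more state. */
#include <atomic>
#include <cassert>

class tablespace_stub {
        std::atomic<unsigned> n_pending;
public:
        tablespace_stub() : n_pending(0) {}
        void acquire() { n_pending.fetch_add(1); }
        void release()
        {
                assert(n_pending.load() > 0);
                n_pending.fetch_sub(1);
        }
        bool referenced() const { return n_pending.load() != 0; }
};

int main()
{
        tablespace_stub space;
        space.acquire();
        assert(space.referenced());
        space.release();
        assert(!space.referenced());
        return 0;
}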
@param[in] id tablespace ID @return tablespace, or NULL if not found */ @@ -1035,11 +1035,11 @@ fil_space_extend_must_retry( const page_size_t pageSize(space->flags); const ulint page_size = pageSize.physical(); - /* fil_read_first_page() expects UNIV_PAGE_SIZE bytes. - fil_node_open_file() expects at least 4 * UNIV_PAGE_SIZE bytes.*/ + /* fil_read_first_page() expects srv_page_size bytes. + fil_node_open_file() expects at least 4 * srv_page_size bytes.*/ os_offset_t new_size = std::max( os_offset_t(size - file_start_page_no) * page_size, - os_offset_t(FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE)); + os_offset_t(FIL_IBD_FILE_INITIAL_SIZE << srv_page_size_shift)); *success = os_file_set_size(node->name, node->handle, new_size, FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags)); @@ -1067,7 +1067,7 @@ fil_space_extend_must_retry( space->size += file_size - node->size; node->size = file_size; const ulint pages_in_MiB = node->size - & ~((1 << (20 - UNIV_PAGE_SIZE_SHIFT)) - 1); + & ~ulint((1U << (20U - srv_page_size_shift)) - 1); fil_node_complete_io(node,IORequestRead); @@ -1198,7 +1198,8 @@ fil_mutex_enter_and_prepare_for_io( } } - if (ulint size = ulint(UNIV_UNLIKELY(space->recv_size))) { + ulint size = space->recv_size; + if (UNIV_UNLIKELY(size != 0)) { ut_ad(node); bool success; if (fil_space_extend_must_retry(space, node, size, @@ -1354,10 +1355,10 @@ fil_space_free_low( ut_ad(srv_fast_shutdown == 2 || !srv_was_started || space->max_lsn == 0); - /* Wait for fil_space_release_for_io(); after + /* Wait for fil_space_t::release_for_io(); after fil_space_detach(), the tablespace cannot be found, so fil_space_acquire_for_io() would return NULL */ - while (space->n_pending_ios) { + while (space->pending_io()) { os_thread_sleep(100); } @@ -2005,6 +2006,10 @@ fil_close_log_files( } mutex_exit(&fil_system.mutex); + + if (free) { + log_sys.log.close(); + } } /*******************************************************************//** @@ -2041,18 +2046,18 @@ fil_write_flushed_lsn( byte* buf; dberr_t err = DB_TABLESPACE_NOT_FOUND; - buf1 = static_cast(ut_malloc_nokey(2 * UNIV_PAGE_SIZE)); - buf = static_cast(ut_align(buf1, UNIV_PAGE_SIZE)); + buf1 = static_cast(ut_malloc_nokey(2U << srv_page_size_shift)); + buf = static_cast(ut_align(buf1, srv_page_size)); const page_id_t page_id(TRX_SYS_SPACE, 0); - err = fil_read(page_id, univ_page_size, 0, univ_page_size.physical(), + err = fil_read(page_id, univ_page_size, 0, srv_page_size, buf); if (err == DB_SUCCESS) { mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, lsn); err = fil_write(page_id, univ_page_size, 0, - univ_page_size.physical(), buf); + srv_page_size, buf); fil_flush_file_spaces(FIL_TYPE_TABLESPACE); } @@ -2085,7 +2090,7 @@ fil_space_acquire_low(ulint id, bool silent) } else if (space->is_stopping()) { space = NULL; } else { - space->n_pending_ops++; + space->acquire(); } mutex_exit(&fil_system.mutex); @@ -2093,18 +2098,6 @@ fil_space_acquire_low(ulint id, bool silent) return(space); } -/** Release a tablespace acquired with fil_space_acquire(). -@param[in,out] space tablespace to release */ -void -fil_space_release(fil_space_t* space) -{ - mutex_enter(&fil_system.mutex); - ut_ad(space->magic_n == FIL_SPACE_MAGIC_N); - ut_ad(space->n_pending_ops > 0); - space->n_pending_ops--; - mutex_exit(&fil_system.mutex); -} - /** Acquire a tablespace for reading or writing a block, when it could be dropped concurrently. 
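/* Sketch of the page/byte/MiB conversions above: with a power-of-two page
size, "pages << srv_page_size_shift" gives bytes,
"pages >> (20 - srv_page_size_shift)" gives whole MiB (as in the
innodb_buffer_pool_size message), and masking with
~((1 << (20 - srv_page_size_shift)) - 1) rounds a page count down to a whole
number of MiB worth of pages, as in fil_space_extend_must_retry().  Stand-in
constants only. */
#include <cassert>

static const unsigned page_size_shift = 14;               /* 16 KiB pages */
static const unsigned long mib_pages_mask
        = (1UL << (20U - page_size_shift)) - 1;            /* 64 pages per MiB - 1 */

int main()
{
        const unsigned long pages = 130;                   /* a bit over 2 MiB */

        assert((pages << page_size_shift) == 130UL * 16384);     /* bytes */
        assert((pages >> (20U - page_size_shift)) == 2);          /* whole MiB */
        assert((pages & ~mib_pages_mask) == 128);                 /* rounded down */
        return 0;
}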
@param[in] id tablespace ID @@ -2118,7 +2111,7 @@ fil_space_acquire_for_io(ulint id) fil_space_t* space = fil_space_get_by_id(id); if (space) { - space->n_pending_ios++; + space->acquire_for_io(); } mutex_exit(&fil_system.mutex); @@ -2126,18 +2119,6 @@ fil_space_acquire_for_io(ulint id) return(space); } -/** Release a tablespace acquired with fil_space_acquire_for_io(). -@param[in,out] space tablespace to release */ -void -fil_space_release_for_io(fil_space_t* space) -{ - mutex_enter(&fil_system.mutex); - ut_ad(space->magic_n == FIL_SPACE_MAGIC_N); - ut_ad(space->n_pending_ios > 0); - space->n_pending_ios--; - mutex_exit(&fil_system.mutex); -} - /********************************************************//** Creates the database directory for a table if it does not exist yet. */ void @@ -2153,12 +2134,13 @@ fil_create_directory_for_tablename( len = strlen(fil_path_to_mysql_datadir); namend = strchr(name, '/'); ut_a(namend); - path = static_cast(ut_malloc_nokey(len + (namend - name) + 2)); + path = static_cast( + ut_malloc_nokey(len + ulint(namend - name) + 2)); memcpy(path, fil_path_to_mysql_datadir, len); path[len] = '/'; - memcpy(path + len + 1, name, namend - name); - path[len + (namend - name) + 1] = 0; + memcpy(path + len + 1, name, ulint(namend - name)); + path[len + ulint(namend - name) + 1] = 0; os_normalize_path(path); @@ -2362,9 +2344,9 @@ fil_op_replay_rename( ut_a(namend != NULL); char* dir = static_cast( - ut_malloc_nokey(namend - new_name + 1)); + ut_malloc_nokey(ulint(namend - new_name) + 1)); - memcpy(dir, new_name, namend - new_name); + memcpy(dir, new_name, ulint(namend - new_name)); dir[namend - new_name] = '\0'; bool success = os_file_create_directory(dir, false); @@ -2373,7 +2355,7 @@ fil_op_replay_rename( ulint dirlen = 0; if (const char* dirend = strrchr(dir, OS_PATH_SEPARATOR)) { - dirlen = dirend - dir + 1; + dirlen = ulint(dirend - dir) + 1; } ut_free(dir); @@ -2392,7 +2374,7 @@ fil_op_replay_rename( strlen(new_name + dirlen) - 4 /* remove ".ibd" */); - ut_ad(new_table[namend - new_name - dirlen] + ut_ad(new_table[ulint(namend - new_name) - dirlen] == OS_PATH_SEPARATOR); #if OS_PATH_SEPARATOR != '/' new_table[namend - new_name - dirlen] = '/'; @@ -2428,7 +2410,7 @@ fil_check_pending_ops(const fil_space_t* space, ulint count) return 0; } - if (ulint n_pending_ops = space->n_pending_ops) { + if (ulint n_pending_ops = my_atomic_loadlint(&space->n_pending_ops)) { if (count > 5000) { ib::warn() << "Trying to close/delete/truncate" @@ -2456,7 +2438,7 @@ fil_check_pending_io( ulint count) /*!< in: number of attempts so far */ { ut_ad(mutex_own(&fil_system.mutex)); - ut_a(space->n_pending_ops == 0); + ut_ad(!space->referenced()); switch (operation) { case FIL_OPERATION_DELETE: @@ -2518,12 +2500,11 @@ fil_check_pending_operations( if (sp) { sp->stop_new_ops = true; if (sp->crypt_data) { - sp->n_pending_ops++; + sp->acquire(); mutex_exit(&fil_system.mutex); fil_space_crypt_close_tablespace(sp); mutex_enter(&fil_system.mutex); - ut_ad(sp->n_pending_ops > 0); - sp->n_pending_ops--; + sp->release(); } } @@ -2716,11 +2697,7 @@ fil_delete_tablespace( To deal with potential read requests, we will check the ::stop_new_ops flag in fil_io(). */ - buf_LRU_flush_or_remove_pages(id, NULL -#ifdef BTR_CUR_HASH_ADAPT - , drop_ahi -#endif /* BTR_CUR_HASH_ADAPT */ - ); + buf_LRU_flush_or_remove_pages(id, NULL); /* If it is a delete then also delete any generated files, otherwise when we drop the database the remove directory will fail. 
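A recurring micro-change in these hunks wraps pointer subtractions in ulint(...) before they are used as lengths: the difference of two pointers has the signed type ptrdiff_t, and the explicit cast makes the conversion to an unsigned length deliberate. A self-contained sketch of the same idea, mirroring fil_create_directory_for_tablename() above (std::size_t stands in for InnoDB's ulint typedef; the function name is invented):

    #include <cstring>

    /* Sketch only: strchr() returns a pointer into 'name'; the subtraction
       yields a signed ptrdiff_t, which is cast explicitly before use. */
    static std::size_t dbname_length(const char* name)
    {
            const char* namend = std::strchr(name, '/');
            return namend ? std::size_t(namend - name) : std::strlen(name);
    }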
*/ @@ -2756,7 +2733,7 @@ fil_delete_tablespace( the fil_system::mutex. */ if (const fil_space_t* s = fil_space_get_by_id(id)) { ut_a(s == space); - ut_a(space->n_pending_ops == 0); + ut_a(!space->referenced()); ut_a(UT_LIST_GET_LEN(space->chain) == 1); fil_node_t* node = UT_LIST_GET_FIRST(space->chain); ut_a(node->n_pending == 0); @@ -2827,7 +2804,8 @@ bool fil_truncate_tablespace(fil_space_t* space, ulint size_in_pages) bool success = os_file_truncate(node->name, node->handle, 0); if (success) { - os_offset_t size = os_offset_t(size_in_pages) * UNIV_PAGE_SIZE; + os_offset_t size = os_offset_t(size_in_pages) + << srv_page_size_shift; success = os_file_set_size( node->name, node->handle, size, @@ -3143,7 +3121,7 @@ func_exit: log_mutex_enter(); } - /* log_sys->mutex is above fil_system.mutex in the latching order */ + /* log_sys.mutex is above fil_system.mutex in the latching order */ ut_ad(log_mutex_own()); mutex_enter(&fil_system.mutex); ut_ad(space->name == old_space_name); @@ -3274,7 +3252,7 @@ fil_ibd_create( if (!os_file_set_size( path, file, - os_offset_t(size) << UNIV_PAGE_SIZE_SHIFT, is_compressed)) { + os_offset_t(size) << srv_page_size_shift, is_compressed)) { *err = DB_OUT_OF_FILE_SPACE; err_exit: os_file_close(file); @@ -3295,11 +3273,11 @@ err_exit: with zeros from the call of os_file_set_size(), until a buffer pool flush would write to it. */ - buf2 = static_cast(ut_malloc_nokey(3 * UNIV_PAGE_SIZE)); + buf2 = static_cast(ut_malloc_nokey(3U << srv_page_size_shift)); /* Align the memory for file i/o if we might have O_DIRECT set */ - page = static_cast(ut_align(buf2, UNIV_PAGE_SIZE)); + page = static_cast(ut_align(buf2, srv_page_size)); - memset(page, '\0', UNIV_PAGE_SIZE); + memset(page, '\0', srv_page_size); flags |= FSP_FLAGS_PAGE_SSIZE(); fsp_header_init_fields(page, space_id, flags); @@ -3317,7 +3295,7 @@ err_exit: } else { page_zip_des_t page_zip; page_zip_set_size(&page_zip, page_size.physical()); - page_zip.data = page + UNIV_PAGE_SIZE; + page_zip.data = page + srv_page_size; #ifdef UNIV_DEBUG page_zip.m_start = #endif /* UNIV_DEBUG */ @@ -3830,7 +3808,7 @@ fil_path_to_space_name( while (const char* t = static_cast( memchr(tablename, OS_PATH_SEPARATOR, - end - tablename))) { + ulint(end - tablename)))) { dbname = tablename; tablename = t + 1; } @@ -3842,7 +3820,7 @@ fil_path_to_space_name( ut_ad(end - tablename > 4); ut_ad(memcmp(end - 4, DOT_IBD, 4) == 0); - char* name = mem_strdupl(dbname, end - dbname - 4); + char* name = mem_strdupl(dbname, ulint(end - dbname) - 4); ut_ad(name[tablename - dbname - 1] == OS_PATH_SEPARATOR); #if OS_PATH_SEPARATOR != '/' @@ -4049,7 +4027,8 @@ fil_ibd_load( /* Every .ibd file is created >= 4 pages in size. Smaller files cannot be OK. 
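Throughout these hunks, multiplication by the page size becomes a left shift by srv_page_size_shift, and page/MiB conversions shift by (20 - srv_page_size_shift); the patch itself asserts srv_page_size == 1UL << srv_page_size_shift. A self-contained worked example follows, assuming the default 16 KiB page size (shift value 14), which is an assumption for illustration only:

    #include <cassert>

    int main()
    {
            const unsigned shift = 14;   /* stand-in for srv_page_size_shift */
            /* 4 initial pages expressed in bytes: 4 << 14 == 4 * 16384 */
            assert((4ULL << shift) == 4ULL * 16384);
            /* 128 MiB expressed in pages: 128 << (20 - 14) == 128 * 64 */
            assert((128U << (20U - shift)) == 128U * 64U);
            return 0;
    }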
*/ - minimum_size = FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE; + minimum_size = os_offset_t(FIL_IBD_FILE_INITIAL_SIZE) + << srv_page_size_shift; if (size == static_cast(-1)) { /* The following call prints an error message */ @@ -4401,15 +4380,13 @@ fil_io( ut_ad(req_type.validate()); ut_ad(len > 0); - ut_ad(byte_offset < UNIV_PAGE_SIZE); + ut_ad(byte_offset < srv_page_size); ut_ad(!page_size.is_compressed() || byte_offset == 0); - ut_ad(UNIV_PAGE_SIZE == (ulong)(1 << UNIV_PAGE_SIZE_SHIFT)); -#if (1 << UNIV_PAGE_SIZE_SHIFT_MAX) != UNIV_PAGE_SIZE_MAX -# error "(1 << UNIV_PAGE_SIZE_SHIFT_MAX) != UNIV_PAGE_SIZE_MAX" -#endif -#if (1 << UNIV_PAGE_SIZE_SHIFT_MIN) != UNIV_PAGE_SIZE_MIN -# error "(1 << UNIV_PAGE_SIZE_SHIFT_MIN) != UNIV_PAGE_SIZE_MIN" -#endif + ut_ad(srv_page_size == 1UL << srv_page_size_shift); + compile_time_assert((1U << UNIV_PAGE_SIZE_SHIFT_MAX) + == UNIV_PAGE_SIZE_MAX); + compile_time_assert((1U << UNIV_PAGE_SIZE_SHIFT_MIN) + == UNIV_PAGE_SIZE_MIN); ut_ad(fil_validate_skip()); /* ibuf bitmap pages must be read in the sync AIO mode: */ @@ -4592,11 +4569,11 @@ fil_io( if (!page_size.is_compressed()) { offset = ((os_offset_t) cur_page_no - << UNIV_PAGE_SIZE_SHIFT) + byte_offset; + << srv_page_size_shift) + byte_offset; ut_a(node->size - cur_page_no - >= ((byte_offset + len + (UNIV_PAGE_SIZE - 1)) - / UNIV_PAGE_SIZE)); + >= ((byte_offset + len + (srv_page_size - 1)) + >> srv_page_size_shift)); } else { ulint size_shift; @@ -4705,7 +4682,26 @@ fil_aio_wait( switch (purpose) { case FIL_TYPE_LOG: srv_set_io_thread_op_info(segment, "complete io for log"); - log_io_complete(static_cast(message)); + /* We use synchronous writing of the logs + and can only end up here when writing a log checkpoint! */ + ut_a(ptrdiff_t(message) == 1); + /* It was a checkpoint write */ + switch (srv_flush_t(srv_file_flush_method)) { + case SRV_O_DSYNC: + case SRV_NOSYNC: + break; + case SRV_FSYNC: + case SRV_LITTLESYNC: + case SRV_O_DIRECT: + case SRV_O_DIRECT_NO_FSYNC: +#ifdef _WIN32 + case SRV_ALL_O_DIRECT_FSYNC: +#endif + fil_flush(SRV_LOG_SPACE_FIRST_ID); + } + + DBUG_PRINT("ib_log", ("checkpoint info written")); + log_sys.complete_checkpoint(); return; case FIL_TYPE_TABLESPACE: case FIL_TYPE_TEMPORARY: @@ -4738,7 +4734,7 @@ fil_aio_wait( << ": " << ut_strerr(err); } - fil_space_release_for_io(space); + space->release_for_io(); } return; } @@ -4772,7 +4768,7 @@ fil_flush( void fil_flush(fil_space_t* space) { - ut_ad(space->n_pending_ios > 0); + ut_ad(space->pending_io()); ut_ad(space->purpose == FIL_TYPE_TABLESPACE || space->purpose == FIL_TYPE_IMPORT); @@ -5048,8 +5044,7 @@ fil_mtr_rename_log( const char* old_path = old_table->space->chain.start->name; /* Temp filepath must not exist. */ dberr_t err = fil_rename_tablespace_check( - old_path, tmp_path, - dict_table_is_discarded(old_table)); + old_path, tmp_path, !old_table->space); if (err != DB_SUCCESS) { ut_free(tmp_path); return(err); @@ -5071,8 +5066,7 @@ fil_mtr_rename_log( TABLE starts and ends with a file_per-table tablespace. */ if (!old_table->space_id) { dberr_t err = fil_rename_tablespace_check( - new_path, old_path, - dict_table_is_discarded(new_table)); + new_path, old_path, !new_table->space); if (err != DB_SUCCESS) { ut_free(old_path); return(err); @@ -5110,11 +5104,11 @@ fil_space_validate_for_mtr_commit( to quiesce. This is not a problem, because ibuf_merge_or_delete_for_page() would call fil_space_acquire() before mtr_start() and - fil_space_release() after mtr_commit(). This is why + fil_space_t::release() after mtr_commit(). 
This is why n_pending_ops should not be zero if stop_new_ops is set. */ ut_ad(!space->stop_new_ops || space->is_being_truncated /* TRUNCATE sets stop_new_ops */ - || space->n_pending_ops > 0); + || space->referenced()); } #endif /* UNIV_DEBUG */ @@ -5140,12 +5134,12 @@ fil_names_dirty( { ut_ad(log_mutex_own()); ut_ad(recv_recovery_is_on()); - ut_ad(log_sys->lsn != 0); + ut_ad(log_sys.lsn != 0); ut_ad(space->max_lsn == 0); ut_d(fil_space_validate_for_mtr_commit(space)); UT_LIST_ADD_LAST(fil_system.named_spaces, space); - space->max_lsn = log_sys->lsn; + space->max_lsn = log_sys.lsn; } /** Write MLOG_FILE_NAME records when a non-predefined persistent @@ -5160,7 +5154,7 @@ fil_names_dirty_and_write( { ut_ad(log_mutex_own()); ut_d(fil_space_validate_for_mtr_commit(space)); - ut_ad(space->max_lsn == log_sys->lsn); + ut_ad(space->max_lsn == log_sys.lsn); UT_LIST_ADD_LAST(fil_system.named_spaces, space); fil_names_write(space, mtr); @@ -5197,8 +5191,8 @@ fil_names_clear( ut_ad(log_mutex_own()); - if (log_sys->append_on_checkpoint) { - mtr_write_log(log_sys->append_on_checkpoint); + if (log_sys.append_on_checkpoint) { + mtr_write_log(log_sys.append_on_checkpoint); do_write = true; } @@ -5330,7 +5324,7 @@ truncate_t::truncate( : space->size; const bool success = os_file_truncate( - path, node->handle, trunc_size * UNIV_PAGE_SIZE); + path, node->handle, trunc_size << srv_page_size_shift); if (!success) { ib::error() << "Cannot truncate file " << path @@ -5412,7 +5406,7 @@ test_make_filepath() /** Return the next fil_space_t. Once started, the caller must keep calling this until it returns NULL. -fil_space_acquire() and fil_space_release() are invoked here which +fil_space_t::acquire() and fil_space_t::release() are invoked here which blocks a concurrent operation from dropping the tablespace. @param[in] prev_space Pointer to the previous fil_space_t. If NULL, use the first fil_space_t on fil_system.space_list. @@ -5425,31 +5419,27 @@ fil_space_next(fil_space_t* prev_space) mutex_enter(&fil_system.mutex); - if (prev_space == NULL) { + if (!space) { space = UT_LIST_GET_FIRST(fil_system.space_list); - - /* We can trust that space is not NULL because at least the - system tablespace is always present and loaded first. */ - space->n_pending_ops++; } else { - ut_ad(space->n_pending_ops > 0); + ut_a(space->referenced()); /* Move on to the next fil_space_t */ - space->n_pending_ops--; + space->release(); space = UT_LIST_GET_NEXT(space_list, space); + } - /* Skip spaces that are being created by - fil_ibd_create(), or dropped, or !tablespace. */ - while (space != NULL - && (UT_LIST_GET_LEN(space->chain) == 0 - || space->is_stopping() - || space->purpose != FIL_TYPE_TABLESPACE)) { - space = UT_LIST_GET_NEXT(space_list, space); - } + /* Skip spaces that are being created by + fil_ibd_create(), or dropped, or !tablespace. 
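The comment above spells out the calling protocol for fil_space_next(): once iteration starts, the caller keeps passing the previously returned space back in until NULL comes back, because acquire() and release() happen inside the function. A minimal sketch of a conforming caller (not part of the patch, using only the interface described here):

    /* Sketch only: at most one tablespace stays referenced at a time. */
    for (fil_space_t* space = fil_space_next(NULL);
         space != NULL;
         space = fil_space_next(space)) {
            /* 'space' is pinned here; the next fil_space_next() call,
               or the final NULL return, releases it. */
    }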
*/ + while (space != NULL + && (UT_LIST_GET_LEN(space->chain) == 0 + || space->is_stopping() + || space->purpose != FIL_TYPE_TABLESPACE)) { + space = UT_LIST_GET_NEXT(space_list, space); + } - if (space != NULL) { - space->n_pending_ops++; - } + if (space != NULL) { + space->acquire(); } mutex_exit(&fil_system.mutex); @@ -5468,7 +5458,7 @@ fil_space_remove_from_keyrotation(fil_space_t* space) ut_ad(mutex_own(&fil_system.mutex)); ut_ad(space); - if (space->n_pending_ops == 0 && space->is_in_rotation_list) { + if (space->is_in_rotation_list && !space->referenced()) { space->is_in_rotation_list = false; ut_a(UT_LIST_GET_LEN(fil_system.rotation_list) > 0); UT_LIST_REMOVE(fil_system.rotation_list, space); @@ -5478,7 +5468,7 @@ fil_space_remove_from_keyrotation(fil_space_t* space) /** Return the next fil_space_t from key rotation list. Once started, the caller must keep calling this until it returns NULL. -fil_space_acquire() and fil_space_release() are invoked here which +fil_space_t::acquire() and fil_space_t::release() are invoked here which blocks a concurrent operation from dropping the tablespace. @param[in] prev_space Pointer to the previous fil_space_t. If NULL, use the first fil_space_t on fil_system.space_list. @@ -5495,8 +5485,7 @@ fil_space_keyrotate_next( if (UT_LIST_GET_LEN(fil_system.rotation_list) == 0) { if (space) { - ut_ad(space->n_pending_ops > 0); - space->n_pending_ops--; + space->release(); fil_space_remove_from_keyrotation(space); } mutex_exit(&fil_system.mutex); @@ -5509,10 +5498,8 @@ fil_space_keyrotate_next( /* We can trust that space is not NULL because we checked list length above */ } else { - ut_ad(space->n_pending_ops > 0); - /* Move on to the next fil_space_t */ - space->n_pending_ops--; + space->release(); old = space; space = UT_LIST_GET_NEXT(rotation_list, space); @@ -5533,7 +5520,7 @@ fil_space_keyrotate_next( } if (space != NULL) { - space->n_pending_ops++; + space->acquire(); } mutex_exit(&fil_system.mutex); diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc index 2dc9abd2fca..507f452aa20 100644 --- a/storage/innobase/fil/fil0pagecompress.cc +++ b/storage/innobase/fil/fil0pagecompress.cc @@ -100,7 +100,7 @@ fil_compress_page( int comp_level = int(level); ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE; ulint write_size = 0; -#if HAVE_LZO +#if defined(HAVE_LZO) lzo_uint write_size_lzo = write_size; #endif /* Cache to avoid change during function execution */ @@ -117,17 +117,17 @@ fil_compress_page( if (!out_buf) { allocated = true; - ulint size = UNIV_PAGE_SIZE; + ulint size = srv_page_size; /* Both snappy and lzo compression methods require that output buffer used for compression is bigger than input buffer. Increase the allocated buffer size accordingly. */ -#if HAVE_SNAPPY +#if defined(HAVE_SNAPPY) if (comp_method == PAGE_SNAPPY_ALGORITHM) { size = snappy_max_compressed_length(size); } #endif -#if HAVE_LZO +#if defined(HAVE_LZO) if (comp_method == PAGE_LZO_ALGORITHM) { size += LZO1X_1_15_MEM_COMPRESS; } @@ -155,14 +155,14 @@ fil_compress_page( /* If no compression level was provided to this table, use system default level */ if (comp_level == 0) { - comp_level = page_zip_level; + comp_level = int(page_zip_level); } DBUG_LOG("compress", "Preparing for space " << (space ? space->id : 0) << " '" << (space ? 
space->name : "(import)") << "' len " << len); - write_size = UNIV_PAGE_SIZE - header_len; + write_size = srv_page_size - header_len; switch(comp_method) { #ifdef HAVE_LZ4 @@ -185,11 +185,11 @@ fil_compress_page( #ifdef HAVE_LZO case PAGE_LZO_ALGORITHM: err = lzo1x_1_15_compress( - buf, len, out_buf+header_len, &write_size_lzo, out_buf+UNIV_PAGE_SIZE); + buf, len, out_buf+header_len, &write_size_lzo, out_buf+srv_page_size); write_size = write_size_lzo; - if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) { + if (err != LZO_E_OK || write_size > srv_page_size-header_len) { goto err_exit; } @@ -209,7 +209,7 @@ fil_compress_page( &out_pos, (size_t)write_size); - if (err != LZMA_OK || out_pos > UNIV_PAGE_SIZE-header_len) { + if (err != LZMA_OK || out_pos > srv_page_size-header_len) { write_size = out_pos; goto err_exit; } @@ -232,7 +232,7 @@ fil_compress_page( 0, 0); - if (err != BZ_OK || write_size > UNIV_PAGE_SIZE-header_len) { + if (err != BZ_OK || write_size > srv_page_size-header_len) { goto err_exit; } break; @@ -243,7 +243,7 @@ fil_compress_page( case PAGE_SNAPPY_ALGORITHM: { snappy_status cstatus; - write_size = snappy_max_compressed_length(UNIV_PAGE_SIZE); + write_size = snappy_max_compressed_length(srv_page_size); cstatus = snappy_compress( (const char *)buf, @@ -251,7 +251,7 @@ fil_compress_page( (char *)(out_buf+header_len), (size_t*)&write_size); - if (cstatus != SNAPPY_OK || write_size > UNIV_PAGE_SIZE-header_len) { + if (cstatus != SNAPPY_OK || write_size > srv_page_size-header_len) { err = (int)cstatus; goto err_exit; } @@ -310,9 +310,9 @@ fil_compress_page( byte *comp_page; byte *uncomp_page; - comp_page = static_cast(ut_malloc_nokey(UNIV_PAGE_SIZE)); - uncomp_page = static_cast(ut_malloc_nokey(UNIV_PAGE_SIZE)); - memcpy(comp_page, out_buf, UNIV_PAGE_SIZE); + comp_page = static_cast(ut_malloc_nokey(srv_page_size)); + uncomp_page = static_cast(ut_malloc_nokey(srv_page_size)); + memcpy(comp_page, out_buf, srv_page_size); fil_decompress_page(uncomp_page, comp_page, ulong(len), NULL); @@ -439,8 +439,8 @@ fil_decompress_page( // If no buffer was given, we need to allocate temporal buffer if (page_buf == NULL) { - in_buf = static_cast(ut_malloc_nokey(UNIV_PAGE_SIZE)); - memset(in_buf, 0, UNIV_PAGE_SIZE); + in_buf = static_cast(ut_malloc_nokey(srv_page_size)); + memset(in_buf, 0, srv_page_size); } else { in_buf = page_buf; } @@ -472,7 +472,7 @@ fil_decompress_page( /* Get the actual size of compressed page */ actual_size = mach_read_from_2(buf+FIL_PAGE_DATA); /* Check if payload size is corrupted */ - if (actual_size == 0 || actual_size > UNIV_PAGE_SIZE) { + if (actual_size == 0 || actual_size > srv_page_size) { ib::error() << "Corruption: We try to uncompress corrupted page" << " actual size: " << actual_size << " compression method: " @@ -527,7 +527,7 @@ fil_decompress_page( olen = olen_lzo; - if (err != LZO_E_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) { + if (err != LZO_E_OK || (olen == 0 || olen > srv_page_size)) { len = olen; goto err_exit; if (return_error) { @@ -557,7 +557,7 @@ fil_decompress_page( len); - if (ret != LZMA_OK || (dst_pos == 0 || dst_pos > UNIV_PAGE_SIZE)) { + if (ret != LZMA_OK || (dst_pos == 0 || dst_pos > srv_page_size)) { len = dst_pos; goto err_exit; if (return_error) { @@ -570,7 +570,7 @@ fil_decompress_page( #endif /* HAVE_LZMA */ #ifdef HAVE_BZIP2 case PAGE_BZIP2_ALGORITHM: { - unsigned int dst_pos = UNIV_PAGE_SIZE; + unsigned int dst_pos = srv_page_size; err = BZ2_bzBuffToBuffDecompress( (char *)in_buf, @@ -580,7 +580,7 @@ 
fil_decompress_page( 1, 0); - if (err != BZ_OK || (dst_pos == 0 || dst_pos > UNIV_PAGE_SIZE)) { + if (err != BZ_OK || (dst_pos == 0 || dst_pos > srv_page_size)) { len = dst_pos; goto err_exit; if (return_error) { @@ -594,7 +594,7 @@ fil_decompress_page( case PAGE_SNAPPY_ALGORITHM: { snappy_status cstatus; - ulint olen = UNIV_PAGE_SIZE; + ulint olen = srv_page_size; cstatus = snappy_uncompress( (const char *)(buf+header_len), @@ -602,7 +602,7 @@ fil_decompress_page( (char *)in_buf, (size_t*)&olen); - if (cstatus != SNAPPY_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) { + if (cstatus != SNAPPY_OK || (olen == 0 || olen > srv_page_size)) { err = (int)cstatus; len = olen; goto err_exit; @@ -651,6 +651,6 @@ err_exit: << fil_get_compression_alg_name(compression_alg) << "."; buf_page_print(buf, univ_page_size); - fil_space_release_for_io(space); + space->release_for_io(); ut_ad(0); } diff --git a/storage/innobase/fsp/fsp0file.cc b/storage/innobase/fsp/fsp0file.cc index 2a0cb5a8fee..4611052ab48 100644 --- a/storage/innobase/fsp/fsp0file.cc +++ b/storage/innobase/fsp/fsp0file.cc @@ -302,7 +302,7 @@ Datafile::read_first_page(bool read_only_mode) /* Align the memory for a possible read from a raw device */ m_first_page = static_cast( - ut_align(m_first_page_buf, UNIV_PAGE_SIZE)); + ut_align(m_first_page_buf, srv_page_size)); IORequest request; dberr_t err = DB_ERROR; @@ -529,7 +529,7 @@ err_exit: /* Check if the whole page is blank. */ if (!m_space_id && !m_flags) { const byte* b = m_first_page; - ulint nonzero_bytes = UNIV_PAGE_SIZE; + ulint nonzero_bytes = srv_page_size; while (*b == '\0' && --nonzero_bytes != 0) { @@ -550,13 +550,13 @@ err_exit: const page_size_t page_size(m_flags); - if (univ_page_size.logical() != page_size.logical()) { - /* Page size must be univ_page_size. */ + if (srv_page_size != page_size.logical()) { + /* Logical size must be innodb_page_size. */ ib::error() << "Data file '" << m_filepath << "' uses page size " << page_size.logical() << ", but the innodb_page_size" " start-up parameter is " - << univ_page_size.logical(); + << srv_page_size; free_first_page(); return(DB_ERROR); } @@ -683,8 +683,8 @@ Datafile::find_space_id() bool noncompressed_ok = false; /* For noncompressed pages, the page size must be - equal to univ_page_size.physical(). */ - if (page_size == univ_page_size.physical()) { + equal to srv_page_size. */ + if (page_size == srv_page_size) { noncompressed_ok = !buf_page_is_corrupted( false, page, univ_page_size, NULL); } @@ -698,11 +698,11 @@ Datafile::find_space_id() assume the page is compressed if univ_page_size. logical() is equal to or less than 16k and the page_size we are checking is equal to or less than - univ_page_size.logical(). */ - if (univ_page_size.logical() <= UNIV_PAGE_SIZE_DEF - && page_size <= univ_page_size.logical()) { + srv_page_size. */ + if (srv_page_size <= UNIV_PAGE_SIZE_DEF + && page_size <= srv_page_size) { const page_size_t compr_page_size( - page_size, univ_page_size.logical(), + page_size, srv_page_size, true); compressed_ok = !buf_page_is_corrupted( @@ -831,7 +831,10 @@ open that file, and read the contents into m_filepath. dberr_t RemoteDatafile::open_link_file() { - set_link_filepath(NULL); + if (m_link_filepath == NULL) { + m_link_filepath = fil_make_filepath(NULL, name(), ISL, false); + } + m_filepath = read_link_file(m_link_filepath); return(m_filepath == NULL ? DB_CANNOT_OPEN_FILE : DB_SUCCESS); @@ -897,18 +900,6 @@ RemoteDatafile::shutdown() } } -/** Set the link filepath. 
Use default datadir, the base name of -the path provided without its suffix, plus DOT_ISL. -@param[in] path filepath which contains a basename to use. - If NULL, use m_name as the basename. */ -void -RemoteDatafile::set_link_filepath(const char* path) -{ - if (m_link_filepath == NULL) { - m_link_filepath = fil_make_filepath(NULL, name(), ISL, false); - } -} - /** Creates a new InnoDB Symbolic Link (ISL) file. It is always created under the 'datadir' of MySQL. The datadir is the directory of a running mysqld program. We can refer to it by simply using the path ".". diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc index 0c25fae9626..e46cb0d7cf1 100644 --- a/storage/innobase/fsp/fsp0fsp.cc +++ b/storage/innobase/fsp/fsp0fsp.cc @@ -587,7 +587,7 @@ fsp_init_file_page_low( { page_t* page = buf_block_get_frame(block); - memset(page, 0, UNIV_PAGE_SIZE); + memset(page, 0, srv_page_size); mach_write_to_4(page + FIL_PAGE_OFFSET, block->page.id.page_no()); mach_write_to_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, @@ -1989,7 +1989,7 @@ fseg_create( ut_ad(mtr); ut_ad(byte_offset + FSEG_HEADER_SIZE - <= UNIV_PAGE_SIZE - FIL_PAGE_DATA_END); + <= srv_page_size - FIL_PAGE_DATA_END); mtr_x_lock(&space->latch, mtr); const page_size_t page_size(space->flags); diff --git a/storage/innobase/fsp/fsp0sysspace.cc b/storage/innobase/fsp/fsp0sysspace.cc index 1344e02bcfb..2a120989532 100644 --- a/storage/innobase/fsp/fsp0sysspace.cc +++ b/storage/innobase/fsp/fsp0sysspace.cc @@ -353,7 +353,7 @@ SysTablespace::check_size( So we need to round the size downward to a megabyte.*/ const ulint rounded_size_pages = static_cast( - size >> UNIV_PAGE_SIZE_SHIFT); + size >> srv_page_size_shift); /* If last file */ if (&file == &m_files.back() && m_auto_extend_last_file) { @@ -397,16 +397,16 @@ SysTablespace::set_size( /* We created the data file and now write it full of zeros */ ib::info() << "Setting file '" << file.filepath() << "' size to " - << (file.m_size >> (20 - UNIV_PAGE_SIZE_SHIFT)) << " MB." + << (file.m_size >> (20U - srv_page_size_shift)) << " MB." " Physically writing the file full; Please wait ..."; bool success = os_file_set_size( file.m_filepath, file.m_handle, - static_cast(file.m_size) << UNIV_PAGE_SIZE_SHIFT); + static_cast(file.m_size) << srv_page_size_shift); if (success) { ib::info() << "File '" << file.filepath() << "' size is now " - << (file.m_size >> (20 - UNIV_PAGE_SIZE_SHIFT)) + << (file.m_size >> (20U - srv_page_size_shift)) << " MB."; } else { ib::error() << "Could not set the file size of '" @@ -766,11 +766,10 @@ SysTablespace::check_file_spec( } if (!m_auto_extend_last_file - && get_sum_of_sizes() < min_expected_size / UNIV_PAGE_SIZE) { - + && get_sum_of_sizes() + < (min_expected_size >> srv_page_size_shift)) { ib::error() << "Tablespace size must be at least " - << min_expected_size / (1024 * 1024) << " MB"; - + << (min_expected_size >> 20) << " MB"; return(DB_ERROR); } @@ -943,16 +942,16 @@ SysTablespace::open_or_create( /** Normalize the file size, convert from megabytes to number of pages. 
*/ void -SysTablespace::normalize() +SysTablespace::normalize_size() { files_t::iterator end = m_files.end(); for (files_t::iterator it = m_files.begin(); it != end; ++it) { - it->m_size *= (1024 * 1024) / UNIV_PAGE_SIZE; + it->m_size <<= (20U - srv_page_size_shift); } - m_last_file_size_max *= (1024 * 1024) / UNIV_PAGE_SIZE; + m_last_file_size_max <<= (20U - srv_page_size_shift); } diff --git a/storage/innobase/fts/fts0config.cc b/storage/innobase/fts/fts0config.cc index 7ad7459ea6a..6b6042dee66 100644 --- a/storage/innobase/fts/fts0config.cc +++ b/storage/innobase/fts/fts0config.cc @@ -422,7 +422,7 @@ fts_config_set_ulint( ut_a(FTS_MAX_INT_LEN < FTS_MAX_CONFIG_VALUE_LEN); - value.f_len = snprintf( + value.f_len = (ulint) snprintf( (char*) value.f_str, FTS_MAX_INT_LEN, ULINTPF, int_value); error = fts_config_set_value(trx, fts_table, name, &value); diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index c06de6a9add..d874725c374 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -502,7 +502,6 @@ fts_load_user_stopword( stopword_info); graph = fts_parse_sql_no_dict_lock( - NULL, info, "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" @@ -1932,7 +1931,7 @@ fts_create_common_tables( pars_info_bind_id(info, true, "config_table", fts_name); graph = fts_parse_sql_no_dict_lock( - &fts_table, info, fts_config_table_insert_values_sql); + info, fts_config_table_insert_values_sql); error = fts_eval_sql(trx, graph); @@ -2005,7 +2004,7 @@ fts_create_one_index_table( ? DATA_VARCHAR : DATA_VARMYSQL, field->col->prtype, FTS_MAX_WORD_LEN_IN_CHAR - * field->col->mbmaxlen); + * unsigned(field->col->mbmaxlen)); dict_mem_table_add_col(new_table, heap, "first_doc_id", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, @@ -2311,7 +2310,7 @@ fts_trx_create( savep != NULL; savep = UT_LIST_GET_NEXT(trx_savepoints, savep)) { - fts_savepoint_take(trx, ftt, savep->name); + fts_savepoint_take(ftt, savep->name); } return(ftt); @@ -2810,7 +2809,7 @@ fts_update_sync_doc_id( info = pars_info_create(); - id_len = snprintf( + id_len = (ulint) snprintf( (char*) id, sizeof(id), FTS_DOC_ID_FORMAT, doc_id + 1); pars_info_bind_varchar_literal(info, "doc_id", id, id_len); @@ -3561,7 +3560,7 @@ fts_add_doc_by_id( dict_index_copy_types(clust_ref, clust_index, n_fields); row_build_row_ref_in_tuple( - clust_ref, rec, fts_id_index, NULL, NULL); + clust_ref, rec, fts_id_index, NULL); btr_pcur_open_with_no_init( clust_index, clust_ref, PAGE_CUR_LE, @@ -4487,7 +4486,7 @@ fts_sync_table( ut_ad(table->fts); - if (!dict_table_is_discarded(table) && table->fts->cache + if (table->space && table->fts->cache && !dict_table_is_corrupted(table)) { err = fts_sync(table->fts->cache->sync, unlock_cache, wait, has_dict); @@ -4725,7 +4724,7 @@ fts_tokenize_add_word_for_parser( MYSQL_FTPARSER_PARAM* param, /* in: parser paramter */ const char* word, /* in: token word */ int word_len, /* in: word len */ - MYSQL_FTPARSER_BOOLEAN_INFO* boolean_info) /* in: word boolean info */ + MYSQL_FTPARSER_BOOLEAN_INFO*) { fts_string_t str; fts_tokenize_param_t* fts_param; @@ -4737,9 +4736,9 @@ fts_tokenize_add_word_for_parser( ut_ad(result_doc != NULL); str.f_str = (byte*)(word); - str.f_len = word_len; + str.f_len = ulint(word_len); str.f_n_char = fts_get_token_size( - const_cast(param->cs), word, word_len); + const_cast(param->cs), word, str.f_len); /* JAN: TODO: MySQL 5.7 FTS ut_ad(boolean_info->position >= 0); @@ -5634,7 +5633,6 @@ Take a FTS savepoint. 
*/ void fts_savepoint_take( /*===============*/ - trx_t* trx, /*!< in: transaction */ fts_trx_t* fts_trx, /*!< in: fts transaction */ const char* name) /*!< in: savepoint name */ { @@ -5912,7 +5910,7 @@ fts_savepoint_rollback( ut_a(ib_vector_size(savepoints) > 0); /* Restore the savepoint. */ - fts_savepoint_take(trx, trx->fts_trx, name); + fts_savepoint_take(trx->fts_trx, name); } } @@ -5942,7 +5940,7 @@ fts_is_aux_table_name( if (ptr != NULL) { /* We will start the match after the '/' */ ++ptr; - len = end - ptr; + len = ulint(end - ptr); } /* All auxiliary tables are prefixed with "FTS_" and the name @@ -5969,7 +5967,7 @@ fts_is_aux_table_name( /* Skip the underscore. */ ++ptr; ut_a(end > ptr); - len = end - ptr; + len = ulint(end - ptr); /* First search the common table suffix array. */ for (i = 0; fts_common_tables[i] != NULL; ++i) { @@ -6000,7 +5998,7 @@ fts_is_aux_table_name( /* Skip the underscore. */ ++ptr; ut_a(end > ptr); - len = end - ptr; + len = ulint(end - ptr); /* Search the FT index specific array. */ for (i = 0; i < FTS_NUM_AUX_INDEX; ++i) { @@ -6528,7 +6526,7 @@ fts_check_corrupt_index( if (index->id == aux_table->index_id) { ut_ad(index->type & DICT_FTS); dict_table_close(table, true, false); - return(dict_index_is_corrupted(index)); + return index->is_corrupted(); } } @@ -7175,7 +7173,6 @@ fts_drop_orphaned_tables(void) pars_info_bind_function(info, "my_func", fts_read_tables, tables); graph = fts_parse_sql_no_dict_lock( - NULL, info, "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc index 2d8944f0290..4d93451a40d 100644 --- a/storage/innobase/fts/fts0opt.cc +++ b/storage/innobase/fts/fts0opt.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2016, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under @@ -41,6 +41,9 @@ Completed 2011/7/10 Sunny and Jimmy Yang /** The FTS optimize thread's work queue. */ static ib_wqueue_t* fts_optimize_wq; +/** The FTS vector to store fts_slot_t */ +static ib_vector_t* fts_slots; + /** Time to wait for a message. */ static const ulint FTS_QUEUE_WAIT_IN_USECS = 5000000; @@ -635,9 +638,9 @@ fts_zip_read_word( ptr[len] = 0; zip->zp->next_out = ptr; - zip->zp->avail_out = len; + zip->zp->avail_out = uInt(len); - word->f_len = len; + word->f_len = ulint(len); len = 0; } break; @@ -690,15 +693,15 @@ fts_fetch_index_words( /* Skip the duplicate words. */ if (zip->word.f_len == static_cast(len) - && !memcmp(zip->word.f_str, data, len)) { + && !memcmp(zip->word.f_str, data, zip->word.f_len)) { return(TRUE); } ut_a(len <= FTS_MAX_WORD_LEN); - memcpy(zip->word.f_str, data, len); - zip->word.f_len = len; + zip->word.f_len = ulint(len); + memcpy(zip->word.f_str, data, zip->word.f_len); ut_a(zip->zp->avail_in == 0); ut_a(zip->zp->next_in == NULL); @@ -727,7 +730,7 @@ fts_fetch_index_words( case Z_OK: if (zip->zp->avail_in == 0) { zip->zp->next_in = static_cast(data); - zip->zp->avail_in = len; + zip->zp->avail_in = uInt(len); ut_a(len <= FTS_MAX_WORD_LEN); len = 0; } @@ -1158,7 +1161,7 @@ fts_optimize_encode_node( ++src; /* Number of encoded pos bytes to copy. */ - pos_enc_len = src - enc->src_ilist_ptr; + pos_enc_len = ulint(src - enc->src_ilist_ptr); /* Total number of bytes required for copy. 
*/ enc_len += pos_enc_len; @@ -1230,7 +1233,7 @@ fts_optimize_node( enc->src_ilist_ptr = src_node->ilist; } - copied = enc->src_ilist_ptr - src_node->ilist; + copied = ulint(enc->src_ilist_ptr - src_node->ilist); /* While there is data in the source node and space to copy into in the destination node. */ @@ -1251,7 +1254,7 @@ test_again: fts_update_t* update; update = (fts_update_t*) ib_vector_get( - del_vec, *del_pos); + del_vec, ulint(*del_pos)); del_doc_id = update->doc_id; } @@ -1295,7 +1298,7 @@ test_again: } /* Bytes copied so for from source. */ - copied = enc->src_ilist_ptr - src_node->ilist; + copied = ulint(enc->src_ilist_ptr - src_node->ilist); } if (copied >= src_node->ilist_size) { @@ -1402,7 +1405,7 @@ fts_optimize_word( ut_a(enc.src_ilist_ptr != NULL); /* Determine the numer of bytes copied to dst_node. */ - copied = enc.src_ilist_ptr - src_node->ilist; + copied = ulint(enc.src_ilist_ptr - src_node->ilist); /* Can't copy more than whats in the vlc array. */ ut_a(copied <= src_node->ilist_size); @@ -2976,9 +2979,6 @@ DECLARE_THREAD(fts_optimize_thread)( /*================*/ void* arg) /*!< in: work queue*/ { - mem_heap_t* heap; - ib_vector_t* tables; - ib_alloc_t* heap_alloc; ulint current = 0; ibool done = FALSE; ulint n_tables = 0; @@ -2988,10 +2988,10 @@ DECLARE_THREAD(fts_optimize_thread)( ut_ad(!srv_read_only_mode); my_thread_init(); - heap = mem_heap_create(sizeof(dict_table_t*) * 64); - heap_alloc = ib_heap_allocator_create(heap); + ut_ad(fts_slots); - tables = ib_vector_create(heap_alloc, sizeof(fts_slot_t), 4); + /* Assign number of tables added in fts_slots_t to n_tables */ + n_tables = ib_vector_size(fts_slots); while (!done && srv_shutdown_state == SRV_SHUTDOWN_NONE) { @@ -3005,10 +3005,10 @@ DECLARE_THREAD(fts_optimize_thread)( fts_slot_t* slot; - ut_a(ib_vector_size(tables) > 0); + ut_a(ib_vector_size(fts_slots) > 0); slot = static_cast( - ib_vector_get(tables, current)); + ib_vector_get(fts_slots, current)); /* Handle the case of empty slots. */ if (slot->state != FTS_STATE_EMPTY) { @@ -3021,8 +3021,8 @@ DECLARE_THREAD(fts_optimize_thread)( ++current; /* Wrap around the counter. */ - if (current >= ib_vector_size(tables)) { - n_optimize = fts_optimize_how_many(tables); + if (current >= ib_vector_size(fts_slots)) { + n_optimize = fts_optimize_how_many(fts_slots); current = 0; } @@ -3036,7 +3036,7 @@ DECLARE_THREAD(fts_optimize_thread)( /* Timeout ? 
*/ if (msg == NULL) { - if (fts_is_sync_needed(tables)) { + if (fts_is_sync_needed(fts_slots)) { fts_need_sync = true; } @@ -3057,7 +3057,7 @@ DECLARE_THREAD(fts_optimize_thread)( case FTS_MSG_ADD_TABLE: ut_a(!done); if (fts_optimize_new_table( - tables, + fts_slots, static_cast( msg->ptr))) { ++n_tables; @@ -3067,7 +3067,7 @@ DECLARE_THREAD(fts_optimize_thread)( case FTS_MSG_OPTIMIZE_TABLE: if (!done) { fts_optimize_start_table( - tables, + fts_slots, static_cast( msg->ptr)); } @@ -3075,7 +3075,7 @@ DECLARE_THREAD(fts_optimize_thread)( case FTS_MSG_DEL_TABLE: if (fts_optimize_del_table( - tables, static_cast( + fts_slots, static_cast( msg->ptr))) { --n_tables; } @@ -3098,7 +3098,7 @@ DECLARE_THREAD(fts_optimize_thread)( mem_heap_free(msg->heap); if (!done) { - n_optimize = fts_optimize_how_many(tables); + n_optimize = fts_optimize_how_many(fts_slots); } else { n_optimize = 0; } @@ -3110,11 +3110,11 @@ DECLARE_THREAD(fts_optimize_thread)( if (n_tables > 0) { ulint i; - for (i = 0; i < ib_vector_size(tables); i++) { + for (i = 0; i < ib_vector_size(fts_slots); i++) { fts_slot_t* slot; slot = static_cast( - ib_vector_get(tables, i)); + ib_vector_get(fts_slots, i)); if (slot->state != FTS_STATE_EMPTY) { fts_optimize_sync_table(slot->table_id); @@ -3122,7 +3122,7 @@ DECLARE_THREAD(fts_optimize_thread)( } } - ib_vector_free(tables); + ib_vector_free(fts_slots); ib::info() << "FTS optimize thread exiting."; @@ -3142,14 +3142,52 @@ void fts_optimize_init(void) /*===================*/ { + mem_heap_t* heap; + ib_alloc_t* heap_alloc; + dict_table_t* table; + ut_ad(!srv_read_only_mode); /* For now we only support one optimize thread. */ ut_a(fts_optimize_wq == NULL); + /* Create FTS optimize work queue */ fts_optimize_wq = ib_wqueue_create(); - fts_opt_shutdown_event = os_event_create(0); ut_a(fts_optimize_wq != NULL); + + /* Create FTS vector to store fts_slot_t */ + heap = mem_heap_create(sizeof(dict_table_t*) * 64); + heap_alloc = ib_heap_allocator_create(heap); + fts_slots = ib_vector_create(heap_alloc, sizeof(fts_slot_t), 4); + + /* Add fts tables to the fts_slots vector which were skipped during restart */ + std::vector table_vector; + std::vector::iterator it; + + mutex_enter(&dict_sys->mutex); + for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); + table != NULL; + table = UT_LIST_GET_NEXT(table_LRU, table)) { + if (table->fts && + dict_table_has_fts_index(table)) { + if (fts_optimize_new_table(fts_slots, + table)){ + table_vector.push_back(table); + } + } + } + + /* It is better to call dict_table_prevent_eviction() + outside the above loop because it operates on + dict_sys->table_LRU list.*/ + for (it=table_vector.begin();it!=table_vector.end();++it) { + dict_table_prevent_eviction(*it); + } + + mutex_exit(&dict_sys->mutex); + table_vector.clear(); + + fts_opt_shutdown_event = os_event_create(0); last_check_sync_time = ut_time(); os_thread_create(fts_optimize_thread, fts_optimize_wq, NULL); diff --git a/storage/innobase/fts/fts0plugin.cc b/storage/innobase/fts/fts0plugin.cc index b7a05deeb34..7f4f5161148 100644 --- a/storage/innobase/fts/fts0plugin.cc +++ b/storage/innobase/fts/fts0plugin.cc @@ -32,26 +32,12 @@ Created 2013/06/04 Shaohua Wang /******************************************************************//** FTS default parser init @return 0 */ -static -int -fts_default_parser_init( -/*====================*/ - MYSQL_FTPARSER_PARAM *param) /*!< in: plugin parser param */ -{ - return(0); -} +static int fts_default_parser_init(MYSQL_FTPARSER_PARAM*) { return 0; } 
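In this hunk and several later ones, callback parameters that the plugin signature requires but the body never reads lose their names, which keeps the prototype compatible while avoiding unused-parameter warnings. A trivial illustration with a hypothetical function name (only MYSQL_FTPARSER_PARAM comes from the patch):

    /* Sketch only: same no-op shape as fts_default_parser_init() above. */
    static int example_parser_init(MYSQL_FTPARSER_PARAM*) { return 0; }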
/******************************************************************//** FTS default parser deinit @return 0 */ -static -int -fts_default_parser_deinit( -/*======================*/ - MYSQL_FTPARSER_PARAM *param) /*!< in: plugin parser param */ -{ - return(0); -} +static int fts_default_parser_deinit(MYSQL_FTPARSER_PARAM*) { return 0; } /******************************************************************//** FTS default parser parse from ft_static.c in MYISAM. @@ -134,7 +120,7 @@ fts_query_add_word_for_parser( case FT_TOKEN_WORD: term_node = fts_ast_create_node_term_for_parser( - state, word, word_len); + state, word, ulint(word_len)); if (info->trunc) { fts_ast_term_set_wildcard(term_node); @@ -251,7 +237,7 @@ fts_parse_query_internal( int ret = param->mysql_add_word( param, reinterpret_cast(w.pos), - w.len, &info); + int(w.len), &info); if (ret) { return(ret); } diff --git a/storage/innobase/fts/fts0que.cc b/storage/innobase/fts/fts0que.cc index 598573c2192..6a0496bedcd 100644 --- a/storage/innobase/fts/fts0que.cc +++ b/storage/innobase/fts/fts0que.cc @@ -1750,7 +1750,7 @@ fts_query_match_phrase_add_word_for_parser( MYSQL_FTPARSER_PARAM* param, /*!< in: parser param */ const char* word, /*!< in: token */ int word_len, /*!< in: token length */ - MYSQL_FTPARSER_BOOLEAN_INFO* info) /*!< in: token info */ + MYSQL_FTPARSER_BOOLEAN_INFO*) { fts_phrase_param_t* phrase_param; fts_phrase_t* phrase; @@ -1772,8 +1772,8 @@ fts_query_match_phrase_add_word_for_parser( } match.f_str = (uchar *)(word); - match.f_len = word_len; - match.f_n_char = fts_get_token_size(phrase->charset, word, word_len); + match.f_len = ulint(word_len); + match.f_n_char= fts_get_token_size(phrase->charset, word, match.f_len); if (match.f_len > 0) { /* Get next token to match. */ @@ -1905,7 +1905,7 @@ fts_query_match_phrase( &phrase_param, phrase->parser, ptr, - (end - ptr))) { + ulint(end - ptr))) { break; } } else { @@ -3296,7 +3296,7 @@ fts_query_filter_doc_ids( ++ptr; /* Bytes decoded so far */ - decoded = ptr - (byte*) data; + decoded = ulint(ptr - (byte*) data); /* We simply collect the matching documents and the positions here and match later. */ @@ -3920,7 +3920,6 @@ fts_query_can_optimize( } /** FTS Query entry point. -@param[in] trx transaction @param[in] index fts index to search @param[in] flags FTS search mode @param[in] query_str FTS query @@ -3929,7 +3928,6 @@ fts_query_can_optimize( @return DB_SUCCESS if successful otherwise error code */ dberr_t fts_query( - trx_t* trx, dict_index_t* index, uint flags, const byte* query_str, @@ -4105,7 +4103,7 @@ fts_query( << diff_time / 1000 << " secs: " << diff_time % 1000 << " millisec: row(s) " << ((*result)->rankings_by_id - ? rbt_size((*result)->rankings_by_id) + ? lint(rbt_size((*result)->rankings_by_id)) : -1); /* Log memory consumption & result size */ diff --git a/storage/innobase/fts/fts0sql.cc b/storage/innobase/fts/fts0sql.cc index ae2186c2d30..6f66486ed6e 100644 --- a/storage/innobase/fts/fts0sql.cc +++ b/storage/innobase/fts/fts0sql.cc @@ -116,7 +116,8 @@ fts_get_table_name_prefix( prefix_name_len = dbname_len + 4 + len + 1; - prefix_name = static_cast(ut_malloc_nokey(prefix_name_len)); + prefix_name = static_cast( + ut_malloc_nokey(unsigned(prefix_name_len))); len = sprintf(prefix_name, "%.*sFTS_%s", dbname_len, fts_table->parent, table_id); @@ -198,16 +199,13 @@ Parse an SQL string. 
que_t* fts_parse_sql_no_dict_lock( /*=======================*/ - fts_table_t* fts_table, /*!< in: FTS aux table info */ pars_info_t* info, /*!< in: info struct, or NULL */ const char* sql) /*!< in: SQL string to evaluate */ { char* str; que_t* graph; -#ifdef UNIV_DEBUG ut_ad(mutex_own(&dict_sys->mutex)); -#endif str = ut_str3cat(fts_sql_begin, sql, fts_sql_end); diff --git a/storage/innobase/gis/gis0geo.cc b/storage/innobase/gis/gis0geo.cc index 436249c0026..71d637d62d5 100644 --- a/storage/innobase/gis/gis0geo.cc +++ b/storage/innobase/gis/gis0geo.cc @@ -30,6 +30,7 @@ Created 2013/03/27 Allen Lai and Jimmy Yang #include "mach0data.h" #include +#include /* These definitions are for comparing 2 mbrs. */ @@ -72,7 +73,6 @@ rtree_add_point_to_mbr( where point is stored */ uchar* end, /*!< in: end of wkb. */ uint n_dims, /*!< in: dimensions. */ - uchar byte_order, /*!< in: byte order. */ double* mbr) /*!< in/out: mbr, which must be of length n_dims * 2. */ { @@ -112,11 +112,10 @@ rtree_get_point_mbr( where point is stored. */ uchar* end, /*!< in: end of wkb. */ uint n_dims, /*!< in: dimensions. */ - uchar byte_order, /*!< in: byte order. */ double* mbr) /*!< in/out: mbr, must be of length n_dims * 2. */ { - return rtree_add_point_to_mbr(wkb, end, n_dims, byte_order, mbr); + return rtree_add_point_to_mbr(wkb, end, n_dims, mbr); } @@ -131,7 +130,6 @@ rtree_get_linestring_mbr( where point is stored. */ uchar* end, /*!< in: end of wkb. */ uint n_dims, /*!< in: dimensions. */ - uchar byte_order, /*!< in: byte order. */ double* mbr) /*!< in/out: mbr, must be of length n_dims * 2. */ { @@ -142,8 +140,7 @@ rtree_get_linestring_mbr( for (; n_points > 0; --n_points) { /* Add next point to mbr */ - if (rtree_add_point_to_mbr(wkb, end, n_dims, - byte_order, mbr)) { + if (rtree_add_point_to_mbr(wkb, end, n_dims, mbr)) { return(-1); } } @@ -162,7 +159,6 @@ rtree_get_polygon_mbr( where point is stored. */ uchar* end, /*!< in: end of wkb. */ uint n_dims, /*!< in: dimensions. */ - uchar byte_order, /*!< in: byte order. */ double* mbr) /*!< in/out: mbr, must be of length n_dims * 2. */ { @@ -178,8 +174,7 @@ rtree_get_polygon_mbr( for (; n_points > 0; --n_points) { /* Add next point to mbr */ - if (rtree_add_point_to_mbr(wkb, end, n_dims, - byte_order, mbr)) { + if (rtree_add_point_to_mbr(wkb, end, n_dims, mbr)) { return(-1); } } @@ -205,11 +200,10 @@ rtree_get_geometry_mbr( by itself. 
*/ { int res; - uchar byte_order = 2; uint wkb_type = 0; uint n_items; - byte_order = *(*wkb); + /* byte_order = *(*wkb); */ ++(*wkb); wkb_type = uint4korr((*wkb)); @@ -217,24 +211,22 @@ rtree_get_geometry_mbr( switch ((enum wkbType) wkb_type) { case wkbPoint: - res = rtree_get_point_mbr(wkb, end, n_dims, byte_order, mbr); + res = rtree_get_point_mbr(wkb, end, n_dims, mbr); break; case wkbLineString: - res = rtree_get_linestring_mbr(wkb, end, n_dims, - byte_order, mbr); + res = rtree_get_linestring_mbr(wkb, end, n_dims, mbr); break; case wkbPolygon: - res = rtree_get_polygon_mbr(wkb, end, n_dims, byte_order, mbr); + res = rtree_get_polygon_mbr(wkb, end, n_dims, mbr); break; case wkbMultiPoint: n_items = uint4korr((*wkb)); (*wkb) += 4; for (; n_items > 0; --n_items) { - byte_order = *(*wkb); + /* byte_order = *(*wkb); */ ++(*wkb); (*wkb) += 4; - if (rtree_get_point_mbr(wkb, end, n_dims, - byte_order, mbr)) { + if (rtree_get_point_mbr(wkb, end, n_dims, mbr)) { return(-1); } } @@ -244,11 +236,10 @@ rtree_get_geometry_mbr( n_items = uint4korr((*wkb)); (*wkb) += 4; for (; n_items > 0; --n_items) { - byte_order = *(*wkb); + /* byte_order = *(*wkb); */ ++(*wkb); (*wkb) += 4; - if (rtree_get_linestring_mbr(wkb, end, n_dims, - byte_order, mbr)) { + if (rtree_get_linestring_mbr(wkb, end, n_dims, mbr)) { return(-1); } } @@ -258,11 +249,10 @@ rtree_get_geometry_mbr( n_items = uint4korr((*wkb)); (*wkb) += 4; for (; n_items > 0; --n_items) { - byte_order = *(*wkb); + /* byte_order = *(*wkb); */ ++(*wkb); (*wkb) += 4; - if (rtree_get_polygon_mbr(wkb, end, n_dims, - byte_order, mbr)) { + if (rtree_get_polygon_mbr(wkb, end, n_dims, mbr)) { return(-1); } } @@ -366,7 +356,7 @@ mbr_join_square( /* Check if finite (not infinity or NaN), so we don't get NaN in calculations */ - if (!isfinite(square)) { + if (!std::isfinite(square)) { return DBL_MAX; } @@ -402,7 +392,7 @@ copy_coords( /*========*/ double* dst, /*!< in/out: destination. */ const double* src, /*!< in: source. */ - int n_dim) /*!< in: dimensions. */ + int) { memcpy(dst, src, DATA_MBR_LEN); } @@ -624,7 +614,7 @@ rtree_key_cmp( /*==========*/ page_cur_mode_t mode, /*!< in: compare method. */ const uchar* b, /*!< in: first key. */ - int b_len, /*!< in: first key len. */ + int, const uchar* a, /*!< in: second key. */ int a_len) /*!< in: second key len. */ { diff --git a/storage/innobase/gis/gis0rtree.cc b/storage/innobase/gis/gis0rtree.cc index ee0817947fc..eac904e5b24 100644 --- a/storage/innobase/gis/gis0rtree.cc +++ b/storage/innobase/gis/gis0rtree.cc @@ -38,6 +38,7 @@ Created 2013/03/27 Allen Lai and Jimmy Yang #include "trx0undo.h" #include "srv0mon.h" #include "gis0geo.h" +#include /*************************************************************//** Initial split nodes info for R-tree split. @@ -71,7 +72,7 @@ rtr_page_split_initialize_nodes( page = buf_block_get_frame(block); n_uniq = dict_index_get_n_unique_in_tree(cursor->index); - n_recs = page_get_n_recs(page) + 1; + n_recs = ulint(page_get_n_recs(page)) + 1; /*We reserve 2 MBRs memory space for temp result of split algrithm. 
And plus the new mbr that need to insert, we @@ -133,10 +134,8 @@ rtr_index_build_node_ptr( pointer */ ulint page_no,/*!< in: page number to put in node pointer */ - mem_heap_t* heap, /*!< in: memory heap where pointer + mem_heap_t* heap) /*!< in: memory heap where pointer created */ - ulint level) /*!< in: level of rec in tree: - 0 means leaf level */ { dtuple_t* tuple; dfield_t* field; @@ -291,7 +290,6 @@ rtr_update_mbr_field( ulint up_match = 0; ulint low_match = 0; ulint child; - ulint level; ulint rec_info; page_zip_des_t* page_zip; bool ins_suc = true; @@ -310,7 +308,7 @@ rtr_update_mbr_field( page_zip = buf_block_get_page_zip(block); child = btr_node_ptr_get_child_page_no(rec, offsets); - level = btr_page_get_level(buf_block_get_frame(block)); + const bool is_leaf = page_is_leaf(block->frame); if (new_rec) { child_rec = new_rec; @@ -319,7 +317,7 @@ rtr_update_mbr_field( } dtuple_t* node_ptr = rtr_index_build_node_ptr( - index, mbr, child_rec, child, heap, level); + index, mbr, child_rec, child, heap); /* We need to remember the child page no of cursor2, since page could be reorganized or insert a new rec before it. */ @@ -429,7 +427,7 @@ rtr_update_mbr_field( ut_ad(old_rec != insert_rec); page_cur_position(old_rec, block, &page_cur); - offsets2 = rec_get_offsets(old_rec, index, NULL, !level, + offsets2 = rec_get_offsets(old_rec, index, NULL, is_leaf, ULINT_UNDEFINED, &heap); page_cur_delete_rec(&page_cur, index, offsets2, mtr); @@ -459,7 +457,7 @@ update_mbr: cur2_rec = cursor2->page_cur.rec; offsets2 = rec_get_offsets(cur2_rec, index, NULL, - !level, + is_leaf, ULINT_UNDEFINED, &heap); cur2_rec_info = rec_get_info_bits(cur2_rec, @@ -519,7 +517,7 @@ update_mbr: if (ins_suc) { btr_cur_position(index, insert_rec, block, cursor); offsets = rec_get_offsets(insert_rec, - index, offsets, !level, + index, offsets, is_leaf, ULINT_UNDEFINED, &heap); } @@ -534,7 +532,7 @@ update_mbr: cur2_rec = btr_cur_get_rec(cursor2); offsets2 = rec_get_offsets(cur2_rec, index, NULL, - !level, + is_leaf, ULINT_UNDEFINED, &heap); /* If the cursor2 position is on a wrong rec, we @@ -548,7 +546,7 @@ update_mbr: while (!page_rec_is_supremum(cur2_rec)) { offsets2 = rec_get_offsets(cur2_rec, index, NULL, - !level, + is_leaf, ULINT_UNDEFINED, &heap); cur2_pno = btr_node_ptr_get_child_page_no( @@ -634,7 +632,6 @@ rtr_adjust_upper_level( buf_block_t* new_block, /*!< in/out: the new half page */ rtr_mbr_t* mbr, /*!< in: MBR on the old page */ rtr_mbr_t* new_mbr, /*!< in: MBR on the new page */ - ulint direction, /*!< in: FSP_UP or FSP_DOWN */ mtr_t* mtr) /*!< in: mtr */ { page_t* page; @@ -653,10 +650,8 @@ rtr_adjust_upper_level( ulint next_page_no; ulint space; page_cur_t* page_cursor; - rtr_mbr_t parent_mbr; lock_prdt_t prdt; lock_prdt_t new_prdt; - lock_prdt_t parent_prdt; dberr_t err; big_rec_t* dummy_big_rec; rec_t* rec; @@ -686,8 +681,6 @@ rtr_adjust_upper_level( page_cursor = btr_cur_get_page_cur(&cursor); - rtr_get_mbr_from_rec(page_cursor->rec, offsets, &parent_mbr); - rtr_update_mbr_field(&cursor, offsets, NULL, page, mbr, NULL, mtr); /* Already updated parent MBR, reset in our path */ @@ -703,7 +696,7 @@ rtr_adjust_upper_level( node_ptr_upper = rtr_index_build_node_ptr( index, new_mbr, page_rec_get_next(page_get_infimum_rec(new_page)), - new_page_no, heap, level); + new_page_no, heap); ulint up_match = 0; ulint low_match = 0; @@ -742,11 +735,9 @@ rtr_adjust_upper_level( prdt.op = 0; new_prdt.data = static_cast(new_mbr); new_prdt.op = 0; - parent_prdt.data = static_cast(&parent_mbr); - parent_prdt.op = 0; 
lock_prdt_update_parent(block, new_block, &prdt, &new_prdt, - &parent_prdt, index->table->space->id, + index->table->space->id, page_cursor->block->page.id.page_no()); mem_heap_free(heap); @@ -913,7 +904,7 @@ rtr_split_page_move_rec_list( same temp-table in parallel. max_trx_id is ignored for temp tables because it not required for MVCC. */ - if (is_leaf && !dict_table_is_temporary(index->table)) { + if (is_leaf && !index->table->is_temporary()) { page_update_max_trx_id(new_block, NULL, page_get_max_trx_id(page), mtr); @@ -1001,7 +992,6 @@ rtr_page_split_and_insert( page_t* page; page_t* new_page; ulint page_no; - byte direction; ulint hint_page_no; buf_block_t* new_block; page_zip_des_t* page_zip; @@ -1065,7 +1055,7 @@ func_start: *heap, cursor, offsets, tuple, &buf_pos); /* Divide all mbrs to two groups. */ - n_recs = page_get_n_recs(page) + 1; + n_recs = ulint(page_get_n_recs(page)) + 1; end_split_node = rtr_split_node_array + n_recs; @@ -1091,9 +1081,8 @@ func_start: static_cast(first_rec)); /* Allocate a new page to the index */ - direction = FSP_UP; hint_page_no = page_no + 1; - new_block = btr_page_alloc(cursor->index, hint_page_no, direction, + new_block = btr_page_alloc(cursor->index, hint_page_no, FSP_UP, page_level, mtr, mtr); new_page_zip = buf_block_get_page_zip(new_block); btr_page_create(new_block, new_page_zip, cursor->index, @@ -1268,12 +1257,12 @@ after_insert: /* Check any predicate locks need to be moved/copied to the new page */ - lock_prdt_update_split(block, new_block, &prdt, &new_prdt, + lock_prdt_update_split(new_block, &prdt, &new_prdt, cursor->index->table->space->id, page_no); /* Adjust the upper level. */ rtr_adjust_upper_level(cursor, flags, block, new_block, - &mbr, &new_mbr, direction, mtr); + &mbr, &new_mbr, mtr); /* Save the new ssn to the root page, since we need to reinit the first ssn value from it after restart server. */ @@ -1297,7 +1286,7 @@ after_insert: if (!rec) { /* We play safe and reset the free bits for new_page */ if (!dict_index_is_clust(cursor->index) - && !dict_table_is_temporary(cursor->index->table)) { + && !cursor->index->table->is_temporary()) { ibuf_reset_free_bits(new_block); ibuf_reset_free_bits(block); } @@ -1334,7 +1323,6 @@ dberr_t rtr_ins_enlarge_mbr( /*================*/ btr_cur_t* btr_cur, /*!< in: btr cursor */ - que_thr_t* thr, /*!< in: query thread */ mtr_t* mtr) /*!< in: mtr */ { dberr_t err = DB_SUCCESS; @@ -1445,7 +1433,7 @@ rtr_page_copy_rec_list_end_no_locks( btr_assert_not_corrupted(new_block, index); ut_a(page_is_comp(new_page) == page_rec_is_comp(rec)); - ut_a(mach_read_from_2(new_page + UNIV_PAGE_SIZE - 10) == (ulint) + ut_a(mach_read_from_2(new_page + srv_page_size - 10) == (ulint) (page_is_comp(new_page) ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM)); cur_rec = page_rec_get_next( @@ -1668,10 +1656,7 @@ rtr_merge_mbr_changed( btr_cur_t* cursor2, /*!< in: the other cursor */ ulint* offsets, /*!< in: rec offsets */ ulint* offsets2, /*!< in: rec offsets */ - rtr_mbr_t* new_mbr, /*!< out: MBR to update */ - buf_block_t* merge_block, /*!< in: page to merge */ - buf_block_t* block, /*!< in: page be merged */ - dict_index_t* index) /*!< in: index */ + rtr_mbr_t* new_mbr) /*!< out: MBR to update */ { double* mbr; double mbr1[SPDIMS * 2]; @@ -1716,9 +1701,6 @@ rtr_merge_and_update_mbr( ulint* offsets, /*!< in: rec offsets */ ulint* offsets2, /*!< in: rec offsets */ page_t* child_page, /*!< in: the page. 
*/ - buf_block_t* merge_block, /*!< in: page to merge */ - buf_block_t* block, /*!< in: page be merged */ - dict_index_t* index, /*!< in: index */ mtr_t* mtr) /*!< in: mtr */ { dberr_t err = DB_SUCCESS; @@ -1728,8 +1710,7 @@ rtr_merge_and_update_mbr( ut_ad(dict_index_is_spatial(cursor->index)); changed = rtr_merge_mbr_changed(cursor, cursor2, offsets, offsets2, - &new_mbr, merge_block, - block, index); + &new_mbr); /* Update the mbr field of the rec. And will delete the record pointed by cursor2 */ @@ -1739,7 +1720,7 @@ rtr_merge_and_update_mbr( err = DB_ERROR; } } else { - rtr_node_ptr_delete(cursor2->index, cursor2, block, mtr); + rtr_node_ptr_delete(cursor2, mtr); } return(err); @@ -1750,10 +1731,8 @@ Deletes on the upper level the node pointer to a page. */ void rtr_node_ptr_delete( /*================*/ - dict_index_t* index, /*!< in: index tree */ btr_cur_t* cursor, /*!< in: search cursor, contains information about parent nodes in search */ - buf_block_t* block, /*!< in: page whose node pointer is deleted */ mtr_t* mtr) /*!< in: mtr */ { ibool compressed; @@ -1843,12 +1822,14 @@ rtr_rec_cal_increase( @param[in] tuple range tuple containing mbr, may also be empty tuple @param[in] mode search mode @return estimated number of rows */ -int64_t +ha_rows rtr_estimate_n_rows_in_range( dict_index_t* index, const dtuple_t* tuple, page_cur_mode_t mode) { + ut_ad(dict_index_is_spatial(index)); + /* Check tuple & mode */ if (tuple->n_fields == 0) { return(HA_POS_ERROR); @@ -1870,64 +1851,48 @@ rtr_estimate_n_rows_in_range( ); /* Read mbr from tuple. */ - const dfield_t* dtuple_field; - ulint dtuple_f_len MY_ATTRIBUTE((unused)); rtr_mbr_t range_mbr; double range_area; - byte* range_mbr_ptr; + const byte* range_mbr_ptr; - dtuple_field = dtuple_get_nth_field(tuple, 0); - dtuple_f_len = dfield_get_len(dtuple_field); - range_mbr_ptr = reinterpret_cast(dfield_get_data(dtuple_field)); + const dfield_t* dtuple_field = dtuple_get_nth_field(tuple, 0); + ut_ad(dfield_get_len(dtuple_field) >= DATA_MBR_LEN); + range_mbr_ptr = reinterpret_cast( + dfield_get_data(dtuple_field)); - ut_ad(dtuple_f_len >= DATA_MBR_LEN); rtr_read_mbr(range_mbr_ptr, &range_mbr); range_area = (range_mbr.xmax - range_mbr.xmin) * (range_mbr.ymax - range_mbr.ymin); /* Get index root page. */ mtr_t mtr; - buf_block_t* block; - page_t* page; - ulint n_recs; mtr.start(); index->set_modified(mtr); mtr_s_lock(&index->lock, &mtr); - block = btr_block_get( + buf_block_t* block = btr_block_get( page_id_t(index->table->space->id, index->page), page_size_t(index->table->space->flags), RW_S_LATCH, index, &mtr); - page = buf_block_get_frame(block); - n_recs = page_header_get_field(page, PAGE_N_RECS); + const page_t* page = buf_block_get_frame(block); + const unsigned n_recs = page_header_get_field(page, PAGE_N_RECS); if (n_recs == 0) { mtr.commit(); return(HA_POS_ERROR); } - rec_t* rec; - byte* field; - ulint len; - ulint* offsets = NULL; - mem_heap_t* heap; - - heap = mem_heap_create(512); - rec = page_rec_get_next(page_get_infimum_rec(page)); - offsets = rec_get_offsets(rec, index, offsets, page_rec_is_leaf(rec), - ULINT_UNDEFINED, &heap); - /* Scan records in root page and calculate area. 
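The rewritten rtr_estimate_n_rows_in_range() accumulates, over the entries of the index root page, a per-entry fraction derived from how the search MBR overlaps that entry's MBR (the exact term depends on the search mode), divides by the number of root entries, and scales the table's row count by the result. A worked example with invented numbers, none of which come from the patch:

    /* Sketch only: n_rows = 1000, n_recs = 4 root entries,
       accumulated fraction area = 0.5
       => estimate = 1000 * (0.5 / 4) = 125 rows. */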
*/ double area = 0; - while (!page_rec_is_supremum(rec)) { + for (const rec_t* rec = page_rec_get_next( + page_get_infimum_rec(block->frame)); + !page_rec_is_supremum(rec); + rec = page_rec_get_next_const(rec)) { rtr_mbr_t mbr; double rec_area; - field = rec_get_nth_field(rec, offsets, 0, &len); - ut_ad(len == DATA_MBR_LEN); - - rtr_read_mbr(field, &mbr); + rtr_read_mbr(rec, &mbr); rec_area = (mbr.xmax - mbr.xmin) * (mbr.ymax - mbr.ymin); @@ -1944,8 +1909,8 @@ rtr_estimate_n_rows_in_range( case PAGE_CUR_WITHIN: case PAGE_CUR_MBR_EQUAL: if (rtree_key_cmp( - PAGE_CUR_WITHIN, range_mbr_ptr, - DATA_MBR_LEN, field, DATA_MBR_LEN) + PAGE_CUR_WITHIN, range_mbr_ptr, + DATA_MBR_LEN, rec, DATA_MBR_LEN) == 0) { area += 1; } @@ -1959,22 +1924,23 @@ rtr_estimate_n_rows_in_range( switch (mode) { case PAGE_CUR_CONTAIN: case PAGE_CUR_INTERSECT: - area += rtree_area_overlapping(range_mbr_ptr, - field, DATA_MBR_LEN) / rec_area; + area += rtree_area_overlapping( + range_mbr_ptr, rec, DATA_MBR_LEN) + / rec_area; break; case PAGE_CUR_DISJOINT: area += 1; - area -= rtree_area_overlapping(range_mbr_ptr, - field, DATA_MBR_LEN) / rec_area; + area -= rtree_area_overlapping( + range_mbr_ptr, rec, DATA_MBR_LEN) + / rec_area; break; case PAGE_CUR_WITHIN: case PAGE_CUR_MBR_EQUAL: - if (rtree_key_cmp( - PAGE_CUR_WITHIN, range_mbr_ptr, - DATA_MBR_LEN, field, DATA_MBR_LEN) - == 0) { + if (!rtree_key_cmp( + PAGE_CUR_WITHIN, range_mbr_ptr, + DATA_MBR_LEN, rec, DATA_MBR_LEN)) { area += range_area / rec_area; } @@ -1983,17 +1949,14 @@ rtr_estimate_n_rows_in_range( ut_error; } } - - rec = page_rec_get_next(rec); } mtr.commit(); - mem_heap_free(heap); - if (!isfinite(area)) { + if (!std::isfinite(area)) { return(HA_POS_ERROR); } - return(static_cast(dict_table_get_n_rows(index->table) - * area / n_recs)); + area /= n_recs; + return ha_rows(dict_table_get_n_rows(index->table) * area); } diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc index 095b421dc48..4f24f298196 100644 --- a/storage/innobase/gis/gis0sea.cc +++ b/storage/innobase/gis/gis0sea.cc @@ -304,7 +304,7 @@ rtr_pcur_getnext_from_path( rtr_info->thr); } new_split = true; -#if UNIV_GIS_DEBUG +#if defined(UNIV_GIS_DEBUG) fprintf(stderr, "GIS_DIAG: Splitted page found: %d, %ld\n", static_cast(need_parent), next_page_no); @@ -407,8 +407,7 @@ rtr_pcur_getnext_from_path( } lock_prdt_lock(block, &prdt, index, LOCK_S, - LOCK_PREDICATE, btr_cur->rtr_info->thr, - mtr); + LOCK_PREDICATE, btr_cur->rtr_info->thr); if (rw_latch == RW_NO_LATCH) { rw_lock_s_unlock(&(block->lock)); @@ -729,7 +728,7 @@ rtr_page_get_father_node_ptr( rtr_get_mbr_from_rec(user_rec, offsets, &mbr); tuple = rtr_index_build_node_ptr( - index, &mbr, user_rec, page_no, heap, level); + index, &mbr, user_rec, page_no, heap); if (sea_cur && !sea_cur->rtr_info) { sea_cur = NULL; @@ -1454,7 +1453,7 @@ rtr_leaf_push_match_rec( data_len = rec_offs_data_size(offsets) + rec_offs_extra_size(offsets); match_rec->used += data_len; - ut_ad(match_rec->used < UNIV_PAGE_SIZE); + ut_ad(match_rec->used < srv_page_size); } /**************************************************************//** diff --git a/storage/innobase/ha/ha0ha.cc b/storage/innobase/ha/ha0ha.cc index da542d4f742..fa1a9bc5db9 100644 --- a/storage/innobase/ha/ha0ha.cc +++ b/storage/innobase/ha/ha0ha.cc @@ -60,7 +60,8 @@ ib_create( if (n_sync_obj == 0) { table->heap = mem_heap_create_typed( - ut_min(static_cast(4096), + std::min( + 4096, MEM_MAX_ALLOC_IN_BUF / 2 - MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)), type); @@ -84,7 +85,8 @@ 
ib_create( for (ulint i = 0; i < n_sync_obj; i++) { table->heaps[i] = mem_heap_create_typed( - ut_min(static_cast(4096), + std::min( + 4096, MEM_MAX_ALLOC_IN_BUF / 2 - MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)), type); @@ -126,7 +128,8 @@ ib_recreate( for (ulint i = 0; i < new_table->n_sync_obj; i++) { new_table->heaps[i] = mem_heap_create_typed( - ut_min(static_cast(4096), + std::min( + 4096, MEM_MAX_ALLOC_IN_BUF / 2 - MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)), MEM_HEAP_FOR_PAGE_HASH); @@ -192,7 +195,7 @@ ha_clear( #ifdef BTR_CUR_HASH_ADAPT # if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG /** Maximum number of records in a page */ -static const lint MAX_N_POINTERS +static const ulint MAX_N_POINTERS = UNIV_PAGE_SIZE_MAX / REC_N_NEW_EXTRA_BYTES; # endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ @@ -242,8 +245,8 @@ ha_insert_for_fold_func( buf_block_t* prev_block = prev_node->block; ut_a(prev_block->frame == page_align(prev_node->data)); - ut_a(my_atomic_addlint( - &prev_block->n_pointers, -1) + ut_a(my_atomic_addlint(&prev_block->n_pointers, + ulint(-1)) < MAX_N_POINTERS); ut_a(my_atomic_addlint(&block->n_pointers, 1) < MAX_N_POINTERS); @@ -339,7 +342,7 @@ ha_delete_hash_node( #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG if (table->adaptive) { ut_a(del_node->block->frame = page_align(del_node->data)); - ut_a(my_atomic_addlint(&del_node->block->n_pointers, -1) + ut_a(my_atomic_addlint(&del_node->block->n_pointers, ulint(-1)) < MAX_N_POINTERS); } #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ @@ -382,7 +385,8 @@ ha_search_and_update_if_found_func( if (node) { #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG if (table->adaptive) { - ut_a(my_atomic_addlint(&node->block->n_pointers, -1) + ut_a(my_atomic_addlint(&node->block->n_pointers, + ulint(-1)) < MAX_N_POINTERS); ut_a(my_atomic_addlint(&new_block->n_pointers, 1) < MAX_N_POINTERS); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index eaa9900e0ec..58faed432dc 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1,10 +1,10 @@ /***************************************************************************** -Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2013, 2018, MariaDB Corporation. +Copyright (c) 2000, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2012, Facebook Inc. +Copyright (c) 2013, 2018, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -187,32 +187,30 @@ static const long AUTOINC_OLD_STYLE_LOCKING = 0; static const long AUTOINC_NEW_STYLE_LOCKING = 1; static const long AUTOINC_NO_LOCKING = 2; -static long innobase_log_buffer_size; -static long innobase_open_files=0; +static ulong innobase_open_files; static long innobase_autoinc_lock_mode; -static ulong innobase_commit_concurrency = 0; -static ulong innobase_read_io_threads; -static ulong innobase_write_io_threads; +static ulong innobase_commit_concurrency; -static long long innobase_buffer_pool_size; +static ulonglong innobase_buffer_pool_size; /** Percentage of the buffer pool to reserve for 'old' blocks. Connected to buf_LRU_old_ratio. 
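The n_pointers bookkeeping above relies on unsigned wrap-around: my_atomic_addlint(&counter, ulint(-1)) adds the all-ones value, which modulo 2^N is the same as subtracting one, so a single atomic add primitive serves as both increment and decrement. A minimal standalone illustration using std::atomic (not the my_atomic API itself):

#include <atomic>
#include <cassert>
#include <cstddef>

int main()
{
	std::atomic<std::size_t> n_pointers{0};

	/* Increment: fetch_add() returns the value before the addition. */
	std::size_t before = n_pointers.fetch_add(1);
	assert(before == 0);

	/* Decrement by adding size_t(-1); unsigned arithmetic wraps modulo
	2^N, so this is exactly "subtract one". */
	before = n_pointers.fetch_add(std::size_t(-1));
	assert(before == 1 && n_pointers.load() == 0);
}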
*/ static uint innobase_old_blocks_pct; -/* The default values for the following char* start-up parameters -are determined in innobase_init below: */ - -static char* innobase_data_home_dir; static char* innobase_data_file_path; static char* innobase_temp_data_file_path; -static char* innobase_change_buffering; + +/* The default values for the following char* start-up parameters +are determined in innodb_init_params(). */ + +static char* innobase_data_home_dir; static char* innobase_enable_monitor_counter; static char* innobase_disable_monitor_counter; static char* innobase_reset_monitor_counter; static char* innobase_reset_all_monitor_counter; -static char* innobase_file_flush_method; +static ulong innodb_change_buffering; +static ulong innodb_flush_method; /* This variable can be set in the server configure file, specifying stopword table to be used */ @@ -222,7 +220,6 @@ static char* innobase_server_stopword_table; values */ static my_bool innobase_use_atomic_writes; -static my_bool innobase_use_doublewrite; static my_bool innobase_use_checksums; static my_bool innobase_locks_unsafe_for_binlog; static my_bool innobase_rollback_on_timeout; @@ -329,7 +326,7 @@ thd_destructor_proxy(void *) need to be purged, so they have to be shut down before purge threads if slow shutdown is requested. */ srv_shutdown_bg_undo_sources(); - srv_purge_wakeup(); + srv_purge_shutdown(); destroy_thd(thd); mysql_cond_destroy(&thd_destructor_cond); @@ -433,6 +430,30 @@ static TYPELIB innodb_lock_schedule_algorithm_typelib = { NULL }; +/** Names of allowed values of innodb_flush_method */ +const char* innodb_flush_method_names[] = { + "fsync", + "O_DSYNC", + "littlesync", + "nosync", + "O_DIRECT", + "O_DIRECT_NO_FSYNC", +#ifdef _WIN32 + "unbuffered", + "async_unbuffered" /* alias for "unbuffered" */, + "normal" /* alias for "fsync" */, +#endif + NullS +}; + +/** Enumeration of innodb_flush_method */ +TYPELIB innodb_flush_method_typelib = { + array_elements(innodb_flush_method_names) - 1, + "innodb_flush_method_typelib", + innodb_flush_method_names, + NULL +}; + /* The following counter is used to convey information to InnoDB about server activity: in case of normal DML ops it is not sensible to call srv_active_wake_master_thread after each @@ -444,13 +465,22 @@ static ulong innobase_active_counter = 0; static hash_table_t* innobase_open_tables; /** Allowed values of innodb_change_buffering */ -static const char* innobase_change_buffering_values[IBUF_USE_COUNT] = { +static const char* innodb_change_buffering_names[] = { "none", /* IBUF_USE_NONE */ "inserts", /* IBUF_USE_INSERT */ "deletes", /* IBUF_USE_DELETE_MARK */ "changes", /* IBUF_USE_INSERT_DELETE_MARK */ "purges", /* IBUF_USE_DELETE */ - "all" /* IBUF_USE_ALL */ + "all", /* IBUF_USE_ALL */ + NullS +}; + +/** Enumeration of innodb_change_buffering */ +static TYPELIB innodb_change_buffering_typelib = { + array_elements(innodb_change_buffering_names) - 1, + "innodb_change_buffering_typelib", + innodb_change_buffering_names, + NULL }; /** Retrieve the FTS Relevance Ranking result for doc with doc_id @@ -602,7 +632,6 @@ static PSI_mutex_info all_innodb_mutexes[] = { PSI_KEY(srv_misc_tmpfile_mutex), PSI_KEY(srv_monitor_file_mutex), PSI_KEY(buf_dblwr_mutex), - PSI_KEY(trx_undo_mutex), PSI_KEY(trx_pool_mutex), PSI_KEY(trx_pool_manager_mutex), PSI_KEY(srv_sys_mutex), @@ -750,7 +779,7 @@ static int innodb_tmpdir_validate( THD* thd, - struct st_mysql_sys_var* var, + struct st_mysql_sys_var*, void* save, struct st_mysql_value* value) { @@ -1500,9 +1529,7 @@ static int 
innobase_commit_concurrency_validate( /*=================================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to system - variable */ + THD*, st_mysql_sys_var*, void* save, /*!< out: immediate result for update function */ struct st_mysql_value* value) /*!< in: incoming string */ @@ -1664,10 +1691,7 @@ extern "C" time_t thd_start_time(const THD* thd); /******************************************************************//** Get the thread start time. @return the thread start time in seconds since the epoch. */ -ulint -thd_start_time_in_secs( -/*===================*/ - THD* thd) /*!< in: thread handle, or NULL */ +ulint thd_start_time_in_secs(THD*) { // FIXME: This function should be added to the server code. //return(thd_start_time(thd)); @@ -1814,19 +1838,6 @@ thd_lock_wait_timeout( return(THDVAR(thd, lock_wait_timeout)); } -/******************************************************************//** -Set the time waited for the lock for the current query. */ -void -thd_set_lock_wait_time( -/*===================*/ - THD* thd, /*!< in/out: thread handle */ - ulint value) /*!< in: time waited for the lock */ -{ - if (thd) { - thd_storage_lock_wait(thd, value); - } -} - /** Get the value of innodb_tmpdir. @param[in] thd thread handle, or NULL to query the global innodb_tmpdir. @@ -1889,7 +1900,7 @@ Converts an InnoDB error code to a MySQL error code and also tells to MySQL about a possible transaction rollback inside InnoDB caused by a lock wait timeout or a deadlock. @return MySQL error code */ -int +static int convert_error_code_to_mysql( /*========================*/ dberr_t error, /*!< in: InnoDB error code */ @@ -2108,17 +2119,6 @@ innobase_mysql_print_thd( putc('\n', f); } -/******************************************************************//** -Get the error message format string. -@return the format string or 0 if not found. */ -const char* -innobase_get_err_msg( -/*=================*/ - int error_code) /*!< in: MySQL error code */ -{ - return(my_get_err_msg(error_code)); -} - /******************************************************************//** Get the variable length bounds of the given character set. */ void @@ -2383,24 +2383,8 @@ static int mysql_tmpfile_path(const char *path, const char *prefix) DBUG_ASSERT((strlen(path) + strlen(prefix)) <= FN_REFLEN); char filename[FN_REFLEN]; - File fd = create_temp_file(filename, path, prefix, -#ifdef __WIN__ - O_BINARY | O_TRUNC | O_SEQUENTIAL | - O_SHORT_LIVED | -#endif /* __WIN__ */ - O_CREAT | O_EXCL | O_RDWR | O_TEMPORARY, - MYF(MY_WME)); - if (fd >= 0) { -#ifndef __WIN__ - /* - This can be removed once the following bug is fixed: - Bug #28903 create_temp_file() doesn't honor O_TEMPORARY option - (file not removed) (Unix) - */ - unlink(filename); -#endif /* !__WIN__ */ - } - + File fd = create_temp_file(filename, path, prefix, O_BINARY | O_SEQUENTIAL, + MYF(MY_WME | MY_TEMPORARY)); return fd; } @@ -2408,19 +2392,18 @@ static int mysql_tmpfile_path(const char *path, const char *prefix) path. If the path is NULL, then it will be created in tmpdir. 
@param[in] path location for creating temporary file @return temporary file descriptor, or < 0 on error */ -int +os_file_t innobase_mysql_tmpfile( const char* path) { #ifdef WITH_INNODB_DISALLOW_WRITES os_event_wait(srv_allow_writes_event); #endif /* WITH_INNODB_DISALLOW_WRITES */ - int fd2 = -1; File fd; DBUG_EXECUTE_IF( "innobase_tmpfile_creation_failure", - return(-1); + return(OS_FILE_CLOSED); ); if (path == NULL) { @@ -2429,50 +2412,55 @@ innobase_mysql_tmpfile( fd = mysql_tmpfile_path(path, "ib"); } - if (fd >= 0) { - /* Copy the file descriptor, so that the additional resources - allocated by create_temp_file() can be freed by invoking - my_close(). + if (fd < 0) + return OS_FILE_CLOSED; - Because the file descriptor returned by this function - will be passed to fdopen(), it will be closed by invoking - fclose(), which in turn will invoke close() instead of - my_close(). */ + /* Copy the file descriptor, so that the additional resources + allocated by create_temp_file() can be freed by invoking + my_close(). + + Because the file descriptor returned by this function + will be passed to fdopen(), it will be closed by invoking + fclose(), which in turn will invoke close() instead of + my_close(). */ #ifdef _WIN32 - /* Note that on Windows, the integer returned by mysql_tmpfile - has no relation to C runtime file descriptor. Here, we need - to call my_get_osfhandle to get the HANDLE and then convert it - to C runtime filedescriptor. */ - { - HANDLE hFile = my_get_osfhandle(fd); - HANDLE hDup; - BOOL bOK = DuplicateHandle( - GetCurrentProcess(), - hFile, GetCurrentProcess(), - &hDup, 0, FALSE, DUPLICATE_SAME_ACCESS); - if (bOK) { - fd2 = _open_osfhandle((intptr_t) hDup, 0); - } else { - my_osmaperr(GetLastError()); - fd2 = -1; - } - } -#else - fd2 = dup(fd); -#endif - if (fd2 < 0) { - char errbuf[MYSYS_STRERROR_SIZE]; - DBUG_PRINT("error",("Got error %d on dup",fd2)); - set_my_errno(errno); - my_error(EE_OUT_OF_FILERESOURCES, - MYF(0), - "ib*", errno, - my_strerror(errbuf, sizeof(errbuf), errno)); - } - my_close(fd, MYF(MY_WME)); + /* Note that on Windows, the integer returned by mysql_tmpfile + has no relation to C runtime file descriptor. Here, we need + to call my_get_osfhandle to get the HANDLE and then convert it + to C runtime filedescriptor. */ + + HANDLE hFile = my_get_osfhandle(fd); + HANDLE hDup; + BOOL bOK = DuplicateHandle( + GetCurrentProcess(), + hFile, GetCurrentProcess(), + &hDup, 0, FALSE, DUPLICATE_SAME_ACCESS); + my_close(fd, MYF(MY_WME)); + + if (!bOK) { + my_osmaperr(GetLastError()); + goto error; } - return(fd2); + return hDup; +#else + int fd2 = dup(fd); + my_close(fd, MYF(MY_WME)); + if (fd2 < 0) { + set_my_errno(errno); + goto error; + } + return fd2; +#endif + +error: + char errbuf[MYSYS_STRERROR_SIZE]; + + my_error(EE_OUT_OF_FILERESOURCES, + MYF(0), + "ib*", errno, + my_strerror(errbuf, sizeof(errbuf), errno)); + return (OS_FILE_CLOSED); } /*********************************************************************//** @@ -2861,7 +2849,7 @@ innobase_copy_frm_flags_from_create_info( ibool ps_on; ibool ps_off; - if (dict_table_is_temporary(innodb_table) + if (innodb_table->is_temporary() || innodb_table->no_rollback()) { /* Temp tables do not use persistent stats. 
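The reworked innobase_mysql_tmpfile() keeps only a duplicated descriptor: the handle from create_temp_file() is dup()'d (DuplicateHandle() on Windows) and then released with my_close(), so the MySQL-side bookkeeping is freed while InnoDB holds a plain OS handle, and every failure path now yields OS_FILE_CLOSED. A rough POSIX-only sketch of the same pattern, using plain mkstemp()/dup() rather than the MySQL wrappers:

#include <cstdlib>
#include <unistd.h>

/* Sketch: create an anonymous temp file and hand back a descriptor that is
independent of the creating library's bookkeeping. */
static int tmpfile_fd()
{
	char name[] = "/tmp/ibXXXXXX";
	int fd = mkstemp(name);		/* created and opened */
	if (fd < 0) {
		return -1;		/* analogous to OS_FILE_CLOSED */
	}
	unlink(name);			/* file vanishes when last fd closes */

	int fd2 = dup(fd);		/* caller keeps this copy */
	close(fd);			/* release the original immediately */
	return fd2;			/* may still be -1 if dup() failed */
}

int main()
{
	int fd = tmpfile_fd();
	if (fd >= 0) {
		close(fd);
	}
	return 0;
}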
*/ ps_on = FALSE; @@ -2897,7 +2885,7 @@ innobase_copy_frm_flags_from_table_share( ibool ps_on; ibool ps_off; - if (dict_table_is_temporary(innodb_table)) { + if (innodb_table->is_temporary()) { /* Temp tables do not use persistent stats */ ps_on = FALSE; ps_off = TRUE; @@ -3122,7 +3110,7 @@ innobase_query_caching_of_table_permitted( const char* full_name, /*!< in: normalized path to the table */ uint full_name_len, /*!< in: length of the normalized path to the table */ - ulonglong *unused) /*!< unused for this engine */ + ulonglong *) { char norm_name[1000]; trx_t* trx = check_trx_exists(thd); @@ -3176,13 +3164,11 @@ innobase_invalidate_query_cache( /*============================*/ trx_t* trx, /*!< in: transaction which modifies the table */ - const char* full_name, /*!< in: concatenation of + const char* full_name) /*!< in: concatenation of database name, path separator, table name, null char NUL; NOTE that in Windows this is always in LOWER CASE! */ - ulint full_name_len) /*!< in: full name length where - also the null chars count */ { /* Note that the sync0mutex.h rank of the query cache mutex is just above the InnoDB trx_sys_t->lock. The caller of this function must @@ -3193,12 +3179,12 @@ innobase_invalidate_query_cache( char db_name[NAME_CHAR_LEN * MY_CS_MBMAXLEN + 1]; const char *key_ptr; size_t tabname_len; - size_t dbname_len; // Extract the database name. key_ptr= strchr(full_name, '/'); DBUG_ASSERT(key_ptr != NULL); // Database name should be present - memcpy(db_name, full_name, (dbname_len= (key_ptr - full_name))); + size_t dbname_len= size_t(key_ptr - full_name); + memcpy(db_name, full_name, dbname_len); db_name[dbname_len]= '\0'; /* Construct the key("db-name\0table$name\0") for the query cache using @@ -3267,9 +3253,9 @@ innobase_quote_identifier( if (q == EOF) { quoted_identifier.append(id); } else { - quoted_identifier += (unsigned char)q; + quoted_identifier += char(q); quoted_identifier.append(id); - quoted_identifier += (unsigned char)q; + quoted_identifier += char(q); } return (quoted_identifier); @@ -3337,12 +3323,13 @@ innobase_convert_name( } /* Print the database name and table name separately. */ - s = innobase_convert_identifier(s, bufend - s, id, slash - id, thd); + s = innobase_convert_identifier(s, ulint(bufend - s), + id, ulint(slash - id), thd); if (s < bufend) { *s++ = '.'; - s = innobase_convert_identifier(s, bufend - s, + s = innobase_convert_identifier(s, ulint(bufend - s), slash + 1, idlen - - (slash - id) - 1, + - ulint(slash - id) - 1, thd); } @@ -3371,8 +3358,8 @@ innobase_format_name( /**********************************************************************//** Determines if the currently running transaction has been interrupted. -@return TRUE if interrupted */ -ibool +@return true if interrupted */ +bool trx_is_interrupted( /*===============*/ const trx_t* trx) /*!< in: transaction */ @@ -3408,7 +3395,7 @@ ha_innobase::reset_template(void) } ); - m_prebuilt->keep_other_fields_on_keyread = 0; + m_prebuilt->keep_other_fields_on_keyread = false; m_prebuilt->read_just_key = 0; m_prebuilt->in_fts_query = 0; @@ -3504,28 +3491,15 @@ innobase_space_shutdown() DBUG_VOID_RETURN; } -/*********************************************************************//** -Free any resources that were allocated and return failure. +/** Free any resources that were allocated and return failure. 
@return always return 1 */ -static -int -innobase_init_abort() -/*=================*/ +static int innodb_init_abort() { - DBUG_ENTER("innobase_init_abort"); + DBUG_ENTER("innodb_init_abort"); innobase_space_shutdown(); DBUG_RETURN(1); } -/** Return partitioning flags. */ -static uint innobase_partition_flags() -{ - /* JAN: TODO: MYSQL 5.7 - return(HA_CAN_EXCHANGE_PARTITION | HA_CANNOT_PARTITION_FK); - */ - return (0); -} - /** Update log_checksum_algorithm_ptr with a pointer to the function corresponding to whether checksums are enabled. @param[in,out] thd client session, or NULL if at startup @@ -3602,100 +3576,79 @@ static ulonglong innodb_prepare_commit_versioned(THD* thd, ulonglong *trx_id) return 0; } -/*********************************************************************//** -Opens an InnoDB database. -@return 0 on success, 1 on failure */ -static -int -innobase_init( -/*==========*/ - void *p) /*!< in: InnoDB handlerton */ +/** Initialize and normalize innodb_buffer_pool_size. */ +static void innodb_buffer_pool_size_init() { - static char current_dir[3]; /*!< Set if using current lib */ - int err; + if (srv_buf_pool_size >= BUF_POOL_SIZE_THRESHOLD) { + + if (srv_buf_pool_instances == srv_buf_pool_instances_default) { +#if defined(_WIN32) && !defined(_WIN64) + /* Do not allocate too large of a buffer pool on + Windows 32-bit systems, which can have trouble + allocating larger single contiguous memory blocks. */ + srv_buf_pool_size = ulint( + ut_uint64_align_up(srv_buf_pool_size, + srv_buf_pool_chunk_unit)); + srv_buf_pool_instances = std::min( + MAX_BUFFER_POOLS, + ulong(srv_buf_pool_size + / srv_buf_pool_chunk_unit)); +#else /* defined(_WIN32) && !defined(_WIN64) */ + /* Default to 8 instances when size > 1GB. */ + srv_buf_pool_instances = 8; +#endif /* defined(_WIN32) && !defined(_WIN64) */ + } + } else { + /* If buffer pool is less than 1 GiB, assume fewer + threads. Also use only one buffer pool instance. */ + if (srv_buf_pool_instances != srv_buf_pool_instances_default + && srv_buf_pool_instances != 1) { + /* We can't distinguish whether the user has explicitly + started mysqld with --innodb-buffer-pool-instances=0, + (srv_buf_pool_instances_default is 0) or has not + specified that option at all. Thus we have the + limitation that if the user started with =0, we + will not emit a warning here, but we should actually + do so. */ + ib::info() + << "Adjusting innodb_buffer_pool_instances" + " from " << srv_buf_pool_instances << " to 1" + " since innodb_buffer_pool_size is less than " + << BUF_POOL_SIZE_THRESHOLD / (1024 * 1024) + << " MiB"; + } + + srv_buf_pool_instances = 1; + } + + if (srv_buf_pool_chunk_unit * srv_buf_pool_instances + > srv_buf_pool_size) { + /* Size unit of buffer pool is larger than srv_buf_pool_size. + adjust srv_buf_pool_chunk_unit for srv_buf_pool_size. */ + srv_buf_pool_chunk_unit + = static_cast(srv_buf_pool_size) + / srv_buf_pool_instances; + if (srv_buf_pool_size % srv_buf_pool_instances != 0) { + ++srv_buf_pool_chunk_unit; + } + } + + srv_buf_pool_size = buf_pool_size_align(srv_buf_pool_size); + innobase_buffer_pool_size = srv_buf_pool_size; +} + +/** Initialize, validate and normalize the InnoDB startup parameters. 
+@return failure code +@retval 0 on success +@retval HA_ERR_OUT_OF_MEM when out of memory +@retval HA_ERR_INITIALIZATION when some parameters are out of range */ +static int innodb_init_params() +{ + DBUG_ENTER("innodb_init_params"); + + static char current_dir[3]; char *default_path; ulong num_pll_degree; - ulint srv_buf_pool_size_org = 0; - - DBUG_ENTER("innobase_init"); - handlerton* innobase_hton= (handlerton*) p; - innodb_hton_ptr = innobase_hton; - - innobase_hton->state = SHOW_OPTION_YES; - innobase_hton->db_type = DB_TYPE_INNODB; - innobase_hton->savepoint_offset = sizeof(trx_named_savept_t); - innobase_hton->close_connection = innobase_close_connection; - innobase_hton->kill_query = innobase_kill_query; - innobase_hton->savepoint_set = innobase_savepoint; - innobase_hton->savepoint_rollback = innobase_rollback_to_savepoint; - - innobase_hton->savepoint_rollback_can_release_mdl = - innobase_rollback_to_savepoint_can_release_mdl; - - innobase_hton->savepoint_release = innobase_release_savepoint; - innobase_hton->prepare_ordered= NULL; - innobase_hton->commit_ordered= innobase_commit_ordered; - innobase_hton->commit = innobase_commit; - innobase_hton->rollback = innobase_rollback; - innobase_hton->prepare = innobase_xa_prepare; - innobase_hton->recover = innobase_xa_recover; - innobase_hton->commit_by_xid = innobase_commit_by_xid; - innobase_hton->rollback_by_xid = innobase_rollback_by_xid; - innobase_hton->commit_checkpoint_request=innobase_checkpoint_request; - innobase_hton->create = innobase_create_handler; - - innobase_hton->drop_database = innobase_drop_database; - innobase_hton->panic = innobase_end; - innobase_hton->partition_flags= innobase_partition_flags; - - innobase_hton->start_consistent_snapshot = - innobase_start_trx_and_assign_read_view; - - innobase_hton->flush_logs = innobase_flush_logs; - innobase_hton->show_status = innobase_show_status; - innobase_hton->flags = - HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS | HTON_NATIVE_SYS_VERSIONING; - -#ifdef WITH_WSREP - innobase_hton->abort_transaction=wsrep_abort_transaction; - innobase_hton->set_checkpoint=innobase_wsrep_set_checkpoint; - innobase_hton->get_checkpoint=innobase_wsrep_get_checkpoint; - innobase_hton->fake_trx_id=wsrep_fake_trx_id; -#endif /* WITH_WSREP */ - - if (srv_file_per_table) { - innobase_hton->tablefile_extensions = ha_innobase_exts; - } - - innobase_hton->table_options = innodb_table_option_list; - - /* System Versioning */ - innobase_hton->prepare_commit_versioned - = innodb_prepare_commit_versioned; - - innodb_remember_check_sysvar_funcs(); - - ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR); - -#ifndef DBUG_OFF - static const char test_filename[] = "-@"; - char test_tablename[sizeof test_filename - + sizeof(srv_mysql50_table_name_prefix) - 1]; - if ((sizeof(test_tablename)) - 1 - != filename_to_tablename(test_filename, - test_tablename, - sizeof(test_tablename), true) - || strncmp(test_tablename, - srv_mysql50_table_name_prefix, - sizeof(srv_mysql50_table_name_prefix) - 1) - || strcmp(test_tablename - + sizeof(srv_mysql50_table_name_prefix) - 1, - test_filename)) { - - sql_print_error("tablename encoding has been changed"); - DBUG_RETURN(innobase_init_abort()); - } -#endif /* DBUG_OFF */ /* Check that values don't overflow on 32-bit systems. 
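innodb_buffer_pool_size_init() above normalizes three interdependent settings: below the ~1 GiB threshold the pool is forced to a single instance, above it the default becomes 8 instances, and the chunk unit is shrunk (rounding up) whenever chunk_unit * instances would exceed the pool size, before the size itself is aligned. A standalone sketch of that arithmetic only, with an invented threshold constant and ignoring the "user explicitly set the instance count" case:

#include <cstdint>
#include <iostream>

/* Illustrative constant; the real threshold lives in the InnoDB headers. */
static const uint64_t POOL_SIZE_THRESHOLD = 1ULL << 30;	/* ~1 GiB */

struct pool_params { uint64_t size; unsigned instances; uint64_t chunk_unit; };

static void normalize(pool_params& p)
{
	/* Several pools for large sizes, exactly one for small ones. */
	p.instances = (p.size >= POOL_SIZE_THRESHOLD) ? 8 : 1;

	/* Shrink the chunk unit, rounding up, if it would overshoot. */
	if (p.chunk_unit * p.instances > p.size) {
		p.chunk_unit = p.size / p.instances
			+ (p.size % p.instances ? 1 : 0);
	}

	/* Align the pool size up to a whole number of chunks. */
	uint64_t step = p.chunk_unit * p.instances;
	p.size = (p.size + step - 1) / step * step;
}

int main()
{
	pool_params p = {3ULL << 30 /* 3 GiB */, 0, 128ULL << 20 /* 128 MiB */};
	normalize(p);
	std::cout << p.instances << " instances, chunk "
		  << (p.chunk_unit >> 20) << " MiB, pool "
		  << (p.size >> 20) << " MiB\n";
}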
*/ if (sizeof(ulint) == 4) { @@ -3703,26 +3656,19 @@ innobase_init( sql_print_error( "innodb_buffer_pool_size can't be over 4GB" " on 32-bit systems"); - - DBUG_RETURN(innobase_init_abort()); + DBUG_RETURN(HA_ERR_OUT_OF_MEM); } } - os_file_set_umask(my_umask); - - /* Setup the memory alloc/free tracing mechanisms before calling - any functions that could possibly allocate memory. */ - ut_new_boot(); - /* The buffer pool needs to be able to accommodate enough many pages, even for larger pages */ - if (UNIV_PAGE_SIZE > UNIV_PAGE_SIZE_DEF + if (srv_page_size > UNIV_PAGE_SIZE_DEF && innobase_buffer_pool_size < (24 * 1024 * 1024)) { ib::info() << "innodb_page_size=" - << UNIV_PAGE_SIZE << " requires " + << srv_page_size << " requires " << "innodb_buffer_pool_size > 24M current " << innobase_buffer_pool_size; - goto error; + DBUG_RETURN(HA_ERR_INITIALIZATION); } #ifdef WITH_WSREP @@ -3739,7 +3685,7 @@ innobase_init( sql_print_error("InnoDB: innodb_compression_algorithm = %lu unsupported.\n" "InnoDB: liblz4 is not installed. \n", innodb_compression_algorithm); - goto error; + DBUG_RETURN(HA_ERR_INITIALIZATION); } #endif @@ -3748,7 +3694,7 @@ innobase_init( sql_print_error("InnoDB: innodb_compression_algorithm = %lu unsupported.\n" "InnoDB: liblzo is not installed. \n", innodb_compression_algorithm); - goto error; + DBUG_RETURN(HA_ERR_INITIALIZATION); } #endif @@ -3757,7 +3703,7 @@ innobase_init( sql_print_error("InnoDB: innodb_compression_algorithm = %lu unsupported.\n" "InnoDB: liblzma is not installed. \n", innodb_compression_algorithm); - goto error; + DBUG_RETURN(HA_ERR_INITIALIZATION); } #endif @@ -3766,7 +3712,7 @@ innobase_init( sql_print_error("InnoDB: innodb_compression_algorithm = %lu unsupported.\n" "InnoDB: libbz2 is not installed. \n", innodb_compression_algorithm); - goto error; + DBUG_RETURN(HA_ERR_INITIALIZATION); } #endif @@ -3775,7 +3721,7 @@ innobase_init( sql_print_error("InnoDB: innodb_compression_algorithm = %lu unsupported.\n" "InnoDB: libsnappy is not installed. \n", innodb_compression_algorithm); - goto error; + DBUG_RETURN(HA_ERR_INITIALIZATION); } #endif @@ -3783,9 +3729,18 @@ innobase_init( && !encryption_key_id_exists(FIL_DEFAULT_ENCRYPTION_KEY)) { sql_print_error("InnoDB: cannot enable encryption, " "encryption plugin is not available"); - goto error; + DBUG_RETURN(HA_ERR_INITIALIZATION); } +#ifdef _WIN32 + if (!is_filename_allowed(srv_buf_dump_filename, + strlen(srv_buf_dump_filename), FALSE)) { + sql_print_error("InnoDB: innodb_buffer_pool_filename" + " cannot have colon (:) in the file name."); + DBUG_RETURN(HA_ERR_INITIALIZATION); + } +#endif + /* First calculate the default path for innodb_data_home_dir etc., in case the user has not given any value. @@ -3823,13 +3778,7 @@ innobase_init( if (!srv_page_size_shift) { sql_print_error("InnoDB: Invalid page size=%lu.\n", srv_page_size); - DBUG_RETURN(innobase_init_abort()); - } - - /* Set default InnoDB temp data file size to 12 MB and let it be - auto-extending. */ - if (!innobase_data_file_path) { - innobase_data_file_path = (char*) "ibdata1:12M:autoextend"; + DBUG_RETURN(HA_ERR_INITIALIZATION); } /* This is the first time univ_page_size is used. 
@@ -3844,34 +3793,31 @@ innobase_init( /* Supports raw devices */ if (!srv_sys_space.parse_params(innobase_data_file_path, true)) { - DBUG_RETURN(innobase_init_abort()); + ib::error() << "Unable to parse innodb_data_file_path=" + << innobase_data_file_path; + DBUG_RETURN(HA_ERR_INITIALIZATION); } - /* Set default InnoDB temp data file size to 12 MB and let it be - auto-extending. */ - - if (!innobase_temp_data_file_path) { - innobase_temp_data_file_path = (char*) "ibtmp1:12M:autoextend"; - } - - /* We set the temporary tablspace id later, after recovery. - The temp tablespace doesn't support raw devices. - Set the name and path. */ srv_tmp_space.set_name("innodb_temporary"); srv_tmp_space.set_path(srv_data_home); srv_tmp_space.set_flags(FSP_FLAGS_PAGE_SSIZE()); if (!srv_tmp_space.parse_params(innobase_temp_data_file_path, false)) { - DBUG_RETURN(innobase_init_abort()); + ib::error() << "Unable to parse innodb_temp_data_file_path=" + << innobase_temp_data_file_path; + DBUG_RETURN(HA_ERR_INITIALIZATION); } /* Perform all sanity check before we take action of deleting files*/ if (srv_sys_space.intersection(&srv_tmp_space)) { sql_print_error("%s and %s file names seem to be the same.", srv_tmp_space.name(), srv_sys_space.name()); - DBUG_RETURN(innobase_init_abort()); + DBUG_RETURN(HA_ERR_INITIALIZATION); } + srv_sys_space.normalize_size(); + srv_tmp_space.normalize_size(); + /* ------------ UNDO tablespaces files ---------------------*/ if (!srv_undo_dir) { srv_undo_dir = default_path; @@ -3881,7 +3827,7 @@ innobase_init( if (strchr(srv_undo_dir, ';')) { sql_print_error("syntax error in innodb_undo_directory"); - DBUG_RETURN(innobase_init_abort()); + DBUG_RETURN(HA_ERR_INITIALIZATION); } /* -------------- All log files ---------------------------*/ @@ -3896,33 +3842,22 @@ innobase_init( if (strchr(srv_log_group_home_dir, ';')) { sql_print_error("syntax error in innodb_log_group_home_dir"); - DBUG_RETURN(innobase_init_abort()); + DBUG_RETURN(HA_ERR_INITIALIZATION); } - if (innobase_change_buffering) { - ulint use; - - for (use = 0; - use < UT_ARR_SIZE(innobase_change_buffering_values); - use++) { - if (!innobase_strcasecmp( - innobase_change_buffering, - innobase_change_buffering_values[use])) { - ibuf_use = (ibuf_use_t) use; - goto innobase_change_buffering_inited_ok; - } - } - - sql_print_error("InnoDB: invalid value" - " innodb_change_buffering=%s", - innobase_change_buffering); - DBUG_RETURN(innobase_init_abort()); + if (srv_n_log_files * srv_log_file_size + >= 512ULL * 1024ULL * 1024ULL * 1024ULL) { + /* log_block_convert_lsn_to_no() limits the returned block + number to 1G and given that OS_FILE_LOG_BLOCK_SIZE is 512 + bytes, then we have a limit of 512 GB. If that limit is to + be raised, then log_block_convert_lsn_to_no() must be + modified. */ + ib::error() << "Combined size of log files must be < 512 GB"; + DBUG_RETURN(HA_ERR_INITIALIZATION); } -innobase_change_buffering_inited_ok: - ut_a((ulint) ibuf_use < UT_ARR_SIZE(innobase_change_buffering_values)); - innobase_change_buffering = (char*) - innobase_change_buffering_values[ibuf_use]; + DBUG_ASSERT(innodb_change_buffering <= IBUF_USE_ALL); + ibuf_use = ibuf_use_t(innodb_change_buffering); /* Check that interdependent parameters have sane values. 
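The new combined-log-size check encodes a hard limit of the legacy block numbering: log_block_convert_lsn_to_no() yields at most 1G distinct block numbers and each OS_FILE_LOG_BLOCK_SIZE block is 512 bytes, so the redo log files together must stay below 1G * 512 B = 512 GB. A compile-time restatement of that arithmetic, with the constants written out here rather than taken from the InnoDB headers:

#include <cstdint>

/* 2^30 addressable block numbers, 512-byte log blocks => 512 GiB ceiling. */
static const uint64_t LOG_BLOCK_SIZE = 512;
static const uint64_t MAX_LOG_BLOCKS = 1ULL << 30;
static const uint64_t MAX_COMBINED_LOG_BYTES = MAX_LOG_BLOCKS * LOG_BLOCK_SIZE;

static_assert(MAX_COMBINED_LOG_BYTES == 512ULL * 1024 * 1024 * 1024,
	      "1G blocks of 512 bytes each is exactly 512 GiB");

/* For example, 4 log files of 128 GiB each already reach the cap. */
static_assert(4 * (128ULL << 30) >= MAX_COMBINED_LOG_BYTES,
	      "4 x 128 GiB is not below the limit");

int main() { return 0; }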
*/ if (srv_max_buf_pool_modified_pct < srv_max_dirty_pages_pct_lwm) { @@ -3958,24 +3893,13 @@ innobase_change_buffering_inited_ok: srv_io_capacity = srv_max_io_capacity; } - if (!is_filename_allowed(srv_buf_dump_filename, - strlen(srv_buf_dump_filename), FALSE)) { - sql_print_error("InnoDB: innodb_buffer_pool_filename" - " cannot have colon (:) in the file name."); - DBUG_RETURN(innobase_init_abort()); - } - - /* --------------------------------------------------*/ - - srv_file_flush_method_str = innobase_file_flush_method; - if (UNIV_PAGE_SIZE_DEF != srv_page_size) { ib::info() << "innodb_page_size=" << srv_page_size; srv_max_undo_log_size = std::max( srv_max_undo_log_size, ulonglong(SRV_UNDO_TABLESPACE_SIZE_IN_PAGES) - * srv_page_size); + << srv_page_size_shift); } if (srv_log_write_ahead_size > srv_page_size) { @@ -3995,14 +3919,7 @@ innobase_change_buffering_inited_ok: } } - srv_log_buffer_size = (ulint) innobase_log_buffer_size; - - srv_buf_pool_size = (ulint) innobase_buffer_pool_size; - - srv_n_read_io_threads = (ulint) innobase_read_io_threads; - srv_n_write_io_threads = (ulint) innobase_write_io_threads; - - srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite; + srv_buf_pool_size = ulint(innobase_buffer_pool_size); if (!innobase_use_checksums) { ib::warn() << "Setting innodb_checksums to OFF is DEPRECATED." @@ -4037,16 +3954,16 @@ innobase_change_buffering_inited_ok: } } - if (innobase_open_files > (long) open_files_limit) { + if (innobase_open_files > open_files_limit) { ib::warn() << "innodb_open_files " << innobase_open_files << " should not be greater" << "than the open_files_limit " << open_files_limit; - if (innobase_open_files > (long) tc_size) { + if (innobase_open_files > tc_size) { innobase_open_files = tc_size; } } - srv_max_n_open_files = (ulint) innobase_open_files; + srv_max_n_open_files = innobase_open_files; srv_innodb_status = (ibool) innobase_create_status_file; srv_print_verbose_log = mysqld_embedded ? 0 : 1; @@ -4083,15 +4000,153 @@ innobase_change_buffering_inited_ok: unbuffered) */ #ifndef _WIN32 - if (!innobase_file_flush_method || - !strstr(innobase_file_flush_method, "O_DIRECT")) { - innobase_file_flush_method = - srv_file_flush_method_str = (char*)"O_DIRECT"; + switch (innodb_flush_method) { + case SRV_O_DIRECT: + case SRV_O_DIRECT_NO_FSYNC: + break; + default: + innodb_flush_method = SRV_O_DIRECT; fprintf(stderr, "InnoDB: using O_DIRECT due to atomic writes.\n"); } #endif } + if (srv_read_only_mode) { + ib::info() << "Started in read only mode"; + srv_use_doublewrite_buf = FALSE; + } + +#ifdef LINUX_NATIVE_AIO + if (srv_use_native_aio) { + ib::info() << "Using Linux native AIO"; + } +#elif !defined _WIN32 + /* Currently native AIO is supported only on windows and linux + and that also when the support is compiled in. In all other + cases, we ignore the setting of innodb_use_native_aio. 
*/ + srv_use_native_aio = FALSE; +#endif + +#ifndef _WIN32 + ut_ad(innodb_flush_method <= SRV_O_DIRECT_NO_FSYNC); +#else + switch (innodb_flush_method) { + case SRV_ALL_O_DIRECT_FSYNC + 1 /* "async_unbuffered"="unbuffered" */: + innodb_flush_method = SRV_ALL_O_DIRECT_FSYNC; + break; + case SRV_ALL_O_DIRECT_FSYNC + 2 /* "normal"="fsync" */: + innodb_flush_method = SRV_FSYNC; + break; + default: + ut_ad(innodb_flush_method <= SRV_ALL_O_DIRECT_FSYNC); + } +#endif + srv_file_flush_method = srv_flush_t(innodb_flush_method); + + innodb_buffer_pool_size_init(); + + if (srv_n_page_cleaners > srv_buf_pool_instances) { + /* limit of page_cleaner parallelizability + is number of buffer pool instances. */ + srv_n_page_cleaners = srv_buf_pool_instances; + } + + srv_lock_table_size = 5 * (srv_buf_pool_size >> srv_page_size_shift); + DBUG_RETURN(0); +} + +/** Initialize the InnoDB storage engine plugin. +@param[in,out] p InnoDB handlerton +@return error code +@retval 0 on success */ +static int innodb_init(void* p) +{ + DBUG_ENTER("innodb_init"); + handlerton* innobase_hton= static_cast(p); + innodb_hton_ptr = innobase_hton; + + innobase_hton->state = SHOW_OPTION_YES; + innobase_hton->db_type = DB_TYPE_INNODB; + innobase_hton->savepoint_offset = sizeof(trx_named_savept_t); + innobase_hton->close_connection = innobase_close_connection; + innobase_hton->kill_query = innobase_kill_query; + innobase_hton->savepoint_set = innobase_savepoint; + innobase_hton->savepoint_rollback = innobase_rollback_to_savepoint; + + innobase_hton->savepoint_rollback_can_release_mdl = + innobase_rollback_to_savepoint_can_release_mdl; + + innobase_hton->savepoint_release = innobase_release_savepoint; + innobase_hton->prepare_ordered= NULL; + innobase_hton->commit_ordered= innobase_commit_ordered; + innobase_hton->commit = innobase_commit; + innobase_hton->rollback = innobase_rollback; + innobase_hton->prepare = innobase_xa_prepare; + innobase_hton->recover = innobase_xa_recover; + innobase_hton->commit_by_xid = innobase_commit_by_xid; + innobase_hton->rollback_by_xid = innobase_rollback_by_xid; + innobase_hton->commit_checkpoint_request=innobase_checkpoint_request; + innobase_hton->create = innobase_create_handler; + + innobase_hton->drop_database = innobase_drop_database; + innobase_hton->panic = innobase_end; + + innobase_hton->start_consistent_snapshot = + innobase_start_trx_and_assign_read_view; + + innobase_hton->flush_logs = innobase_flush_logs; + innobase_hton->show_status = innobase_show_status; + innobase_hton->flags = + HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS + | HTON_NATIVE_SYS_VERSIONING; + +#ifdef WITH_WSREP + innobase_hton->abort_transaction=wsrep_abort_transaction; + innobase_hton->set_checkpoint=innobase_wsrep_set_checkpoint; + innobase_hton->get_checkpoint=innobase_wsrep_get_checkpoint; + innobase_hton->fake_trx_id=wsrep_fake_trx_id; +#endif /* WITH_WSREP */ + + innobase_hton->tablefile_extensions = ha_innobase_exts; + innobase_hton->table_options = innodb_table_option_list; + + /* System Versioning */ + innobase_hton->prepare_commit_versioned + = innodb_prepare_commit_versioned; + + innodb_remember_check_sysvar_funcs(); + + compile_time_assert(DATA_MYSQL_TRUE_VARCHAR == MYSQL_TYPE_VARCHAR); + +#ifndef DBUG_OFF + static const char test_filename[] = "-@"; + char test_tablename[sizeof test_filename + + sizeof(srv_mysql50_table_name_prefix) - 1]; + DBUG_ASSERT(sizeof test_tablename - 1 + == filename_to_tablename(test_filename, + test_tablename, + sizeof test_tablename, true)); + 
DBUG_ASSERT(!strncmp(test_tablename, + srv_mysql50_table_name_prefix, + sizeof srv_mysql50_table_name_prefix - 1)); + DBUG_ASSERT(!strcmp(test_tablename + + sizeof srv_mysql50_table_name_prefix - 1, + test_filename)); +#endif /* DBUG_OFF */ + + os_file_set_umask(my_umask); + + /* Setup the memory alloc/free tracing mechanisms before calling + any functions that could possibly allocate memory. */ + ut_new_boot(); + + if (int error = innodb_init_params()) { + DBUG_RETURN(error); + } + + /* After this point, error handling has to use + innodb_init_abort(). */ + #ifdef HAVE_PSI_INTERFACE /* Register keys with MySQL performance schema */ int count; @@ -4123,35 +4178,20 @@ innobase_change_buffering_inited_ok: mysql_cond_register("innodb", all_innodb_conds, count); #endif /* HAVE_PSI_INTERFACE */ - /* Set buffer pool size to default for fast startup when mysqld is - run with --help --verbose options. */ - /* JAN: TODO: MySQL 5.7 has opt_verbose - if (opt_help && opt_verbose - && srv_buf_pool_size > srv_buf_pool_def_size) { - ib::warn() << "Setting innodb_buf_pool_size to " - << srv_buf_pool_def_size << " for fast startup, " - << "when running with --help --verbose options."; - srv_buf_pool_size_org = srv_buf_pool_size; - srv_buf_pool_size = srv_buf_pool_def_size; - } - */ + bool create_new_db = false; - err = innobase_start_or_create_for_mysql(); + /* Check whether the data files exist. */ + dberr_t err = srv_sys_space.check_file_spec(&create_new_db, 5U << 20); - if (srv_buf_pool_size_org != 0) { - /* Set the original value back to show in help. */ - srv_buf_pool_size_org = - buf_pool_size_align(srv_buf_pool_size_org); - innobase_buffer_pool_size = - static_cast(srv_buf_pool_size_org); - } else { - innobase_buffer_pool_size = - static_cast(srv_buf_pool_size); + if (err != DB_SUCCESS) { + DBUG_RETURN(innodb_init_abort()); } + err = srv_start(create_new_db); + if (err != DB_SUCCESS) { innodb_shutdown(); - DBUG_RETURN(innobase_init_abort()); + DBUG_RETURN(innodb_init_abort()); } else if (!srv_read_only_mode) { mysql_thread_create(thd_destructor_thread_key, &thd_destructor_thread, @@ -4203,7 +4243,6 @@ innobase_change_buffering_inited_ok: /* Turn on monitor counters that are default on */ srv_mon_default_on(); - /* Unit Tests */ #ifdef UNIV_ENABLE_UNIT_TEST_GET_PARENT_DIR unit_test_os_file_get_parent_dir(); @@ -4224,9 +4263,6 @@ innobase_change_buffering_inited_ok: #endif /* UNIV_ENABLE_UNIT_TEST_ROW_RAW_FORMAT_INT */ DBUG_RETURN(0); - -error: - DBUG_RETURN(1); } /** Shut down the InnoDB storage engine. @@ -4407,11 +4443,8 @@ innobase_commit_ordered_2( If the binary log is not enabled, or the transaction is not written to the binary log, the file name will be a NULL pointer. */ - ulonglong pos; - - thd_binlog_pos(thd, &trx->mysql_log_file_name, &pos); - - trx->mysql_log_offset = static_cast(pos); + thd_binlog_pos(thd, &trx->mysql_log_file_name, + &trx->mysql_log_offset); /* Don't do write + flush right now. For group commit to work we want to do the flush later. 
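Keeping the flush out of the commit path is what makes group commit possible: each committing transaction only copies its record into the shared log buffer and notes the LSN it needs durable, and one later fsync covers every commit up to the highest such LSN. A toy single-process model of that idea (hypothetical toy_log names, no relation to the real log_sys API):

#include <cstdint>
#include <mutex>
#include <string>
#include <vector>

/* Toy redo log: appending is cheap, durability is a separate step. */
class toy_log {
public:
	/* Called from each commit: buffer the record, return its end LSN. */
	uint64_t append(const std::string& rec)
	{
		std::lock_guard<std::mutex> g(mu_);
		buf_.push_back(rec);
		lsn_ += rec.size();
		return lsn_;
	}

	/* Called once per group: a single "fsync" makes every commit with
	an LSN up to the current end of the buffer durable at once. */
	void flush_up_to(uint64_t lsn)
	{
		std::lock_guard<std::mutex> g(mu_);
		if (lsn > flushed_lsn_) {
			/* ... write buf_ to disk and fsync() here ... */
			flushed_lsn_ = lsn_;
			buf_.clear();
		}
	}

private:
	std::mutex mu_;
	std::vector<std::string> buf_;
	uint64_t lsn_ = 0;
	uint64_t flushed_lsn_ = 0;
};

int main()
{
	toy_log log;
	uint64_t a = log.append("trx1 commit");
	uint64_t b = log.append("trx2 commit");
	log.flush_up_to(b);	/* one flush covers both a and b */
	(void)a;
}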
*/ @@ -4739,7 +4772,6 @@ UNIV_INTERN void innobase_mysql_log_notify( /*======================*/ - ib_uint64_t write_lsn, /*!< in: LSN written to log file */ ib_uint64_t flush_lsn) /*!< in: LSN flushed to disk */ { struct pending_checkpoint * pending; @@ -4829,7 +4861,7 @@ innobase_rollback_to_savepoint( char name[64]; - longlong2str((ulint) savepoint, name, 36); + longlong2str(longlong(savepoint), name, 36); int64_t mysql_binlog_cache_pos; @@ -4898,7 +4930,7 @@ innobase_release_savepoint( /* TODO: use provided savepoint data area to store savepoint data */ - longlong2str((ulint) savepoint, name, 36); + longlong2str(longlong(savepoint), name, 36); error = trx_release_savepoint_for_mysql(trx, name); @@ -4937,12 +4969,12 @@ innobase_savepoint( /* TODO: use provided savepoint data area to store savepoint data */ char name[64]; - longlong2str((ulint) savepoint,name,36); + longlong2str(longlong(savepoint), name, 36); dberr_t error = trx_savepoint_for_mysql(trx, name, 0); if (error == DB_SUCCESS && trx->fts_trx != NULL) { - fts_savepoint_take(trx, trx->fts_trx, name); + fts_savepoint_take(trx->fts_trx, name); } DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL)); @@ -5026,7 +5058,6 @@ static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels) { DBUG_ENTER("innobase_kill_query"); #ifdef WITH_WSREP - wsrep_thd_LOCK(thd); if (wsrep_thd_get_conflict_state(thd) != NO_CONFLICT) { /* if victim has been signaled by BF thread and/or aborting is already progressing, following query aborting is not necessary @@ -5034,10 +5065,8 @@ static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels) Also, BF thread should own trx mutex for the victim, which would conflict with trx_mutex_enter() below */ - wsrep_thd_UNLOCK(thd); DBUG_VOID_RETURN; } - wsrep_thd_UNLOCK(thd); #endif /* WITH_WSREP */ if (trx_t* trx = thd_to_trx(thd)) { @@ -5093,9 +5122,7 @@ ha_innobase::table_flags() const /* Need to use tx_isolation here since table flags is (also) called before prebuilt is inited. */ - ulong const tx_isolation = thd_tx_isolation(thd); - - if (tx_isolation <= ISO_READ_COMMITTED) { + if (thd_tx_isolation(thd) <= ISO_READ_COMMITTED) { return(flags); } @@ -5214,7 +5241,7 @@ ha_innobase::max_supported_key_length() const Note: Handle 16k and 32k pages the same here since the limits are higher than imposed by MySQL. 
*/ - switch (UNIV_PAGE_SIZE) { + switch (srv_page_size) { case 4096: return(768); case 8192: @@ -5609,7 +5636,7 @@ innobase_vcol_build_templ( mysql_row_templ_t* templ, ulint col_no) { - if (dict_col_is_virtual(col)) { + if (col->is_virtual()) { templ->is_virtual = true; templ->col_no = col_no; templ->clust_rec_field_no = ULINT_UNDEFINED; @@ -5666,7 +5693,7 @@ innobase_build_v_templ( const dict_add_v_col_t* add_v, bool locked) { - ulint ncol = ib_table->n_cols - DATA_N_SYS_COLS; + ulint ncol = unsigned(ib_table->n_cols) - DATA_N_SYS_COLS; ulint n_v_col = ib_table->n_v_cols; bool marker[REC_MAX_N_FIELDS]; @@ -6022,7 +6049,7 @@ static void initialize_auto_increment(dict_table_t* table, const Field* field) { - ut_ad(!dict_table_is_temporary(table)); + ut_ad(!table->is_temporary()); const unsigned col_no = innodb_col_no(field); @@ -6143,7 +6170,7 @@ no_such_table: MONITOR_INC(MONITOR_TABLE_OPEN); - if (dict_table_is_discarded(ib_table)) { + if ((ib_table->flags2 & DICT_TF2_DISCARDED)) { ib_senderrf(thd, IB_LOG_LEVEL_WARN, ER_TABLESPACE_DISCARDED, @@ -6315,13 +6342,13 @@ no_such_table: } /* Index block size in InnoDB: used by MySQL in query optimization */ - stats.block_size = UNIV_PAGE_SIZE; + stats.block_size = srv_page_size; /* Init table lock structure */ thr_lock_data_init(&m_share->lock, &lock, NULL); if (m_prebuilt->table == NULL - || dict_table_is_temporary(m_prebuilt->table) + || m_prebuilt->table->is_temporary() || m_prebuilt->table->persistent_autoinc || !m_prebuilt->table->is_readable()) { } else if (const Field* ai = table->found_next_number_field) { @@ -6380,7 +6407,11 @@ platforms. @return dictionary table object or NULL if not found */ dict_table_t* ha_innobase::open_dict_table( - const char* table_name, + const char* +#ifdef _WIN32 + table_name +#endif + , const char* norm_name, bool is_partition, dict_err_ignore_t ignore_err) @@ -6524,7 +6555,7 @@ ha_innobase::close() #ifdef WITH_WSREP UNIV_INTERN -int +ulint wsrep_innobase_mysql_sort( /*======================*/ /* out: str contains sort string */ @@ -6538,7 +6569,7 @@ wsrep_innobase_mysql_sort( { CHARSET_INFO* charset; enum_field_types mysql_tp; - int ret_length = str_length; + ulint ret_length = str_length; DBUG_ASSERT(str_length != UNIV_SQL_NULL); @@ -6775,7 +6806,7 @@ innobase_mysql_fts_get_token( for (;;) { if (doc >= end) { - return(doc - start); + return ulint(doc - start); } int ctype; @@ -6817,7 +6848,7 @@ innobase_mysql_fts_get_token( token->f_len = (uint) (doc - token->f_str) - mwc; token->f_n_char = length; - return(doc - start); + return ulint(doc - start); } /** Converts a MySQL type to an InnoDB type. Note that this function returns @@ -8675,8 +8706,7 @@ wsrep_calc_row_hash( const uchar* row, /*!< in: row in MySQL format */ TABLE* table, /*!< in: table in MySQL data dictionary */ - row_prebuilt_t* prebuilt, /*!< in: InnoDB prebuilt struct */ - THD* thd) /*!< in: user thread */ + row_prebuilt_t* prebuilt) /*!< in: InnoDB prebuilt struct */ { Field* field; enum_field_types field_mysql_type; @@ -8817,7 +8847,13 @@ ha_innobase::update_row( goto func_exit; } - { + if (!uvect->n_fields) { + /* This is the same as success, but instructs + MySQL that the row is not really updated and it + should not increase the count of updated rows. 
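The update_row() rearrangement above returns early when the computed update vector is empty: reporting HA_ERR_RECORD_IS_THE_SAME tells the server the row was untouched, so it is not counted as an updated row (the bug 29157 behaviour), instead of detecting that condition only after the error-code conversion. A schematic, engine-agnostic sketch of that control flow, with invented helper names and arbitrary status values rather than the InnoDB ones:

#include <cstddef>

/* Invented stand-ins for the handler constants; values are arbitrary. */
enum update_status { UPDATE_OK = 0, UPDATE_RECORD_IS_THE_SAME = 1 };

struct update_vector { std::size_t n_fields; };

/* Crude per-byte diff standing in for the real column-wise comparison. */
static update_vector calc_changed_fields(const char* old_row,
					 const char* new_row, std::size_t len)
{
	update_vector u = {0};
	for (std::size_t i = 0; i < len; i++) {
		if (old_row[i] != new_row[i]) {
			u.n_fields++;
		}
	}
	return u;
}

static update_status update_row_sketch(const char* old_row,
				       const char* new_row, std::size_t len)
{
	update_vector u = calc_changed_fields(old_row, new_row, len);

	if (u.n_fields == 0) {
		/* Nothing changed: report it before touching storage so the
		server does not bump its "rows updated" counter. */
		return UPDATE_RECORD_IS_THE_SAME;
	}

	/* ... apply the update vector to the stored row here ... */
	return UPDATE_OK;
}

int main()
{
	const char row[] = "abc";
	return update_row_sketch(row, row, sizeof row)
		== UPDATE_RECORD_IS_THE_SAME ? 0 : 1;
}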
+ This is fix for http://bugs.mysql.com/29157 */ + DBUG_RETURN(HA_ERR_RECORD_IS_THE_SAME); + } else { const bool vers_set_fields = m_prebuilt->versioned_write && m_prebuilt->upd_node->update->affects_versioned(); const bool vers_ins_row = vers_set_fields @@ -8874,20 +8910,12 @@ ha_innobase::update_row( innobase_srv_conc_exit_innodb(m_prebuilt); func_exit: - - err = convert_error_code_to_mysql( - error, m_prebuilt->table->flags, m_user_thd); - - /* If success and no columns were updated. */ - if (err == 0 && uvect->n_fields == 0) { - - /* This is the same as success, but instructs - MySQL that the row is not really updated and it - should not increase the count of updated rows. - This is fix for http://bugs.mysql.com/29157 */ - err = HA_ERR_RECORD_IS_THE_SAME; - } else if (err == HA_FTS_INVALID_DOCID) { + if (error == DB_FTS_INVALID_DOCID) { + err = HA_FTS_INVALID_DOCID; my_error(HA_FTS_INVALID_DOCID, MYF(0)); + } else { + err = convert_error_code_to_mysql( + error, m_prebuilt->table->flags, m_user_thd); } /* Tell InnoDB server that there might be work for @@ -9066,8 +9094,7 @@ int ha_innobase::index_init( /*====================*/ uint keynr, /*!< in: key (index) number */ - bool sorted) /*!< in: 1 if result MUST be sorted - according to index */ + bool) { DBUG_ENTER("index_init"); @@ -9215,13 +9242,13 @@ ha_innobase::index_read( dict_index_t* index = m_prebuilt->index; - if (index == NULL || dict_index_is_corrupted(index)) { + if (index == NULL || index->is_corrupted()) { m_prebuilt->index_usable = FALSE; DBUG_RETURN(HA_ERR_CRASHED); } if (!m_prebuilt->index_usable) { - DBUG_RETURN(dict_index_is_corrupted(index) + DBUG_RETURN(index->is_corrupted() ? HA_ERR_INDEX_CORRUPT : HA_ERR_TABLE_DEF_CHANGED); } @@ -9253,8 +9280,7 @@ ha_innobase::index_read( m_prebuilt->srch_key_val_len, index, (byte*) key_ptr, - (ulint) key_len, - m_prebuilt->trx); + (ulint) key_len); DBUG_ASSERT(m_prebuilt->search_tuple->n_fields > 0); } else { @@ -9480,14 +9506,14 @@ ha_innobase::change_active_index( m_prebuilt->trx, m_prebuilt->index); if (!m_prebuilt->index_usable) { - if (dict_index_is_corrupted(m_prebuilt->index)) { + if (m_prebuilt->index->is_corrupted()) { char table_name[MAX_FULL_NAME_LEN + 1]; innobase_format_name( table_name, sizeof table_name, m_prebuilt->index->table->name.m_name); - if (dict_index_is_clust(m_prebuilt->index)) { + if (m_prebuilt->index->is_primary()) { ut_ad(m_prebuilt->index->table->corrupted); push_warning_printf( m_user_thd, Sql_condition::WARN_LEVEL_WARN, @@ -9673,8 +9699,7 @@ int ha_innobase::index_next_same( /*=========================*/ uchar* buf, /*!< in/out: buffer for the row */ - const uchar* key, /*!< in: key value */ - uint keylen) /*!< in: key value length */ + const uchar*, uint) { return(general_fetch(buf, ROW_SEL_NEXT, m_last_match_mode)); } @@ -9937,7 +9962,7 @@ ha_innobase::ft_init_ext( } /* If tablespace is discarded, we should return here */ - if (dict_table_is_discarded(ft_table)) { + if (!ft_table->space) { my_error(ER_NO_SUCH_TABLE, MYF(0), table->s->db.str, table->s->table_name.str); return(NULL); @@ -9966,7 +9991,8 @@ ha_innobase::ft_init_ext( const_cast(query)); // FIXME: support ft_init_ext_with_hints(), pass LIMIT - dberr_t error = fts_query(trx, index, flags, q, query_len, &result); + // FIXME: use trx + dberr_t error = fts_query(index, flags, q, query_len, &result); if (error != DB_SUCCESS) { my_error(convert_error_code_to_mysql(error, 0, NULL), MYF(0)); @@ -9988,25 +10014,6 @@ ha_innobase::ft_init_ext( return(reinterpret_cast(fts_hdl)); } 
-/**********************************************************************//** -Initialize FT index scan -@return FT_INFO structure if successful or NULL */ - -FT_INFO* -ha_innobase::ft_init_ext_with_hints( -/*================================*/ - uint keynr, /* in: key num */ - String* key, /* in: key */ - void* hints) /* in: hints */ -{ - /* TODO Implement function properly working with FT hint. */ -#ifdef MYSQL_FT_INIT_EXT - return(ft_init_ext(hints->get_flags(), keynr, key)); -#else - return NULL; -#endif -} - /*****************************************************************//** Set up search tuple for a query through FTS_DOC_ID_INDEX on supplied Doc ID. This is used by MySQL to retrieve the documents @@ -10322,7 +10329,7 @@ wsrep_append_foreign_key( } ut_a(idx); - key[0] = (char)i; + key[0] = byte(i); rcode = wsrep_rec_get_foreign_key( &key[1], &len, rec, index, idx, @@ -10410,7 +10417,6 @@ wsrep_append_key( THD *thd, trx_t *trx, TABLE_SHARE *table_share, - TABLE *table, const char* key, uint16_t key_len, bool shared @@ -10523,7 +10529,7 @@ ha_innobase::wsrep_append_keys( if (!is_null) { rcode = wsrep_append_key( - thd, trx, table_share, table, keyval, + thd, trx, table_share, keyval, len, shared); if (rcode) { @@ -10577,7 +10583,7 @@ ha_innobase::wsrep_append_keys( record0, &is_null); if (!is_null) { rcode = wsrep_append_key( - thd, trx, table_share, table, + thd, trx, table_share, keyval0, len+1, shared); if (rcode) { @@ -10600,7 +10606,6 @@ ha_innobase::wsrep_append_keys( if (!is_null && memcmp(key0, key1, len)) { rcode = wsrep_append_key( thd, trx, table_share, - table, keyval1, len+1, shared); if (rcode) DBUG_RETURN(rcode); } @@ -10614,9 +10619,9 @@ ha_innobase::wsrep_append_keys( uchar digest[16]; int rcode; - wsrep_calc_row_hash(digest, record0, table, m_prebuilt, thd); + wsrep_calc_row_hash(digest, record0, table, m_prebuilt); - if ((rcode = wsrep_append_key(thd, trx, table_share, table, + if ((rcode = wsrep_append_key(thd, trx, table_share, (const char*) digest, 16, shared))) { DBUG_RETURN(rcode); @@ -10624,9 +10629,8 @@ ha_innobase::wsrep_append_keys( if (record1) { wsrep_calc_row_hash( - digest, record1, table, m_prebuilt, thd); + digest, record1, table, m_prebuilt); if ((rcode = wsrep_append_key(thd, trx, table_share, - table, (const char*) digest, 16, shared))) { DBUG_RETURN(rcode); @@ -10789,7 +10793,7 @@ innodb_base_col_setup( const Field* field, dict_v_col_t* v_col) { - int n = 0; + ulint n = 0; prepare_vcol_for_base_setup(table, field, v_col); @@ -11212,7 +11216,6 @@ create_index( dict_index_t* index; int error; const KEY* key; - ulint ind_type; ulint* field_lengths; DBUG_ENTER("create_index"); @@ -11222,17 +11225,13 @@ create_index( /* Assert that "GEN_CLUST_INDEX" cannot be used as non-primary index */ ut_a(innobase_strcasecmp(key->name.str, innobase_index_reserve_name) != 0); - ind_type = 0; - if (key->flags & HA_SPATIAL) { - ind_type = DICT_SPATIAL; - } else if (key->flags & HA_FULLTEXT) { - ind_type = DICT_FTS; - } - - if (ind_type != 0) - { + if (key->flags & (HA_SPATIAL | HA_FULLTEXT)) { + /* Only one of these can be specified at a time. */ + ut_ad(~key->flags & (HA_SPATIAL | HA_FULLTEXT)); + ut_ad(!(key->flags & HA_NOSAME)); index = dict_mem_index_create(table, key->name.str, - ind_type, + (key->flags & HA_SPATIAL) + ? 
DICT_SPATIAL : DICT_FTS, key->user_defined_key_parts); for (ulint i = 0; i < key->user_defined_key_parts; i++) { @@ -11255,7 +11254,7 @@ create_index( table->flags, NULL)); } - ind_type = 0; + ulint ind_type = 0; if (key_num == form->s->primary_key) { ind_type |= DICT_CLUSTERED; @@ -11459,12 +11458,12 @@ create_table_info_t::create_options_are_invalid() case 8: case 16: /* The maximum KEY_BLOCK_SIZE (KBS) is - UNIV_PAGE_SIZE_MAX. But if UNIV_PAGE_SIZE is + UNIV_PAGE_SIZE_MAX. But if srv_page_size is smaller than UNIV_PAGE_SIZE_MAX, the maximum KBS is also smaller. */ kbs_max = ut_min( - 1 << (UNIV_PAGE_SSIZE_MAX - 1), - 1 << (PAGE_ZIP_SSIZE_MAX - 1)); + 1U << (UNIV_PAGE_SSIZE_MAX - 1), + 1U << (PAGE_ZIP_SSIZE_MAX - 1)); if (m_create_info->key_block_size > kbs_max) { push_warning_printf( m_thd, Sql_condition::WARN_LEVEL_WARN, @@ -11560,7 +11559,7 @@ create_table_info_t::create_options_are_invalid() /* Don't support compressed table when page size > 16k. */ if ((has_key_block_size || row_format == ROW_TYPE_COMPRESSED) - && UNIV_PAGE_SIZE > UNIV_PAGE_SIZE_DEF) { + && srv_page_size > UNIV_PAGE_SIZE_DEF) { push_warning(m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: Cannot create a COMPRESSED table" @@ -11739,7 +11738,7 @@ ha_innobase::update_create_info( create_info->auto_increment_value = stats.auto_increment_value; } - if (dict_table_is_temporary(m_prebuilt->table)) { + if (m_prebuilt->table->is_temporary()) { return; } @@ -11772,7 +11771,11 @@ innobase_fts_load_stopword( @return 0 if successful, otherwise, error number */ int create_table_info_t::parse_table_name( - const char* name) + const char* +#ifdef _WIN32 + name +#endif + ) { DBUG_ENTER("parse_table_name"); @@ -11972,7 +11975,7 @@ index_bad: if (row_type == ROW_TYPE_COMPRESSED && zip_allowed) { /* ROW_FORMAT=COMPRESSED without KEY_BLOCK_SIZE implies half the maximum KEY_BLOCK_SIZE(*1k) or - UNIV_PAGE_SIZE, whichever is less. */ + srv_page_size, whichever is less. */ zip_ssize = zip_ssize_max - 1; } } @@ -12023,7 +12026,7 @@ index_bad: } /* Don't support compressed table when page size > 16k. */ - if (zip_allowed && zip_ssize && UNIV_PAGE_SIZE > UNIV_PAGE_SIZE_DEF) { + if (zip_allowed && zip_ssize && srv_page_size > UNIV_PAGE_SIZE_DEF) { push_warning(m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: Cannot create a COMPRESSED table" @@ -12051,8 +12054,7 @@ index_bad: m_use_data_dir, options->page_compressed, options->page_compression_level == 0 ? - default_compression_level : static_cast(options->page_compression_level), - 0); + default_compression_level : ulint(options->page_compression_level)); if (m_form->s->table_type == TABLE_TYPE_SEQUENCE) { m_flags |= DICT_TF_MASK_NO_ROLLBACK; @@ -12151,7 +12153,7 @@ innobase_parse_hint_from_comment( } /* update SYS_INDEX table */ - if (!dict_table_is_temporary(table)) { + if (!table->is_temporary()) { for (uint i = 0; i < table_share->keys; i++) { is_found[i] = false; } @@ -12597,7 +12599,7 @@ create_table_info_t::create_table_update_dict() dict_table_autoinc_lock(innobase_table); dict_table_autoinc_initialize(innobase_table, autoinc); - if (dict_table_is_temporary(innobase_table)) { + if (innobase_table->is_temporary()) { /* AUTO_INCREMENT is not persistent for TEMPORARY TABLE. Temporary tables are never evicted. Keep the counter in memory only. 
*/ @@ -12622,10 +12624,9 @@ create_table_info_t::create_table_update_dict() dict_table_autoinc_unlock(innobase_table); } - dict_table_close(innobase_table, FALSE, FALSE); - innobase_parse_hint_from_comment(m_thd, innobase_table, m_form->s); + dict_table_close(innobase_table, FALSE, FALSE); DBUG_RETURN(0); } @@ -12739,7 +12740,7 @@ ha_innobase::discard_or_import_tablespace( dict_table_t* dict_table = m_prebuilt->table; - if (dict_table_is_temporary(dict_table)) { + if (dict_table->is_temporary()) { ib_senderrf( m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, @@ -13334,7 +13335,7 @@ ha_innobase::records_in_range( dict_index_t* index; dtuple_t* range_start; dtuple_t* range_end; - int64_t n_rows; + ha_rows n_rows; page_cur_mode_t mode1; page_cur_mode_t mode2; mem_heap_t* heap; @@ -13354,7 +13355,7 @@ ha_innobase::records_in_range( /* There exists possibility of not being able to find requested index due to inconsistency between MySQL and InoDB dictionary info. Necessary message should have been printed in innobase_get_index() */ - if (dict_table_is_discarded(m_prebuilt->table)) { + if (!m_prebuilt->table->space) { n_rows = HA_POS_ERROR; goto func_exit; } @@ -13362,7 +13363,7 @@ ha_innobase::records_in_range( n_rows = HA_POS_ERROR; goto func_exit; } - if (dict_index_is_corrupted(index)) { + if (index->is_corrupted()) { n_rows = HA_ERR_INDEX_CORRUPT; goto func_exit; } @@ -13386,8 +13387,7 @@ ha_innobase::records_in_range( m_prebuilt->srch_key_val_len, index, (byte*) (min_key ? min_key->key : (const uchar*) 0), - (ulint) (min_key ? min_key->length : 0), - m_prebuilt->trx); + (ulint) (min_key ? min_key->length : 0)); DBUG_ASSERT(min_key ? range_start->n_fields > 0 @@ -13399,8 +13399,7 @@ ha_innobase::records_in_range( m_prebuilt->srch_key_val_len, index, (byte*) (max_key ? max_key->key : (const uchar*) 0), - (ulint) (max_key ? max_key->length : 0), - m_prebuilt->trx); + (ulint) (max_key ? max_key->length : 0)); DBUG_ASSERT(max_key ? range_end->n_fields > 0 @@ -13484,8 +13483,8 @@ ha_innobase::estimate_rows_upper_bound() ut_a(stat_n_leaf_pages > 0); - local_data_file_length = - ((ulonglong) stat_n_leaf_pages) * UNIV_PAGE_SIZE; + local_data_file_length = ulonglong(stat_n_leaf_pages) + << srv_page_size_shift; /* Calculate a minimum length for a clustered index record and from that an upper bound for the number of rows. Since we only calculate @@ -13578,16 +13577,6 @@ ha_innobase::read_time( return(ranges + (double) rows / (double) total_rows * time_for_scan); } -/******************************************************************//** -Return the size of the InnoDB memory buffer. */ - -longlong -ha_innobase::get_memory_buffer_size() const -/*=======================================*/ -{ - return(innobase_buffer_pool_size); -} - /** Update the system variable with the given value of the InnoDB buffer pool size. @param[in] buf_pool_size given value of buffer pool size.*/ @@ -13607,7 +13596,7 @@ match. 
In this case, we have to take into account if we generated a default clustered index for the table @return the key number used inside MySQL */ static -int +unsigned innobase_get_mysql_key_number_for_index( /*====================================*/ INNOBASE_SHARE* share, /*!< in: share structure for index @@ -13633,8 +13622,7 @@ innobase_get_mysql_key_number_for_index( i++; } - if (dict_index_is_clust(index) - && dict_index_is_auto_gen_clust(index)) { + if (dict_index_is_auto_gen_clust(index)) { ut_a(i > 0); i--; } @@ -13688,13 +13676,13 @@ innobase_get_mysql_key_number_for_index( " index.", index->name()); } - return(-1); + return(~0U); } } ut_error; - return(-1); + return(~0U); } /*********************************************************************//** @@ -14136,7 +14124,7 @@ ha_innobase::info_low( errkey = (unsigned int) ( (m_prebuilt->trx->error_key_num == ULINT_UNDEFINED) - ? ~0 + ? ~0U : m_prebuilt->trx->error_key_num); } } @@ -14170,10 +14158,7 @@ each index tree. This does NOT calculate exact statistics on the table. @return HA_ADMIN_* error code or HA_ADMIN_OK */ int -ha_innobase::analyze( -/*=================*/ - THD* thd, /*!< in: connection thread handle */ - HA_CHECK_OPT* check_opt) /*!< in: currently ignored */ +ha_innobase::analyze(THD*, HA_CHECK_OPT*) { /* Simply call info_low() with all the flags and request recalculation of the statistics */ @@ -14218,7 +14203,7 @@ ha_innobase::defragment_table( for (index = dict_table_get_first_index(table); index; index = dict_table_get_next_index(index)) { - if (dict_index_is_corrupted(index)) { + if (index->is_corrupted()) { continue; } @@ -14310,7 +14295,7 @@ int ha_innobase::optimize( /*==================*/ THD* thd, /*!< in: connection thread handle */ - HA_CHECK_OPT* check_opt) /*!< in: currently ignored */ + HA_CHECK_OPT*) { /* FTS-FIXME: Since MySQL doesn't support engine-specific commands, @@ -14322,17 +14307,15 @@ ha_innobase::optimize( This works OK otherwise, but MySQL locks the entire table during calls to OPTIMIZE, which is undesirable. */ - /* TODO: Defragment is disabled for now */ if (srv_defragment) { - int err; - - err = defragment_table(m_prebuilt->table->name.m_name, NULL, false); + int err= defragment_table( + m_prebuilt->table->name.m_name, NULL, false); if (err == 0) { return (HA_ADMIN_OK); } else { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - err, + uint(err), "InnoDB: Cannot defragment table %s: returned error code %d\n", m_prebuilt->table->name, err); @@ -14346,7 +14329,7 @@ ha_innobase::optimize( if (innodb_optimize_fulltext_only) { if (m_prebuilt->table->fts && m_prebuilt->table->fts->cache - && !dict_table_is_discarded(m_prebuilt->table)) { + && m_prebuilt->table->space) { fts_sync_table(m_prebuilt->table, false, true, false); fts_optimize_table(m_prebuilt->table); } @@ -14388,7 +14371,7 @@ ha_innobase::check( build_template(true); } - if (dict_table_is_discarded(m_prebuilt->table)) { + if (!m_prebuilt->table->space) { ib_senderrf( thd, @@ -14417,7 +14400,7 @@ ha_innobase::check( clustered index, we will do so here */ index = dict_table_get_first_index(m_prebuilt->table); - if (!dict_index_is_corrupted(index)) { + if (!index->is_corrupted()) { dict_set_corrupted( index, m_prebuilt->trx, "CHECK TABLE"); } @@ -14455,7 +14438,7 @@ ha_innobase::check( } if (!(check_opt->flags & T_QUICK) - && !dict_index_is_corrupted(index)) { + && !index->is_corrupted()) { /* Enlarge the fatal lock wait timeout during CHECK TABLE. 
*/ my_atomic_addlong( @@ -14507,7 +14490,7 @@ ha_innobase::check( DBUG_EXECUTE_IF( "dict_set_index_corrupted", - if (!dict_index_is_clust(index)) { + if (!index->is_primary()) { m_prebuilt->index_usable = FALSE; // row_mysql_lock_data_dictionary(m_prebuilt->trx); dict_set_corrupted(index, m_prebuilt->trx, "dict_set_index_corrupted"); @@ -14515,7 +14498,7 @@ ha_innobase::check( }); if (UNIV_UNLIKELY(!m_prebuilt->index_usable)) { - if (dict_index_is_corrupted(m_prebuilt->index)) { + if (index->is_corrupted()) { push_warning_printf( m_user_thd, Sql_condition::WARN_LEVEL_WARN, @@ -14555,7 +14538,7 @@ ha_innobase::check( DBUG_EXECUTE_IF( "dict_set_index_corrupted", - if (!dict_index_is_clust(index)) { + if (!index->is_primary()) { ret = DB_CORRUPTION; }); @@ -15316,7 +15299,7 @@ ha_innobase::start_stmt( m_prebuilt->hint_need_to_fetch_extra_cols = 0; reset_template(); - if (dict_table_is_temporary(m_prebuilt->table) + if (m_prebuilt->table->is_temporary() && m_mysql_has_locked && m_prebuilt->select_lock_type == LOCK_NONE) { dberr_t error; @@ -15325,6 +15308,7 @@ ha_innobase::start_stmt( case SQLCOM_INSERT: case SQLCOM_UPDATE: case SQLCOM_DELETE: + case SQLCOM_REPLACE: init_table_handle_for_HANDLER(); m_prebuilt->select_lock_type = LOCK_X; m_prebuilt->stored_select_lock_type = LOCK_X; @@ -15502,7 +15486,7 @@ ha_innobase::external_lock( && thd_sql_command(thd) == SQLCOM_FLUSH && lock_type == F_RDLCK) { - if (dict_table_is_discarded(m_prebuilt->table)) { + if (!m_prebuilt->table->space) { ib_senderrf(trx->mysql_thd, IB_LOG_LEVEL_ERROR, ER_TABLESPACE_DISCARDED, table->s->table_name.str); @@ -15705,7 +15689,7 @@ innodb_show_status( bytes of text. */ char* str; - ssize_t flen; + size_t flen; mutex_enter(&srv_monitor_file_mutex); rewind(srv_monitor_file); @@ -15715,11 +15699,12 @@ innodb_show_status( os_file_set_eof(srv_monitor_file); - if ((flen = ftell(srv_monitor_file)) < 0) { + flen = size_t(ftell(srv_monitor_file)); + if (ssize_t(flen) < 0) { flen = 0; } - ssize_t usable_len; + size_t usable_len; if (flen > MAX_STATUS_SIZE) { usable_len = MAX_STATUS_SIZE; @@ -15732,7 +15717,7 @@ innodb_show_status( read the contents of the temporary file */ if (!(str = (char*) my_malloc(//PSI_INSTRUMENT_ME, - usable_len + 1, MYF(0)))) { + usable_len + 1, MYF(0)))) { mutex_exit(&srv_monitor_file_mutex); DBUG_RETURN(1); } @@ -15742,19 +15727,18 @@ innodb_show_status( if (flen < MAX_STATUS_SIZE) { /* Display the entire output. */ flen = fread(str, 1, flen, srv_monitor_file); - } else if (trx_list_end < (ulint) flen + } else if (trx_list_end < flen && trx_list_start < trx_list_end - && trx_list_start + (flen - trx_list_end) + && trx_list_start + flen - trx_list_end < MAX_STATUS_SIZE - sizeof truncated_msg - 1) { /* Omit the beginning of the list of active transactions. 
*/ - ssize_t len = fread(str, 1, trx_list_start, srv_monitor_file); + size_t len = fread(str, 1, trx_list_start, srv_monitor_file); memcpy(str + len, truncated_msg, sizeof truncated_msg - 1); len += sizeof truncated_msg - 1; usable_len = (MAX_STATUS_SIZE - 1) - len; - fseek(srv_monitor_file, - static_cast<long>(flen - usable_len), SEEK_SET); + fseek(srv_monitor_file, long(flen - usable_len), SEEK_SET); len += fread(str + len, 1, usable_len, srv_monitor_file); flen = len; } else { @@ -15894,12 +15878,10 @@ struct ShowStatus { spins=N,waits=N,calls=N" The user has to parse the data, unfortunately - @param[in,out] hton the innodb handlerton @param[in,out] thd the MySQL query thread of the caller @param[in,out] stat_print function for printing statistics @return true on success. */ bool to_string( - handlerton* hton, THD* thd, stat_print_fn* stat_print) UNIV_NOTHROW; @@ -15915,13 +15897,11 @@ We store the metrics in the "Status" column as: spins=N,waits=N,calls=N" The user has to parse the data, unfortunately -@param[in,out] hton the innodb handlerton @param[in,out] thd the MySQL query thread of the caller @param[in,out] stat_print function for printing statistics @return true on success. */ bool ShowStatus::to_string( - handlerton* hton, THD* thd, stat_print_fn* stat_print) UNIV_NOTHROW @@ -15970,7 +15950,11 @@ ShowStatus::to_string( static int innodb_show_mutex_status( - handlerton* hton, + handlerton* +#ifndef DBUG_OFF + hton +#endif + , THD* thd, stat_print_fn* stat_print) { @@ -15982,7 +15966,7 @@ innodb_show_mutex_status( mutex_monitor.iterate(collector); - if (!collector.to_string(hton, thd, stat_print)) { + if (!collector.to_string(thd, stat_print)) { DBUG_RETURN(1); } @@ -15997,7 +15981,11 @@ innodb_show_mutex_status( static int innodb_show_rwlock_status( - handlerton* hton, + handlerton* +#ifndef DBUG_OFF + hton +#endif + , THD* thd, stat_print_fn* stat_print) { @@ -16307,7 +16295,7 @@ ha_innobase::store_lock( DBUG_ASSERT(EQ_CURRENT_THD(thd)); const bool in_lock_tables = thd_in_lock_tables(thd); - const uint sql_command = thd_sql_command(thd); + const int sql_command = thd_sql_command(thd); if (srv_read_only_mode && (sql_command == SQLCOM_UPDATE @@ -16964,8 +16952,7 @@ innobase_get_at_most_n_mbchars( characters, and we can store in the column prefix index the whole string. 
*/ - char_length = my_charpos(charset, str, - str + data_len, (int) n_chars); + char_length= my_charpos(charset, str, str + data_len, n_chars); if (char_length > data_len) { char_length = data_len; } @@ -17194,10 +17181,7 @@ void innodb_io_capacity_max_update( /*===========================*/ THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ + st_mysql_sys_var*, void*, const void* save) /*!< in: immediate result from check function */ { @@ -17229,10 +17213,7 @@ void innodb_io_capacity_update( /*======================*/ THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ + st_mysql_sys_var*, void*, const void* save) /*!< in: immediate result from check function */ { @@ -17264,10 +17245,7 @@ void innodb_max_dirty_pages_pct_update( /*==============================*/ THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ + st_mysql_sys_var*, void*, const void* save) /*!< in: immediate result from check function */ { @@ -17298,10 +17276,7 @@ void innodb_max_dirty_pages_pct_lwm_update( /*==================================*/ THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ + st_mysql_sys_var*, void*, const void* save) /*!< in: immediate result from check function */ { @@ -17363,8 +17338,7 @@ int innodb_stopword_table_validate( /*===========================*/ THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to system - variable */ + st_mysql_sys_var*, void* save, /*!< out: immediate result for update function */ struct st_mysql_value* value) /*!< in: incoming string */ @@ -17399,17 +17373,10 @@ innodb_stopword_table_validate( /** Update the system variable innodb_buffer_pool_size using the "saved" value. This function is registered as a callback with MySQL. -@param[in] thd thread handle -@param[in] var pointer to system variable -@param[out] var_ptr where the formal string goes @param[in] save immediate result from check function */ static void -innodb_buffer_pool_size_update( - THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save) +innodb_buffer_pool_size_update(THD*,st_mysql_sys_var*,void*, const void* save) { longlong in_val = *static_cast(save); @@ -17431,9 +17398,7 @@ static int innodb_internal_table_validate( /*===========================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to system - variable */ + THD*, st_mysql_sys_var*, void* save, /*!< out: immediate result for update function */ struct st_mysql_value* value) /*!< in: incoming string */ @@ -17483,9 +17448,7 @@ static void innodb_internal_table_update( /*=========================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ + THD*, st_mysql_sys_var*, void* var_ptr,/*!< out: where the formal string goes */ const void* save) /*!< in: immediate result @@ -17516,15 +17479,8 @@ Update the system variable innodb_adaptive_hash_index using the "saved" value. This function is registered as a callback with MySQL. 
*/ static void -innodb_adaptive_hash_index_update( -/*==============================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ - const void* save) /*!< in: immediate result - from check function */ +innodb_adaptive_hash_index_update(THD*, st_mysql_sys_var*, void*, + const void* save) { if (*(my_bool*) save) { btr_search_enable(); @@ -17539,15 +17495,7 @@ Update the system variable innodb_cmp_per_index using the "saved" value. This function is registered as a callback with MySQL. */ static void -innodb_cmp_per_index_update( -/*========================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ - const void* save) /*!< in: immediate result - from check function */ +innodb_cmp_per_index_update(THD*, st_mysql_sys_var*, void*, const void* save) { /* Reset the stats whenever we enable the table INFORMATION_SCHEMA.innodb_cmp_per_index. */ @@ -17563,15 +17511,7 @@ Update the system variable innodb_old_blocks_pct using the "saved" value. This function is registered as a callback with MySQL. */ static void -innodb_old_blocks_pct_update( -/*=========================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ - const void* save) /*!< in: immediate result - from check function */ +innodb_old_blocks_pct_update(THD*, st_mysql_sys_var*, void*, const void* save) { innobase_old_blocks_pct = static_cast( buf_LRU_old_ratio_update( @@ -17583,15 +17523,8 @@ Update the system variable innodb_old_blocks_pct using the "saved" value. This function is registered as a callback with MySQL. */ static void -innodb_change_buffer_max_size_update( -/*=================================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ - const void* save) /*!< in: immediate result - from check function */ +innodb_change_buffer_max_size_update(THD*, st_mysql_sys_var*, void*, + const void* save) { srv_change_buffer_max_size = (*static_cast(save)); @@ -17606,15 +17539,7 @@ static ulong srv_saved_page_number_debug = 0; Save an InnoDB page number. */ static void -innodb_save_page_no( -/*================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ - const void* save) /*!< in: immediate result - from check function */ +innodb_save_page_no(THD*, st_mysql_sys_var*, void*, const void* save) { srv_saved_page_number_debug = *static_cast(save); @@ -17626,15 +17551,7 @@ innodb_save_page_no( Make the first page of given user tablespace dirty. 
*/ static void -innodb_make_page_dirty( -/*===================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ - const void* save) /*!< in: immediate result - from check function */ +innodb_make_page_dirty(THD*, st_mysql_sys_var*, void*, const void* save) { mtr_t mtr; ulong space_id = *static_cast(save); @@ -17645,7 +17562,7 @@ innodb_make_page_dirty( } if (srv_saved_page_number_debug >= space->size) { - fil_space_release(space); + space->release(); return; } @@ -17667,111 +17584,9 @@ innodb_make_page_dirty( MLOG_2BYTES, &mtr); } mtr.commit(); - fil_space_release(space); + space->release(); } #endif // UNIV_DEBUG -/*************************************************************//** -Find the corresponding ibuf_use_t value that indexes into -innobase_change_buffering_values[] array for the input -change buffering option name. -@return corresponding IBUF_USE_* value for the input variable -name, or IBUF_USE_COUNT if not able to find a match */ -static -ibuf_use_t -innodb_find_change_buffering_value( -/*===============================*/ - const char* input_name) /*!< in: input change buffering - option name */ -{ - for (ulint i = 0; - i < UT_ARR_SIZE(innobase_change_buffering_values); - ++i) { - - /* found a match */ - if (!innobase_strcasecmp( - input_name, innobase_change_buffering_values[i])) { - return(static_cast(i)); - } - } - - /* Did not find any match */ - return(IBUF_USE_COUNT); -} - -/*************************************************************//** -Check if it is a valid value of innodb_change_buffering. This function is -registered as a callback with MySQL. -@return 0 for valid innodb_change_buffering */ -static -int -innodb_change_buffering_validate( -/*=============================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to system - variable */ - void* save, /*!< out: immediate result - for update function */ - struct st_mysql_value* value) /*!< in: incoming string */ -{ - const char* change_buffering_input; - char buff[STRING_BUFFER_USUAL_SIZE]; - int len = sizeof(buff); - - ut_a(save != NULL); - ut_a(value != NULL); - - change_buffering_input = value->val_str(value, buff, &len); - - if (change_buffering_input != NULL) { - ibuf_use_t use; - - use = innodb_find_change_buffering_value( - change_buffering_input); - - if (use != IBUF_USE_COUNT) { - /* Find a matching change_buffering option value. */ - *static_cast(save) = - innobase_change_buffering_values[use]; - - return(0); - } - } - - /* No corresponding change buffering option for user supplied - "change_buffering_input" */ - return(1); -} - -/****************************************************************//** -Update the system variable innodb_change_buffering using the "saved" -value. This function is registered as a callback with MySQL. 
*/ -static -void -innodb_change_buffering_update( -/*===========================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ - const void* save) /*!< in: immediate result - from check function */ -{ - ibuf_use_t use; - - ut_a(var_ptr != NULL); - ut_a(save != NULL); - - use = innodb_find_change_buffering_value( - *static_cast(save)); - - ut_a(use < IBUF_USE_COUNT); - - ibuf_use = use; - *static_cast(var_ptr) = - *static_cast(save); -} - /*************************************************************//** Just emit a warning that the usage of the variable is deprecated. @return 0 */ @@ -17780,10 +17595,7 @@ void innodb_stats_sample_pages_update( /*=============================*/ THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ + st_mysql_sys_var*, void*, const void* save) /*!< in: immediate result from check function */ { @@ -18059,9 +17871,7 @@ static int innodb_monitor_validate( /*====================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to system - variable */ + THD*, st_mysql_sys_var*, void* save, /*!< out: immediate result for update function */ struct st_mysql_value* value) /*!< in: incoming string */ @@ -18236,9 +18046,7 @@ innodb_srv_buf_dump_filename_validate( ut_a(save != NULL); ut_a(value != NULL); - const char* buf_name = value->val_str(value, buff, &len); - - if (buf_name != NULL) { + if (const char* buf_name = value->val_str(value, buff, &len)) { if (is_filename_allowed(buf_name, len, FALSE)){ *static_cast(save) = buf_name; return(0); @@ -18307,13 +18115,8 @@ SET GLOBAL innodb_buffer_pool_evict='uncompressed' evicts all uncompressed page frames of compressed tablespaces. 
*/ static void -innodb_buffer_pool_evict_update( -/*============================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var*var, /*!< in: pointer to system variable */ - void* var_ptr,/*!< out: ignored */ - const void* save) /*!< in: immediate result - from check function */ +innodb_buffer_pool_evict_update(THD*, st_mysql_sys_var*, void*, + const void* save) { if (const char* op = *static_cast(save)) { if (!strcmp(op, "uncompressed")) { @@ -18341,8 +18144,7 @@ void innodb_enable_monitor_update( /*=========================*/ THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ + st_mysql_sys_var*, void* var_ptr,/*!< out: where the formal string goes */ const void* save) /*!< in: immediate result @@ -18359,8 +18161,7 @@ void innodb_disable_monitor_update( /*==========================*/ THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ + st_mysql_sys_var*, void* var_ptr,/*!< out: where the formal string goes */ const void* save) /*!< in: immediate result @@ -18378,8 +18179,7 @@ void innodb_reset_monitor_update( /*========================*/ THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ + st_mysql_sys_var*, void* var_ptr,/*!< out: where the formal string goes */ const void* save) /*!< in: immediate result @@ -18397,8 +18197,7 @@ void innodb_reset_all_monitor_update( /*============================*/ THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ + st_mysql_sys_var*, void* var_ptr,/*!< out: where the formal string goes */ const void* save) /*!< in: immediate result @@ -18410,15 +18209,8 @@ innodb_reset_all_monitor_update( static void -innodb_defragment_frequency_update( -/*===============================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ - const void* save) /*!< in: immediate result - from check function */ +innodb_defragment_frequency_update(THD*, st_mysql_sys_var*, void*, + const void* save) { srv_defragment_frequency = (*static_cast(save)); srv_defragment_interval = ut_microseconds_to_timer( @@ -18458,13 +18250,8 @@ innodb_enable_monitor_at_startup( for (char* option = my_strtok_r(str, sep, &last); option; option = my_strtok_r(NULL, sep, &last)) { - ulint ret; char* option_name; - - ret = innodb_monitor_valid_byname(&option_name, option); - - /* The name is validated if ret == 0 */ - if (!ret) { + if (!innodb_monitor_valid_byname(&option_name, option)) { innodb_monitor_update(NULL, NULL, &option, MONITOR_TURN_ON, FALSE); } else { @@ -18477,13 +18264,7 @@ innodb_enable_monitor_at_startup( /****************************************************************//** Callback function for accessing the InnoDB variables from MySQL: SHOW VARIABLES. */ -static -int -show_innodb_vars( -/*=============*/ - THD* thd, - SHOW_VAR* var, - char* buff) +static int show_innodb_vars(THD*, SHOW_VAR* var, char*) { innodb_export_status(); var->type = SHOW_ARRAY; @@ -18602,17 +18383,7 @@ static uint innodb_merge_threshold_set_all_debug /** Wait for the background drop list to become empty. 
*/ static void -wait_background_drop_list_empty( - THD* thd /*!< in: thread handle */ - MY_ATTRIBUTE((unused)), - struct st_mysql_sys_var* var /*!< in: pointer to system - variable */ - MY_ATTRIBUTE((unused)), - void* var_ptr /*!< out: where the formal - string goes */ - MY_ATTRIBUTE((unused)), - const void* save) /*!< in: immediate result from - check function */ +wait_background_drop_list_empty(THD*, st_mysql_sys_var*, void*, const void*) { row_wait_for_background_drop_list_empty(); } @@ -18621,30 +18392,19 @@ wait_background_drop_list_empty( Force innodb to checkpoint. */ static void -checkpoint_now_set( -/*===============*/ - THD* thd /*!< in: thread handle */ - MY_ATTRIBUTE((unused)), - struct st_mysql_sys_var* var /*!< in: pointer to system - variable */ - MY_ATTRIBUTE((unused)), - void* var_ptr /*!< out: where the formal - string goes */ - MY_ATTRIBUTE((unused)), - const void* save) /*!< in: immediate result from - check function */ +checkpoint_now_set(THD*, st_mysql_sys_var*, void*, const void* save) { if (*(my_bool*) save) { - while (log_sys->last_checkpoint_lsn + while (log_sys.last_checkpoint_lsn + SIZE_OF_MLOG_CHECKPOINT - + (log_sys->append_on_checkpoint != NULL - ? log_sys->append_on_checkpoint->size() : 0) - < log_sys->lsn) { + + (log_sys.append_on_checkpoint != NULL + ? log_sys.append_on_checkpoint->size() : 0) + < log_sys.lsn) { log_make_checkpoint_at(LSN_MAX, TRUE); fil_flush_file_spaces(FIL_TYPE_LOG); } - dberr_t err = fil_write_flushed_lsn(log_sys->lsn); + dberr_t err = fil_write_flushed_lsn(log_sys.lsn); if (err != DB_SUCCESS) { ib::warn() << "Checkpoint set failed " << err; @@ -18656,18 +18416,7 @@ checkpoint_now_set( Force a dirty pages flush now. */ static void -buf_flush_list_now_set( -/*===================*/ - THD* thd /*!< in: thread handle */ - MY_ATTRIBUTE((unused)), - struct st_mysql_sys_var* var /*!< in: pointer to system - variable */ - MY_ATTRIBUTE((unused)), - void* var_ptr /*!< out: where the formal - string goes */ - MY_ATTRIBUTE((unused)), - const void* save) /*!< in: immediate result from - check function */ +buf_flush_list_now_set(THD*, st_mysql_sys_var*, void*, const void* save) { if (*(my_bool*) save) { buf_flush_sync_all_buf_pools(); @@ -18676,17 +18425,11 @@ buf_flush_list_now_set( /** Override current MERGE_THRESHOLD setting for all indexes at dictionary now. 
-@param[in] thd thread handle -@param[in] var pointer to system variable -@param[out] var_ptr where the formal string goes @param[in] save immediate result from check function */ static void -innodb_merge_threshold_set_all_debug_update( - THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save) +innodb_merge_threshold_set_all_debug_update(THD*, st_mysql_sys_var*, void*, + const void* save) { innodb_merge_threshold_set_all_debug = (*static_cast(save)); @@ -18813,10 +18556,7 @@ void innodb_log_write_ahead_size_update( /*===============================*/ THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ + st_mysql_sys_var*, void*, const void* save) /*!< in: immediate result from check function */ { @@ -18827,8 +18567,8 @@ innodb_log_write_ahead_size_update( val = val * 2; } - if (val > UNIV_PAGE_SIZE) { - val = UNIV_PAGE_SIZE; + if (val > srv_page_size) { + val = srv_page_size; push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_ARGUMENTS, "innodb_log_write_ahead_size cannot" @@ -18837,7 +18577,7 @@ innodb_log_write_ahead_size_update( ER_WRONG_ARGUMENTS, "Setting innodb_log_write_ahead_size" " to %lu", - UNIV_PAGE_SIZE); + srv_page_size); } else if (val != in_val) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_ARGUMENTS, @@ -18859,12 +18599,8 @@ which control InnoDB "status monitor" output to the error log. @param[in] save to-be-assigned value */ static void -innodb_status_output_update( -/*========================*/ - THD* thd __attribute__((unused)), - struct st_mysql_sys_var* var __attribute__((unused)), - void* var_ptr __attribute__((unused)), - const void* save __attribute__((unused))) +innodb_status_output_update(THD*, st_mysql_sys_var*, void* var_ptr, + const void* save) { *static_cast(var_ptr) = *static_cast(save); /* Wakeup server monitor thread. 
*/ @@ -18875,15 +18611,8 @@ innodb_status_output_update( Update the system variable innodb_encryption_threads */ static void -innodb_encryption_threads_update( -/*=============================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ - const void* save) /*!< in: immediate result - from check function */ +innodb_encryption_threads_update(THD*, st_mysql_sys_var*, void*, + const void* save) { fil_crypt_set_thread_cnt(*static_cast(save)); } @@ -18892,15 +18621,8 @@ innodb_encryption_threads_update( Update the system variable innodb_encryption_rotate_key_age */ static void -innodb_encryption_rotate_key_age_update( -/*====================================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ - const void* save) /*!< in: immediate result - from check function */ +innodb_encryption_rotate_key_age_update(THD*, st_mysql_sys_var*, void*, + const void* save) { fil_crypt_set_rotate_key_age(*static_cast(save)); } @@ -18909,15 +18631,8 @@ innodb_encryption_rotate_key_age_update( Update the system variable innodb_encryption_rotation_iops */ static void -innodb_encryption_rotation_iops_update( -/*===================================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ - const void* save) /*!< in: immediate result - from check function */ +innodb_encryption_rotation_iops_update(THD*, st_mysql_sys_var*, void*, + const void* save) { fil_crypt_set_rotation_iops(*static_cast(save)); } @@ -18926,31 +18641,19 @@ innodb_encryption_rotation_iops_update( Update the system variable innodb_encrypt_tables*/ static void -innodb_encrypt_tables_update( -/*=========================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ - const void* save) /*!< in: immediate result - from check function */ +innodb_encrypt_tables_update(THD*, st_mysql_sys_var*, void*, const void* save) { fil_crypt_set_encrypt_tables(*static_cast(save)); } /** Update the innodb_log_checksums parameter. 
-@param[in] thd thread handle -@param[in] var system variable +@param[in,out] thd client connection @param[out] var_ptr current value @param[in] save immediate result from check function */ static void -innodb_log_checksums_update( - THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save) +innodb_log_checksums_update(THD* thd, st_mysql_sys_var*, void* var_ptr, + const void* save) { *static_cast(var_ptr) = innodb_log_checksums_func_update( thd, *static_cast(save)); @@ -19205,7 +18908,7 @@ static int wsrep_abort_transaction( /*====================*/ - handlerton* hton, + handlerton*, THD *bf_thd, THD *victim_thd, my_bool signal) @@ -19269,12 +18972,7 @@ innobase_wsrep_get_checkpoint( return 0; } -static -void -wsrep_fake_trx_id( -/*==============*/ - handlerton *hton, - THD *thd) /*!< in: user thread handle */ +static void wsrep_fake_trx_id(handlerton *, THD *thd) { trx_id_t trx_id = trx_sys.get_new_trx_id(); WSREP_DEBUG("innodb fake trx id: " TRX_ID_FMT " thd: %s", @@ -19328,7 +19026,7 @@ static MYSQL_SYSVAR_STR(data_home_dir, innobase_data_home_dir, "The common part for InnoDB table spaces.", NULL, NULL, NULL); -static MYSQL_SYSVAR_BOOL(doublewrite, innobase_use_doublewrite, +static MYSQL_SYSVAR_BOOL(doublewrite, srv_use_doublewrite_buf, PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, "Enable InnoDB doublewrite buffer (enabled by default)." " Disable with --skip-innodb-doublewrite.", @@ -19456,9 +19154,11 @@ static MYSQL_SYSVAR_ULONG(flush_log_at_trx_commit, srv_flush_log_at_trx_commit, " guarantees in case of crash. 0 and 2 can be faster than 1 or 3.", NULL, NULL, 1, 0, 3, 0); -static MYSQL_SYSVAR_STR(flush_method, innobase_file_flush_method, +static MYSQL_SYSVAR_ENUM(flush_method, innodb_flush_method, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "With which method to flush data.", NULL, NULL, NULL); + "With which method to flush data.", + NULL, NULL, IF_WIN(SRV_ALL_O_DIRECT_FSYNC, SRV_FSYNC), + &innodb_flush_method_typelib); static MYSQL_SYSVAR_BOOL(force_load_corrupted, srv_load_corrupted, PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, @@ -19660,13 +19360,13 @@ BUF_POOL_SIZE_THRESHOLD (srv/srv0start.cc), then srv_buf_pool_instances_default can be removed and 8 used instead. The problem with the current setup is that with 128MiB default buffer pool size and 8 instances by default we would emit a warning when no options are specified. 
*/ -static MYSQL_SYSVAR_LONGLONG(buffer_pool_size, innobase_buffer_pool_size, +static MYSQL_SYSVAR_ULONGLONG(buffer_pool_size, innobase_buffer_pool_size, PLUGIN_VAR_RQCMDARG, "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.", innodb_buffer_pool_size_validate, innodb_buffer_pool_size_update, - static_cast(srv_buf_pool_def_size), - static_cast(srv_buf_pool_min_size), + srv_buf_pool_def_size, + srv_buf_pool_min_size, LLONG_MAX, 1024*1024L); static MYSQL_SYSVAR_ULONG(buffer_pool_chunk_size, srv_buf_pool_chunk_unit, @@ -19839,7 +19539,7 @@ static MYSQL_SYSVAR_BOOL(deadlock_detect, innobase_deadlock_detect, " and we rely on innodb_lock_wait_timeout in case of deadlock.", NULL, NULL, TRUE); -static MYSQL_SYSVAR_LONG(fill_factor, innobase_fill_factor, +static MYSQL_SYSVAR_UINT(fill_factor, innobase_fill_factor, PLUGIN_VAR_RQCMDARG, "Percentage of B-tree page filled during bulk insert", NULL, NULL, 100, 10, 100, 0); @@ -19910,12 +19610,12 @@ static MYSQL_SYSVAR_BOOL(optimize_fulltext_only, innodb_optimize_fulltext_only, "Only optimize the Fulltext index of the table", NULL, NULL, FALSE); -static MYSQL_SYSVAR_ULONG(read_io_threads, innobase_read_io_threads, +static MYSQL_SYSVAR_ULONG(read_io_threads, srv_n_read_io_threads, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Number of background read I/O threads in InnoDB.", NULL, NULL, 4, 1, 64, 0); -static MYSQL_SYSVAR_ULONG(write_io_threads, innobase_write_io_threads, +static MYSQL_SYSVAR_ULONG(write_io_threads, srv_n_write_io_threads, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Number of background write I/O threads in InnoDB.", NULL, NULL, 4, 1, 64, 0); @@ -19931,10 +19631,10 @@ static MYSQL_SYSVAR_ULONG(page_size, srv_page_size, NULL, NULL, UNIV_PAGE_SIZE_DEF, UNIV_PAGE_SIZE_MIN, UNIV_PAGE_SIZE_MAX, 0); -static MYSQL_SYSVAR_LONG(log_buffer_size, innobase_log_buffer_size, +static MYSQL_SYSVAR_ULONG(log_buffer_size, srv_log_buffer_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "The size of the buffer which InnoDB uses to write log to the log files on disk.", - NULL, NULL, 16*1024*1024L, 256*1024L, LONG_MAX, 1024); + NULL, NULL, 16L << 20, 256L << 10, LONG_MAX, 1024); static MYSQL_SYSVAR_ULONGLONG(log_file_size, srv_log_file_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, @@ -19967,10 +19667,10 @@ static MYSQL_SYSVAR_UINT(old_blocks_time, buf_LRU_old_threshold_ms, " The timeout is disabled if 0.", NULL, NULL, 1000, 0, UINT_MAX32, 0); -static MYSQL_SYSVAR_LONG(open_files, innobase_open_files, +static MYSQL_SYSVAR_ULONG(open_files, innobase_open_files, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "How many files at the maximum InnoDB keeps open at the same time.", - NULL, NULL, 0L, 0L, LONG_MAX, 0); + NULL, NULL, 0, 0, LONG_MAX, 0); static MYSQL_SYSVAR_ULONG(sync_spin_loops, srv_n_spin_wait_rounds, PLUGIN_VAR_RQCMDARG, @@ -20014,12 +19714,12 @@ static MYSQL_SYSVAR_ULONG(thread_sleep_delay, srv_thread_sleep_delay, static MYSQL_SYSVAR_STR(data_file_path, innobase_data_file_path, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Path to individual files and their sizes.", - NULL, NULL, NULL); + NULL, NULL, "ibdata1:12M:autoextend"); static MYSQL_SYSVAR_STR(temp_data_file_path, innobase_temp_data_file_path, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Path to files and their sizes making temp-tablespace.", - NULL, NULL, NULL); + NULL, NULL, "ibtmp1:12M:autoextend"); static MYSQL_SYSVAR_STR(undo_directory, srv_undo_dir, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, @@ -20097,12 +19797,10 @@ static MYSQL_SYSVAR_BOOL(numa_interleave, 
srv_numa_interleave, NULL, NULL, FALSE); #endif /* HAVE_LIBNUMA */ -static MYSQL_SYSVAR_STR(change_buffering, innobase_change_buffering, +static MYSQL_SYSVAR_ENUM(change_buffering, innodb_change_buffering, PLUGIN_VAR_RQCMDARG, - "Buffer changes to reduce random access:" - " OFF, ON, inserting, deleting, changing, or purging.", - innodb_change_buffering_validate, - innodb_change_buffering_update, "all"); + "Buffer changes to secondary indexes.", + NULL, NULL, IBUF_USE_ALL, &innodb_change_buffering_typelib); static MYSQL_SYSVAR_UINT(change_buffer_max_size, srv_change_buffer_max_size, @@ -20152,14 +19850,8 @@ static my_bool innobase_disallow_writes = FALSE; An "update" method for innobase_disallow_writes variable. */ static void -innobase_disallow_writes_update( -/*============================*/ - THD* thd, /* in: thread handle */ - st_mysql_sys_var* var, /* in: pointer to system - variable */ - void* var_ptr, /* out: pointer to dynamic - variable */ - const void* save) /* in: temporary storage */ +innobase_disallow_writes_update(THD*, st_mysql_sys_var*, + void* var_ptr, const void* save) { *(my_bool*)var_ptr = *(my_bool*)save; ut_a(srv_allow_writes_event); @@ -20678,7 +20370,7 @@ maria_declare_plugin(innobase) plugin_author, "Supports transactions, row-level locking, foreign keys and encryption for tables", PLUGIN_LICENSE_GPL, - innobase_init, /* Plugin Init */ + innodb_init, /* Plugin Init */ NULL, /* Plugin Deinit */ INNODB_VERSION_SHORT, innodb_status_variables_export,/* status variables */ @@ -20751,13 +20443,13 @@ innodb_params_adjust() = MYSQL_SYSVAR_NAME(undo_logs).def_val = srv_available_undo_logs; MYSQL_SYSVAR_NAME(max_undo_log_size).max_val - = 1ULL << (32 + UNIV_PAGE_SIZE_SHIFT); + = 1ULL << (32U + srv_page_size_shift); MYSQL_SYSVAR_NAME(max_undo_log_size).min_val = MYSQL_SYSVAR_NAME(max_undo_log_size).def_val = ulonglong(SRV_UNDO_TABLESPACE_SIZE_IN_PAGES) - * srv_page_size; + << srv_page_size_shift; MYSQL_SYSVAR_NAME(max_undo_log_size).max_val - = 1ULL << (32 + UNIV_PAGE_SIZE_SHIFT); + = 1ULL << (32U + srv_page_size_shift); } /**************************************************************************** @@ -20959,7 +20651,7 @@ innobase_rename_vc_templ( if (is_part != NULL) { *is_part = '\0'; - tbnamelen = is_part - tbname; + tbnamelen = ulint(is_part - tbname); } dbnamelen = filename_to_tablename(dbname, t_dbname, @@ -21061,7 +20753,7 @@ innobase_get_computed_value( if (!heap || index->table->vc_templ->rec_len >= REC_VERSION_56_MAX_INDEX_COL_LEN) { if (*local_heap == NULL) { - *local_heap = mem_heap_create(UNIV_PAGE_SIZE); + *local_heap = mem_heap_create(srv_page_size); } buf = static_cast(mem_heap_alloc( @@ -21102,7 +20794,7 @@ innobase_get_computed_value( if (row_field->ext) { if (*local_heap == NULL) { - *local_heap = mem_heap_create(UNIV_PAGE_SIZE); + *local_heap = mem_heap_create(srv_page_size); } data = btr_copy_externally_stored_field( @@ -21240,7 +20932,7 @@ ib_senderrf( { va_list args; char* str = NULL; - const char* format = innobase_get_err_msg(code); + const char* format = my_get_err_msg(code); /* If the caller wants to push a message to the client then the caller must pass a valid session handle. */ @@ -21449,7 +21141,6 @@ ib_warn_row_too_big(const dict_table_t* table) /** Validate the requested buffer pool size. Also, reserve the necessary memory needed for buffer pool resize. 
@param[in] thd thread handle -@param[in] var pointer to system variable @param[out] save immediate result for update function @param[in] value incoming string @return 0 on success, 1 on failure. @@ -21458,13 +21149,11 @@ static int innodb_buffer_pool_size_validate( THD* thd, - struct st_mysql_sys_var* var, + st_mysql_sys_var*, void* save, struct st_mysql_value* value) { longlong intbuf; - - value->val_int(value, &intbuf); if (!srv_was_started) { @@ -21510,12 +21199,11 @@ innodb_buffer_pool_size_validate( return(1); } - ulint requested_buf_pool_size - = buf_pool_size_align(static_cast(intbuf)); + ulint requested_buf_pool_size = buf_pool_size_align(ulint(intbuf)); - *static_cast(save) = requested_buf_pool_size; + *static_cast(save) = requested_buf_pool_size; - if (srv_buf_pool_size == static_cast(intbuf)) { + if (srv_buf_pool_size == ulint(intbuf)) { buf_pool_mutex_exit_all(); /* nothing to do */ return(0); @@ -21563,7 +21251,7 @@ innodb_compression_algorithm_validate( for update function */ struct st_mysql_value* value) /*!< in: incoming string */ { - long compression_algorithm; + ulong compression_algorithm; DBUG_ENTER("innobase_compression_algorithm_validate"); if (check_sysvar_enum(thd, var, save, value)) { @@ -21685,7 +21373,7 @@ UNIV_INTERN void ib_push_warning( trx_t* trx, /*!< in: trx */ - ulint error, /*!< in: error code to push as warning */ + dberr_t error, /*!< in: error code to push as warning */ const char *format,/*!< in: warning message */ ...) { @@ -21699,9 +21387,9 @@ ib_push_warning( buf = (char *)my_malloc(MAX_BUF_SIZE, MYF(MY_WME)); vsprintf(buf,format, args); - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - convert_error_code_to_mysql((dberr_t)error, 0, thd), - buf); + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + uint(convert_error_code_to_mysql(error, 0, thd)), buf); my_free(buf); va_end(args); } @@ -21713,7 +21401,7 @@ UNIV_INTERN void ib_push_warning( void* ithd, /*!< in: thd */ - ulint error, /*!< in: error code to push as warning */ + dberr_t error, /*!< in: error code to push as warning */ const char *format,/*!< in: warning message */ ...) { @@ -21731,9 +21419,9 @@ ib_push_warning( buf = (char *)my_malloc(MAX_BUF_SIZE, MYF(MY_WME)); vsprintf(buf,format, args); - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - convert_error_code_to_mysql((dberr_t)error, 0, thd), - buf); + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + uint(convert_error_code_to_mysql(error, 0, thd)), buf); my_free(buf); va_end(args); } diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index ae4da973b4f..d4fda9d43c5 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -132,8 +132,6 @@ public: double read_time(uint index, uint ranges, ha_rows rows); - longlong get_memory_buffer_size() const; - int delete_all_rows(); int write_row(uchar * buf); @@ -188,12 +186,6 @@ public: FT_INFO* ft_init_ext(uint flags, uint inx, String* key); - FT_INFO* ft_init_ext_with_hints( - uint inx, - String* key, - void* hints); - //Ft_hints* hints); - int ft_read(uchar* buf); void position(const uchar *record); @@ -306,12 +298,24 @@ public: by ALTER TABLE and holding data used during in-place alter. @retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported - @retval HA_ALTER_INPLACE_NO_LOCK Supported - @retval HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE - Supported, but requires lock during main phase and - exclusive lock during prepare phase. 
- @retval HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE - Supported, prepare phase requires exclusive lock. */ + @retval HA_ALTER_INPLACE_INSTANT + MDL_EXCLUSIVE is needed for executing prepare_inplace_alter_table() + and commit_inplace_alter_table(). inplace_alter_table() + will not be called. + @retval HA_ALTER_INPLACE_COPY_NO_LOCK + MDL_EXCLUSIVE in prepare_inplace_alter_table(), which can be downgraded + to LOCK=NONE for rebuilding the table in inplace_alter_table() + @retval HA_ALTER_INPLACE_COPY_LOCK + MDL_EXCLUSIVE in prepare_inplace_alter_table(), which can be downgraded + to LOCK=SHARED for rebuilding the table in inplace_alter_table() + @retval HA_ALTER_INPLACE_NOCOPY_NO_LOCK + MDL_EXCLUSIVE in prepare_inplace_alter_table(), which can be downgraded + to LOCK=NONE for inplace_alter_table() which will not rebuild the table + @retval HA_ALTER_INPLACE_NOCOPY_LOCK + MDL_EXCLUSIVE in prepare_inplace_alter_table(), which can be downgraded + to LOCK=SHARED for inplace_alter_table() which will not rebuild + the table. */ + enum_alter_inplace_result check_if_supported_inplace_alter( TABLE* altered_table, Alter_inplace_info* ha_alter_info); @@ -913,19 +917,6 @@ innodb_base_col_setup_for_stored( create_table_info_t::normalize_table_name_low(norm_name, name, FALSE) #endif /* _WIN32 */ -/** Converts an InnoDB error code to a MySQL error code. -Also tells to MySQL about a possible transaction rollback inside InnoDB caused -by a lock wait timeout or a deadlock. -@param[in] error InnoDB error code. -@param[in] flags InnoDB table flags or 0. -@param[in] thd MySQL thread or NULL. -@return MySQL error code */ -int -convert_error_code_to_mysql( - dberr_t error, - ulint flags, - THD* thd); - /** Converts a search mode flag understood by MySQL to a flag understood by InnoDB. @param[in] find_flag MySQL search mode flag. diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 1e0c4087b2d..769eced242c 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2005, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2005, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2013, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under @@ -72,6 +72,11 @@ static const alter_table_operations INNOBASE_ONLINE_CREATE = ALTER_ADD_NON_UNIQUE_NON_PRIM_INDEX | ALTER_ADD_UNIQUE_INDEX; +/** Operations that require filling in default values for columns */ +static const alter_table_operations INNOBASE_DEFAULTS + = ALTER_COLUMN_NOT_NULLABLE + | ALTER_ADD_STORED_BASE_COLUMN; + /** Operations for rebuilding a table in place */ static const alter_table_operations INNOBASE_ALTER_REBUILD = ALTER_ADD_PK_INDEX @@ -79,10 +84,9 @@ static const alter_table_operations INNOBASE_ALTER_REBUILD | ALTER_CHANGE_CREATE_OPTION /* CHANGE_CREATE_OPTION needs to check create_option_need_rebuild() */ | ALTER_COLUMN_NULLABLE - | ALTER_COLUMN_NOT_NULLABLE + | INNOBASE_DEFAULTS | ALTER_STORED_COLUMN_ORDER | ALTER_DROP_STORED_COLUMN - | ALTER_ADD_STORED_BASE_COLUMN | ALTER_RECREATE_TABLE /* | ALTER_STORED_COLUMN_TYPE @@ -99,7 +103,6 @@ static const alter_table_operations INNOBASE_ALTER_DATA /** Operations for altering a table that InnoDB does not care about */ static const alter_table_operations INNOBASE_INPLACE_IGNORE = ALTER_COLUMN_DEFAULT - | ALTER_CHANGE_COLUMN_DEFAULT | ALTER_PARTITIONED | ALTER_COLUMN_COLUMN_FORMAT | ALTER_COLUMN_STORAGE_TYPE @@ -115,17 +118,21 @@ static const alter_table_operations INNOBASE_FOREIGN_OPERATIONS /** Operations that InnoDB cares about and can perform without rebuild */ static const alter_table_operations INNOBASE_ALTER_NOREBUILD = INNOBASE_ONLINE_CREATE - | INNOBASE_FOREIGN_OPERATIONS | ALTER_DROP_NON_UNIQUE_NON_PRIM_INDEX | ALTER_DROP_UNIQUE_INDEX #ifdef MYSQL_RENAME_INDEX | ALTER_RENAME_INDEX #endif + ; + +/** Operations that can be performed instantly, without inplace_alter_table() */ +static const alter_table_operations INNOBASE_ALTER_INSTANT + = ALTER_VIRTUAL_COLUMN_ORDER | ALTER_COLUMN_NAME - | ALTER_COLUMN_EQUAL_PACK_LENGTH | ALTER_ADD_VIRTUAL_COLUMN - | ALTER_DROP_VIRTUAL_COLUMN - | ALTER_VIRTUAL_COLUMN_ORDER; + | INNOBASE_FOREIGN_OPERATIONS + | ALTER_COLUMN_EQUAL_PACK_LENGTH + | ALTER_DROP_VIRTUAL_COLUMN; struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx { @@ -173,8 +180,8 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx const char** col_names; /** added AUTO_INCREMENT column position, or ULINT_UNDEFINED */ const ulint add_autoinc; - /** default values of ADD COLUMN, or NULL */ - const dtuple_t* add_cols; + /** default values of ADD and CHANGE COLUMN, or NULL */ + const dtuple_t* defaults; /** autoinc sequence to use */ ib_sequence_t sequence; /** temporary table name to use for old table when renaming tables */ @@ -200,6 +207,9 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx /** original column names of the table */ const char* const old_col_names; + /** Whether alter ignore issued. 
*/ + const bool ignore; + ha_innobase_inplace_ctx(row_prebuilt_t*& prebuilt_arg, dict_index_t** drop_arg, ulint num_to_drop_arg, @@ -216,7 +226,7 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx ulint add_autoinc_arg, ulonglong autoinc_col_min_value_arg, ulonglong autoinc_col_max_value_arg, - ulint num_to_drop_vcol_arg) : + bool ignore_flag) : inplace_alter_handler_ctx(), prebuilt (prebuilt_arg), add_index (0), add_key_numbers (0), num_to_add_index (0), @@ -229,7 +239,7 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx new_table (new_table_arg), instant_table (0), col_map (0), col_names (col_names_arg), add_autoinc (add_autoinc_arg), - add_cols (0), + defaults (0), sequence(prebuilt->trx->mysql_thd, autoinc_col_min_value_arg, autoinc_col_max_value_arg), tmp_name (0), @@ -243,7 +253,8 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx m_stage(NULL), old_n_cols(prebuilt_arg->table->n_cols), old_cols(prebuilt_arg->table->cols), - old_col_names(prebuilt_arg->table->col_names) + old_col_names(prebuilt_arg->table->col_names), + ignore(ignore_flag) { ut_ad(old_n_cols >= DATA_N_SYS_COLS); #ifdef UNIV_DEBUG @@ -518,7 +529,8 @@ innobase_need_rebuild( const Alter_inplace_info* ha_alter_info, const TABLE* table) { - if ((ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE) + if ((ha_alter_info->handler_flags & ~(INNOBASE_INPLACE_IGNORE + | INNOBASE_ALTER_INSTANT)) == ALTER_CHANGE_CREATE_OPTION) { return create_option_need_rebuild(ha_alter_info, table); } @@ -674,30 +686,165 @@ instant_alter_column_possible( || !create_option_need_rebuild(ha_alter_info, table); } +/** Check whether the non-const default value for the field +@param[in] field field which could be added or changed +@return true if the non-const default is present. */ +static bool is_non_const_value(Field* field) +{ + return field->default_value + && field->default_value->flags + & uint(~(VCOL_SESSION_FUNC | VCOL_TIME_FUNC)); +} + +/** Set default value for the field. +@param[in] field field which could be added or changed +@return true if the default value is set. */ +static bool set_default_value(Field* field) +{ + /* The added/changed NOT NULL column lacks a DEFAULT value, + or the DEFAULT is the same for all rows. + (Time functions, such as CURRENT_TIMESTAMP(), + are evaluated from a timestamp that is assigned + at the start of the statement. Session + functions, such as USER(), always evaluate the + same within a statement.) */ + + ut_ad(!is_non_const_value(field)); + + /* Compute the DEFAULT values of non-constant columns + (VCOL_SESSION_FUNC | VCOL_TIME_FUNC). */ + switch (field->set_default()) { + case 0: /* OK */ + case 3: /* DATETIME to TIME or DATE conversion */ + return true; + case -1: /* OOM, or GEOMETRY type mismatch */ + case 1: /* A number adjusted to the min/max value */ + case 2: /* String truncation, or conversion problem */ + break; + } + + return false; +} + +/** Check whether the table has the FTS_DOC_ID column +@param[in] table InnoDB table with fulltext index +@param[in] altered_table MySQL table with fulltext index +@param[out] fts_doc_col_no The column number for Doc ID, + or ULINT_UNDEFINED if it is of wrong type +@param[out] num_v Number of virtual column +@param[in] check_only check only whether fts doc id exist. 
+@return whether there exists an FTS_DOC_ID column */ +static +bool +innobase_fts_check_doc_id_col( + const dict_table_t* table, + const TABLE* altered_table, + ulint* fts_doc_col_no, + ulint* num_v, + bool check_only=false) +{ + *fts_doc_col_no = ULINT_UNDEFINED; + + const uint n_cols = altered_table->s->fields; + ulint i; + int err = 0; + *num_v = 0; + + for (i = 0; i < n_cols; i++) { + const Field* field = altered_table->field[i]; + + if (innobase_is_v_fld(field)) { + (*num_v)++; + } + + if (my_strcasecmp(system_charset_info, + field->field_name.str, FTS_DOC_ID_COL_NAME)) { + continue; + } + + if (strcmp(field->field_name.str, FTS_DOC_ID_COL_NAME)) { + err = ER_WRONG_COLUMN_NAME; + } else if (field->type() != MYSQL_TYPE_LONGLONG + || field->pack_length() != 8 + || field->real_maybe_null() + || !(field->flags & UNSIGNED_FLAG) + || innobase_is_v_fld(field)) { + err = ER_INNODB_FT_WRONG_DOCID_COLUMN; + } else { + *fts_doc_col_no = i - *num_v; + } + + if (err && !check_only) { + my_error(err, MYF(0), field->field_name.str); + } + + return(true); + } + + if (!table) { + return(false); + } + + /* Not to count the virtual columns */ + i -= *num_v; + + for (; i + DATA_N_SYS_COLS < (uint) table->n_cols; i++) { + const char* name = dict_table_get_col_name(table, i); + + if (strcmp(name, FTS_DOC_ID_COL_NAME) == 0) { +#ifdef UNIV_DEBUG + const dict_col_t* col; + + col = dict_table_get_nth_col(table, i); + + /* Because the FTS_DOC_ID does not exist in + the MySQL data dictionary, this must be the + internally created FTS_DOC_ID column. */ + ut_ad(col->mtype == DATA_INT); + ut_ad(col->len == 8); + ut_ad(col->prtype & DATA_NOT_NULL); + ut_ad(col->prtype & DATA_UNSIGNED); +#endif /* UNIV_DEBUG */ + *fts_doc_col_no = i; + return(true); + } + } + + return(false); +} + /** Check if InnoDB supports a particular alter table in-place @param altered_table TABLE object for new version of table. @param ha_alter_info Structure describing changes to be done by ALTER TABLE and holding data used during in-place alter. @retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported -@retval HA_ALTER_INPLACE_NO_LOCK Supported -@retval HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE Supported, but requires -lock during main phase and exclusive lock during prepare phase. -@retval HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE Supported, prepare phase -requires exclusive lock (any transactions that have accessed the table -must commit or roll back first, and no transactions can access the table -while prepare_inplace_alter_table() is executing) +@retval HA_ALTER_INPLACE_INSTANT +MDL_EXCLUSIVE is needed for executing prepare_inplace_alter_table() +and commit_inplace_alter_table(). inplace_alter_table() will not be called. 
+@retval HA_ALTER_INPLACE_COPY_NO_LOCK +MDL_EXCLUSIVE in prepare_inplace_alter_table(), which can be downgraded to +LOCK=NONE for rebuilding the table in inplace_alter_table() +@retval HA_ALTER_INPLACE_COPY_LOCK +MDL_EXCLUSIVE in prepare_inplace_alter_table(), which can be downgraded to +LOCK=SHARED for rebuilding the table in inplace_alter_table() +@retval HA_ALTER_INPLACE_NOCOPY_NO_LOCK +MDL_EXCLUSIVE in prepare_inplace_alter_table(), which can be downgraded to +LOCK=NONE for inplace_alter_table() which will not rebuild the table +@retval HA_ALTER_INPLACE_NOCOPY_LOCK +MDL_EXCLUSIVE in prepare_inplace_alter_table(), which can be downgraded to +LOCK=SHARED for inplace_alter_table() which will not rebuild the table */ enum_alter_inplace_result ha_innobase::check_if_supported_inplace_alter( -/*==========================================*/ TABLE* altered_table, Alter_inplace_info* ha_alter_info) { DBUG_ENTER("check_if_supported_inplace_alter"); - if ((table->versioned(VERS_TIMESTAMP) || altered_table->versioned(VERS_TIMESTAMP)) + if ((table->versioned(VERS_TIMESTAMP) + || altered_table->versioned(VERS_TIMESTAMP)) && innobase_need_rebuild(ha_alter_info, table)) { ha_alter_info->unsupported_reason = "Not implemented for system-versioned tables"; @@ -713,7 +860,7 @@ ha_innobase::check_if_supported_inplace_alter( if (high_level_read_only) { ha_alter_info->unsupported_reason = - innobase_get_err_msg(ER_READ_ONLY_MODE); + my_get_err_msg(ER_READ_ONLY_MODE); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -724,7 +871,7 @@ ha_innobase::check_if_supported_inplace_alter( return an error too. This is how we effectively deny adding too many columns to a table. */ ha_alter_info->unsupported_reason = - innobase_get_err_msg(ER_TOO_MANY_FIELDS); + my_get_err_msg(ER_TOO_MANY_FIELDS); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -732,14 +879,16 @@ ha_innobase::check_if_supported_inplace_alter( if (ha_alter_info->handler_flags & ~(INNOBASE_INPLACE_IGNORE + | INNOBASE_ALTER_INSTANT | INNOBASE_ALTER_NOREBUILD | INNOBASE_ALTER_REBUILD)) { if (ha_alter_info->handler_flags & ALTER_STORED_COLUMN_TYPE) { - ha_alter_info->unsupported_reason = innobase_get_err_msg( + ha_alter_info->unsupported_reason = my_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE); } + DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -747,7 +896,7 @@ ha_innobase::check_if_supported_inplace_alter( check_foreigns is turned off */ if ((ha_alter_info->handler_flags & ALTER_ADD_FOREIGN_KEY) && m_prebuilt->trx->check_foreigns) { - ha_alter_info->unsupported_reason = innobase_get_err_msg( + ha_alter_info->unsupported_reason = my_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -761,7 +910,7 @@ ha_innobase::check_if_supported_inplace_alter( #endif if (!(ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE)) { - DBUG_RETURN(HA_ALTER_INPLACE_NO_LOCK); + DBUG_RETURN(HA_ALTER_INPLACE_INSTANT); } /* Only support NULL -> NOT NULL change if strict table sql_mode @@ -771,7 +920,7 @@ ha_innobase::check_if_supported_inplace_alter( if ((ha_alter_info->handler_flags & ALTER_COLUMN_NOT_NULLABLE) && !thd_is_strict_mode(m_user_thd)) { - ha_alter_info->unsupported_reason = innobase_get_err_msg( + ha_alter_info->unsupported_reason = my_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -781,7 +930,7 @@ ha_innobase::check_if_supported_inplace_alter( if ((ha_alter_info->handler_flags & (ALTER_ADD_PK_INDEX | 
ALTER_DROP_PK_INDEX)) == ALTER_DROP_PK_INDEX) { - ha_alter_info->unsupported_reason = innobase_get_err_msg( + ha_alter_info->unsupported_reason = my_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -798,7 +947,7 @@ ha_innobase::check_if_supported_inplace_alter( if (UNIV_UNLIKELY(my_primary_key >= MAX_KEY) && !dict_index_is_auto_gen_clust( dict_table_get_first_index(m_prebuilt->table))) { - ha_alter_info->unsupported_reason = innobase_get_err_msg( + ha_alter_info->unsupported_reason = my_get_err_msg( ER_PRIMARY_CANT_HAVE_NULL); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -817,8 +966,9 @@ ha_innobase::check_if_supported_inplace_alter( */ for (ulint i = 0, icol= 0; i < table->s->fields; i++) { const Field* field = table->field[i]; - const dict_col_t* col = dict_table_get_nth_col(m_prebuilt->table, icol); - ulint unsigned_flag; + const dict_col_t* col = dict_table_get_nth_col( + m_prebuilt->table, icol); + ulint unsigned_flag; if (!field->stored_in_db()) { continue; @@ -826,7 +976,8 @@ ha_innobase::check_if_supported_inplace_alter( icol++; - if (col->mtype != get_innobase_type_from_mysql_type(&unsigned_flag, field)) { + if (col->mtype != get_innobase_type_from_mysql_type( + &unsigned_flag, field)) { DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -843,7 +994,7 @@ ha_innobase::check_if_supported_inplace_alter( use "Copy" method. */ if (m_prebuilt->table->dict_frm_mismatch) { - ha_alter_info->unsupported_reason = innobase_get_err_msg( + ha_alter_info->unsupported_reason = my_get_err_msg( ER_NO_SUCH_INDEX); ib_push_frm_error(m_user_thd, m_prebuilt->table, altered_table, n_indexes, true); @@ -894,7 +1045,7 @@ ha_innobase::check_if_supported_inplace_alter( /* We should be able to do the operation in-place. See if we can do it online (LOCK=NONE). */ - bool online = true; + bool online = true; List_iterator_fast cf_it( ha_alter_info->alter_info->create_list); @@ -916,7 +1067,8 @@ ha_innobase::check_if_supported_inplace_alter( } for (KEY_PART_INFO* key_part = new_key->key_part; - key_part < new_key->key_part + new_key->user_defined_key_parts; + key_part < (new_key->key_part + + new_key->user_defined_key_parts); key_part++) { const Create_field* new_field; @@ -960,7 +1112,7 @@ ha_innobase::check_if_supported_inplace_alter( system_charset_info, key_part->field->field_name.str, FTS_DOC_ID_COL_NAME)) { - ha_alter_info->unsupported_reason = innobase_get_err_msg( + ha_alter_info->unsupported_reason = my_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -975,8 +1127,12 @@ ha_innobase::check_if_supported_inplace_alter( column values during online ALTER. 
*/ DBUG_ASSERT(key_part->field == altered_table -> found_next_number_field); - ha_alter_info->unsupported_reason = innobase_get_err_msg( - ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC); + + if (ha_alter_info->online) { + ha_alter_info->unsupported_reason = my_get_err_msg( + ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC); + } + online = false; } @@ -992,20 +1148,25 @@ ha_innobase::check_if_supported_inplace_alter( DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } - ha_alter_info->unsupported_reason = - MSG_UNSUPPORTED_ALTER_ONLINE_ON_VIRTUAL_COLUMN; + if (ha_alter_info->online + && !ha_alter_info->unsupported_reason) { + ha_alter_info->unsupported_reason = + MSG_UNSUPPORTED_ALTER_ONLINE_ON_VIRTUAL_COLUMN; + } + online = false; } } } - DBUG_ASSERT(!m_prebuilt->table->fts || m_prebuilt->table->fts->doc_col - <= table->s->fields); - DBUG_ASSERT(!m_prebuilt->table->fts || m_prebuilt->table->fts->doc_col - < dict_table_get_n_user_cols(m_prebuilt->table)); + DBUG_ASSERT(!m_prebuilt->table->fts + || (m_prebuilt->table->fts->doc_col <= table->s->fields)); - if (m_prebuilt->table->fts - && innobase_fulltext_exist(altered_table)) { + DBUG_ASSERT(!m_prebuilt->table->fts + || (m_prebuilt->table->fts->doc_col + < dict_table_get_n_user_cols(m_prebuilt->table))); + + if (m_prebuilt->table->fts && innobase_fulltext_exist(altered_table)) { /* FULLTEXT indexes are supposed to remain. */ /* Disallow DROP INDEX FTS_DOC_ID_INDEX */ @@ -1014,7 +1175,7 @@ ha_innobase::check_if_supported_inplace_alter( system_charset_info, ha_alter_info->index_drop_buffer[i]->name.str, FTS_DOC_ID_INDEX_NAME)) { - ha_alter_info->unsupported_reason = innobase_get_err_msg( + ha_alter_info->unsupported_reason = my_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -1034,7 +1195,7 @@ ha_innobase::check_if_supported_inplace_alter( system_charset_info, (*fp)->field_name.str, FTS_DOC_ID_COL_NAME)) { - ha_alter_info->unsupported_reason = innobase_get_err_msg( + ha_alter_info->unsupported_reason = my_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -1080,20 +1241,26 @@ ha_innobase::check_if_supported_inplace_alter( /* No DEFAULT value is specified. We can report errors for any NULL values for - the TIMESTAMP. + the TIMESTAMP. */ - FIXME: Allow any DEFAULT - expression whose value does - not change during ALTER TABLE. - This would require a fix in - row_merge_read_clustered_index() - to try to replace the DEFAULT - value before reporting - DB_INVALID_NULL. */ goto next_column; } break; default: + /* Changing from NULL to NOT NULL and + set the default constant values. */ + if (f->real_maybe_null() + && !(*af)->real_maybe_null()) { + + if (is_non_const_value(*af)) { + break; + } + + if (!set_default_value(*af)) { + break; + } + } + /* For any other data type, NULL values are not converted. (An AUTO_INCREMENT attribute cannot @@ -1107,34 +1274,18 @@ ha_innobase::check_if_supported_inplace_alter( } ha_alter_info->unsupported_reason - = innobase_get_err_msg( + = my_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL); - } else if (!(*af)->default_value - || !((*af)->default_value->flags - & ~(VCOL_SESSION_FUNC | VCOL_TIME_FUNC))) { + } else if (!is_non_const_value(*af)) { + n_add_cols++; if (af < &altered_table->field[table_share->fields]) { add_column_not_last = true; } - /* The added NOT NULL column lacks a DEFAULT value, - or the DEFAULT is the same for all rows. 
- (Time functions, such as CURRENT_TIMESTAMP(), - are evaluated from a timestamp that is assigned - at the start of the statement. Session - functions, such as USER(), always evaluate the - same within a statement.) */ - /* Compute the DEFAULT values of non-constant columns - (VCOL_SESSION_FUNC | VCOL_TIME_FUNC). */ - switch ((*af)->set_default()) { - case 0: /* OK */ - case 3: /* DATETIME to TIME or DATE conversion */ + if (set_default_value(*af)) { goto next_column; - case -1: /* OOM, or GEOMETRY type mismatch */ - case 1: /* A number adjusted to the min/max value */ - case 2: /* String truncation, or conversion problem */ - break; } } @@ -1149,16 +1300,17 @@ next_column: == n_stored_cols && m_prebuilt->table->supports_instant() && instant_alter_column_possible(ha_alter_info, table)) { - /* We can perform instant ADD COLUMN, because all - columns are going to be added after existing ones - (and not after hidden InnoDB columns, such as FTS_DOC_ID). */ - /* MDEV-14246 FIXME: return HA_ALTER_INPLACE_NO_LOCK and - perform all work in ha_innobase::commit_inplace_alter_table(), - to avoid an unnecessary MDL upgrade/downgrade cycle. */ - DBUG_RETURN(HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE); + DBUG_RETURN(HA_ALTER_INPLACE_INSTANT); } + if (!(ha_alter_info->handler_flags & ~(INNOBASE_ALTER_INSTANT + | INNOBASE_INPLACE_IGNORE))) { + DBUG_RETURN(HA_ALTER_INPLACE_INSTANT); + } + + bool fts_need_rebuild = false; + if (!online) { /* We already determined that only a non-locking operation is possible. */ @@ -1175,26 +1327,31 @@ next_column: refuse to rebuild the table natively altogether. */ if (m_prebuilt->table->fts) { cannot_create_many_fulltext_index: - ha_alter_info->unsupported_reason = innobase_get_err_msg( - ER_INNODB_FT_LIMIT); + ha_alter_info->unsupported_reason = + my_get_err_msg(ER_INNODB_FT_LIMIT); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } - if (innobase_spatial_exist(altered_table)) { - ha_alter_info->unsupported_reason = - innobase_get_err_msg( - ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS); - } else if (!innobase_fulltext_exist(altered_table)) { - /* MDEV-14341 FIXME: Remove this limitation. */ - ha_alter_info->unsupported_reason = - "online rebuild with indexed virtual columns"; - } else { - ha_alter_info->unsupported_reason = - innobase_get_err_msg( - ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS); + if (ha_alter_info->online + && !ha_alter_info->unsupported_reason) { + + if (innobase_spatial_exist(altered_table)) { + ha_alter_info->unsupported_reason = my_get_err_msg( + ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS); + } else if (!innobase_fulltext_exist(altered_table)) { + /* MDEV-14341 FIXME: Remove this limitation. */ + ha_alter_info->unsupported_reason = + "online rebuild with indexed virtual columns"; + } else { + ha_alter_info->unsupported_reason = my_get_err_msg( + ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS); + } } - } else if ((ha_alter_info->handler_flags - & ALTER_ADD_NON_UNIQUE_NON_PRIM_INDEX)) { + + } + + if (ha_alter_info->handler_flags + & ALTER_ADD_NON_UNIQUE_NON_PRIM_INDEX) { /* ADD FULLTEXT|SPATIAL INDEX requires a lock. 
We could do ADD FULLTEXT INDEX without a lock if the @@ -1219,30 +1376,72 @@ cannot_create_many_fulltext_index: if (add_fulltext) { goto cannot_create_many_fulltext_index; } + add_fulltext = true; - ha_alter_info->unsupported_reason = innobase_get_err_msg( - ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS); + if (ha_alter_info->online + && !ha_alter_info->unsupported_reason) { + ha_alter_info->unsupported_reason = my_get_err_msg( + ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS); + } + online = false; + + /* Full text search index exists, check + whether the table already has DOC ID column. + If not, InnoDB have to rebuild the table to + add a Doc ID hidden column and change + primary index. */ + ulint fts_doc_col_no; + ulint num_v = 0; + + fts_need_rebuild = + !innobase_fts_check_doc_id_col( + m_prebuilt->table, + altered_table, + &fts_doc_col_no, &num_v, true); } + if (online && (key->flags & HA_SPATIAL)) { - ha_alter_info->unsupported_reason = innobase_get_err_msg( - ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS); + + if (ha_alter_info->online) { + ha_alter_info->unsupported_reason = my_get_err_msg( + ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS); + } + online = false; } } } // FIXME: implement Online DDL for system-versioned tables - if ((table->versioned(VERS_TRX_ID) || altered_table->versioned(VERS_TRX_ID)) + if ((table->versioned(VERS_TRX_ID) + || altered_table->versioned(VERS_TRX_ID)) && innobase_need_rebuild(ha_alter_info, table)) { - ha_alter_info->unsupported_reason = - "Not implemented for system-versioned tables"; + + if (ha_alter_info->online) { + ha_alter_info->unsupported_reason = + "Not implemented for system-versioned tables"; + } + online = false; } + if (fts_need_rebuild || innobase_need_rebuild(ha_alter_info, table)) { + DBUG_RETURN(online + ? HA_ALTER_INPLACE_COPY_NO_LOCK + : HA_ALTER_INPLACE_COPY_LOCK); + } + + if (ha_alter_info->unsupported_reason) { + } else if (ha_alter_info->handler_flags & INNOBASE_ONLINE_CREATE) { + ha_alter_info->unsupported_reason = "ADD INDEX"; + } else { + ha_alter_info->unsupported_reason = "DROP INDEX"; + } + DBUG_RETURN(online - ? HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE - : HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE); + ? 
HA_ALTER_INPLACE_NOCOPY_NO_LOCK + : HA_ALTER_INPLACE_NOCOPY_LOCK); } /*************************************************************//** @@ -1481,12 +1680,10 @@ no_match: Find an index whose first fields are the columns in the array in the same order and is not marked for deletion @return matching index, NULL if not found */ -static MY_ATTRIBUTE((nonnull(1,2,6), warn_unused_result)) +static MY_ATTRIBUTE((nonnull(1,5), warn_unused_result)) dict_index_t* innobase_find_fk_index( /*===================*/ - Alter_inplace_info* ha_alter_info, - /*!< in: alter table info */ dict_table_t* table, /*!< in: table */ const char** col_names, /*!< in: column names, or NULL @@ -1667,7 +1864,6 @@ innobase_get_foreign_key_info( } index = innobase_find_fk_index( - ha_alter_info, table, col_names, drop_index, n_drop_index, column_names, i); @@ -2444,92 +2640,6 @@ innobase_create_index_def( DBUG_VOID_RETURN; } -/*******************************************************************//** -Check whether the table has the FTS_DOC_ID column -@return whether there exists an FTS_DOC_ID column */ -static -bool -innobase_fts_check_doc_id_col( -/*==========================*/ - const dict_table_t* table, /*!< in: InnoDB table with - fulltext index */ - const TABLE* altered_table, - /*!< in: MySQL table with - fulltext index */ - ulint* fts_doc_col_no, - /*!< out: The column number for - Doc ID, or ULINT_UNDEFINED - if it is of wrong type */ - ulint* num_v) /*!< out: number of virtual column */ -{ - *fts_doc_col_no = ULINT_UNDEFINED; - - const uint n_cols = altered_table->s->fields; - ulint i; - - *num_v = 0; - - for (i = 0; i < n_cols; i++) { - const Field* field = altered_table->field[i]; - - if (innobase_is_v_fld(field)) { - (*num_v)++; - } - - if (my_strcasecmp(system_charset_info, - field->field_name.str, FTS_DOC_ID_COL_NAME)) { - continue; - } - - if (strcmp(field->field_name.str, FTS_DOC_ID_COL_NAME)) { - my_error(ER_WRONG_COLUMN_NAME, MYF(0), - field->field_name.str); - } else if (field->type() != MYSQL_TYPE_LONGLONG - || field->pack_length() != 8 - || field->real_maybe_null() - || !(field->flags & UNSIGNED_FLAG) - || innobase_is_v_fld(field)) { - my_error(ER_INNODB_FT_WRONG_DOCID_COLUMN, MYF(0), - field->field_name.str); - } else { - *fts_doc_col_no = i - *num_v; - } - - return(true); - } - - if (!table) { - return(false); - } - - /* Not to count the virtual columns */ - i -= *num_v; - - for (; i + DATA_N_SYS_COLS < (uint) table->n_cols; i++) { - const char* name = dict_table_get_col_name(table, i); - - if (strcmp(name, FTS_DOC_ID_COL_NAME) == 0) { -#ifdef UNIV_DEBUG - const dict_col_t* col; - - col = dict_table_get_nth_col(table, i); - - /* Because the FTS_DOC_ID does not exist in - the MySQL data dictionary, this must be the - internally created FTS_DOC_ID column. */ - ut_ad(col->mtype == DATA_INT); - ut_ad(col->len == 8); - ut_ad(col->prtype & DATA_NOT_NULL); - ut_ad(col->prtype & DATA_UNSIGNED); -#endif /* UNIV_DEBUG */ - *fts_doc_col_no = i; - return(true); - } - } - - return(false); -} - /*******************************************************************//** Check whether the table has a unique index with FTS_DOC_ID_INDEX_NAME on the Doc ID column. 
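The hunk above removes the old definition of innobase_fts_check_doc_id_col(); the patch re-adds the same function near the top of the file with an extra check_only parameter. As a reading aid, the following is a minimal standalone C++ sketch of the rule that function enforces on a user-supplied FTS_DOC_ID column; ColumnDesc, DocIdCheck, check_fts_doc_id() and eq_ignore_case() are illustrative names only, not MariaDB types or APIs.

#include <cctype>
#include <cstring>

/* Illustrative stand-ins, not MariaDB/InnoDB types. */
struct ColumnDesc {
	const char*	name;
	bool		is_bigint;	/* MYSQL_TYPE_LONGLONG, 8-byte storage */
	bool		nullable;
	bool		is_unsigned;
	bool		is_virtual;
};

enum class DocIdCheck { NOT_DOC_ID, WRONG_NAME_CASE, WRONG_TYPE, OK };

/* ASCII case-insensitive comparison, standing in for my_strcasecmp()
(which in the server also takes a character set argument). */
static bool eq_ignore_case(const char* a, const char* b)
{
	for (; *a && *b; a++, b++) {
		if (std::tolower((unsigned char) *a)
		    != std::tolower((unsigned char) *b)) {
			return false;
		}
	}
	return *a == *b;
}

/* The FTS_DOC_ID rule mirrored from the check above: the name must match
FTS_DOC_ID case-sensitively, and the column must be a stored (non-virtual)
BIGINT UNSIGNED NOT NULL. */
static DocIdCheck check_fts_doc_id(const ColumnDesc& col)
{
	if (!eq_ignore_case(col.name, "FTS_DOC_ID")) {
		return DocIdCheck::NOT_DOC_ID;	/* some other column */
	}
	if (std::strcmp(col.name, "FTS_DOC_ID") != 0) {
		return DocIdCheck::WRONG_NAME_CASE;
	}
	if (!col.is_bigint || col.nullable || !col.is_unsigned
	    || col.is_virtual) {
		return DocIdCheck::WRONG_TYPE;
	}
	return DocIdCheck::OK;
}

In the server code the last two outcomes correspond to ER_WRONG_COLUMN_NAME and ER_INNODB_FT_WRONG_DOCID_COLUMN; with the new check_only=true argument the moved function only records the error code instead of calling my_error(), which is how the fts_need_rebuild probe later in check_if_supported_inplace_alter() uses it.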
@@ -2603,7 +2713,7 @@ innobase_fts_check_doc_id_index( && field->col->mtype == DATA_INT && field->col->len == 8 && field->col->prtype & DATA_NOT_NULL - && !dict_col_is_virtual(field->col)) { + && !field->col->is_virtual()) { if (fts_doc_col_no) { *fts_doc_col_no = dict_col_get_no(field->col); } @@ -2744,14 +2854,9 @@ innobase_create_key_defs( ulint primary_key_number; if (new_primary) { - if (n_add == 0) { - DBUG_ASSERT(got_default_clust); - DBUG_ASSERT(altered_table->s->primary_key - == 0); - primary_key_number = 0; - } else { - primary_key_number = *add; - } + DBUG_ASSERT(n_add || got_default_clust); + DBUG_ASSERT(n_add || !altered_table->s->primary_key); + primary_key_number = altered_table->s->primary_key; } else if (got_default_clust) { /* Create the GEN_CLUST_INDEX */ index_def_t* index = indexdef++; @@ -2761,7 +2866,7 @@ innobase_create_key_defs( index->ind_type = DICT_CLUSTERED; index->name = innobase_index_reserve_name; index->rebuild = true; - index->key_number = ~0; + index->key_number = ~0U; primary_key_number = ULINT_UNDEFINED; goto created_clustered; } else { @@ -3102,12 +3207,11 @@ column that is being dropped or modified to NOT NULL. @retval true Not allowed (will call my_error()) @retval false Allowed */ -MY_ATTRIBUTE((pure, nonnull(1,2,3,4), warn_unused_result)) +MY_ATTRIBUTE((pure, nonnull(1,2,3), warn_unused_result)) static bool innobase_check_foreigns( Alter_inplace_info* ha_alter_info, - const TABLE* altered_table, const TABLE* old_table, const dict_table_t* user_table, dict_foreign_t** drop_fk, @@ -3179,7 +3283,7 @@ adding columns. @param table MySQL table as it is before the ALTER operation @param new_table InnoDB table corresponding to MySQL altered_table @param old_table InnoDB table corresponding to MYSQL table -@param add_cols Default values for ADD COLUMN, or NULL if no ADD COLUMN +@param defaults Default values for ADD COLUMN, or NULL if no ADD COLUMN @param heap Memory heap where allocated @return array of integers, mapping column numbers in the table to column numbers in altered_table */ @@ -3192,7 +3296,7 @@ innobase_build_col_map( const TABLE* table, const dict_table_t* new_table, const dict_table_t* old_table, - dtuple_t* add_cols, + dtuple_t* defaults, mem_heap_t* heap) { DBUG_ENTER("innobase_build_col_map"); @@ -3204,14 +3308,14 @@ innobase_build_col_map( DBUG_ASSERT(dict_table_get_n_cols(old_table) + dict_table_get_n_v_cols(old_table) >= table->s->fields + DATA_N_SYS_COLS); - DBUG_ASSERT(!!add_cols == !!(ha_alter_info->handler_flags - & ALTER_ADD_COLUMN)); - DBUG_ASSERT(!add_cols || dtuple_get_n_fields(add_cols) + DBUG_ASSERT(!!defaults == !!(ha_alter_info->handler_flags + & INNOBASE_DEFAULTS)); + DBUG_ASSERT(!defaults || dtuple_get_n_fields(defaults) == dict_table_get_n_cols(new_table)); ulint* col_map = static_cast( mem_heap_alloc( - heap, (old_table->n_cols + old_table->n_v_cols) + heap, unsigned(old_table->n_cols + old_table->n_v_cols) * sizeof *col_map)); List_iterator_fast cf_it( @@ -3248,6 +3352,22 @@ innobase_build_col_map( } if (new_field->field == field) { + + const Field* altered_field = + altered_table->field[i + num_v]; + + if (field->real_maybe_null() + && !altered_field->real_maybe_null()) { + /* Don't consider virtual column. + NULL to NOT NULL is not applicable + for virtual column. 
*/ + innobase_build_col_map_add( + heap, dtuple_get_nth_field( + defaults, i), + altered_field, + dict_table_is_comp(new_table)); + } + col_map[old_i - num_old_v] = i; goto found_col; } @@ -3255,7 +3375,7 @@ innobase_build_col_map( ut_ad(!is_v); innobase_build_col_map_add( - heap, dtuple_get_nth_field(add_cols, i), + heap, dtuple_get_nth_field(defaults, i), altered_table->field[i + num_v], dict_table_is_comp(new_table)); found_col: @@ -3541,10 +3661,11 @@ innobase_pk_order_preserved( const bool old_pk_column = old_field < old_n_uniq; if (old_pk_column) { - new_field_order = old_field; + new_field_order = lint(old_field); } else if (innobase_pk_col_is_existing(new_col_no, col_map, old_n_cols)) { - new_field_order = old_n_uniq + existing_field_count++; + new_field_order = lint(old_n_uniq + + existing_field_count++); } else { /* Skip newly added column. */ continue; @@ -3848,7 +3969,6 @@ prepare_inplace_add_virtual( /** Collect virtual column info for its addition @param[in] ha_alter_info Data used during in-place alter -@param[in] altered_table MySQL table that is being altered to @param[in] table MySQL table as it is before the ALTER operation @retval true Failure @retval false Success */ @@ -3856,7 +3976,6 @@ static bool prepare_inplace_drop_virtual( Alter_inplace_info* ha_alter_info, - const TABLE* altered_table, const TABLE* table) { ha_innobase_inplace_ctx* ctx; @@ -4083,8 +4202,6 @@ innodb_update_n_cols(const dict_table_t* table, ulint n_cols, trx_t* trx) /** Update system table for adding virtual column(s) @param[in] ha_alter_info Data used during in-place alter -@param[in] altered_table MySQL table that is being altered -@param[in] table MySQL table as it is before the ALTER operation @param[in] user_table InnoDB table @param[in] trx transaction @retval true Failure @@ -4093,8 +4210,6 @@ static bool innobase_add_virtual_try( Alter_inplace_info* ha_alter_info, - const TABLE* altered_table, - const TABLE* table, const dict_table_t* user_table, trx_t* trx) { @@ -4118,18 +4233,17 @@ innobase_add_virtual_try( } - ulint n_col = user_table->n_cols - DATA_N_SYS_COLS; - ulint n_v_col = user_table->n_v_cols + ulint n_col = unsigned(user_table->n_cols) - DATA_N_SYS_COLS; + ulint n_v_col = unsigned(user_table->n_v_cols) + ctx->num_to_add_vcol - ctx->num_to_drop_vcol; ulint new_n = dict_table_encode_n_col(n_col, n_v_col) - + ((user_table->flags & DICT_TF_COMPACT) << 31); + + (unsigned(user_table->flags & DICT_TF_COMPACT) << 31); return innodb_update_n_cols(user_table, new_n, trx); } /** Insert into SYS_COLUMNS and insert/update the 'default row' for instant ADD COLUMN. -@param[in,out] ha_alter_info Data used during in-place alter @param[in,out] ctx ALTER TABLE context for the current partition @param[in] altered_table MySQL table that is being altered @param[in] table MySQL table as it is before the ALTER operation @@ -4139,7 +4253,6 @@ for instant ADD COLUMN. 
static bool innobase_add_instant_try( - Alter_inplace_info* ha_alter_info, ha_innobase_inplace_ctx*ctx, const TABLE* altered_table, const TABLE* table, @@ -4241,14 +4354,15 @@ innobase_add_instant_try( } if (innodb_update_n_cols(user_table, dict_table_encode_n_col( - user_table->n_cols - DATA_N_SYS_COLS, + unsigned(user_table->n_cols) + - DATA_N_SYS_COLS, user_table->n_v_cols) | (user_table->flags & DICT_TF_COMPACT) << 31, trx)) { return true; } - unsigned i = user_table->n_cols - DATA_N_SYS_COLS; + unsigned i = unsigned(user_table->n_cols) - DATA_N_SYS_COLS; byte trx_id[DATA_TRX_ID_LEN], roll_ptr[DATA_ROLL_PTR_LEN]; dfield_set_data(dtuple_get_nth_field(row, i++), field_ref_zero, DATA_ROW_ID_LEN); @@ -4487,11 +4601,11 @@ innobase_drop_one_virtual_sys_columns( for (ulint i = v_col->v_pos + 1; i < table->n_v_cols; i++) { dict_v_col_t* t_col = dict_table_get_nth_v_col(table, i); ulint old_p = dict_create_v_col_pos( - t_col->v_pos - n_prev_dropped, - t_col->m_col.ind - n_prev_dropped); + t_col->v_pos - n_prev_dropped, + t_col->m_col.ind - n_prev_dropped); ulint new_p = dict_create_v_col_pos( - t_col->v_pos - 1 - n_prev_dropped, - t_col->m_col.ind - 1 - n_prev_dropped); + t_col->v_pos - 1 - n_prev_dropped, + ulint(t_col->m_col.ind) - 1 - n_prev_dropped); error = innobase_update_v_pos_sys_columns( table, old_p, new_p, trx); @@ -4540,8 +4654,6 @@ innobase_drop_one_virtual_sys_virtual( /** Update system table for dropping virtual column(s) @param[in] ha_alter_info Data used during in-place alter -@param[in] altered_table MySQL table that is being altered -@param[in] table MySQL table as it is before the ALTER operation @param[in] user_table InnoDB table @param[in] trx transaction @retval true Failure @@ -4550,8 +4662,6 @@ static bool innobase_drop_virtual_try( Alter_inplace_info* ha_alter_info, - const TABLE* altered_table, - const TABLE* table, const dict_table_t* user_table, trx_t* trx) { @@ -4587,10 +4697,11 @@ innobase_drop_virtual_try( } - ulint n_col = user_table->n_cols - DATA_N_SYS_COLS; - ulint n_v_col = user_table->n_v_cols - ctx->num_to_drop_vcol; + ulint n_col = unsigned(user_table->n_cols) - DATA_N_SYS_COLS; + ulint n_v_col = unsigned(user_table->n_v_cols) + - ctx->num_to_drop_vcol; ulint new_n = dict_table_encode_n_col(n_col, n_v_col) - + ((user_table->flags & DICT_TF_COMPACT) << 31); + | ((user_table->flags & DICT_TF_COMPACT) << 31); return innodb_update_n_cols(user_table, new_n, trx); } @@ -4763,7 +4874,7 @@ prepare_inplace_alter_table_dict( DBUG_ASSERT(!add_fts_doc_id || add_fts_doc_id_idx); DBUG_ASSERT(!add_fts_doc_id_idx || innobase_fulltext_exist(altered_table)); - DBUG_ASSERT(!ctx->add_cols); + DBUG_ASSERT(!ctx->defaults); DBUG_ASSERT(!ctx->add_index); DBUG_ASSERT(!ctx->add_key_numbers); DBUG_ASSERT(!ctx->num_to_add_index); @@ -4774,8 +4885,7 @@ prepare_inplace_alter_table_dict( if (ha_alter_info->handler_flags & ALTER_DROP_VIRTUAL_COLUMN) { - if (prepare_inplace_drop_virtual( - ha_alter_info, altered_table, old_table)) { + if (prepare_inplace_drop_virtual(ha_alter_info, old_table)) { DBUG_RETURN(true); } } @@ -4792,6 +4902,15 @@ prepare_inplace_alter_table_dict( if (ha_alter_info->handler_flags & ALTER_ADD_NON_UNIQUE_NON_PRIM_INDEX) { + for (ulint i = 0; i < ctx->num_to_add_vcol; i++) { + /* Set mbminmax for newly added column */ + dict_col_t& col = ctx->add_vcol[i].m_col; + ulint mbminlen, mbmaxlen; + dtype_get_mblen(col.mtype, col.prtype, + &mbminlen, &mbmaxlen); + col.mbminlen = mbminlen; + col.mbmaxlen = mbmaxlen; + } add_v = static_cast( mem_heap_alloc(ctx->heap, sizeof 
*add_v)); add_v->n_v_col = ctx->num_to_add_vcol; @@ -4889,7 +5008,7 @@ prepare_inplace_alter_table_dict( if (new_clustered) { if (innobase_check_foreigns( - ha_alter_info, altered_table, old_table, + ha_alter_info, old_table, user_table, ctx->drop_fk, ctx->num_to_drop_fk)) { new_clustered_failed: DBUG_ASSERT(ctx->trx != ctx->prebuilt->trx); @@ -4931,7 +5050,7 @@ new_clustered_failed: part ? part : "", partlen + 1); ulint n_cols = 0; ulint n_v_cols = 0; - dtuple_t* add_cols; + dtuple_t* defaults; ulint z = 0; for (uint i = 0; i < altered_table->s->fields; i++) { @@ -5099,23 +5218,21 @@ new_clustered_failed: dict_table_add_system_columns(ctx->new_table, ctx->heap); - if (ha_alter_info->handler_flags - & ALTER_ADD_COLUMN) { - add_cols = dtuple_create_with_vcol( + if (ha_alter_info->handler_flags & INNOBASE_DEFAULTS) { + defaults = dtuple_create_with_vcol( ctx->heap, dict_table_get_n_cols(ctx->new_table), dict_table_get_n_v_cols(ctx->new_table)); - dict_table_copy_types(add_cols, ctx->new_table); + dict_table_copy_types(defaults, ctx->new_table); } else { - add_cols = NULL; + defaults = NULL; } ctx->col_map = innobase_build_col_map( ha_alter_info, altered_table, old_table, - ctx->new_table, user_table, - add_cols, ctx->heap); - ctx->add_cols = add_cols; + ctx->new_table, user_table, defaults, ctx->heap); + ctx->defaults = defaults; } else { DBUG_ASSERT(!innobase_need_rebuild(ha_alter_info, old_table)); DBUG_ASSERT(old_table->s->primary_key @@ -5125,8 +5242,7 @@ new_clustered_failed: = dict_table_get_first_index(user_table); index != NULL; index = dict_table_get_next_index(index)) { - if (!index->to_be_dropped - && dict_index_is_corrupted(index)) { + if (!index->to_be_dropped && index->is_corrupted()) { my_error(ER_CHECK_NO_SUCH_TABLE, MYF(0)); goto error_handled; } @@ -5136,8 +5252,7 @@ new_clustered_failed: = dict_table_get_first_index(user_table); index != NULL; index = dict_table_get_next_index(index)) { - if (!index->to_be_dropped - && dict_index_is_corrupted(index)) { + if (!index->to_be_dropped && index->is_corrupted()) { my_error(ER_CHECK_NO_SUCH_TABLE, MYF(0)); goto error_handled; } @@ -5188,7 +5303,7 @@ new_clustered_failed: goto not_instant_add_column; } - for (uint i = ctx->old_table->n_cols - DATA_N_SYS_COLS; + for (uint i = uint(ctx->old_table->n_cols) - DATA_N_SYS_COLS; i--; ) { if (ctx->col_map[i] != i) { goto not_instant_add_column; @@ -5473,7 +5588,8 @@ new_table_failed: /* Initialize the AUTO_INCREMENT sequence to the rebuilt table from the old one. */ - if (!old_table->found_next_number_field) { + if (!old_table->found_next_number_field + || !user_table->space) { } else if (ib_uint64_t autoinc = btr_read_autoinc(clust_index)) { btr_write_autoinc(new_clust_index, autoinc); @@ -5488,7 +5604,8 @@ new_table_failed: clust_index, ctx->new_table, !(ha_alter_info->handler_flags & ALTER_ADD_PK_INDEX), - ctx->add_cols, ctx->col_map, path); + ctx->defaults, ctx->col_map, path, + ctx->ignore); rw_lock_x_unlock(&clust_index->lock); if (!ok) { @@ -5532,7 +5649,7 @@ error_handling_drop_uncached: by a modification log. */ } else if (!ctx->online || !user_table->is_readable() - || dict_table_is_discarded(user_table)) { + || !user_table->space) { /* No need to allocate a modification log. 
*/ DBUG_ASSERT(!index->online_log); } else { @@ -5546,7 +5663,7 @@ error_handling_drop_uncached: ctx->prebuilt->trx, index, NULL, true, NULL, NULL, - path); + path, ctx->ignore); rw_lock_x_unlock(&index->lock); if (!ok) { @@ -6053,18 +6170,24 @@ alter_fill_stored_column( dict_s_col_list** s_cols, mem_heap_t** s_heap) { - ulint n_cols = altered_table->s->fields; + ulint n_cols = altered_table->s->fields; + ulint stored_col_no = 0; for (ulint i = 0; i < n_cols; i++) { Field* field = altered_table->field[i]; dict_s_col_t s_col; + if (!innobase_is_v_fld(field)) { + stored_col_no++; + } + if (!innobase_is_s_fld(field)) { continue; } ulint num_base = 0; - dict_col_t* col = dict_table_get_nth_col(table, i); + dict_col_t* col = dict_table_get_nth_col(table, + stored_col_no); s_col.m_col = col; s_col.s_pos = i; @@ -6214,8 +6337,7 @@ ha_innobase::prepare_inplace_alter_table( if (indexed_table->corrupted || dict_table_get_first_index(indexed_table) == NULL - || dict_index_is_corrupted( - dict_table_get_first_index(indexed_table))) { + || dict_table_get_first_index(indexed_table)->is_corrupted()) { /* The clustered index is corrupted. */ my_error(ER_CHECK_NO_SUCH_TABLE, MYF(0)); DBUG_RETURN(true); @@ -6403,7 +6525,8 @@ check_if_ok_to_rename: n_drop_fk = 0; if (ha_alter_info->handler_flags - & (INNOBASE_ALTER_NOREBUILD | INNOBASE_ALTER_REBUILD)) { + & (INNOBASE_ALTER_NOREBUILD | INNOBASE_ALTER_REBUILD + | INNOBASE_ALTER_INSTANT)) { heap = mem_heap_create(1024); if (ha_alter_info->handler_flags @@ -6502,7 +6625,7 @@ found_fk: " with name %s", key->name); } else { ut_ad(!index->to_be_dropped); - if (!dict_index_is_clust(index)) { + if (!index->is_primary()) { drop_index[n_drop_index++] = index; } else { drop_primary = index; @@ -6603,7 +6726,7 @@ check_if_can_drop_indexes: for (dict_index_t* index = dict_table_get_first_index(indexed_table); index != NULL; index = dict_table_get_next_index(index)) { - if (!index->to_be_dropped && dict_index_is_corrupted(index)) { + if (!index->to_be_dropped && index->is_corrupted()) { my_error(ER_INDEX_CORRUPT, MYF(0), index->name()); goto err_exit; } @@ -6704,7 +6827,8 @@ err_exit: } if (!(ha_alter_info->handler_flags & INNOBASE_ALTER_DATA) - || ((ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE) + || ((ha_alter_info->handler_flags & ~(INNOBASE_INPLACE_IGNORE + | INNOBASE_ALTER_INSTANT)) == ALTER_CHANGE_CREATE_OPTION && !create_option_need_rebuild(ha_alter_info, table))) { @@ -6718,11 +6842,12 @@ err_exit: add_fk, n_add_fk, ha_alter_info->online, heap, indexed_table, - col_names, ULINT_UNDEFINED, 0, 0, 0); + col_names, ULINT_UNDEFINED, 0, 0, + ha_alter_info->ignore); } DBUG_ASSERT(m_prebuilt->trx->dict_operation_lock_mode == 0); - if (ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE) { + if (ha_alter_info->handler_flags & ~(INNOBASE_INPLACE_IGNORE)) { online_retry_drop_indexes( m_prebuilt->table, m_user_thd); @@ -6731,8 +6856,7 @@ err_exit: if ((ha_alter_info->handler_flags & ALTER_DROP_VIRTUAL_COLUMN) - && prepare_inplace_drop_virtual( - ha_alter_info, altered_table, table)) { + && prepare_inplace_drop_virtual(ha_alter_info, table)) { DBUG_RETURN(true); } @@ -6761,12 +6885,6 @@ err_exit: add_fts_doc_id = true; add_fts_doc_id_idx = true; - push_warning_printf( - m_user_thd, - Sql_condition::WARN_LEVEL_WARN, - HA_ERR_WRONG_INDEX, - "InnoDB rebuilding table to add" - " column " FTS_DOC_ID_COL_NAME); } else if (fts_doc_col_no == ULINT_UNDEFINED) { goto err_exit; } @@ -6853,7 +6971,7 @@ found_col: heap, m_prebuilt->table, col_names, add_autoinc_col_no, 
ha_alter_info->create_info->auto_increment_value, - autoinc_col_max_value, 0); + autoinc_col_max_value, ha_alter_info->ignore); DBUG_RETURN(prepare_inplace_alter_table_dict( ha_alter_info, altered_table, table, @@ -7014,8 +7132,7 @@ ok_exit: ctx->m_stage = UT_NEW_NOKEY(ut_stage_alter_t(pk)); - if (!m_prebuilt->table->is_readable() - || dict_table_is_discarded(m_prebuilt->table)) { + if (!m_prebuilt->table->is_readable()) { goto all_done; } @@ -7084,10 +7201,9 @@ ok_exit: m_prebuilt->table, ctx->new_table, ctx->online, ctx->add_index, ctx->add_key_numbers, ctx->num_to_add_index, - altered_table, ctx->add_cols, ctx->col_map, + altered_table, ctx->defaults, ctx->col_map, ctx->add_autoinc, ctx->sequence, ctx->skip_pk_sort, - ctx->m_stage, add_v, eval_table, - ha_alter_info->handler_flags & ALTER_DROP_HISTORICAL); + ctx->m_stage, add_v, eval_table); #ifndef DBUG_OFF oom: @@ -7246,7 +7362,7 @@ check_col_exists_in_indexes( const dict_col_t* idx_col = dict_index_get_nth_col(index, i); - if (is_v && dict_col_is_virtual(idx_col)) { + if (is_v && idx_col->is_virtual()) { const dict_v_col_t* v_col = reinterpret_cast< const dict_v_col_t*>(idx_col); if (v_col->v_pos == col_no) { @@ -7254,7 +7370,7 @@ check_col_exists_in_indexes( } } - if (!is_v && !dict_col_is_virtual(idx_col) + if (!is_v && !idx_col->is_virtual() && dict_col_get_no(idx_col) == col_no) { return(true); } @@ -7332,12 +7448,16 @@ rollback_inplace_alter_table( row_mysql_lock_data_dictionary(ctx->trx); if (ctx->need_rebuild()) { - dberr_t err = DB_SUCCESS; - ulint flags = ctx->new_table->flags; - /* DML threads can access ctx->new_table via the online rebuild log. Free it first. */ innobase_online_rebuild_log_free(prebuilt->table); + } + + if (!ctx->new_table) { + ut_ad(ctx->need_rebuild()); + } else if (ctx->need_rebuild()) { + dberr_t err= DB_SUCCESS; + ulint flags = ctx->new_table->flags; /* Since the FTS index specific auxiliary tables has not yet registered with "table->fts" by fts_add_index(), @@ -7515,8 +7635,7 @@ innobase_rename_column_try( ulint nth_col, const char* from, const char* to, - bool new_clustered, - bool is_virtual) + bool new_clustered) { pars_info_t* info; dberr_t error; @@ -7752,8 +7871,7 @@ innobase_rename_columns_try( col_n, cf->field->field_name.str, cf->field_name.str, - ctx->need_rebuild(), - is_virtual)) { + ctx->need_rebuild())) { return(true); } goto processed_field; @@ -7992,16 +8110,17 @@ innobase_rename_or_enlarge_columns_cache( @param ha_alter_info Data used during in-place alter @param ctx In-place ALTER TABLE context @param altered_table MySQL table that is being altered -@param old_table MySQL table as it is before the ALTER operation */ +@param old_table MySQL table as it is before the ALTER operation +@return whether the operation failed (and my_error() was called) */ static MY_ATTRIBUTE((nonnull)) -void +bool commit_set_autoinc( Alter_inplace_info* ha_alter_info, ha_innobase_inplace_ctx*ctx, const TABLE* altered_table, const TABLE* old_table) { - DBUG_ENTER("commit_get_autoinc"); + DBUG_ENTER("commit_set_autoinc"); if (!altered_table->found_next_number_field) { /* There is no AUTO_INCREMENT column in the table @@ -8022,6 +8141,13 @@ commit_set_autoinc( & ALTER_CHANGE_CREATE_OPTION) && (ha_alter_info->create_info->used_fields & HA_CREATE_USED_AUTO)) { + + if (!ctx->old_table->space) { + my_error(ER_TABLESPACE_DISCARDED, MYF(0), + old_table->s->table_name.str); + DBUG_RETURN(true); + } + /* An AUTO_INCREMENT value was supplied by the user. It must be persisted to the data file. 
*/ const Field* ai = old_table->found_next_number_field; @@ -8101,7 +8227,7 @@ commit_set_autoinc( between prepare_inplace and commit_inplace. */ } - DBUG_VOID_RETURN; + DBUG_RETURN(false); } /** Add or drop foreign key constraints to the data dictionary tables, @@ -8338,7 +8464,7 @@ commit_try_rebuild( DBUG_ASSERT(dict_index_get_online_status(index) == ONLINE_INDEX_COMPLETE); DBUG_ASSERT(index->is_committed()); - if (dict_index_is_corrupted(index)) { + if (index->is_corrupted()) { my_error(ER_INDEX_CORRUPT, MYF(0), index->name()); DBUG_RETURN(true); } @@ -8438,7 +8564,7 @@ commit_try_rebuild( /* The new table must inherit the flag from the "parent" table. */ - if (dict_table_is_discarded(user_table)) { + if (!user_table->space) { rebuilt_table->file_unreadable = true; rebuilt_table->flags2 |= DICT_TF2_DISCARDED; } @@ -8490,8 +8616,7 @@ commit_cache_rebuild( DBUG_ENTER("commit_cache_rebuild"); DEBUG_SYNC_C("commit_cache_rebuild"); DBUG_ASSERT(ctx->need_rebuild()); - DBUG_ASSERT(dict_table_is_discarded(ctx->old_table) - == dict_table_is_discarded(ctx->new_table)); + DBUG_ASSERT(!ctx->old_table->space == !ctx->new_table->space); const char* old_name = mem_heap_strdup( ctx->heap, ctx->old_table->name.m_name); @@ -8535,7 +8660,7 @@ get_col_list_to_be_dropped( const dict_col_t* idx_col = dict_index_get_nth_col(index, col); - if (dict_col_is_virtual(idx_col)) { + if (idx_col->is_virtual()) { const dict_v_col_t* v_col = reinterpret_cast< const dict_v_col_t*>(idx_col); @@ -8587,7 +8712,7 @@ commit_try_norebuild( DBUG_ASSERT(dict_index_get_online_status(index) == ONLINE_INDEX_COMPLETE); DBUG_ASSERT(!index->is_committed()); - if (dict_index_is_corrupted(index)) { + if (index->is_corrupted()) { /* Report a duplicate key error for the index that was flagged corrupted, most likely @@ -8688,22 +8813,17 @@ commit_try_norebuild( if ((ha_alter_info->handler_flags & ALTER_DROP_VIRTUAL_COLUMN) - && innobase_drop_virtual_try( - ha_alter_info, altered_table, old_table, - ctx->old_table, trx)) { + && innobase_drop_virtual_try(ha_alter_info, ctx->old_table, trx)) { DBUG_RETURN(true); } if ((ha_alter_info->handler_flags & ALTER_ADD_VIRTUAL_COLUMN) - && innobase_add_virtual_try( - ha_alter_info, altered_table, old_table, - ctx->old_table, trx)) { + && innobase_add_virtual_try(ha_alter_info, ctx->old_table, trx)) { DBUG_RETURN(true); } - if (innobase_add_instant_try(ha_alter_info, ctx, altered_table, - old_table, trx)) { + if (innobase_add_instant_try(ctx, altered_table, old_table, trx)) { DBUG_RETURN(true); } @@ -8767,7 +8887,7 @@ commit_cache_norebuild( (after renaming the indexes), so that in the event of a crash, crash recovery will drop the indexes, because it drops all indexes whose - names start with TEMP_INDEX_PREFIX. Once we + names start with TEMP_INDEX_PREFIX_STR. Once we have started dropping an index tree, there is no way to roll it back. */ @@ -8838,8 +8958,6 @@ Remove statistics for dropped indexes, add statistics for created indexes and rename statistics for renamed indexes. 
@param ha_alter_info Data used during in-place alter @param ctx In-place ALTER TABLE context -@param altered_table MySQL table that is being altered -@param table_name Table name in MySQL @param thd MySQL connection */ static @@ -8848,8 +8966,6 @@ alter_stats_norebuild( /*==================*/ Alter_inplace_info* ha_alter_info, ha_innobase_inplace_ctx* ctx, - TABLE* altered_table, - const char* table_name, THD* thd) { ulint i; @@ -8951,7 +9067,7 @@ alter_stats_rebuild( { DBUG_ENTER("alter_stats_rebuild"); - if (dict_table_is_discarded(table) + if (!table->space || !dict_stats_is_persistent_enabled(table)) { DBUG_VOID_RETURN; } @@ -9079,20 +9195,6 @@ ha_innobase::commit_inplace_alter_table( ut_ad(m_prebuilt->table == ctx0->old_table); ha_alter_info->group_commit_ctx = NULL; - /* Free the ctx->trx of other partitions, if any. We will only - use the ctx0->trx here. Others may have been allocated in - the prepare stage. */ - - for (inplace_alter_handler_ctx** pctx = &ctx_array[1]; *pctx; - pctx++) { - ha_innobase_inplace_ctx* ctx - = static_cast(*pctx); - - if (ctx->trx) { - trx_free(ctx->trx); - } - } - trx_start_if_not_started_xa(m_prebuilt->trx, true); for (inplace_alter_handler_ctx** pctx = ctx_array; *pctx; pctx++) { @@ -9249,9 +9351,11 @@ ha_innobase::commit_inplace_alter_table( DBUG_ASSERT(new_clustered == ctx->need_rebuild()); - commit_set_autoinc(ha_alter_info, ctx, altered_table, table); + fail = commit_set_autoinc(ha_alter_info, ctx, altered_table, + table); - if (ctx->need_rebuild()) { + if (fail) { + } else if (ctx->need_rebuild()) { ctx->tmp_name = dict_mem_create_temporary_tablename( ctx->heap, ctx->new_table->name.m_name, ctx->new_table->id); @@ -9491,10 +9595,6 @@ foreign_fail: m_share->idx_trans_tbl.index_count = 0; } - if (trx == ctx0->trx) { - ctx0->trx = NULL; - } - /* Tell the InnoDB server that there might be work for utility threads: */ @@ -9517,10 +9617,30 @@ foreign_fail: } row_mysql_unlock_data_dictionary(trx); - trx_free(trx); + if (trx != ctx0->trx) { + trx_free(trx); + } DBUG_RETURN(true); } + if (trx == ctx0->trx) { + ctx0->trx = NULL; + } + + /* Free the ctx->trx of other partitions, if any. We will only + use the ctx0->trx here. Others may have been allocated in + the prepare stage. 
*/ + + for (inplace_alter_handler_ctx** pctx = &ctx_array[1]; *pctx; + pctx++) { + ha_innobase_inplace_ctx* ctx + = static_cast(*pctx); + + if (ctx->trx) { + trx_free(ctx->trx); + } + } + if (ctx0->num_to_drop_vcol || ctx0->num_to_add_vcol) { DBUG_ASSERT(ctx0->old_table->get_ref_count() == 1); @@ -9722,9 +9842,7 @@ foreign_fail: (*pctx); DBUG_ASSERT(!ctx->need_rebuild()); - alter_stats_norebuild( - ha_alter_info, ctx, altered_table, - table->s->table_name.str, m_user_thd); + alter_stats_norebuild(ha_alter_info, ctx, m_user_thd); DBUG_INJECT_CRASH("ib_commit_inplace_crash", crash_inject_count++); } diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index 637f82d04ec..25dd9e9993d 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -87,11 +87,6 @@ in i_s_page_type[] array */ #define I_S_PAGE_TYPE_BITS 4 -/* Check if we can hold all page types */ -#if I_S_PAGE_TYPE_LAST >= 1 << I_S_PAGE_TYPE_BITS -# error i_s_page_type[] is too large -#endif - /** Name string for File Page Types */ static buf_page_desc_t i_s_page_type[] = { {"ALLOCATED", FIL_PAGE_TYPE_ALLOCATED}, @@ -261,60 +256,13 @@ field_store_string( const char* str) /*!< in: NUL-terminated utf-8 string, or NULL */ { - int ret; - - if (str != NULL) { - - ret = field->store(str, static_cast(strlen(str)), - system_charset_info); - field->set_notnull(); - } else { - - ret = 0; /* success */ + if (!str) { field->set_null(); - } - - return(ret); -} - -/*******************************************************************//** -Store the name of an index in a MYSQL_TYPE_VARCHAR field. -Handles the names of incomplete secondary indexes. -@return 0 on success */ -static -int -field_store_index_name( -/*===================*/ - Field* field, /*!< in/out: target field for - storage */ - const char* index_name) /*!< in: NUL-terminated utf-8 - index name, possibly starting with - TEMP_INDEX_PREFIX */ -{ - int ret; - - ut_ad(index_name != NULL); - ut_ad(field->real_type() == MYSQL_TYPE_VARCHAR || - field->real_type() == MYSQL_TYPE_NULL); - - /* Since TEMP_INDEX_PREFIX is not a valid UTF8, we need to convert - it to something else. 
*/ - if (*index_name == *TEMP_INDEX_PREFIX_STR) { - char buf[NAME_LEN + 1]; - buf[0] = '?'; - memcpy(buf + 1, index_name + 1, strlen(index_name)); - ret = field->store( - buf, static_cast(strlen(buf)), - system_charset_info); - } else { - ret = field->store( - index_name, static_cast(strlen(index_name)), - system_charset_info); + return 0; } field->set_notnull(); - - return(ret); + return field->store(str, uint(strlen(str)), system_charset_info); } /*******************************************************************//** @@ -331,7 +279,7 @@ field_store_ulint( if (n != ULINT_UNDEFINED) { - ret = field->store(n, true); + ret = field->store(longlong(n), true); field->set_notnull(); } else { @@ -937,12 +885,8 @@ fill_innodb_locks_from_cache( buf, uint(bufend - buf), system_charset_info)); /* lock_index */ - if (row->lock_index != NULL) { - OK(field_store_index_name(fields[IDX_LOCK_INDEX], - row->lock_index)); - } else { - fields[IDX_LOCK_INDEX]->set_null(); - } + OK(field_store_string(fields[IDX_LOCK_INDEX], + row->lock_index)); /* lock_space */ OK(field_store_ulint(fields[IDX_LOCK_SPACE], @@ -1716,7 +1660,6 @@ i_s_cmp_per_index_fill_low( for (iter = snap.begin(), i = 0; iter != snap.end(); iter++, i++) { - char name[192]; dict_index_t* index = dict_index_find_on_id_low(iter->first); if (index != NULL) { @@ -1727,38 +1670,39 @@ i_s_cmp_per_index_fill_low( db_utf8, sizeof(db_utf8), table_utf8, sizeof(table_utf8)); - field_store_string(fields[IDX_DATABASE_NAME], db_utf8); - field_store_string(fields[IDX_TABLE_NAME], table_utf8); - field_store_index_name(fields[IDX_INDEX_NAME], - index->name); + status = field_store_string(fields[IDX_DATABASE_NAME], + db_utf8) + || field_store_string(fields[IDX_TABLE_NAME], + table_utf8) + || field_store_string(fields[IDX_INDEX_NAME], + index->name); } else { /* index not found */ - snprintf(name, sizeof(name), - "index_id:" IB_ID_FMT, iter->first); - field_store_string(fields[IDX_DATABASE_NAME], - "unknown"); - field_store_string(fields[IDX_TABLE_NAME], - "unknown"); - field_store_string(fields[IDX_INDEX_NAME], - name); + char name[MY_INT64_NUM_DECIMAL_DIGITS + + sizeof "index_id: "]; + fields[IDX_DATABASE_NAME]->set_null(); + fields[IDX_TABLE_NAME]->set_null(); + fields[IDX_INDEX_NAME]->set_notnull(); + status = fields[IDX_INDEX_NAME]->store( + name, + uint(snprintf(name, sizeof name, + "index_id: " IB_ID_FMT, + iter->first)), + system_charset_info); } - fields[IDX_COMPRESS_OPS]->store( - iter->second.compressed, true); - - fields[IDX_COMPRESS_OPS_OK]->store( - iter->second.compressed_ok, true); - - fields[IDX_COMPRESS_TIME]->store( - iter->second.compressed_usec / 1000000, true); - - fields[IDX_UNCOMPRESS_OPS]->store( - iter->second.decompressed, true); - - fields[IDX_UNCOMPRESS_TIME]->store( - iter->second.decompressed_usec / 1000000, true); - - if (schema_table_store_record(thd, table)) { + if (status + || fields[IDX_COMPRESS_OPS]->store( + iter->second.compressed, true) + || fields[IDX_COMPRESS_OPS_OK]->store( + iter->second.compressed_ok, true) + || fields[IDX_COMPRESS_TIME]->store( + iter->second.compressed_usec / 1000000, true) + || fields[IDX_UNCOMPRESS_OPS]->store( + iter->second.decompressed, true) + || fields[IDX_UNCOMPRESS_TIME]->store( + iter->second.decompressed_usec / 1000000, true) + || schema_table_store_record(thd, table)) { status = 1; break; } @@ -1766,8 +1710,9 @@ i_s_cmp_per_index_fill_low( threads to proceed. 
This could eventually result in the contents of INFORMATION_SCHEMA.innodb_cmp_per_index being inconsistent, but it is an acceptable compromise. */ - if (i % 1000 == 0) { + if (i == 1000) { mutex_exit(&dict_sys->mutex); + i = 0; mutex_enter(&dict_sys->mutex); } } @@ -4852,6 +4797,8 @@ i_s_innodb_buffer_page_fill( TABLE* table; Field** fields; + compile_time_assert(I_S_PAGE_TYPE_LAST < 1 << I_S_PAGE_TYPE_BITS); + DBUG_ENTER("i_s_innodb_buffer_page_fill"); table = tables->table; @@ -4917,9 +4864,11 @@ i_s_innodb_buffer_page_fill( mutex_enter(&dict_sys->mutex); - if (const dict_index_t* index = - dict_index_get_if_in_cache_low( - page_info->index_id)) { + const dict_index_t* index = + dict_index_get_if_in_cache_low( + page_info->index_id); + + if (index) { table_name_end = innobase_convert_name( table_name, sizeof(table_name), index->table->name.m_name, @@ -4932,17 +4881,22 @@ i_s_innodb_buffer_page_fill( table_name_end - table_name), system_charset_info) - || field_store_index_name( - fields - [IDX_BUFFER_PAGE_INDEX_NAME], - index->name); + || fields[IDX_BUFFER_PAGE_INDEX_NAME] + ->store(index->name, + uint(strlen(index->name)), + system_charset_info); } mutex_exit(&dict_sys->mutex); OK(ret); - fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull(); + if (index) { + fields[IDX_BUFFER_PAGE_TABLE_NAME] + ->set_notnull(); + fields[IDX_BUFFER_PAGE_INDEX_NAME] + ->set_notnull(); + } } OK(fields[IDX_BUFFER_PAGE_NUM_RECS]->store( @@ -4955,10 +4909,7 @@ i_s_innodb_buffer_page_fill( page_info->zip_ssize ? (UNIV_ZIP_SIZE_MIN >> 1) << page_info->zip_ssize : 0, true)); - -#if BUF_PAGE_STATE_BITS > 3 -# error "BUF_PAGE_STATE_BITS > 3, please ensure that all 1<(page_info->page_state); switch (state) { @@ -5636,9 +5587,11 @@ i_s_innodb_buf_page_lru_fill( mutex_enter(&dict_sys->mutex); - if (const dict_index_t* index = - dict_index_get_if_in_cache_low( - page_info->index_id)) { + const dict_index_t* index = + dict_index_get_if_in_cache_low( + page_info->index_id); + + if (index) { table_name_end = innobase_convert_name( table_name, sizeof(table_name), index->table->name.m_name, @@ -5651,17 +5604,22 @@ i_s_innodb_buf_page_lru_fill( table_name_end - table_name), system_charset_info) - || field_store_index_name( - fields - [IDX_BUF_LRU_PAGE_INDEX_NAME], - index->name); + || fields[IDX_BUF_LRU_PAGE_INDEX_NAME] + ->store(index->name, + uint(strlen(index->name)), + system_charset_info); } mutex_exit(&dict_sys->mutex); OK(ret); - fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull(); + if (index) { + fields[IDX_BUF_LRU_PAGE_TABLE_NAME] + ->set_notnull(); + fields[IDX_BUF_LRU_PAGE_INDEX_NAME] + ->set_notnull(); + } } OK(fields[IDX_BUF_LRU_PAGE_NUM_RECS]->store( @@ -5916,12 +5874,8 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_buffer_page_lru = /*******************************************************************//** Unbind a dynamic INFORMATION_SCHEMA table. -@return 0 on success */ -static -int -i_s_common_deinit( -/*==============*/ - void* p) /*!< in/out: table schema object */ +@return 0 */ +static int i_s_common_deinit(void*) { DBUG_ENTER("i_s_common_deinit"); @@ -6611,7 +6565,15 @@ i_s_dict_fill_sys_indexes( fields = table_to_fill->field; - OK(field_store_index_name(fields[SYS_INDEX_NAME], index->name)); + if (*index->name == *TEMP_INDEX_PREFIX_STR) { + /* Since TEMP_INDEX_PREFIX_STR is not valid UTF-8, we + need to convert it to something else. 
*/ + *const_cast(index->name()) = '?'; + } + + OK(fields[SYS_INDEX_NAME]->store(index->name, + uint(strlen(index->name)), + system_charset_info)); OK(fields[SYS_INDEX_ID]->store(longlong(index->id), true)); @@ -6876,7 +6838,7 @@ i_s_dict_fill_sys_columns( OK(field_store_string(fields[SYS_COLUMN_NAME], col_name)); - if (dict_col_is_virtual(column)) { + if (column->is_virtual()) { ulint pos = dict_create_v_col_pos(nth_v_col, column->ind); OK(fields[SYS_COLUMN_POSITION]->store(pos, true)); } else { @@ -7119,7 +7081,6 @@ i_s_sys_virtual_fill_table( const rec_t* rec; ulint pos; ulint base_pos; - mem_heap_t* heap; mtr_t mtr; DBUG_ENTER("i_s_sys_virtual_fill_table"); @@ -7130,7 +7091,6 @@ i_s_sys_virtual_fill_table( DBUG_RETURN(0); } - heap = mem_heap_create(1000); mutex_enter(&dict_sys->mutex); mtr_start(&mtr); @@ -7142,7 +7102,7 @@ i_s_sys_virtual_fill_table( /* populate a dict_col_t structure with information from a SYS_VIRTUAL row */ - err_msg = dict_process_sys_virtual_rec(heap, rec, + err_msg = dict_process_sys_virtual_rec(rec, &table_id, &pos, &base_pos); @@ -7158,8 +7118,6 @@ i_s_sys_virtual_fill_table( err_msg); } - mem_heap_empty(heap); - /* Get the next record */ mutex_enter(&dict_sys->mutex); mtr_start(&mtr); @@ -7168,7 +7126,6 @@ i_s_sys_virtual_fill_table( mtr_commit(&mtr); mutex_exit(&dict_sys->mutex); - mem_heap_free(heap); DBUG_RETURN(0); } @@ -8633,16 +8590,17 @@ i_s_tablespaces_encryption_fill_table( for (fil_space_t* space = UT_LIST_GET_FIRST(fil_system.space_list); space; space = UT_LIST_GET_NEXT(space_list, space)) { - if (space->purpose == FIL_TYPE_TABLESPACE) { - space->n_pending_ops++; + if (space->purpose == FIL_TYPE_TABLESPACE + && !space->is_stopping()) { + space->acquire(); mutex_exit(&fil_system.mutex); if (int err = i_s_dict_fill_tablespaces_encryption( thd, space, tables->table)) { - fil_space_release(space); + space->release(); DBUG_RETURN(err); } mutex_enter(&fil_system.mutex); - space->n_pending_ops--; + space->release(); } } @@ -8904,16 +8862,17 @@ i_s_tablespaces_scrubbing_fill_table( for (fil_space_t* space = UT_LIST_GET_FIRST(fil_system.space_list); space; space = UT_LIST_GET_NEXT(space_list, space)) { - if (space->purpose == FIL_TYPE_TABLESPACE) { - space->n_pending_ops++; + if (space->purpose == FIL_TYPE_TABLESPACE + && !space->is_stopping()) { + space->acquire(); mutex_exit(&fil_system.mutex); if (int err = i_s_dict_fill_tablespaces_scrubbing( thd, space, tables->table)) { - fil_space_release(space); + space->release(); DBUG_RETURN(err); } mutex_enter(&fil_system.mutex); - space->n_pending_ops--; + space->release(); } } diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index 1ff3cb594c6..04cee74e6ea 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -36,9 +36,6 @@ my_bool srv_ibuf_disable_background_merge; /** Number of bits describing a single page */ #define IBUF_BITS_PER_PAGE 4 -#if IBUF_BITS_PER_PAGE % 2 -# error "IBUF_BITS_PER_PAGE must be an even number!" -#endif /** The start address for an insert buffer bitmap page bitmap */ #define IBUF_BITMAP PAGE_DATA @@ -258,9 +255,6 @@ type, counter, and some flags. 
*/ /* @{ */ #define IBUF_REC_INFO_SIZE 4 /*!< Combined size of info fields at the beginning of the fourth field */ -#if IBUF_REC_INFO_SIZE >= DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE -# error "IBUF_REC_INFO_SIZE >= DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE" -#endif /* Offsets for the fields at the beginning of the fourth field */ #define IBUF_REC_OFFSET_COUNTER 0 /*!< Operation counter */ @@ -442,7 +436,7 @@ ibuf_count_set( ulint val) { ibuf_count_check(page_id); - ut_a(val < UNIV_PAGE_SIZE); + ut_a(val < srv_page_size); ibuf_counts[page_id.space()][page_id.page_no()] = val; } @@ -454,6 +448,10 @@ void ibuf_close(void) /*============*/ { + if (ibuf == NULL) { + return; + } + mutex_free(&ibuf_pessimistic_insert_mutex); mutex_free(&ibuf_mutex); @@ -510,7 +508,7 @@ ibuf_init_at_db_start(void) buffer pool size. Once ibuf struct is initialized this value is updated with the user supplied size by calling ibuf_max_size_update(). */ - ibuf->max_size = ((buf_pool_get_curr_size() / UNIV_PAGE_SIZE) + ibuf->max_size = ((buf_pool_get_curr_size() >> srv_page_size_shift) * CHANGE_BUFFER_DEFAULT_SIZE) / 100; mutex_create(LATCH_ID_IBUF, &ibuf_mutex); @@ -584,7 +582,7 @@ ibuf_max_size_update( ulint new_val) /*!< in: new value in terms of percentage of the buffer pool size */ { - ulint new_size = ((buf_pool_get_curr_size() / UNIV_PAGE_SIZE) + ulint new_size = ((buf_pool_get_curr_size() >> srv_page_size_shift) * new_val) / 100; mutex_enter(&ibuf_mutex); ibuf->max_size = new_size; @@ -607,6 +605,7 @@ ibuf_bitmap_page_init( fil_page_set_type(page, FIL_PAGE_IBUF_BITMAP); /* Write all zeros to the bitmap */ + compile_time_assert(!(IBUF_BITS_PER_PAGE % 2)); byte_offset = UT_BITS_IN_BYTES(block->page.size.physical() * IBUF_BITS_PER_PAGE); @@ -690,9 +689,7 @@ ibuf_bitmap_page_get_bits_low( ulint value; ut_ad(bit < IBUF_BITS_PER_PAGE); -#if IBUF_BITS_PER_PAGE % 2 -# error "IBUF_BITS_PER_PAGE % 2 != 0" -#endif + compile_time_assert(!(IBUF_BITS_PER_PAGE % 2)); ut_ad(mtr_memo_contains_page(mtr, page, latch_type)); bit_offset = (page_id.page_no() % page_size.physical()) @@ -701,7 +698,7 @@ ibuf_bitmap_page_get_bits_low( byte_offset = bit_offset / 8; bit_offset = bit_offset % 8; - ut_ad(byte_offset + IBUF_BITMAP < UNIV_PAGE_SIZE); + ut_ad(byte_offset + IBUF_BITMAP < srv_page_size); map_byte = mach_read_from_1(page + IBUF_BITMAP + byte_offset); @@ -738,9 +735,7 @@ ibuf_bitmap_page_set_bits( ulint map_byte; ut_ad(bit < IBUF_BITS_PER_PAGE); -#if IBUF_BITS_PER_PAGE % 2 -# error "IBUF_BITS_PER_PAGE % 2 != 0" -#endif + compile_time_assert(!(IBUF_BITS_PER_PAGE % 2)); ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX)); ut_ad(mtr->is_named_space(page_id.space())); #ifdef UNIV_IBUF_COUNT_DEBUG @@ -754,7 +749,7 @@ ibuf_bitmap_page_set_bits( byte_offset = bit_offset / 8; bit_offset = bit_offset % 8; - ut_ad(byte_offset + IBUF_BITMAP < UNIV_PAGE_SIZE); + ut_ad(byte_offset + IBUF_BITMAP < srv_page_size); map_byte = mach_read_from_1(page + IBUF_BITMAP + byte_offset); @@ -1326,6 +1321,8 @@ ibuf_rec_get_info_func( types = rec_get_nth_field_old(rec, IBUF_REC_FIELD_METADATA, &len); info_len_local = len % DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE; + compile_time_assert(IBUF_REC_INFO_SIZE + < DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE); switch (info_len_local) { case 0: @@ -1625,7 +1622,8 @@ ibuf_build_entry_from_ibuf_rec_func( ibuf_dummy_index_add_col(index, dfield_get_type(field), len); } - index->n_core_null_bytes = UT_BITS_IN_BYTES(index->n_nullable); + index->n_core_null_bytes + = UT_BITS_IN_BYTES(unsigned(index->n_nullable)); /* Prevent an ut_ad() failure 
in page_zip_write_rec() by adding system columns to the dummy table pointed to by the @@ -1915,7 +1913,7 @@ ibuf_entry_build( field = dtuple_get_nth_field(tuple, IBUF_REC_FIELD_METADATA); - dfield_set_data(field, type_info, ti - type_info); + dfield_set_data(field, type_info, ulint(ti - type_info)); /* Set all the types in the new tuple binary */ @@ -1980,11 +1978,8 @@ ibuf_search_tuple_build( /*********************************************************************//** Checks if there are enough pages in the free list of the ibuf tree that we dare to start a pessimistic insert to the insert buffer. -@return TRUE if enough free pages in list */ -UNIV_INLINE -ibool -ibuf_data_enough_free_for_insert(void) -/*==================================*/ +@return whether enough free pages in list */ +static inline bool ibuf_data_enough_free_for_insert() { ut_ad(mutex_own(&ibuf_mutex)); @@ -2383,7 +2378,7 @@ ibuf_get_merge_page_nos_func( && prev_space_id == first_space_id) || (volume_for_page > ((IBUF_MERGE_THRESHOLD - 1) - * 4 * UNIV_PAGE_SIZE + * 4U << srv_page_size_shift / IBUF_PAGE_SIZE_PER_FREE_SPACE) / IBUF_MERGE_THRESHOLD)) { @@ -2880,7 +2875,7 @@ ibuf_get_volume_buffered_count_func( types = rec_get_nth_field_old(rec, IBUF_REC_FIELD_METADATA, &len); - switch (UNIV_EXPECT(len % DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE, + switch (UNIV_EXPECT(int(len % DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE), IBUF_REC_INFO_SIZE)) { default: ut_error; @@ -2967,7 +2962,7 @@ get_volume_comp: Gets an upper limit for the combined size of entries buffered in the insert buffer for a given page. @return upper limit for the volume of buffered inserts for the index -page, in bytes; UNIV_PAGE_SIZE, if the entries for the index page span +page, in bytes; srv_page_size, if the entries for the index page span several pages in the insert buffer */ static ulint @@ -3068,7 +3063,7 @@ ibuf_get_volume_buffered( do not have the x-latch on it, and cannot acquire one because of the latching order: we have to give up */ - return(UNIV_PAGE_SIZE); + return(srv_page_size); } if (page_no != ibuf_rec_get_page_no(mtr, rec) @@ -3138,7 +3133,7 @@ count_later: /* We give up */ - return(UNIV_PAGE_SIZE); + return(srv_page_size); } if (page_no != ibuf_rec_get_page_no(mtr, rec) @@ -3712,7 +3707,7 @@ ibuf_insert( ut_ad(page_id.space() != SRV_TMP_SPACE_ID); ut_a(!dict_index_is_clust(index)); - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); no_counter = use <= IBUF_USE_INSERT; @@ -3727,8 +3722,6 @@ ibuf_insert( case IBUF_USE_INSERT_DELETE_MARK: case IBUF_USE_ALL: goto check_watch; - case IBUF_USE_COUNT: - break; } break; case IBUF_OP_DELETE_MARK: @@ -3742,8 +3735,6 @@ ibuf_insert( case IBUF_USE_ALL: ut_ad(!no_counter); goto check_watch; - case IBUF_USE_COUNT: - break; } break; case IBUF_OP_DELETE: @@ -3757,8 +3748,6 @@ ibuf_insert( case IBUF_USE_ALL: ut_ad(!no_counter); goto skip_watch; - case IBUF_USE_COUNT: - break; } break; case IBUF_OP_COUNT: @@ -4267,15 +4256,12 @@ ibuf_restore_pos( return(TRUE); } - if (fil_space_get_flags(space) == ULINT_UNDEFINED) { - /* The tablespace has been dropped. It is possible - that another thread has deleted the insert buffer - entry. Do not complain. */ - ibuf_btr_pcur_commit_specify_mtr(pcur, mtr); - } else { - ib::error() << "ibuf cursor restoration fails!." + if (fil_space_t* s = fil_space_acquire_silent(space)) { + ib::error() << "ibuf cursor restoration fails!" 
" ibuf record inserted to page " - << space << ":" << page_no; + << space << ":" << page_no + << " in file " << s->chain.start->name; + s->release(); ib::error() << BUG_REPORT_MSG; @@ -4285,10 +4271,9 @@ ibuf_restore_pos( rec_print_old(stderr, page_rec_get_next(btr_pcur_get_rec(pcur))); - - ib::fatal() << "Failed to restore ibuf position."; } + ibuf_btr_pcur_commit_specify_mtr(pcur, mtr); return(FALSE); } @@ -4513,7 +4498,7 @@ ibuf_merge_or_delete_for_page( if (!bitmap_bits) { /* No inserts buffered for this page */ - fil_space_release(space); + space->release(); return; } } @@ -4593,8 +4578,7 @@ loop: } if (!btr_pcur_is_on_user_rec(&pcur)) { - ut_ad(btr_pcur_is_after_last_in_tree(&pcur, &mtr)); - + ut_ad(btr_pcur_is_after_last_in_tree(&pcur)); goto reset_bit; } @@ -4655,8 +4639,8 @@ loop: volume += page_dir_calc_reserved_space(1); - ut_a(volume <= 4 * UNIV_PAGE_SIZE - / IBUF_PAGE_SIZE_PER_FREE_SPACE); + ut_a(volume <= (4U << srv_page_size_shift) + / IBUF_PAGE_SIZE_PER_FREE_SPACE); #endif ibuf_insert_to_index_page( entry, block, dummy_index, &mtr); @@ -4778,7 +4762,7 @@ reset_bit: ibuf_mtr_commit(&mtr); if (space) { - fil_space_release(space); + space->release(); } btr_pcur_close(&pcur); @@ -4831,8 +4815,7 @@ loop: &pcur, &mtr); if (!btr_pcur_is_on_user_rec(&pcur)) { - ut_ad(btr_pcur_is_after_last_in_tree(&pcur, &mtr)); - + ut_ad(btr_pcur_is_after_last_in_tree(&pcur)); goto leave_loop; } diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h index a4f93280ff1..628c11bd096 100644 --- a/storage/innobase/include/btr0btr.h +++ b/storage/innobase/include/btr0btr.h @@ -2,7 +2,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2014, 2017, MariaDB Corporation. +Copyright (c) 2014, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -45,7 +45,7 @@ Created 6/2/1994 Heikki Tuuri /** Maximum record size which can be stored on a page, without using the special big record storage structure */ -#define BTR_PAGE_MAX_REC_SIZE (UNIV_PAGE_SIZE / 2 - 200) +#define BTR_PAGE_MAX_REC_SIZE (srv_page_size / 2 - 200) /** @brief Maximum depth of a B-tree in InnoDB. @@ -153,23 +153,23 @@ free the pages of externally stored fields. 
*/ record is in spatial index */ #define BTR_RTREE_DELETE_MARK 524288U -#define BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode) \ - ((latch_mode) & btr_latch_mode(~(BTR_INSERT \ - | BTR_DELETE_MARK \ - | BTR_RTREE_UNDO_INS \ - | BTR_RTREE_DELETE_MARK \ - | BTR_DELETE \ - | BTR_ESTIMATE \ - | BTR_IGNORE_SEC_UNIQUE \ - | BTR_ALREADY_S_LATCHED \ - | BTR_LATCH_FOR_INSERT \ - | BTR_LATCH_FOR_DELETE \ - | BTR_MODIFY_EXTERNAL))) +#define BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode) \ + ((latch_mode) & ulint(~(BTR_INSERT \ + | BTR_DELETE_MARK \ + | BTR_RTREE_UNDO_INS \ + | BTR_RTREE_DELETE_MARK \ + | BTR_DELETE \ + | BTR_ESTIMATE \ + | BTR_IGNORE_SEC_UNIQUE \ + | BTR_ALREADY_S_LATCHED \ + | BTR_LATCH_FOR_INSERT \ + | BTR_LATCH_FOR_DELETE \ + | BTR_MODIFY_EXTERNAL))) -#define BTR_LATCH_MODE_WITHOUT_INTENTION(latch_mode) \ - ((latch_mode) & btr_latch_mode(~(BTR_LATCH_FOR_INSERT \ - | BTR_LATCH_FOR_DELETE \ - | BTR_MODIFY_EXTERNAL))) +#define BTR_LATCH_MODE_WITHOUT_INTENTION(latch_mode) \ + ((latch_mode) & ulint(~(BTR_LATCH_FOR_INSERT \ + | BTR_LATCH_FOR_DELETE \ + | BTR_MODIFY_EXTERNAL))) /**************************************************************//** Report that an index page is corrupted. */ diff --git a/storage/innobase/include/btr0bulk.h b/storage/innobase/include/btr0bulk.h index 0f0051131a6..6a7b532f277 100644 --- a/storage/innobase/include/btr0bulk.h +++ b/storage/innobase/include/btr0bulk.h @@ -33,7 +33,7 @@ Created 03/11/2014 Shaohua Wang #include /** Innodb B-tree index fill factor for bulk load. */ -extern long innobase_fill_factor; +extern uint innobase_fill_factor; /* The proper function call sequence of PageBulk is as below: @@ -294,7 +294,8 @@ public: mem_heap_free(m_heap); UT_DELETE(m_page_bulks); ut_d(my_atomic_addlint( - &m_index->table->space->redo_skipped_count, -1)); + &m_index->table->space->redo_skipped_count, + ulint(-1))); } /** Initialization diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h index 8d8fe0bc236..dc1f6dd82d8 100644 --- a/storage/innobase/include/btr0cur.h +++ b/storage/innobase/include/btr0cur.h @@ -28,6 +28,7 @@ Created 10/16/1994 Heikki Tuuri #define btr0cur_h #include "univ.i" +#include "my_base.h" #include "dict0dict.h" #include "page0cur.h" #include "btr0types.h" @@ -600,7 +601,7 @@ btr_cur_parse_del_mark_set_sec_rec( @param[in] tuple2 range end, may also be empty tuple @param[in] mode2 search mode for range end @return estimated number of rows */ -int64_t +ha_rows btr_estimate_n_rows_in_range( dict_index_t* index, const dtuple_t* tuple1, @@ -834,7 +835,7 @@ btr_cur_latch_leaves( /** In the pessimistic delete, if the page data size drops below this limit, merging it to a neighbor is tried */ #define BTR_CUR_PAGE_COMPRESS_LIMIT(index) \ - ((UNIV_PAGE_SIZE * (ulint)((index)->merge_threshold)) / 100) + ((srv_page_size * (ulint)((index)->merge_threshold)) / 100) /** A slot in the path array. We store here info on a search path down the tree. Each slot contains data on a single level of the tree. */ @@ -982,11 +983,11 @@ We store locally a long enough prefix of each column so that we can determine the ordering parts of each index record without looking into the externally stored part. 
*/ /*-------------------------------------- @{ */ -#define BTR_EXTERN_SPACE_ID 0 /*!< space id where stored */ -#define BTR_EXTERN_PAGE_NO 4 /*!< page no where stored */ -#define BTR_EXTERN_OFFSET 8 /*!< offset of BLOB header +#define BTR_EXTERN_SPACE_ID 0U /*!< space id where stored */ +#define BTR_EXTERN_PAGE_NO 4U /*!< page no where stored */ +#define BTR_EXTERN_OFFSET 8U /*!< offset of BLOB header on that page */ -#define BTR_EXTERN_LEN 12 /*!< 8 bytes containing the +#define BTR_EXTERN_LEN 12U /*!< 8 bytes containing the length of the externally stored part of the BLOB. The 2 highest bits are diff --git a/storage/innobase/include/btr0cur.ic b/storage/innobase/include/btr0cur.ic index 4ab3819ad75..adcd92e2fc8 100644 --- a/storage/innobase/include/btr0cur.ic +++ b/storage/innobase/include/btr0cur.ic @@ -129,9 +129,8 @@ btr_cur_compress_recommendation( { const page_t* page; - ut_ad(mtr_is_block_fix( - mtr, btr_cur_get_block(cursor), - MTR_MEMO_PAGE_X_FIX, cursor->index->table)); + ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor), + MTR_MEMO_PAGE_X_FIX)); page = btr_cur_get_page(cursor); diff --git a/storage/innobase/include/btr0pcur.h b/storage/innobase/include/btr0pcur.h index 1d8690a3c90..0858796e5f6 100644 --- a/storage/innobase/include/btr0pcur.h +++ b/storage/innobase/include/btr0pcur.h @@ -426,21 +426,11 @@ btr_pcur_is_before_first_on_page( /*********************************************************//** Checks if the persistent cursor is before the first user record in the index tree. */ -UNIV_INLINE -ibool -btr_pcur_is_before_first_in_tree( -/*=============================*/ - btr_pcur_t* cursor, /*!< in: persistent cursor */ - mtr_t* mtr); /*!< in: mtr */ +static inline bool btr_pcur_is_before_first_in_tree(btr_pcur_t* cursor); /*********************************************************//** Checks if the persistent cursor is after the last user record in the index tree. */ -UNIV_INLINE -ibool -btr_pcur_is_after_last_in_tree( -/*===========================*/ - btr_pcur_t* cursor, /*!< in: persistent cursor */ - mtr_t* mtr); /*!< in: mtr */ +static inline bool btr_pcur_is_after_last_in_tree(btr_pcur_t* cursor); /*********************************************************//** Moves the persistent cursor to the next record on the same page. */ UNIV_INLINE diff --git a/storage/innobase/include/btr0pcur.ic b/storage/innobase/include/btr0pcur.ic index e12564fe547..3a082a08c45 100644 --- a/storage/innobase/include/btr0pcur.ic +++ b/storage/innobase/include/btr0pcur.ic @@ -209,12 +209,7 @@ btr_pcur_is_on_user_rec( /*********************************************************//** Checks if the persistent cursor is before the first user record in the index tree. */ -UNIV_INLINE -ibool -btr_pcur_is_before_first_in_tree( -/*=============================*/ - btr_pcur_t* cursor, /*!< in: persistent cursor */ - mtr_t* mtr) /*!< in: mtr */ +static inline bool btr_pcur_is_before_first_in_tree(btr_pcur_t* cursor) { ut_ad(cursor->pos_state == BTR_PCUR_IS_POSITIONED); ut_ad(cursor->latch_mode != BTR_NO_LATCHES); @@ -226,12 +221,7 @@ btr_pcur_is_before_first_in_tree( /*********************************************************//** Checks if the persistent cursor is after the last user record in the index tree. 
*/ -UNIV_INLINE -ibool -btr_pcur_is_after_last_in_tree( -/*===========================*/ - btr_pcur_t* cursor, /*!< in: persistent cursor */ - mtr_t* mtr) /*!< in: mtr */ +static inline bool btr_pcur_is_after_last_in_tree(btr_pcur_t* cursor) { ut_ad(cursor->pos_state == BTR_PCUR_IS_POSITIONED); ut_ad(cursor->latch_mode != BTR_NO_LATCHES); @@ -307,9 +297,7 @@ btr_pcur_move_to_next_user_rec( cursor->old_stored = false; loop: if (btr_pcur_is_after_last_on_page(cursor)) { - - if (btr_pcur_is_after_last_in_tree(cursor, mtr)) { - + if (btr_pcur_is_after_last_in_tree(cursor)) { return(FALSE); } @@ -344,19 +332,15 @@ btr_pcur_move_to_next( cursor->old_stored = false; if (btr_pcur_is_after_last_on_page(cursor)) { - - if (btr_pcur_is_after_last_in_tree(cursor, mtr)) { - + if (btr_pcur_is_after_last_in_tree(cursor)) { return(FALSE); } btr_pcur_move_to_next_page(cursor, mtr); - return(TRUE); } btr_pcur_move_to_next_on_page(cursor); - return(TRUE); } diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h index 04fb7014afe..de45bc7b39f 100644 --- a/storage/innobase/include/btr0sea.h +++ b/storage/innobase/include/btr0sea.h @@ -38,26 +38,20 @@ Created 2/17/1996 Heikki Tuuri /** Creates and initializes the adaptive search system at a database start. @param[in] hash_size hash table size. */ -void -btr_search_sys_create(ulint hash_size); +void btr_search_sys_create(ulint hash_size); /** Resize hash index hash table. @param[in] hash_size hash index hash table size */ -void -btr_search_sys_resize(ulint hash_size); +void btr_search_sys_resize(ulint hash_size); /** Frees the adaptive search system at a database shutdown. */ -void -btr_search_sys_free(); +void btr_search_sys_free(); /** Disable the adaptive hash search system and empty the index. @param need_mutex need to acquire dict_sys->mutex */ -void -btr_search_disable( - bool need_mutex); +void btr_search_disable(bool need_mutex); /** Enable the adaptive hash search system. */ -void -btr_search_enable(); +void btr_search_enable(); /** Returns the value of ref_count. The value is protected by latch. @param[in] info search info @@ -123,14 +117,11 @@ btr_search_move_or_delete_hash_entries( block->buf_fix_count == 0 or it is an index page which has already been removed from the buf_pool->page_hash i.e.: it is in state BUF_BLOCK_REMOVE_HASH */ -void -btr_search_drop_page_hash_index(buf_block_t* block); +void btr_search_drop_page_hash_index(buf_block_t* block); -/** Drop any adaptive hash index entries that may point to an index -page that may be in the buffer pool, when a page is evicted from the -buffer pool or freed in a file segment. -@param[in] page_id page id -@param[in] page_size page size */ +/** Drop possible adaptive hash index entries when a page is evicted +from the buffer pool or freed in a file, or the index is being dropped. +@param[in] page_id page id */ void btr_search_drop_page_hash_when_freed(const page_id_t& page_id); /** Updates the page hash index when a single record is inserted on a page. @@ -153,69 +144,52 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch); /** Updates the page hash index when a single record is deleted from a page. @param[in] cursor cursor which was positioned on the record to delete using btr_cur_search_, the record is not yet deleted.*/ -void -btr_search_update_hash_on_delete(btr_cur_t* cursor); +void btr_search_update_hash_on_delete(btr_cur_t* cursor); /** Validates the search system. 
@return true if ok */ -bool -btr_search_validate(); +bool btr_search_validate(); /** Lock all search latches in exclusive mode. */ -UNIV_INLINE -void -btr_search_x_lock_all(); +static inline void btr_search_x_lock_all(); /** Unlock all search latches from exclusive mode. */ -UNIV_INLINE -void -btr_search_x_unlock_all(); +static inline void btr_search_x_unlock_all(); /** Lock all search latches in shared mode. */ -UNIV_INLINE -void -btr_search_s_lock_all(); +static inline void btr_search_s_lock_all(); #ifdef UNIV_DEBUG /** Check if thread owns all the search latches. @param[in] mode lock mode check @retval true if owns all of them @retval false if does not own some of them */ -UNIV_INLINE -bool -btr_search_own_all(ulint mode); +static inline bool btr_search_own_all(ulint mode); /** Check if thread owns any of the search latches. @param[in] mode lock mode check @retval true if owns any of them @retval false if owns no search latch */ -UNIV_INLINE -bool -btr_search_own_any(ulint mode); +static inline bool btr_search_own_any(ulint mode); #endif /* UNIV_DEBUG */ /** Unlock all search latches from shared mode. */ -UNIV_INLINE -void -btr_search_s_unlock_all(); +static inline void btr_search_s_unlock_all(); /** Get the latch based on index attributes. A latch is selected from an array of latches using pair of index-id, space-id. @param[in] index index handler @return latch */ -UNIV_INLINE -rw_lock_t* -btr_get_search_latch(const dict_index_t* index); +static inline rw_lock_t* btr_get_search_latch(const dict_index_t* index); /** Get the hash-table based on index attributes. A table is selected from an array of tables using pair of index-id, space-id. @param[in] index index handler @return hash table */ -UNIV_INLINE -hash_table_t* -btr_get_search_table(const dict_index_t* index); +static inline hash_table_t* btr_get_search_table(const dict_index_t* index); #else /* BTR_CUR_HASH_ADAPT */ # define btr_search_sys_create(size) +# define btr_search_sys_free() # define btr_search_drop_page_hash_index(block) # define btr_search_s_lock_all(index) # define btr_search_s_unlock_all(index) @@ -230,15 +204,11 @@ btr_get_search_table(const dict_index_t* index); /** Create and initialize search info. @param[in,out] heap heap where created @return own: search info struct */ -UNIV_INLINE -btr_search_t* -btr_search_info_create(mem_heap_t* heap) +static inline btr_search_t* btr_search_info_create(mem_heap_t* heap) MY_ATTRIBUTE((nonnull, warn_unused_result)); /** @return the search info of an index */ -UNIV_INLINE -btr_search_t* -btr_search_get_info(dict_index_t* index) +static inline btr_search_t* btr_search_get_info(dict_index_t* index) { return(index->search_info); } diff --git a/storage/innobase/include/btr0sea.ic b/storage/innobase/include/btr0sea.ic index fba97835395..efa3667d229 100644 --- a/storage/innobase/include/btr0sea.ic +++ b/storage/innobase/include/btr0sea.ic @@ -31,9 +31,7 @@ Created 2/17/1996 Heikki Tuuri /** Create and initialize search info. @param[in,out] heap heap where created @return own: search info struct */ -UNIV_INLINE -btr_search_t* -btr_search_info_create(mem_heap_t* heap) +static inline btr_search_t* btr_search_info_create(mem_heap_t* heap) { btr_search_t* info = static_cast( mem_heap_zalloc(heap, sizeof(btr_search_t))); @@ -54,7 +52,7 @@ btr_search_info_update_slow(btr_search_t* info, btr_cur_t* cursor); /*********************************************************************//** Updates the search info. 
*/ -UNIV_INLINE +static inline void btr_search_info_update( /*===================*/ @@ -87,9 +85,7 @@ btr_search_info_update( } /** Lock all search latches in exclusive mode. */ -UNIV_INLINE -void -btr_search_x_lock_all() +static inline void btr_search_x_lock_all() { for (ulint i = 0; i < btr_ahi_parts; ++i) { rw_lock_x_lock(btr_search_latches[i]); @@ -97,9 +93,7 @@ btr_search_x_lock_all() } /** Unlock all search latches from exclusive mode. */ -UNIV_INLINE -void -btr_search_x_unlock_all() +static inline void btr_search_x_unlock_all() { for (ulint i = 0; i < btr_ahi_parts; ++i) { rw_lock_x_unlock(btr_search_latches[i]); @@ -107,9 +101,7 @@ btr_search_x_unlock_all() } /** Lock all search latches in shared mode. */ -UNIV_INLINE -void -btr_search_s_lock_all() +static inline void btr_search_s_lock_all() { for (ulint i = 0; i < btr_ahi_parts; ++i) { rw_lock_s_lock(btr_search_latches[i]); @@ -117,9 +109,7 @@ btr_search_s_lock_all() } /** Unlock all search latches from shared mode. */ -UNIV_INLINE -void -btr_search_s_unlock_all() +static inline void btr_search_s_unlock_all() { for (ulint i = 0; i < btr_ahi_parts; ++i) { rw_lock_s_unlock(btr_search_latches[i]); @@ -131,9 +121,7 @@ btr_search_s_unlock_all() @param[in] mode lock mode check @retval true if owns all of them @retval false if does not own some of them */ -UNIV_INLINE -bool -btr_search_own_all(ulint mode) +static inline bool btr_search_own_all(ulint mode) { for (ulint i = 0; i < btr_ahi_parts; ++i) { if (!rw_lock_own(btr_search_latches[i], mode)) { @@ -147,9 +135,7 @@ btr_search_own_all(ulint mode) @param[in] mode lock mode check @retval true if owns any of them @retval false if owns no search latch */ -UNIV_INLINE -bool -btr_search_own_any(ulint mode) +static inline bool btr_search_own_any(ulint mode) { for (ulint i = 0; i < btr_ahi_parts; ++i) { if (rw_lock_own(btr_search_latches[i], mode)) { @@ -163,12 +149,11 @@ btr_search_own_any(ulint mode) /** Get the adaptive hash search index latch for a b-tree. @param[in] index b-tree index @return latch */ -UNIV_INLINE -rw_lock_t* -btr_get_search_latch(const dict_index_t* index) +static inline rw_lock_t* btr_get_search_latch(const dict_index_t* index) { ut_ad(index != NULL); - ut_ad(index->table->space->id == index->table->space_id); + ut_ad(!index->table->space + || index->table->space->id == index->table->space_id); ulint ifold = ut_fold_ulint_pair(ulint(index->id), index->table->space_id); @@ -180,9 +165,7 @@ btr_get_search_latch(const dict_index_t* index) A table is selected from an array of tables using pair of index-id, space-id. 
@param[in] index index handler @return hash table */ -UNIV_INLINE -hash_table_t* -btr_get_search_table(const dict_index_t* index) +static inline hash_table_t* btr_get_search_table(const dict_index_t* index) { ut_ad(index != NULL); ut_ad(index->table->space->id == index->table->space_id); diff --git a/storage/innobase/include/buf0buddy.h b/storage/innobase/include/buf0buddy.h index f56ac2e5e70..8befc038f23 100644 --- a/storage/innobase/include/buf0buddy.h +++ b/storage/innobase/include/buf0buddy.h @@ -48,9 +48,9 @@ buf_buddy_alloc( the page resides */ ulint size, /*!< in: compressed page size (between UNIV_ZIP_SIZE_MIN and - UNIV_PAGE_SIZE) */ - ibool* lru) /*!< in: pointer to a variable - that will be assigned TRUE if + srv_page_size) */ + bool* lru) /*!< in: pointer to a variable + that will be assigned true if storage was allocated from the LRU list and buf_pool->mutex was temporarily released */ @@ -67,14 +67,14 @@ buf_buddy_free( void* buf, /*!< in: block to be freed, must not be pointed to by the buffer pool */ ulint size) /*!< in: block size, - up to UNIV_PAGE_SIZE */ + up to srv_page_size */ MY_ATTRIBUTE((nonnull)); /** Reallocate a block. @param[in] buf_pool buffer pool instance @param[in] buf block to be reallocated, must be pointed to by the buffer pool -@param[in] size block size, up to UNIV_PAGE_SIZE +@param[in] size block size, up to srv_page_size @retval false if failed because of no free blocks. */ bool buf_buddy_realloc( diff --git a/storage/innobase/include/buf0buddy.ic b/storage/innobase/include/buf0buddy.ic index 2b6d76df009..d166ab8441c 100644 --- a/storage/innobase/include/buf0buddy.ic +++ b/storage/innobase/include/buf0buddy.ic @@ -42,8 +42,8 @@ buf_buddy_alloc_low( buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */ ulint i, /*!< in: index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */ - ibool* lru) /*!< in: pointer to a variable that - will be assigned TRUE if storage was + bool* lru) /*!< in: pointer to a variable that + will be assigned true if storage was allocated from the LRU list and buf_pool->mutex was temporarily released */ @@ -96,9 +96,9 @@ buf_buddy_alloc( the page resides */ ulint size, /*!< in: compressed page size (between UNIV_ZIP_SIZE_MIN and - UNIV_PAGE_SIZE) */ - ibool* lru) /*!< in: pointer to a variable - that will be assigned TRUE if + srv_page_size) */ + bool* lru) /*!< in: pointer to a variable + that will be assigned true if storage was allocated from the LRU list and buf_pool->mutex was temporarily released */ @@ -106,7 +106,7 @@ buf_buddy_alloc( ut_ad(buf_pool_mutex_own(buf_pool)); ut_ad(ut_is_2pow(size)); ut_ad(size >= UNIV_ZIP_SIZE_MIN); - ut_ad(size <= UNIV_PAGE_SIZE); + ut_ad(size <= srv_page_size); return((byte*) buf_buddy_alloc_low(buf_pool, buf_buddy_get_slot(size), lru)); @@ -123,12 +123,12 @@ buf_buddy_free( void* buf, /*!< in: block to be freed, must not be pointed to by the buffer pool */ ulint size) /*!< in: block size, - up to UNIV_PAGE_SIZE */ + up to srv_page_size */ { ut_ad(buf_pool_mutex_own(buf_pool)); ut_ad(ut_is_2pow(size)); ut_ad(size >= UNIV_ZIP_SIZE_MIN); - ut_ad(size <= UNIV_PAGE_SIZE); + ut_ad(size <= srv_page_size); buf_buddy_free_low(buf_pool, buf, buf_buddy_get_slot(size)); } diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 4c2f04996da..9136c25acfd 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -1577,7 +1577,7 @@ public: bool encrypted; /*!< page is still encrypted */ ulint real_size; /*!< Real size of the page - Normal 
pages == UNIV_PAGE_SIZE + Normal pages == srv_page_size page compressed pages, payload size alligned to sector boundary. */ @@ -1712,9 +1712,9 @@ struct buf_block_t{ buf_pool->page_hash can point to buf_page_t or buf_block_t */ byte* frame; /*!< pointer to buffer frame which - is of size UNIV_PAGE_SIZE, and + is of size srv_page_size, and aligned to an address divisible by - UNIV_PAGE_SIZE */ + srv_page_size */ BPageLock lock; /*!< read-write lock of the buffer frame */ UT_LIST_NODE_T(buf_block_t) unzip_LRU; @@ -1874,7 +1874,7 @@ struct buf_block_t{ /**********************************************************************//** Compute the hash fold value for blocks in buf_pool->zip_hash. */ /* @{ */ -#define BUF_POOL_ZIP_FOLD_PTR(ptr) ((ulint) (ptr) / UNIV_PAGE_SIZE) +#define BUF_POOL_ZIP_FOLD_PTR(ptr) (ulint(ptr) >> srv_page_size_shift) #define BUF_POOL_ZIP_FOLD(b) BUF_POOL_ZIP_FOLD_PTR((b)->frame) #define BUF_POOL_ZIP_FOLD_BPAGE(b) BUF_POOL_ZIP_FOLD((buf_block_t*) (b)) /* @} */ diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic index 38c52d5e608..8314797e78d 100644 --- a/storage/innobase/include/buf0buf.ic +++ b/storage/innobase/include/buf0buf.ic @@ -2,7 +2,7 @@ Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. -Copyright (c) 2014, 2017, MariaDB Corporation. +Copyright (c) 2014, 2018, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -115,7 +115,7 @@ ulint buf_pool_get_n_pages(void) /*======================*/ { - return(buf_pool_get_curr_size() / UNIV_PAGE_SIZE); + return buf_pool_get_curr_size() >> srv_page_size_shift; } /********************************************************************//** @@ -761,7 +761,7 @@ buf_frame_align( ut_ad(ptr); - frame = (buf_frame_t*) ut_align_down(ptr, UNIV_PAGE_SIZE); + frame = (buf_frame_t*) ut_align_down(ptr, srv_page_size); return(frame); } @@ -778,11 +778,11 @@ buf_ptr_get_fsp_addr( fil_addr_t* addr) /*!< out: page offset and byte offset */ { const page_t* page = (const page_t*) ut_align_down(ptr, - UNIV_PAGE_SIZE); + srv_page_size); *space = mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); addr->page = mach_read_from_4(page + FIL_PAGE_OFFSET); - addr->boffset = ut_align_offset(ptr, UNIV_PAGE_SIZE); + addr->boffset = ut_align_offset(ptr, srv_page_size); } /**********************************************************************//** @@ -867,7 +867,7 @@ buf_frame_copy( { ut_ad(buf && frame); - ut_memcpy(buf, frame, UNIV_PAGE_SIZE); + ut_memcpy(buf, frame, srv_page_size); return(buf); } @@ -955,7 +955,7 @@ ulint buf_block_fix( buf_page_t* bpage) { - return(my_atomic_add32((int32*) &bpage->buf_fix_count, 1) + 1); + return uint32(my_atomic_add32((int32*) &bpage->buf_fix_count, 1) + 1); } /** Increments the bufferfix count. @@ -1003,9 +1003,10 @@ ulint buf_block_unfix( buf_page_t* bpage) { - ulint count = my_atomic_add32((int32*) &bpage->buf_fix_count, -1) - 1; - ut_ad(count + 1 != 0); - return(count); + uint32 count = uint32(my_atomic_add32((int32*) &bpage->buf_fix_count, + -1)); + ut_ad(count != 0); + return count - 1; } /** Decrements the bufferfix count. 
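
The buf0buf.ic hunks above replace divisions by UNIV_PAGE_SIZE with right shifts by srv_page_size_shift and align pointers to srv_page_size instead of the compile-time constant. Below is a minimal standalone sketch of that arithmetic, assuming example values for the page size; it is not InnoDB code and only mimics what ut_align_down()/ut_align_offset() compute for power-of-two sizes:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main()
    {
        /* Assumed example values; InnoDB derives srv_page_size and
        srv_page_size_shift from the innodb_page_size setting. */
        const std::size_t page_size_shift = 14;                /* 16KiB pages */
        const std::size_t page_size = std::size_t(1) << page_size_shift;

        /* Dividing by a power-of-two page size equals shifting right,
        which is what the ">> srv_page_size_shift" rewrites rely on. */
        const std::size_t pool_bytes = 128u << 20;              /* 128MiB pool */
        assert(pool_bytes / page_size == pool_bytes >> page_size_shift);

        /* Aligning an address down to its page frame is a mask operation;
        the in-page offset is the masked-off remainder. */
        const std::uintptr_t ptr = 0x12345678u;
        const std::uintptr_t frame = ptr & ~(std::uintptr_t(page_size) - 1);
        const std::uintptr_t boffset = ptr & (std::uintptr_t(page_size) - 1);
        assert(frame + boffset == ptr);
        return 0;
    }
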
@@ -1424,8 +1425,8 @@ bool buf_pool_is_obsolete( ulint withdraw_clock) { - return(buf_pool_withdrawing - || buf_withdraw_clock != withdraw_clock); + return(UNIV_UNLIKELY(buf_pool_withdrawing + || buf_withdraw_clock != withdraw_clock)); } /** Calculate aligned buffer pool size based on srv_buf_pool_chunk_unit, diff --git a/storage/innobase/include/buf0checksum.h b/storage/innobase/include/buf0checksum.h index 20955a5b2e6..dc0dbafa4c4 100644 --- a/storage/innobase/include/buf0checksum.h +++ b/storage/innobase/include/buf0checksum.h @@ -36,7 +36,7 @@ when it is written to a file and also checked for a match when reading from the file. When reading we allow both normal CRC32 and CRC-legacy-big-endian variants. Note that we must be careful to calculate the same value on 32-bit and 64-bit architectures. -@param[in] page buffer page (UNIV_PAGE_SIZE bytes) +@param[in] page buffer page (srv_page_size bytes) @param[in] use_legacy_big_endian if true then use big endian byteorder when converting byte strings to integers @return checksum */ diff --git a/storage/innobase/include/buf0dblwr.h b/storage/innobase/include/buf0dblwr.h index 598609e2be4..5d2e5e9fdf7 100644 --- a/storage/innobase/include/buf0dblwr.h +++ b/storage/innobase/include/buf0dblwr.h @@ -131,7 +131,7 @@ struct buf_dblwr_t{ doublewrite block (64 pages) */ ulint block2; /*!< page number of the second block */ ulint first_free;/*!< first free position in write_buf - measured in units of UNIV_PAGE_SIZE */ + measured in units of srv_page_size */ ulint b_reserved;/*!< number of slots currently reserved for batch flush. */ os_event_t b_event;/*!< event where threads wait for a @@ -150,7 +150,7 @@ struct buf_dblwr_t{ buffer. */ byte* write_buf;/*!< write buffer used in writing to the doublewrite buffer, aligned to an - address divisible by UNIV_PAGE_SIZE + address divisible by srv_page_size (which is required by Windows aio) */ byte* write_buf_unaligned;/*!< pointer to write_buf, but unaligned */ diff --git a/storage/innobase/include/buf0flu.h b/storage/innobase/include/buf0flu.h index 198f122e5a4..ace2e2f6ef1 100644 --- a/storage/innobase/include/buf0flu.h +++ b/storage/innobase/include/buf0flu.h @@ -217,16 +217,10 @@ buf_flush_ready_for_replace( #ifdef UNIV_DEBUG /** Disables page cleaner threads (coordinator and workers). It's used by: SET GLOBAL innodb_page_cleaner_disabled_debug = 1 (0). -@param[in] thd thread handle -@param[in] var pointer to system variable -@param[out] var_ptr where the formal string goes @param[in] save immediate result from check function */ -void -buf_flush_page_cleaner_disabled_debug_update( - THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save); +void buf_flush_page_cleaner_disabled_debug_update(THD*, + st_mysql_sys_var*, void*, + const void* save); #endif /* UNIV_DEBUG */ /******************************************************************//** diff --git a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h index 547a09ae319..f811b5a6811 100644 --- a/storage/innobase/include/buf0lru.h +++ b/storage/innobase/include/buf0lru.h @@ -51,18 +51,20 @@ These are low-level functions /** Minimum LRU list length for which the LRU_old pointer is defined */ #define BUF_LRU_OLD_MIN_LEN 512 /* 8 megabytes of 16k pages */ +#ifdef BTR_CUR_HASH_ADAPT +struct dict_table_t; +/** Drop the adaptive hash index for a tablespace. 
+@param[in,out] table table */ +void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table); +#else +# define buf_LRU_drop_page_hash_for_tablespace(table) +#endif /* BTR_CUR_HASH_ADAPT */ + /** Empty the flush list for all pages belonging to a tablespace. @param[in] id tablespace identifier @param[in,out] observer flush observer, or NULL if nothing is to be written */ -void -buf_LRU_flush_or_remove_pages( - ulint id, - FlushObserver* observer -#ifdef BTR_CUR_HASH_ADAPT - , bool drop_ahi = false /*!< whether to drop the adaptive hash index */ -#endif /* BTR_CUR_HASH_ADAPT */ - ); +void buf_LRU_flush_or_remove_pages(ulint id, FlushObserver* observer); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /********************************************************************//** diff --git a/storage/innobase/include/buf0types.h b/storage/innobase/include/buf0types.h index 719699f5ee2..2847e328515 100644 --- a/storage/innobase/include/buf0types.h +++ b/storage/innobase/include/buf0types.h @@ -113,7 +113,7 @@ is_checksum_strict(ulint algo) #define BUF_BUDDY_LOW (1U << BUF_BUDDY_LOW_SHIFT) /** Actual number of buddy sizes based on current page size */ -#define BUF_BUDDY_SIZES (UNIV_PAGE_SIZE_SHIFT - BUF_BUDDY_LOW_SHIFT) +#define BUF_BUDDY_SIZES (srv_page_size_shift - BUF_BUDDY_LOW_SHIFT) /** Maximum number of buddy sizes based on the max page size */ #define BUF_BUDDY_SIZES_MAX (UNIV_PAGE_SIZE_SHIFT_MAX \ @@ -121,7 +121,7 @@ is_checksum_strict(ulint algo) /** twice the maximum block size of the buddy system; the underlying memory is aligned by this amount: -this must be equal to UNIV_PAGE_SIZE */ +this must be equal to srv_page_size */ #define BUF_BUDDY_HIGH (BUF_BUDDY_LOW << BUF_BUDDY_SIZES) /* @} */ diff --git a/storage/innobase/include/data0type.h b/storage/innobase/include/data0type.h index 2eaa9042e9c..b999106fee0 100644 --- a/storage/innobase/include/data0type.h +++ b/storage/innobase/include/data0type.h @@ -365,9 +365,9 @@ dtype_form_prtype(ulint old_prtype, ulint charset_coll) Determines if a MySQL string type is a subset of UTF-8. This function may return false negatives, in case further character-set collation codes are introduced in MySQL later. -@return TRUE if a subset of UTF-8 */ +@return whether a subset of UTF-8 */ UNIV_INLINE -ibool +bool dtype_is_utf8( /*==========*/ ulint prtype);/*!< in: precise data type */ diff --git a/storage/innobase/include/data0type.ic b/storage/innobase/include/data0type.ic index c0b32953cff..56a588562ee 100644 --- a/storage/innobase/include/data0type.ic +++ b/storage/innobase/include/data0type.ic @@ -43,9 +43,9 @@ dtype_get_charset_coll( Determines if a MySQL string type is a subset of UTF-8. This function may return false negatives, in case further character-set collation codes are introduced in MySQL later. 
-@return TRUE if a subset of UTF-8 */ +@return whether a subset of UTF-8 */ UNIV_INLINE -ibool +bool dtype_is_utf8( /*==========*/ ulint prtype) /*!< in: precise data type */ @@ -58,10 +58,10 @@ dtype_is_utf8( case 33: /* utf8_general_ci */ case 83: /* utf8_bin */ case 254: /* utf8_general_cs */ - return(TRUE); + return true; } - return(FALSE); + return false; } /*********************************************************************//** @@ -235,9 +235,8 @@ dtype_new_store_for_order_and_null_size( ulint prefix_len)/*!< in: prefix length to replace type->len, or 0 */ { -#if 6 != DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE -#error "6 != DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE" -#endif + compile_time_assert(6 == DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE); + ulint len; ut_ad(type); @@ -280,10 +279,7 @@ dtype_read_for_order_and_null_size( dtype_t* type, /*!< in: type struct */ const byte* buf) /*!< in: buffer for stored type order info */ { -#if 4 != DATA_ORDER_NULL_TYPE_BUF_SIZE -# error "4 != DATA_ORDER_NULL_TYPE_BUF_SIZE" -#endif - + compile_time_assert(4 == DATA_ORDER_NULL_TYPE_BUF_SIZE); type->mtype = buf[0] & 63; type->prtype = buf[1]; @@ -309,11 +305,7 @@ dtype_new_read_for_order_and_null_size( dtype_t* type, /*!< in: type struct */ const byte* buf) /*!< in: buffer for stored type order info */ { - ulint charset_coll; - -#if 6 != DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE -#error "6 != DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE" -#endif + compile_time_assert(6 == DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE); type->mtype = buf[0] & 63; type->prtype = buf[1]; @@ -328,7 +320,7 @@ dtype_new_read_for_order_and_null_size( type->len = mach_read_from_2(buf + 2); - charset_coll = mach_read_from_2(buf + 4) & CHAR_COLL_MASK; + ulint charset_coll = mach_read_from_2(buf + 4) & CHAR_COLL_MASK; if (dtype_is_string_type(type->mtype)) { ut_a(charset_coll <= MAX_CHAR_COLL_NUM); @@ -479,6 +471,7 @@ dtype_get_fixed_size_low( return(0); } #endif /* UNIV_DEBUG */ + /* fall through */ case DATA_CHAR: case DATA_FIXBINARY: case DATA_INT: @@ -552,6 +545,7 @@ dtype_get_min_size_low( return(0); } #endif /* UNIV_DEBUG */ + /* fall through */ case DATA_CHAR: case DATA_FIXBINARY: case DATA_INT: diff --git a/storage/innobase/include/dict0boot.ic b/storage/innobase/include/dict0boot.ic index e40c3f844e3..845a0a3888d 100644 --- a/storage/innobase/include/dict0boot.ic +++ b/storage/innobase/include/dict0boot.ic @@ -58,10 +58,7 @@ dict_sys_read_row_id( /*=================*/ const byte* field) /*!< in: record field */ { -#if DATA_ROW_ID_LEN != 6 -# error "DATA_ROW_ID_LEN != 6" -#endif - + compile_time_assert(DATA_ROW_ID_LEN == 6); return(mach_read_from_6(field)); } @@ -74,10 +71,7 @@ dict_sys_write_row_id( byte* field, /*!< in: record field */ row_id_t row_id) /*!< in: row id */ { -#if DATA_ROW_ID_LEN != 6 -# error "DATA_ROW_ID_LEN != 6" -#endif - + compile_time_assert(DATA_ROW_ID_LEN == 6); mach_write_to_6(field, row_id); } diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h index af8684ff08d..5538b3e98ec 100644 --- a/storage/innobase/include/dict0dict.h +++ b/storage/innobase/include/dict0dict.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. Copyright (c) 2013, 2018, MariaDB Corporation. 
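
Several hunks above (ibuf0ibuf.cc, data0type.ic, dict0boot.ic) turn #if/#error guards into compile_time_assert() calls, which also work for constants the preprocessor cannot evaluate. A standalone sketch of the technique, using a hypothetical macro name rather than MariaDB's actual compile_time_assert() definition:

    #include <cstddef>

    /* Illustrative stand-in only (requires C++11 static_assert). */
    #define example_compile_time_assert(expr) static_assert(expr, #expr)

    /* Assumed stand-in for a constant such as DATA_ROW_ID_LEN. */
    static const std::size_t example_row_id_len = 6;

    /* Fails at compile time if the constant ever changes, instead of
    relying on the preprocessor being able to see a macro value. */
    example_compile_time_assert(example_row_id_len == 6);
    example_compile_time_assert(!(example_row_id_len % 2));
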
@@ -683,7 +683,7 @@ dict_table_get_next_index( /* Skip corrupted index */ #define dict_table_skip_corrupt_index(index) \ - while (index && dict_index_is_corrupted(index)) { \ + while (index && index->is_corrupted()) { \ index = dict_table_get_next_index(index); \ } @@ -939,8 +939,7 @@ dict_table_has_atomic_blobs(const dict_table_t* table) @param[in] zip_ssize Zip Shift Size @param[in] use_data_dir Table uses DATA DIRECTORY @param[in] page_compressed Table uses page compression -@param[in] page_compression_level Page compression level -@param[in] not_used For future */ +@param[in] page_compression_level Page compression level */ UNIV_INLINE void dict_tf_set( @@ -949,8 +948,7 @@ dict_tf_set( ulint zip_ssize, bool use_data_dir, bool page_compressed, - ulint page_compression_level, - ulint not_used); + ulint page_compression_level); /** Convert a 32 bit integer table flags to the 32 bit FSP Flags. Fsp Flags are written into the tablespace header at the offset @@ -1079,7 +1077,9 @@ dict_make_room_in_cache( ulint max_tables, /*!< in: max tables allowed in cache */ ulint pct_check); /*!< in: max percent to check */ -#define BIG_ROW_SIZE 1024 +/** Clears the virtual column's index list before index is being freed. +@param[in] index Index being freed */ +void dict_index_remove_from_v_col_list(dict_index_t* index); /** Adds an index to the dictionary cache, with possible indexing newly added column. @@ -1771,16 +1771,6 @@ dict_table_is_corrupted( const dict_table_t* table) /*!< in: table */ MY_ATTRIBUTE((nonnull, warn_unused_result)); -/**********************************************************************//** -Check whether the index is corrupted. -@return nonzero for corrupted index, zero for valid indexes */ -UNIV_INLINE -ulint -dict_index_is_corrupted( -/*====================*/ - const dict_index_t* index) /*!< in: index */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); - /**********************************************************************//** Flags an index and table corrupted both in the data dictionary cache and in the system table SYS_INDEXES. */ @@ -1844,18 +1834,6 @@ dict_tf2_is_valid( ulint flags, ulint flags2); -/********************************************************************//** -Check if the tablespace for the table has been discarded. -@return true if the tablespace has been discarded. */ -UNIV_INLINE -bool -dict_table_is_discarded( -/*====================*/ - const dict_table_t* table) /*!< in: table to check */ - MY_ATTRIBUTE((warn_unused_result)); - -#define dict_table_is_temporary(table) (table)->is_temporary() - /*********************************************************************//** This function should be called whenever a page is successfully compressed. Updates the compression padding information. */ @@ -1896,7 +1874,6 @@ dict_index_node_ptr_max_size( /*=========================*/ const dict_index_t* index) /*!< in: index */ MY_ATTRIBUTE((warn_unused_result)); -#define dict_col_is_virtual(col) (col)->is_virtual() /** encode number of columns and number of virtual columns in one 4 bytes value. 
We could do this because the number of columns in diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic index 01710a0e93a..4c6c45a3b1e 100644 --- a/storage/innobase/include/dict0dict.ic +++ b/storage/innobase/include/dict0dict.ic @@ -311,7 +311,7 @@ dict_index_is_spatial( ut_ad(index); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - return(index->type & DICT_SPATIAL); + return ulint(UNIV_EXPECT(index->type & DICT_SPATIAL, 0)); } /** Check whether the index contains a virtual column @@ -372,7 +372,7 @@ dict_table_get_n_user_cols( /* n_cols counts stored columns only. A table may contain virtual columns and no user-specified stored columns at all. */ ut_ad(table->n_cols >= DATA_N_SYS_COLS); - return(table->n_cols - DATA_N_SYS_COLS); + return unsigned(table->n_cols) - DATA_N_SYS_COLS; } /********************************************************************//** @@ -519,13 +519,8 @@ dict_table_get_sys_col( ulint sys) /*!< in: DATA_ROW_ID, ... */ { dict_col_t* col; - - ut_ad(table); - ut_ad(sys < DATA_N_SYS_COLS); - ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); - - col = dict_table_get_nth_col(table, table->n_cols - DATA_N_SYS_COLS - + sys); + col = dict_table_get_nth_col(table, + dict_table_get_sys_col_no(table, sys)); ut_ad(col->mtype == DATA_SYS); ut_ad(col->prtype == (sys | DATA_NOT_NULL)); @@ -547,7 +542,7 @@ dict_table_get_sys_col_no( ut_ad(sys < DATA_N_SYS_COLS); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); - return(table->n_cols - DATA_N_SYS_COLS + sys); + return unsigned(table->n_cols) + (sys - DATA_N_SYS_COLS); } /********************************************************************//** @@ -560,11 +555,6 @@ dict_table_is_comp( const dict_table_t* table) /*!< in: table */ { ut_ad(table); - -#if DICT_TF_COMPACT != 1 -#error "DICT_TF_COMPACT must be 1" -#endif - return (table->flags & DICT_TF_COMPACT) != 0; } @@ -600,8 +590,8 @@ dict_tf_is_valid_not_redundant(ulint flags) for the uncompressed page format */ return(false); } else if (zip_ssize > PAGE_ZIP_SSIZE_MAX - || zip_ssize > UNIV_PAGE_SIZE_SHIFT - || UNIV_PAGE_SIZE_SHIFT > UNIV_ZIP_SIZE_SHIFT_MAX) { + || zip_ssize > srv_page_size_shift + || srv_page_size_shift > UNIV_ZIP_SIZE_SHIFT_MAX) { /* KEY_BLOCK_SIZE is out of bounds, or ROW_FORMAT=COMPRESSED is not supported with this innodb_page_size (only up to 16KiB) */ @@ -703,8 +693,7 @@ dict_tf_get_rec_format( @param[in] zip_ssize Zip Shift Size @param[in] use_data_dir Table uses DATA DIRECTORY @param[in] page_compressed Table uses page compression -@param[in] page_compression_level Page compression level -@param[in] not_used For future */ +@param[in] page_compression_level Page compression level */ UNIV_INLINE void dict_tf_set( @@ -714,8 +703,7 @@ dict_tf_set( ulint zip_ssize, bool use_data_dir, bool page_compressed, - ulint page_compression_level, - ulint not_used) + ulint page_compression_level) { switch (format) { case REC_FORMAT_REDUNDANT: @@ -857,7 +845,7 @@ dict_tf_get_page_size( ut_ad(zip_size <= UNIV_ZIP_SIZE_MAX); - return(page_size_t(zip_size, univ_page_size.logical(), true)); + return(page_size_t(zip_size, srv_page_size, true)); } /*********************************************************************//** @@ -1187,7 +1175,7 @@ ulint dict_index_get_space_reserve(void) /*==============================*/ { - return(UNIV_PAGE_SIZE / 16); + return(srv_page_size / 16); } /********************************************************************//** @@ -1402,33 +1390,6 @@ dict_table_is_corrupted( return(table->corrupted); } 
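
In the dict0dict.ic hunk above, dict_index_is_spatial() now wraps its flag test in UNIV_EXPECT, matching the UNIV_UNLIKELY added to buf_pool_is_obsolete() earlier; these macros are branch-prediction hints. The sketch below shows the common pattern with hypothetical names and an assumed flag value, not InnoDB's own definitions:

    /* Minimal likely/unlikely sketch assuming a GCC/clang-style
    __builtin_expect; other compilers just evaluate the expression. */
    #if defined(__GNUC__)
    # define example_expect(expr, constant) __builtin_expect(expr, constant)
    #else
    # define example_expect(expr, constant) (expr)
    #endif
    #define example_unlikely(cond) example_expect(!!(cond), 0)

    bool is_spatial(unsigned type_flags)
    {
        /* Most indexes are not spatial, so hint that the test is
        expected to be false. 0x40U is an assumed flag bit here,
        not the real DICT_SPATIAL value. */
        return example_unlikely(type_flags & 0x40U);
    }
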
-/********************************************************************//** -Check whether the index is corrupted. -@return nonzero for corrupted index, zero for valid indexes */ -UNIV_INLINE -ulint -dict_index_is_corrupted( -/*====================*/ - const dict_index_t* index) /*!< in: index */ -{ - ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - - return((index->type & DICT_CORRUPT) - || (index->table && index->table->corrupted)); -} - -/********************************************************************//** -Check if the tablespace for the table has been discarded. -@return true if the tablespace has been discarded. */ -UNIV_INLINE -bool -dict_table_is_discarded( -/*====================*/ - const dict_table_t* table) /*!< in: table to check */ -{ - return(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_DISCARDED)); -} - /** Check if the table is found is a file_per_table tablespace. This test does not use table flags2 since some REDUNDANT tables in the system tablespace may have garbage in the MIX_LEN field where flags2 is @@ -1450,16 +1411,8 @@ bool dict_table_is_file_per_table( const dict_table_t* table) /*!< in: table to check */ { - bool is_file_per_table = table->space != fil_system.sys_space + return table->space != fil_system.sys_space && table->space != fil_system.temp_space; - - /* If the table is file-per-table and it is not redundant, then - it should have the flags2 bit for DICT_TF2_USE_FILE_PER_TABLE. */ - ut_ad(!is_file_per_table - || !DICT_TF_GET_COMPACT(table->flags) - || DICT_TF2_FLAG_IS_SET(table, DICT_TF2_USE_FILE_PER_TABLE)); - - return(is_file_per_table); } /** Get reference count. diff --git a/storage/innobase/include/dict0load.h b/storage/innobase/include/dict0load.h index 8256ebb24cd..9b798353afd 100644 --- a/storage/innobase/include/dict0load.h +++ b/storage/innobase/include/dict0load.h @@ -233,7 +233,6 @@ information @return error message, or NULL on success */ const char* dict_process_sys_virtual_rec( - mem_heap_t* heap, const rec_t* rec, table_id_t* table_id, ulint* pos, diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index 6b3c0282c7b..6c116b9a428 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -109,7 +109,7 @@ are described in fsp0fsp.h. */ /** dict_table_t::flags bit 0 is equal to 0 if the row format = Redundant */ #define DICT_TF_REDUNDANT 0 /*!< Redundant row format. */ /** dict_table_t::flags bit 0 is equal to 1 if the row format = Compact */ -#define DICT_TF_COMPACT 1 /*!< Compact row format. */ +#define DICT_TF_COMPACT 1U /*!< Compact row format. */ /** This bitmask is used in SYS_TABLES.N_COLS to set and test whether the Compact page format is used, i.e ROW_FORMAT != REDUNDANT */ @@ -866,8 +866,8 @@ struct dict_index_t{ in a clustered index record, if the fields before it are known to be of a fixed size, 0 otherwise */ -#if (1< 0); DBUG_ASSERT(n_prefix <= n_fields); unsigned n = n_nullable; for (; n_prefix < n_fields; n_prefix++) { const dict_col_t* col = fields[n_prefix].col; - DBUG_ASSERT(is_dummy || col->is_instant()); DBUG_ASSERT(!col->is_virtual()); n -= col->is_nullable(); } @@ -1093,7 +1094,7 @@ struct dict_index_t{ fields[i].col->remove_instant(); } n_core_fields = n_fields; - n_core_null_bytes = UT_BITS_IN_BYTES(n_nullable); + n_core_null_bytes = UT_BITS_IN_BYTES(unsigned(n_nullable)); } /** Check if record in clustered index is historical row. 
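
The dict0mem.h hunks above recompute n_core_null_bytes as UT_BITS_IN_BYTES(unsigned(n_nullable)), i.e. the number of bytes needed to hold one null flag per nullable core field. A small self-contained sketch of that bits-to-bytes rounding, with an illustrative helper name rather than the real macro:

    #include <cassert>

    /* Bytes needed for n single-bit flags: round n up to a multiple of 8. */
    inline unsigned bits_in_bytes(unsigned n)
    {
        return (n + 7) / 8;
    }

    int main()
    {
        assert(bits_in_bytes(0) == 0);
        assert(bits_in_bytes(3) == 1);   /* 3 nullable fields -> 1 byte */
        assert(bits_in_bytes(8) == 1);
        assert(bits_in_bytes(9) == 2);
        return 0;
    }
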
@@ -1453,7 +1454,7 @@ struct dict_table_t { /** @return whether the table supports transactions */ bool no_rollback() const { - return !(~flags & DICT_TF_MASK_NO_ROLLBACK); + return !(~unsigned(flags) & DICT_TF_MASK_NO_ROLLBACK); } /** @return whether this is a temporary table */ bool is_temporary() const @@ -1514,7 +1515,7 @@ struct dict_table_t { void inc_fk_checks() { #ifdef UNIV_DEBUG - lint fk_checks= + lint fk_checks= (lint) #endif my_atomic_addlint(&n_foreign_key_checks_running, 1); ut_ad(fk_checks >= 0); @@ -1522,9 +1523,9 @@ struct dict_table_t { void dec_fk_checks() { #ifdef UNIV_DEBUG - lint fk_checks= + lint fk_checks= (lint) #endif - my_atomic_addlint(&n_foreign_key_checks_running, -1); + my_atomic_addlint(&n_foreign_key_checks_running, ulint(-1)); ut_ad(fk_checks > 0); } @@ -1920,10 +1921,7 @@ inline void dict_index_t::set_modified(mtr_t& mtr) const mtr.set_named_space(table->space); } -inline bool dict_index_t::is_readable() const -{ - return(UNIV_LIKELY(!table->file_unreadable)); -} +inline bool dict_index_t::is_readable() const { return table->is_readable(); } inline bool dict_index_t::is_instant() const { @@ -1936,6 +1934,13 @@ inline bool dict_index_t::is_instant() const return(n_core_fields != n_fields); } +inline bool dict_index_t::is_corrupted() const +{ + return UNIV_UNLIKELY(online_status >= ONLINE_INDEX_ABORTED + || (type & DICT_CORRUPT) + || (table && table->corrupted)); +} + /*******************************************************************//** Initialise the table lock list. */ void diff --git a/storage/innobase/include/dict0stats_bg.h b/storage/innobase/include/dict0stats_bg.h index e66666b66a3..d66afdd4b25 100644 --- a/storage/innobase/include/dict0stats_bg.h +++ b/storage/innobase/include/dict0stats_bg.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2012, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -116,16 +116,9 @@ dict_stats_thread_deinit(); #ifdef UNIV_DEBUG /** Disables dict stats thread. It's used by: SET GLOBAL innodb_dict_stats_disabled_debug = 1 (0). -@param[in] thd thread handle -@param[in] var pointer to system variable -@param[out] var_ptr where the formal string goes @param[in] save immediate result from check function */ -void -dict_stats_disabled_debug_update( - THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save); +void dict_stats_disabled_debug_update(THD*, st_mysql_sys_var*, void*, + const void* save); #endif /* UNIV_DEBUG */ /*****************************************************************//** diff --git a/storage/innobase/include/dict0types.h b/storage/innobase/include/dict0types.h index f9ecf9b341d..f2fcae69bd5 100644 --- a/storage/innobase/include/dict0types.h +++ b/storage/innobase/include/dict0types.h @@ -28,6 +28,7 @@ Created 1/8/1996 Heikki Tuuri #define dict0types_h #include +#include struct dict_sys_t; struct dict_col_t; @@ -114,7 +115,7 @@ struct table_name_t } /** @return the length of the schema name, in bytes */ - size_t dblen() const { return dbend() - m_name; } + size_t dblen() const { return size_t(dbend() - m_name); } /** Determine the filename-safe encoded table name. 
@return the filename-safe encoded table name */ diff --git a/storage/innobase/include/dyn0buf.h b/storage/innobase/include/dyn0buf.h index 3126c8e4683..4b6c808b47c 100644 --- a/storage/innobase/include/dyn0buf.h +++ b/storage/innobase/include/dyn0buf.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2013, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -32,14 +33,13 @@ Created 2013-03-16 Sunny Bains #include "dyn0types.h" /** Class that manages dynamic buffers. It uses a UT_LIST of -dyn_buf_t::block_t instances. We don't use STL containers in +mtr_buf_t::block_t instances. We don't use STL containers in order to avoid the overhead of heap calls. Using a custom memory allocator doesn't solve the problem either because we have to get the memory from somewhere. We can't use the block_t::m_data as the backend for the custom allocator because we would like the data in the blocks to be contiguous. */ -template -class dyn_buf_t { +class mtr_buf_t { public: class block_t; @@ -47,17 +47,19 @@ public: typedef UT_LIST_NODE_T(block_t) block_node_t; typedef UT_LIST_BASE_NODE_T(block_t) block_list_t; + /** SIZE - sizeof(m_node) + sizeof(m_used) */ + enum { MAX_DATA_SIZE = DYN_ARRAY_DATA_SIZE + - sizeof(block_node_t) + sizeof(ib_uint32_t) }; + class block_t { public: block_t() { - ut_ad(MAX_DATA_SIZE <= (2 << 15)); + compile_time_assert(MAX_DATA_SIZE <= (2 << 15)); init(); } - ~block_t() { } - /** Gets the number of used bytes in a block. @return number of bytes used */ @@ -112,12 +114,12 @@ public: /** @return pointer to start of reserved space */ template - Type push(ib_uint32_t size) + Type push(uint32_t size) { Type ptr = reinterpret_cast(end()); m_used += size; - ut_ad(m_used <= static_cast(MAX_DATA_SIZE)); + ut_ad(m_used <= uint32_t(MAX_DATA_SIZE)); return(ptr); } @@ -131,7 +133,7 @@ public: ut_ad(ptr <= begin() + m_buf_end); /* We have done the boundary check above */ - m_used = static_cast(ptr - begin()); + m_used = uint32_t(ptr - begin()); ut_ad(m_used <= MAX_DATA_SIZE); ut_d(m_buf_end = 0); @@ -154,13 +156,6 @@ public: ulint m_magic_n; #endif /* UNIV_DEBUG */ - /** SIZE - sizeof(m_node) + sizeof(m_used) */ - enum { - MAX_DATA_SIZE = SIZE - - sizeof(block_node_t) - + sizeof(ib_uint32_t) - }; - /** Storage */ byte m_data[MAX_DATA_SIZE]; @@ -169,15 +164,13 @@ public: /** number of data bytes used in this block; DYN_BLOCK_FULL_FLAG is set when the block becomes full */ - ib_uint32_t m_used; + uint32_t m_used; - friend class dyn_buf_t; + friend class mtr_buf_t; }; - enum { MAX_DATA_SIZE = block_t::MAX_DATA_SIZE}; - /** Default constructor */ - dyn_buf_t() + mtr_buf_t() : m_heap(), m_size() @@ -187,7 +180,7 @@ public: } /** Destructor */ - ~dyn_buf_t() + ~mtr_buf_t() { erase(); } @@ -252,7 +245,7 @@ public: @param size in bytes of the element @return pointer to the element */ template - Type push(ib_uint32_t size) + Type push(uint32_t size) { ut_ad(size > 0); ut_ad(size <= MAX_DATA_SIZE); @@ -272,17 +265,11 @@ public: Pushes n bytes. 
@param str string to write @param len string length */ - void push(const byte* ptr, ib_uint32_t len) + void push(const byte* ptr, uint32_t len) { while (len > 0) { - ib_uint32_t n_copied; - - if (len >= MAX_DATA_SIZE) { - n_copied = MAX_DATA_SIZE; - } else { - n_copied = len; - } - + uint32_t n_copied = std::min(len, + uint32_t(MAX_DATA_SIZE)); ::memmove(push(n_copied), ptr, n_copied); ptr += n_copied; @@ -298,7 +285,7 @@ public: const Type at(ulint pos) const { block_t* block = const_cast( - const_cast(this)->find(pos)); + const_cast(this)->find(pos)); return(reinterpret_cast(block->begin() + pos)); } @@ -391,8 +378,8 @@ public: private: // Disable copying - dyn_buf_t(const dyn_buf_t&); - dyn_buf_t& operator=(const dyn_buf_t&); + mtr_buf_t(const mtr_buf_t&); + mtr_buf_t& operator=(const mtr_buf_t&); /** Add the block to the end of the list*/ @@ -404,7 +391,7 @@ private: } /** @return the last block in the list */ - block_t* back() + block_t* back() const { return(UT_LIST_GET_LAST(m_list)); } @@ -484,8 +471,6 @@ private: block_t m_first_block; }; -typedef dyn_buf_t mtr_buf_t; - /** mtr_buf_t copier */ struct mtr_buf_copy_t { /** The copied buffer */ diff --git a/storage/innobase/include/fil0crypt.h b/storage/innobase/include/fil0crypt.h index 13b3ec4e37e..5238213135f 100644 --- a/storage/innobase/include/fil0crypt.h +++ b/storage/innobase/include/fil0crypt.h @@ -27,9 +27,9 @@ Created 04/01/2015 Jan Lindström #define fil0crypt_h #ifndef UNIV_INNOCHECKSUM - #include "os0event.h" #include "my_crypt.h" +#include "fil0fil.h" #endif /*! UNIV_INNOCHECKSUM */ /** @@ -296,7 +296,6 @@ fil_space_destroy_crypt_data( Parse a MLOG_FILE_WRITE_CRYPT_DATA log entry @param[in] ptr Log entry start @param[in] end_ptr Log entry end -@param[in] block buffer block @param[out] err DB_SUCCESS or DB_DECRYPTION_FAILED @return position on log buffer */ UNIV_INTERN @@ -304,7 +303,6 @@ byte* fil_parse_write_crypt_data( byte* ptr, const byte* end_ptr, - const buf_block_t* block, dberr_t* err) MY_ATTRIBUTE((warn_unused_result)); diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index e4d803370ae..8f4e9d10fd9 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -36,7 +36,7 @@ Created 10/25/1995 Heikki Tuuri #include "ibuf0types.h" // Forward declaration -extern ibool srv_use_doublewrite_buf; +extern my_bool srv_use_doublewrite_buf; extern struct buf_dblwr_t* buf_dblwr; struct trx_t; class page_id_t; @@ -82,7 +82,7 @@ struct fil_space_t { /*!< LSN of the most recent fil_names_write_if_was_clean(). Reset to 0 by fil_names_clear(). - Protected by log_sys->mutex. + Protected by log_sys.mutex. If and only if this is nonzero, the tablespace will be in named_spaces. */ bool stop_ios;/*!< true if we want to rename the @@ -144,14 +144,14 @@ struct fil_space_t { dropped. An example is change buffer merge. The tablespace cannot be dropped while this is nonzero, or while fil_node_t::n_pending is nonzero. - Protected by fil_system.mutex. */ + Protected by fil_system.mutex and my_atomic_loadlint() and friends. */ ulint n_pending_ops; /** Number of pending block read or write operations (when a write is imminent or a read has recently completed). The tablespace object cannot be freed while this is nonzero, but it can be detached from fil_system. Note that fil_node_t::n_pending tracks actual pending I/O requests. - Protected by fil_system.mutex. */ + Protected by fil_system.mutex and my_atomic_loadlint() and friends. 
*/ ulint n_pending_ios; hash_node_t hash; /*!< hash chain node */ hash_node_t name_hash;/*!< hash chain the name_hash table */ @@ -245,6 +245,38 @@ struct fil_space_t { bool open(); /** Close each file. Only invoked on fil_system.temp_space. */ void close(); + + /** Acquire a tablespace reference. */ + void acquire() { my_atomic_addlint(&n_pending_ops, 1); } + /** Release a tablespace reference. */ + void release() + { + ut_ad(referenced()); + my_atomic_addlint(&n_pending_ops, ulint(-1)); + } + /** @return whether references are being held */ + bool referenced() { return my_atomic_loadlint(&n_pending_ops); } + /** @return whether references are being held */ + bool referenced() const + { + return const_cast(this)->referenced(); + } + + /** Acquire a tablespace reference for I/O. */ + void acquire_for_io() { my_atomic_addlint(&n_pending_ios, 1); } + /** Release a tablespace reference for I/O. */ + void release_for_io() + { + ut_ad(pending_io()); + my_atomic_addlint(&n_pending_ios, ulint(-1)); + } + /** @return whether I/O is pending */ + bool pending_io() { return my_atomic_loadlint(&n_pending_ios); } + /** @return whether I/O is pending */ + bool pending_io() const + { + return const_cast(this)->pending_io(); + } }; /** Value of fil_space_t::magic_n */ @@ -254,7 +286,7 @@ struct fil_space_t { struct fil_node_t { /** tablespace containing this file */ fil_space_t* space; - /** file name; protected by fil_system.mutex and log_sys->mutex. */ + /** file name; protected by fil_system.mutex and log_sys.mutex. */ char* name; /** file handle (valid if is_open) */ pfs_os_file_t handle; @@ -333,15 +365,15 @@ typedef byte fil_faddr_t; /*!< 'type' definition in C: an address #endif /* !UNIV_INNOCHECKSUM */ /** Initial size of a single-table tablespace in pages */ -#define FIL_IBD_FILE_INITIAL_SIZE 4 +#define FIL_IBD_FILE_INITIAL_SIZE 4U /** 'null' (undefined) page offset in the context of file spaces */ #define FIL_NULL ULINT32_UNDEFINED -#define FIL_ADDR_PAGE 0 /* first in address is the page offset */ -#define FIL_ADDR_BYTE 4 /* then comes 2-byte byte offset within page*/ -#define FIL_ADDR_SIZE 6 /* address size is 6 bytes */ +#define FIL_ADDR_PAGE 0U /* first in address is the page offset */ +#define FIL_ADDR_BYTE 4U /* then comes 2-byte byte offset within page*/ +#define FIL_ADDR_SIZE 6U /* address size is 6 bytes */ #ifndef UNIV_INNOCHECKSUM @@ -361,15 +393,15 @@ extern const fil_addr_t fil_addr_null; page belongs to (== 0) but in later versions the 'new' checksum of the page */ -#define FIL_PAGE_OFFSET 4 /*!< page offset inside space */ -#define FIL_PAGE_PREV 8 /*!< if there is a 'natural' +#define FIL_PAGE_OFFSET 4U /*!< page offset inside space */ +#define FIL_PAGE_PREV 8U /*!< if there is a 'natural' predecessor of the page, its offset. Otherwise FIL_NULL. This field is not set on BLOB pages, which are stored as a singly-linked list. See also FIL_PAGE_NEXT. */ -#define FIL_PAGE_NEXT 12 /*!< if there is a 'natural' successor +#define FIL_PAGE_NEXT 12U /*!< if there is a 'natural' successor of the page, its offset. Otherwise FIL_NULL. B-tree index pages @@ -379,9 +411,9 @@ extern const fil_addr_t fil_addr_null; FIL_PAGE_PREV and FIL_PAGE_NEXT in the collation order of the smallest user record on each page. 
*/ -#define FIL_PAGE_LSN 16 /*!< lsn of the end of the newest +#define FIL_PAGE_LSN 16U /*!< lsn of the end of the newest modification log record to the page */ -#define FIL_PAGE_TYPE 24 /*!< file page type: FIL_PAGE_INDEX,..., +#define FIL_PAGE_TYPE 24U /*!< file page type: FIL_PAGE_INDEX,..., 2 bytes. The contents of this field can only @@ -396,7 +428,7 @@ extern const fil_addr_t fil_addr_null; MySQL/InnoDB 5.1.7 or later, the contents of this field is valid for all uncompressed pages. */ -#define FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION 26 /*!< for the first page +#define FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION 26U /*!< for the first page in a system tablespace data file (ibdata*, not *.ibd): the file has been flushed to disk at least up @@ -410,7 +442,7 @@ extern const fil_addr_t fil_addr_null; #define FIL_RTREE_SPLIT_SEQ_NUM FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION /** starting from 4.1.x this contains the space id of the page */ -#define FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID 34 +#define FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID 34U #define FIL_PAGE_SPACE_ID FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID @@ -514,7 +546,7 @@ The caller should hold an InnoDB table lock or a MDL that prevents the tablespace from being dropped during the operation, or the caller should be in single-threaded crash recovery mode (no user connections that could drop tablespaces). -If this is not the case, fil_space_acquire() and fil_space_release() +If this is not the case, fil_space_acquire() and fil_space_t::release() should be used instead. @param[in] id tablespace ID @return tablespace, or NULL if not found */ @@ -596,7 +628,7 @@ public: for which a MLOG_FILE_NAME record has been written since the latest redo log checkpoint. - Protected only by log_sys->mutex. */ + Protected only by log_sys.mutex. */ UT_LIST_BASE_NODE_T(fil_space_t) rotation_list; /*!< list of all file spaces needing key rotation.*/ @@ -792,11 +824,6 @@ fil_space_acquire_silent(ulint id) return (fil_space_acquire_low(id, true)); } -/** Release a tablespace acquired with fil_space_acquire(). -@param[in,out] space tablespace to release */ -void -fil_space_release(fil_space_t* space); - /** Acquire a tablespace for reading or writing a block, when it could be dropped concurrently. @param[in] id tablespace ID @@ -805,14 +832,9 @@ when it could be dropped concurrently. fil_space_t* fil_space_acquire_for_io(ulint id); -/** Release a tablespace acquired with fil_space_acquire_for_io(). -@param[in,out] space tablespace to release */ -void -fil_space_release_for_io(fil_space_t* space); - /** Return the next fil_space_t. Once started, the caller must keep calling this until it returns NULL. -fil_space_acquire() and fil_space_release() are invoked here which +fil_space_acquire() and fil_space_t::release() are invoked here which blocks a concurrent operation from dropping the tablespace. @param[in,out] prev_space Pointer to the previous fil_space_t. If NULL, use the first fil_space_t on fil_system.space_list. @@ -825,7 +847,7 @@ fil_space_next( /** Return the next fil_space_t from key rotation list. Once started, the caller must keep calling this until it returns NULL. -fil_space_acquire() and fil_space_release() are invoked here which +fil_space_acquire() and fil_space_t::release() are invoked here which blocks a concurrent operation from dropping the tablespace. @param[in,out] prev_space Pointer to the previous fil_space_t. If NULL, use the first fil_space_t on fil_system.space_list. 
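The fil_space_next() contract above says the caller must keep calling until NULL is returned, and that the function itself acquires the returned tablespace and releases the previous one so a concurrent DROP cannot free it mid-iteration. Below is a self-contained mock of that calling convention; only the call sequence is modelled, the acquire/release bookkeeping and the real fil_space_t layout are not.

#include <vector>
#include <cstdio>

/* Mock of the fil_space_next() traversal contract described above: pass
NULL to start from the first space, pass the previous space to advance,
stop when NULL comes back.  Everything here except the function name and
the loop shape is an assumption for illustration. */
struct fil_space_t { unsigned long id; };

static std::vector<fil_space_t> spaces = {{0}, {5}, {42}};

static fil_space_t* fil_space_next(fil_space_t* prev) {
  if (prev == nullptr) return spaces.empty() ? nullptr : &spaces.front();
  size_t i = size_t(prev - &spaces.front()) + 1;
  return i < spaces.size() ? &spaces[i] : nullptr;
}

int main() {
  for (fil_space_t* s = fil_space_next(nullptr); s; s = fil_space_next(s)) {
    std::printf("space %lu\n", s->id);
  }
}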
@@ -1304,8 +1326,8 @@ fil_names_write_if_was_clean( } const bool was_clean = space->max_lsn == 0; - ut_ad(space->max_lsn <= log_sys->lsn); - space->max_lsn = log_sys->lsn; + ut_ad(space->max_lsn <= log_sys.lsn); + space->max_lsn = log_sys.lsn; if (was_clean) { fil_names_dirty_and_write(space, mtr); diff --git a/storage/innobase/include/fsp0file.h b/storage/innobase/include/fsp0file.h index 68e9f687fcd..794d44373e8 100644 --- a/storage/innobase/include/fsp0file.h +++ b/storage/innobase/include/fsp0file.h @@ -417,7 +417,8 @@ private: /** Flags to use for opening the data file */ os_file_create_t m_open_flags; - /** size in database pages */ + /** size in megabytes or pages; converted from megabytes to + pages in SysTablespace::normalize_size() */ ulint m_size; /** ordinal position of this datafile in the tablespace */ @@ -480,7 +481,7 @@ public: /* No op - base constructor is called. */ } - RemoteDatafile(const char* name, ulint size, ulint order) + RemoteDatafile(const char*, ulint, ulint) : m_link_filepath() { @@ -502,12 +503,6 @@ public: return(m_link_filepath); } - /** Set the link filepath. Use default datadir, the base name of - the path provided without its suffix, plus DOT_ISL. - @param[in] path filepath which contains a basename to use. - If NULL, use m_name as the basename. */ - void set_link_filepath(const char* path); - /** Create a link filename based on the contents of m_name, open that file, and read the contents into m_filepath. @retval DB_SUCCESS if remote linked tablespace file is opened and read. diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h index 888020c8f85..161d7a30ea4 100644 --- a/storage/innobase/include/fsp0fsp.h +++ b/storage/innobase/include/fsp0fsp.h @@ -45,8 +45,8 @@ Created 12/18/1995 Heikki Tuuri /** @return the PAGE_SSIZE flags for the current innodb_page_size */ #define FSP_FLAGS_PAGE_SSIZE() \ - ((UNIV_PAGE_SIZE == UNIV_PAGE_SIZE_ORIG) ? \ - 0 : (UNIV_PAGE_SIZE_SHIFT - UNIV_ZIP_SIZE_SHIFT_MIN + 1) \ + ((srv_page_size == UNIV_PAGE_SIZE_ORIG) ? 
\ + 0U : (srv_page_size_shift - UNIV_ZIP_SIZE_SHIFT_MIN + 1) \ << FSP_FLAGS_POS_PAGE_SSIZE) /* @defgroup Compatibility macros for MariaDB 10.1.0 through 10.1.20; @@ -318,7 +318,7 @@ UNIV_INLINE ulint fsp_get_extent_size_in_pages(const page_size_t& page_size) { - return(FSP_EXTENT_SIZE * UNIV_PAGE_SIZE / page_size.physical()); + return (FSP_EXTENT_SIZE << srv_page_size_shift) / page_size.physical(); } /**********************************************************************//** diff --git a/storage/innobase/include/fsp0fsp.ic b/storage/innobase/include/fsp0fsp.ic index 2da3320eef7..38d890fd2f3 100644 --- a/storage/innobase/include/fsp0fsp.ic +++ b/storage/innobase/include/fsp0fsp.ic @@ -92,21 +92,15 @@ xdes_calc_descriptor_page( const page_size_t& page_size, ulint offset) { -#ifndef DOXYGEN /* Doxygen gets confused by these */ -# if UNIV_PAGE_SIZE_MAX <= XDES_ARR_OFFSET \ - + (UNIV_PAGE_SIZE_MAX / FSP_EXTENT_SIZE_MAX) \ - * XDES_SIZE_MAX -# error -# endif -# if UNIV_ZIP_SIZE_MIN <= XDES_ARR_OFFSET \ - + (UNIV_ZIP_SIZE_MIN / FSP_EXTENT_SIZE_MIN) \ - * XDES_SIZE_MIN -# error -# endif -#endif /* !DOXYGEN */ + compile_time_assert(UNIV_PAGE_SIZE_MAX > XDES_ARR_OFFSET + + (UNIV_PAGE_SIZE_MAX / FSP_EXTENT_SIZE_MAX) + * XDES_SIZE_MAX); + compile_time_assert(UNIV_PAGE_SIZE_MIN > XDES_ARR_OFFSET + + (UNIV_PAGE_SIZE_MIN / FSP_EXTENT_SIZE_MIN) + * XDES_SIZE_MIN); - ut_ad(UNIV_PAGE_SIZE > XDES_ARR_OFFSET - + (UNIV_PAGE_SIZE / FSP_EXTENT_SIZE) + ut_ad(srv_page_size > XDES_ARR_OFFSET + + (srv_page_size / FSP_EXTENT_SIZE) * XDES_SIZE); ut_ad(UNIV_ZIP_SIZE_MIN > XDES_ARR_OFFSET + (UNIV_ZIP_SIZE_MIN / FSP_EXTENT_SIZE) diff --git a/storage/innobase/include/fsp0sysspace.h b/storage/innobase/include/fsp0sysspace.h index de83e64d285..80b006f2dd7 100644 --- a/storage/innobase/include/fsp0sysspace.h +++ b/storage/innobase/include/fsp0sysspace.h @@ -103,7 +103,7 @@ public: void shutdown(); /** Normalize the file size, convert to extents. */ - void normalize(); + void normalize_size(); /** @return true if a new raw device was created. */ @@ -139,8 +139,8 @@ public: @return the autoextend increment in pages. */ ulint get_autoextend_increment() const { - return(sys_tablespace_auto_extend_increment - * ((1024 * 1024) / UNIV_PAGE_SIZE)); + return sys_tablespace_auto_extend_increment + << (20 - srv_page_size_shift); } /** diff --git a/storage/innobase/include/fsp0types.h b/storage/innobase/include/fsp0types.h index 080311e010c..f7a5befa6ae 100644 --- a/storage/innobase/include/fsp0types.h +++ b/storage/innobase/include/fsp0types.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2017, MariaDB Corporation. +Copyright (c) 2014, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -59,11 +59,8 @@ page size | file space extent size 32 KiB | 64 pages = 2 MiB 64 KiB | 64 pages = 4 MiB */ -#define FSP_EXTENT_SIZE ((UNIV_PAGE_SIZE <= (16384) ? \ - (1048576 / UNIV_PAGE_SIZE) : \ - ((UNIV_PAGE_SIZE <= (32768)) ? \ - (2097152 / UNIV_PAGE_SIZE) : \ - (4194304 / UNIV_PAGE_SIZE)))) +#define FSP_EXTENT_SIZE (srv_page_size_shift < 14 ? 
\ + (1048576U >> srv_page_size_shift) : 64U) /** File space extent size (four megabyte) in pages for MAX page size */ #define FSP_EXTENT_SIZE_MAX (4194304 / UNIV_PAGE_SIZE_MAX) @@ -151,38 +148,38 @@ enum fsp_reserve_t { /* Number of pages described in a single descriptor page: currently each page description takes less than 1 byte; a descriptor page is repeated every this many file pages */ -/* #define XDES_DESCRIBED_PER_PAGE UNIV_PAGE_SIZE */ -/* This has been replaced with either UNIV_PAGE_SIZE or page_zip->size. */ +/* #define XDES_DESCRIBED_PER_PAGE srv_page_size */ +/* This has been replaced with either srv_page_size or page_zip->size. */ /** @name The space low address page map The pages at FSP_XDES_OFFSET and FSP_IBUF_BITMAP_OFFSET are repeated every XDES_DESCRIBED_PER_PAGE pages in every tablespace. */ /* @{ */ /*--------------------------------------*/ -#define FSP_XDES_OFFSET 0 /* !< extent descriptor */ -#define FSP_IBUF_BITMAP_OFFSET 1 /* !< insert buffer bitmap */ +#define FSP_XDES_OFFSET 0U /* !< extent descriptor */ +#define FSP_IBUF_BITMAP_OFFSET 1U /* !< insert buffer bitmap */ /* The ibuf bitmap pages are the ones whose page number is the number above plus a multiple of XDES_DESCRIBED_PER_PAGE */ -#define FSP_FIRST_INODE_PAGE_NO 2 /*!< in every tablespace */ +#define FSP_FIRST_INODE_PAGE_NO 2U /*!< in every tablespace */ /* The following pages exist in the system tablespace (space 0). */ -#define FSP_IBUF_HEADER_PAGE_NO 3 /*!< insert buffer +#define FSP_IBUF_HEADER_PAGE_NO 3U /*!< insert buffer header page, in tablespace 0 */ -#define FSP_IBUF_TREE_ROOT_PAGE_NO 4 /*!< insert buffer +#define FSP_IBUF_TREE_ROOT_PAGE_NO 4U /*!< insert buffer B-tree root page in tablespace 0 */ /* The ibuf tree root page number in tablespace 0; its fseg inode is on the page number FSP_FIRST_INODE_PAGE_NO */ -#define FSP_TRX_SYS_PAGE_NO 5 /*!< transaction +#define FSP_TRX_SYS_PAGE_NO 5U /*!< transaction system header, in tablespace 0 */ -#define FSP_FIRST_RSEG_PAGE_NO 6 /*!< first rollback segment +#define FSP_FIRST_RSEG_PAGE_NO 6U /*!< first rollback segment page, in tablespace 0 */ -#define FSP_DICT_HDR_PAGE_NO 7 /*!< data dictionary header +#define FSP_DICT_HDR_PAGE_NO 7U /*!< data dictionary header page, in tablespace 0 */ /*--------------------------------------*/ /* @} */ diff --git a/storage/innobase/include/fts0fts.h b/storage/innobase/include/fts0fts.h index 04bb26da7da..be2fe7e040b 100644 --- a/storage/innobase/include/fts0fts.h +++ b/storage/innobase/include/fts0fts.h @@ -561,7 +561,6 @@ fts_commit( MY_ATTRIBUTE((warn_unused_result)); /** FTS Query entry point. -@param[in] trx transaction @param[in] index fts index to search @param[in] flags FTS search mode @param[in] query_str FTS query @@ -570,7 +569,6 @@ fts_commit( @return DB_SUCCESS if successful otherwise error code */ dberr_t fts_query( - trx_t* trx, dict_index_t* index, uint flags, const byte* query_str, @@ -736,7 +734,6 @@ Take a FTS savepoint. 
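The new FSP_EXTENT_SIZE above derives the extent size in pages from srv_page_size_shift: below 16KiB pages (shift < 14) an extent is one megabyte's worth of pages, otherwise a fixed 64 pages, which reproduces the 16KiB/32KiB/64KiB table from the removed comment. The SysTablespace::get_autoextend_increment() change uses the same shift arithmetic to convert megabytes to pages. A small standalone check, with the page size shift passed as a parameter instead of reading the global:

#include <cassert>
#include <cstdio>

/* Minimal check of the FSP_EXTENT_SIZE arithmetic above; page_size_shift
stands in for the srv_page_size_shift global. */
static unsigned extent_size_in_pages(unsigned page_size_shift) {
  return page_size_shift < 14 ? (1048576U >> page_size_shift) : 64U;
}

/* MB-to-pages conversion as in get_autoextend_increment(): 1 MiB = 2^20
bytes, so megabytes << (20 - page_size_shift) gives pages. */
static unsigned long mb_to_pages(unsigned long mb, unsigned page_size_shift) {
  return mb << (20 - page_size_shift);
}

int main() {
  assert(extent_size_in_pages(12) == 256); /*  4 KiB pages -> 1 MiB extent */
  assert(extent_size_in_pages(14) == 64);  /* 16 KiB pages -> 1 MiB extent */
  assert(extent_size_in_pages(15) == 64);  /* 32 KiB pages -> 2 MiB extent */
  assert(extent_size_in_pages(16) == 64);  /* 64 KiB pages -> 4 MiB extent */
  assert(mb_to_pages(8, 14) == 512);       /*  8 MiB = 512 pages of 16 KiB */
  std::puts("ok");
}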
*/ void fts_savepoint_take( /*===============*/ - trx_t* trx, /*!< in: transaction */ fts_trx_t* fts_trx, /*!< in: fts transaction */ const char* name); /*!< in: savepoint name */ diff --git a/storage/innobase/include/fts0priv.h b/storage/innobase/include/fts0priv.h index f9d5d07a44c..aef6e816461 100644 --- a/storage/innobase/include/fts0priv.h +++ b/storage/innobase/include/fts0priv.h @@ -319,7 +319,6 @@ the dict mutex que_t* fts_parse_sql_no_dict_lock( /*=======================*/ - fts_table_t* fts_table, /*!< in: table with FTS index */ pars_info_t* info, /*!< in: parser info */ const char* sql) /*!< in: SQL string to evaluate */ MY_ATTRIBUTE((warn_unused_result)); diff --git a/storage/innobase/include/fts0tokenize.h b/storage/innobase/include/fts0tokenize.h index 15726aea1de..909d2ce07ba 100644 --- a/storage/innobase/include/fts0tokenize.h +++ b/storage/innobase/include/fts0tokenize.h @@ -144,7 +144,7 @@ fts_get_word( } } - info->prev = *doc; + info->prev = char(*doc); info->yesno = (FTB_YES == ' ') ? 1 : (info->quot != 0); info->weight_adjust = info->wasign = 0; } diff --git a/storage/innobase/include/fts0types.ic b/storage/innobase/include/fts0types.ic index a8712751412..487e7c33b63 100644 --- a/storage/innobase/include/fts0types.ic +++ b/storage/innobase/include/fts0types.ic @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -172,7 +172,6 @@ fts_select_index_by_hash( const byte* str, ulint len) { - int char_len; ulong nr1 = 1; ulong nr2 = 4; @@ -187,9 +186,9 @@ fts_select_index_by_hash( char_len = my_mbcharlen_ptr(cs, reinterpret_cast(str), reinterpret_cast(str + len)); */ - char_len = cs->cset->charlen(cs, str, str+len); + size_t char_len = size_t(cs->cset->charlen(cs, str, str + len)); - ut_ad(static_cast(char_len) <= len); + ut_ad(char_len <= len); /* Get collation hash code */ cs->coll->hash_sort(cs, str, char_len, &nr1, &nr2); diff --git a/storage/innobase/include/fut0fut.ic b/storage/innobase/include/fut0fut.ic index 6fe031876e6..56be971f233 100644 --- a/storage/innobase/include/fut0fut.ic +++ b/storage/innobase/include/fut0fut.ic @@ -48,7 +48,7 @@ fut_get_ptr( buf_block_t* block; byte* ptr = NULL; - ut_ad(addr.boffset < UNIV_PAGE_SIZE); + ut_ad(addr.boffset < srv_page_size); ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH) || (rw_latch == RW_SX_LATCH)); diff --git a/storage/innobase/include/fut0lst.ic b/storage/innobase/include/fut0lst.ic index 128dc77ed92..5c9a9ca94c1 100644 --- a/storage/innobase/include/fut0lst.ic +++ b/storage/innobase/include/fut0lst.ic @@ -58,7 +58,7 @@ flst_write_addr( MTR_MEMO_PAGE_X_FIX | MTR_MEMO_PAGE_SX_FIX)); ut_a(addr.page == FIL_NULL || addr.boffset >= FIL_PAGE_DATA); - ut_a(ut_align_offset(faddr, UNIV_PAGE_SIZE) >= FIL_PAGE_DATA); + ut_a(ut_align_offset(faddr, srv_page_size) >= FIL_PAGE_DATA); mlog_write_ulint(faddr + FIL_ADDR_PAGE, addr.page, MLOG_4BYTES, mtr); mlog_write_ulint(faddr + FIL_ADDR_BYTE, addr.boffset, @@ -83,7 +83,7 @@ flst_read_addr( addr.boffset = mtr_read_ulint(faddr + FIL_ADDR_BYTE, MLOG_2BYTES, mtr); ut_a(addr.page == FIL_NULL || addr.boffset >= FIL_PAGE_DATA); - ut_a(ut_align_offset(faddr, UNIV_PAGE_SIZE) >= FIL_PAGE_DATA); + 
ut_a(ut_align_offset(faddr, srv_page_size) >= FIL_PAGE_DATA); return(addr); } diff --git a/storage/innobase/include/gis0rtree.h b/storage/innobase/include/gis0rtree.h index 65a53ec1d39..461d2816653 100644 --- a/storage/innobase/include/gis0rtree.h +++ b/storage/innobase/include/gis0rtree.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -28,6 +28,7 @@ Created 2013/03/27 Jimmy Yang and Allen Lai #define gis0rtree_h #include "univ.i" +#include "my_base.h" #include "data0type.h" #include "data0types.h" @@ -87,10 +88,8 @@ rtr_index_build_node_ptr( pointer */ ulint page_no,/*!< in: page number to put in node pointer */ - mem_heap_t* heap, /*!< in: memory heap where pointer + mem_heap_t* heap); /*!< in: memory heap where pointer created */ - ulint level); /*!< in: level of rec in tree: - 0 means leaf level */ /*************************************************************//** Splits an R-tree index page to halves and inserts the tuple. It is assumed @@ -179,7 +178,6 @@ dberr_t rtr_ins_enlarge_mbr( /*=================*/ btr_cur_t* cursor, /*!< in: btr cursor */ - que_thr_t* thr, /*!< in: query thread */ mtr_t* mtr); /*!< in: mtr */ /********************************************************************//** @@ -438,9 +436,6 @@ rtr_merge_and_update_mbr( ulint* offsets, /*!< in: rec offsets */ ulint* offsets2, /*!< in: rec offsets */ page_t* child_page, /*!< in: the child page. */ - buf_block_t* merge_block, /*!< in: page to merge */ - buf_block_t* block, /*!< in: page be merged */ - dict_index_t* index, /*!< in: index */ mtr_t* mtr); /*!< in: mtr */ /*************************************************************//** @@ -448,10 +443,8 @@ Deletes on the upper level the node pointer to a page. 
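FIL_ADDR_PAGE/FIL_ADDR_BYTE/FIL_ADDR_SIZE above describe the 6-byte file address layout (a 4-byte page number followed by a 2-byte offset within the page), which flst_write_addr()/flst_read_addr() store through the mlog/mtr helpers so the writes are redo-logged. The sketch below shows only the byte layout, assuming InnoDB's usual big-endian on-page integer encoding, without any mini-transaction logging.

#include <cassert>
#include <cstdint>

/* Sketch of the 6-byte file address layout defined above:
   bytes 0..3 = page number (FIL_ADDR_PAGE), bytes 4..5 = byte offset
   within the page (FIL_ADDR_BYTE), total FIL_ADDR_SIZE = 6. */
struct fil_addr { uint32_t page; uint16_t boffset; };

static void addr_write(uint8_t* faddr, fil_addr a) {
  faddr[0] = uint8_t(a.page >> 24);
  faddr[1] = uint8_t(a.page >> 16);
  faddr[2] = uint8_t(a.page >> 8);
  faddr[3] = uint8_t(a.page);
  faddr[4] = uint8_t(a.boffset >> 8);
  faddr[5] = uint8_t(a.boffset);
}

static fil_addr addr_read(const uint8_t* faddr) {
  fil_addr a;
  a.page = uint32_t(faddr[0]) << 24 | uint32_t(faddr[1]) << 16
         | uint32_t(faddr[2]) << 8 | faddr[3];
  a.boffset = uint16_t((faddr[4] << 8) | faddr[5]);
  return a;
}

int main() {
  uint8_t buf[6];
  addr_write(buf, {1234, 120});
  fil_addr a = addr_read(buf);
  assert(a.page == 1234 && a.boffset == 120);
}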
*/ void rtr_node_ptr_delete( /*================*/ - dict_index_t* index, /*!< in: index tree */ - btr_cur_t* sea_cur,/*!< in: search cursor, contains information + btr_cur_t* cursor, /*!< in: search cursor, contains information about parent nodes in search */ - buf_block_t* block, /*!< in: page whose node pointer is deleted */ mtr_t* mtr); /*!< in: mtr */ /****************************************************************//** @@ -463,10 +456,7 @@ rtr_merge_mbr_changed( btr_cur_t* cursor2, /*!< in: the other cursor */ ulint* offsets, /*!< in: rec offsets */ ulint* offsets2, /*!< in: rec offsets */ - rtr_mbr_t* new_mbr, /*!< out: MBR to update */ - buf_block_t* merge_block, /*!< in: page to merge */ - buf_block_t* block, /*!< in: page be merged */ - dict_index_t* index); /*!< in: index */ + rtr_mbr_t* new_mbr); /*!< out: MBR to update */ /**************************************************************//** @@ -543,7 +533,7 @@ rtr_info_reinit_in_cursor( @param[in] tuple range tuple containing mbr, may also be empty tuple @param[in] mode search mode @return estimated number of rows */ -int64_t +ha_rows rtr_estimate_n_rows_in_range( dict_index_t* index, const dtuple_t* tuple, diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h index 86defe9b166..1313705f119 100644 --- a/storage/innobase/include/ha_prototypes.h +++ b/storage/innobase/include/ha_prototypes.h @@ -70,13 +70,11 @@ innobase_invalidate_query_cache( /*============================*/ trx_t* trx, /*!< in: transaction which modifies the table */ - const char* full_name, /*!< in: concatenation of + const char* full_name); /*!< in: concatenation of database name, path separator, table name, null char NUL; NOTE that in Windows this is always in LOWER CASE! */ - ulint full_name_len); /*!< in: full name length where - also the null chars count */ /** Quote a standard SQL identifier like tablespace, index or column name. @param[in] file output stream @@ -158,7 +156,6 @@ UNIV_INTERN void innobase_mysql_log_notify( /*======================*/ - ib_uint64_t write_lsn, /*!< in: LSN written to log file */ ib_uint64_t flush_lsn); /*!< in: LSN flushed to disk */ /** Converts a MySQL type to an InnoDB type. Note that this function returns @@ -240,7 +237,7 @@ wsrep_innobase_kill_one_trx(void * const thd_ptr, const trx_t * const bf_trx, trx_t *victim_trx, ibool signal); -int wsrep_innobase_mysql_sort(int mysql_type, uint charset_number, +ulint wsrep_innobase_mysql_sort(int mysql_type, uint charset_number, unsigned char* str, unsigned int str_length, unsigned int buf_length); #endif /* WITH_WSREP */ @@ -309,14 +306,6 @@ thd_lock_wait_timeout( /*==================*/ THD* thd); /*!< in: thread handle, or NULL to query the global innodb_lock_wait_timeout */ -/******************************************************************//** -Add up the time waited for the lock for the current query. */ -void -thd_set_lock_wait_time( -/*===================*/ - THD* thd, /*!< in/out: thread handle */ - ulint value); /*!< in: time waited for the lock */ - /** Get status of innodb_tmpdir. @param[in] thd thread handle, or NULL to query the global innodb_tmpdir. @@ -453,14 +442,6 @@ const char* server_get_hostname(); /*=================*/ -/******************************************************************//** -Get the error message format string. -@return the format string or 0 if not found. 
*/ -const char* -innobase_get_err_msg( -/*=================*/ - int error_code); /*!< in: MySQL error code */ - /*********************************************************************//** Compute the next autoinc value. @@ -533,7 +514,7 @@ UNIV_INTERN void ib_push_warning( trx_t* trx, /*!< in: trx */ - ulint error, /*!< in: error code to push as warning */ + dberr_t error, /*!< in: error code to push as warning */ const char *format,/*!< in: warning message */ ...); @@ -543,7 +524,7 @@ UNIV_INTERN void ib_push_warning( void* ithd, /*!< in: thd */ - ulint error, /*!< in: error code to push as warning */ + dberr_t error, /*!< in: error code to push as warning */ const char *format,/*!< in: warning message */ ...); diff --git a/storage/innobase/include/ib0mutex.h b/storage/innobase/include/ib0mutex.h index d75893ec4d4..eaf391be09b 100644 --- a/storage/innobase/include/ib0mutex.h +++ b/storage/innobase/include/ib0mutex.h @@ -53,15 +53,8 @@ struct OSTrackMutex { ut_ad(!m_destroy_at_exit || !m_locked); } - /** Initialise the mutex. - @param[in] id Mutex ID - @param[in] filename File where mutex was created - @param[in] line Line in filename */ - void init( - latch_id_t id, - const char* filename, - uint32_t line) - UNIV_NOTHROW + /** Initialise the mutex. */ + void init(latch_id_t, const char*, uint32_t) UNIV_NOTHROW { ut_ad(m_freed); ut_ad(!m_locked); @@ -92,16 +85,8 @@ struct OSTrackMutex { m_mutex.exit(); } - /** Acquire the mutex. - @param[in] max_spins max number of spins - @param[in] max_delay max delay per spin - @param[in] filename from where called - @param[in] line within filename */ - void enter( - uint32_t max_spins, - uint32_t max_delay, - const char* filename, - uint32_t line) + /** Acquire the mutex. */ + void enter(uint32_t, uint32_t, const char*, uint32_t) UNIV_NOTHROW { ut_ad(!m_freed); @@ -186,15 +171,8 @@ struct TTASFutexMutex { } /** Called when the mutex is "created". Note: Not from the constructor - but when the mutex is initialised. - @param[in] id Mutex ID - @param[in] filename File where mutex was created - @param[in] line Line in filename */ - void init( - latch_id_t id, - const char* filename, - uint32_t line) - UNIV_NOTHROW + but when the mutex is initialised. */ + void init(latch_id_t, const char*, uint32_t) UNIV_NOTHROW { ut_a(m_lock_word == MUTEX_STATE_UNLOCKED); } @@ -208,14 +186,9 @@ struct TTASFutexMutex { /** Acquire the mutex. @param[in] max_spins max number of spins - @param[in] max_delay max delay per spin - @param[in] filename from where called - @param[in] line within filename */ - void enter( - uint32_t max_spins, - uint32_t max_delay, - const char* filename, - uint32_t line) UNIV_NOTHROW + @param[in] max_delay max delay per spin */ + void enter(uint32_t max_spins, uint32_t max_delay, + const char*, uint32_t) UNIV_NOTHROW { uint32_t n_spins, n_waits; @@ -308,15 +281,8 @@ struct TTASMutex { } /** Called when the mutex is "created". Note: Not from the constructor - but when the mutex is initialised. - @param[in] id Mutex ID - @param[in] filename File where mutex was created - @param[in] line Line in filename */ - void init( - latch_id_t id, - const char* filename, - uint32_t line) - UNIV_NOTHROW + but when the mutex is initialised. */ + void init(latch_id_t) UNIV_NOTHROW { ut_ad(m_lock_word == MUTEX_STATE_UNLOCKED); } @@ -349,14 +315,9 @@ struct TTASMutex { /** Acquire the mutex. 
@param max_spins max number of spins - @param max_delay max delay per spin - @param filename from where called - @param line within filename */ - void enter( - uint32_t max_spins, - uint32_t max_delay, - const char* filename, - uint32_t line) UNIV_NOTHROW + @param max_delay max delay per spin */ + void enter(uint32_t max_spins, uint32_t max_delay, + const char*, uint32_t) UNIV_NOTHROW { const uint32_t step = max_spins; uint32_t n_spins = 0; @@ -420,14 +381,8 @@ struct TTASEventMutex { /** Called when the mutex is "created". Note: Not from the constructor but when the mutex is initialised. - @param[in] id Mutex ID - @param[in] filename File where mutex was created - @param[in] line Line in filename */ - void init( - latch_id_t id, - const char* filename, - uint32_t line) - UNIV_NOTHROW + @param[in] id Mutex ID */ + void init(latch_id_t id, const char*, uint32_t) UNIV_NOTHROW { ut_a(m_event == 0); ut_a(m_lock_word == MUTEX_STATE_UNLOCKED); diff --git a/storage/innobase/include/ibuf0ibuf.h b/storage/innobase/include/ibuf0ibuf.h index 446000a39de..b5e3d1eddf7 100644 --- a/storage/innobase/include/ibuf0ibuf.h +++ b/storage/innobase/include/ibuf0ibuf.h @@ -49,19 +49,16 @@ typedef enum { IBUF_OP_COUNT = 3 } ibuf_op_t; -/** Combinations of operations that can be buffered. Because the enum -values are used for indexing innobase_change_buffering_values[], they -should start at 0 and there should not be any gaps. */ -typedef enum { +/** Combinations of operations that can be buffered. +@see innodb_change_buffering_names */ +enum ibuf_use_t { IBUF_USE_NONE = 0, IBUF_USE_INSERT, /* insert */ IBUF_USE_DELETE_MARK, /* delete */ IBUF_USE_INSERT_DELETE_MARK, /* insert+delete */ IBUF_USE_DELETE, /* delete+purge */ - IBUF_USE_ALL, /* insert+delete+purge */ - - IBUF_USE_COUNT /* number of entries in ibuf_use_t */ -} ibuf_use_t; + IBUF_USE_ALL /* insert+delete+purge */ +}; /** Operations that can currently be buffered. */ extern ibuf_use_t ibuf_use; diff --git a/storage/innobase/include/ibuf0ibuf.ic b/storage/innobase/include/ibuf0ibuf.ic index 09070c14059..f91ae5aee4a 100644 --- a/storage/innobase/include/ibuf0ibuf.ic +++ b/storage/innobase/include/ibuf0ibuf.ic @@ -28,7 +28,7 @@ Created 7/19/1997 Heikki Tuuri #include "fsp0types.h" #include "buf0lru.h" -/** An index page must contain at least UNIV_PAGE_SIZE / +/** An index page must contain at least srv_page_size / IBUF_PAGE_SIZE_PER_FREE_SPACE bytes of free space for ibuf to try to buffer inserts to this page. If there is this much of free space, the corresponding bits are set in the ibuf bitmap. 
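The mutex templates touched above (TTASFutexMutex, TTASMutex, TTASEventMutex) keep an enter(max_spins, max_delay, ...) interface: spin on the lock word for a bounded number of iterations, then back off, with the futex- and event-based variants eventually sleeping on the OS. Below is a minimal test-and-test-and-set sketch of that enter loop; the randomised ut_delay() and the futex/os_event wait of the real code are replaced by std::this_thread::yield().

#include <atomic>
#include <thread>

/* Minimal sketch of the TTAS "enter" loop used by the mutex templates
above: test before test-and-set to avoid hammering the cache line, spin at
most max_spins times, then yield instead of waiting on a futex/event. */
class ttas_mutex {
  std::atomic<bool> locked{false};
public:
  void enter(unsigned max_spins) {
    for (;;) {
      for (unsigned i = 0; i < max_spins; i++) {
        if (!locked.load(std::memory_order_relaxed)
            && !locked.exchange(true, std::memory_order_acquire)) {
          return;
        }
      }
      std::this_thread::yield();
    }
  }
  void exit() { locked.store(false, std::memory_order_release); }
};

int main() {
  ttas_mutex m;
  int counter = 0;
  std::thread t([&] {
    for (int i = 0; i < 1000; i++) { m.enter(30); ++counter; m.exit(); }
  });
  for (int i = 0; i < 1000; i++) { m.enter(30); ++counter; m.exit(); }
  t.join();
  return counter == 2000 ? 0 : 1;
}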
*/ @@ -314,9 +314,7 @@ ibuf_update_free_bits_if_full( block->page.size.physical(), max_ins_size); if (max_ins_size >= increase) { -#if ULINT32_UNDEFINED <= UNIV_PAGE_SIZE_MAX -# error "ULINT32_UNDEFINED <= UNIV_PAGE_SIZE_MAX" -#endif + compile_time_assert(ULINT32_UNDEFINED > UNIV_PAGE_SIZE_MAX); after = ibuf_index_page_calc_free_bits( block->page.size.physical(), max_ins_size - increase); #ifdef UNIV_IBUF_DEBUG diff --git a/storage/innobase/include/lock0prdt.h b/storage/innobase/include/lock0prdt.h index e4e37776d22..9eb38ff8975 100644 --- a/storage/innobase/include/lock0prdt.h +++ b/storage/innobase/include/lock0prdt.h @@ -51,9 +51,8 @@ lock_prdt_lock( SELECT FOR UPDATE */ ulint type_mode, /*!< in: LOCK_PREDICATE or LOCK_PRDT_PAGE */ - que_thr_t* thr, /*!< in: query thread + que_thr_t* thr); /*!< in: query thread (can be NULL if BTR_NO_LOCKING_FLAG) */ - mtr_t* mtr); /*!< in/out: mini-transaction */ /*********************************************************************//** Acquire a "Page" lock on a block @@ -107,7 +106,6 @@ Update predicate lock when page splits */ void lock_prdt_update_split( /*===================*/ - buf_block_t* block, /*!< in/out: page to be split */ buf_block_t* new_block, /*!< in/out: the new half page */ lock_prdt_t* prdt, /*!< in: MBR on the old page */ lock_prdt_t* new_prdt, /*!< in: MBR on the new page */ @@ -123,7 +121,6 @@ lock_prdt_update_parent( buf_block_t* right_block, /*!< in/out: the new half page */ lock_prdt_t* left_prdt, /*!< in: MBR on the old page */ lock_prdt_t* right_prdt, /*!< in: MBR on the new page */ - lock_prdt_t* parent_prdt, /*!< in: original parent MBR */ ulint space, /*!< in: space id */ ulint page_no); /*!< in: page number */ diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h index 716ca34b928..046037f9241 100644 --- a/storage/innobase/include/log0log.h +++ b/storage/innobase/include/log0log.h @@ -41,8 +41,8 @@ Created 12/9/1995 Heikki Tuuri #include "os0event.h" #include "os0file.h" -/** Redo log group */ -struct log_group_t; +/** Maximum number of srv_n_log_files, or innodb_log_files_in_group */ +#define SRV_N_LOG_FILES_MAX 100 /** Magic value to use instead of log checksums when they are disabled */ #define LOG_NO_CHECKSUM_MAGIC 0xDEADBEEFUL @@ -50,13 +50,13 @@ struct log_group_t; /* Margin for the free space in the smallest log group, before a new query step which modifies the database, is started */ -#define LOG_CHECKPOINT_FREE_PER_THREAD (4 * UNIV_PAGE_SIZE) -#define LOG_CHECKPOINT_EXTRA_FREE (8 * UNIV_PAGE_SIZE) +#define LOG_CHECKPOINT_FREE_PER_THREAD (4U << srv_page_size_shift) +#define LOG_CHECKPOINT_EXTRA_FREE (8U << srv_page_size_shift) typedef ulint (*log_checksum_func_t)(const byte* log_block); /** Pointer to the log checksum calculation function. Protected with -log_sys->mutex. */ +log_sys.mutex. */ extern log_checksum_func_t log_checksum_algorithm_ptr; /** Append a string to the log. @@ -82,9 +82,7 @@ log_free_check(void); /** Extends the log buffer. @param[in] len requested minimum size in bytes */ -void -log_buffer_extend( - ulint len); +void log_buffer_extend(ulong len); /** Check margin not to overwrite transaction log from the last checkpoint. If would estimate the log write to exceed the log_group_capacity, @@ -138,7 +136,7 @@ log_get_flush_lsn(void); /*=============*/ /**************************************************************** Gets the log group capacity. It is OK to read the value without -holding log_sys->mutex because it is constant. 
+holding log_sys.mutex because it is constant. @return log group capacity */ UNIV_INLINE lsn_t @@ -152,14 +150,7 @@ UNIV_INLINE lsn_t log_get_max_modified_age_async(void); /*================================*/ -/** Initializes the redo logging subsystem. */ -void -log_sys_init(); -/** Initialize the redo log. -@param[in] n_files number of files */ -void -log_init(ulint n_files); /** Calculate the recommended highest values for lsn - last_checkpoint_lsn and lsn - buf_get_oldest_modification(). @param[in] file_size requested innodb_log_file_size @@ -171,12 +162,6 @@ log_set_capacity(ulonglong file_size) MY_ATTRIBUTE((warn_unused_result)); /******************************************************//** -Completes an i/o to a log file. */ -void -log_io_complete( -/*============*/ - log_group_t* group); /*!< in: log group */ -/******************************************************//** This function is called, e.g., when a transaction wants to commit. It checks that the log has been written to the log file up to the last log entry written by the transaction. If there is a flush running, it waits and checks if the @@ -235,13 +220,9 @@ shutdown. This function also writes all log in log files to the log archive. */ void logs_empty_and_mark_files_at_shutdown(void); /*=======================================*/ -/** Read a log group header page to log_sys->checkpoint_buf. -@param[in] group log group -@param[in] header 0 or LOG_CHEKCPOINT_1 or LOG_CHECKPOINT2 */ -void -log_group_header_read( - const log_group_t* group, - ulint header); +/** Read a log group header page to log_sys.checkpoint_buf. +@param[in] header 0 or LOG_CHECKPOINT_1 or LOG_CHECKPOINT2 */ +void log_header_read(ulint header); /** Write checkpoint info to the log header and invoke log_mutex_exit(). @param[in] sync whether to wait for the write to complete @param[in] end_lsn start LSN of the MLOG_CHECKPOINT mini-transaction */ @@ -262,16 +243,6 @@ objects! */ void log_check_margins(void); -/********************************************************//** -Sets the field values in group to correspond to a given lsn. For this function -to work, the values must already be correctly initialized to correspond to -some lsn, for instance, a checkpoint lsn. */ -void -log_group_set_fields( -/*=================*/ - log_group_t* group, /*!< in/out: group */ - lsn_t lsn); /*!< in: lsn for which the values should be - set */ /************************************************************//** Gets a log block flush bit. @return TRUE if this block was the first to be written in a log flush */ @@ -322,11 +293,10 @@ log_block_calc_checksum_crc32( const byte* block); /** Calculates the checksum for a log block using the "no-op" algorithm. -@param[in] block the redo log block @return the calculated checksum value */ UNIV_INLINE ulint -log_block_calc_checksum_none(const byte* block); +log_block_calc_checksum_none(const byte*); /************************************************************//** Gets a log block checksum field value. @@ -403,14 +373,6 @@ Refreshes the statistics used to print per-second averages. */ void log_refresh_stats(void); /*===================*/ -/********************************************************//** -Closes all log groups. */ -void -log_group_close_all(void); -/*=====================*/ -/** Shut down the redo log subsystem. 
*/ -void -log_shutdown(); /** Whether to generate and require checksums on the redo log pages */ extern my_bool innodb_log_checksums; @@ -422,8 +384,6 @@ extern my_bool innodb_log_checksums; /* The counting of lsn's starts from this value: this must be non-zero */ #define LOG_START_LSN ((lsn_t) (16 * OS_FILE_LOG_BLOCK_SIZE)) -#define LOG_BUFFER_SIZE (srv_log_buffer_size * UNIV_PAGE_SIZE) - /* Offsets of a log block header */ #define LOG_BLOCK_HDR_NO 0 /* block number which must be > 0 and is allowed to wrap around at 2G; the @@ -447,7 +407,7 @@ extern my_bool innodb_log_checksums; from this offset in this log block, if value not 0 */ #define LOG_BLOCK_CHECKPOINT_NO 8 /* 4 lower bytes of the value of - log_sys->next_checkpoint_no when the + log_sys.next_checkpoint_no when the log block was last written to: if the block has not yet been written full, this value is only updated before a @@ -470,7 +430,7 @@ extern my_bool innodb_log_checksums; #define LOG_CHECKPOINT_LSN 8 /** Byte offset of the log record corresponding to LOG_CHECKPOINT_LSN */ #define LOG_CHECKPOINT_OFFSET 16 -/** log_sys_t::buf_size at the time of the checkpoint (not used) */ +/** srv_log_buffer_size at the time of the checkpoint (not used) */ #define LOG_CHECKPOINT_LOG_BUF_SIZE 24 /** MariaDB 10.2.5 encrypted redo log encryption key version (32 bits)*/ #define LOG_CHECKPOINT_CRYPT_KEY 32 @@ -546,75 +506,28 @@ enum log_group_state_t { typedef ib_mutex_t LogSysMutex; typedef ib_mutex_t FlushOrderMutex; -/** Log group consists of a number of log files, each of the same size; a log -group is implemented as a space in the sense of the module fil0fil. -Currently, this is only protected by log_sys->mutex. However, in the case -of log_write_up_to(), we will access some members only with the protection -of log_sys->write_mutex, which should affect nothing for now. 
*/ -struct log_group_t{ - /** number of files in the group */ - ulint n_files; - /** format of the redo log: e.g., LOG_HEADER_FORMAT_CURRENT */ - ulint format; - /** individual log file size in bytes, including the header */ - lsn_t file_size; - /** corruption status */ - log_group_state_t state; - /** lsn used to fix coordinates within the log group */ - lsn_t lsn; - /** the byte offset of the above lsn */ - lsn_t lsn_offset; - /** unaligned buffers */ - byte** file_header_bufs_ptr; - /** buffers for each file header in the group */ - byte** file_header_bufs; - - /** used only in recovery: recovery scan succeeded up to this - lsn in this log group */ - lsn_t scanned_lsn; - /** unaligned checkpoint header */ - byte* checkpoint_buf_ptr; - /** buffer for writing a checkpoint header */ - byte* checkpoint_buf; - - /** @return whether the redo log is encrypted */ - bool is_encrypted() const - { - return((format & LOG_HEADER_FORMAT_ENCRYPTED) != 0); - } - - /** @return capacity in bytes */ - inline lsn_t capacity() const - { - return((file_size - LOG_FILE_HDR_SIZE) * n_files); - } -}; - /** Redo log buffer */ struct log_t{ - char pad1[CACHE_LINE_SIZE]; - /*!< Padding to prevent other memory - update hotspots from residing on the - same memory cache line */ + MY_ALIGNED(CACHE_LINE_SIZE) lsn_t lsn; /*!< log sequence number */ - ulint buf_free; /*!< first free offset within the log + ulong buf_free; /*!< first free offset within the log buffer in use */ - char pad2[CACHE_LINE_SIZE];/*!< Padding */ + MY_ALIGNED(CACHE_LINE_SIZE) LogSysMutex mutex; /*!< mutex protecting the log */ - char pad3[CACHE_LINE_SIZE]; /*!< Padding */ - LogSysMutex write_mutex; /*!< mutex protecting writing to log - file and accessing to log_group_t */ - char pad4[CACHE_LINE_SIZE];/*!< Padding */ + MY_ALIGNED(CACHE_LINE_SIZE) + LogSysMutex write_mutex; /*!< mutex protecting writing to log */ + MY_ALIGNED(CACHE_LINE_SIZE) FlushOrderMutex log_flush_order_mutex;/*!< mutex to serialize access to the flush list when we are putting dirty blocks in the list. The idea behind this mutex is to be able - to release log_sys->mutex during + to release log_sys.mutex during mtr_commit and still ensure that insertions in the flush_list happen in the LSN order. */ - byte* buf; /*!< Memory of double the buf_size is + byte* buf; /*!< Memory of double the + srv_log_buffer_size is allocated here. This pointer will change however to either the first half or the second half in turns, so that log @@ -626,8 +539,7 @@ struct log_t{ bool first_in_use; /*!< true if buf points to the first half of the aligned(buf_ptr), false if the second half */ - ulint buf_size; /*!< log buffer size of each in bytes */ - ulint max_buf_free; /*!< recommended maximum value of + ulong max_buf_free; /*!< recommended maximum value of buf_free for the buffer in use, after which the buffer is flushed */ bool check_flush_or_checkpoint; @@ -639,12 +551,71 @@ struct log_t{ max_checkpoint_age; this flag is peeked at by log_free_check(), which does not reserve the log mutex */ - /** the redo log */ - log_group_t log; + + /** Log files. Protected by mutex or write_mutex. 
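In the log_t changes that follow, the explicit pad1..pad4 filler arrays are replaced by MY_ALIGNED(CACHE_LINE_SIZE) on the hot member groups (lsn/buf_free, mutex, write_mutex, flush-order mutex), so that fields updated by different threads do not share a cache line. A small sketch of the same idea with alignas; the value 64 is an assumption standing in for CACHE_LINE_SIZE.

#include <cstddef>
#include <cstdio>

constexpr std::size_t CACHE_LINE = 64;   /* assumption for CACHE_LINE_SIZE */

/* Each aligned member group starts on its own cache line, avoiding false
sharing between e.g. the lsn/buf_free writers and the mutex words. */
struct log_like {
  alignas(CACHE_LINE) unsigned long long lsn = 0;
  unsigned long buf_free = 0;        /* stays on the lsn line, as in log_t */
  alignas(CACHE_LINE) int mutex_word = 0;
  alignas(CACHE_LINE) int write_mutex_word = 0;
};

int main() {
  std::printf("lsn@%zu mutex@%zu write_mutex@%zu\n",
              offsetof(log_like, lsn),
              offsetof(log_like, mutex_word),
              offsetof(log_like, write_mutex_word));   /* 0, 64, 128 */
}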
*/ + struct files { + /** number of files */ + ulint n_files; + /** format of the redo log: e.g., LOG_HEADER_FORMAT_CURRENT */ + ulint format; + /** individual log file size in bytes, including the header */ + lsn_t file_size; + /** corruption status */ + log_group_state_t state; + /** lsn used to fix coordinates within the log group */ + lsn_t lsn; + /** the byte offset of the above lsn */ + lsn_t lsn_offset; + + /** unaligned buffers */ + byte* file_header_bufs_ptr; + /** buffers for each file header in the group */ + byte* file_header_bufs[SRV_N_LOG_FILES_MAX]; + + /** used only in recovery: recovery scan succeeded up to this + lsn in this log group */ + lsn_t scanned_lsn; + + /** @return whether the redo log is encrypted */ + bool is_encrypted() const { return format & LOG_HEADER_FORMAT_ENCRYPTED; } + /** @return capacity in bytes */ + lsn_t capacity() const{ return (file_size - LOG_FILE_HDR_SIZE) * n_files; } + /** Calculate the offset of a log sequence number. + @param[in] lsn log sequence number + @return offset within the log */ + inline lsn_t calc_lsn_offset(lsn_t lsn) const; + + /** Set the field values to correspond to a given lsn. */ + void set_fields(lsn_t lsn) + { + lsn_offset = calc_lsn_offset(lsn); + this->lsn = lsn; + } + + /** Read a log segment to log_sys.buf. + @param[in,out] start_lsn in: read area start, + out: the last read valid lsn + @param[in] end_lsn read area end + @return whether no invalid blocks (e.g checksum mismatch) were found */ + bool read_log_seg(lsn_t* start_lsn, lsn_t end_lsn); + + /** Initialize the redo log buffer. + @param[in] n_files number of files */ + void create(ulint n_files); + + /** Close the redo log buffer. */ + void close() + { + ut_free(file_header_bufs_ptr); + n_files = 0; + file_header_bufs_ptr = NULL; + memset(file_header_bufs, 0, sizeof file_header_bufs); + } + } log; /** The fields involved in the log buffer flush @{ */ - ulint buf_next_to_write;/*!< first offset in the log buffer + ulong buf_next_to_write;/*!< first offset in the log buffer where the byte content may not exist written to file, e.g., the start offset of a log record catenated @@ -661,11 +632,11 @@ struct log_t{ AND flushed to disk */ ulint n_pending_flushes;/*!< number of currently pending flushes; protected by - log_sys_t::mutex */ + log_sys.mutex */ os_event_t flush_event; /*!< this event is in the reset state when a flush is running; os_event_set() and os_event_reset() - are protected by log_sys_t::mutex */ + are protected by log_sys.mutex */ ulint n_log_ios; /*!< number of log i/os initiated thus far */ ulint n_log_ios_old; /*!< number of log i/o's at the @@ -711,7 +682,7 @@ struct log_t{ /*!< extra redo log records to write during a checkpoint, or NULL if none. The pointer is protected by - log_sys->mutex, and the data must + log_sys.mutex, and the data must remain constant as long as this pointer is not NULL. */ ulint n_pending_checkpoint_writes; @@ -721,73 +692,105 @@ struct log_t{ checkpoint write is running; a thread should wait for this without owning the log mutex */ - byte* checkpoint_buf_ptr;/* unaligned checkpoint header */ - byte* checkpoint_buf; /*!< checkpoint header is read to this - buffer */ + + /** buffer for checkpoint header */ + MY_ALIGNED(OS_FILE_LOG_BLOCK_SIZE) + byte checkpoint_buf[OS_FILE_LOG_BLOCK_SIZE]; /* @} */ - /** @return whether the redo log is encrypted */ - bool is_encrypted() const - { - return(log.is_encrypted()); - } +private: + bool m_initialised; +public: + /** + Constructor. 
+ + Some members may require late initialisation, thus we just mark object as + uninitialised. Real initialisation happens in create(). + */ + log_t(): m_initialised(false) {} + + /** @return whether the redo log is encrypted */ + bool is_encrypted() const { return(log.is_encrypted()); } + + bool is_initialised() { return m_initialised; } + + /** Complete an asynchronous checkpoint write. */ + void complete_checkpoint(); + + /** Initialise the redo log subsystem. */ + void create(); + + /** Shut down the redo log subsystem. */ + void close(); }; /** Redo log system */ -extern log_t* log_sys; +extern log_t log_sys; + +/** Calculate the offset of a log sequence number. +@param[in] lsn log sequence number +@return offset within the log */ +inline lsn_t log_t::files::calc_lsn_offset(lsn_t lsn) const +{ + ut_ad(this == &log_sys.log); + /* The lsn parameters are updated while holding both the mutexes + and it is ok to have either of them while reading */ + ut_ad(log_sys.mutex.is_owned() || log_sys.write_mutex.is_owned()); + const lsn_t group_size= capacity(); + lsn_t l= lsn - this->lsn; + if (longlong(l) < 0) { + l= lsn_t(-longlong(l)) % group_size; + l= group_size - l; + } + + l+= lsn_offset - LOG_FILE_HDR_SIZE * (1 + lsn_offset / file_size); + l%= group_size; + return l + LOG_FILE_HDR_SIZE * (1 + l / (file_size - LOG_FILE_HDR_SIZE)); +} /** Test if flush order mutex is owned. */ #define log_flush_order_mutex_own() \ - mutex_own(&log_sys->log_flush_order_mutex) + mutex_own(&log_sys.log_flush_order_mutex) /** Acquire the flush order mutex. */ #define log_flush_order_mutex_enter() do { \ - mutex_enter(&log_sys->log_flush_order_mutex); \ + mutex_enter(&log_sys.log_flush_order_mutex); \ } while (0) /** Release the flush order mutex. */ # define log_flush_order_mutex_exit() do { \ - mutex_exit(&log_sys->log_flush_order_mutex); \ + mutex_exit(&log_sys.log_flush_order_mutex); \ } while (0) /** Test if log sys mutex is owned. */ -#define log_mutex_own() mutex_own(&log_sys->mutex) +#define log_mutex_own() mutex_own(&log_sys.mutex) /** Test if log sys write mutex is owned. */ -#define log_write_mutex_own() mutex_own(&log_sys->write_mutex) +#define log_write_mutex_own() mutex_own(&log_sys.write_mutex) /** Acquire the log sys mutex. */ -#define log_mutex_enter() mutex_enter(&log_sys->mutex) +#define log_mutex_enter() mutex_enter(&log_sys.mutex) /** Acquire the log sys write mutex. */ -#define log_write_mutex_enter() mutex_enter(&log_sys->write_mutex) +#define log_write_mutex_enter() mutex_enter(&log_sys.write_mutex) /** Acquire all the log sys mutexes. */ #define log_mutex_enter_all() do { \ - mutex_enter(&log_sys->write_mutex); \ - mutex_enter(&log_sys->mutex); \ + mutex_enter(&log_sys.write_mutex); \ + mutex_enter(&log_sys.mutex); \ } while (0) /** Release the log sys mutex. */ -#define log_mutex_exit() mutex_exit(&log_sys->mutex) +#define log_mutex_exit() mutex_exit(&log_sys.mutex) /** Release the log sys write mutex.*/ -#define log_write_mutex_exit() mutex_exit(&log_sys->write_mutex) +#define log_write_mutex_exit() mutex_exit(&log_sys.write_mutex) /** Release all the log sys mutexes. */ #define log_mutex_exit_all() do { \ - mutex_exit(&log_sys->mutex); \ - mutex_exit(&log_sys->write_mutex); \ + mutex_exit(&log_sys.mutex); \ + mutex_exit(&log_sys.write_mutex); \ } while (0) -/** Calculate the offset of an lsn within a log group. 
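The new inline log_t::files::calc_lsn_offset() above maps an LSN to a byte offset in the circular group of redo log files: it works in header-less coordinates (file size minus the LOG_FILE_HDR_SIZE header of every file), takes the distance from the reference (lsn, lsn_offset) pair modulo the group capacity, and converts back to a physical offset by re-inserting one header per file crossed. Below is a standalone transcription with the struct fields passed as parameters; LOG_FILE_HDR_SIZE is assumed here to be 2048 bytes (four redo log blocks), the real value comes from log0log.h.

#include <cassert>
#include <cstdint>

typedef uint64_t lsn_t;

static const lsn_t LOG_FILE_HDR_SIZE = 2048;  /* assumed value */

/* Standalone transcription of log_t::files::calc_lsn_offset() shown above,
with (ref_lsn, ref_offset, file_size, n_files) as explicit parameters. */
static lsn_t calc_lsn_offset(lsn_t lsn, lsn_t ref_lsn, lsn_t ref_offset,
                             lsn_t file_size, lsn_t n_files)
{
  /* capacity of the group, excluding the per-file headers */
  const lsn_t group_size = (file_size - LOG_FILE_HDR_SIZE) * n_files;

  /* distance from the reference lsn, wrapped into [0, group_size) */
  lsn_t l = lsn - ref_lsn;
  if (int64_t(l) < 0) {
    l = lsn_t(-int64_t(l)) % group_size;
    l = group_size - l;
  }

  /* move the reference offset to header-less coordinates, add, wrap */
  l += ref_offset - LOG_FILE_HDR_SIZE * (1 + ref_offset / file_size);
  l %= group_size;

  /* convert back to a physical offset, one header per file crossed */
  return l + LOG_FILE_HDR_SIZE * (1 + l / (file_size - LOG_FILE_HDR_SIZE));
}

int main()
{
  /* two 1 MiB files; the reference lsn sits right after the first header */
  const lsn_t file_size = 1 << 20, n_files = 2;
  assert(calc_lsn_offset(8192, 8192, LOG_FILE_HDR_SIZE, file_size, n_files)
         == LOG_FILE_HDR_SIZE);
  /* 100 bytes later, still inside the first file */
  assert(calc_lsn_offset(8292, 8192, LOG_FILE_HDR_SIZE, file_size, n_files)
         == LOG_FILE_HDR_SIZE + 100);
}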
-@param[in] lsn log sequence number -@param[in] group log group -@return offset within the log group */ -lsn_t -log_group_calc_lsn_offset( - lsn_t lsn, - const log_group_t* group); - /* log scrubbing speed, in bytes/sec */ extern ulonglong innodb_scrub_log_speed; diff --git a/storage/innobase/include/log0log.ic b/storage/innobase/include/log0log.ic index 58da7bacc6f..87d55f9e01d 100644 --- a/storage/innobase/include/log0log.ic +++ b/storage/innobase/include/log0log.ic @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -26,12 +26,12 @@ Created 12/9/1995 Heikki Tuuri #include "mach0data.h" #include "srv0mon.h" -#include "srv0srv.h" #include "ut0crc32.h" #ifdef UNIV_LOG_LSN_DEBUG #include "mtr0types.h" #endif /* UNIV_LOG_LSN_DEBUG */ +extern ulong srv_log_buffer_size; /************************************************************//** Gets a log block flush bit. @@ -241,12 +241,10 @@ log_block_calc_checksum_crc32( } /** Calculates the checksum for a log block using the "no-op" algorithm. -@param[in] block log block @return checksum */ UNIV_INLINE ulint -log_block_calc_checksum_none( - const byte* block) +log_block_calc_checksum_none(const byte*) { return(LOG_NO_CHECKSUM_MAGIC); } @@ -330,15 +328,15 @@ log_reserve_and_write_fast( len - SIZE_OF_MLOG_CHECKPOINT] ? 0 : 1 - + mach_get_compressed_size(log_sys->lsn >> 32) - + mach_get_compressed_size(log_sys->lsn & 0xFFFFFFFFUL); + + mach_get_compressed_size(log_sys.lsn >> 32) + + mach_get_compressed_size(log_sys.lsn & 0xFFFFFFFFUL); #endif /* UNIV_LOG_LSN_DEBUG */ const ulint data_len = len #ifdef UNIV_LOG_LSN_DEBUG + lsn_len #endif /* UNIV_LOG_LSN_DEBUG */ - + log_sys->buf_free % OS_FILE_LOG_BLOCK_SIZE; + + log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE; if (data_len >= OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE) { @@ -348,44 +346,44 @@ log_reserve_and_write_fast( return(0); } - *start_lsn = log_sys->lsn; + *start_lsn = log_sys.lsn; #ifdef UNIV_LOG_LSN_DEBUG if (lsn_len) { /* Write the LSN pseudo-record. */ - byte* b = &log_sys->buf[log_sys->buf_free]; + byte* b = &log_sys.buf[log_sys.buf_free]; *b++ = MLOG_LSN | (MLOG_SINGLE_REC_FLAG & *(const byte*) str); /* Write the LSN in two parts, as a pseudo page number and space id. 
*/ - b += mach_write_compressed(b, log_sys->lsn >> 32); - b += mach_write_compressed(b, log_sys->lsn & 0xFFFFFFFFUL); - ut_a(b - lsn_len == &log_sys->buf[log_sys->buf_free]); + b += mach_write_compressed(b, log_sys.lsn >> 32); + b += mach_write_compressed(b, log_sys.lsn & 0xFFFFFFFFUL); + ut_a(b - lsn_len == &log_sys.buf[log_sys.buf_free]); ::memcpy(b, str, len); len += lsn_len; } else #endif /* UNIV_LOG_LSN_DEBUG */ - memcpy(log_sys->buf + log_sys->buf_free, str, len); + memcpy(log_sys.buf + log_sys.buf_free, str, len); log_block_set_data_len( reinterpret_cast(ut_align_down( - log_sys->buf + log_sys->buf_free, + log_sys.buf + log_sys.buf_free, OS_FILE_LOG_BLOCK_SIZE)), data_len); - log_sys->buf_free += len; + log_sys.buf_free += ulong(len); - ut_ad(log_sys->buf_free <= log_sys->buf_size); + ut_ad(log_sys.buf_free <= srv_log_buffer_size); - log_sys->lsn += len; + log_sys.lsn += len; MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, - log_sys->lsn - log_sys->last_checkpoint_lsn); + log_sys.lsn - log_sys.last_checkpoint_lsn); - return(log_sys->lsn); + return(log_sys.lsn); } /************************************************************//** @@ -400,7 +398,7 @@ log_get_lsn(void) log_mutex_enter(); - lsn = log_sys->lsn; + lsn = log_sys.lsn; log_mutex_exit(); @@ -418,7 +416,7 @@ log_get_flush_lsn(void) log_mutex_enter(); - lsn = log_sys->flushed_to_disk_lsn; + lsn = log_sys.flushed_to_disk_lsn; log_mutex_exit(); @@ -435,11 +433,11 @@ log_get_lsn_nowait(void) { lsn_t lsn=0; - if (!mutex_enter_nowait(&(log_sys->mutex))) { + if (!mutex_enter_nowait(&(log_sys.mutex))) { - lsn = log_sys->lsn; + lsn = log_sys.lsn; - mutex_exit(&(log_sys->mutex)); + mutex_exit(&(log_sys.mutex)); } return(lsn); @@ -447,14 +445,14 @@ log_get_lsn_nowait(void) /**************************************************************** Gets the log group capacity. It is OK to read the value without -holding log_sys->mutex because it is constant. +holding log_sys.mutex because it is constant. @return log group capacity */ UNIV_INLINE lsn_t log_get_capacity(void) /*==================*/ { - return(log_sys->log_group_capacity); + return(log_sys.log_group_capacity); } /**************************************************************** @@ -466,7 +464,7 @@ lsn_t log_get_max_modified_age_async(void) /*================================*/ { - return(log_sys->max_modified_age_async); + return(log_sys.max_modified_age_async); } /***********************************************************************//** @@ -498,7 +496,7 @@ log_free_check(void) sync_allowed_latches(latches, latches + UT_ARR_SIZE(latches)))); - if (log_sys->check_flush_or_checkpoint) { + if (log_sys.check_flush_or_checkpoint) { log_check_margins(); } diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h index 50a8d741806..15ad34ba9a5 100644 --- a/storage/innobase/include/log0recv.h +++ b/storage/innobase/include/log0recv.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -96,20 +96,6 @@ void recv_sys_debug_free(void); /*=====================*/ -/** Read a log segment to a buffer. 
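log_reserve_and_write_fast() above only takes the fast path when the whole record stays inside the current log block with the trailer left free; otherwise it returns 0 and the caller falls back to the multi-block path. The sketch below isolates that fit test; the block size of 512 bytes and trailer size of 4 bytes are the conventional values and are assumptions here rather than taken from this diff.

#include <cassert>

static const unsigned OS_FILE_LOG_BLOCK_SIZE = 512;  /* assumed */
static const unsigned LOG_BLOCK_TRL_SIZE = 4;        /* assumed */

/* Sketch of the fast-path fit test above: a record of `len` bytes may only
be appended in place if it stays within the current block's payload. */
static bool fits_in_current_block(unsigned long buf_free, unsigned len)
{
  unsigned data_len = len + buf_free % OS_FILE_LOG_BLOCK_SIZE;
  return data_len < OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE;
}

int main()
{
  assert(fits_in_current_block(12, 100));   /* stays within the block     */
  assert(!fits_in_current_block(500, 100)); /* would cross into the next  */
  assert(!fits_in_current_block(0, 508));   /* would overwrite the trailer */
}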
-@param[out] buf buffer -@param[in] group redo log files -@param[in, out] start_lsn in : read area start, out: the last read valid lsn -@param[in] end_lsn read area end -@param[out] invalid_block - invalid, (maybe incompletely written) block encountered -@return false, if invalid block encountered (e.g checksum mismatch), true otherwise */ -bool -log_group_read_log_seg( - byte* buf, - const log_group_t* group, - lsn_t* start_lsn, - lsn_t end_lsn); - /********************************************************//** Reset the state of the recovery system variables. */ void @@ -331,7 +317,7 @@ extern bool recv_no_ibuf_operations; extern bool recv_needed_recovery; #ifdef UNIV_DEBUG /** TRUE if writing to the redo log (mtr_commit) is forbidden. -Protected by log_sys->mutex. */ +Protected by log_sys.mutex. */ extern bool recv_no_log_write; #endif /* UNIV_DEBUG */ @@ -342,11 +328,11 @@ extern bool recv_lsn_checks_on; /** Size of the parsing buffer; it must accommodate RECV_SCAN_SIZE many times! */ -#define RECV_PARSING_BUF_SIZE (2 * 1024 * 1024) +#define RECV_PARSING_BUF_SIZE (2U << 20) /** Size of block reads when the log groups are scanned forward to do a roll-forward */ -#define RECV_SCAN_SIZE (4 * UNIV_PAGE_SIZE) +#define RECV_SCAN_SIZE (4U << srv_page_size_shift) /** This many frames must be left free in the buffer pool when we scan the log and store the scanned log records in the buffer pool: we will diff --git a/storage/innobase/include/mem0mem.h b/storage/innobase/include/mem0mem.h index a4848a4ed69..2cdb307ea96 100644 --- a/storage/innobase/include/mem0mem.h +++ b/storage/innobase/include/mem0mem.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -71,11 +71,11 @@ allocations of small buffers. */ #define MEM_BLOCK_START_SIZE 64 #define MEM_BLOCK_STANDARD_SIZE \ - (UNIV_PAGE_SIZE >= 16384 ? 8000 : MEM_MAX_ALLOC_IN_BUF) + (srv_page_size >= 16384 ? 8000 : MEM_MAX_ALLOC_IN_BUF) /** If a memory heap is allowed to grow into the buffer pool, the following is the maximum size for a single allocated buffer: */ -#define MEM_MAX_ALLOC_IN_BUF (UNIV_PAGE_SIZE - 200) +#define MEM_MAX_ALLOC_IN_BUF (srv_page_size - 200) /** Space needed when allocating for a user a field of length N. The space is allocated only in multiples of UNIV_MEM_ALIGNMENT. */ @@ -464,13 +464,14 @@ public: allocated by mem_heap_allocator) can be used as a hint to the implementation about where the new memory should be allocated in order to improve locality. 
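The mem_heap_allocator change that follows splits allocate() so the standard two-argument form (with the locality hint mentioned above) simply forwards to the one-argument form, and deallocate() stays a no-op because the memory is reclaimed only when the whole heap is torn down. Below is a self-contained sketch of that arena-allocator shape on top of a flat byte buffer; it is not the InnoDB mem_heap implementation and ignores alignment beyond what the example needs.

#include <cstddef>
#include <new>
#include <vector>

/* Sketch of the allocator shape above: allocation bumps a pointer inside
an arena, the hinted overload forwards to the plain one, and deallocate()
does nothing because the arena is released as a whole. */
template <class T>
class arena_allocator {
  std::vector<unsigned char>* m_arena;
  std::size_t* m_used;
public:
  typedef T value_type;
  typedef T* pointer;
  typedef const T* const_pointer;
  typedef std::size_t size_type;

  arena_allocator(std::vector<unsigned char>& arena, std::size_t& used)
    : m_arena(&arena), m_used(&used) {}

  pointer allocate(size_type n) {
    std::size_t bytes = n * sizeof(T);
    if (*m_used + bytes > m_arena->size()) throw std::bad_alloc();
    pointer p = reinterpret_cast<pointer>(m_arena->data() + *m_used);
    *m_used += bytes;
    return p;
  }
  /* the "hint" overload required by the allocator interface: ignored */
  pointer allocate(size_type n, const_pointer) { return allocate(n); }

  /* memory is released when the whole arena goes away */
  void deallocate(pointer, size_type) {}
};

int main() {
  std::vector<unsigned char> arena(1024);
  std::size_t used = 0;
  arena_allocator<int> a(arena, used);
  int* p = a.allocate(4);
  p[0] = 42;
  a.deallocate(p, 4);          /* no-op */
  return p[0] == 42 ? 0 : 1;
}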
*/ - pointer allocate(size_type n, const_pointer hint = 0) + pointer allocate(size_type n) { return(reinterpret_cast( mem_heap_alloc(m_heap, n * sizeof(T)))); } + pointer allocate(size_type n, const_pointer) { return allocate(n); } - void deallocate(pointer p, size_type n) { } + void deallocate(pointer, size_type) {} pointer address (reference r) const { return(&r); } diff --git a/storage/innobase/include/mem0mem.ic b/storage/innobase/include/mem0mem.ic index dbad7cb6950..405b7338b51 100644 --- a/storage/innobase/include/mem0mem.ic +++ b/storage/innobase/include/mem0mem.ic @@ -277,7 +277,8 @@ mem_heap_free_heap_top( ut_ad(block); /* Set the free field of block */ - mem_block_set_free(block, old_top - (byte*) block); + mem_block_set_free(block, + ulint(old_top - reinterpret_cast(block))); ut_ad(mem_block_get_start(block) <= mem_block_get_free(block)); UNIV_MEM_FREE(old_top, (byte*) block + block->len - old_top); @@ -547,7 +548,7 @@ mem_heap_get_size( size = heap->total_size; if (heap->free_block) { - size += UNIV_PAGE_SIZE; + size += srv_page_size; } return(size); diff --git a/storage/innobase/include/mtr0log.ic b/storage/innobase/include/mtr0log.ic index dd68ea25613..5c72c7cb5da 100644 --- a/storage/innobase/include/mtr0log.ic +++ b/storage/innobase/include/mtr0log.ic @@ -225,7 +225,7 @@ mlog_write_initial_log_record_fast( ut_ad(log_ptr); ut_d(mtr->memo_modify_page(ptr)); - page = (const byte*) ut_align_down(ptr, UNIV_PAGE_SIZE); + page = (const byte*) ut_align_down(ptr, srv_page_size); space = mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); offset = mach_read_from_4(page + FIL_PAGE_OFFSET); diff --git a/storage/innobase/include/mtr0mtr.h b/storage/innobase/include/mtr0mtr.h index 79ff7638ab7..7d5c1c414bd 100644 --- a/storage/innobase/include/mtr0mtr.h +++ b/storage/innobase/include/mtr0mtr.h @@ -78,13 +78,6 @@ savepoint. */ (m)->memo_release((o), (t)) #ifdef UNIV_DEBUG - -/** Check if memo contains the given item. */ -#define mtr_is_block_fix(m, o, t, table) mtr_memo_contains(m, o, t) - -/** Check if memo contains the given page. */ -#define mtr_is_page_fix(m, p, t, table) mtr_memo_contains_page(m, p, t) - /** Check if memo contains the given item. @return TRUE if contains */ #define mtr_memo_contains(m, o, t) \ @@ -213,9 +206,8 @@ struct mtr_t { ~mtr_t() { } /** Start a mini-transaction. - @param sync true if it is a synchronous mini-transaction - @param read_only true if read only mini-transaction */ - void start(bool sync = true, bool read_only = false); + @param sync true if it is a synchronous mini-transaction */ + void start(bool sync = true); /** @return whether this is an asynchronous mini-transaction. 
*/ bool is_async() const @@ -321,9 +313,9 @@ struct mtr_t { @param[in] space user or system tablespace */ void set_named_space(fil_space_t* space) { - ut_ad(m_impl.m_user_space_id == TRX_SYS_SPACE); + ut_ad(!m_impl.m_user_space_id); ut_d(m_impl.m_user_space_id = space->id); - if (space->id != TRX_SYS_SPACE) { + if (space->id) { m_impl.m_user_space = space; } } diff --git a/storage/innobase/include/mtr0types.h b/storage/innobase/include/mtr0types.h index 94d904e8efd..eaf838aaa76 100644 --- a/storage/innobase/include/mtr0types.h +++ b/storage/innobase/include/mtr0types.h @@ -100,13 +100,13 @@ enum mlog_id_t { /** Create an index page */ MLOG_PAGE_CREATE = 19, - /** insert an undo log record (used in MariaDB 10.2) */ + /** insert an undo log record */ MLOG_UNDO_INSERT = 20, /** erase an undo log page end (used in MariaDB 10.2) */ MLOG_UNDO_ERASE_END = 21, - /** initialize a page in an undo log (used in MariaDB 10.2) */ + /** initialize a page in an undo log */ MLOG_UNDO_INIT = 22, /** reuse an insert undo log header (used in MariaDB 10.2) */ diff --git a/storage/innobase/include/os0event.h b/storage/innobase/include/os0event.h index d5fdc6ba080..f8227235211 100644 --- a/storage/innobase/include/os0event.h +++ b/storage/innobase/include/os0event.h @@ -42,11 +42,7 @@ Creates an event semaphore, i.e., a semaphore which may just have two states: signaled and nonsignaled. The created event is manual reset: it must be reset explicitly by calling os_event_reset(). @return the event handle */ -os_event_t -os_event_create( -/*============*/ - const char* name); /*!< in: the name of the event, if NULL - the event is created without a name */ +os_event_t os_event_create(const char*); /** Sets an event semaphore to the signaled state: lets waiting threads diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index dec5e974ad2..20cab0ca7e9 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -68,10 +68,6 @@ the OS actually supports it: Win 95 does not, NT does. */ /** File handle */ typedef HANDLE os_file_t; -/** Convert a C file descriptor to a native file handle -@param fd file descriptor -@return native file handle */ -# define OS_FILE_FROM_FD(fd) (HANDLE) _get_osfhandle(fd) #else /* _WIN32 */ @@ -80,14 +76,9 @@ typedef DIR* os_file_dir_t; /*!< directory stream */ /** File handle */ typedef int os_file_t; -/** Convert a C file descriptor to a native file handle -@param fd file descriptor -@return native file handle */ -# define OS_FILE_FROM_FD(fd) fd - #endif /* _WIN32 */ -static const os_file_t OS_FILE_CLOSED = os_file_t(~0); +static const os_file_t OS_FILE_CLOSED = IF_WIN(os_file_t(INVALID_HANDLE_VALUE),-1); /** File descriptor with optional PERFORMANCE_SCHEMA instrumentation */ struct pfs_os_file_t @@ -843,18 +834,10 @@ The wrapper functions have the prefix of "innodb_". 
*/ pfs_os_file_read_no_error_handling_func( \ type, file, buf, offset, n, o, __FILE__, __LINE__) -# define os_file_read_no_error_handling_int_fd(type, file, buf, offset, n) \ - pfs_os_file_read_no_error_handling_int_fd_func( \ - type, file, buf, offset, n, __FILE__, __LINE__) - # define os_file_write(type, name, file, buf, offset, n) \ pfs_os_file_write_func(type, name, file, buf, offset, \ n, __FILE__, __LINE__) -# define os_file_write_int_fd(type, name, file, buf, offset, n) \ - pfs_os_file_write_int_fd_func(type, name, file, buf, offset, \ - n, __FILE__, __LINE__) - # define os_file_flush(file) \ pfs_os_file_flush_func(file, __FILE__, __LINE__) @@ -1564,7 +1547,7 @@ path. If the path is NULL then it will be created on --tmpdir location. This function is defined in ha_innodb.cc. @param[in] path location for creating temporary file @return temporary file descriptor, or < 0 on error */ -int +os_file_t innobase_mysql_tmpfile( const char* path); diff --git a/storage/innobase/include/os0file.ic b/storage/innobase/include/os0file.ic index a7e4f2695da..895f82cf2d8 100644 --- a/storage/innobase/include/os0file.ic +++ b/storage/innobase/include/os0file.ic @@ -340,49 +340,6 @@ pfs_os_file_read_no_error_handling_func( return(result); } -/** NOTE! Please use the corresponding macro -os_file_read_no_error_handling_int_fd() to request -a synchronous read operation. -@param[in] type read request -@param[in] file file handle -@param[out] buf buffer where to read -@param[in] offset file offset where to read -@param[in] n number of bytes to read -@param[in] src_file caller file name -@param[in] src_line caller line number -@return whether the request was successful */ -UNIV_INLINE -bool -pfs_os_file_read_no_error_handling_int_fd_func( - const IORequest& type, - int file, - void* buf, - os_offset_t offset, - ulint n, - const char* src_file, - uint src_line) -{ - PSI_file_locker_state state; - - PSI_file_locker* locker = PSI_FILE_CALL( - get_thread_file_descriptor_locker)( - &state, file, PSI_FILE_READ); - if (locker != NULL) { - PSI_FILE_CALL(start_file_wait)( - locker, n, - __FILE__, __LINE__); - } - - bool success = DB_SUCCESS == os_file_read_no_error_handling_func( - type, OS_FILE_FROM_FD(file), buf, offset, n, NULL); - - if (locker != NULL) { - PSI_FILE_CALL(end_file_wait)(locker, n); - } - - return(success); -} - /** NOTE! Please use the corresponding macro os_file_write(), not directly this function! This is the performance schema instrumented wrapper function for @@ -425,51 +382,6 @@ pfs_os_file_write_func( return(result); } -/** NOTE! Please use the corresponding macro os_file_write_int_fd(), -not directly this function! -This is the performance schema instrumented wrapper function for -os_file_write_int_fd() which requests a synchronous write operation. 
-@param[in] type write request -@param[in] name file name -@param[in] file file handle -@param[in] buf buffer to write -@param[in] offset file offset -@param[in] n number of bytes -@param[in] src_file file name where func invoked -@param[in] src_line line where the func invoked -@return whether the request was successful */ -UNIV_INLINE -bool -pfs_os_file_write_int_fd_func( - const IORequest& type, - const char* name, - int file, - const void* buf, - os_offset_t offset, - ulint n, - const char* src_file, - uint src_line) -{ - PSI_file_locker_state state; - struct PSI_file_locker* locker; - - locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( - &state, file, PSI_FILE_WRITE); - if (locker != NULL) { - PSI_FILE_CALL(start_file_wait)( - locker, n, - __FILE__, __LINE__); - } - - bool success = DB_SUCCESS == os_file_write_func( - type, name, OS_FILE_FROM_FD(file), buf, offset, n); - - if (locker != NULL) { - PSI_FILE_CALL(end_file_wait)(locker, n); - } - - return(success); -} /** NOTE! Please use the corresponding macro os_file_flush(), not directly this function! diff --git a/storage/innobase/include/os0thread.h b/storage/innobase/include/os0thread.h index c1b96ef7a1f..b6838c919a0 100644 --- a/storage/innobase/include/os0thread.h +++ b/storage/innobase/include/os0thread.h @@ -30,12 +30,6 @@ Created 9/8/1995 Heikki Tuuri #include "univ.i" -/* Maximum number of threads which can be created in the program; -this is also the size of the wait slot array for MySQL threads which -can wait inside InnoDB */ - -#define OS_THREAD_MAX_N srv_max_n_threads - /* Possible fixed priorities for threads */ #define OS_THREAD_PRIORITY_NONE 100 #define OS_THREAD_PRIORITY_BACKGROUND 1 diff --git a/storage/innobase/include/page0cur.h b/storage/innobase/include/page0cur.h index f3890c43b73..78908b998fe 100644 --- a/storage/innobase/include/page0cur.h +++ b/storage/innobase/include/page0cur.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -156,10 +157,7 @@ page_cur_tuple_insert( ulint** offsets,/*!< out: offsets on *rec */ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ ulint n_ext, /*!< in: number of externally stored columns */ - mtr_t* mtr, /*!< in: mini-transaction handle, or NULL */ - bool use_cache = false) - /*!< in: if true, then use record cache to - hold the tuple converted record. */ + mtr_t* mtr) /*!< in: mini-transaction handle, or NULL */ MY_ATTRIBUTE((nonnull(1,2,3,4,5), warn_unused_result)); /***********************************************************//** Inserts a record next to page cursor. Returns pointer to inserted record if diff --git a/storage/innobase/include/page0cur.ic b/storage/innobase/include/page0cur.ic index 3e6d40cba4a..86e560395f3 100644 --- a/storage/innobase/include/page0cur.ic +++ b/storage/innobase/include/page0cur.ic @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, MariaDB Corporation. +Copyright (c) 2015, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -262,10 +262,7 @@ page_cur_tuple_insert( ulint** offsets,/*!< out: offsets on *rec */ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ ulint n_ext, /*!< in: number of externally stored columns */ - mtr_t* mtr, /*!< in: mini-transaction handle, or NULL */ - bool use_cache) - /*!< in: if true, then use record cache to - hold the tuple converted record. */ + mtr_t* mtr) /*!< in: mini-transaction handle, or NULL */ { rec_t* rec; ulint size = rec_get_converted_size(index, tuple, n_ext); diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h index dee08605e58..d3f6bd304a6 100644 --- a/storage/innobase/include/page0page.h +++ b/storage/innobase/include/page0page.h @@ -158,9 +158,9 @@ Otherwise written as 0. @see PAGE_ROOT_AUTO_INC */ /*-----------------------------*/ /* Heap numbers */ -#define PAGE_HEAP_NO_INFIMUM 0 /* page infimum */ -#define PAGE_HEAP_NO_SUPREMUM 1 /* page supremum */ -#define PAGE_HEAP_NO_USER_LOW 2 /* first user record in +#define PAGE_HEAP_NO_INFIMUM 0U /* page infimum */ +#define PAGE_HEAP_NO_SUPREMUM 1U /* page supremum */ +#define PAGE_HEAP_NO_USER_LOW 2U /* first user record in creation (insertion) order, not necessarily collation order; this record may have been deleted */ @@ -210,7 +210,7 @@ inline page_t* page_align(const void* ptr) { - return(static_cast(ut_align_down(ptr, UNIV_PAGE_SIZE))); + return(static_cast(ut_align_down(ptr, srv_page_size))); } /** Gets the byte offset within a page frame. @@ -221,7 +221,7 @@ inline ulint page_offset(const void* ptr) { - return(ut_align_offset(ptr, UNIV_PAGE_SIZE)); + return(ut_align_offset(ptr, srv_page_size)); } /** Determine whether an index page is not in ROW_FORMAT=REDUNDANT. 
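Many hunks in this area replace the compile-time UNIV_PAGE_SIZE constant with the run-time srv_page_size, and 4 * UNIV_PAGE_SIZE with 4U << srv_page_size_shift. This works only because the page size remains a power of two, so page_align() and page_offset() stay cheap mask operations. A minimal stand-alone illustration, with my_page_size and my_page_size_shift standing in for the server variables:

	#include <cassert>
	#include <cstddef>
	#include <cstdint>

	static unsigned    my_page_size_shift = 14;	/* e.g. innodb_page_size=16k */
	static std::size_t my_page_size = std::size_t(1) << my_page_size_shift;

	/* what ut_align_down()/ut_align_offset() boil down to for a power of two */
	static std::uintptr_t align_down(std::uintptr_t addr, std::size_t size)
	{ return addr & ~std::uintptr_t(size - 1); }

	static std::size_t align_offset(std::uintptr_t addr, std::size_t size)
	{ return std::size_t(addr & (size - 1)); }

	int main()
	{
		/* a record 0x3a7 bytes into the frame of page number 12 */
		std::uintptr_t frame = 12 * my_page_size;
		std::uintptr_t rec   = frame + 0x3a7;

		assert(align_down(rec, my_page_size) == frame);		/* page_align() */
		assert(align_offset(rec, my_page_size) == 0x3a7);	/* page_offset() */

		/* the scan-size rewrite: 4 * page size expressed as a shift */
		assert((4U << my_page_size_shift) == 4 * my_page_size);
	}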
@@ -335,7 +335,7 @@ page_rec_is_user_rec_low(ulint offset) compile_time_assert(PAGE_NEW_SUPREMUM < PAGE_OLD_SUPREMUM_END); compile_time_assert(PAGE_OLD_SUPREMUM < PAGE_NEW_SUPREMUM_END); ut_ad(offset >= PAGE_NEW_INFIMUM); - ut_ad(offset <= UNIV_PAGE_SIZE - PAGE_EMPTY_DIR_START); + ut_ad(offset <= srv_page_size - PAGE_EMPTY_DIR_START); return(offset != PAGE_NEW_SUPREMUM && offset != PAGE_NEW_INFIMUM @@ -351,7 +351,7 @@ bool page_rec_is_supremum_low(ulint offset) { ut_ad(offset >= PAGE_NEW_INFIMUM); - ut_ad(offset <= UNIV_PAGE_SIZE - PAGE_EMPTY_DIR_START); + ut_ad(offset <= srv_page_size - PAGE_EMPTY_DIR_START); return(offset == PAGE_NEW_SUPREMUM || offset == PAGE_OLD_SUPREMUM); } @@ -363,7 +363,7 @@ bool page_rec_is_infimum_low(ulint offset) { ut_ad(offset >= PAGE_NEW_INFIMUM); - ut_ad(offset <= UNIV_PAGE_SIZE - PAGE_EMPTY_DIR_START); + ut_ad(offset <= srv_page_size - PAGE_EMPTY_DIR_START); return(offset == PAGE_NEW_INFIMUM || offset == PAGE_OLD_INFIMUM); } @@ -663,7 +663,7 @@ page_dir_get_nth_slot( ulint n); /*!< in: position */ #else /* UNIV_DEBUG */ # define page_dir_get_nth_slot(page, n) \ - ((page) + (UNIV_PAGE_SIZE - PAGE_DIR \ + ((page) + (srv_page_size - PAGE_DIR \ - (n + 1) * PAGE_DIR_SLOT_SIZE)) #endif /* UNIV_DEBUG */ /**************************************************************//** diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic index da0cd8511af..307803367c0 100644 --- a/storage/innobase/include/page0page.ic +++ b/storage/innobase/include/page0page.ic @@ -172,8 +172,8 @@ page_header_set_field( { ut_ad(page); ut_ad(field <= PAGE_N_RECS); - ut_ad(field == PAGE_N_HEAP || val < UNIV_PAGE_SIZE); - ut_ad(field != PAGE_N_HEAP || (val & 0x7fff) < UNIV_PAGE_SIZE); + ut_ad(field == PAGE_N_HEAP || val < srv_page_size); + ut_ad(field != PAGE_N_HEAP || (val & 0x7fff) < srv_page_size); mach_write_to_2(page + PAGE_HEADER + field, val); if (page_zip) { @@ -396,7 +396,8 @@ page_get_middle_rec( /*================*/ page_t* page) /*!< in: page */ { - ulint middle = (page_get_n_recs(page) + PAGE_HEAP_NO_USER_LOW) / 2; + ulint middle = (ulint(page_get_n_recs(page)) + + PAGE_HEAP_NO_USER_LOW) / 2; return(page_rec_get_nth(page, middle)); } @@ -520,7 +521,7 @@ page_dir_get_nth_slot( ut_ad(page_dir_get_n_slots(page) > n); return((page_dir_slot_t*) - page + UNIV_PAGE_SIZE - PAGE_DIR + page + srv_page_size - PAGE_DIR - (n + 1) * PAGE_DIR_SLOT_SIZE); } #endif /* UNIV_DEBUG */ @@ -639,7 +640,7 @@ page_rec_get_next_low( offs = rec_get_next_offs(rec, comp); - if (offs >= UNIV_PAGE_SIZE) { + if (offs >= srv_page_size) { fprintf(stderr, "InnoDB: Next record offset is nonsensical %lu" " in record at offset %lu\n" @@ -828,9 +829,8 @@ page_rec_get_base_extra_size( /*=========================*/ const rec_t* rec) /*!< in: physical record */ { -#if REC_N_NEW_EXTRA_BYTES + 1 != REC_N_OLD_EXTRA_BYTES -# error "REC_N_NEW_EXTRA_BYTES + 1 != REC_N_OLD_EXTRA_BYTES" -#endif + compile_time_assert(REC_N_NEW_EXTRA_BYTES + 1 + == REC_N_OLD_EXTRA_BYTES); return(REC_N_NEW_EXTRA_BYTES + (ulint) !page_rec_is_comp(rec)); } @@ -851,7 +851,7 @@ page_get_data_size( ? 
PAGE_NEW_SUPREMUM_END : PAGE_OLD_SUPREMUM_END) - page_header_get_field(page, PAGE_GARBAGE); - ut_ad(ret < UNIV_PAGE_SIZE); + ut_ad(ret < srv_page_size); return(ret); } @@ -899,13 +899,13 @@ page_get_free_space_of_empty( ulint comp) /*!< in: nonzero=compact page layout */ { if (comp) { - return((ulint)(UNIV_PAGE_SIZE + return((ulint)(srv_page_size - PAGE_NEW_SUPREMUM_END - PAGE_DIR - 2 * PAGE_DIR_SLOT_SIZE)); } - return((ulint)(UNIV_PAGE_SIZE + return((ulint)(srv_page_size - PAGE_OLD_SUPREMUM_END - PAGE_DIR - 2 * PAGE_DIR_SLOT_SIZE)); @@ -1043,7 +1043,7 @@ page_mem_free( page_zip_dir_delete(page_zip, rec, index, offsets, free); } else { page_header_set_field(page, page_zip, PAGE_N_RECS, - page_get_n_recs(page) - 1); + ulint(page_get_n_recs(page)) - 1); } } diff --git a/storage/innobase/include/page0size.h b/storage/innobase/include/page0size.h index 30a996df0a6..7b8b7efe617 100644 --- a/storage/innobase/include/page0size.h +++ b/storage/innobase/include/page0size.h @@ -30,7 +30,7 @@ Created Nov 14, 2013 Vasil Dimov #include "univ.i" #include "fsp0types.h" -#define FIELD_REF_SIZE 20 +#define FIELD_REF_SIZE 20U /** A BLOB field reference full of zero, for use in assertions and tests.Initially, BLOB field references are set to zero, in diff --git a/storage/innobase/include/page0zip.ic b/storage/innobase/include/page0zip.ic index b471e2cf64e..b3ebc5dcf51 100644 --- a/storage/innobase/include/page0zip.ic +++ b/storage/innobase/include/page0zip.ic @@ -120,7 +120,7 @@ page_zip_get_size( size = (UNIV_ZIP_SIZE_MIN >> 1) << page_zip->ssize; ut_ad(size >= UNIV_ZIP_SIZE_MIN); - ut_ad(size <= UNIV_PAGE_SIZE); + ut_ad(size <= srv_page_size); return(size); } @@ -242,9 +242,9 @@ page_zip_get_trailer_len( ut_ad(!page_zip->n_blobs); } - return((page_dir_get_n_heap(page_zip->data) - 2) - * uncompressed_size - + page_zip->n_blobs * BTR_EXTERN_FIELD_REF_SIZE); + return (ulint(page_dir_get_n_heap(page_zip->data)) - 2) + * uncompressed_size + + ulint(page_zip->n_blobs) * BTR_EXTERN_FIELD_REF_SIZE; } /**********************************************************************//** diff --git a/storage/innobase/include/pars0pars.h b/storage/innobase/include/pars0pars.h index 37498c1c638..487ba8c147f 100644 --- a/storage/innobase/include/pars0pars.h +++ b/storage/innobase/include/pars0pars.h @@ -539,7 +539,7 @@ pars_info_add_int4_literal( /*=======================*/ pars_info_t* info, /*!< in: info struct */ const char* name, /*!< in: name */ - lint val); /*!< in: value */ + ulint val); /*!< in: value */ /****************************************************************//** Equivalent to: diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h index da82361875c..cc8b92e99b1 100644 --- a/storage/innobase/include/rem0rec.h +++ b/storage/innobase/include/rem0rec.h @@ -738,9 +738,7 @@ rec_offs_any_flag(const ulint* offsets, ulint flag) /** Determine if the offsets are for a record containing off-page columns. 
@param[in] offsets rec_get_offsets() @return nonzero if any off-page columns exist */ -inline -ulint -rec_offs_any_extern(const ulint* offsets) +inline bool rec_offs_any_extern(const ulint* offsets) { return rec_offs_any_flag(offsets, REC_OFFS_EXTERNAL); } @@ -963,15 +961,27 @@ rec_get_converted_size_temp( @param[in] rec temporary file record @param[in] index index of that the record belongs to @param[in,out] offsets offsets to the fields; in: rec_offs_n_fields(offsets) -@param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED -*/ +@param[in] n_core number of core fields (index->n_core_fields) +@param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED */ void rec_init_offsets_temp( const rec_t* rec, const dict_index_t* index, ulint* offsets, + ulint n_core, rec_comp_status_t status = REC_STATUS_ORDINARY) MY_ATTRIBUTE((nonnull)); +/** Determine the offset to each field in temporary file. +@param[in] rec temporary file record +@param[in] index index of that the record belongs to +@param[in,out] offsets offsets to the fields; in: rec_offs_n_fields(offsets) +*/ +void +rec_init_offsets_temp( + const rec_t* rec, + const dict_index_t* index, + ulint* offsets) + MY_ATTRIBUTE((nonnull)); /** Convert a data tuple prefix to the temporary file format. @param[out] rec record in temporary file format diff --git a/storage/innobase/include/rem0rec.ic b/storage/innobase/include/rem0rec.ic index bc9006a66e8..5e9dbcdcfb6 100644 --- a/storage/innobase/include/rem0rec.ic +++ b/storage/innobase/include/rem0rec.ic @@ -61,7 +61,7 @@ most significant bytes and bits are written below less significant. we can calculate the offset of the next record with the formula: relative_offset + offset_of_this_record - mod UNIV_PAGE_SIZE + mod srv_page_size 3 3 bits status: 000=REC_STATUS_ORDINARY 001=REC_STATUS_NODE_PTR @@ -235,8 +235,8 @@ rec_get_next_ptr_const( { ulint field_value; - ut_ad(REC_NEXT_MASK == 0xFFFFUL); - ut_ad(REC_NEXT_SHIFT == 0); + compile_time_assert(REC_NEXT_MASK == 0xFFFFUL); + compile_time_assert(REC_NEXT_SHIFT == 0); field_value = mach_read_from_2(rec - REC_NEXT); @@ -254,13 +254,13 @@ rec_get_next_ptr_const( as signed 16-bit integer in 2's complement arithmetics. If all platforms defined int16_t in the standard headers, the expression could be written simpler as - (int16_t) field_value + ut_align_offset(...) < UNIV_PAGE_SIZE + (int16_t) field_value + ut_align_offset(...) < srv_page_size */ ut_ad((field_value >= 32768 ? field_value - 65536 : field_value) - + ut_align_offset(rec, UNIV_PAGE_SIZE) - < UNIV_PAGE_SIZE); + + ut_align_offset(rec, srv_page_size) + < srv_page_size); #endif /* There must be at least REC_N_NEW_EXTRA_BYTES + 1 between each record. 
*/ @@ -268,12 +268,12 @@ rec_get_next_ptr_const( && field_value < 32768) || field_value < (uint16) -REC_N_NEW_EXTRA_BYTES); - return((byte*) ut_align_down(rec, UNIV_PAGE_SIZE) - + ut_align_offset(rec + field_value, UNIV_PAGE_SIZE)); + return((byte*) ut_align_down(rec, srv_page_size) + + ut_align_offset(rec + field_value, srv_page_size)); } else { - ut_ad(field_value < UNIV_PAGE_SIZE); + ut_ad(field_value < srv_page_size); - return((byte*) ut_align_down(rec, UNIV_PAGE_SIZE) + return((byte*) ut_align_down(rec, srv_page_size) + field_value); } } @@ -304,12 +304,8 @@ rec_get_next_offs( ulint comp) /*!< in: nonzero=compact page format */ { ulint field_value; -#if REC_NEXT_MASK != 0xFFFFUL -# error "REC_NEXT_MASK != 0xFFFFUL" -#endif -#if REC_NEXT_SHIFT -# error "REC_NEXT_SHIFT != 0" -#endif + compile_time_assert(REC_NEXT_MASK == 0xFFFFUL); + compile_time_assert(REC_NEXT_SHIFT == 0); field_value = mach_read_from_2(rec - REC_NEXT); @@ -322,13 +318,13 @@ rec_get_next_offs( as signed 16-bit integer in 2's complement arithmetics. If all platforms defined int16_t in the standard headers, the expression could be written simpler as - (int16_t) field_value + ut_align_offset(...) < UNIV_PAGE_SIZE + (int16_t) field_value + ut_align_offset(...) < srv_page_size */ ut_ad((field_value >= 32768 ? field_value - 65536 : field_value) - + ut_align_offset(rec, UNIV_PAGE_SIZE) - < UNIV_PAGE_SIZE); + + ut_align_offset(rec, srv_page_size) + < srv_page_size); #endif if (field_value == 0) { @@ -341,9 +337,9 @@ rec_get_next_offs( && field_value < 32768) || field_value < (uint16) -REC_N_NEW_EXTRA_BYTES); - return(ut_align_offset(rec + field_value, UNIV_PAGE_SIZE)); + return(ut_align_offset(rec + field_value, srv_page_size)); } else { - ut_ad(field_value < UNIV_PAGE_SIZE); + ut_ad(field_value < srv_page_size); return(field_value); } @@ -360,14 +356,9 @@ rec_set_next_offs_old( ulint next) /*!< in: offset of the next record */ { ut_ad(rec); - ut_ad(UNIV_PAGE_SIZE > next); -#if REC_NEXT_MASK != 0xFFFFUL -# error "REC_NEXT_MASK != 0xFFFFUL" -#endif -#if REC_NEXT_SHIFT -# error "REC_NEXT_SHIFT != 0" -#endif - + ut_ad(srv_page_size > next); + compile_time_assert(REC_NEXT_MASK == 0xFFFFUL); + compile_time_assert(REC_NEXT_SHIFT == 0); mach_write_to_2(rec - REC_NEXT, next); } @@ -384,7 +375,7 @@ rec_set_next_offs_new( ulint field_value; ut_ad(rec); - ut_ad(UNIV_PAGE_SIZE > next); + ut_ad(srv_page_size > next); if (!next) { field_value = 0; @@ -395,7 +386,7 @@ rec_set_next_offs_new( field_value = (ulint) ((lint) next - - (lint) ut_align_offset(rec, UNIV_PAGE_SIZE)); + - (lint) ut_align_offset(rec, srv_page_size)); field_value &= REC_NEXT_MASK; } @@ -627,12 +618,11 @@ rec_get_info_and_status_bits( ulint comp) /*!< in: nonzero=compact page format */ { ulint bits; -#if (REC_NEW_STATUS_MASK >> REC_NEW_STATUS_SHIFT) \ -& (REC_INFO_BITS_MASK >> REC_INFO_BITS_SHIFT) -# error "REC_NEW_STATUS_MASK and REC_INFO_BITS_MASK overlap" -#endif + compile_time_assert(!((REC_NEW_STATUS_MASK >> REC_NEW_STATUS_SHIFT) + & (REC_INFO_BITS_MASK >> REC_INFO_BITS_SHIFT))); if (comp) { - bits = rec_get_info_bits(rec, TRUE) | rec_get_status(rec); + bits = rec_get_info_bits(rec, TRUE) + | ulint(rec_get_status(rec)); } else { bits = rec_get_info_bits(rec, FALSE); ut_ad(!(bits & ~(REC_INFO_BITS_MASK >> REC_INFO_BITS_SHIFT))); @@ -649,10 +639,8 @@ rec_set_info_and_status_bits( rec_t* rec, /*!< in/out: physical record */ ulint bits) /*!< in: info bits */ { -#if (REC_NEW_STATUS_MASK >> REC_NEW_STATUS_SHIFT) \ -& (REC_INFO_BITS_MASK >> REC_INFO_BITS_SHIFT) -# error 
"REC_NEW_STATUS_MASK and REC_INFO_BITS_MASK overlap" -#endif + compile_time_assert(!((REC_NEW_STATUS_MASK >> REC_NEW_STATUS_SHIFT) + & (REC_INFO_BITS_MASK >> REC_INFO_BITS_SHIFT))); rec_set_status(rec, bits & REC_NEW_STATUS_MASK); rec_set_info_bits_new(rec, bits & ~REC_NEW_STATUS_MASK); } @@ -805,10 +793,6 @@ rec_get_1byte_offs_flag( /*====================*/ const rec_t* rec) /*!< in: physical record */ { -#if TRUE != 1 -#error "TRUE != 1" -#endif - return(rec_get_bit_field_1(rec, REC_OLD_SHORT, REC_OLD_SHORT_MASK, REC_OLD_SHORT_SHIFT)); } @@ -822,10 +806,7 @@ rec_set_1byte_offs_flag( rec_t* rec, /*!< in: physical record */ ibool flag) /*!< in: TRUE if 1byte form */ { -#if TRUE != 1 -#error "TRUE != 1" -#endif - ut_ad(flag <= TRUE); + ut_ad(flag <= 1); rec_set_bit_field_1(rec, flag, REC_OLD_SHORT, REC_OLD_SHORT_MASK, REC_OLD_SHORT_SHIFT); @@ -1197,7 +1178,7 @@ rec_get_nth_field_size( os = rec_get_field_start_offs(rec, n); next_os = rec_get_field_start_offs(rec, n + 1); - ut_ad(next_os - os < UNIV_PAGE_SIZE); + ut_ad(next_os - os < srv_page_size); return(next_os - os); } @@ -1299,7 +1280,7 @@ rec_offs_data_size( ut_ad(rec_offs_validate(NULL, NULL, offsets)); size = rec_offs_base(offsets)[rec_offs_n_fields(offsets)] & REC_OFFS_MASK; - ut_ad(size < UNIV_PAGE_SIZE); + ut_ad(size < srv_page_size); return(size); } @@ -1317,7 +1298,7 @@ rec_offs_extra_size( ulint size; ut_ad(rec_offs_validate(NULL, NULL, offsets)); size = *rec_offs_base(offsets) & REC_OFFS_MASK; - ut_ad(size < UNIV_PAGE_SIZE); + ut_ad(size < srv_page_size); return(size); } diff --git a/storage/innobase/include/row0log.h b/storage/innobase/include/row0log.h index df9920d9bcc..6974ce1b56b 100644 --- a/storage/innobase/include/row0log.h +++ b/storage/innobase/include/row0log.h @@ -55,12 +55,13 @@ row_log_allocate( or NULL when creating a secondary index */ bool same_pk,/*!< in: whether the definition of the PRIMARY KEY has remained the same */ - const dtuple_t* add_cols, + const dtuple_t* defaults, /*!< in: default values of - added columns, or NULL */ + added, changed columns, or NULL */ const ulint* col_map,/*!< in: mapping of old column numbers to new ones, or NULL if !table */ - const char* path) /*!< in: where to create temporary file */ + const char* path, /*!< in: where to create temporary file */ + bool ignore) /*!< in: Whether alter ignore issued */ MY_ATTRIBUTE((nonnull(1), warn_unused_result)); /******************************************************//** diff --git a/storage/innobase/include/row0merge.h b/storage/innobase/include/row0merge.h index eb4da62164b..7330031c186 100644 --- a/storage/innobase/include/row0merge.h +++ b/storage/innobase/include/row0merge.h @@ -61,11 +61,11 @@ struct ib_sequence_t; /** @brief Block size for I/O operations in merge sort. -The minimum is UNIV_PAGE_SIZE, or page_get_free_space_of_empty() +The minimum is srv_page_size, or page_get_free_space_of_empty() rounded to a power of 2. When not creating a PRIMARY KEY that contains column prefixes, this -can be set as small as UNIV_PAGE_SIZE / 2. */ +can be set as small as srv_page_size / 2. */ typedef byte row_merge_block_t; /** @brief Secondary buffer for I/O operations of merge records. 
@@ -101,7 +101,7 @@ struct row_merge_buf_t { /** Information about temporary files used in merge sort */ struct merge_file_t { - int fd; /*!< file descriptor */ + pfs_os_file_t fd; /*!< file descriptor */ ulint offset; /*!< file offset (end of file) */ ib_uint64_t n_rec; /*!< number of records in the file */ }; @@ -193,7 +193,7 @@ row_merge_drop_temp_indexes(void); UNIV_PFS_IO defined, register the file descriptor with Performance Schema. @param[in] path location for creating temporary merge files, or NULL @return File descriptor */ -int +pfs_os_file_t row_merge_file_create_low( const char* path) MY_ATTRIBUTE((warn_unused_result)); @@ -203,7 +203,7 @@ if UNIV_PFS_IO is defined. */ void row_merge_file_destroy_low( /*=======================*/ - int fd); /*!< in: merge file descriptor */ + const pfs_os_file_t& fd); /*!< in: merge file descriptor */ /*********************************************************************//** Provide a new pathname for a table that is being renamed if it belongs to @@ -308,7 +308,7 @@ old_table unless creating a PRIMARY KEY @param[in] n_indexes size of indexes[] @param[in,out] table MySQL table, for reporting erroneous key value if applicable -@param[in] add_cols default values of added columns, or NULL +@param[in] defaults default values of added, changed columns, or NULL @param[in] col_map mapping of old column numbers to new ones, or NULL if old_table == new_table @param[in] add_autoinc number of added AUTO_INCREMENT columns, or @@ -322,7 +322,6 @@ this function and it will be passed to other functions for further accounting. @param[in] add_v new virtual columns added along with indexes @param[in] eval_table mysql table used to evaluate virtual column value, see innobase_get_computed_value(). -@param[in] drop_historical whether to drop historical system rows @return DB_SUCCESS or error code */ dberr_t row_merge_build_indexes( @@ -334,15 +333,14 @@ row_merge_build_indexes( const ulint* key_numbers, ulint n_indexes, struct TABLE* table, - const dtuple_t* add_cols, + const dtuple_t* defaults, const ulint* col_map, ulint add_autoinc, ib_sequence_t& sequence, bool skip_pk_sort, ut_stage_alter_t* stage, const dict_add_v_col_t* add_v, - struct TABLE* eval_table, - bool drop_historical) + struct TABLE* eval_table) MY_ATTRIBUTE((warn_unused_result)); /********************************************************************//** @@ -372,7 +370,7 @@ UNIV_INTERN bool row_merge_write( /*============*/ - int fd, /*!< in: file descriptor */ + const pfs_os_file_t& fd, /*!< in: file descriptor */ ulint offset, /*!< in: offset where to write, in number of row_merge_block_t elements */ const void* buf, /*!< in: data */ @@ -393,7 +391,7 @@ row_merge_buf_empty( @param[out] merge_file merge file structure @param[in] path location for creating temporary file, or NULL @return file descriptor, or -1 on failure */ -int +pfs_os_file_t row_merge_file_create( merge_file_t* merge_file, const char* path) @@ -421,7 +419,7 @@ row_merge_sort( const row_merge_dup_t* dup, merge_file_t* file, row_merge_block_t* block, - int* tmpfd, + pfs_os_file_t* tmpfd, const bool update_progress, const double pct_progress, const double pct_cost, @@ -460,7 +458,7 @@ row_merge_file_destroy( bool row_merge_read( /*===========*/ - int fd, /*!< in: file descriptor */ + const pfs_os_file_t& fd, /*!< in: file descriptor */ ulint offset, /*!< in: offset where to read in number of row_merge_block_t elements */ @@ -479,7 +477,7 @@ row_merge_read_rec( mrec_buf_t* buf, /*!< in/out: secondary buffer */ const byte* b, /*!< in: 
pointer to record */ const dict_index_t* index, /*!< in: index of the record */ - int fd, /*!< in: file descriptor */ + const pfs_os_file_t& fd, /*!< in: file descriptor */ ulint* foffs, /*!< in/out: file offset */ const mrec_t** mrec, /*!< out: pointer to merge record, or NULL on end of list diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h index 61a363d6de8..7c0b5d3ece9 100644 --- a/storage/innobase/include/row0mysql.h +++ b/storage/innobase/include/row0mysql.h @@ -792,7 +792,7 @@ struct row_prebuilt_t { allocated mem buf start, because there is a 4 byte magic number at the start and at the end */ - ibool keep_other_fields_on_keyread; /*!< when using fetch + bool keep_other_fields_on_keyread; /*!< when using fetch cache with HA_EXTRA_KEYREAD, don't overwrite other fields in mysql row row buffer.*/ diff --git a/storage/innobase/include/row0row.h b/storage/innobase/include/row0row.h index 4f329f0d675..1f37a6b02d7 100644 --- a/storage/innobase/include/row0row.h +++ b/storage/innobase/include/row0row.h @@ -153,9 +153,9 @@ row_build( consulted instead; the user columns in this table should be the same columns as in index->table */ - const dtuple_t* add_cols, + const dtuple_t* defaults, /*!< in: default values of - added columns, or NULL */ + added, changed columns, or NULL */ const ulint* col_map,/*!< in: mapping of old column numbers to new ones, or NULL */ row_ext_t** ext, /*!< out, own: cache of @@ -177,7 +177,7 @@ addition of new virtual columns. of an index, or NULL if index->table should be consulted instead -@param[in] add_cols default values of added columns, or NULL +@param[in] defaults default values of added, changed columns, or NULL @param[in] add_v new virtual columns added along with new indexes @param[in] col_map mapping of old column @@ -194,7 +194,7 @@ row_build_w_add_vcol( const rec_t* rec, const ulint* offsets, const dict_table_t* col_table, - const dtuple_t* add_cols, + const dtuple_t* defaults, const dict_add_v_col_t* add_v, const ulint* col_map, row_ext_t** ext, @@ -269,9 +269,8 @@ row_build_row_ref_in_tuple( held as long as the row reference is used! */ const dict_index_t* index, /*!< in: secondary index */ - ulint* offsets,/*!< in: rec_get_offsets(rec, index) + ulint* offsets)/*!< in: rec_get_offsets(rec, index) or NULL */ - trx_t* trx) /*!< in: transaction or NULL */ MY_ATTRIBUTE((nonnull(1,2,3))); /*******************************************************************//** Builds from a secondary index record a row reference with which we can diff --git a/storage/innobase/include/row0sel.h b/storage/innobase/include/row0sel.h index d73c186b12e..366c24acec8 100644 --- a/storage/innobase/include/row0sel.h +++ b/storage/innobase/include/row0sel.h @@ -135,8 +135,7 @@ row_sel_convert_mysql_key_to_innobase( ulint buf_len, /*!< in: buffer length */ dict_index_t* index, /*!< in: index of the key value */ const byte* key_ptr, /*!< in: MySQL key value */ - ulint key_len, /*!< in: MySQL key value length */ - trx_t* trx); /*!< in: transaction */ + ulint key_len); /*!< in: MySQL key value length */ /** Searches for rows in the database. 
This is used in the interface to diff --git a/storage/innobase/include/row0upd.h b/storage/innobase/include/row0upd.h index e10f0906a8c..5e01e513a50 100644 --- a/storage/innobase/include/row0upd.h +++ b/storage/innobase/include/row0upd.h @@ -522,8 +522,8 @@ struct upd_node_t{ ibool searched_update; /* TRUE if searched update, FALSE if positioned */ - ibool in_mysql_interface; - /* TRUE if the update node was created + bool in_mysql_interface; + /* whether the update node was created for the MySQL interface */ dict_foreign_t* foreign;/* NULL or pointer to a foreign key constraint if this update node is used in diff --git a/storage/innobase/include/row0upd.ic b/storage/innobase/include/row0upd.ic index 364c876ecc7..5e43a272388 100644 --- a/storage/innobase/include/row0upd.ic +++ b/storage/innobase/include/row0upd.ic @@ -181,9 +181,8 @@ row_upd_rec_sys_fields( offset = row_get_trx_id_offset(index, offsets); } -#if DATA_TRX_ID + 1 != DATA_ROLL_PTR -# error "DATA_TRX_ID + 1 != DATA_ROLL_PTR" -#endif + compile_time_assert(DATA_TRX_ID + 1 == DATA_ROLL_PTR); + /* During IMPORT the trx id in the record can be in the future, if the .ibd file is being imported from another instance. During IMPORT roll_ptr will be 0. */ diff --git a/storage/innobase/include/row0vers.h b/storage/innobase/include/row0vers.h index 645f11faaad..749f42cbcf3 100644 --- a/storage/innobase/include/row0vers.h +++ b/storage/innobase/include/row0vers.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -54,22 +54,6 @@ row_vers_impl_x_locked( dict_index_t* index, const ulint* offsets); -/*****************************************************************//** -Finds out if we must preserve a delete marked earlier version of a clustered -index record, because it is >= the purge view. -@param[in] trx_id transaction id in the version -@param[in] name table name -@param[in,out] mtr mini transaction holding the latch on the - clustered index record; it will also hold - the latch on purge_view -@return TRUE if earlier version should be preserved */ -ibool -row_vers_must_preserve_del_marked( -/*==============================*/ - trx_id_t trx_id, - const table_name_t& name, - mtr_t* mtr); - /*****************************************************************//** Finds out if a version of the record, where the version >= the current purge view, should have ientry as its secondary index entry. We check diff --git a/storage/innobase/include/srv0conc.h b/storage/innobase/include/srv0conc.h index 9573c5add84..35937fe1204 100644 --- a/storage/innobase/include/srv0conc.h +++ b/storage/innobase/include/srv0conc.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2011, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2018, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -41,9 +42,7 @@ Created 2011/04/18 Sunny Bains #define srv_conc_h /** We are prepared for a situation that we have this many threads waiting for -a semaphore inside InnoDB. 
innobase_start_or_create_for_mysql() sets the -value. */ - +a semaphore inside InnoDB. srv_start() sets the value. */ extern ulint srv_max_n_threads; /** The following controls how many threads we let inside InnoDB concurrently: diff --git a/storage/innobase/include/srv0mon.h b/storage/innobase/include/srv0mon.h index a0230575526..48dffe04197 100644 --- a/storage/innobase/include/srv0mon.h +++ b/storage/innobase/include/srv0mon.h @@ -2,7 +2,7 @@ Copyright (c) 2010, 2015, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2013, 2017, MariaDB Corporation. +Copyright (c) 2013, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the @@ -502,18 +502,18 @@ extern ulint monitor_set_tbl[(NUM_MONITOR + NUM_BITS_ULINT - 1) / /** Macros to turn on/off the control bit in monitor_set_tbl for a monitor counter option. */ -#define MONITOR_ON(monitor) \ - (monitor_set_tbl[monitor / NUM_BITS_ULINT] |= \ - ((ulint)1 << (monitor % NUM_BITS_ULINT))) +#define MONITOR_ON(monitor) \ + (monitor_set_tbl[unsigned(monitor) / NUM_BITS_ULINT] |= \ + (ulint(1) << (unsigned(monitor) % NUM_BITS_ULINT))) -#define MONITOR_OFF(monitor) \ - (monitor_set_tbl[monitor / NUM_BITS_ULINT] &= \ - ~((ulint)1 << (monitor % NUM_BITS_ULINT))) +#define MONITOR_OFF(monitor) \ + (monitor_set_tbl[unsigned(monitor) / NUM_BITS_ULINT] &= \ + ~(ulint(1) << (unsigned(monitor) % NUM_BITS_ULINT))) /** Check whether the requested monitor is turned on/off */ -#define MONITOR_IS_ON(monitor) \ - (monitor_set_tbl[monitor / NUM_BITS_ULINT] & \ - ((ulint)1 << (monitor % NUM_BITS_ULINT))) +#define MONITOR_IS_ON(monitor) \ + (monitor_set_tbl[unsigned(monitor) / NUM_BITS_ULINT] & \ + (ulint(1) << (unsigned(monitor) % NUM_BITS_ULINT))) /** The actual monitor counter array that records each monintor counter value */ diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index bd1452a3002..a5a73265170 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -3,7 +3,7 @@ Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, 2009, Google Inc. Copyright (c) 2009, Percona Inc. -Copyright (c) 2013, 2017, MariaDB Corporation. +Copyright (c) 2013, 2018, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -48,7 +48,6 @@ Created 10/10/1995 Heikki Tuuri #include "mysql/psi/psi.h" #include "univ.i" -#include "log0log.h" #include "os0event.h" #include "que0types.h" #include "trx0types.h" @@ -81,7 +80,7 @@ struct srv_stats_t lsn_ctr_1_t os_log_written; /** Number of writes being done to the log files. - Protected by log_sys->write_mutex. */ + Protected by log_sys.write_mutex. */ ulint_ctr_1_t os_log_pending_writes; /** We increase this counter, when we don't have enough @@ -148,7 +147,7 @@ struct srv_stats_t ulint_ctr_1_t n_lock_wait_count; /** Number of threads currently waiting on database locks */ - simple_counter n_lock_wait_current_count; + simple_atomic_counter<> n_lock_wait_current_count; /** Number of rows read. 
*/ ulint_ctr_64_t n_rows_read; @@ -337,17 +336,15 @@ extern const ulint SRV_UNDO_TABLESPACE_SIZE_IN_PAGES; extern char* srv_log_group_home_dir; -/** Maximum number of srv_n_log_files, or innodb_log_files_in_group */ -#define SRV_N_LOG_FILES_MAX 100 extern ulong srv_n_log_files; /** The InnoDB redo log file size, or 0 when changing the redo log format at startup (while disallowing writes to the redo log). */ extern ulonglong srv_log_file_size; -extern ulint srv_log_buffer_size; +extern ulong srv_log_buffer_size; extern ulong srv_flush_log_at_trx_commit; extern uint srv_flush_log_at_timeout; extern ulong srv_log_write_ahead_size; -extern char srv_adaptive_flushing; +extern my_bool srv_adaptive_flushing; extern my_bool srv_flush_sync; #ifdef WITH_INNODB_DISALLOW_WRITES @@ -397,8 +394,8 @@ extern ulint srv_lock_table_size; extern ulint srv_n_file_io_threads; extern my_bool srv_random_read_ahead; extern ulong srv_read_ahead_threshold; -extern ulint srv_n_read_io_threads; -extern ulint srv_n_write_io_threads; +extern ulong srv_n_read_io_threads; +extern ulong srv_n_write_io_threads; /* Defragmentation, Origianlly facebook default value is 100, but it's too high */ #define SRV_DEFRAGMENT_FREQUENCY_DEFAULT 40 @@ -432,8 +429,6 @@ to treat NULL value when collecting statistics. It is not defined as enum type because the configure option takes unsigned integer type. */ extern ulong srv_innodb_stats_method; -extern char* srv_file_flush_method_str; - extern ulint srv_max_n_open_files; extern ulong srv_n_page_cleaners; @@ -468,7 +463,7 @@ extern my_bool srv_stats_include_delete_marked; extern unsigned long long srv_stats_modified_counter; extern my_bool srv_stats_sample_traditional; -extern ibool srv_use_doublewrite_buf; +extern my_bool srv_use_doublewrite_buf; extern ulong srv_doublewrite_batch_size; extern ulong srv_checksum_algorithm; @@ -660,10 +655,9 @@ extern PSI_stage_info srv_stage_buffer_pool_load; #endif /* HAVE_PSI_STAGE_INTERFACE */ -/** Alternatives for the file flush option in Unix; see the InnoDB manual -about what these mean */ +/** Alternatives for innodb_flush_method */ enum srv_flush_t { - SRV_FSYNC = 1, /*!< fsync, the default */ + SRV_FSYNC = 0, /*!< fsync, the default */ SRV_O_DSYNC, /*!< open log files in O_SYNC mode */ SRV_LITTLESYNC, /*!< do not call os_file_flush() when writing data files, but do flush @@ -675,18 +669,21 @@ enum srv_flush_t { the reason for which is that some FS do not flush meta-data when unbuffered IO happens */ - SRV_O_DIRECT_NO_FSYNC, + SRV_O_DIRECT_NO_FSYNC /*!< do not use fsync() when using direct IO i.e.: it can be set to avoid the fsync() call that we make when using SRV_UNIX_O_DIRECT. However, in this case user/DBA should be sure about the integrity of the meta-data */ - SRV_ALL_O_DIRECT_FSYNC +#ifdef _WIN32 + ,SRV_ALL_O_DIRECT_FSYNC /*!< Traditional Windows appoach to open all files without caching, and do FileFlushBuffers()*/ +#endif }; -extern enum srv_flush_t srv_file_flush_method; +/** innodb_flush_method */ +extern ulong srv_file_flush_method; /** Alternatives for srv_force_recovery. Non-zero values are intended to help the user get a damaged database up so that he can dump intact @@ -900,6 +897,9 @@ srv_release_threads(enum srv_thread_type type, ulint n); void srv_purge_wakeup(); +/** Shut down the purge threads. */ +void srv_purge_shutdown(); + /** Check if tablespace is being truncated. 
(Ignore system-tablespace as we don't re-create the tablespace and so some of the action that are suppressed by this function @@ -920,16 +920,10 @@ srv_was_tablespace_truncated(const fil_space_t* space); #ifdef UNIV_DEBUG /** Disables master thread. It's used by: SET GLOBAL innodb_master_thread_disabled_debug = 1 (0). -@param[in] thd thread handle -@param[in] var pointer to system variable -@param[out] var_ptr where the formal string goes @param[in] save immediate result from check function */ void -srv_master_thread_disabled_debug_update( - THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save); +srv_master_thread_disabled_debug_update(THD*, st_mysql_sys_var*, void*, + const void* save); #endif /* UNIV_DEBUG */ /** Status variables to be passed to MySQL */ @@ -974,7 +968,7 @@ struct export_var_t{ ulint innodb_os_log_fsyncs; /*!< fil_n_log_flushes */ ulint innodb_os_log_pending_writes; /*!< srv_os_log_pending_writes */ ulint innodb_os_log_pending_fsyncs; /*!< fil_n_pending_log_flushes */ - ulint innodb_page_size; /*!< UNIV_PAGE_SIZE */ + ulint innodb_page_size; /*!< srv_page_size */ ulint innodb_pages_created; /*!< buf_pool->stat.n_pages_created */ ulint innodb_pages_read; /*!< buf_pool->stat.n_pages_read*/ ulint innodb_pages_written; /*!< buf_pool->stat.n_pages_written */ diff --git a/storage/innobase/include/srv0start.h b/storage/innobase/include/srv0start.h index 3575f2e40b9..ee263f6c1f6 100644 --- a/storage/innobase/include/srv0start.h +++ b/storage/innobase/include/srv0start.h @@ -44,20 +44,16 @@ only one buffer pool instance is used. */ dberr_t srv_undo_tablespaces_init(bool create_new_db); -/****************************************************************//** -Starts Innobase and creates a new database if database files -are not found and the user wants. +/** Start InnoDB. +@param[in] create_new_db whether to create a new database @return DB_SUCCESS or error code */ -dberr_t -innobase_start_or_create_for_mysql(); +dberr_t srv_start(bool create_new_db); /** Shut down InnoDB. */ -void -innodb_shutdown(); +void innodb_shutdown(); /** Shut down background threads that can generate undo log. */ -void -srv_shutdown_bg_undo_sources(); +void srv_shutdown_bg_undo_sources(); /*************************************************************//** Copy the file path component of the physical file to parameter. It will diff --git a/storage/innobase/include/sync0arr.h b/storage/innobase/include/sync0arr.h index 4104e594cf9..b3180c1779d 100644 --- a/storage/innobase/include/sync0arr.h +++ b/storage/innobase/include/sync0arr.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2017, MariaDB Corporation. +Copyright (c) 2015, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -99,16 +99,11 @@ void sync_array_print( FILE* file); /*!< in: file where to print */ -/**********************************************************************//** -Create the primary system wait array(s), they are protected by an OS mutex */ -void -sync_array_init( - ulint n_threads); /*!< in: Number of slots to create */ +/** Create the primary system wait arrays */ +void sync_array_init(); -/**********************************************************************//** -Close sync array wait sub-system. 
*/ -void -sync_array_close(); +/** Destroy the sync array wait sub-system. */ +void sync_array_close(); /**********************************************************************//** Get an instance of the sync wait array. */ diff --git a/storage/innobase/include/sync0policy.h b/storage/innobase/include/sync0policy.h index 78e77f93269..daa08a91e1e 100644 --- a/storage/innobase/include/sync0policy.h +++ b/storage/innobase/include/sync0policy.h @@ -50,7 +50,7 @@ public: m_mutex(), m_filename(), m_line(), - m_thread_id(os_thread_id_t(ULINT_UNDEFINED)) + m_thread_id(ULINT_UNDEFINED) { /* No op */ } @@ -76,7 +76,8 @@ public: { m_mutex = mutex; - my_atomic_storelint(&m_thread_id, os_thread_get_curr_id()); + my_atomic_storelint(&m_thread_id, + ulint(os_thread_get_curr_id())); m_filename = filename; @@ -167,8 +168,7 @@ public: /** Called when the mutex is "created". Note: Not from the constructor but when the mutex is initialised. @param[in] id Mutex ID */ - void init(latch_id_t id) - UNIV_NOTHROW; + void init(latch_id_t id) UNIV_NOTHROW; /** Called when an attempt is made to lock the mutex @param[in] mutex Mutex instance to be locked @@ -241,7 +241,7 @@ struct NoPolicy { void init(const Mutex&, latch_id_t, const char*, uint32_t) UNIV_NOTHROW { } void destroy() UNIV_NOTHROW { } - void enter(const Mutex&, const char*, unsigned line) UNIV_NOTHROW { } + void enter(const Mutex&, const char*, unsigned) UNIV_NOTHROW { } void add(uint32_t, uint32_t) UNIV_NOTHROW { } void locked(const Mutex&, const char*, ulint) UNIV_NOTHROW { } void release(const Mutex&) UNIV_NOTHROW { } @@ -275,12 +275,11 @@ public: /** Called when the mutex is "created". Note: Not from the constructor but when the mutex is initialised. - @param[in] mutex Mutex instance to track @param[in] id Mutex ID @param[in] filename File where mutex was created @param[in] line Line in filename */ void init( - const MutexType& mutex, + const Mutex&, latch_id_t id, const char* filename, uint32_t line) @@ -423,15 +422,8 @@ public: /** Called when the mutex is "created". Note: Not from the constructor but when the mutex is initialised. - @param[in] mutex Mutex instance to track - @param[in] id Mutex ID - @param[in] filename File where mutex was created - @param[in] line Line in filename */ - void init( - const MutexType& mutex, - latch_id_t id, - const char* filename, - uint32_t line) + @param[in] id Mutex ID */ + void init(const Mutex&, latch_id_t id, const char*, uint32) UNIV_NOTHROW { /* It can be LATCH_ID_BUF_BLOCK_MUTEX or diff --git a/storage/innobase/include/sync0policy.ic b/storage/innobase/include/sync0policy.ic index 2e032a51fb8..a28e3c382b4 100644 --- a/storage/innobase/include/sync0policy.ic +++ b/storage/innobase/include/sync0policy.ic @@ -88,7 +88,7 @@ void MutexDebug::locked( } template -void MutexDebug::release(const Mutex* mutex) +void MutexDebug::release(const Mutex*) UNIV_NOTHROW { ut_ad(is_owned()); diff --git a/storage/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic index 8a1a3741b47..5c7a73b490d 100644 --- a/storage/innobase/include/sync0rw.ic +++ b/storage/innobase/include/sync0rw.ic @@ -2,7 +2,7 @@ Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, 2018, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. 
Those modifications are gratefully acknowledged and are described @@ -116,10 +116,10 @@ rw_lock_get_reader_count( if (lock_word > X_LOCK_HALF_DECR) { /* s-locked, no x-waiter */ - return(X_LOCK_DECR - lock_word); + return ulint(X_LOCK_DECR - lock_word); } else if (lock_word > 0) { /* s-locked, with sx-locks only */ - return(X_LOCK_HALF_DECR - lock_word); + return ulint(X_LOCK_HALF_DECR - lock_word); } else if (lock_word == 0) { /* x-locked */ return(0); @@ -161,12 +161,12 @@ rw_lock_get_x_lock_count( /* no s-lock, no sx-lock, 2 or more x-locks. First 2 x-locks are set with -X_LOCK_DECR, all other recursive x-locks are set with -1 */ - return(2 - (lock_copy + X_LOCK_DECR)); + return ulint(2 - X_LOCK_DECR - lock_copy); } else { /* no s-lock, 1 or more sx-lock, 2 or more x-locks. First 2 x-locks are set with -(X_LOCK_DECR + X_LOCK_HALF_DECR), all other recursive x-locks are set with -1 */ - return(2 - (lock_copy + X_LOCK_DECR + X_LOCK_HALF_DECR)); + return ulint(2 - X_LOCK_DECR - X_LOCK_HALF_DECR - lock_copy); } } diff --git a/storage/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h index 0d813b6bd87..ffa682b46db 100644 --- a/storage/innobase/include/sync0sync.h +++ b/storage/innobase/include/sync0sync.h @@ -94,7 +94,6 @@ extern mysql_pfs_key_t srv_innodb_monitor_mutex_key; extern mysql_pfs_key_t srv_misc_tmpfile_mutex_key; extern mysql_pfs_key_t srv_monitor_file_mutex_key; extern mysql_pfs_key_t buf_dblwr_mutex_key; -extern mysql_pfs_key_t trx_undo_mutex_key; extern mysql_pfs_key_t trx_mutex_key; extern mysql_pfs_key_t trx_pool_mutex_key; extern mysql_pfs_key_t trx_pool_manager_mutex_key; diff --git a/storage/innobase/include/sync0types.h b/storage/innobase/include/sync0types.h index d70f8a2b29b..73a9856e0b0 100644 --- a/storage/innobase/include/sync0types.h +++ b/storage/innobase/include/sync0types.h @@ -108,16 +108,6 @@ V Transaction system header | V -Transaction undo mutex The undo log entry must be written -| before any index page is modified. -| Transaction undo mutex is for the undo -| logs the analogue of the tree latch -| for a B-tree. If a thread has the -| trx undo mutex reserved, it is allowed -| to latch the undo log pages in any -| order, and also after it has acquired -| the fsp latch. -V Rollback segment mutex The rollback segment mutex must be | reserved, if, e.g., a new page must | be added to an undo log. 
The rollback @@ -256,7 +246,6 @@ enum latch_level_t { SYNC_RSEG_HEADER_NEW, SYNC_NOREDO_RSEG, SYNC_REDO_RSEG, - SYNC_TRX_UNDO, SYNC_PURGE_LATCH, SYNC_TREE_NODE, SYNC_TREE_NODE_FROM_HASH, @@ -338,7 +327,6 @@ enum latch_id_t { LATCH_ID_SRV_MISC_TMPFILE, LATCH_ID_SRV_MONITOR_FILE, LATCH_ID_BUF_DBLWR, - LATCH_ID_TRX_UNDO, LATCH_ID_TRX_POOL, LATCH_ID_TRX_POOL_MANAGER, LATCH_ID_TRX, @@ -1198,50 +1186,43 @@ static inline void my_atomic_storelint(ulint *A, ulint B) #endif } -/** Simple counter aligned to CACHE_LINE_SIZE -@tparam Type the integer type of the counter -@tparam atomic whether to use atomic memory access */ -template +/** Simple non-atomic counter aligned to CACHE_LINE_SIZE +@tparam Type the integer type of the counter */ +template struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) simple_counter { /** Increment the counter */ Type inc() { return add(1); } /** Decrement the counter */ - Type dec() { return sub(1); } + Type dec() { return add(Type(~0)); } /** Add to the counter @param[in] i amount to be added @return the value of the counter after adding */ - Type add(Type i) - { - compile_time_assert(!atomic || sizeof(Type) == sizeof(lint)); - if (atomic) { -#ifdef _MSC_VER -// Suppress type conversion/ possible loss of data warning -#pragma warning (push) -#pragma warning (disable : 4244) -#endif - return Type(my_atomic_addlint(reinterpret_cast - (&m_counter), i)); -#ifdef _MSC_VER -#pragma warning (pop) -#endif - } else { - return m_counter += i; - } - } - /** Subtract from the counter - @param[in] i amount to be subtracted - @return the value of the counter after adding */ - Type sub(Type i) - { - compile_time_assert(!atomic || sizeof(Type) == sizeof(lint)); - if (atomic) { - return Type(my_atomic_addlint(&m_counter, -lint(i))); - } else { - return m_counter -= i; - } - } + Type add(Type i) { return m_counter += i; } + + /** @return the value of the counter */ + operator Type() const { return m_counter; } + +private: + /** The counter */ + Type m_counter; +}; + +/** Simple atomic counter aligned to CACHE_LINE_SIZE +@tparam Type lint or ulint */ +template +struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) simple_atomic_counter +{ + /** Increment the counter */ + Type inc() { return add(1); } + /** Decrement the counter */ + Type dec() { return add(Type(~0)); } + + /** Add to the counter + @param[in] i amount to be added + @return the value of the counter before adding */ + Type add(Type i) { return my_atomic_addlint(&m_counter, i); } /** @return the value of the counter (non-atomic access)! */ operator Type() const { return m_counter; } diff --git a/storage/innobase/include/trx0purge.h b/storage/innobase/include/trx0purge.h index 0d50671f88f..171f1d2ce86 100644 --- a/storage/innobase/include/trx0purge.h +++ b/storage/innobase/include/trx0purge.h @@ -60,32 +60,6 @@ trx_purge( ulint n_purge_threads, /*!< in: number of purge tasks to submit to task queue. */ bool truncate); /*!< in: truncate history if true */ -/*******************************************************************//** -Stop purge and wait for it to stop, move to PURGE_STATE_STOP. */ -void -trx_purge_stop(void); -/*================*/ -/*******************************************************************//** -Resume purge, move to PURGE_STATE_RUN. 
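The sync0types.h hunk above splits the old simple_counter<Type, atomic> template into a plain counter and simple_atomic_counter, the latter built on the server's my_atomic wrappers; both stay aligned to the cache line so that hot counters do not false-share. A stand-alone sketch of the two shapes, using std::atomic and a hard-coded 64-byte alignment as stand-ins for my_atomic_addlint() and CPU_LEVEL1_DCACHE_LINESIZE:

	#include <atomic>
	#include <cassert>

	typedef unsigned long ulint;

	/* non-atomic variant: callers must hold some existing mutex when updating */
	template <typename Type = ulint>
	struct alignas(64) my_simple_counter {
		Type inc() { return add(1); }
		Type dec() { return add(Type(~0)); }	/* unsigned wrap-around == subtract 1 */
		/* returns the value after adding, as in the non-atomic simple_counter */
		Type add(Type i) { return m_counter += i; }
		operator Type() const { return m_counter; }
	private:
		Type m_counter = 0;	/* initialized here only for the sketch */
	};

	/* atomic variant: safe to update from any thread without a lock */
	template <typename Type = ulint>
	struct alignas(64) my_simple_atomic_counter {
		Type inc() { return add(1); }
		Type dec() { return add(Type(~0)); }
		/* returns the value before adding, as documented for simple_atomic_counter */
		Type add(Type i)
		{ return m_counter.fetch_add(i, std::memory_order_relaxed); }
		/* relaxed read, analogous to the original's plain read */
		operator Type() const
		{ return m_counter.load(std::memory_order_relaxed); }
	private:
		std::atomic<Type> m_counter{0};
	};

	int main()
	{
		my_simple_atomic_counter<> n_lock_wait_current;
		n_lock_wait_current.inc();	/* a thread starts waiting for a lock */
		n_lock_wait_current.dec();	/* ... and stops waiting */
		assert(ulint(n_lock_wait_current) == 0);
	}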
*/ -void -trx_purge_run(void); -/*================*/ - -/** Purge states */ -enum purge_state_t { - PURGE_STATE_INIT, /*!< Purge instance created */ - PURGE_STATE_RUN, /*!< Purge should be running */ - PURGE_STATE_STOP, /*!< Purge should be stopped */ - PURGE_STATE_EXIT, /*!< Purge has been shutdown */ - PURGE_STATE_DISABLED /*!< Purge was never started */ -}; - -/*******************************************************************//** -Get the purge state. -@return purge state. */ -purge_state_t -trx_purge_state(void); -/*=================*/ /** Rollback segements from a given transaction with trx-no scheduled for purge. */ @@ -396,29 +370,20 @@ namespace undo { /** The control structure used in the purge operation */ class purge_sys_t { - bool m_initialised; public: + /** signal state changes; os_event_reset() and os_event_set() + are protected by rw_lock_x_lock(latch) */ MY_ALIGNED(CACHE_LINE_SIZE) - rw_lock_t latch; /*!< The latch protecting the purge - view. A purge operation must acquire an - x-latch here for the instant at which - it changes the purge view: an undo - log operation can prevent this by - obtaining an s-latch here. It also - protects state and running */ + os_event_t event; + /** latch protecting view, m_enabled */ MY_ALIGNED(CACHE_LINE_SIZE) - os_event_t event; /*!< State signal event; - os_event_set() and os_event_reset() - are protected by purge_sys_t::latch - X-lock */ - MY_ALIGNED(CACHE_LINE_SIZE) - ulint n_stop; /*!< Counter to track number stops */ - - volatile bool running; /*!< true, if purge is active, - we check this without the latch too */ - volatile purge_state_t state; /*!< Purge coordinator thread states, - we check this in several places - without holding the latch. */ + rw_lock_t latch; +private: + /** whether purge is enabled; protected by latch and my_atomic */ + int32_t m_enabled; + /** number of pending stop() calls without resume() */ + int32_t m_paused; +public: que_t* query; /*!< The query graph which will do the parallelized purge operation */ MY_ALIGNED(CACHE_LINE_SIZE) @@ -494,10 +459,7 @@ public: uninitialised. Real initialisation happens in create(). */ - purge_sys_t() : m_initialised(false) {} - - - bool is_initialised() const { return m_initialised; } + purge_sys_t() : event(NULL), m_enabled(false) {} /** Create the instance */ @@ -505,6 +467,49 @@ public: /** Close the purge system on shutdown */ void close(); + + /** @return whether purge is enabled */ + bool enabled() + { + return my_atomic_load32_explicit(&m_enabled, MY_MEMORY_ORDER_RELAXED); + } + /** @return whether purge is enabled */ + bool enabled_latched() + { + ut_ad(rw_lock_own_flagged(&latch, RW_LOCK_FLAG_X | RW_LOCK_FLAG_S)); + return bool(m_enabled); + } + /** @return whether the purge coordinator is paused */ + bool paused() + { return my_atomic_load32_explicit(&m_paused, MY_MEMORY_ORDER_RELAXED); } + /** @return whether the purge coordinator is paused */ + bool paused_latched() + { + ut_ad(rw_lock_own_flagged(&latch, RW_LOCK_FLAG_X | RW_LOCK_FLAG_S)); + return m_paused != 0; + } + + /** Enable purge at startup. 
Not protected by latch; the main thread + will wait for purge_sys.enabled() in srv_start() */ + void coordinator_startup() + { + ut_ad(!enabled()); + my_atomic_store32_explicit(&m_enabled, true, MY_MEMORY_ORDER_RELAXED); + } + + /** Disable purge at shutdown */ + void coordinator_shutdown() + { + ut_ad(enabled()); + my_atomic_store32_explicit(&m_enabled, false, MY_MEMORY_ORDER_RELAXED); + } + + /** @return whether the purge coordinator thread is active */ + bool running(); + /** Stop purge during FLUSH TABLES FOR EXPORT */ + void stop(); + /** Resume purge at UNLOCK TABLES after FLUSH TABLES FOR EXPORT */ + void resume(); }; /** The global data structure coordinating a purge */ diff --git a/storage/innobase/include/trx0rec.h b/storage/innobase/include/trx0rec.h index 955a726eb50..88c98625462 100644 --- a/storage/innobase/include/trx0rec.h +++ b/storage/innobase/include/trx0rec.h @@ -244,7 +244,7 @@ trx_undo_prev_version_build( into this function by purge thread or not. And if we read "after image" of undo log */ -/** Parse MLOG_UNDO_INSERT for crash-upgrade from MariaDB 10.2. +/** Parse MLOG_UNDO_INSERT. @param[in] ptr log record @param[in] end_ptr end of log record buffer @param[in,out] page page or NULL diff --git a/storage/innobase/include/trx0rec.ic b/storage/innobase/include/trx0rec.ic index 5ae34c486cc..a9794eb213d 100644 --- a/storage/innobase/include/trx0rec.ic +++ b/storage/innobase/include/trx0rec.ic @@ -64,8 +64,8 @@ trx_undo_rec_copy( ulint len; len = mach_read_from_2(undo_rec) - - ut_align_offset(undo_rec, UNIV_PAGE_SIZE); - ut_ad(len < UNIV_PAGE_SIZE); + - ut_align_offset(undo_rec, srv_page_size); + ut_ad(len < srv_page_size); trx_undo_rec_t* rec = static_cast( mem_heap_dup(heap, undo_rec, len)); mach_write_to_2(rec, len); diff --git a/storage/innobase/include/trx0roll.h b/storage/innobase/include/trx0roll.h index e1718294f6e..af5ed73f04b 100644 --- a/storage/innobase/include/trx0roll.h +++ b/storage/innobase/include/trx0roll.h @@ -218,6 +218,4 @@ struct trx_named_savept_t{ transaction */ }; -#include "trx0roll.ic" - #endif diff --git a/storage/innobase/include/trx0roll.ic b/storage/innobase/include/trx0roll.ic deleted file mode 100644 index b09a1471150..00000000000 --- a/storage/innobase/include/trx0roll.ic +++ /dev/null @@ -1,62 +0,0 @@ -/***************************************************************************** - -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. - -This program is free software; you can redistribute it and/or modify it under -the terms of the GNU General Public License as published by the Free Software -Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
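The purge_sys_t interface above replaces the PURGE_STATE_* enum with two int32 fields read through relaxed atomics: m_enabled, set once by coordinator_startup() and cleared by coordinator_shutdown(), and m_paused, a counter of stop() calls that have not yet been matched by resume(). The bodies of stop() and resume() are not part of this header, so the following is only a plausible shape of the pause protocol, with std::atomic and a condition variable standing in for my_atomic and the os_event_t/rw_lock_t pair; every name here is a placeholder:

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>

struct purge_pause_sketch {
  /* stop(): request a pause; nested FLUSH TABLES FOR EXPORT callers stack up. */
  void stop() {
    m_paused.fetch_add(1, std::memory_order_relaxed);
    /* the real coordinator would also be woken and waited for here */
  }
  /* resume(): undo one stop(); purge may run again once the count drops to zero. */
  void resume() {
    if (m_paused.fetch_sub(1, std::memory_order_relaxed) == 1) {
      std::lock_guard<std::mutex> g(m_mutex);
      m_cond.notify_all(); /* analogous to os_event_set(event) */
    }
  }
  bool paused() const { return m_paused.load(std::memory_order_relaxed) != 0; }

private:
  std::atomic<int32_t> m_paused{0};
  std::mutex m_mutex;
  std::condition_variable m_cond;
};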
- -You should have received a copy of the GNU General Public License along with -this program; if not, write to the Free Software Foundation, Inc., -51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA - -*****************************************************************************/ - -/**************************************************//** -@file include/trx0roll.ic -Transaction rollback - -Created 3/26/1996 Heikki Tuuri -*******************************************************/ - -#ifdef UNIV_DEBUG -/*******************************************************************//** -Check if undo numbering is maintained while processing undo records -for rollback. -@return true if undo numbering is maintained. */ -UNIV_INLINE -bool -trx_roll_check_undo_rec_ordering( -/*=============================*/ - undo_no_t curr_undo_rec_no, /*!< in: record number of - undo record to process. */ - ulint curr_undo_space_id, /*!< in: space-id of rollback - segment that contains the - undo record to process. */ - const trx_t* trx) /*!< in: transaction */ -{ - /* Each transaction now can have multiple rollback segments. - If a transaction involves temp and non-temp tables, both the rollback - segments will be active. In this case undo records will be distrubuted - across the two rollback segments. - CASE-1: UNDO action will apply all undo records from one rollback - segment before moving to next. This means undo record numbers can't be - sequential but ordering is still enforced as next undo record number - should be < processed undo record number. - CASE-2: For normal rollback (not initiated by crash) all rollback - segments will be active (including non-redo). - Based on transaction operation pattern undo record number of first - undo record from this new rollback segment can be > last undo number - from previous rollback segment and so we ignore this check if - rollback segments are switching. Once switched new rollback segment - should re-follow undo record number pattern (as mentioned in CASE-1). 
*/ - - return(curr_undo_space_id != trx->undo_rseg_space - || curr_undo_rec_no + 1 <= trx->undo_no); -} -#endif /* UNIV_DEBUG */ - diff --git a/storage/innobase/include/trx0rseg.h b/storage/innobase/include/trx0rseg.h index 65e5124a2db..dbd80486b71 100644 --- a/storage/innobase/include/trx0rseg.h +++ b/storage/innobase/include/trx0rseg.h @@ -113,7 +113,7 @@ trx_rseg_get_n_undo_tablespaces( ulint* space_ids); /*!< out: array of space ids of UNDO tablespaces */ /* Number of undo log slots in a rollback segment file copy */ -#define TRX_RSEG_N_SLOTS (UNIV_PAGE_SIZE / 16) +#define TRX_RSEG_N_SLOTS (srv_page_size / 16) /* Maximum number of transactions supported by a single rollback segment */ #define TRX_RSEG_MAX_N_TRXS (TRX_RSEG_N_SLOTS / 2) diff --git a/storage/innobase/include/trx0sys.h b/storage/innobase/include/trx0sys.h index 716365a9868..ea01d698b3b 100644 --- a/storage/innobase/include/trx0sys.h +++ b/storage/innobase/include/trx0sys.h @@ -224,7 +224,7 @@ trx_sysf_rseg_get_page_no(const buf_block_t* sys_header, ulint rseg_id) # error "UNIV_PAGE_SIZE_MIN < 4096" #endif /** The offset of the MySQL binlog offset info in the trx system header */ -#define TRX_SYS_MYSQL_LOG_INFO (UNIV_PAGE_SIZE - 1000) +#define TRX_SYS_MYSQL_LOG_INFO (srv_page_size - 1000) #define TRX_SYS_MYSQL_LOG_MAGIC_N_FLD 0 /*!< magic number which is TRX_SYS_MYSQL_LOG_MAGIC_N if we have valid data in the @@ -233,7 +233,7 @@ trx_sysf_rseg_get_page_no(const buf_block_t* sys_header, ulint rseg_id) within that file */ #define TRX_SYS_MYSQL_LOG_NAME 12 /*!< MySQL log file name */ -/** Memory map TRX_SYS_PAGE_NO = 5 when UNIV_PAGE_SIZE = 4096 +/** Memory map TRX_SYS_PAGE_NO = 5 when srv_page_size = 4096 0...37 FIL_HEADER 38...45 TRX_SYS_TRX_ID_STORE @@ -249,7 +249,7 @@ trx_sysf_rseg_get_page_no(const buf_block_t* sys_header, ulint rseg_id) ... 
...1063 TRX_SYS_RSEG_PAGE_NO for slot 126 -(UNIV_PAGE_SIZE-3500 WSREP ::: FAIL would overwrite undo tablespace +(srv_page_size-3500 WSREP ::: FAIL would overwrite undo tablespace space_id, page_no pairs :::) 596 TRX_SYS_WSREP_XID_INFO TRX_SYS_WSREP_XID_MAGIC_N_FLD 600 TRX_SYS_WSREP_XID_FORMAT @@ -259,7 +259,7 @@ space_id, page_no pairs :::) 739 TRX_SYS_WSREP_XID_DATA_END FIXED WSREP XID info offsets for 4k page size 10.0.32-galera -(UNIV_PAGE_SIZE-2500) +(srv_page_size-2500) 1596 TRX_SYS_WSREP_XID_INFO TRX_SYS_WSREP_XID_MAGIC_N_FLD 1600 TRX_SYS_WSREP_XID_FORMAT 1604 TRX_SYS_WSREP_XID_GTRID_LEN @@ -267,19 +267,19 @@ FIXED WSREP XID info offsets for 4k page size 10.0.32-galera 1612 TRX_SYS_WSREP_XID_DATA (len = 128) 1739 TRX_SYS_WSREP_XID_DATA_END -(UNIV_PAGE_SIZE - 2000 MYSQL MASTER LOG) +(srv_page_size - 2000 MYSQL MASTER LOG) 2096 TRX_SYS_MYSQL_MASTER_LOG_INFO TRX_SYS_MYSQL_LOG_MAGIC_N_FLD 2100 TRX_SYS_MYSQL_LOG_OFFSET_HIGH 2104 TRX_SYS_MYSQL_LOG_OFFSET_LOW 2108 TRX_SYS_MYSQL_LOG_NAME -(UNIV_PAGE_SIZE - 1000 MYSQL LOG) +(srv_page_size - 1000 MYSQL LOG) 3096 TRX_SYS_MYSQL_LOG_INFO TRX_SYS_MYSQL_LOG_MAGIC_N_FLD 3100 TRX_SYS_MYSQL_LOG_OFFSET_HIGH 3104 TRX_SYS_MYSQL_LOG_OFFSET_LOW 3108 TRX_SYS_MYSQL_LOG_NAME -(UNIV_PAGE_SIZE - 200 DOUBLEWRITE) +(srv_page_size - 200 DOUBLEWRITE) 3896 TRX_SYS_DOUBLEWRITE TRX_SYS_DOUBLEWRITE_FSEG 3906 TRX_SYS_DOUBLEWRITE_MAGIC 3910 TRX_SYS_DOUBLEWRITE_BLOCK1 @@ -287,7 +287,7 @@ FIXED WSREP XID info offsets for 4k page size 10.0.32-galera 3918 TRX_SYS_DOUBLEWRITE_REPEAT 3930 TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED_N -(UNIV_PAGE_SIZE - 8, TAILER) +(srv_page_size - 8, TAILER) 4088..4096 FIL_TAILER */ @@ -308,7 +308,7 @@ FIXED WSREP XID info offsets for 4k page size 10.0.32-galera /** Doublewrite buffer */ /* @{ */ /** The offset of the doublewrite buffer header on the trx system header page */ -#define TRX_SYS_DOUBLEWRITE (UNIV_PAGE_SIZE - 200) +#define TRX_SYS_DOUBLEWRITE (srv_page_size - 200) /*-------------------------------------------------------------*/ #define TRX_SYS_DOUBLEWRITE_FSEG 0 /*!< fseg header of the fseg containing the doublewrite @@ -627,6 +627,12 @@ public: */ if (!trx_id) return NULL; + if (caller_trx && caller_trx->id == trx_id) + { + if (do_ref_count) + caller_trx->reference(); + return caller_trx; + } trx_t *trx= 0; LF_PINS *pins= caller_trx ? get_pins(caller_trx) : lf_hash_get_pins(&hash); @@ -698,9 +704,10 @@ public: because it may change even before this method returns. */ - int32_t size() + uint32_t size() { - return my_atomic_load32_explicit(&hash.count, MY_MEMORY_ORDER_RELAXED); + return uint32_t(my_atomic_load32_explicit(&hash.count, + MY_MEMORY_ORDER_RELAXED)); } @@ -984,10 +991,10 @@ public: bool is_initialised() { return m_initialised; } - /** Initialise the purge subsystem. */ + /** Initialise the transaction subsystem. */ void create(); - /** Close the purge subsystem on shutdown. */ + /** Close the transaction subsystem on shutdown. */ void close(); /** @return total number of active (non-prepared) transactions */ diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 4b7ca171740..76d4e9caba0 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -355,8 +355,8 @@ trx_state_eq( /**********************************************************************//** Determines if the currently running transaction has been interrupted. 
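All of the offsets in the memory-map comment above are measured backwards from the end of the page, so switching the macros from the compile-time UNIV_PAGE_SIZE to the runtime srv_page_size preserves the layout for whatever page size is configured. A worked check of the 4 KiB figures quoted in that comment (this is arithmetic only, not code from the patch):

#include <cassert>

int main() {
  const unsigned long srv_page_size = 4096;  /* innodb_page_size=4k */
  assert(srv_page_size - 1000 == 3096);      /* TRX_SYS_MYSQL_LOG_INFO */
  assert(srv_page_size - 200  == 3896);      /* TRX_SYS_DOUBLEWRITE */
  assert(srv_page_size - 8    == 4088);      /* FIL tailer */
  assert(srv_page_size / 16   == 256);       /* TRX_RSEG_N_SLOTS on a 4k page */
}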
-@return TRUE if interrupted */ -ibool +@return true if interrupted */ +bool trx_is_interrupted( /*===============*/ const trx_t* trx); /*!< in: transaction */ @@ -940,7 +940,7 @@ public: contains a pointer to the latest file name; this is NULL if binlog is not used */ - int64_t mysql_log_offset; + ulonglong mysql_log_offset; /*!< if MySQL binlog is used, this field contains the end offset of the binlog entry */ @@ -978,12 +978,6 @@ public: trx_savepoints; /*!< savepoints set with SAVEPOINT ..., oldest first */ /*------------------------------*/ - UndoMutex undo_mutex; /*!< mutex protecting the fields in this - section (down to undo_no_arr), EXCEPT - last_sql_stat_start, which can be - accessed only when we know that there - cannot be any activity in the undo - logs! */ undo_no_t undo_no; /*!< next undo log record number to assign; since the undo log is private for a transaction, this @@ -991,14 +985,10 @@ public: with no gaps; thus it represents the number of modified/inserted rows in a transaction */ - ulint undo_rseg_space; - /*!< space id where last undo record - was written */ trx_savept_t last_sql_stat_start; /*!< undo_no when the last sql statement was started: in case of an error, trx - is rolled back down to this undo - number; see note at undo_mutex! */ + is rolled back down to this number */ trx_rsegs_t rsegs; /* rollback segments for undo logging */ undo_no_t roll_limit; /*!< least undo number to undo during a partial rollback; 0 otherwise */ diff --git a/storage/innobase/include/trx0types.h b/storage/innobase/include/trx0types.h index 5100b8b978d..abc92a6edec 100644 --- a/storage/innobase/include/trx0types.h +++ b/storage/innobase/include/trx0types.h @@ -138,7 +138,6 @@ typedef byte trx_undo_rec_t; typedef ib_mutex_t RsegMutex; typedef ib_mutex_t TrxMutex; -typedef ib_mutex_t UndoMutex; typedef ib_mutex_t PQMutex; typedef ib_mutex_t TrxSysMutex; diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h index 4010fe4163b..7d4e632d3ce 100644 --- a/storage/innobase/include/trx0undo.h +++ b/storage/innobase/include/trx0undo.h @@ -166,13 +166,11 @@ trx_undo_get_first_rec( mtr_t* mtr); /** Allocate an undo log page. -@param[in,out] trx transaction @param[in,out] undo undo log @param[in,out] mtr mini-transaction that does not hold any page latch @return X-latched block if success @retval NULL on failure */ -buf_block_t* -trx_undo_add_page(trx_t* trx, trx_undo_t* undo, mtr_t* mtr) +buf_block_t* trx_undo_add_page(trx_undo_t* undo, mtr_t* mtr) MY_ATTRIBUTE((nonnull, warn_unused_result)); /** Free the last undo log page. The caller must hold the rseg mutex. @@ -274,7 +272,7 @@ bool trx_undo_truncate_tablespace( undo::Truncate* undo_trunc); -/** Parse MLOG_UNDO_INIT for crash-upgrade from MariaDB 10.2. +/** Parse MLOG_UNDO_INIT. @param[in] ptr log record @param[in] end_ptr end of log record buffer @param[in,out] page page or NULL @@ -282,23 +280,17 @@ trx_undo_truncate_tablespace( @return end of log record @retval NULL if the log record is incomplete */ byte* -trx_undo_parse_page_init( - const byte* ptr, - const byte* end_ptr, - page_t* page, - mtr_t* mtr); +trx_undo_parse_page_init(const byte* ptr, const byte* end_ptr, page_t* page); /** Parse MLOG_UNDO_HDR_REUSE for crash-upgrade from MariaDB 10.2. 
@param[in] ptr redo log record @param[in] end_ptr end of log buffer @param[in,out] page undo page or NULL -@param[in,out] mtr mini-transaction @return end of log record or NULL */ byte* trx_undo_parse_page_header_reuse( const byte* ptr, const byte* end_ptr, - page_t* page, - mtr_t* mtr); + page_t* page); /** Parse the redo log entry of an undo log page header create. @param[in] ptr redo log record @@ -343,8 +335,8 @@ trx_undo_mem_create_at_db_start(trx_rseg_t* rseg, ulint id, ulint page_no, #ifndef UNIV_INNOCHECKSUM -/** Transaction undo log memory object; this is protected by the undo_mutex -in the corresponding transaction object */ +/** Transaction undo log memory object; modified by the thread associated +with the transaction. */ struct trx_undo_t { /*-----------------------------*/ @@ -370,8 +362,6 @@ struct trx_undo_t { top_page_no during a rollback */ ulint size; /*!< current size in pages */ /*-----------------------------*/ - ulint empty; /*!< TRUE if the stack of undo log - records is currently empty */ ulint top_page_no; /*!< page number where the latest undo log record was catenated; during rollback the page from which the latest @@ -379,11 +369,16 @@ struct trx_undo_t { ulint top_offset; /*!< offset of the latest undo record, i.e., the topmost element in the undo log if we think of it as a stack */ - undo_no_t top_undo_no; /*!< undo number of the latest record */ + undo_no_t top_undo_no; /*!< undo number of the latest record + (IB_ID_MAX if the undo log is empty) */ buf_block_t* guess_block; /*!< guess for the buffer block where the top page might reside */ ulint withdraw_clock; /*!< the withdraw clock value of the buffer pool when guess_block was stored */ + + /** @return whether the undo log is empty */ + bool empty() const { return top_undo_no == IB_ID_MAX; } + /*-----------------------------*/ UT_LIST_NODE_T(trx_undo_t) undo_list; /*!< undo log objects in the rollback @@ -418,7 +413,7 @@ struct trx_undo_t { at most this many bytes used; we must leave space at least for one new undo log header on the page */ -#define TRX_UNDO_PAGE_REUSE_LIMIT (3 * UNIV_PAGE_SIZE / 4) +#define TRX_UNDO_PAGE_REUSE_LIMIT (3 << (srv_page_size_shift - 2)) /* An update undo log segment may contain several undo logs on its first page if the undo logs took so little space that the segment could be cached and diff --git a/storage/innobase/include/trx0undo.ic b/storage/innobase/include/trx0undo.ic index 407bc9ff484..630638f6b7b 100644 --- a/storage/innobase/include/trx0undo.ic +++ b/storage/innobase/include/trx0undo.ic @@ -40,9 +40,7 @@ trx_undo_build_roll_ptr( ulint offset) /*!< in: offset of the undo entry within page */ { roll_ptr_t roll_ptr; -#if DATA_ROLL_PTR_LEN != 7 -# error "DATA_ROLL_PTR_LEN != 7" -#endif + compile_time_assert(DATA_ROLL_PTR_LEN == 7); ut_ad(is_insert == 0 || is_insert == 1); ut_ad(rseg_id < TRX_SYS_N_RSEGS); ut_ad(offset < 65536); @@ -67,12 +65,7 @@ trx_undo_decode_roll_ptr( ulint* offset) /*!< out: offset of the undo entry within page */ { -#if DATA_ROLL_PTR_LEN != 7 -# error "DATA_ROLL_PTR_LEN != 7" -#endif -#if TRUE != 1 -# error "TRUE != 1" -#endif + compile_time_assert(DATA_ROLL_PTR_LEN == 7); ut_ad(roll_ptr < (1ULL << 56)); *offset = (ulint) roll_ptr & 0xFFFF; roll_ptr >>= 16; @@ -92,12 +85,7 @@ trx_undo_roll_ptr_is_insert( /*========================*/ roll_ptr_t roll_ptr) /*!< in: roll pointer */ { -#if DATA_ROLL_PTR_LEN != 7 -# error "DATA_ROLL_PTR_LEN != 7" -#endif -#if TRUE != 1 -# error "TRUE != 1" -#endif + compile_time_assert(DATA_ROLL_PTR_LEN == 7); 
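The assertions in the roll-pointer helpers above pin down the 7-byte (56-bit) DB_ROLL_PTR layout: a 16-bit byte offset in the low bits, a 32-bit undo page number above it, the rollback-segment id (bounded by TRX_SYS_N_RSEGS == 128) below the top, and the insert flag as bit 55. A standalone sketch of the packing and unpacking implied by those asserts; the field positions are inferred, so treat this as illustrative rather than the server's code:

#include <cassert>
#include <cstdint>

typedef uint64_t roll_ptr_sketch_t;

static roll_ptr_sketch_t build(bool is_insert, uint64_t rseg_id,
                               uint64_t page_no, uint64_t offset) {
  assert(rseg_id < 128);              /* rseg_id < TRX_SYS_N_RSEGS */
  assert(page_no < (1ULL << 32));     /* page number fits in 32 bits */
  assert(offset < 65536);             /* offset fits in 16 bits */
  return (uint64_t(is_insert) << 55) | (rseg_id << 48)
         | (page_no << 16) | offset;
}

static void decode(roll_ptr_sketch_t p, bool* is_insert, uint64_t* rseg_id,
                   uint64_t* page_no, uint64_t* offset) {
  assert(p < (1ULL << 56));           /* fits in DATA_ROLL_PTR_LEN == 7 bytes */
  *offset  = p & 0xFFFF;     p >>= 16;
  *page_no = p & 0xFFFFFFFF; p >>= 32;
  *rseg_id = p & 0x7F;
  *is_insert = (p >> 7) != 0;
}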
ut_ad(roll_ptr < (1ULL << (ROLL_PTR_INSERT_FLAG_POS + 1))); return((ibool) (roll_ptr >> ROLL_PTR_INSERT_FLAG_POS)); } @@ -111,10 +99,8 @@ trx_undo_trx_id_is_insert( /*======================*/ const byte* trx_id) /*!< in: DB_TRX_ID, followed by DB_ROLL_PTR */ { -#if DATA_TRX_ID + 1 != DATA_ROLL_PTR -# error -#endif - return(static_cast(trx_id[DATA_TRX_ID_LEN] >> 7)); + compile_time_assert(DATA_TRX_ID + 1 == DATA_ROLL_PTR); + return bool(trx_id[DATA_TRX_ID_LEN] >> 7); } /*****************************************************************//** @@ -129,9 +115,7 @@ trx_write_roll_ptr( written */ roll_ptr_t roll_ptr) /*!< in: roll ptr */ { -#if DATA_ROLL_PTR_LEN != 7 -# error "DATA_ROLL_PTR_LEN != 7" -#endif + compile_time_assert(DATA_ROLL_PTR_LEN == 7); mach_write_to_7(ptr, roll_ptr); } @@ -146,9 +130,7 @@ trx_read_roll_ptr( /*==============*/ const byte* ptr) /*!< in: pointer to memory from where to read */ { -#if DATA_ROLL_PTR_LEN != 7 -# error "DATA_ROLL_PTR_LEN != 7" -#endif + compile_time_assert(DATA_ROLL_PTR_LEN == 7); return(mach_read_from_7(ptr)); } @@ -220,7 +202,7 @@ trx_undo_page_get_next_rec( ulint end; ulint next; - undo_page = (page_t*) ut_align_down(rec, UNIV_PAGE_SIZE); + undo_page = (page_t*) ut_align_down(rec, srv_page_size); end = trx_undo_page_get_end(undo_page, page_no, offset); diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index 49284f2dbc8..ffa4967b9de 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -41,7 +41,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 5 #define INNODB_VERSION_MINOR 7 -#define INNODB_VERSION_BUGFIX 21 +#define INNODB_VERSION_BUGFIX 22 /* The following is the InnoDB version as shown in SELECT plugin_version FROM information_schema.plugins; @@ -50,16 +50,14 @@ calculated in make_version_string() in sql/sql_show.cc like this: because the version is shown with only one dot, we skip the last component, i.e. we show M.N.P as M.N */ #define INNODB_VERSION_SHORT \ - (INNODB_VERSION_MAJOR << 8 | INNODB_VERSION_MINOR) + (MYSQL_VERSION_MAJOR << 8 | MYSQL_VERSION_MINOR) #define INNODB_VERSION_STR \ - IB_TO_STR(INNODB_VERSION_MAJOR) "." \ - IB_TO_STR(INNODB_VERSION_MINOR) "." \ - IB_TO_STR(INNODB_VERSION_BUGFIX) + IB_TO_STR(MYSQL_VERSION_MAJOR) "." \ + IB_TO_STR(MYSQL_VERSION_MINOR) "." \ + IB_TO_STR(MYSQL_VERSION_PATCH) -#define REFMAN "http://dev.mysql.com/doc/refman/" \ - IB_TO_STR(INNODB_VERSION_MAJOR) "." \ - IB_TO_STR(INNODB_VERSION_MINOR) "/en/" +#define REFMAN "http://dev.mysql.com/doc/refman/5.7/en/" /** How far ahead should we tell the service manager the timeout (time in seconds) */ @@ -172,9 +170,8 @@ for all cases. This is used by ut0lst.h related code. */ /* When this macro is defined then additional test functions will be compiled. These functions live at the end of each relevant source file and have "test_" prefix. These functions can be called from the end of -innobase_init() or they can be called from gdb after -innobase_start_or_create_for_mysql() has executed using the call -command. */ +innodb_init() or they can be called from gdb after srv_start() has executed +using the call command. */ /* #define UNIV_COMPILE_TEST_FUNCS #define UNIV_ENABLE_UNIT_TEST_GET_PARENT_DIR @@ -275,9 +272,6 @@ management to ensure correct alignment for doubles etc. 
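Several of the trx0undo.ic changes above (trx_undo_rec_copy, trx_undo_page_get_next_rec) lean on ut_align_down/ut_align_offset with the now run-time srv_page_size; for a power-of-two page size these reduce to pointer masking. A minimal model of that idea, using illustrative helper names rather than the server's:

#include <cassert>
#include <cstdint>

/* Round a pointer down to the start of its (power-of-two sized) page. */
static inline void* align_down_sketch(void* ptr, uintptr_t size) {
  assert((size & (size - 1)) == 0);   /* size must be a power of two */
  return (void*) (uintptr_t(ptr) & ~(size - 1));
}

/* Byte offset of the pointer within its page. */
static inline uintptr_t align_offset_sketch(void* ptr, uintptr_t size) {
  return uintptr_t(ptr) & (size - 1);
}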
*/ ======================== */ -/** The 2-logarithm of UNIV_PAGE_SIZE: */ -#define UNIV_PAGE_SIZE_SHIFT srv_page_size_shift - #ifdef HAVE_LZO #define IF_LZO(A,B) A #else @@ -314,32 +308,29 @@ management to ensure correct alignment for doubles etc. */ #define IF_PUNCH_HOLE(A,B) B #endif -/** The universal page size of the database */ -#define UNIV_PAGE_SIZE ((ulint) srv_page_size) - /** log2 of smallest compressed page size (1<<10 == 1024 bytes) Note: This must never change! */ -#define UNIV_ZIP_SIZE_SHIFT_MIN 10 +#define UNIV_ZIP_SIZE_SHIFT_MIN 10U /** log2 of largest compressed page size (1<<14 == 16384 bytes). A compressed page directory entry reserves 14 bits for the start offset and 2 bits for flags. This limits the uncompressed page size to 16k. */ -#define UNIV_ZIP_SIZE_SHIFT_MAX 14 +#define UNIV_ZIP_SIZE_SHIFT_MAX 14U /* Define the Min, Max, Default page sizes. */ /** Minimum Page Size Shift (power of 2) */ -#define UNIV_PAGE_SIZE_SHIFT_MIN 12 +#define UNIV_PAGE_SIZE_SHIFT_MIN 12U /** log2 of largest page size (1<<16 == 64436 bytes). */ /** Maximum Page Size Shift (power of 2) */ -#define UNIV_PAGE_SIZE_SHIFT_MAX 16 +#define UNIV_PAGE_SIZE_SHIFT_MAX 16U /** log2 of default page size (1<<14 == 16384 bytes). */ /** Default Page Size Shift (power of 2) */ -#define UNIV_PAGE_SIZE_SHIFT_DEF 14 +#define UNIV_PAGE_SIZE_SHIFT_DEF 14U /** Original 16k InnoDB Page Size Shift, in case the default changes */ -#define UNIV_PAGE_SIZE_SHIFT_ORIG 14 +#define UNIV_PAGE_SIZE_SHIFT_ORIG 14U /** Original 16k InnoDB Page Size as an ssize (log2 - 9) */ -#define UNIV_PAGE_SSIZE_ORIG (UNIV_PAGE_SIZE_SHIFT_ORIG - 9) +#define UNIV_PAGE_SSIZE_ORIG (UNIV_PAGE_SIZE_SHIFT_ORIG - 9U) /** Minimum page size InnoDB currently supports. */ #define UNIV_PAGE_SIZE_MIN (1U << UNIV_PAGE_SIZE_SHIFT_MIN) @@ -359,13 +350,13 @@ and 2 bits for flags. This limits the uncompressed page size to 16k. /** Largest possible ssize for an uncompressed page. (The convention 'ssize' is used for 'log2 minus 9' or the number of shifts starting with 512.) -This max number varies depending on UNIV_PAGE_SIZE. */ +This max number varies depending on srv_page_size. */ #define UNIV_PAGE_SSIZE_MAX \ - static_cast(UNIV_PAGE_SIZE_SHIFT - UNIV_ZIP_SIZE_SHIFT_MIN + 1) + ulint(srv_page_size_shift - UNIV_ZIP_SIZE_SHIFT_MIN + 1U) /** Smallest possible ssize for an uncompressed page. 
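The ssize values here follow the "log2 minus 9" convention stated above (512 bytes is ssize 1), so the macros become plain shift arithmetic on the runtime srv_page_size_shift. A quick worked check of the constants for the default 16 KiB page:

#include <cassert>

int main() {
  const unsigned srv_page_size_shift = 14;      /* 16 KiB default page */
  assert((1U << srv_page_size_shift) == 16384);
  assert(srv_page_size_shift - 9U == 5);        /* UNIV_PAGE_SSIZE_ORIG */
  assert(srv_page_size_shift - 10U + 1U == 5);  /* UNIV_PAGE_SSIZE_MAX here */
}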
*/ #define UNIV_PAGE_SSIZE_MIN \ - static_cast(UNIV_PAGE_SIZE_SHIFT_MIN - UNIV_ZIP_SIZE_SHIFT_MIN + 1) + ulint(UNIV_PAGE_SIZE_SHIFT_MIN - UNIV_ZIP_SIZE_SHIFT_MIN + 1U) /** Maximum number of parallel threads in a parallelized operation */ #define UNIV_MAX_PARALLELISM 32 @@ -470,7 +461,7 @@ typedef ib_uint64_t lsn_t; #define UINT64_UNDEFINED ((ib_uint64_t)(-1)) /** The bitmask of 32-bit unsigned integer */ -#define ULINT32_MASK 0xFFFFFFFF +#define ULINT32_MASK 0xFFFFFFFFU /** The undefined 32-bit unsigned integer */ #define ULINT32_UNDEFINED ULINT32_MASK diff --git a/storage/innobase/include/ut0byte.ic b/storage/innobase/include/ut0byte.ic index 9c0cd6ee3c3..1ef90eca416 100644 --- a/storage/innobase/include/ut0byte.ic +++ b/storage/innobase/include/ut0byte.ic @@ -144,9 +144,6 @@ ut_bit_get_nth( ulint n) /*!< in: nth bit requested */ { ut_ad(n < 8 * sizeof(ulint)); -#if TRUE != 1 -# error "TRUE != 1" -#endif return(1 & (a >> n)); } @@ -162,9 +159,6 @@ ut_bit_set_nth( ibool val) /*!< in: value for the bit to set */ { ut_ad(n < 8 * sizeof(ulint)); -#if TRUE != 1 -# error "TRUE != 1" -#endif if (val) { return(((ulint) 1 << n) | a); } else { diff --git a/storage/innobase/include/ut0lst.h b/storage/innobase/include/ut0lst.h index 09733da20a0..f62d3744b96 100644 --- a/storage/innobase/include/ut0lst.h +++ b/storage/innobase/include/ut0lst.h @@ -426,7 +426,7 @@ Gets the last node in a two-way list. @return last node, or NULL if the list is empty */ #define UT_LIST_GET_LAST(BASE) (BASE).end -struct NullValidate { void operator()(const void* elem) { } }; +struct NullValidate { void operator()(const void*) { } }; /********************************************************************//** Iterate over all the elements and call the functor for each element. diff --git a/storage/innobase/include/ut0new.h b/storage/innobase/include/ut0new.h index 68acccbee97..5dcb25271c5 100644 --- a/storage/innobase/include/ut0new.h +++ b/storage/innobase/include/ut0new.h @@ -238,11 +238,18 @@ struct ut_new_pfx_t { #endif }; -static inline void ut_allocate_trace_dontdump(void * ptr, - size_t bytes, - bool dontdump, - ut_new_pfx_t* pfx, - const char* file) +static inline void ut_allocate_trace_dontdump(void *ptr, size_t bytes, + bool +#if defined(DBUG_OFF) && defined(HAVE_MADVISE) && defined(MADV_DONTDUMP) + dontdump +#endif + , ut_new_pfx_t* pfx, + const char* +#ifdef UNIV_PFS_MEMORY + file +#endif + + ) { ut_a(ptr != NULL); @@ -262,17 +269,19 @@ static inline void ut_allocate_trace_dontdump(void * ptr, } } +#if defined(DBUG_OFF) && defined(HAVE_MADVISE) && defined(MADV_DODUMP) static inline void ut_dodump(void* ptr, size_t m_size) { -#if defined(DBUG_OFF) && defined(HAVE_MADVISE) && defined(MADV_DODUMP) if (ptr && madvise(ptr, m_size, MADV_DODUMP)) { ib::warn() << "Failed to set memory to DODUMP: " << strerror(errno) << " ptr " << ptr << " size " << m_size; } -#endif } +#else +static inline void ut_dodump(void*, size_t) {} +#endif /** Allocator class for allocating memory from inside std::* containers. @tparam T type of allocated object @@ -288,19 +297,25 @@ public: typedef size_t size_type; typedef ptrdiff_t difference_type; +#ifdef UNIV_PFS_MEMORY /** Default constructor. */ explicit ut_allocator(PSI_memory_key key = PSI_NOT_INSTRUMENTED) -#ifdef UNIV_PFS_MEMORY : m_key(key) -#endif /* UNIV_PFS_MEMORY */ { } +#else + ut_allocator() {} + ut_allocator(PSI_memory_key) {} +#endif /* UNIV_PFS_MEMORY */ /** Constructor from allocator of another type. 
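Several hunks above (ut_allocate_trace_dontdump, the ut_allocator constructors, operator==) silence unused-parameter warnings by naming a parameter only inside the #ifdef block that actually uses it, or by dropping the name altogether. A small standalone illustration of the idiom; the feature macro below is made up for the example:

#include <cstddef>
#include <cstdio>

/* When the tracing feature is compiled out, the third parameter stays
   unnamed, so -Wunused-parameter has nothing to warn about. */
static void record_alloc(void* ptr, size_t bytes,
                         const char*
#ifdef SKETCH_WITH_TRACING
                         file   /* only named when it is actually used */
#endif
                         )
{
#ifdef SKETCH_WITH_TRACING
  std::printf("%zu bytes at %p from %s\n", bytes, ptr, file);
#else
  (void) ptr; (void) bytes;
#endif
}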
*/ template - ut_allocator( - const ut_allocator& other) + ut_allocator(const ut_allocator& +#ifdef UNIV_PFS_MEMORY + other +#endif + ) #ifdef UNIV_PFS_MEMORY : m_key(other.m_key) #endif /* UNIV_PFS_MEMORY */ @@ -321,6 +336,8 @@ public: #endif /* UNIV_PFS_MEMORY */ } + pointer allocate(size_type n) { return allocate(n, NULL, NULL); } + /** Allocate a chunk of memory that can hold 'n_elements' objects of type 'T' and trace the allocation. If the allocation fails this method may throw an exception. This @@ -329,9 +346,6 @@ public: After successfull allocation the returned pointer must be passed to ut_allocator::deallocate() when no longer needed. @param[in] n_elements number of elements - @param[in] hint pointer to a nearby memory location, - unused by this implementation - @param[in] file file name of the caller @param[in] set_to_zero if true, then the returned memory is initialized with 0x0 bytes. @param[in] throw_on_error if true, raize exception if too big @@ -339,8 +353,12 @@ public: pointer allocate( size_type n_elements, - const_pointer hint = NULL, - const char* file = NULL, + const_pointer, + const char* +#ifdef UNIV_PFS_MEMORY + file /*!< file name of the caller */ +#endif + , bool set_to_zero = false, bool throw_on_error = true) { @@ -648,7 +666,11 @@ public: void deallocate_large( pointer ptr, - const ut_new_pfx_t* pfx, + const ut_new_pfx_t* +#ifdef UNIV_PFS_MEMORY + pfx +#endif + , size_t size, bool dodump = false) { @@ -775,12 +797,7 @@ could be freed by A2 even if the pfs mem key is different. */ template inline bool -operator==( - const ut_allocator& lhs, - const ut_allocator& rhs) -{ - return(true); -} +operator==(const ut_allocator&, const ut_allocator&) { return(true); } /** Compare two allocators of the same type. */ template diff --git a/storage/innobase/include/ut0pool.h b/storage/innobase/include/ut0pool.h index f60608bf6c6..6367b53dbe6 100644 --- a/storage/innobase/include/ut0pool.h +++ b/storage/innobase/include/ut0pool.h @@ -86,6 +86,11 @@ struct Pool { for (Element* elem = m_start; elem != m_last; ++elem) { ut_ad(elem->m_pool == this); + /* Unpoison the memory for AddressSanitizer */ + MEM_UNDEFINED(&elem->m_type, sizeof elem->m_type); + /* Declare the contents as initialized for Valgrind; + we checked this in mem_free(). */ + UNIV_MEM_VALID(&elem->m_type, sizeof elem->m_type); Factory::destroy(&elem->m_type); } @@ -110,7 +115,7 @@ struct Pool { } else if (m_last < m_end) { /* Initialise the remaining elements. */ - init(m_end - m_last); + init(size_t(m_end - m_last)); ut_ad(!m_pqueue.empty()); @@ -122,7 +127,18 @@ struct Pool { m_lock_strategy.exit(); - return(elem != NULL ? &elem->m_type : 0); + if (elem) { + /* Unpoison the memory for AddressSanitizer */ + MEM_UNDEFINED(&elem->m_type, sizeof elem->m_type); + /* Declare the memory initialized for Valgrind. + The trx_t that are released to the pool are + actually initialized; we checked that by + UNIV_MEM_ASSERT_RW() in mem_free() below. */ + UNIV_MEM_VALID(&elem->m_type, sizeof elem->m_type); + return &elem->m_type; + } + + return NULL; } /** Add the object to the pool. 
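The MEM_UNDEFINED/UNIV_MEM_VALID/MEM_NOACCESS calls added to Pool::get() and the mem_free() path above wrap the Valgrind and AddressSanitizer client requests, as the new comments themselves note: an object returned to the pool is made unaddressable, and is unpoisoned again when it is handed back out. The sketch below shows the same lifecycle directly against ASan's public interface; it only has an effect when built with -fsanitize=address, and the helper names are illustrative:

#if defined(__SANITIZE_ADDRESS__)
# include <sanitizer/asan_interface.h>
# define POISON(p, n)   ASAN_POISON_MEMORY_REGION(p, n)
# define UNPOISON(p, n) ASAN_UNPOISON_MEMORY_REGION(p, n)
#else
# define POISON(p, n)   ((void) (p), (void) (n))
# define UNPOISON(p, n) ((void) (p), (void) (n))
#endif

struct pool_slot_sketch { unsigned char storage[64]; };

/* Returning a slot to the pool: its contents must no longer be touched. */
static void put_sketch(pool_slot_sketch* s) { POISON(s, sizeof *s); }

/* Taking a slot out again: make it addressable before handing it to the caller. */
static pool_slot_sketch* get_sketch(pool_slot_sketch* s)
{ UNPOISON(s, sizeof *s); return s; }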
@@ -133,8 +149,10 @@ struct Pool { byte* p = reinterpret_cast(ptr + 1); elem = reinterpret_cast(p - sizeof(*elem)); + UNIV_MEM_ASSERT_RW(&elem->m_type, sizeof elem->m_type); elem->m_pool->put(elem); + MEM_NOACCESS(&elem->m_type, sizeof elem->m_type); } protected: diff --git a/storage/innobase/include/ut0stage.h b/storage/innobase/include/ut0stage.h index 1d5457a3ab0..4b96fad3c21 100644 --- a/storage/innobase/include/ut0stage.h +++ b/storage/innobase/include/ut0stage.h @@ -529,65 +529,28 @@ ut_stage_alter_t::change_phase( class ut_stage_alter_t { public: - explicit - ut_stage_alter_t( - const dict_index_t* pk) - { - } + explicit ut_stage_alter_t(const dict_index_t*) {} - void - begin_phase_read_pk( - ulint n_sort_indexes) - { - } + void begin_phase_read_pk(ulint) {} - void - n_pk_recs_inc() - { - } + void n_pk_recs_inc() {} - void - inc( - ulint inc_val = 1) - { - } + void inc() {} + void inc(ulint) {} - void - end_phase_read_pk() - { - } + void end_phase_read_pk() {} - void - begin_phase_sort( - double sort_multi_factor) - { - } + void begin_phase_sort(double) {} - void - begin_phase_insert() - { - } + void begin_phase_insert() {} - void - begin_phase_flush( - ulint n_flush_pages) - { - } + void begin_phase_flush(ulint) {} - void - begin_phase_log_index() - { - } + void begin_phase_log_index() {} - void - begin_phase_log_table() - { - } + void begin_phase_log_table() {} - void - begin_phase_end() - { - } + void begin_phase_end() {} }; #endif /* HAVE_PSI_STAGE_INTERFACE */ diff --git a/storage/innobase/innodb.cmake b/storage/innobase/innodb.cmake index 50d6b4bbeec..a728dd08c0d 100644 --- a/storage/innobase/innodb.cmake +++ b/storage/innobase/innodb.cmake @@ -49,12 +49,6 @@ ELSE() ENDMACRO() ENDIF() -## MySQL 5.7 LZ4 (not needed) -##IF(LZ4_INCLUDE_DIR AND LZ4_LIBRARY) -## ADD_DEFINITIONS(-DHAVE_LZ4=1) -## INCLUDE_DIRECTORIES(${LZ4_INCLUDE_DIR}) -##ENDIF() - # OS tests IF(UNIX) IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") @@ -127,20 +121,7 @@ ENDIF() OPTION(WITH_INNODB_EXTRA_DEBUG "Enable extra InnoDB debug checks" OFF) IF(WITH_INNODB_EXTRA_DEBUG) - IF(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") - MESSAGE(FATAL_ERROR "WITH_INNODB_EXTRA_DEBUG can be enabled only in debug builds") - ENDIF() - - SET(EXTRA_DEBUG_FLAGS "") - IF(WITH_INNODB_AHI) - SET(EXTRA_DEBUG_FLAGS "${EXTRA_DEBUG_FLAGS} -DUNIV_AHI_DEBUG") - ENDIF() - SET(EXTRA_DEBUG_FLAGS "${EXTRA_DEBUG_FLAGS} -DUNIV_DDL_DEBUG") - SET(EXTRA_DEBUG_FLAGS "${EXTRA_DEBUG_FLAGS} -DUNIV_DEBUG_FILE_ACCESSES") - SET(EXTRA_DEBUG_FLAGS "${EXTRA_DEBUG_FLAGS} -DUNIV_ZIP_DEBUG") - - SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${EXTRA_DEBUG_FLAGS}") - SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${EXTRA_DEBUG_FLAGS}") + ADD_DEFINITIONS(-DUNIV_ZIP_DEBUG) ENDIF() CHECK_FUNCTION_EXISTS(sched_getcpu HAVE_SCHED_GETCPU) diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 22788a33c4f..b5494ef7313 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -445,7 +445,7 @@ lock_sec_rec_cons_read_sees( /* NOTE that we might call this function while holding the search system latch. 
*/ - if (dict_table_is_temporary(index->table)) { + if (index->table->is_temporary()) { /* Temp-tables are not shared across connections and multiple transactions from different connections cannot simultaneously @@ -1975,7 +1975,7 @@ lock_rec_lock( trx_mutex_enter(trx); if (lock_rec_get_next_on_page(lock) || lock->trx != trx || - lock->type_mode != (mode | LOCK_REC) || + lock->type_mode != (ulint(mode) | LOCK_REC) || lock_rec_get_n_bits(lock) <= heap_no) { /* Do nothing if the trx already has a strong enough lock on rec */ @@ -2470,7 +2470,8 @@ lock_rec_inherit_to_gap( && lock_get_mode(lock) == (lock->trx->duplicates ? LOCK_S : LOCK_X))) { lock_rec_add_to_queue( - LOCK_REC | LOCK_GAP | lock_get_mode(lock), + LOCK_REC | LOCK_GAP + | ulint(lock_get_mode(lock)), heir_block, heir_heap_no, lock->index, lock->trx, FALSE); } @@ -2506,7 +2507,8 @@ lock_rec_inherit_to_gap_if_gap_lock( || !lock_rec_get_rec_not_gap(lock))) { lock_rec_add_to_queue( - LOCK_REC | LOCK_GAP | lock_get_mode(lock), + LOCK_REC | LOCK_GAP + | ulint(lock_get_mode(lock)), block, heir_heap_no, lock->index, lock->trx, FALSE); } @@ -3770,7 +3772,7 @@ lock_table_enqueue_waiting( #endif /* WITH_WSREP */ /* Enqueue the lock request that will wait to be granted */ - lock = lock_table_create(table, mode | LOCK_WAIT, trx + lock = lock_table_create(table, ulint(mode) | LOCK_WAIT, trx #ifdef WITH_WSREP , c_lock #endif @@ -3881,7 +3883,7 @@ lock_table( locking overhead */ if ((flags & BTR_NO_LOCKING_FLAG) || srv_read_only_mode - || dict_table_is_temporary(table)) { + || table->is_temporary()) { return(DB_SUCCESS); } @@ -3929,13 +3931,14 @@ lock_table( mode: this trx may have to wait */ if (wait_for != NULL) { - err = lock_table_enqueue_waiting(mode | flags, table, thr + err = lock_table_enqueue_waiting(ulint(mode) | flags, table, + thr #ifdef WITH_WSREP , wait_for #endif ); } else { - lock_table_create(table, mode | flags, trx); + lock_table_create(table, ulint(mode) | flags, trx); ut_a(!flags || mode == LOCK_S || mode == LOCK_X); @@ -4617,44 +4620,15 @@ lock_print_info_summary( fprintf(file, "Purge done for trx's n:o < " TRX_ID_FMT - " undo n:o < " TRX_ID_FMT " state: ", + " undo n:o < " TRX_ID_FMT " state: %s\n" + "History list length " ULINTPF "\n", purge_sys.tail.trx_no(), - purge_sys.tail.undo_no); - - /* Note: We are reading the state without the latch. One because it - will violate the latching order and two because we are merely querying - the state of the variable for display. */ - - switch (purge_sys.state){ - case PURGE_STATE_INIT: - /* Should never be in this state while the system is running. */ - ut_error; - - case PURGE_STATE_EXIT: - fprintf(file, "exited"); - break; - - case PURGE_STATE_DISABLED: - fprintf(file, "disabled"); - break; - - case PURGE_STATE_RUN: - fprintf(file, "running"); - /* Check if it is waiting for more data to arrive. */ - if (!purge_sys.running) { - fprintf(file, " but idle"); - } - break; - - case PURGE_STATE_STOP: - fprintf(file, "stopped"); - break; - } - - fprintf(file, "\n"); - - fprintf(file, - "History list length " ULINTPF "\n", trx_sys.history_size()); + purge_sys.tail.undo_no, + purge_sys.enabled() + ? (purge_sys.running() ? "running" + : purge_sys.paused() ? 
"stopped" : "running but idle") + : "disabled", + trx_sys.history_size()); #ifdef PRINT_NUM_OF_LOCK_STRUCTS fprintf(file, @@ -5235,13 +5209,12 @@ lock_rec_block_validate( mtr_commit(&mtr); - fil_space_release(space); + space->release(); } } -static my_bool lock_validate_table_locks(rw_trx_hash_element_t *element, - void *arg) +static my_bool lock_validate_table_locks(rw_trx_hash_element_t *element, void*) { ut_ad(lock_mutex_own()); mutex_enter(&element->mutex); @@ -5347,7 +5320,7 @@ lock_rec_insert_check_and_lock( return(DB_SUCCESS); } - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); dberr_t err; lock_t* lock; @@ -5478,7 +5451,6 @@ lock_rec_convert_impl_to_expl_for_trx( const buf_block_t* block, /*!< in: buffer block of rec */ const rec_t* rec, /*!< in: user record on page */ dict_index_t* index, /*!< in: index of record */ - const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ trx_t* trx, /*!< in/out: active transaction */ ulint heap_no)/*!< in: rec heap number to lock */ { @@ -5623,7 +5595,7 @@ lock_rec_convert_impl_to_expl( trx cannot be committed until the ref count is zero. */ lock_rec_convert_impl_to_expl_for_trx( - block, rec, index, offsets, trx, heap_no); + block, rec, index, trx, heap_no); } } @@ -5660,7 +5632,7 @@ lock_clust_rec_modify_check_and_lock( return(DB_SUCCESS); } ut_ad(!rec_is_default_row(rec, index)); - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); heap_no = rec_offs_comp(offsets) ? rec_get_heap_no_new(rec) @@ -5718,7 +5690,7 @@ lock_sec_rec_modify_check_and_lock( return(DB_SUCCESS); } - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); heap_no = page_rec_get_heap_no(rec); @@ -5801,7 +5773,7 @@ lock_sec_rec_read_check_and_lock( if ((flags & BTR_NO_LOCKING_FLAG) || srv_read_only_mode - || dict_table_is_temporary(index->table)) { + || index->table->is_temporary()) { return(DB_SUCCESS); } @@ -5820,7 +5792,7 @@ lock_sec_rec_read_check_and_lock( index, offsets); } - err = lock_rec_lock(FALSE, mode | gap_mode, + err = lock_rec_lock(FALSE, ulint(mode) | gap_mode, block, heap_no, index, thr); ut_ad(lock_rec_queue_validate(FALSE, block, rec, index, offsets)); @@ -5871,7 +5843,7 @@ lock_clust_rec_read_check_and_lock( if ((flags & BTR_NO_LOCKING_FLAG) || srv_read_only_mode - || dict_table_is_temporary(index->table)) { + || index->table->is_temporary()) { return(DB_SUCCESS); } @@ -5884,7 +5856,8 @@ lock_clust_rec_read_check_and_lock( index, offsets); } - err = lock_rec_lock(FALSE, mode | gap_mode, block, heap_no, index, thr); + err = lock_rec_lock(FALSE, ulint(mode) | gap_mode, + block, heap_no, index, thr); ut_ad(lock_rec_queue_validate(FALSE, block, rec, index, offsets)); @@ -6602,10 +6575,10 @@ lock_trx_has_rec_x_lock( lock_mutex_enter(); ut_a(lock_table_has(trx, table, LOCK_IX) - || dict_table_is_temporary(table)); + || table->is_temporary()); ut_a(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, block, heap_no, trx) - || dict_table_is_temporary(table)); + || table->is_temporary()); lock_mutex_exit(); return(true); } diff --git a/storage/innobase/lock/lock0prdt.cc b/storage/innobase/lock/lock0prdt.cc index cf88ebbeaa1..bac9a788076 100644 --- a/storage/innobase/lock/lock0prdt.cc +++ b/storage/innobase/lock/lock0prdt.cc @@ -534,7 +534,7 @@ lock_prdt_insert_check_and_lock( return(DB_SUCCESS); } - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); ut_ad(!dict_index_is_clust(index)); trx_t* trx = thr_get_trx(thr); @@ -628,7 
+628,6 @@ lock_prdt_update_parent( buf_block_t* right_block, /*!< in/out: the new half page */ lock_prdt_t* left_prdt, /*!< in: MBR on the old page */ lock_prdt_t* right_prdt, /*!< in: MBR on the new page */ - lock_prdt_t* parent_prdt, /*!< in: original parent MBR */ ulint space, /*!< in: parent space id */ ulint page_no) /*!< in: parent page number */ { @@ -682,7 +681,6 @@ static void lock_prdt_update_split_low( /*=======================*/ - buf_block_t* block, /*!< in/out: page to be split */ buf_block_t* new_block, /*!< in/out: the new half page */ lock_prdt_t* prdt, /*!< in: MBR on the old page */ lock_prdt_t* new_prdt, /*!< in: MBR on the new page */ @@ -759,17 +757,16 @@ Update predicate lock when page splits */ void lock_prdt_update_split( /*===================*/ - buf_block_t* block, /*!< in/out: page to be split */ buf_block_t* new_block, /*!< in/out: the new half page */ lock_prdt_t* prdt, /*!< in: MBR on the old page */ lock_prdt_t* new_prdt, /*!< in: MBR on the new page */ ulint space, /*!< in: space id */ ulint page_no) /*!< in: page number */ { - lock_prdt_update_split_low(block, new_block, prdt, new_prdt, + lock_prdt_update_split_low(new_block, prdt, new_prdt, space, page_no, LOCK_PREDICATE); - lock_prdt_update_split_low(block, new_block, NULL, NULL, + lock_prdt_update_split_low(new_block, NULL, NULL, space, page_no, LOCK_PRDT_PAGE); } @@ -811,15 +808,14 @@ lock_prdt_lock( SELECT FOR UPDATE */ ulint type_mode, /*!< in: LOCK_PREDICATE or LOCK_PRDT_PAGE */ - que_thr_t* thr, /*!< in: query thread + que_thr_t* thr) /*!< in: query thread (can be NULL if BTR_NO_LOCKING_FLAG) */ - mtr_t* mtr) /*!< in/out: mini-transaction */ { trx_t* trx = thr_get_trx(thr); dberr_t err = DB_SUCCESS; lock_rec_req_status status = LOCK_REC_SUCCESS; - if (trx->read_only || dict_table_is_temporary(index->table)) { + if (trx->read_only || index->table->is_temporary()) { return(DB_SUCCESS); } @@ -838,7 +834,7 @@ lock_prdt_lock( lock_mutex_enter(); - const ulint prdt_mode = mode | type_mode; + const ulint prdt_mode = ulint(mode) | type_mode; lock_t* lock = lock_rec_get_first_on_page(hash, block); if (lock == NULL) { @@ -846,7 +842,7 @@ lock_prdt_lock( #ifdef WITH_WSREP NULL, NULL, /* FIXME: replicate SPATIAL INDEX locks */ #endif - mode | type_mode, block, PRDT_HEAPNO, + ulint(mode) | type_mode, block, PRDT_HEAPNO, index, trx, FALSE); status = LOCK_REC_SUCCESS_CREATED; @@ -878,7 +874,7 @@ lock_prdt_lock( NULL, /* FIXME: replicate SPATIAL INDEX locks */ #endif - mode | type_mode, + ulint(mode) | type_mode, block, PRDT_HEAPNO, index, thr, prdt); } else { diff --git a/storage/innobase/lock/lock0wait.cc b/storage/innobase/lock/lock0wait.cc index 6ce9e843a43..4a3adaa4757 100644 --- a/storage/innobase/lock/lock0wait.cc +++ b/storage/innobase/lock/lock0wait.cc @@ -48,7 +48,7 @@ lock_wait_table_print(void) const srv_slot_t* slot = lock_sys.waiting_threads; - for (ulint i = 0; i < OS_THREAD_MAX_N; i++, ++slot) { + for (ulint i = 0; i < srv_max_n_threads; i++, ++slot) { fprintf(stderr, "Slot %lu: thread type %lu," @@ -72,7 +72,7 @@ lock_wait_table_release_slot( srv_slot_t* slot) /*!< in: slot to release */ { #ifdef UNIV_DEBUG - srv_slot_t* upper = lock_sys.waiting_threads + OS_THREAD_MAX_N; + srv_slot_t* upper = lock_sys.waiting_threads + srv_max_n_threads; #endif /* UNIV_DEBUG */ lock_wait_mutex_enter(); @@ -142,7 +142,7 @@ lock_wait_table_reserve_slot( slot = lock_sys.waiting_threads; - for (i = OS_THREAD_MAX_N; i--; ++slot) { + for (i = srv_max_n_threads; i--; ++slot) { if (!slot->in_use) { slot->in_use = TRUE; 
slot->thr = thr; @@ -163,13 +163,13 @@ lock_wait_table_reserve_slot( } ut_ad(lock_sys.last_slot - <= lock_sys.waiting_threads + OS_THREAD_MAX_N); + <= lock_sys.waiting_threads + srv_max_n_threads); return(slot); } } - ib::error() << "There appear to be " << OS_THREAD_MAX_N << " user" + ib::error() << "There appear to be " << srv_max_n_threads << " user" " threads currently waiting inside InnoDB, which is the upper" " limit. Cannot continue operation. Before aborting, we print" " a list of waiting threads."; @@ -290,7 +290,7 @@ lock_wait_suspend_thread( if (ut_usectime(&sec, &ms) == -1) { start_time = -1; } else { - start_time = static_cast(sec) * 1000000 + ms; + start_time = int64_t(sec) * 1000000 + int64_t(ms); } } @@ -378,31 +378,27 @@ lock_wait_suspend_thread( lock_wait_table_release_slot(slot); if (thr->lock_state == QUE_THR_LOCK_ROW) { - ulint diff_time; - - if (ut_usectime(&sec, &ms) == -1) { + int64_t diff_time; + if (start_time == -1 || ut_usectime(&sec, &ms) == -1) { finish_time = -1; + diff_time = 0; } else { - finish_time = static_cast(sec) * 1000000 + ms; - } + finish_time = int64_t(sec) * 1000000 + int64_t(ms); + diff_time = std::max( + 0, finish_time - start_time); + srv_stats.n_lock_wait_time.add(diff_time); - diff_time = (finish_time > start_time) ? - (ulint) (finish_time - start_time) : 0; + /* Only update the variable if we successfully + retrieved the start and finish times. See Bug#36819. */ + if (ulint(diff_time) > lock_sys.n_lock_max_wait_time) { + lock_sys.n_lock_max_wait_time + = ulint(diff_time); + } + /* Record the lock wait time for this thread */ + thd_storage_lock_wait(trx->mysql_thd, diff_time); + } srv_stats.n_lock_wait_current_count.dec(); - srv_stats.n_lock_wait_time.add(diff_time); - - /* Only update the variable if we successfully - retrieved the start and finish times. See Bug#36819. */ - if (diff_time > lock_sys.n_lock_max_wait_time - && start_time != -1 - && finish_time != -1) { - - lock_sys.n_lock_max_wait_time = diff_time; - } - - /* Record the lock wait time for this thread */ - thd_set_lock_wait_time(trx->mysql_thd, diff_time); DBUG_EXECUTE_IF("lock_instrument_slow_query_log", os_thread_sleep(1000);); diff --git a/storage/innobase/log/log0crypt.cc b/storage/innobase/log/log0crypt.cc index 9cd06bc0c6f..980b26d448c 100644 --- a/storage/innobase/log/log0crypt.cc +++ b/storage/innobase/log/log0crypt.cc @@ -62,7 +62,9 @@ struct crypt_info_t { static crypt_info_t info; /** Crypt info when upgrading from 10.1 */ -static crypt_info_t infos[5]; +static crypt_info_t infos[5 * 2]; +/** First unused slot in infos[] */ +static size_t infos_used; /*********************************************************************//** Get a log block's start lsn. @@ -80,28 +82,6 @@ log_block_get_start_lsn( return start_lsn; } -/*********************************************************************//** -Get crypt info from checkpoint. -@return a crypt info or NULL if not present. */ -static -const crypt_info_t* -get_crypt_info(ulint checkpoint_no) -{ - /* a log block only stores 4-bytes of checkpoint no */ - checkpoint_no &= 0xFFFFFFFF; - for (unsigned i = 0; i < 5; i++) { - const crypt_info_t* it = &infos[i]; - - if (it->key_version && it->checkpoint_no == checkpoint_no) { - return it; - } - } - - /* If checkpoint contains more than one key and we did not - find the correct one use the first one. */ - return infos; -} - /** Encrypt or decrypt log blocks. 
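The rewritten lock-wait timing in the lock0wait.cc hunk above keeps both timestamps as int64_t microseconds, only updates the statistics when both ut_usectime() calls succeeded, and clamps a negative difference to zero before feeding it to srv_stats and thd_storage_lock_wait(). The same computation in isolation, with gettimeofday standing in for ut_usectime:

#include <algorithm>
#include <cstdint>
#include <sys/time.h>

/* Current time in microseconds, or -1 on failure, mirroring how
   start_time/finish_time are produced above. */
static int64_t now_usec() {
  struct timeval tv;
  if (gettimeofday(&tv, NULL) == -1) return -1;
  return int64_t(tv.tv_sec) * 1000000 + int64_t(tv.tv_usec);
}

static int64_t wait_time_usec(int64_t start_time, int64_t finish_time) {
  if (start_time == -1 || finish_time == -1) return 0;  /* clock failed */
  return std::max<int64_t>(0, finish_time - start_time);
}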
@param[in,out] buf log blocks to encrypt or decrypt @param[in] lsn log sequence number of the start of the buffer @@ -166,9 +146,7 @@ log_crypt(byte* buf, lsn_t lsn, ulint size, bool decrypt) @param[in,out] info encryption key @param[in] upgrade whether to use the key in MariaDB 10.1 format @return whether the operation was successful */ -static -bool -init_crypt_key(crypt_info_t* info, bool upgrade = false) +static bool init_crypt_key(crypt_info_t* info, bool upgrade = false) { byte mysqld_key[MY_AES_MAX_KEY_LENGTH]; uint keylen = sizeof mysqld_key; @@ -219,7 +197,7 @@ bool log_crypt_init() { ut_ad(log_mutex_own()); - ut_ad(log_sys->is_encrypted()); + ut_ad(log_sys.is_encrypted()); info.key_version = encryption_key_get_latest_version( LOG_DEFAULT_ENCRYPTION_KEY); @@ -253,8 +231,20 @@ log_crypt_101_read_checkpoint(const byte* buf) const size_t n = *buf++ == 2 ? std::min(unsigned(*buf++), 5U) : 0; for (size_t i = 0; i < n; i++) { - struct crypt_info_t& info = infos[i]; - info.checkpoint_no = mach_read_from_4(buf); + struct crypt_info_t& info = infos[infos_used]; + unsigned checkpoint_no = mach_read_from_4(buf); + for (size_t j = 0; j < infos_used; j++) { + if (infos[j].checkpoint_no == checkpoint_no) { + /* Do not overwrite an existing slot. */ + goto next_slot; + } + } + if (infos_used >= UT_ARR_SIZE(infos)) { + ut_ad(!"too many checkpoint pages"); + goto next_slot; + } + infos_used++; + info.checkpoint_no = checkpoint_no; info.key_version = mach_read_from_4(buf + 4); memcpy(info.crypt_msg.bytes, buf + 8, sizeof info.crypt_msg); memcpy(info.crypt_nonce.bytes, buf + 24, @@ -263,6 +253,7 @@ log_crypt_101_read_checkpoint(const byte* buf) if (!init_crypt_key(&info, true)) { return false; } +next_slot: buf += 4 + 4 + 2 * MY_AES_BLOCK_SIZE; } @@ -278,13 +269,19 @@ log_crypt_101_read_block(byte* buf) { ut_ad(log_block_calc_checksum_format_0(buf) != log_block_get_checksum(buf)); - const crypt_info_t* info = get_crypt_info( - log_block_get_checkpoint_no(buf)); - - if (!info || info->key_version == 0) { - return false; + const uint32_t checkpoint_no + = uint32_t(log_block_get_checkpoint_no(buf)); + const crypt_info_t* info = infos; + for (const crypt_info_t* const end = info + infos_used; info < end; + info++) { + if (info->key_version + && info->checkpoint_no == checkpoint_no) { + goto found; + } } + return false; +found: byte dst[OS_FILE_LOG_BLOCK_SIZE]; uint dst_len; byte aes_ctr_iv[MY_AES_BLOCK_SIZE]; @@ -313,9 +310,7 @@ log_crypt_101_read_block(byte* buf) LOG_DEFAULT_ENCRYPTION_KEY, info->key_version); - if (rc != MY_AES_OK || dst_len != src_len - || log_block_calc_checksum_format_0(dst) - != log_block_get_checksum(dst)) { + if (rc != MY_AES_OK || dst_len != src_len) { return false; } diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index 1f217cf3526..b099e50cd9e 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -81,7 +81,7 @@ reduce the size of the log. 
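log_crypt_101_read_checkpoint() above now appends to the fixed-size infos[] array only when the 32-bit checkpoint number is not already present, and refuses to grow past the array bound, since the function can be reached for more than one checkpoint page during a crash upgrade. The insert-if-absent pattern in isolation, with generic names rather than the server's:

#include <cstddef>
#include <cstdint>

struct slot_sketch { uint32_t checkpoint_no; uint32_t key_version; };

static slot_sketch slots[10];
static size_t slots_used = 0;

/* Insert-if-absent into a bounded array; returns false when the array is full. */
static bool remember_checkpoint(uint32_t checkpoint_no, uint32_t key_version) {
  for (size_t i = 0; i < slots_used; i++)
    if (slots[i].checkpoint_no == checkpoint_no)
      return true;                          /* keep the existing slot */
  if (slots_used >= sizeof slots / sizeof *slots)
    return false;                           /* too many checkpoint pages */
  slots[slots_used].checkpoint_no = checkpoint_no;
  slots[slots_used].key_version   = key_version;
  slots_used++;
  return true;
}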
*/ /** Redo log system */ -log_t* log_sys = NULL; +log_t log_sys; /** Whether to generate and require checksums on the redo log pages */ my_bool innodb_log_checksums; @@ -106,7 +106,8 @@ static time_t log_last_margine_warning_time; /* Margins for free space in the log buffer after a log entry is catenated */ #define LOG_BUF_FLUSH_RATIO 2 -#define LOG_BUF_FLUSH_MARGIN (LOG_BUF_WRITE_MARGIN + 4 * UNIV_PAGE_SIZE) +#define LOG_BUF_FLUSH_MARGIN (LOG_BUF_WRITE_MARGIN \ + + (4U << srv_page_size_shift)) /* This parameter controls asynchronous making of a new checkpoint; the value should be bigger than LOG_POOL_PREFLUSH_RATIO_SYNC */ @@ -133,15 +134,8 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(log_scrub_thread)(void*); -/******************************************************//** -Completes a checkpoint write i/o to a log file. */ -static -void -log_io_complete_checkpoint(void); -/*============================*/ - /****************************************************************//** -Returns the oldest modified block lsn in the pool, or log_sys->lsn if none +Returns the oldest modified block lsn in the pool, or log_sys.lsn if none exists. @return LSN of oldest modification */ static @@ -157,7 +151,7 @@ log_buf_pool_get_oldest_modification(void) if (!lsn) { - lsn = log_sys->lsn; + lsn = log_sys.lsn; } return(lsn); @@ -165,17 +159,13 @@ log_buf_pool_get_oldest_modification(void) /** Extends the log buffer. @param[in] len requested minimum size in bytes */ -void -log_buffer_extend( - ulint len) +void log_buffer_extend(ulong len) { - ulint move_start; - ulint move_end; byte tmp_buf[OS_FILE_LOG_BLOCK_SIZE]; log_mutex_enter_all(); - while (log_sys->is_extending) { + while (log_sys.is_extending) { /* Another thread is trying to extend already. Needs to wait for. */ log_mutex_exit_all(); @@ -184,28 +174,28 @@ log_buffer_extend( log_mutex_enter_all(); - if (srv_log_buffer_size > len / UNIV_PAGE_SIZE) { + if (srv_log_buffer_size > len) { /* Already extended enough by the others */ log_mutex_exit_all(); return; } } - if (len >= log_sys->buf_size / 2) { + if (len >= srv_log_buffer_size / 2) { DBUG_EXECUTE_IF("ib_log_buffer_is_short_crash", DBUG_SUICIDE();); /* log_buffer is too small. try to extend instead of crash. */ - ib::warn() << "The transaction log size is too large" - " for innodb_log_buffer_size (" << len << " >= " - << LOG_BUFFER_SIZE << " / 2). Trying to extend it."; + ib::warn() << "The redo log transaction size " << len << + " exceeds innodb_log_buffer_size=" + << srv_log_buffer_size << " / 2). Trying to extend it."; } - log_sys->is_extending = true; + log_sys.is_extending = true; - while (ut_calc_align_down(log_sys->buf_free, + while (ut_calc_align_down(log_sys.buf_free, OS_FILE_LOG_BLOCK_SIZE) - != ut_calc_align_down(log_sys->buf_next_to_write, + != ut_calc_align_down(log_sys.buf_next_to_write, OS_FILE_LOG_BLOCK_SIZE)) { /* Buffer might have >1 blocks to write still. 
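The new LOG_BUF_FLUSH_MARGIN above expresses "four pages" as a shift on the runtime page-size exponent, since 4U << srv_page_size_shift equals 4 * srv_page_size; the earlier TRX_UNDO_PAGE_REUSE_LIMIT change uses the same trick, 3 << (srv_page_size_shift - 2) being three quarters of a page. A quick check of both identities across the supported shifts:

#include <cassert>

int main() {
  for (unsigned shift = 12; shift <= 16; shift++) {   /* 4 KiB .. 64 KiB */
    const unsigned long page = 1UL << shift;
    assert((4UL << shift) == 4 * page);
    assert((3UL << (shift - 2)) == 3 * page / 4);
  }
}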
*/ log_mutex_exit_all(); @@ -215,46 +205,46 @@ log_buffer_extend( log_mutex_enter_all(); } - move_start = ut_calc_align_down( - log_sys->buf_free, + ulong move_start = ut_calc_align_down( + log_sys.buf_free, OS_FILE_LOG_BLOCK_SIZE); - move_end = log_sys->buf_free; + ulong move_end = log_sys.buf_free; /* store the last log block in buffer */ - ut_memcpy(tmp_buf, log_sys->buf + move_start, + ut_memcpy(tmp_buf, log_sys.buf + move_start, move_end - move_start); - log_sys->buf_free -= move_start; - log_sys->buf_next_to_write -= move_start; + log_sys.buf_free -= move_start; + log_sys.buf_next_to_write -= move_start; /* free previous after getting the right address */ - if (!log_sys->first_in_use) { - log_sys->buf -= log_sys->buf_size; + if (!log_sys.first_in_use) { + log_sys.buf -= srv_log_buffer_size; } - ut_free_dodump(log_sys->buf, log_sys->buf_size * 2); + ut_free_dodump(log_sys.buf, srv_log_buffer_size * 2); /* reallocate log buffer */ - srv_log_buffer_size = len / UNIV_PAGE_SIZE + 1; - log_sys->buf_size = LOG_BUFFER_SIZE; + srv_log_buffer_size = len; - log_sys->buf = static_cast( - ut_malloc_dontdump(log_sys->buf_size * 2)); + log_sys.buf = static_cast( + ut_malloc_dontdump(srv_log_buffer_size * 2)); + TRASH_ALLOC(log_sys.buf, srv_log_buffer_size * 2); - log_sys->first_in_use = true; + log_sys.first_in_use = true; - log_sys->max_buf_free = log_sys->buf_size / LOG_BUF_FLUSH_RATIO + log_sys.max_buf_free = srv_log_buffer_size / LOG_BUF_FLUSH_RATIO - LOG_BUF_FLUSH_MARGIN; /* restore the last log block */ - ut_memcpy(log_sys->buf, tmp_buf, move_end - move_start); + ut_memcpy(log_sys.buf, tmp_buf, move_end - move_start); - ut_ad(log_sys->is_extending); - log_sys->is_extending = false; + ut_ad(log_sys.is_extending); + log_sys.is_extending = false; log_mutex_exit_all(); ib::info() << "innodb_log_buffer_size was extended to " - << LOG_BUFFER_SIZE << "."; + << srv_log_buffer_size << "."; } /** Calculate actual length in redo buffer and file including @@ -273,7 +263,7 @@ log_calculate_actual_len( - (LOG_BLOCK_HDR_SIZE + LOG_BLOCK_TRL_SIZE); /* actual data length in last block already written */ - ulint extra_len = (log_sys->buf_free % OS_FILE_LOG_BLOCK_SIZE); + ulint extra_len = (log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE); ut_ad(extra_len >= LOG_BLOCK_HDR_SIZE); extra_len -= LOG_BLOCK_HDR_SIZE; @@ -298,7 +288,7 @@ log_margin_checkpoint_age( ut_ad(log_mutex_own()); - if (margin > log_sys->log_group_capacity) { + if (margin > log_sys.log_group_capacity) { /* return with warning output to avoid deadlock */ if (!log_has_printed_chkp_margine_warning || difftime(time(NULL), @@ -310,7 +300,7 @@ log_margin_checkpoint_age( " small for the single transaction log (size=" << len << "). So, the last checkpoint age" " might exceed the log group capacity " - << log_sys->log_group_capacity << "."; + << log_sys.log_group_capacity << "."; } return; @@ -319,20 +309,20 @@ log_margin_checkpoint_age( /* Our margin check should ensure that we never reach this condition. Try to do checkpoint once. We cannot keep waiting here as it might result in hang in case the current mtr has latch on oldest lsn */ - if (log_sys->lsn - log_sys->last_checkpoint_lsn + margin - > log_sys->log_group_capacity) { + if (log_sys.lsn - log_sys.last_checkpoint_lsn + margin + > log_sys.log_group_capacity) { /* The log write of 'len' might overwrite the transaction log after the last checkpoint. Makes checkpoint. 
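The extension code above allocates twice srv_log_buffer_size and keeps log_sys.buf pointing at one half at a time, with first_in_use recording which half; the "free previous after getting the right address" step rewinds the pointer to the start of the allocation before freeing. A toy model of that double-half bookkeeping; the actual switching happens elsewhere in the log writer, so this shows only the idea, not the patch's code:

#include <cstdlib>

struct log_buf_sketch {
  unsigned char* buf;        /* points at the half currently in use */
  size_t         half_size;  /* srv_log_buffer_size in the real code */
  bool           first_in_use;

  void create(size_t size) {
    half_size = size;
    buf = static_cast<unsigned char*>(std::malloc(size * 2));
    first_in_use = true;
  }
  /* Switch to the other half, e.g. while the old one is being written out. */
  void switch_half() {
    buf = first_in_use ? buf + half_size : buf - half_size;
    first_in_use = !first_in_use;
  }
  void destroy() {
    /* rewind to the start of the allocation before freeing */
    std::free(first_in_use ? buf : buf - half_size);
    buf = NULL;
  }
};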
*/ bool flushed_enough = false; - if (log_sys->lsn - log_buf_pool_get_oldest_modification() + if (log_sys.lsn - log_buf_pool_get_oldest_modification() + margin - <= log_sys->log_group_capacity) { + <= log_sys.log_group_capacity) { flushed_enough = true; } - log_sys->check_flush_or_checkpoint = true; + log_sys.check_flush_or_checkpoint = true; log_mutex_exit(); DEBUG_SYNC_C("margin_checkpoint_age_rescue"); @@ -363,7 +353,7 @@ log_reserve_and_open( loop: ut_ad(log_mutex_own()); - if (log_sys->is_extending) { + if (log_sys.is_extending) { log_mutex_exit(); /* Log buffer size is extending. Writing up to the next block @@ -383,7 +373,7 @@ loop: len_upper_limit = LOG_BUF_WRITE_MARGIN + srv_log_write_ahead_size + (5 * len) / 4; - if (log_sys->buf_free + len_upper_limit > log_sys->buf_size) { + if (log_sys.buf_free + len_upper_limit > srv_log_buffer_size) { log_mutex_exit(); DEBUG_SYNC_C("log_buf_size_exceeded"); @@ -399,7 +389,7 @@ loop: goto loop; } - return(log_sys->lsn); + return(log_sys.lsn); } /************************************************************//** @@ -411,7 +401,6 @@ log_write_low( const byte* str, /*!< in: string */ ulint str_len) /*!< in: string length */ { - log_t* log = log_sys; ulint len; ulint data_len; byte* log_block; @@ -420,7 +409,7 @@ log_write_low( part_loop: /* Calculate a part length */ - data_len = (log->buf_free % OS_FILE_LOG_BLOCK_SIZE) + str_len; + data_len = (log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE) + str_len; if (data_len <= OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE) { @@ -431,18 +420,18 @@ part_loop: data_len = OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE; len = OS_FILE_LOG_BLOCK_SIZE - - (log->buf_free % OS_FILE_LOG_BLOCK_SIZE) + - (log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE) - LOG_BLOCK_TRL_SIZE; } - ut_memcpy(log->buf + log->buf_free, str, len); + memcpy(log_sys.buf + log_sys.buf_free, str, len); str_len -= len; str = str + len; log_block = static_cast( - ut_align_down( - log->buf + log->buf_free, OS_FILE_LOG_BLOCK_SIZE)); + ut_align_down(log_sys.buf + log_sys.buf_free, + OS_FILE_LOG_BLOCK_SIZE)); log_block_set_data_len(log_block, data_len); @@ -450,20 +439,21 @@ part_loop: /* This block became full */ log_block_set_data_len(log_block, OS_FILE_LOG_BLOCK_SIZE); log_block_set_checkpoint_no(log_block, - log_sys->next_checkpoint_no); + log_sys.next_checkpoint_no); len += LOG_BLOCK_HDR_SIZE + LOG_BLOCK_TRL_SIZE; - log->lsn += len; + log_sys.lsn += len; /* Initialize the next block header */ - log_block_init(log_block + OS_FILE_LOG_BLOCK_SIZE, log->lsn); + log_block_init(log_block + OS_FILE_LOG_BLOCK_SIZE, + log_sys.lsn); } else { - log->lsn += len; + log_sys.lsn += len; } - log->buf_free += len; + log_sys.buf_free += ulong(len); - ut_ad(log->buf_free <= log->buf_size); + ut_ad(log_sys.buf_free <= srv_log_buffer_size); if (str_len > 0) { goto part_loop; @@ -483,16 +473,15 @@ log_close(void) ulint first_rec_group; lsn_t oldest_lsn; lsn_t lsn; - log_t* log = log_sys; lsn_t checkpoint_age; ut_ad(log_mutex_own()); - lsn = log->lsn; + lsn = log_sys.lsn; log_block = static_cast( - ut_align_down( - log->buf + log->buf_free, OS_FILE_LOG_BLOCK_SIZE)); + ut_align_down(log_sys.buf + log_sys.buf_free, + OS_FILE_LOG_BLOCK_SIZE)); first_rec_group = log_block_get_first_rec_group(log_block); @@ -505,14 +494,13 @@ log_close(void) log_block, log_block_get_data_len(log_block)); } - if (log->buf_free > log->max_buf_free) { - - log->check_flush_or_checkpoint = true; + if (log_sys.buf_free > log_sys.max_buf_free) { + log_sys.check_flush_or_checkpoint = true; } - checkpoint_age = lsn 
- log->last_checkpoint_lsn; + checkpoint_age = lsn - log_sys.last_checkpoint_lsn; - if (checkpoint_age >= log->log_group_capacity) { + if (checkpoint_age >= log_sys.log_group_capacity) { DBUG_EXECUTE_IF( "print_all_chkp_warnings", log_has_printed_chkp_warning = false;); @@ -525,131 +513,28 @@ log_close(void) ib::error() << "The age of the last checkpoint is " << checkpoint_age << ", which exceeds the log" - " group capacity " << log->log_group_capacity + " group capacity " + << log_sys.log_group_capacity << "."; } } - if (checkpoint_age <= log->max_modified_age_sync) { - + if (checkpoint_age <= log_sys.max_modified_age_sync) { goto function_exit; } oldest_lsn = buf_pool_get_oldest_modification(); if (!oldest_lsn - || lsn - oldest_lsn > log->max_modified_age_sync - || checkpoint_age > log->max_checkpoint_age_async) { - - log->check_flush_or_checkpoint = true; + || lsn - oldest_lsn > log_sys.max_modified_age_sync + || checkpoint_age > log_sys.max_checkpoint_age_async) { + log_sys.check_flush_or_checkpoint = true; } function_exit: return(lsn); } -/******************************************************//** -Calculates the offset within a log group, when the log file headers are not -included. -@return size offset (<= offset) */ -UNIV_INLINE -lsn_t -log_group_calc_size_offset( -/*=======================*/ - lsn_t offset, /*!< in: real offset within the - log group */ - const log_group_t* group) /*!< in: log group */ -{ - /* The lsn parameters are updated while holding both the mutexes - and it is ok to have either of them while reading */ - ut_ad(log_mutex_own() || log_write_mutex_own()); - - return(offset - LOG_FILE_HDR_SIZE * (1 + offset / group->file_size)); -} - -/******************************************************//** -Calculates the offset within a log group, when the log file headers are -included. -@return real offset (>= offset) */ -UNIV_INLINE -lsn_t -log_group_calc_real_offset( -/*=======================*/ - lsn_t offset, /*!< in: size offset within the - log group */ - const log_group_t* group) /*!< in: log group */ -{ - /* The lsn parameters are updated while holding both the mutexes - and it is ok to have either of them while reading */ - ut_ad(log_mutex_own() || log_write_mutex_own()); - - return(offset + LOG_FILE_HDR_SIZE - * (1 + offset / (group->file_size - LOG_FILE_HDR_SIZE))); -} - -/** Calculate the offset of an lsn within a log group. 
-@param[in] lsn log sequence number -@param[in] group log group -@return offset within the log group */ -lsn_t -log_group_calc_lsn_offset( - lsn_t lsn, - const log_group_t* group) -{ - lsn_t gr_lsn; - lsn_t gr_lsn_size_offset; - lsn_t difference; - lsn_t group_size; - lsn_t offset; - - /* The lsn parameters are updated while holding both the mutexes - and it is ok to have either of them while reading */ - ut_ad(log_mutex_own() || log_write_mutex_own()); - - gr_lsn = group->lsn; - - gr_lsn_size_offset = log_group_calc_size_offset( - group->lsn_offset, group); - - group_size = group->capacity(); - - if (lsn >= gr_lsn) { - - difference = lsn - gr_lsn; - } else { - difference = gr_lsn - lsn; - - difference = difference % group_size; - - difference = group_size - difference; - } - - offset = (gr_lsn_size_offset + difference) % group_size; - - /* fprintf(stderr, - "Offset is " LSN_PF " gr_lsn_offset is " LSN_PF - " difference is " LSN_PF "\n", - offset, gr_lsn_size_offset, difference); - */ - - return(log_group_calc_real_offset(offset, group)); -} - -/********************************************************//** -Sets the field values in group to correspond to a given lsn. For this function -to work, the values must already be correctly initialized to correspond to -some lsn, for instance, a checkpoint lsn. */ -void -log_group_set_fields( -/*=================*/ - log_group_t* group, /*!< in/out: group */ - lsn_t lsn) /*!< in: lsn for which the values should be - set */ -{ - group->lsn_offset = log_group_calc_lsn_offset(lsn, group); - group->lsn = lsn; -} - /** Calculate the recommended highest values for lsn - last_checkpoint_lsn and lsn - buf_get_oldest_modification(). @param[in] file_size requested innodb_log_file_size @@ -689,175 +574,123 @@ log_set_capacity(ulonglong file_size) log_mutex_enter(); - log_sys->log_group_capacity = smallest_capacity; + log_sys.log_group_capacity = smallest_capacity; - log_sys->max_modified_age_async = margin + log_sys.max_modified_age_async = margin - margin / LOG_POOL_PREFLUSH_RATIO_ASYNC; - log_sys->max_modified_age_sync = margin + log_sys.max_modified_age_sync = margin - margin / LOG_POOL_PREFLUSH_RATIO_SYNC; - log_sys->max_checkpoint_age_async = margin - margin + log_sys.max_checkpoint_age_async = margin - margin / LOG_POOL_CHECKPOINT_RATIO_ASYNC; - log_sys->max_checkpoint_age = margin; + log_sys.max_checkpoint_age = margin; log_mutex_exit(); return(true); } -/** Initializes the redo logging subsystem. */ -void -log_sys_init() +/** Initialize the redo log subsystem. 
*/ +void log_t::create() { - log_sys = static_cast(ut_zalloc_nokey(sizeof(log_t))); + ut_ad(this == &log_sys); + ut_ad(!is_initialised()); + m_initialised= true; - mutex_create(LATCH_ID_LOG_SYS, &log_sys->mutex); - mutex_create(LATCH_ID_LOG_WRITE, &log_sys->write_mutex); + mutex_create(LATCH_ID_LOG_SYS, &mutex); + mutex_create(LATCH_ID_LOG_WRITE, &write_mutex); + mutex_create(LATCH_ID_LOG_FLUSH_ORDER, &log_flush_order_mutex); - mutex_create(LATCH_ID_LOG_FLUSH_ORDER, &log_sys->log_flush_order_mutex); + /* Start the lsn from one log block from zero: this way every + log record has a non-zero start lsn, a fact which we will use */ - /* Start the lsn from one log block from zero: this way every - log record has a start lsn != zero, a fact which we will use */ + lsn= LOG_START_LSN; - log_sys->lsn = LOG_START_LSN; + ut_ad(srv_log_buffer_size >= 16 * OS_FILE_LOG_BLOCK_SIZE); + ut_ad(srv_log_buffer_size >= 4U << srv_page_size_shift); - ut_a(LOG_BUFFER_SIZE >= 16 * OS_FILE_LOG_BLOCK_SIZE); - ut_a(LOG_BUFFER_SIZE >= 4 * UNIV_PAGE_SIZE); + buf= static_cast(ut_malloc_dontdump(srv_log_buffer_size * 2)); + TRASH_ALLOC(buf, srv_log_buffer_size * 2); - log_sys->buf_size = LOG_BUFFER_SIZE; + first_in_use= true; - log_sys->buf = static_cast( - ut_malloc_dontdump(log_sys->buf_size * 2)); + max_buf_free= srv_log_buffer_size / LOG_BUF_FLUSH_RATIO - + LOG_BUF_FLUSH_MARGIN; + check_flush_or_checkpoint= true; - log_sys->first_in_use = true; + n_log_ios_old= n_log_ios; + last_printout_time= time(NULL); - log_sys->max_buf_free = log_sys->buf_size / LOG_BUF_FLUSH_RATIO - - LOG_BUF_FLUSH_MARGIN; - log_sys->check_flush_or_checkpoint = true; + buf_next_to_write= 0; + is_extending= false; + write_lsn= lsn; + flushed_to_disk_lsn= 0; + n_pending_flushes= 0; + flush_event = os_event_create("log_flush_event"); + os_event_set(flush_event); + n_log_ios= 0; + n_log_ios_old= 0; + log_group_capacity= 0; + max_modified_age_async= 0; + max_modified_age_sync= 0; + max_checkpoint_age_async= 0; + max_checkpoint_age= 0; + next_checkpoint_no= 0; + next_checkpoint_lsn= 0; + append_on_checkpoint= NULL; + n_pending_checkpoint_writes= 0; - log_sys->n_log_ios_old = log_sys->n_log_ios; - log_sys->last_printout_time = time(NULL); - /*----------------------------*/ + last_checkpoint_lsn= lsn; + rw_lock_create(checkpoint_lock_key, &checkpoint_lock, SYNC_NO_ORDER_CHECK); - log_sys->write_lsn = log_sys->lsn; + log_block_init(buf, lsn); + log_block_set_first_rec_group(buf, LOG_BLOCK_HDR_SIZE); - log_sys->flush_event = os_event_create(0); + buf_free= LOG_BLOCK_HDR_SIZE; + lsn= LOG_START_LSN + LOG_BLOCK_HDR_SIZE; - os_event_set(log_sys->flush_event); + MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, lsn - last_checkpoint_lsn); - /*----------------------------*/ - - log_sys->last_checkpoint_lsn = log_sys->lsn; - - rw_lock_create( - checkpoint_lock_key, &log_sys->checkpoint_lock, - SYNC_NO_ORDER_CHECK); - - log_sys->checkpoint_buf_ptr = static_cast( - ut_zalloc_nokey(2 * OS_FILE_LOG_BLOCK_SIZE)); - - log_sys->checkpoint_buf = static_cast( - ut_align(log_sys->checkpoint_buf_ptr, OS_FILE_LOG_BLOCK_SIZE)); - - /*----------------------------*/ - - log_block_init(log_sys->buf, log_sys->lsn); - log_block_set_first_rec_group(log_sys->buf, LOG_BLOCK_HDR_SIZE); - - log_sys->buf_free = LOG_BLOCK_HDR_SIZE; - log_sys->lsn = LOG_START_LSN + LOG_BLOCK_HDR_SIZE; // TODO(minliz): ensure various LOG_START_LSN? 
- - MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, - log_sys->lsn - log_sys->last_checkpoint_lsn); - - log_scrub_thread_active = !srv_read_only_mode && srv_scrub_log; - if (log_scrub_thread_active) { - log_scrub_event = os_event_create("log_scrub_event"); - os_thread_create(log_scrub_thread, NULL, NULL); - } + log_scrub_thread_active= !srv_read_only_mode && srv_scrub_log; + if (log_scrub_thread_active) { + log_scrub_event= os_event_create("log_scrub_event"); + os_thread_create(log_scrub_thread, NULL, NULL); + } } /** Initialize the redo log. @param[in] n_files number of files */ -void -log_init(ulint n_files) +void log_t::files::create(ulint n_files) { - ulint i; - log_group_t* group = &log_sys->log; + ut_ad(n_files <= SRV_N_LOG_FILES_MAX); + ut_ad(this == &log_sys.log); + ut_ad(log_sys.is_initialised()); - group->n_files = n_files; - group->format = srv_encrypt_log - ? LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED - : LOG_HEADER_FORMAT_CURRENT; - group->file_size = srv_log_file_size; - group->state = LOG_GROUP_OK; - group->lsn = LOG_START_LSN; - group->lsn_offset = LOG_FILE_HDR_SIZE; + this->n_files= n_files; + format= srv_encrypt_log + ? LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED + : LOG_HEADER_FORMAT_CURRENT; + file_size= srv_log_file_size; + state= LOG_GROUP_OK; + lsn= LOG_START_LSN; + lsn_offset= LOG_FILE_HDR_SIZE; - group->file_header_bufs_ptr = static_cast( - ut_zalloc_nokey(sizeof(byte*) * n_files)); + byte* ptr= static_cast(ut_zalloc_nokey(LOG_FILE_HDR_SIZE * n_files + + OS_FILE_LOG_BLOCK_SIZE)); + file_header_bufs_ptr= ptr; + ptr= static_cast(ut_align(ptr, OS_FILE_LOG_BLOCK_SIZE)); - group->file_header_bufs = static_cast( - ut_zalloc_nokey(sizeof(byte**) * n_files)); + memset(file_header_bufs, 0, sizeof file_header_bufs); - for (i = 0; i < n_files; i++) { - group->file_header_bufs_ptr[i] = static_cast( - ut_zalloc_nokey(LOG_FILE_HDR_SIZE - + OS_FILE_LOG_BLOCK_SIZE)); - - group->file_header_bufs[i] = static_cast( - ut_align(group->file_header_bufs_ptr[i], - OS_FILE_LOG_BLOCK_SIZE)); - } - - group->checkpoint_buf_ptr = static_cast( - ut_zalloc_nokey(2 * OS_FILE_LOG_BLOCK_SIZE)); - - group->checkpoint_buf = static_cast( - ut_align(group->checkpoint_buf_ptr,OS_FILE_LOG_BLOCK_SIZE)); -} - -/******************************************************//** -Completes an i/o to a log file. */ -void -log_io_complete( -/*============*/ - log_group_t* group) /*!< in: log group or a dummy pointer */ -{ - if ((ulint) group & 0x1UL) { - /* It was a checkpoint write */ - group = (log_group_t*)((ulint) group - 1); - - switch (srv_file_flush_method) { - case SRV_O_DSYNC: - case SRV_NOSYNC: - break; - case SRV_FSYNC: - case SRV_LITTLESYNC: - case SRV_O_DIRECT: - case SRV_O_DIRECT_NO_FSYNC: - case SRV_ALL_O_DIRECT_FSYNC: - fil_flush(SRV_LOG_SPACE_FIRST_ID); - } - - - DBUG_PRINT("ib_log", ("checkpoint info written")); - log_io_complete_checkpoint(); - - return; - } - - ut_error; /*!< We currently use synchronous writing of the - logs and cannot end up here! */ + for (ulint i = 0; i < n_files; i++, ptr += LOG_FILE_HDR_SIZE) + file_header_bufs[i] = ptr; } /******************************************************//** Writes a log file header to a log file space. 
*/ static void -log_group_file_header_flush( -/*========================*/ - log_group_t* group, /*!< in: log group */ +log_file_header_flush( ulint nth_file, /*!< in: header to the nth file in the log file space */ lsn_t start_lsn) /*!< in: log file data starts at this @@ -868,14 +701,14 @@ log_group_file_header_flush( ut_ad(log_write_mutex_own()); ut_ad(!recv_no_log_write); - ut_a(nth_file < group->n_files); - ut_ad((group->format & ~LOG_HEADER_FORMAT_ENCRYPTED) + ut_a(nth_file < log_sys.log.n_files); + ut_ad((log_sys.log.format & ~LOG_HEADER_FORMAT_ENCRYPTED) == LOG_HEADER_FORMAT_CURRENT); - buf = *(group->file_header_bufs + nth_file); + buf = log_sys.log.file_header_bufs[nth_file]; memset(buf, 0, OS_FILE_LOG_BLOCK_SIZE); - mach_write_to_4(buf + LOG_HEADER_FORMAT, group->format); + mach_write_to_4(buf + LOG_HEADER_FORMAT, log_sys.log.format); mach_write_to_8(buf + LOG_HEADER_START_LSN, start_lsn); strcpy(reinterpret_cast(buf) + LOG_HEADER_CREATOR, LOG_HEADER_CREATOR_CURRENT); @@ -883,26 +716,25 @@ log_group_file_header_flush( >= sizeof LOG_HEADER_CREATOR_CURRENT); log_block_set_checksum(buf, log_block_calc_checksum_crc32(buf)); - dest_offset = nth_file * group->file_size; + dest_offset = nth_file * log_sys.log.file_size; DBUG_PRINT("ib_log", ("write " LSN_PF " file " ULINTPF " header", start_lsn, nth_file)); - log_sys->n_log_ios++; + log_sys.n_log_ios++; MONITOR_INC(MONITOR_LOG_IO); srv_stats.os_log_pending_writes.inc(); - const ulint page_no - = (ulint) (dest_offset / univ_page_size.physical()); + const ulint page_no = ulint(dest_offset >> srv_page_size_shift); fil_io(IORequestLogWrite, true, page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no), univ_page_size, - (ulint) (dest_offset % univ_page_size.physical()), - OS_FILE_LOG_BLOCK_SIZE, buf, group); + ulint(dest_offset & (srv_page_size - 1)), + OS_FILE_LOG_BLOCK_SIZE, buf, NULL); srv_stats.os_log_pending_writes.dec(); } @@ -921,12 +753,10 @@ log_block_store_checksum( } /******************************************************//** -Writes a buffer to a log file group. */ +Writes a buffer to a log file. 
*/ static void -log_group_write_buf( -/*================*/ - log_group_t* group, /*!< in: log group */ +log_write_buf( byte* buf, /*!< in: buffer */ ulint len, /*!< in: buffer len; must be divisible by OS_FILE_LOG_BLOCK_SIZE */ @@ -957,28 +787,27 @@ loop: return; } - next_offset = log_group_calc_lsn_offset(start_lsn, group); + next_offset = log_sys.log.calc_lsn_offset(start_lsn); if (write_header - && next_offset % group->file_size == LOG_FILE_HDR_SIZE) { + && next_offset % log_sys.log.file_size == LOG_FILE_HDR_SIZE) { /* We start to write a new log file instance in the group */ - ut_a(next_offset / group->file_size <= ULINT_MAX); + ut_a(next_offset / log_sys.log.file_size <= ULINT_MAX); - log_group_file_header_flush(group, (ulint) - (next_offset / group->file_size), - start_lsn); + log_file_header_flush( + ulint(next_offset / log_sys.log.file_size), start_lsn); srv_stats.os_log_written.add(OS_FILE_LOG_BLOCK_SIZE); srv_stats.log_writes.inc(); } - if ((next_offset % group->file_size) + len > group->file_size) { - + if ((next_offset % log_sys.log.file_size) + len + > log_sys.log.file_size) { /* if the above condition holds, then the below expression is < len which is ulint, so the typecast is ok */ - write_len = (ulint) - (group->file_size - (next_offset % group->file_size)); + write_len = ulint(log_sys.log.file_size + - (next_offset % log_sys.log.file_size)); } else { write_len = len; } @@ -1010,22 +839,20 @@ loop: log_block_store_checksum(buf + i * OS_FILE_LOG_BLOCK_SIZE); } - log_sys->n_log_ios++; + log_sys.n_log_ios++; MONITOR_INC(MONITOR_LOG_IO); srv_stats.os_log_pending_writes.inc(); - ut_a(next_offset / UNIV_PAGE_SIZE <= ULINT_MAX); + ut_a((next_offset >> srv_page_size_shift) <= ULINT_MAX); - const ulint page_no - = (ulint) (next_offset / univ_page_size.physical()); + const ulint page_no = ulint(next_offset >> srv_page_size_shift); fil_io(IORequestLogWrite, true, page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no), univ_page_size, - (ulint) (next_offset % UNIV_PAGE_SIZE), write_len, buf, - group); + ulint(next_offset & (srv_page_size - 1)), write_len, buf, NULL); srv_stats.os_log_pending_writes.dec(); @@ -1049,9 +876,9 @@ static void log_write_flush_to_disk_low() { - /* FIXME: This is not holding log_sys->mutex while + /* FIXME: This is not holding log_sys.mutex while calling os_event_set()! 
*/ - ut_a(log_sys->n_pending_flushes == 1); /* No other threads here */ + ut_a(log_sys.n_pending_flushes == 1); /* No other threads here */ bool do_flush = srv_file_flush_method != SRV_O_DSYNC; @@ -1063,12 +890,12 @@ log_write_flush_to_disk_low() log_mutex_enter(); if (do_flush) { - log_sys->flushed_to_disk_lsn = log_sys->current_flush_lsn; + log_sys.flushed_to_disk_lsn = log_sys.current_flush_lsn; } - log_sys->n_pending_flushes--; + log_sys.n_pending_flushes--; - os_event_set(log_sys->flush_event); + os_event_set(log_sys.flush_event); } /** Switch the log buffer in use, and copy the content of last block @@ -1081,29 +908,29 @@ log_buffer_switch() ut_ad(log_mutex_own()); ut_ad(log_write_mutex_own()); - const byte* old_buf = log_sys->buf; - ulint area_end = ut_calc_align(log_sys->buf_free, + const byte* old_buf = log_sys.buf; + ulint area_end = ut_calc_align(log_sys.buf_free, OS_FILE_LOG_BLOCK_SIZE); - if (log_sys->first_in_use) { - log_sys->first_in_use = false; - ut_ad(log_sys->buf == ut_align(log_sys->buf, + if (log_sys.first_in_use) { + log_sys.first_in_use = false; + ut_ad(log_sys.buf == ut_align(log_sys.buf, OS_FILE_LOG_BLOCK_SIZE)); - log_sys->buf += log_sys->buf_size; + log_sys.buf += srv_log_buffer_size; } else { - log_sys->first_in_use = true; - log_sys->buf -= log_sys->buf_size; - ut_ad(log_sys->buf == ut_align(log_sys->buf, + log_sys.first_in_use = true; + log_sys.buf -= srv_log_buffer_size; + ut_ad(log_sys.buf == ut_align(log_sys.buf, OS_FILE_LOG_BLOCK_SIZE)); } /* Copy the last block to new buf */ - ut_memcpy(log_sys->buf, + ut_memcpy(log_sys.buf, old_buf + area_end - OS_FILE_LOG_BLOCK_SIZE, OS_FILE_LOG_BLOCK_SIZE); - log_sys->buf_free %= OS_FILE_LOG_BLOCK_SIZE; - log_sys->buf_next_to_write = log_sys->buf_free; + log_sys.buf_free %= OS_FILE_LOG_BLOCK_SIZE; + log_sys.buf_next_to_write = log_sys.buf_free; } /** Ensure that the log has been written to the log file up to a given @@ -1148,7 +975,7 @@ loop: (flush_to_disk == true) case, because the log_mutex contention also works as the arbitrator for write-IO (fsync) bandwidth between log files and data files. */ - if (!flush_to_disk && log_sys->write_lsn >= lsn) { + if (!flush_to_disk && log_sys.write_lsn >= lsn) { return; } #endif @@ -1157,8 +984,8 @@ loop: ut_ad(!recv_no_log_write); lsn_t limit_lsn = flush_to_disk - ? log_sys->flushed_to_disk_lsn - : log_sys->write_lsn; + ? log_sys.flushed_to_disk_lsn + : log_sys.write_lsn; if (limit_lsn >= lsn) { log_write_mutex_exit(); @@ -1171,15 +998,15 @@ loop: pending flush and based on that we wait for it to finish before proceeding further. */ if (flush_to_disk - && (log_sys->n_pending_flushes > 0 - || !os_event_is_set(log_sys->flush_event))) { + && (log_sys.n_pending_flushes > 0 + || !os_event_is_set(log_sys.flush_event))) { /* Figure out if the current flush will do the job for us. 
*/ - bool work_done = log_sys->current_flush_lsn >= lsn; + bool work_done = log_sys.current_flush_lsn >= lsn; log_write_mutex_exit(); - os_event_wait(log_sys->flush_event); + os_event_wait(log_sys.flush_event); if (work_done) { return; @@ -1190,7 +1017,7 @@ loop: log_mutex_enter(); if (!flush_to_disk - && log_sys->buf_free == log_sys->buf_next_to_write) { + && log_sys.buf_free == log_sys.buf_next_to_write) { /* Nothing to write and no flush to disk requested */ log_mutex_exit_all(); return; @@ -1204,15 +1031,15 @@ loop: ulint pad_size; DBUG_PRINT("ib_log", ("write " LSN_PF " to " LSN_PF, - log_sys->write_lsn, - log_sys->lsn)); + log_sys.write_lsn, + log_sys.lsn)); if (flush_to_disk) { - log_sys->n_pending_flushes++; - log_sys->current_flush_lsn = log_sys->lsn; + log_sys.n_pending_flushes++; + log_sys.current_flush_lsn = log_sys.lsn; MONITOR_INC(MONITOR_PENDING_LOG_FLUSH); - os_event_reset(log_sys->flush_event); + os_event_reset(log_sys.flush_event); - if (log_sys->buf_free == log_sys->buf_next_to_write) { + if (log_sys.buf_free == log_sys.buf_next_to_write) { /* Nothing to write, flush only */ log_mutex_exit_all(); log_write_flush_to_disk_low(); @@ -1221,39 +1048,37 @@ loop: } } - start_offset = log_sys->buf_next_to_write; - end_offset = log_sys->buf_free; + start_offset = log_sys.buf_next_to_write; + end_offset = log_sys.buf_free; area_start = ut_calc_align_down(start_offset, OS_FILE_LOG_BLOCK_SIZE); area_end = ut_calc_align(end_offset, OS_FILE_LOG_BLOCK_SIZE); ut_ad(area_end - area_start > 0); - log_block_set_flush_bit(log_sys->buf + area_start, TRUE); + log_block_set_flush_bit(log_sys.buf + area_start, TRUE); log_block_set_checkpoint_no( - log_sys->buf + area_end - OS_FILE_LOG_BLOCK_SIZE, - log_sys->next_checkpoint_no); + log_sys.buf + area_end - OS_FILE_LOG_BLOCK_SIZE, + log_sys.next_checkpoint_no); - write_lsn = log_sys->lsn; - write_buf = log_sys->buf; + write_lsn = log_sys.lsn; + write_buf = log_sys.buf; log_buffer_switch(); - log_group_set_fields(&log_sys->log, log_sys->write_lsn); + log_sys.log.set_fields(log_sys.write_lsn); log_mutex_exit(); /* Erase the end of the last log block. */ - memset(write_buf + end_offset, 0, ~end_offset & OS_FILE_LOG_BLOCK_SIZE); + memset(write_buf + end_offset, 0, + ~end_offset & (OS_FILE_LOG_BLOCK_SIZE - 1)); /* Calculate pad_size if needed. */ pad_size = 0; if (write_ahead_size > OS_FILE_LOG_BLOCK_SIZE) { - lsn_t end_offset; ulint end_offset_in_unit; - end_offset = log_group_calc_lsn_offset( - ut_uint64_align_up(write_lsn, - OS_FILE_LOG_BLOCK_SIZE), - &log_sys->log); + lsn_t end_offset = log_sys.log.calc_lsn_offset( + ut_uint64_align_up(write_lsn, OS_FILE_LOG_BLOCK_SIZE)); end_offset_in_unit = (ulint) (end_offset % write_ahead_size); if (end_offset_in_unit > 0 @@ -1261,47 +1086,45 @@ loop: /* The first block in the unit was initialized after the last writing. Needs to be written padded data once. 
*/ - pad_size = std::min( + pad_size = std::min( ulint(write_ahead_size) - end_offset_in_unit, - log_sys->buf_size - area_end); + srv_log_buffer_size - area_end); ::memset(write_buf + area_end, 0, pad_size); } } - if (log_sys->is_encrypted()) { - log_crypt(write_buf + area_start, log_sys->write_lsn, + if (log_sys.is_encrypted()) { + log_crypt(write_buf + area_start, log_sys.write_lsn, area_end - area_start); } /* Do the write to the log files */ - log_group_write_buf( - &log_sys->log, write_buf + area_start, - area_end - area_start + pad_size, + log_write_buf( + write_buf + area_start, area_end - area_start + pad_size, #ifdef UNIV_DEBUG pad_size, #endif /* UNIV_DEBUG */ - ut_uint64_align_down(log_sys->write_lsn, + ut_uint64_align_down(log_sys.write_lsn, OS_FILE_LOG_BLOCK_SIZE), start_offset - area_start); srv_stats.log_padded.add(pad_size); - log_sys->write_lsn = write_lsn; + log_sys.write_lsn = write_lsn; if (srv_file_flush_method == SRV_O_DSYNC) { /* O_SYNC means the OS did not buffer the log file at all: so we have also flushed to disk what we have written */ - log_sys->flushed_to_disk_lsn = log_sys->write_lsn; + log_sys.flushed_to_disk_lsn = log_sys.write_lsn; } log_write_mutex_exit(); if (flush_to_disk) { log_write_flush_to_disk_low(); - ib_uint64_t write_lsn = log_sys->write_lsn; - ib_uint64_t flush_lsn = log_sys->flushed_to_disk_lsn; + ib_uint64_t flush_lsn = log_sys.flushed_to_disk_lsn; log_mutex_exit(); - innobase_mysql_log_notify(write_lsn, flush_lsn); + innobase_mysql_log_notify(flush_lsn); } } @@ -1330,11 +1153,11 @@ log_buffer_sync_in_background( log_mutex_enter(); - lsn = log_sys->lsn; + lsn = log_sys.lsn; if (flush - && log_sys->n_pending_flushes > 0 - && log_sys->current_flush_lsn >= lsn) { + && log_sys.n_pending_flushes > 0 + && log_sys.current_flush_lsn >= lsn) { /* The write + flush will write enough */ log_mutex_exit(); return; @@ -1354,14 +1177,13 @@ void log_flush_margin(void) /*==================*/ { - log_t* log = log_sys; lsn_t lsn = 0; log_mutex_enter(); - if (log->buf_free > log->max_buf_free) { + if (log_sys.buf_free > log_sys.max_buf_free) { /* We can write during flush */ - lsn = log->lsn; + lsn = log_sys.lsn; } log_mutex_exit(); @@ -1441,36 +1263,33 @@ log_complete_checkpoint(void) /*=========================*/ { ut_ad(log_mutex_own()); - ut_ad(log_sys->n_pending_checkpoint_writes == 0); + ut_ad(log_sys.n_pending_checkpoint_writes == 0); - log_sys->next_checkpoint_no++; + log_sys.next_checkpoint_no++; - log_sys->last_checkpoint_lsn = log_sys->next_checkpoint_lsn; + log_sys.last_checkpoint_lsn = log_sys.next_checkpoint_lsn; MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, - log_sys->lsn - log_sys->last_checkpoint_lsn); + log_sys.lsn - log_sys.last_checkpoint_lsn); DBUG_PRINT("ib_log", ("checkpoint ended at " LSN_PF ", flushed to " LSN_PF, - log_sys->last_checkpoint_lsn, - log_sys->flushed_to_disk_lsn)); + log_sys.last_checkpoint_lsn, + log_sys.flushed_to_disk_lsn)); - rw_lock_x_unlock_gen(&(log_sys->checkpoint_lock), LOG_CHECKPOINT); + rw_lock_x_unlock_gen(&(log_sys.checkpoint_lock), LOG_CHECKPOINT); } -/******************************************************//** -Completes an asynchronous checkpoint info write i/o to a log file. */ -static -void -log_io_complete_checkpoint(void) -/*============================*/ +/** Complete an asynchronous checkpoint write. 
*/ +void log_t::complete_checkpoint() { + ut_ad(this == &log_sys); MONITOR_DEC(MONITOR_PENDING_CHECKPOINT_WRITE); log_mutex_enter(); - ut_ad(log_sys->n_pending_checkpoint_writes > 0); + ut_ad(n_pending_checkpoint_writes > 0); - if (--log_sys->n_pending_checkpoint_writes == 0) { + if (!--n_pending_checkpoint_writes) { log_complete_checkpoint(); } @@ -1484,91 +1303,78 @@ void log_group_checkpoint(lsn_t end_lsn) { lsn_t lsn_offset; - byte* buf; ut_ad(!srv_read_only_mode); ut_ad(log_mutex_own()); - ut_ad(end_lsn == 0 || end_lsn >= log_sys->next_checkpoint_lsn); - ut_ad(end_lsn <= log_sys->lsn); - ut_ad(end_lsn + SIZE_OF_MLOG_CHECKPOINT <= log_sys->lsn + ut_ad(end_lsn == 0 || end_lsn >= log_sys.next_checkpoint_lsn); + ut_ad(end_lsn <= log_sys.lsn); + ut_ad(end_lsn + SIZE_OF_MLOG_CHECKPOINT <= log_sys.lsn || srv_shutdown_state != SRV_SHUTDOWN_NONE); DBUG_PRINT("ib_log", ("checkpoint " UINT64PF " at " LSN_PF " written", - log_sys->next_checkpoint_no, - log_sys->next_checkpoint_lsn)); + log_sys.next_checkpoint_no, + log_sys.next_checkpoint_lsn)); - log_group_t* group = &log_sys->log; - - buf = group->checkpoint_buf; + byte* buf = log_sys.checkpoint_buf; memset(buf, 0, OS_FILE_LOG_BLOCK_SIZE); - mach_write_to_8(buf + LOG_CHECKPOINT_NO, log_sys->next_checkpoint_no); - mach_write_to_8(buf + LOG_CHECKPOINT_LSN, log_sys->next_checkpoint_lsn); + mach_write_to_8(buf + LOG_CHECKPOINT_NO, log_sys.next_checkpoint_no); + mach_write_to_8(buf + LOG_CHECKPOINT_LSN, log_sys.next_checkpoint_lsn); - if (log_sys->is_encrypted()) { + if (log_sys.is_encrypted()) { log_crypt_write_checkpoint_buf(buf); } - lsn_offset = log_group_calc_lsn_offset(log_sys->next_checkpoint_lsn, - group); + lsn_offset = log_sys.log.calc_lsn_offset(log_sys.next_checkpoint_lsn); mach_write_to_8(buf + LOG_CHECKPOINT_OFFSET, lsn_offset); - mach_write_to_8(buf + LOG_CHECKPOINT_LOG_BUF_SIZE, log_sys->buf_size); + mach_write_to_8(buf + LOG_CHECKPOINT_LOG_BUF_SIZE, + srv_log_buffer_size); mach_write_to_8(buf + LOG_CHECKPOINT_END_LSN, end_lsn); log_block_set_checksum(buf, log_block_calc_checksum_crc32(buf)); MONITOR_INC(MONITOR_PENDING_CHECKPOINT_WRITE); - log_sys->n_log_ios++; + log_sys.n_log_ios++; MONITOR_INC(MONITOR_LOG_IO); - ut_ad(LOG_CHECKPOINT_1 < univ_page_size.physical()); - ut_ad(LOG_CHECKPOINT_2 < univ_page_size.physical()); + ut_ad(LOG_CHECKPOINT_1 < srv_page_size); + ut_ad(LOG_CHECKPOINT_2 < srv_page_size); - if (log_sys->n_pending_checkpoint_writes++ == 0) { - rw_lock_x_lock_gen(&log_sys->checkpoint_lock, + if (log_sys.n_pending_checkpoint_writes++ == 0) { + rw_lock_x_lock_gen(&log_sys.checkpoint_lock, LOG_CHECKPOINT); } /* Note: We alternate the physical place of the checkpoint info. See the (next_checkpoint_no & 1) below. */ - /* We send as the last parameter the group machine address - added with 1, as we want to distinguish between a normal log - file write and a checkpoint field write */ - fil_io(IORequestLogWrite, false, page_id_t(SRV_LOG_SPACE_FIRST_ID, 0), univ_page_size, - (log_sys->next_checkpoint_no & 1) + (log_sys.next_checkpoint_no & 1) ? LOG_CHECKPOINT_2 : LOG_CHECKPOINT_1, OS_FILE_LOG_BLOCK_SIZE, - buf, (byte*) group + 1); - - ut_ad(((ulint) group & 0x1UL) == 0); + buf, reinterpret_cast(1) /* checkpoint write */); } -/** Read a log group header page to log_sys->checkpoint_buf. -@param[in] group log group -@param[in] header 0 or LOG_CHEKCPOINT_1 or LOG_CHECKPOINT2 */ -void -log_group_header_read( - const log_group_t* group, - ulint header) +/** Read a log group header page to log_sys.checkpoint_buf. 
+@param[in] header 0 or LOG_CHECKPOINT_1 or LOG_CHECKPOINT2 */ +void log_header_read(ulint header) { ut_ad(log_mutex_own()); - log_sys->n_log_ios++; + log_sys.n_log_ios++; MONITOR_INC(MONITOR_LOG_IO); fil_io(IORequestLogRead, true, page_id_t(SRV_LOG_SPACE_FIRST_ID, - header / univ_page_size.physical()), - univ_page_size, header % univ_page_size.physical(), - OS_FILE_LOG_BLOCK_SIZE, log_sys->checkpoint_buf, NULL); + header >> srv_page_size_shift), + univ_page_size, header & (srv_page_size - 1), + OS_FILE_LOG_BLOCK_SIZE, log_sys.checkpoint_buf, NULL); } /** Write checkpoint info to the log header and invoke log_mutex_exit(). @@ -1588,8 +1394,8 @@ log_write_checkpoint_info(bool sync, lsn_t end_lsn) if (sync) { /* Wait for the checkpoint write to complete */ - rw_lock_s_lock(&log_sys->checkpoint_lock); - rw_lock_s_unlock(&log_sys->checkpoint_lock); + rw_lock_s_lock(&log_sys.checkpoint_lock); + rw_lock_s_unlock(&log_sys.checkpoint_lock); DBUG_EXECUTE_IF( "crash_after_checkpoint", @@ -1605,8 +1411,8 @@ log_append_on_checkpoint( mtr_buf_t* buf) { log_mutex_enter(); - mtr_buf_t* old = log_sys->append_on_checkpoint; - log_sys->append_on_checkpoint = buf; + mtr_buf_t* old = log_sys.append_on_checkpoint; + log_sys.append_on_checkpoint = buf; log_mutex_exit(); return(old); } @@ -1645,7 +1451,9 @@ log_checkpoint( case SRV_LITTLESYNC: case SRV_O_DIRECT: case SRV_O_DIRECT_NO_FSYNC: +#ifdef _WIN32 case SRV_ALL_O_DIRECT_FSYNC: +#endif fil_flush_file_spaces(FIL_TYPE_TABLESPACE); } @@ -1655,24 +1463,24 @@ log_checkpoint( oldest_lsn = log_buf_pool_get_oldest_modification(); /* Because log also contains headers and dummy log records, - log_buf_pool_get_oldest_modification() will return log_sys->lsn + log_buf_pool_get_oldest_modification() will return log_sys.lsn if the buffer pool contains no dirty buffers. We must make sure that the log is flushed up to that lsn. If there are dirty buffers in the buffer pool, then our write-ahead-logging algorithm ensures that the log has been flushed up to oldest_lsn. */ - ut_ad(oldest_lsn >= log_sys->last_checkpoint_lsn); + ut_ad(oldest_lsn >= log_sys.last_checkpoint_lsn); if (!write_always && oldest_lsn - <= log_sys->last_checkpoint_lsn + SIZE_OF_MLOG_CHECKPOINT) { + <= log_sys.last_checkpoint_lsn + SIZE_OF_MLOG_CHECKPOINT) { /* Do nothing, because nothing was logged (other than a MLOG_CHECKPOINT marker) since the previous checkpoint. */ log_mutex_exit(); return(true); } /* Repeat the MLOG_FILE_NAME records after the checkpoint, in - case some log records between the checkpoint and log_sys->lsn + case some log records between the checkpoint and log_sys.lsn need them. Finally, write a MLOG_CHECKPOINT marker. Redo log apply expects to see a MLOG_CHECKPOINT after the checkpoint, except on clean shutdown, where the log will be empty after @@ -1683,14 +1491,14 @@ log_checkpoint( threads will be blocked, and no pages can be added to the flush lists. 
*/ lsn_t flush_lsn = oldest_lsn; - const lsn_t end_lsn = log_sys->lsn; + const lsn_t end_lsn = log_sys.lsn; const bool do_write = srv_shutdown_state == SRV_SHUTDOWN_NONE || flush_lsn != end_lsn; if (fil_names_clear(flush_lsn, do_write)) { - ut_ad(log_sys->lsn >= end_lsn + SIZE_OF_MLOG_CHECKPOINT); - flush_lsn = log_sys->lsn; + ut_ad(log_sys.lsn >= end_lsn + SIZE_OF_MLOG_CHECKPOINT); + flush_lsn = log_sys.lsn; } log_mutex_exit(); @@ -1713,28 +1521,28 @@ log_checkpoint( log_mutex_enter(); - ut_ad(log_sys->flushed_to_disk_lsn >= flush_lsn); + ut_ad(log_sys.flushed_to_disk_lsn >= flush_lsn); ut_ad(flush_lsn >= oldest_lsn); - if (log_sys->last_checkpoint_lsn >= oldest_lsn) { + if (log_sys.last_checkpoint_lsn >= oldest_lsn) { log_mutex_exit(); return(true); } - if (log_sys->n_pending_checkpoint_writes > 0) { + if (log_sys.n_pending_checkpoint_writes > 0) { /* A checkpoint write is running */ log_mutex_exit(); if (sync) { /* Wait for the checkpoint write to complete */ - rw_lock_s_lock(&log_sys->checkpoint_lock); - rw_lock_s_unlock(&log_sys->checkpoint_lock); + rw_lock_s_lock(&log_sys.checkpoint_lock); + rw_lock_s_unlock(&log_sys.checkpoint_lock); } return(false); } - log_sys->next_checkpoint_lsn = oldest_lsn; + log_sys.next_checkpoint_lsn = oldest_lsn; log_write_checkpoint_info(sync, end_lsn); ut_ad(!log_mutex_own()); @@ -1772,7 +1580,6 @@ void log_checkpoint_margin(void) /*=======================*/ { - log_t* log = log_sys; lsn_t age; lsn_t checkpoint_age; ib_uint64_t advance; @@ -1784,39 +1591,39 @@ loop: log_mutex_enter(); ut_ad(!recv_no_log_write); - if (!log->check_flush_or_checkpoint) { + if (!log_sys.check_flush_or_checkpoint) { log_mutex_exit(); return; } oldest_lsn = log_buf_pool_get_oldest_modification(); - age = log->lsn - oldest_lsn; + age = log_sys.lsn - oldest_lsn; - if (age > log->max_modified_age_sync) { + if (age > log_sys.max_modified_age_sync) { /* A flush is urgent: we have to do a synchronous preflush */ - advance = age - log->max_modified_age_sync; + advance = age - log_sys.max_modified_age_sync; } - checkpoint_age = log->lsn - log->last_checkpoint_lsn; + checkpoint_age = log_sys.lsn - log_sys.last_checkpoint_lsn; bool checkpoint_sync; bool do_checkpoint; - if (checkpoint_age > log->max_checkpoint_age) { + if (checkpoint_age > log_sys.max_checkpoint_age) { /* A checkpoint is urgent: we do it synchronously */ checkpoint_sync = true; do_checkpoint = true; - } else if (checkpoint_age > log->max_checkpoint_age_async) { + } else if (checkpoint_age > log_sys.max_checkpoint_age_async) { /* A checkpoint is not urgent: do it asynchronously */ do_checkpoint = true; checkpoint_sync = false; - log->check_flush_or_checkpoint = false; + log_sys.check_flush_or_checkpoint = false; } else { do_checkpoint = false; checkpoint_sync = false; - log->check_flush_or_checkpoint = false; + log_sys.check_flush_or_checkpoint = false; } log_mutex_exit(); @@ -1831,9 +1638,7 @@ loop: thread doing a flush at the same time. 
*/ if (!success) { log_mutex_enter(); - - log->check_flush_or_checkpoint = true; - + log_sys.check_flush_or_checkpoint = true; log_mutex_exit(); goto loop; } @@ -1864,7 +1669,7 @@ log_check_margins(void) log_checkpoint_margin(); log_mutex_enter(); ut_ad(!recv_no_log_write); - check = log_sys->check_flush_or_checkpoint; + check = log_sys.check_flush_or_checkpoint; log_mutex_exit(); } while (check); } @@ -1889,7 +1694,7 @@ logs_empty_and_mark_files_at_shutdown(void) srv_shutdown_state = SRV_SHUTDOWN_CLEANUP; loop: ut_ad(lock_sys.is_initialised() || !srv_was_started); - ut_ad(log_sys || !srv_was_started); + ut_ad(log_sys.is_initialised() || !srv_was_started); ut_ad(fil_system.is_initialised() || !srv_was_started); os_event_set(srv_buf_resize_event); @@ -2026,10 +1831,10 @@ wait_suspend_loop: os_event_set(log_scrub_event); } - if (log_sys) { + if (log_sys.is_initialised()) { log_mutex_enter(); - const ulint n_write = log_sys->n_pending_checkpoint_writes; - const ulint n_flush = log_sys->n_pending_flushes; + const ulint n_write = log_sys.n_pending_checkpoint_writes; + const ulint n_flush = log_sys.n_pending_flushes; log_mutex_exit(); if (log_scrub_thread_active || n_write || n_flush) { @@ -2093,10 +1898,10 @@ wait_suspend_loop: log_mutex_enter(); - lsn = log_sys->lsn; + lsn = log_sys.lsn; - const bool lsn_changed = lsn != log_sys->last_checkpoint_lsn; - ut_ad(lsn >= log_sys->last_checkpoint_lsn); + const bool lsn_changed = lsn != log_sys.last_checkpoint_lsn; + ut_ad(lsn >= log_sys.last_checkpoint_lsn); log_mutex_exit(); @@ -2104,6 +1909,9 @@ wait_suspend_loop: goto loop; } + /* Ensure that all buffered changes are written to the + redo log before fil_close_all_files(). */ + fil_flush_file_spaces(FIL_TYPE_LOG); } else { lsn = srv_start_lsn; } @@ -2117,7 +1925,7 @@ wait_suspend_loop: "Free innodb buffer pool"); buf_all_freed(); - ut_a(lsn == log_sys->lsn + ut_a(lsn == log_sys.lsn || srv_force_recovery == SRV_FORCE_NO_LOG_REDO); if (lsn < srv_start_lsn) { @@ -2141,7 +1949,7 @@ wait_suspend_loop: /* Make some checks that the server really is quiet */ ut_a(srv_get_active_thread_type() == SRV_NONE); - ut_a(lsn == log_sys->lsn + ut_a(lsn == log_sys.lsn || srv_force_recovery == SRV_FORCE_NO_LOG_REDO); } @@ -2153,8 +1961,8 @@ log_peek_lsn( /*=========*/ lsn_t* lsn) /*!< out: if returns TRUE, current lsn is here */ { - if (0 == mutex_enter_nowait(&(log_sys->mutex))) { - *lsn = log_sys->lsn; + if (0 == mutex_enter_nowait(&(log_sys.mutex))) { + *lsn = log_sys.lsn; log_mutex_exit(); @@ -2181,15 +1989,15 @@ log_print( "Log flushed up to " LSN_PF "\n" "Pages flushed up to " LSN_PF "\n" "Last checkpoint at " LSN_PF "\n", - log_sys->lsn, - log_sys->flushed_to_disk_lsn, + log_sys.lsn, + log_sys.flushed_to_disk_lsn, log_buf_pool_get_oldest_modification(), - log_sys->last_checkpoint_lsn); + log_sys.last_checkpoint_lsn); current_time = time(NULL); time_elapsed = difftime(current_time, - log_sys->last_printout_time); + log_sys.last_printout_time); if (time_elapsed <= 0) { time_elapsed = 1; @@ -2199,15 +2007,15 @@ log_print( ULINTPF " pending log flushes, " ULINTPF " pending chkp writes\n" ULINTPF " log i/o's done, %.2f log i/o's/second\n", - log_sys->n_pending_flushes, - log_sys->n_pending_checkpoint_writes, - log_sys->n_log_ios, + log_sys.n_pending_flushes, + log_sys.n_pending_checkpoint_writes, + log_sys.n_log_ios, static_cast( - log_sys->n_log_ios - log_sys->n_log_ios_old) + log_sys.n_log_ios - log_sys.n_log_ios_old) / time_elapsed); - log_sys->n_log_ios_old = log_sys->n_log_ios; - log_sys->last_printout_time = 
current_time; + log_sys.n_log_ios_old = log_sys.n_log_ios; + log_sys.last_printout_time = current_time; log_mutex_exit(); } @@ -2218,70 +2026,39 @@ void log_refresh_stats(void) /*===================*/ { - log_sys->n_log_ios_old = log_sys->n_log_ios; - log_sys->last_printout_time = time(NULL); -} - -/** Close a log group. -@param[in,out] group log group to close */ -static -void -log_group_close(log_group_t* group) -{ - ulint i; - - for (i = 0; i < group->n_files; i++) { - ut_free(group->file_header_bufs_ptr[i]); - } - - ut_free(group->file_header_bufs_ptr); - ut_free(group->file_header_bufs); - ut_free(group->checkpoint_buf_ptr); - group->n_files = 0; - group->file_header_bufs_ptr = NULL; - group->file_header_bufs = NULL; - group->checkpoint_buf_ptr = NULL; -} - -/********************************************************//** -Closes all log groups. */ -void -log_group_close_all(void) -/*=====================*/ -{ - log_group_close(&log_sys->log); + log_sys.n_log_ios_old = log_sys.n_log_ios; + log_sys.last_printout_time = time(NULL); } /** Shut down the redo log subsystem. */ -void -log_shutdown() +void log_t::close() { - log_group_close_all(); + ut_ad(this == &log_sys); + if (!is_initialised()) return; + m_initialised = false; + log.close(); - if (!log_sys->first_in_use) { - log_sys->buf -= log_sys->buf_size; - } - ut_free_dodump(log_sys->buf, log_sys->buf_size * 2); - log_sys->buf = NULL; - ut_free(log_sys->checkpoint_buf_ptr); - log_sys->checkpoint_buf_ptr = NULL; - log_sys->checkpoint_buf = NULL; + if (!first_in_use) + buf -= srv_log_buffer_size; + ut_free_dodump(buf, srv_log_buffer_size * 2); + buf = NULL; - os_event_destroy(log_sys->flush_event); + os_event_destroy(flush_event); - rw_lock_free(&log_sys->checkpoint_lock); + rw_lock_free(&checkpoint_lock); + /* rw_lock_free() already called checkpoint_lock.~rw_lock_t(); + tame the debug assertions when the destructor will be called once more. 
*/ + ut_ad(checkpoint_lock.magic_n == 0); + ut_d(checkpoint_lock.magic_n = RW_LOCK_MAGIC_N); - mutex_free(&log_sys->mutex); - mutex_free(&log_sys->write_mutex); - mutex_free(&log_sys->log_flush_order_mutex); + mutex_free(&mutex); + mutex_free(&write_mutex); + mutex_free(&log_flush_order_mutex); - if (!srv_read_only_mode && srv_scrub_log) { - os_event_destroy(log_scrub_event); - } + if (!srv_read_only_mode && srv_scrub_log) + os_event_destroy(log_scrub_event); - recv_sys_close(); - ut_free(log_sys); - log_sys = NULL; + recv_sys_close(); } /******************************************************//** @@ -2302,7 +2079,7 @@ log_pad_current_log_block(void) lsn = log_reserve_and_open(OS_FILE_LOG_BLOCK_SIZE); pad_length = OS_FILE_LOG_BLOCK_SIZE - - (log_sys->buf_free % OS_FILE_LOG_BLOCK_SIZE) + - (log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE) - LOG_BLOCK_TRL_SIZE; if (pad_length == (OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE @@ -2319,7 +2096,7 @@ log_pad_current_log_block(void) log_write_low(&b, 1); } - lsn = log_sys->lsn; + lsn = log_sys.lsn; log_close(); @@ -2335,14 +2112,14 @@ log_scrub() /*=========*/ { log_mutex_enter(); - ulint cur_lbn = log_block_convert_lsn_to_no(log_sys->lsn); + ulint cur_lbn = log_block_convert_lsn_to_no(log_sys.lsn); if (next_lbn_to_pad == cur_lbn) { log_pad_current_log_block(); } - next_lbn_to_pad = log_block_convert_lsn_to_no(log_sys->lsn); + next_lbn_to_pad = log_block_convert_lsn_to_no(log_sys.lsn); log_mutex_exit(); } diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index e44c306c6a3..0de383c24b4 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -62,7 +62,7 @@ Created 9/20/1997 Heikki Tuuri #include "row0merge.h" /** Log records are stored in the hash table in chunks at most of this size; -this must be less than UNIV_PAGE_SIZE as it is stored in the buffer pool */ +this must be less than srv_page_size as it is stored in the buffer pool */ #define RECV_DATA_BLOCK_SIZE (MEM_MAX_ALLOC_IN_BUF - sizeof(recv_data_t)) /** Read-ahead area in applying log records to file pages */ @@ -79,7 +79,7 @@ volatile bool recv_recovery_on; bool recv_needed_recovery; #ifdef UNIV_DEBUG /** TRUE if writing to the redo log (mtr_commit) is forbidden. -Protected by log_sys->mutex. */ +Protected by log_sys.mutex. */ bool recv_no_log_write = false; #endif /* UNIV_DEBUG */ @@ -522,7 +522,7 @@ DECLARE_THREAD(recv_writer_thread)( /* Wait till we get a signal to clean the LRU list. Bounded by max wait time of 100ms. */ - ib_uint64_t sig_count = os_event_reset(buf_flush_event); + int64_t sig_count = os_event_reset(buf_flush_event); os_event_wait_time_low(buf_flush_event, 100000, sig_count); mutex_enter(&recv_sys->writer_mutex); @@ -631,57 +631,46 @@ recv_sys_debug_free(void) mutex_exit(&(recv_sys->mutex)); } -/** Read a log segment to a buffer. -@param[out] buf buffer -@param[in] group redo log files -@param[in, out] start_lsn in : read area start, out: the last read valid lsn +/** Read a log segment to log_sys.buf. 
+@param[in,out] start_lsn in: read area start, +out: the last read valid lsn @param[in] end_lsn read area end -@param[out] invalid_block - invalid, (maybe incompletely written) block encountered -@return false, if invalid block encountered (e.g checksum mismatch), true otherwise */ -bool -log_group_read_log_seg( - byte* buf, - const log_group_t* group, - lsn_t *start_lsn, - lsn_t end_lsn) +@return whether no invalid blocks (e.g checksum mismatch) were found */ +bool log_t::files::read_log_seg(lsn_t* start_lsn, lsn_t end_lsn) { ulint len; - lsn_t source_offset; bool success = true; - ut_ad(log_mutex_own()); + ut_ad(log_sys.mutex.is_owned()); ut_ad(!(*start_lsn % OS_FILE_LOG_BLOCK_SIZE)); ut_ad(!(end_lsn % OS_FILE_LOG_BLOCK_SIZE)); - + byte* buf = log_sys.buf; loop: - source_offset = log_group_calc_lsn_offset(*start_lsn, group); + lsn_t source_offset = calc_lsn_offset(*start_lsn); ut_a(end_lsn - *start_lsn <= ULINT_MAX); len = (ulint) (end_lsn - *start_lsn); ut_ad(len != 0); - const bool at_eof = (source_offset % group->file_size) + len - > group->file_size; + const bool at_eof = (source_offset % file_size) + len > file_size; if (at_eof) { /* If the above condition is true then len (which is ulint) is > the expression below, so the typecast is ok */ - len = (ulint) (group->file_size - - (source_offset % group->file_size)); + len = ulint(file_size - (source_offset % file_size)); } - log_sys->n_log_ios++; + log_sys.n_log_ios++; MONITOR_INC(MONITOR_LOG_IO); - ut_a(source_offset / UNIV_PAGE_SIZE <= ULINT_MAX); + ut_a((source_offset >> srv_page_size_shift) <= ULINT_MAX); - const ulint page_no - = (ulint) (source_offset / univ_page_size.physical()); + const ulint page_no = ulint(source_offset >> srv_page_size_shift); fil_io(IORequestLogRead, true, page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no), univ_page_size, - (ulint) (source_offset % univ_page_size.physical()), + ulint(source_offset & (srv_page_size - 1)), len, buf, NULL); for (ulint l = 0; l < len; l += OS_FILE_LOG_BLOCK_SIZE, @@ -699,7 +688,7 @@ loop: break; } - if (innodb_log_checksums || group->is_encrypted()) { + if (innodb_log_checksums || is_encrypted()) { ulint crc = log_block_calc_checksum_crc32(buf); ulint cksum = log_block_get_checksum(buf); @@ -722,7 +711,7 @@ loop: break; } - if (group->is_encrypted()) { + if (is_encrypted()) { log_crypt(buf, *start_lsn, OS_FILE_LOG_BLOCK_SIZE, true); } @@ -760,14 +749,10 @@ recv_synchronize_groups() the block is always incomplete */ lsn_t start_lsn = ut_uint64_align_down(recovered_lsn, - OS_FILE_LOG_BLOCK_SIZE); - log_group_read_log_seg(log_sys->buf, &log_sys->log, - &start_lsn, start_lsn + OS_FILE_LOG_BLOCK_SIZE); - - /* Update the fields in the group struct to correspond to - recovered_lsn */ - - log_group_set_fields(&log_sys->log, recovered_lsn); + OS_FILE_LOG_BLOCK_SIZE); + log_sys.log.read_log_seg(&start_lsn, + start_lsn + OS_FILE_LOG_BLOCK_SIZE); + log_sys.log.set_fields(recovered_lsn); /* Copy the checkpoint info to the log; remember that we have incremented checkpoint_no by one, and the info will not be written @@ -793,19 +778,17 @@ recv_check_log_header_checksum( } /** Find the latest checkpoint in the format-0 log header. 
-@param[out] max_group log group, or NULL @param[out] max_field LOG_CHECKPOINT_1 or LOG_CHECKPOINT_2 @return error code or DB_SUCCESS */ static MY_ATTRIBUTE((warn_unused_result)) dberr_t -recv_find_max_checkpoint_0(log_group_t** max_group, ulint* max_field) +recv_find_max_checkpoint_0(ulint* max_field) { - log_group_t* group = &log_sys->log; ib_uint64_t max_no = 0; ib_uint64_t checkpoint_no; - byte* buf = log_sys->checkpoint_buf; + byte* buf = log_sys.checkpoint_buf; - ut_ad(group->format == 0); + ut_ad(log_sys.log.format == 0); /** Offset of the first checkpoint checksum */ static const uint CHECKSUM_1 = 288; @@ -816,11 +799,11 @@ recv_find_max_checkpoint_0(log_group_t** max_group, ulint* max_field) /** Least significant bits of the checkpoint offset */ static const uint OFFSET_LOW32 = 16; - *max_group = NULL; + bool found = false; for (ulint field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2; field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) { - log_group_header_read(group, field); + log_header_read(field); if (static_cast(ut_fold_binary(buf, CHECKSUM_1)) != mach_read_from_4(buf + CHECKSUM_1) @@ -847,21 +830,21 @@ recv_find_max_checkpoint_0(log_group_t** max_group, ulint* max_field) mach_read_from_8(buf + LOG_CHECKPOINT_LSN))); if (checkpoint_no >= max_no) { - *max_group = group; + found = true; *max_field = field; max_no = checkpoint_no; - group->state = LOG_GROUP_OK; + log_sys.log.state = LOG_GROUP_OK; - group->lsn = mach_read_from_8( + log_sys.log.lsn = mach_read_from_8( buf + LOG_CHECKPOINT_LSN); - group->lsn_offset = static_cast( + log_sys.log.lsn_offset = static_cast( mach_read_from_4(buf + OFFSET_HIGH32)) << 32 | mach_read_from_4(buf + OFFSET_LOW32); } } - if (*max_group != NULL) { + if (found) { return(DB_SUCCESS); } @@ -883,13 +866,10 @@ dberr_t recv_log_format_0_recover(lsn_t lsn) { log_mutex_enter(); - log_group_t* group = &log_sys->log; - const lsn_t source_offset - = log_group_calc_lsn_offset(lsn, group); + const lsn_t source_offset = log_sys.log.calc_lsn_offset(lsn); log_mutex_exit(); - const ulint page_no - = (ulint) (source_offset / univ_page_size.physical()); - byte* buf = log_sys->buf; + const ulint page_no = ulint(source_offset >> srv_page_size_shift); + byte* buf = log_sys.buf; static const char* NO_UPGRADE_RECOVERY_MSG = "Upgrade after a crash is not supported." 
@@ -898,9 +878,9 @@ recv_log_format_0_recover(lsn_t lsn) fil_io(IORequestLogRead, true, page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no), univ_page_size, - (ulint) ((source_offset & ~(OS_FILE_LOG_BLOCK_SIZE - 1)) - % univ_page_size.physical()), - OS_FILE_LOG_BLOCK_SIZE, buf, NULL); + ulint((source_offset & ~(OS_FILE_LOG_BLOCK_SIZE - 1)) + & (srv_page_size - 1)), + OS_FILE_LOG_BLOCK_SIZE, buf, NULL); if (log_block_calc_checksum_format_0(buf) != log_block_get_checksum(buf) @@ -921,11 +901,11 @@ recv_log_format_0_recover(lsn_t lsn) recv_sys->parse_start_lsn = recv_sys->recovered_lsn = recv_sys->scanned_lsn = recv_sys->mlog_checkpoint_lsn = lsn; - log_sys->last_checkpoint_lsn = log_sys->next_checkpoint_lsn - = log_sys->lsn = log_sys->write_lsn - = log_sys->current_flush_lsn = log_sys->flushed_to_disk_lsn + log_sys.last_checkpoint_lsn = log_sys.next_checkpoint_lsn + = log_sys.lsn = log_sys.write_lsn + = log_sys.current_flush_lsn = log_sys.flushed_to_disk_lsn = lsn; - log_sys->next_checkpoint_no = 0; + log_sys.next_checkpoint_no = 0; return(DB_SUCCESS); } @@ -935,26 +915,23 @@ recv_log_format_0_recover(lsn_t lsn) dberr_t recv_find_max_checkpoint(ulint* max_field) { - log_group_t* group; ib_uint64_t max_no; ib_uint64_t checkpoint_no; ulint field; byte* buf; - group = &log_sys->log; - max_no = 0; *max_field = 0; - buf = log_sys->checkpoint_buf; + buf = log_sys.checkpoint_buf; - group->state = LOG_GROUP_CORRUPTED; + log_sys.log.state = LOG_GROUP_CORRUPTED; - log_group_header_read(group, 0); + log_header_read(0); /* Check the header page checksum. There was no checksum in the first redo log format (version 0). */ - group->format = mach_read_from_4(buf + LOG_HEADER_FORMAT); - if (group->format != LOG_HEADER_FORMAT_3_23 + log_sys.log.format = mach_read_from_4(buf + LOG_HEADER_FORMAT); + if (log_sys.log.format != LOG_HEADER_FORMAT_3_23 && !recv_check_log_header_checksum(buf)) { ib::error() << "Invalid redo log header checksum."; return(DB_CORRUPTION); @@ -966,9 +943,9 @@ recv_find_max_checkpoint(ulint* max_field) /* Ensure that the string is NUL-terminated. 
*/ creator[LOG_HEADER_CREATOR_END - LOG_HEADER_CREATOR] = 0; - switch (group->format) { + switch (log_sys.log.format) { case LOG_HEADER_FORMAT_3_23: - return(recv_find_max_checkpoint_0(&group, max_field)); + return(recv_find_max_checkpoint_0(max_field)); case LOG_HEADER_FORMAT_10_2: case LOG_HEADER_FORMAT_10_2 | LOG_HEADER_FORMAT_ENCRYPTED: case LOG_HEADER_FORMAT_CURRENT: @@ -983,7 +960,7 @@ recv_find_max_checkpoint(ulint* max_field) for (field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2; field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) { - log_group_header_read(group, field); + log_header_read(field); const ulint crc32 = log_block_calc_checksum_crc32(buf); const ulint cksum = log_block_get_checksum(buf); @@ -998,7 +975,7 @@ recv_find_max_checkpoint(ulint* max_field) continue; } - if (group->is_encrypted() + if (log_sys.is_encrypted() && !log_crypt_read_checkpoint_buf(buf)) { ib::error() << "Reading checkpoint" " encryption info failed."; @@ -1016,12 +993,12 @@ recv_find_max_checkpoint(ulint* max_field) if (checkpoint_no >= max_no) { *max_field = field; max_no = checkpoint_no; - group->state = LOG_GROUP_OK; - group->lsn = mach_read_from_8( + log_sys.log.state = LOG_GROUP_OK; + log_sys.log.lsn = mach_read_from_8( buf + LOG_CHECKPOINT_LSN); - group->lsn_offset = mach_read_from_8( + log_sys.log.lsn_offset = mach_read_from_8( buf + LOG_CHECKPOINT_OFFSET); - log_sys->next_checkpoint_no = checkpoint_no; + log_sys.next_checkpoint_no = checkpoint_no; } } @@ -1369,12 +1346,11 @@ parse_log: break; case MLOG_UNDO_INIT: /* Allow anything in page_type when creating a page. */ - ptr = trx_undo_parse_page_init(ptr, end_ptr, page, mtr); + ptr = trx_undo_parse_page_init(ptr, end_ptr, page); break; case MLOG_UNDO_HDR_REUSE: ut_ad(!page || page_type == FIL_PAGE_UNDO_LOG); - ptr = trx_undo_parse_page_header_reuse(ptr, end_ptr, page, - mtr); + ptr = trx_undo_parse_page_header_reuse(ptr, end_ptr, page); break; case MLOG_UNDO_HDR_CREATE: ut_ad(!page || page_type == FIL_PAGE_UNDO_LOG); @@ -1454,7 +1430,7 @@ parse_log: break; case MLOG_FILE_WRITE_CRYPT_DATA: dberr_t err; - ptr = const_cast(fil_parse_write_crypt_data(ptr, end_ptr, block, &err)); + ptr = const_cast(fil_parse_write_crypt_data(ptr, end_ptr, &err)); if (err != DB_SUCCESS) { recv_sys->found_corrupt_log = TRUE; @@ -1563,13 +1539,13 @@ recv_add_to_hash_table( ut_ad(type != MLOG_INDEX_LOAD); ut_ad(type != MLOG_TRUNCATE); - len = rec_end - body; + len = ulint(rec_end - body); recv = static_cast( mem_heap_alloc(recv_sys->heap, sizeof(recv_t))); recv->type = type; - recv->len = rec_end - body; + recv->len = ulint(rec_end - body); recv->start_lsn = start_lsn; recv->end_lsn = end_lsn; @@ -1598,13 +1574,13 @@ recv_add_to_hash_table( prev_field = &(recv->data); - /* Store the log record body in chunks of less than UNIV_PAGE_SIZE: + /* Store the log record body in chunks of less than srv_page_size: recv_sys->heap grows into the buffer pool, and bigger chunks could not be allocated */ while (rec_end > body) { - len = rec_end - body; + len = ulint(rec_end - body); if (len > RECV_DATA_BLOCK_SIZE) { len = RECV_DATA_BLOCK_SIZE; @@ -1753,7 +1729,7 @@ recv_recover_page(bool just_read_in, buf_block_t* block) while (recv) { end_lsn = recv->end_lsn; - ut_ad(end_lsn <= log_sys->log.scanned_lsn); + ut_ad(end_lsn <= log_sys.log.scanned_lsn); if (recv->len > RECV_DATA_BLOCK_SIZE) { /* We have to copy the record body to a separate @@ -1811,7 +1787,7 @@ recv_recover_page(bool just_read_in, buf_block_t* block) end_lsn = recv->start_lsn + recv->len; mach_write_to_8(FIL_PAGE_LSN + 
page, end_lsn); - mach_write_to_8(UNIV_PAGE_SIZE + mach_write_to_8(srv_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM + page, end_lsn); @@ -1828,7 +1804,7 @@ recv_recover_page(bool just_read_in, buf_block_t* block) recv = UT_LIST_GET_NEXT(rec_list, recv); } - fil_space_release(space); + space->release(); #ifdef UNIV_ZIP_DEBUG if (fil_page_index_page_check(page)) { @@ -2167,7 +2143,7 @@ recv_parse_log_rec( end_ptr)); } - return(new_ptr - ptr); + return ulint(new_ptr - ptr); } /*******************************************************//** @@ -2226,7 +2202,7 @@ recv_report_corrupt_log( const ulint before = std::min(recv_previous_parsed_rec_offset, limit); const ulint after - = std::min(recv_sys->len - (ptr - recv_sys->buf), limit); + = std::min(recv_sys->len - ulint(ptr - recv_sys->buf), limit); ib::info() << "Hex dump starting " << before << " bytes before and" " ending " << after << " bytes after the corrupted record:"; @@ -2234,7 +2210,7 @@ recv_report_corrupt_log( ut_print_buf(stderr, recv_sys->buf + recv_previous_parsed_rec_offset - before, - ptr - recv_sys->buf + before + after + ulint(ptr - recv_sys->buf) + before + after - recv_previous_parsed_rec_offset); putc('\n', stderr); @@ -2343,9 +2319,7 @@ loop: /* Do nothing */ break; case MLOG_CHECKPOINT: -#if SIZE_OF_MLOG_CHECKPOINT != 1 + 8 -# error SIZE_OF_MLOG_CHECKPOINT != 1 + 8 -#endif + compile_time_assert(SIZE_OF_MLOG_CHECKPOINT == 1 + 8); lsn = mach_read_from_8(ptr + 1); DBUG_PRINT("ib_log", @@ -2869,7 +2843,6 @@ recv_scan_log_recs( /** Scans log from a buffer and stores new log data to the parsing buffer. Parses and hashes the log records if new data found. -@param[in,out] group log group @param[in] checkpoint_lsn latest checkpoint log sequence number @param[in,out] contiguous_lsn log sequence number until which all redo log has been scanned @@ -2879,7 +2852,6 @@ can be applied to the tablespaces static bool recv_group_scan_log_recs( - log_group_t* group, lsn_t checkpoint_lsn, lsn_t* contiguous_lsn, bool last_phase) @@ -2908,12 +2880,12 @@ recv_group_scan_log_recs( lsn_t end_lsn; store_t store_to_hash = recv_sys->mlog_checkpoint_lsn == 0 ? STORE_NO : (last_phase ? STORE_IF_EXISTS : STORE_YES); - ulint available_mem = UNIV_PAGE_SIZE + ulint available_mem = srv_page_size * (buf_pool_get_n_pages() - (recv_n_pool_free_frames * srv_buf_pool_instances)); - group->scanned_lsn = end_lsn = *contiguous_lsn = ut_uint64_align_down( - *contiguous_lsn, OS_FILE_LOG_BLOCK_SIZE); + log_sys.log.scanned_lsn = end_lsn = *contiguous_lsn = + ut_uint64_align_down(*contiguous_lsn, OS_FILE_LOG_BLOCK_SIZE); do { if (last_phase && store_to_hash == STORE_NO) { @@ -2928,15 +2900,13 @@ recv_group_scan_log_recs( start_lsn = ut_uint64_align_down(end_lsn, OS_FILE_LOG_BLOCK_SIZE); end_lsn = start_lsn; - log_group_read_log_seg( - log_sys->buf, group, &end_lsn, - start_lsn + RECV_SCAN_SIZE); + log_sys.log.read_log_seg(&end_lsn, start_lsn + RECV_SCAN_SIZE); } while (end_lsn != start_lsn && !recv_scan_log_recs( - available_mem, &store_to_hash, log_sys->buf, + available_mem, &store_to_hash, log_sys.buf, checkpoint_lsn, start_lsn, end_lsn, - contiguous_lsn, &group->scanned_lsn)); + contiguous_lsn, &log_sys.log.scanned_lsn)); if (recv_sys->found_corrupt_log || recv_sys->found_corrupt_fs) { DBUG_RETURN(false); @@ -2944,7 +2914,7 @@ recv_group_scan_log_recs( DBUG_PRINT("ib_log", ("%s " LSN_PF " completed", last_phase ? 
"rescan" : "scan", - group->scanned_lsn)); + log_sys.log.scanned_lsn)); DBUG_RETURN(store_to_hash == STORE_NO); } @@ -3125,7 +3095,6 @@ of first system tablespace page dberr_t recv_recovery_from_checkpoint_start(lsn_t flush_lsn) { - log_group_t* group; ulint max_cp_field; lsn_t checkpoint_lsn; bool rescan; @@ -3153,39 +3122,35 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) log_mutex_enter(); - /* Look for the latest checkpoint from any of the log groups */ - err = recv_find_max_checkpoint(&max_cp_field); if (err != DB_SUCCESS) { - srv_start_lsn = recv_sys->recovered_lsn = log_sys->lsn; + srv_start_lsn = recv_sys->recovered_lsn = log_sys.lsn; log_mutex_exit(); return(err); } - log_group_header_read(&log_sys->log, max_cp_field); + log_header_read(max_cp_field); - buf = log_sys->checkpoint_buf; + buf = log_sys.checkpoint_buf; checkpoint_lsn = mach_read_from_8(buf + LOG_CHECKPOINT_LSN); checkpoint_no = mach_read_from_8(buf + LOG_CHECKPOINT_NO); - /* Start reading the log groups from the checkpoint lsn up. The - variable contiguous_lsn contains an lsn up to which the log is - known to be contiguously written to all log groups. */ - + /* Start reading the log from the checkpoint lsn. The variable + contiguous_lsn contains an lsn up to which the log is known to + be contiguously written. */ recv_sys->mlog_checkpoint_lsn = 0; - ut_ad(RECV_SCAN_SIZE <= log_sys->buf_size); + ut_ad(RECV_SCAN_SIZE <= srv_log_buffer_size); - group = &log_sys->log; const lsn_t end_lsn = mach_read_from_8( buf + LOG_CHECKPOINT_END_LSN); ut_ad(recv_sys->n_addrs == 0); contiguous_lsn = checkpoint_lsn; - switch (group->format) { + switch (log_sys.log.format) { case 0: log_mutex_exit(); return(recv_log_format_0_recover(checkpoint_lsn)); @@ -3203,8 +3168,7 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) } /* Look for MLOG_CHECKPOINT. */ - recv_group_scan_log_recs(group, checkpoint_lsn, &contiguous_lsn, - false); + recv_group_scan_log_recs(checkpoint_lsn, &contiguous_lsn, false); /* The first scan should not have stored or applied any records. 
*/ ut_ad(recv_sys->n_addrs == 0); ut_ad(!recv_sys->found_corrupt_fs); @@ -3221,7 +3185,7 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) } if (recv_sys->mlog_checkpoint_lsn == 0) { - lsn_t scan_lsn = group->scanned_lsn; + lsn_t scan_lsn = log_sys.log.scanned_lsn; if (!srv_read_only_mode && scan_lsn != checkpoint_lsn) { log_mutex_exit(); ib::error err; @@ -3234,12 +3198,12 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) return(DB_ERROR); } - group->scanned_lsn = checkpoint_lsn; + log_sys.log.scanned_lsn = checkpoint_lsn; rescan = false; } else { contiguous_lsn = checkpoint_lsn; rescan = recv_group_scan_log_recs( - group, checkpoint_lsn, &contiguous_lsn, false); + checkpoint_lsn, &contiguous_lsn, false); if ((recv_sys->found_corrupt_log && !srv_force_recovery) || recv_sys->found_corrupt_fs) { @@ -3285,7 +3249,7 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) } } - log_sys->lsn = recv_sys->recovered_lsn; + log_sys.lsn = recv_sys->recovered_lsn; if (recv_needed_recovery) { bool missing_tablespace = false; @@ -3310,8 +3274,7 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) lsn_t recent_stored_lsn = recv_sys->last_stored_lsn; rescan = recv_group_scan_log_recs( - group, checkpoint_lsn, - &recent_stored_lsn, false); + checkpoint_lsn, &recent_stored_lsn, false); ut_ad(!recv_sys->found_corrupt_fs); @@ -3342,8 +3305,8 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) if (rescan) { contiguous_lsn = checkpoint_lsn; - recv_group_scan_log_recs(group, checkpoint_lsn, - &contiguous_lsn, true); + recv_group_scan_log_recs( + checkpoint_lsn, &contiguous_lsn, true); if ((recv_sys->found_corrupt_log && !srv_force_recovery) @@ -3356,12 +3319,11 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) ut_ad(!rescan || recv_sys->n_addrs == 0); } - /* We currently have only one log group */ + if (log_sys.log.scanned_lsn < checkpoint_lsn + || log_sys.log.scanned_lsn < recv_max_page_lsn) { - if (group->scanned_lsn < checkpoint_lsn - || group->scanned_lsn < recv_max_page_lsn) { - - ib::error() << "We scanned the log up to " << group->scanned_lsn + ib::error() << "We scanned the log up to " + << log_sys.log.scanned_lsn << ". A checkpoint was at " << checkpoint_lsn << " and" " the maximum LSN on a database page was " << recv_max_page_lsn << ". It is possible that the" @@ -3377,11 +3339,8 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) return(DB_ERROR); } - /* Synchronize the uncorrupted log groups to the most up-to-date log - group; we also copy checkpoint info to groups */ - - log_sys->next_checkpoint_lsn = checkpoint_lsn; - log_sys->next_checkpoint_no = checkpoint_no + 1; + log_sys.next_checkpoint_lsn = checkpoint_lsn; + log_sys.next_checkpoint_no = checkpoint_no + 1; recv_synchronize_groups(); @@ -3391,24 +3350,24 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) srv_start_lsn = recv_sys->recovered_lsn; } - log_sys->buf_free = (ulint) log_sys->lsn % OS_FILE_LOG_BLOCK_SIZE; - log_sys->buf_next_to_write = log_sys->buf_free; - log_sys->write_lsn = log_sys->lsn; + log_sys.buf_free = ulong(log_sys.lsn % OS_FILE_LOG_BLOCK_SIZE); + log_sys.buf_next_to_write = log_sys.buf_free; + log_sys.write_lsn = log_sys.lsn; - log_sys->last_checkpoint_lsn = checkpoint_lsn; + log_sys.last_checkpoint_lsn = checkpoint_lsn; if (!srv_read_only_mode && srv_operation == SRV_OPERATION_NORMAL) { /* Write a MLOG_CHECKPOINT marker as the first thing, before generating any other redo log. This ensures that subsequent crash recovery will be possible even if the server were killed soon after this. 
*/ - fil_names_clear(log_sys->last_checkpoint_lsn, true); + fil_names_clear(log_sys.last_checkpoint_lsn, true); } MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, - log_sys->lsn - log_sys->last_checkpoint_lsn); + log_sys.lsn - log_sys.last_checkpoint_lsn); - log_sys->next_checkpoint_no = ++checkpoint_no; + log_sys.next_checkpoint_no = ++checkpoint_no; mutex_enter(&recv_sys->mutex); @@ -3514,26 +3473,26 @@ recv_reset_logs( { ut_ad(log_mutex_own()); - log_sys->lsn = ut_uint64_align_up(lsn, OS_FILE_LOG_BLOCK_SIZE); + log_sys.lsn = ut_uint64_align_up(lsn, OS_FILE_LOG_BLOCK_SIZE); - log_sys->log.lsn = log_sys->lsn; - log_sys->log.lsn_offset = LOG_FILE_HDR_SIZE; + log_sys.log.lsn = log_sys.lsn; + log_sys.log.lsn_offset = LOG_FILE_HDR_SIZE; - log_sys->buf_next_to_write = 0; - log_sys->write_lsn = log_sys->lsn; + log_sys.buf_next_to_write = 0; + log_sys.write_lsn = log_sys.lsn; - log_sys->next_checkpoint_no = 0; - log_sys->last_checkpoint_lsn = 0; + log_sys.next_checkpoint_no = 0; + log_sys.last_checkpoint_lsn = 0; - memset(log_sys->buf, 0, log_sys->buf_size); - log_block_init(log_sys->buf, log_sys->lsn); - log_block_set_first_rec_group(log_sys->buf, LOG_BLOCK_HDR_SIZE); + memset(log_sys.buf, 0, srv_log_buffer_size); + log_block_init(log_sys.buf, log_sys.lsn); + log_block_set_first_rec_group(log_sys.buf, LOG_BLOCK_HDR_SIZE); - log_sys->buf_free = LOG_BLOCK_HDR_SIZE; - log_sys->lsn += LOG_BLOCK_HDR_SIZE; + log_sys.buf_free = LOG_BLOCK_HDR_SIZE; + log_sys.lsn += LOG_BLOCK_HDR_SIZE; MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, - (log_sys->lsn - log_sys->last_checkpoint_lsn)); + (log_sys.lsn - log_sys.last_checkpoint_lsn)); log_mutex_exit(); diff --git a/storage/innobase/mem/mem0mem.cc b/storage/innobase/mem/mem0mem.cc index f101d624e88..09f0765d7dc 100644 --- a/storage/innobase/mem/mem0mem.cc +++ b/storage/innobase/mem/mem0mem.cc @@ -126,7 +126,7 @@ mem_heap_printf_low( val = va_arg(ap, unsigned long); - plen = sprintf(tmp, "%lu", val); + plen = size_t(sprintf(tmp, "%lu", val)); len += plen; if (buf) { @@ -219,7 +219,7 @@ mem_heap_validate( break; case MEM_HEAP_BUFFER: case MEM_HEAP_BUFFER | MEM_HEAP_BTR_SEARCH: - ut_ad(block->len <= UNIV_PAGE_SIZE); + ut_ad(block->len <= srv_page_size); break; default: ut_error; @@ -264,13 +264,13 @@ mem_heap_create_block_func( /* In dynamic allocation, calculate the size: block header + data. 
*/ len = MEM_BLOCK_HEADER_SIZE + MEM_SPACE_NEEDED(n); - if (type == MEM_HEAP_DYNAMIC || len < UNIV_PAGE_SIZE / 2) { + if (type == MEM_HEAP_DYNAMIC || len < srv_page_size / 2) { ut_ad(type == MEM_HEAP_DYNAMIC || n <= MEM_MAX_ALLOC_IN_BUF); block = static_cast(ut_malloc_nokey(len)); } else { - len = UNIV_PAGE_SIZE; + len = srv_page_size; if ((type & MEM_HEAP_BTR_SEARCH) && heap) { /* We cannot allocate the block from the @@ -412,7 +412,7 @@ mem_heap_block_free( len = block->len; block->magic_n = MEM_FREED_BLOCK_MAGIC_N; - if (type == MEM_HEAP_DYNAMIC || len < UNIV_PAGE_SIZE / 2) { + if (type == MEM_HEAP_DYNAMIC || len < srv_page_size / 2) { ut_ad(!buf_block); ut_free(block); } else { diff --git a/storage/innobase/mtr/mtr0log.cc b/storage/innobase/mtr/mtr0log.cc index 92ab6466fc3..b789e24f19c 100644 --- a/storage/innobase/mtr/mtr0log.cc +++ b/storage/innobase/mtr/mtr0log.cc @@ -144,7 +144,7 @@ mlog_parse_nbytes( offset = mach_read_from_2(ptr); ptr += 2; - if (offset >= UNIV_PAGE_SIZE) { + if (offset >= srv_page_size) { recv_sys->found_corrupt_log = TRUE; return(NULL); @@ -312,7 +312,7 @@ mlog_write_string( mtr_t* mtr) /*!< in: mini-transaction handle */ { ut_ad(ptr && mtr); - ut_a(len < UNIV_PAGE_SIZE); + ut_a(len < srv_page_size); memcpy(ptr, str, len); @@ -332,7 +332,7 @@ mlog_log_string( byte* log_ptr; ut_ad(ptr && mtr); - ut_ad(len <= UNIV_PAGE_SIZE); + ut_ad(len <= srv_page_size); log_ptr = mlog_open(mtr, 30); @@ -383,7 +383,7 @@ mlog_parse_string( len = mach_read_from_2(ptr); ptr += 2; - if (offset >= UNIV_PAGE_SIZE || len + offset > UNIV_PAGE_SIZE) { + if (offset >= srv_page_size || len + offset > srv_page_size) { recv_sys->found_corrupt_log = TRUE; return(NULL); @@ -426,11 +426,20 @@ mlog_open_and_write_index( ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table)); + mtr->set_modified(); + switch (mtr->get_log_mode()) { + case MTR_LOG_NONE: + case MTR_LOG_NO_REDO: + return NULL; + case MTR_LOG_SHORT_INSERTS: + ut_ad(0); + /* fall through */ + case MTR_LOG_ALL: + break; + } + if (!page_rec_is_comp(rec)) { - log_start = log_ptr = mlog_open(mtr, 11 + size); - if (!log_ptr) { - return(NULL); /* logging is disabled */ - } + log_start = log_ptr = mtr->get_log()->open(11 + size); log_ptr = mlog_write_initial_log_record_fast(rec, type, log_ptr, mtr); log_end = log_ptr + 11 + size; @@ -439,11 +448,8 @@ mlog_open_and_write_index( bool is_instant = index->is_instant(); ulint n = dict_index_get_n_fields(index); ulint total = 11 + (is_instant ? 2 : 0) + size + (n + 2) * 2; - ulint alloc = total; - - if (alloc > mtr_buf_t::MAX_DATA_SIZE) { - alloc = mtr_buf_t::MAX_DATA_SIZE; - } + ulint alloc = std::min(total, + ulint(mtr_buf_t::MAX_DATA_SIZE)); const bool is_leaf = page_is_leaf(page_align(rec)); @@ -453,12 +459,7 @@ mlog_open_and_write_index( n = DICT_INDEX_SPATIAL_NODEPTR_SIZE; } - log_start = log_ptr = mlog_open(mtr, alloc); - - if (!log_ptr) { - return(NULL); /* logging is disabled */ - } - + log_start = log_ptr = mtr->get_log()->open(alloc); log_end = log_ptr + alloc; log_ptr = mlog_write_initial_log_record_fast( @@ -477,16 +478,10 @@ mlog_open_and_write_index( } log_ptr += 2; - - if (is_leaf) { - mach_write_to_2( - log_ptr, dict_index_get_n_unique_in_tree(index)); - } else { - mach_write_to_2( - log_ptr, - dict_index_get_n_unique_in_tree_nonleaf(index)); - } - + mach_write_to_2( + log_ptr, is_leaf + ? 
dict_index_get_n_unique_in_tree(index) + : dict_index_get_n_unique_in_tree_nonleaf(index)); log_ptr += 2; for (i = 0; i < n; i++) { @@ -509,19 +504,14 @@ mlog_open_and_write_index( } if (log_ptr + 2 > log_end) { mlog_close(mtr, log_ptr); - ut_a(total > (ulint) (log_ptr - log_start)); - total -= log_ptr - log_start; - alloc = total; + ut_a(total > ulint(log_ptr - log_start)); + total -= ulint(log_ptr - log_start); + alloc = std::min( + total, + ulint(mtr_buf_t::MAX_DATA_SIZE)); - if (alloc > mtr_buf_t::MAX_DATA_SIZE) { - alloc = mtr_buf_t::MAX_DATA_SIZE; - } - - log_start = log_ptr = mlog_open(mtr, alloc); - - if (!log_ptr) { - return(NULL); /* logging is disabled */ - } + log_start = log_ptr = mtr->get_log()->open( + alloc); log_end = log_ptr + alloc; } mach_write_to_2(log_ptr, len); @@ -641,7 +631,7 @@ mlog_parse_index( ind->get_n_nullable(n_core_fields)); } else { ind->n_core_null_bytes = UT_BITS_IN_BYTES( - ind->n_nullable); + unsigned(ind->n_nullable)); ind->n_core_fields = ind->n_fields; } } diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc index 86540a3bc2d..215bfbaf42c 100644 --- a/storage/innobase/mtr/mtr0mtr.cc +++ b/storage/innobase/mtr/mtr0mtr.cc @@ -481,7 +481,7 @@ mtr_write_log( ut_ad(!recv_no_log_write); DBUG_PRINT("ib_log", (ULINTPF " extra bytes written at " LSN_PF, - len, log_sys->lsn)); + len, log_sys.lsn)); log_reserve_and_open(len); log->for_each_block(write_log); @@ -489,10 +489,9 @@ mtr_write_log( } /** Start a mini-transaction. -@param sync true if it is a synchronous mini-transaction -@param read_only true if read only mini-transaction */ +@param sync true if it is a synchronous mini-transaction */ void -mtr_t::start(bool sync, bool read_only) +mtr_t::start(bool sync) { UNIV_MEM_INVALID(this, sizeof(*this)); @@ -610,9 +609,7 @@ mtr_t::commit_checkpoint( if (write_mlog_checkpoint) { byte* ptr = m_impl.m_log.push(SIZE_OF_MLOG_CHECKPOINT); -#if SIZE_OF_MLOG_CHECKPOINT != 9 -# error SIZE_OF_MLOG_CHECKPOINT != 9 -#endif + compile_time_assert(SIZE_OF_MLOG_CHECKPOINT == 1 + 8); *ptr = MLOG_CHECKPOINT; mach_write_to_8(ptr + 1, checkpoint_lsn); } @@ -624,7 +621,7 @@ mtr_t::commit_checkpoint( if (write_mlog_checkpoint) { DBUG_PRINT("ib_log", ("MLOG_CHECKPOINT(" LSN_PF ") written at " LSN_PF, - checkpoint_lsn, log_sys->lsn)); + checkpoint_lsn, log_sys.lsn)); } } @@ -774,7 +771,7 @@ mtr_t::Command::prepare_write() case MTR_LOG_NONE: ut_ad(m_impl->m_log.size() == 0); log_mutex_enter(); - m_end_lsn = m_start_lsn = log_sys->lsn; + m_end_lsn = m_start_lsn = log_sys.lsn; return(0); case MTR_LOG_ALL: break; @@ -785,8 +782,8 @@ mtr_t::Command::prepare_write() ut_ad(len > 0); ut_ad(n_recs > 0); - if (len > log_sys->buf_size / 2) { - log_buffer_extend((len + 1) * 2); + if (len > srv_log_buffer_size / 2) { + log_buffer_extend(ulong((len + 1) * 2)); } ut_ad(m_impl->m_n_log_recs == n_recs); diff --git a/storage/innobase/mysql-test/storage_engine/repair_table.rdiff b/storage/innobase/mysql-test/storage_engine/repair_table.rdiff index 717d437b2d1..e9c46b3a6c1 100644 --- a/storage/innobase/mysql-test/storage_engine/repair_table.rdiff +++ b/storage/innobase/mysql-test/storage_engine/repair_table.rdiff @@ -78,7 +78,7 @@ DROP TABLE t1, t2; call mtr.add_suppression("Got an error from thread_id=.*"); call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table"); -@@ -62,45 +63,32 @@ +@@ -63,46 +64,33 @@ CREATE TABLE t1 (a , b , (a)) ENGINE= ; REPAIR TABLE t1; Table Op Msg_type Msg_text @@ -94,9 +94,10 @@ Table Op Msg_type Msg_text -test.t1 
repair warning Number of rows changed from 0 to 3 -test.t1 repair status OK ++test.t1 repair note The storage engine for the table doesn't support repair + db.opt -t1.MYD -t1.MYI -+test.t1 repair note The storage engine for the table doesn't support repair t1.frm +t1.ibd INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); diff --git a/storage/innobase/os/os0event.cc b/storage/innobase/os/os0event.cc index 98b474c0bda..73067bf7fe7 100644 --- a/storage/innobase/os/os0event.cc +++ b/storage/innobase/os/os0event.cc @@ -48,7 +48,7 @@ typedef os_event_list_t::iterator event_iter_t; /** InnoDB condition variable. */ struct os_event { - os_event(const char* name) UNIV_NOTHROW; + os_event() UNIV_NOTHROW; ~os_event() UNIV_NOTHROW; @@ -415,7 +415,7 @@ os_event::wait_time_low( } /** Constructor */ -os_event::os_event(const char* name) UNIV_NOTHROW +os_event::os_event() UNIV_NOTHROW { init(); @@ -444,14 +444,9 @@ Creates an event semaphore, i.e., a semaphore which may just have two states: signaled and nonsignaled. The created event is manual reset: it must be reset explicitly by calling sync_os_reset_event. @return the event handle */ -os_event_t -os_event_create( -/*============*/ - const char* name) /*!< in: the name of the - event, if NULL the event - is created without a name */ +os_event_t os_event_create(const char*) { - return(UT_NEW_NOKEY(os_event(name))); + return(UT_NEW_NOKEY(os_event())); } /** diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index 6bbda59188d..8dcf936532a 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -698,10 +698,6 @@ bool os_has_said_disk_full; /** Default Zip compression level */ extern uint page_zip_level; -#if DATA_TRX_ID_LEN > 6 -#error "COMPRESSION_ALGORITHM will not fit" -#endif /* DATA_TRX_ID_LEN */ - /** Validates the consistency of the aio system. @return true if ok */ static @@ -866,8 +862,10 @@ os_file_get_block_size( 0, OPEN_EXISTING, 0, 0); if (volume_handle == INVALID_HANDLE_VALUE) { - os_file_handle_error_no_exit(volume, - "CreateFile()", FALSE); + if (GetLastError() != ERROR_ACCESS_DENIED) { + os_file_handle_error_no_exit(volume, + "CreateFile()", FALSE); + } goto end; } @@ -889,16 +887,7 @@ os_file_get_block_size( if (!result) { DWORD err = GetLastError(); - if (err == ERROR_INVALID_FUNCTION || err == ERROR_NOT_SUPPORTED) { - // Don't report error, it is driver's fault, not ours or users. - // We handle this with fallback. Report wit info message, just once. - static bool write_info = true; - if (write_info) { - ib::info() << "DeviceIoControl(IOCTL_STORAGE_QUERY_PROPERTY)" - << " unsupported on volume " << volume; - write_info = false; - } - } else { + if (err != ERROR_INVALID_FUNCTION && err != ERROR_NOT_SUPPORTED) { os_file_handle_error_no_exit(volume, "DeviceIoControl(IOCTL_STORAGE_QUERY_PROPERTY)", FALSE); } @@ -1038,7 +1027,7 @@ AIOHandler::post_io_processing(Slot* slot) ut_ad(slot->is_reserved); /* Total bytes read so far */ - ulint n_bytes = (slot->ptr - slot->buf) + slot->n_bytes; + ulint n_bytes = ulint(slot->ptr - slot->buf) + slot->n_bytes; return(n_bytes == slot->original_len ? 
DB_SUCCESS : DB_FAIL); } @@ -1253,10 +1242,23 @@ os_file_create_tmpfile() { FILE* file = NULL; WAIT_ALLOW_WRITES(); - int fd = innobase_mysql_tmpfile(NULL); + os_file_t fd = innobase_mysql_tmpfile(NULL); - if (fd >= 0) { + if (fd != OS_FILE_CLOSED) { +#ifdef _WIN32 + int crt_fd = _open_osfhandle((intptr_t)HANDLE(fd), 0); + if (crt_fd != -1) { + file = fdopen(crt_fd, "w+b"); + if (!file) { + close(crt_fd); + } + } +#else file = fdopen(fd, "w+b"); + if (!file) { + close(fd); + } +#endif } if (file == NULL) { @@ -1264,10 +1266,6 @@ os_file_create_tmpfile() ib::error() << "Unable to create temporary file; errno: " << errno; - - if (fd >= 0) { - close(fd); - } } return(file); @@ -1325,7 +1323,7 @@ os_file_make_new_pathname( /* Find the offset of the last slash. We will strip off the old basename.ibd which starts after that slash. */ last_slash = strrchr((char*) old_path, OS_PATH_SEPARATOR); - dir_len = last_slash ? last_slash - old_path : strlen(old_path); + dir_len = last_slash ? ulint(last_slash - old_path) : strlen(old_path); /* allocate a new path and move the old directory path to it. */ new_path_len = dir_len + strlen(base_name) + sizeof "/.ibd"; @@ -1472,7 +1470,7 @@ os_file_get_parent_dir( /* Non-trivial directory component */ - return(mem_strdupl(path, last_slash - path)); + return(mem_strdupl(path, ulint(last_slash - path))); } #ifdef UNIV_ENABLE_UNIT_TEST_GET_PARENT_DIR @@ -2299,23 +2297,23 @@ AIO::is_linux_native_aio_supported() memset(&io_event, 0x0, sizeof(io_event)); - byte* buf = static_cast(ut_malloc_nokey(UNIV_PAGE_SIZE * 2)); - byte* ptr = static_cast(ut_align(buf, UNIV_PAGE_SIZE)); + byte* buf = static_cast(ut_malloc_nokey(srv_page_size * 2)); + byte* ptr = static_cast(ut_align(buf, srv_page_size)); struct iocb iocb; /* Suppress valgrind warning. */ - memset(buf, 0x00, UNIV_PAGE_SIZE * 2); + memset(buf, 0x00, srv_page_size * 2); memset(&iocb, 0x0, sizeof(iocb)); struct iocb* p_iocb = &iocb; if (!srv_read_only_mode) { - io_prep_pwrite(p_iocb, fd, ptr, UNIV_PAGE_SIZE, 0); + io_prep_pwrite(p_iocb, fd, ptr, srv_page_size, 0); } else { - ut_a(UNIV_PAGE_SIZE >= 512); + ut_a(srv_page_size >= 512); io_prep_pread(p_iocb, fd, ptr, 512, 0); } @@ -3286,7 +3284,7 @@ os_file_get_size( /* st_blocks is in 512 byte sized blocks */ file_size.m_alloc_size = s.st_blocks * 512; } else { - file_size.m_total_size = ~0; + file_size.m_total_size = ~0U; file_size.m_alloc_size = (os_offset_t) errno; } @@ -4845,7 +4843,7 @@ os_file_io( os_offset_t offset, dberr_t* err) { - ulint original_n = n; + ssize_t original_n = ssize_t(n); IORequest type = in_type; ssize_t bytes_returned = 0; @@ -4860,7 +4858,7 @@ os_file_io( break; - } else if ((ulint) n_bytes + bytes_returned == n) { + } else if (n_bytes + bytes_returned == ssize_t(n)) { bytes_returned += n_bytes; @@ -4879,9 +4877,9 @@ os_file_io( /* Handle partial read/write. 
*/ - ut_ad((ulint) n_bytes + bytes_returned < n); + ut_ad(ulint(n_bytes + bytes_returned) < n); - bytes_returned += (ulint) n_bytes; + bytes_returned += n_bytes; if (!type.is_partial_io_warning_disabled()) { @@ -5205,7 +5203,7 @@ os_file_set_nocache( ib::error() << "Failed to set DIRECTIO_ON on file " - << file_name << ": " << operation_name + << file_name << "; " << operation_name << ": " << strerror(errno_save) << "," " continuing anyway."; } @@ -5219,9 +5217,9 @@ os_file_set_nocache( # ifdef UNIV_LINUX ib::warn() << "Failed to set O_DIRECT on file" - << file_name << ";" << operation_name + << file_name << "; " << operation_name << ": " << strerror(errno_save) << ", " - << "ccontinuing anyway. O_DIRECT is " + "continuing anyway. O_DIRECT is " "known to result in 'Invalid argument' " "on Linux on tmpfs, " "see MySQL Bug#26662."; @@ -5237,7 +5235,7 @@ short_warning: << "Failed to set O_DIRECT on file " << file_name << "; " << operation_name << " : " << strerror(errno_save) - << " continuing anyway."; + << ", continuing anyway."; } } #endif /* defined(UNIV_SOLARIS) && defined(DIRECTIO_ON) */ @@ -5325,18 +5323,16 @@ fallback: #endif /* _WIN32*/ /* Write up to 1 megabyte at a time. */ - ulint buf_size = ut_min( - static_cast(64), - static_cast(size / UNIV_PAGE_SIZE)); - - buf_size *= UNIV_PAGE_SIZE; + ulint buf_size = ut_min(ulint(64), + ulint(size >> srv_page_size_shift)) + << srv_page_size_shift; /* Align the buffer for possible raw i/o */ byte* buf2; - buf2 = static_cast(ut_malloc_nokey(buf_size + UNIV_PAGE_SIZE)); + buf2 = static_cast(ut_malloc_nokey(buf_size + srv_page_size)); - byte* buf = static_cast(ut_align(buf2, UNIV_PAGE_SIZE)); + byte* buf = static_cast(ut_align(buf2, srv_page_size)); /* Write buffer full of zeros */ memset(buf, 0, buf_size); @@ -5559,7 +5555,7 @@ os_is_sparse_file_supported(os_file_t fh) /* We don't know the FS block size, use the sector size. The FS will do the magic. */ - err = os_file_punch_hole_posix(fh, 0, UNIV_PAGE_SIZE); + err = os_file_punch_hole_posix(fh, 0, srv_page_size); return(err == DB_SUCCESS); #endif /* _WIN32 */ @@ -6170,7 +6166,7 @@ AIO::reserve_slot( doing simulated AIO */ ulint local_seg; - local_seg = (offset >> (UNIV_PAGE_SIZE_SHIFT + 6)) % m_n_segments; + local_seg = (offset >> (srv_page_size_shift + 6)) % m_n_segments; for (;;) { @@ -6851,10 +6847,10 @@ public: } m_ptr = static_cast( - ut_malloc_nokey(len + UNIV_PAGE_SIZE)); + ut_malloc_nokey(len + srv_page_size)); m_buf = static_cast( - ut_align(m_ptr, UNIV_PAGE_SIZE)); + ut_align(m_ptr, srv_page_size)); } else { len = first_slot()->len; diff --git a/storage/innobase/os/os0thread.cc b/storage/innobase/os/os0thread.cc index 3986c4f4c44..bbb24fae3b8 100644 --- a/storage/innobase/os/os0thread.cc +++ b/storage/innobase/os/os0thread.cc @@ -142,7 +142,7 @@ os_thread_create_func( #endif /* not _WIN32 */ - ut_a(os_thread_count <= OS_THREAD_MAX_N); + ut_a(os_thread_count <= srv_max_n_threads); /* Return the thread_id if the caller requests it. */ if (thread_id != NULL) { @@ -187,7 +187,7 @@ os_thread_exit(bool detach) pfs_delete_thread(); #endif - my_atomic_addlint(&os_thread_count, -1); + my_atomic_addlint(&os_thread_count, ulint(-1)); #ifdef _WIN32 ExitThread(0); diff --git a/storage/innobase/page/page0cur.cc b/storage/innobase/page/page0cur.cc index 0316f33d8af..9effe74fa7c 100644 --- a/storage/innobase/page/page0cur.cc +++ b/storage/innobase/page/page0cur.cc @@ -413,7 +413,7 @@ page_cur_search_with_match( owned by the upper limit directory slot. 
*/ low = 0; - up = page_dir_get_n_slots(page) - 1; + up = ulint(page_dir_get_n_slots(page)) - 1; /* Perform binary search until the lower and upper limit directory slots come to the distance 1 of each other */ @@ -659,7 +659,7 @@ page_cur_search_with_match_bytes( owned by the upper limit directory slot. */ low = 0; - up = page_dir_get_n_slots(page) - 1; + up = ulint(page_dir_get_n_slots(page)) - 1; /* Perform binary search until the lower and upper limit directory slots come to the distance 1 of each other */ @@ -844,12 +844,12 @@ page_cur_insert_rec_write_log( const byte* log_end; ulint i; - if (dict_table_is_temporary(index->table)) { + if (index->table->is_temporary()) { ut_ad(!mlog_open(mtr, 0)); return; } - ut_a(rec_size < UNIV_PAGE_SIZE); + ut_a(rec_size < srv_page_size); ut_ad(mtr->is_named_space(index->table->space)); ut_ad(page_align(insert_rec) == page_align(cursor_rec)); ut_ad(!page_rec_is_comp(insert_rec) @@ -992,8 +992,8 @@ need_extra_info: /* Write the mismatch index */ log_ptr += mach_write_compressed(log_ptr, i); - ut_a(i < UNIV_PAGE_SIZE); - ut_a(extra_size < UNIV_PAGE_SIZE); + ut_a(i < srv_page_size); + ut_a(extra_size < srv_page_size); } else { /* Write the record end segment length and the extra info storage flag */ @@ -1010,7 +1010,7 @@ need_extra_info: mlog_close(mtr, log_ptr + rec_size); } else { mlog_close(mtr, log_ptr); - ut_a(rec_size < UNIV_PAGE_SIZE); + ut_a(rec_size < srv_page_size); mlog_catenate_string(mtr, ins_ptr, rec_size); } } @@ -1062,7 +1062,7 @@ page_cur_parse_insert_rec( cursor_rec = page + offset; - if (offset >= UNIV_PAGE_SIZE) { + if (offset >= srv_page_size) { recv_sys->found_corrupt_log = TRUE; @@ -1077,7 +1077,7 @@ page_cur_parse_insert_rec( return(NULL); } - if (end_seg_len >= UNIV_PAGE_SIZE << 1) { + if (end_seg_len >= srv_page_size << 1) { recv_sys->found_corrupt_log = TRUE; return(NULL); @@ -1101,7 +1101,7 @@ page_cur_parse_insert_rec( return(NULL); } - ut_a(origin_offset < UNIV_PAGE_SIZE); + ut_a(origin_offset < srv_page_size); mismatch_index = mach_parse_compressed(&ptr, end_ptr); @@ -1110,7 +1110,7 @@ page_cur_parse_insert_rec( return(NULL); } - ut_a(mismatch_index < UNIV_PAGE_SIZE); + ut_a(mismatch_index < srv_page_size); } if (end_ptr < ptr + (end_seg_len >> 1)) { @@ -1152,7 +1152,7 @@ page_cur_parse_insert_rec( /* Build the inserted record to buf */ - if (UNIV_UNLIKELY(mismatch_index >= UNIV_PAGE_SIZE)) { + if (UNIV_UNLIKELY(mismatch_index >= srv_page_size)) { ib::fatal() << "is_short " << is_short << ", " << "info_and_status_bits " << info_and_status_bits @@ -1240,9 +1240,9 @@ page_direction_increment( if (page_zip) { page_zip_write_header(page_zip, ptr, 1, NULL); } - page_header_set_field(page, page_zip, PAGE_N_DIRECTION, - page_header_get_field(page, PAGE_N_DIRECTION) - + 1); + page_header_set_field( + page, page_zip, PAGE_N_DIRECTION, + 1U + page_header_get_field(page, PAGE_N_DIRECTION)); } /***********************************************************//** @@ -1391,7 +1391,7 @@ use_heap: } page_header_set_field(page, NULL, PAGE_N_RECS, - 1 + page_get_n_recs(page)); + 1U + page_get_n_recs(page)); /* 5. Set the n_owned field in the inserted record to zero, and set the heap_no field */ @@ -1743,14 +1743,13 @@ too_small: /* On compressed pages, do not relocate records from the free list. If extra_size would grow, use the heap. 
*/ - extra_size_diff - = rec_offs_extra_size(offsets) - - rec_offs_extra_size(foffsets); + extra_size_diff = lint(rec_offs_extra_size(offsets) + - rec_offs_extra_size(foffsets)); if (UNIV_UNLIKELY(extra_size_diff < 0)) { /* Add an offset to the extra_size. */ if (rec_offs_size(foffsets) - < rec_size - extra_size_diff) { + < rec_size - ulint(extra_size_diff)) { goto too_small; } @@ -1853,7 +1852,7 @@ use_heap: } page_header_set_field(page, page_zip, PAGE_N_RECS, - 1 + page_get_n_recs(page)); + 1U + page_get_n_recs(page)); /* 5. Set the n_owned field in the inserted record to zero, and set the heap_no field */ @@ -2067,9 +2066,9 @@ page_copy_rec_list_end_to_created_page( #ifdef UNIV_DEBUG /* To pass the debug tests we have to set these dummy values in the debug version */ - page_dir_set_n_slots(new_page, NULL, UNIV_PAGE_SIZE / 2); + page_dir_set_n_slots(new_page, NULL, srv_page_size / 2); page_header_set_ptr(new_page, NULL, PAGE_HEAP_TOP, - new_page + UNIV_PAGE_SIZE - 1); + new_page + srv_page_size - 1); #endif log_ptr = page_copy_rec_list_to_created_page_write_log(new_page, index, mtr); @@ -2078,7 +2077,7 @@ page_copy_rec_list_end_to_created_page( /* Individual inserts are logged in a shorter form */ - const mtr_log_t log_mode = dict_table_is_temporary(index->table) + const mtr_log_t log_mode = index->table->is_temporary() || !index->is_readable() /* IMPORT TABLESPACE */ ? mtr_get_log_mode(mtr) : mtr_set_log_mode(mtr, MTR_LOG_SHORT_INSERTS); @@ -2134,7 +2133,7 @@ page_copy_rec_list_end_to_created_page( rec_size = rec_offs_size(offsets); - ut_ad(heap_top < new_page + UNIV_PAGE_SIZE); + ut_ad(heap_top < new_page + srv_page_size); heap_top += rec_size; @@ -2172,7 +2171,7 @@ page_copy_rec_list_end_to_created_page( log_data_len = mtr->get_log()->size() - log_data_len; - ut_a(log_data_len < 100 * UNIV_PAGE_SIZE); + ut_a(log_data_len < 100U << srv_page_size_shift); if (log_ptr != NULL) { mach_write_to_4(log_ptr, log_data_len); @@ -2256,7 +2255,7 @@ page_cur_parse_delete_rec( offset = mach_read_from_2(ptr); ptr += 2; - ut_a(offset <= UNIV_PAGE_SIZE); + ut_a(offset <= srv_page_size); if (block) { page_t* page = buf_block_get_frame(block); @@ -2399,9 +2398,7 @@ page_cur_delete_rec( prev_rec is owned by the same slot, i.e., PAGE_DIR_SLOT_MIN_N_OWNED >= 2. 
*/ -#if PAGE_DIR_SLOT_MIN_N_OWNED < 2 -# error "PAGE_DIR_SLOT_MIN_N_OWNED < 2" -#endif + compile_time_assert(PAGE_DIR_SLOT_MIN_N_OWNED >= 2); ut_ad(cur_n_owned > 1); if (current_rec == page_dir_slot_get_rec(cur_dir_slot)) { diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc index c02bdb74ce6..d438e4b64e0 100644 --- a/storage/innobase/page/page0page.cc +++ b/storage/innobase/page/page0page.cc @@ -87,33 +87,29 @@ page_dir_find_owner_slot( /*=====================*/ const rec_t* rec) /*!< in: the physical record */ { - const page_t* page; - register uint16 rec_offs_bytes; - register const page_dir_slot_t* slot; - register const page_dir_slot_t* first_slot; - register const rec_t* r = rec; - ut_ad(page_rec_check(rec)); - page = page_align(rec); - first_slot = page_dir_get_nth_slot(page, 0); - slot = page_dir_get_nth_slot(page, page_dir_get_n_slots(page) - 1); + const page_t* page = page_align(rec); + const page_dir_slot_t* first_slot = page_dir_get_nth_slot(page, 0); + const page_dir_slot_t* slot = page_dir_get_nth_slot( + page, ulint(page_dir_get_n_slots(page)) - 1); + const rec_t* r = rec; if (page_is_comp(page)) { while (rec_get_n_owned_new(r) == 0) { r = rec_get_next_ptr_const(r, TRUE); ut_ad(r >= page + PAGE_NEW_SUPREMUM); - ut_ad(r < page + (UNIV_PAGE_SIZE - PAGE_DIR)); + ut_ad(r < page + (srv_page_size - PAGE_DIR)); } } else { while (rec_get_n_owned_old(r) == 0) { r = rec_get_next_ptr_const(r, FALSE); ut_ad(r >= page + PAGE_OLD_SUPREMUM); - ut_ad(r < page + (UNIV_PAGE_SIZE - PAGE_DIR)); + ut_ad(r < page + (srv_page_size - PAGE_DIR)); } } - rec_offs_bytes = mach_encode_2(r - page); + uint16 rec_offs_bytes = mach_encode_2(ulint(r - page)); while (UNIV_LIKELY(*(uint16*) slot != rec_offs_bytes)) { @@ -361,12 +357,10 @@ page_create_low( { page_t* page; -#if PAGE_BTR_IBUF_FREE_LIST + FLST_BASE_NODE_SIZE > PAGE_DATA -# error "PAGE_BTR_IBUF_FREE_LIST + FLST_BASE_NODE_SIZE > PAGE_DATA" -#endif -#if PAGE_BTR_IBUF_FREE_LIST_NODE + FLST_NODE_SIZE > PAGE_DATA -# error "PAGE_BTR_IBUF_FREE_LIST_NODE + FLST_NODE_SIZE > PAGE_DATA" -#endif + compile_time_assert(PAGE_BTR_IBUF_FREE_LIST + FLST_BASE_NODE_SIZE + <= PAGE_DATA); + compile_time_assert(PAGE_BTR_IBUF_FREE_LIST_NODE + FLST_NODE_SIZE + <= PAGE_DATA); buf_block_modify_clock_inc(block); @@ -391,10 +385,10 @@ page_create_low( sizeof infimum_supremum_compact); memset(page + PAGE_NEW_SUPREMUM_END, 0, - UNIV_PAGE_SIZE - PAGE_DIR - PAGE_NEW_SUPREMUM_END); - page[UNIV_PAGE_SIZE - PAGE_DIR - PAGE_DIR_SLOT_SIZE * 2 + 1] + srv_page_size - PAGE_DIR - PAGE_NEW_SUPREMUM_END); + page[srv_page_size - PAGE_DIR - PAGE_DIR_SLOT_SIZE * 2 + 1] = PAGE_NEW_SUPREMUM; - page[UNIV_PAGE_SIZE - PAGE_DIR - PAGE_DIR_SLOT_SIZE + 1] + page[srv_page_size - PAGE_DIR - PAGE_DIR_SLOT_SIZE + 1] = PAGE_NEW_INFIMUM; } else { page[PAGE_HEADER + PAGE_N_HEAP + 1] = PAGE_HEAP_NO_USER_LOW; @@ -403,10 +397,10 @@ page_create_low( sizeof infimum_supremum_redundant); memset(page + PAGE_OLD_SUPREMUM_END, 0, - UNIV_PAGE_SIZE - PAGE_DIR - PAGE_OLD_SUPREMUM_END); - page[UNIV_PAGE_SIZE - PAGE_DIR - PAGE_DIR_SLOT_SIZE * 2 + 1] + srv_page_size - PAGE_DIR - PAGE_OLD_SUPREMUM_END); + page[srv_page_size - PAGE_DIR - PAGE_DIR_SLOT_SIZE * 2 + 1] = PAGE_OLD_SUPREMUM; - page[UNIV_PAGE_SIZE - PAGE_DIR - PAGE_DIR_SLOT_SIZE + 1] + page[srv_page_size - PAGE_DIR - PAGE_DIR_SLOT_SIZE + 1] = PAGE_OLD_INFIMUM; } @@ -479,19 +473,19 @@ page_create_zip( /* PAGE_MAX_TRX_ID or PAGE_ROOT_AUTO_INC are always 0 for temporary tables. 
*/ - ut_ad(max_trx_id == 0 || !dict_table_is_temporary(index->table)); + ut_ad(max_trx_id == 0 || !index->table->is_temporary()); /* In secondary indexes and the change buffer, PAGE_MAX_TRX_ID must be zero on non-leaf pages. max_trx_id can be 0 when the index consists of an empty root (leaf) page. */ ut_ad(max_trx_id == 0 || level == 0 || !dict_index_is_sec_or_ibuf(index) - || dict_table_is_temporary(index->table)); + || index->table->is_temporary()); /* In the clustered index, PAGE_ROOT_AUTOINC or PAGE_MAX_TRX_ID must be 0 on other pages than the root. */ ut_ad(level == 0 || max_trx_id == 0 || !dict_index_is_sec_or_ibuf(index) - || dict_table_is_temporary(index->table)); + || index->table->is_temporary()); page = page_create_low(block, TRUE, is_spatial); mach_write_to_2(PAGE_HEADER + PAGE_LEVEL + page, level); @@ -537,7 +531,7 @@ page_create_empty( max_trx_id is ignored for temp tables because it not required for MVCC. */ if (dict_index_is_sec_or_ibuf(index) - && !dict_table_is_temporary(index->table) + && !index->table->is_temporary() && page_is_leaf(page)) { max_trx_id = page_get_max_trx_id(page); ut_ad(max_trx_id); @@ -549,7 +543,7 @@ page_create_empty( } if (page_zip) { - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); page_create_zip(block, index, page_header_get_field(page, PAGE_LEVEL), max_trx_id, NULL, mtr); @@ -598,7 +592,7 @@ page_copy_rec_list_end_no_locks( btr_assert_not_corrupted(new_block, index); ut_a(page_is_comp(new_page) == page_rec_is_comp(rec)); - ut_a(mach_read_from_2(new_page + UNIV_PAGE_SIZE - 10) == (ulint) + ut_a(mach_read_from_2(new_page + srv_page_size - 10) == (ulint) (page_is_comp(new_page) ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM)); const bool is_leaf = page_is_leaf(block->frame); @@ -719,7 +713,7 @@ page_copy_rec_list_end( for MVCC. */ if (dict_index_is_sec_or_ibuf(index) && page_is_leaf(page) - && !dict_table_is_temporary(index->table)) { + && !index->table->is_temporary()) { page_update_max_trx_id(new_block, NULL, page_get_max_trx_id(page), mtr); } @@ -880,7 +874,7 @@ page_copy_rec_list_start( max_trx_id is ignored for temp tables because it not required for MVCC. 
*/ if (is_leaf && dict_index_is_sec_or_ibuf(index) - && !dict_table_is_temporary(index->table)) { + && !index->table->is_temporary()) { page_update_max_trx_id(new_block, NULL, page_get_max_trx_id(page_align(rec)), mtr); @@ -1056,7 +1050,7 @@ page_delete_rec_list_end( ulint* offsets = offsets_; rec_offs_init(offsets_); - ut_ad(size == ULINT_UNDEFINED || size < UNIV_PAGE_SIZE); + ut_ad(size == ULINT_UNDEFINED || size < srv_page_size); ut_ad(!page_zip || page_rec_is_comp(rec)); #ifdef UNIV_ZIP_DEBUG ut_a(!page_zip || page_zip_validate(page_zip, page, index)); @@ -1161,9 +1155,10 @@ delete_all: is_leaf, ULINT_UNDEFINED, &heap); s = rec_offs_size(offsets); - ut_ad(rec2 - page + s - rec_offs_extra_size(offsets) - < UNIV_PAGE_SIZE); - ut_ad(size + s < UNIV_PAGE_SIZE); + ut_ad(ulint(rec2 - page) + s + - rec_offs_extra_size(offsets) + < srv_page_size); + ut_ad(size + s < srv_page_size); size += s; n_recs++; @@ -1180,7 +1175,7 @@ delete_all: } } - ut_ad(size < UNIV_PAGE_SIZE); + ut_ad(size < srv_page_size); /* Update the page directory; there is no need to balance the number of the records owned by the supremum record, as it is allowed to be @@ -1635,7 +1630,7 @@ page_rec_get_nth_const( return(page_get_infimum_rec(page)); } - ut_ad(nth < UNIV_PAGE_SIZE / (REC_N_NEW_EXTRA_BYTES + 1)); + ut_ad(nth < srv_page_size / (REC_N_NEW_EXTRA_BYTES + 1)); for (i = 0;; i++) { @@ -1697,7 +1692,7 @@ page_rec_get_n_recs_before( slot = page_dir_get_nth_slot(page, i); slot_rec = page_dir_slot_get_rec(slot); - n += rec_get_n_owned_new(slot_rec); + n += lint(rec_get_n_owned_new(slot_rec)); if (rec == slot_rec) { @@ -1715,7 +1710,7 @@ page_rec_get_n_recs_before( slot = page_dir_get_nth_slot(page, i); slot_rec = page_dir_slot_get_rec(slot); - n += rec_get_n_owned_old(slot_rec); + n += lint(rec_get_n_owned_old(slot_rec)); if (rec == slot_rec) { @@ -1727,7 +1722,7 @@ page_rec_get_n_recs_before( n--; ut_ad(n >= 0); - ut_ad((ulong) n < UNIV_PAGE_SIZE / (REC_N_NEW_EXTRA_BYTES + 1)); + ut_ad((ulong) n < srv_page_size / (REC_N_NEW_EXTRA_BYTES + 1)); return((ulint) n); } @@ -2020,7 +2015,7 @@ page_simple_validate_old( n_slots = page_dir_get_n_slots(page); - if (UNIV_UNLIKELY(n_slots > UNIV_PAGE_SIZE / 4)) { + if (UNIV_UNLIKELY(n_slots > srv_page_size / 4)) { ib::error() << "Nonsensical number " << n_slots << " of page dir slots"; @@ -2060,7 +2055,7 @@ page_simple_validate_old( goto func_exit; } - if (UNIV_UNLIKELY(rec_get_n_owned_old(rec))) { + if (UNIV_UNLIKELY(rec_get_n_owned_old(rec) != 0)) { /* This is a record pointed to by a dir slot */ if (UNIV_UNLIKELY(rec_get_n_owned_old(rec) != own_count)) { @@ -2096,7 +2091,7 @@ page_simple_validate_old( if (UNIV_UNLIKELY (rec_get_next_offs(rec, FALSE) < FIL_PAGE_DATA - || rec_get_next_offs(rec, FALSE) >= UNIV_PAGE_SIZE)) { + || rec_get_next_offs(rec, FALSE) >= srv_page_size)) { ib::error() << "Next record offset nonsensical " << rec_get_next_offs(rec, FALSE) << " for rec " @@ -2107,7 +2102,7 @@ page_simple_validate_old( count++; - if (UNIV_UNLIKELY(count > UNIV_PAGE_SIZE)) { + if (UNIV_UNLIKELY(count > srv_page_size)) { ib::error() << "Page record list appears" " to be circular " << count; goto func_exit; @@ -2144,7 +2139,7 @@ page_simple_validate_old( while (rec != NULL) { if (UNIV_UNLIKELY(rec < page + FIL_PAGE_DATA - || rec >= page + UNIV_PAGE_SIZE)) { + || rec >= page + srv_page_size)) { ib::error() << "Free list record has" " a nonsensical offset " << (rec - page); @@ -2161,7 +2156,7 @@ page_simple_validate_old( count++; - if (UNIV_UNLIKELY(count > UNIV_PAGE_SIZE)) { + if 
(UNIV_UNLIKELY(count > srv_page_size)) { ib::error() << "Page free list appears" " to be circular " << count; goto func_exit; @@ -2210,7 +2205,7 @@ page_simple_validate_new( n_slots = page_dir_get_n_slots(page); - if (UNIV_UNLIKELY(n_slots > UNIV_PAGE_SIZE / 4)) { + if (UNIV_UNLIKELY(n_slots > srv_page_size / 4)) { ib::error() << "Nonsensical number " << n_slots << " of page dir slots"; @@ -2251,7 +2246,7 @@ page_simple_validate_new( goto func_exit; } - if (UNIV_UNLIKELY(rec_get_n_owned_new(rec))) { + if (UNIV_UNLIKELY(rec_get_n_owned_new(rec) != 0)) { /* This is a record pointed to by a dir slot */ if (UNIV_UNLIKELY(rec_get_n_owned_new(rec) != own_count)) { @@ -2287,7 +2282,7 @@ page_simple_validate_new( if (UNIV_UNLIKELY (rec_get_next_offs(rec, TRUE) < FIL_PAGE_DATA - || rec_get_next_offs(rec, TRUE) >= UNIV_PAGE_SIZE)) { + || rec_get_next_offs(rec, TRUE) >= srv_page_size)) { ib::error() << "Next record offset nonsensical " << rec_get_next_offs(rec, TRUE) @@ -2298,7 +2293,7 @@ page_simple_validate_new( count++; - if (UNIV_UNLIKELY(count > UNIV_PAGE_SIZE)) { + if (UNIV_UNLIKELY(count > srv_page_size)) { ib::error() << "Page record list appears to be" " circular " << count; goto func_exit; @@ -2335,7 +2330,7 @@ page_simple_validate_new( while (rec != NULL) { if (UNIV_UNLIKELY(rec < page + FIL_PAGE_DATA - || rec >= page + UNIV_PAGE_SIZE)) { + || rec >= page + srv_page_size)) { ib::error() << "Free list record has" " a nonsensical offset " << page_offset(rec); @@ -2353,7 +2348,7 @@ page_simple_validate_new( count++; - if (UNIV_UNLIKELY(count > UNIV_PAGE_SIZE)) { + if (UNIV_UNLIKELY(count > srv_page_size)) { ib::error() << "Page free list appears to be" " circular " << count; goto func_exit; @@ -2443,12 +2438,12 @@ page_validate( ut_ad(srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN); } - heap = mem_heap_create(UNIV_PAGE_SIZE + 200); + heap = mem_heap_create(srv_page_size + 200); /* The following buffer is used to check that the records in the page record heap do not overlap */ - buf = static_cast(mem_heap_zalloc(heap, UNIV_PAGE_SIZE)); + buf = static_cast(mem_heap_zalloc(heap, srv_page_size)); /* Check first that the record heap and the directory do not overlap. 
*/ @@ -2540,7 +2535,7 @@ page_validate( data_size += rec_offs_size(offsets); -#if UNIV_GIS_DEBUG +#if defined(UNIV_GIS_DEBUG) /* For spatial index, print the mbr info.*/ if (index->type & DICT_SPATIAL) { rec_print_mbr_rec(stderr, rec, offsets); @@ -2551,7 +2546,7 @@ page_validate( offs = page_offset(rec_get_start(rec, offsets)); i = rec_offs_size(offsets); - if (UNIV_UNLIKELY(offs + i >= UNIV_PAGE_SIZE)) { + if (UNIV_UNLIKELY(offs + i >= srv_page_size)) { ib::error() << "Record offset out of bounds"; goto func_exit; } @@ -2572,7 +2567,7 @@ page_validate( rec_own_count = rec_get_n_owned_old(rec); } - if (UNIV_UNLIKELY(rec_own_count)) { + if (UNIV_UNLIKELY(rec_own_count != 0)) { /* This is a record pointed to by a dir slot */ if (UNIV_UNLIKELY(rec_own_count != own_count)) { ib::error() << "Wrong owned count " @@ -2659,7 +2654,7 @@ n_owned_zero: count++; offs = page_offset(rec_get_start(rec, offsets)); i = rec_offs_size(offsets); - if (UNIV_UNLIKELY(offs + i >= UNIV_PAGE_SIZE)) { + if (UNIV_UNLIKELY(offs + i >= srv_page_size)) { ib::error() << "Record offset out of bounds"; goto func_exit; } @@ -2757,7 +2752,11 @@ page_delete_rec( belongs to */ page_cur_t* pcur, /*!< in/out: page cursor on record to delete */ - page_zip_des_t* page_zip,/*!< in: compressed page descriptor */ + page_zip_des_t* +#ifdef UNIV_ZIP_DEBUG + page_zip/*!< in: compressed page descriptor */ +#endif + , const ulint* offsets)/*!< in: offsets for record */ { bool no_compress_needed; diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc index 9b6856ff2c5..4b611baefae 100644 --- a/storage/innobase/page/page0zip.cc +++ b/storage/innobase/page/page0zip.cc @@ -156,7 +156,7 @@ page_zip_empty_size( ulint n_fields, /*!< in: number of columns in the index */ ulint zip_size) /*!< in: compressed page size in bytes */ { - lint size = zip_size + ulint size = zip_size /* subtract the page header and the longest uncompressed data needed for one record */ - (PAGE_DATA @@ -166,7 +166,7 @@ page_zip_empty_size( - REC_N_NEW_EXTRA_BYTES/* omitted bytes */) /* subtract the space for page_zip_fields_encode() */ - compressBound(static_cast(2 * (n_fields + 1))); - return(size > 0 ? (ulint) size : 0); + return(lint(size) > 0 ? size : 0); } /** Check whether a tuple is too big for compressed table @@ -230,7 +230,8 @@ page_zip_dir_elems( const page_zip_des_t* page_zip) /*!< in: compressed page */ { /* Exclude the page infimum and supremum from the record count. */ - return(page_dir_get_n_heap(page_zip->data) - PAGE_HEAP_NO_USER_LOW); + return ulint(page_dir_get_n_heap(page_zip->data)) + - PAGE_HEAP_NO_USER_LOW; } /*************************************************************//** @@ -289,7 +290,7 @@ page_zip_dir_user_size( const page_zip_des_t* page_zip) /*!< in: compressed page */ { ulint size = PAGE_ZIP_DIR_SLOT_SIZE - * page_get_n_recs(page_zip->data); + * ulint(page_get_n_recs(page_zip->data)); ut_ad(size <= page_zip_dir_size(page_zip)); return(size); } @@ -397,7 +398,7 @@ page_zip_compress_write_log( } /* Read the number of user records. */ - trailer_size = page_dir_get_n_heap(page_zip->data) + trailer_size = ulint(page_dir_get_n_heap(page_zip->data)) - PAGE_HEAP_NO_USER_LOW; /* Multiply by uncompressed of size stored per record */ if (!page_is_leaf(page)) { @@ -411,15 +412,13 @@ page_zip_compress_write_log( /* Add the space occupied by BLOB pointers. 
*/ trailer_size += page_zip->n_blobs * BTR_EXTERN_FIELD_REF_SIZE; ut_a(page_zip->m_end > PAGE_DATA); -#if FIL_PAGE_DATA > PAGE_DATA -# error "FIL_PAGE_DATA > PAGE_DATA" -#endif + compile_time_assert(FIL_PAGE_DATA <= PAGE_DATA); ut_a(page_zip->m_end + trailer_size <= page_zip_get_size(page_zip)); log_ptr = mlog_write_initial_log_record_fast((page_t*) page, MLOG_ZIP_PAGE_COMPRESS, log_ptr, mtr); - mach_write_to_2(log_ptr, page_zip->m_end - FIL_PAGE_TYPE); + mach_write_to_2(log_ptr, ulint(page_zip->m_end - FIL_PAGE_TYPE)); log_ptr += 2; mach_write_to_2(log_ptr, trailer_size); log_ptr += 2; @@ -431,7 +430,7 @@ page_zip_compress_write_log( /* Write most of the page header, the compressed stream and the modification log. */ mlog_catenate_string(mtr, page_zip->data + FIL_PAGE_TYPE, - page_zip->m_end - FIL_PAGE_TYPE); + ulint(page_zip->m_end - FIL_PAGE_TYPE)); /* Write the uncompressed trailer of the compressed page. */ mlog_catenate_string(mtr, page_zip->data + page_zip_get_size(page_zip) - trailer_size, trailer_size); @@ -614,7 +613,7 @@ page_zip_fields_encode( } buf = page_zip_fixed_field_encode( - buf, field->fixed_len << 1); + buf, ulint(field->fixed_len) << 1); col++; } } @@ -694,15 +693,14 @@ page_zip_dir_encode( heap_no = rec_get_heap_no_new(rec); ut_a(heap_no >= PAGE_HEAP_NO_USER_LOW); ut_a(heap_no < n_heap); - ut_a(offs < UNIV_PAGE_SIZE - PAGE_DIR); + ut_a(offs < srv_page_size - PAGE_DIR); ut_a(offs >= PAGE_ZIP_START); -#if PAGE_ZIP_DIR_SLOT_MASK & (PAGE_ZIP_DIR_SLOT_MASK + 1) -# error PAGE_ZIP_DIR_SLOT_MASK is not 1 less than a power of 2 -#endif -#if PAGE_ZIP_DIR_SLOT_MASK < UNIV_ZIP_SIZE_MAX - 1 -# error PAGE_ZIP_DIR_SLOT_MASK < UNIV_ZIP_SIZE_MAX - 1 -#endif - if (UNIV_UNLIKELY(rec_get_n_owned_new(rec))) { + compile_time_assert(!(PAGE_ZIP_DIR_SLOT_MASK + & (PAGE_ZIP_DIR_SLOT_MASK + 1))); + compile_time_assert(PAGE_ZIP_DIR_SLOT_MASK + >= UNIV_ZIP_SIZE_MAX - 1); + + if (UNIV_UNLIKELY(rec_get_n_owned_new(rec) != 0)) { offs |= PAGE_ZIP_DIR_SLOT_OWNED; } @@ -725,7 +723,7 @@ page_zip_dir_encode( recs[heap_no - PAGE_HEAP_NO_USER_LOW] = rec; } - ut_a(rec_get_status(rec) == status); + ut_a(ulint(rec_get_status(rec)) == status); } offs = page_header_get_field(page, PAGE_FREE); @@ -740,7 +738,7 @@ page_zip_dir_encode( ut_a(heap_no < n_heap); ut_a(!rec[-REC_N_NEW_EXTRA_BYTES]); /* info_bits and n_owned */ - ut_a(rec_get_status(rec) == status); + ut_a(ulint(rec_get_status(rec)) == status); mach_write_to_2(buf - PAGE_ZIP_DIR_SLOT_SIZE * ++i, offs); @@ -809,7 +807,7 @@ page_zip_set_alloc( #ifdef PAGE_ZIP_COMPRESS_DBG /** Set this variable in a debugger to enable excessive logging in page_zip_compress(). */ -static ibool page_zip_compress_dbg; +static bool page_zip_compress_dbg; /** Set this variable in a debugger to enable binary logging of the data passed to deflate(). When this variable is nonzero, it will act @@ -1295,7 +1293,7 @@ page_zip_compress( && dict_table_is_comp(index->table) && !dict_index_is_ibuf(index))); - UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE); + UNIV_MEM_ASSERT_RW(page, srv_page_size); /* Check the data that will be omitted. */ ut_a(!memcmp(page + (PAGE_NEW_INFIMUM - REC_N_NEW_EXTRA_BYTES), @@ -1327,7 +1325,7 @@ page_zip_compress( } /* The dense directory excludes the infimum and supremum records. 
*/ - n_dense = page_dir_get_n_heap(page) - PAGE_HEAP_NO_USER_LOW; + n_dense = ulint(page_dir_get_n_heap(page)) - PAGE_HEAP_NO_USER_LOW; #ifdef PAGE_ZIP_COMPRESS_DBG if (UNIV_UNLIKELY(page_zip_compress_dbg)) { ib::info() << "compress " @@ -1346,8 +1344,8 @@ page_zip_compress( if (logfile) { /* Write the uncompressed page to the log. */ - if (fwrite(page, 1, UNIV_PAGE_SIZE, logfile) - != UNIV_PAGE_SIZE) { + if (fwrite(page, 1, srv_page_size, logfile) + != srv_page_size) { perror("fwrite"); } /* Record the compressed size as zero. @@ -1406,7 +1404,7 @@ page_zip_compress( + REC_OFFS_HEADER_SIZE + n_dense * ((sizeof *recs) - PAGE_ZIP_DIR_SLOT_SIZE) - + UNIV_PAGE_SIZE * 4 + + srv_page_size * 4 + (512 << MAX_MEM_LEVEL)); recs = static_cast( @@ -1423,7 +1421,7 @@ page_zip_compress( page_zip_set_alloc(&c_stream, heap); err = deflateInit2(&c_stream, static_cast(level), - Z_DEFLATED, UNIV_PAGE_SIZE_SHIFT, + Z_DEFLATED, srv_page_size_shift, MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY); ut_a(err == Z_OK); @@ -1544,7 +1542,7 @@ page_zip_compress( c_stream.avail_in = static_cast( page_header_get_field(page, PAGE_HEAP_TOP) - (c_stream.next_in - page)); - ut_a(c_stream.avail_in <= UNIV_PAGE_SIZE - PAGE_ZIP_START - PAGE_DIR); + ut_a(c_stream.avail_in <= srv_page_size - PAGE_ZIP_START - PAGE_DIR); UNIV_MEM_ASSERT_RW(c_stream.next_in, c_stream.avail_in); err = deflate(&c_stream, Z_FINISH); @@ -1622,7 +1620,7 @@ err_exit: /* Record the compressed size of the block. */ byte sz[4]; mach_write_to_4(sz, c_stream.total_out); - fseek(logfile, UNIV_PAGE_SIZE, SEEK_SET); + fseek(logfile, srv_page_size, SEEK_SET); if (fwrite(sz, 1, sizeof sz, logfile) != sizeof sz) { perror("fwrite"); } @@ -1775,7 +1773,8 @@ page_zip_fields_decode( /* ROW_FORMAT=COMPRESSED does not support instant ADD COLUMN */ index->n_core_fields = index->n_fields; - index->n_core_null_bytes = UT_BITS_IN_BYTES(index->n_nullable); + index->n_core_null_bytes + = UT_BITS_IN_BYTES(unsigned(index->n_nullable)); ut_ad(b == end); @@ -1818,7 +1817,7 @@ page_zip_dir_decode( /* Traverse the list of stored records in the sorting order, starting from the first user record. */ - slot = page + (UNIV_PAGE_SIZE - PAGE_DIR - PAGE_DIR_SLOT_SIZE); + slot = page + (srv_page_size - PAGE_DIR - PAGE_DIR_SLOT_SIZE); UNIV_PREFETCH_RW(slot); /* Zero out the page trailer. */ @@ -1852,7 +1851,7 @@ page_zip_dir_decode( mach_write_to_2(slot, PAGE_NEW_SUPREMUM); { const page_dir_slot_t* last_slot = page_dir_get_nth_slot( - page, page_dir_get_n_slots(page) - 1); + page, page_dir_get_n_slots(page) - 1U); if (UNIV_UNLIKELY(slot != last_slot)) { page_zip_fail(("page_zip_dir_decode 3: %p != %p\n", @@ -1935,7 +1934,7 @@ page_zip_set_extra_bytes( page[PAGE_NEW_SUPREMUM - REC_N_NEW_EXTRA_BYTES] = (byte) n_owned; /* The dense directory excludes the infimum and supremum records. 
*/ - n = page_dir_get_n_heap(page) - PAGE_HEAP_NO_USER_LOW; + n = ulint(page_dir_get_n_heap(page)) - PAGE_HEAP_NO_USER_LOW; if (i >= n) { if (UNIV_LIKELY(i == n)) { @@ -2021,8 +2020,8 @@ page_zip_apply_log_ext( return(NULL); } - memcpy(next_out, data, dst - next_out); - data += dst - next_out; + memcpy(next_out, data, ulint(dst - next_out)); + data += ulint(dst - next_out); next_out = dst + (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN); } else if (rec_offs_nth_extern(offsets, i)) { @@ -2031,7 +2030,7 @@ page_zip_apply_log_ext( ut_ad(len >= BTR_EXTERN_FIELD_REF_SIZE); - len += dst - next_out + len += ulint(dst - next_out) - BTR_EXTERN_FIELD_REF_SIZE; if (UNIV_UNLIKELY(data + len >= end)) { @@ -2051,7 +2050,7 @@ page_zip_apply_log_ext( } /* Copy the last bytes of the record. */ - len = rec_get_end(rec, offsets) - next_out; + len = ulint(rec_get_end(rec, offsets) - next_out); if (UNIV_UNLIKELY(data + len >= end)) { page_zip_fail(("page_zip_apply_log_ext:" " last %p+%lu >= %p\n", @@ -2251,7 +2250,7 @@ page_zip_apply_log( /* Copy any bytes following DB_TRX_ID, DB_ROLL_PTR. */ b = rec + l + (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN); - len = rec_get_end(rec, offsets) - b; + len = ulint(rec_get_end(rec, offsets) - b); if (UNIV_UNLIKELY(data + len >= end)) { page_zip_fail(("page_zip_apply_log:" " clust %p+%lu >= %p\n", @@ -2324,7 +2323,7 @@ page_zip_decompress_node_ptrs( d_stream->avail_out = static_cast( rec - REC_N_NEW_EXTRA_BYTES - d_stream->next_out); - ut_ad(d_stream->avail_out < UNIV_PAGE_SIZE + ut_ad(d_stream->avail_out < srv_page_size - PAGE_ZIP_START - PAGE_DIR); switch (inflate(d_stream, Z_SYNC_FLUSH)) { case Z_STREAM_END: @@ -2391,7 +2390,7 @@ page_zip_decompress_node_ptrs( d_stream->avail_out = static_cast( page_header_get_field(page_zip->data, PAGE_HEAP_TOP) - page_offset(d_stream->next_out)); - if (UNIV_UNLIKELY(d_stream->avail_out > UNIV_PAGE_SIZE + if (UNIV_UNLIKELY(d_stream->avail_out > srv_page_size - PAGE_ZIP_START - PAGE_DIR)) { page_zip_fail(("page_zip_decompress_node_ptrs:" @@ -2422,9 +2421,10 @@ zlib_done: /* Clear the unused heap space on the uncompressed page. */ memset(d_stream->next_out, 0, - page_dir_get_nth_slot(page, - page_dir_get_n_slots(page) - 1) - - d_stream->next_out); + ulint(page_dir_get_nth_slot(page, + page_dir_get_n_slots(page) + - 1U) + - d_stream->next_out)); } #ifdef UNIV_DEBUG @@ -2545,7 +2545,7 @@ page_zip_decompress_sec( d_stream->avail_out = static_cast( page_header_get_field(page_zip->data, PAGE_HEAP_TOP) - page_offset(d_stream->next_out)); - if (UNIV_UNLIKELY(d_stream->avail_out > UNIV_PAGE_SIZE + if (UNIV_UNLIKELY(d_stream->avail_out > srv_page_size - PAGE_ZIP_START - PAGE_DIR)) { page_zip_fail(("page_zip_decompress_sec:" @@ -2576,9 +2576,10 @@ zlib_done: /* Clear the unused heap space on the uncompressed page. 
*/ memset(d_stream->next_out, 0, - page_dir_get_nth_slot(page, - page_dir_get_n_slots(page) - 1) - - d_stream->next_out); + ulint(page_dir_get_nth_slot(page, + page_dir_get_n_slots(page) + - 1U) + - d_stream->next_out)); } ut_d(page_zip->m_start = unsigned(PAGE_DATA + d_stream->total_in)); @@ -2764,7 +2765,7 @@ page_zip_decompress_clust( d_stream->avail_out =static_cast( rec - REC_N_NEW_EXTRA_BYTES - d_stream->next_out); - ut_ad(d_stream->avail_out < UNIV_PAGE_SIZE + ut_ad(d_stream->avail_out < srv_page_size - PAGE_ZIP_START - PAGE_DIR); err = inflate(d_stream, Z_SYNC_FLUSH); switch (err) { @@ -2874,7 +2875,7 @@ page_zip_decompress_clust( d_stream->avail_out = static_cast( page_header_get_field(page_zip->data, PAGE_HEAP_TOP) - page_offset(d_stream->next_out)); - if (UNIV_UNLIKELY(d_stream->avail_out > UNIV_PAGE_SIZE + if (UNIV_UNLIKELY(d_stream->avail_out > srv_page_size - PAGE_ZIP_START - PAGE_DIR)) { page_zip_fail(("page_zip_decompress_clust:" @@ -2905,9 +2906,10 @@ zlib_done: /* Clear the unused heap space on the uncompressed page. */ memset(d_stream->next_out, 0, - page_dir_get_nth_slot(page, - page_dir_get_n_slots(page) - 1) - - d_stream->next_out); + ulint(page_dir_get_nth_slot(page, + page_dir_get_n_slots(page) + - 1U) + - d_stream->next_out)); } ut_d(page_zip->m_start = unsigned(PAGE_DATA + d_stream->total_in)); @@ -2951,7 +2953,7 @@ zlib_done: ulint len; byte* dst; rec_t* rec = recs[slot]; - ibool exists = !page_zip_dir_find_free( + bool exists = !page_zip_dir_find_free( page_zip, page_offset(rec)); offsets = rec_get_offsets(rec, index, offsets, true, ULINT_UNDEFINED, &heap); @@ -3047,7 +3049,7 @@ page_zip_decompress_low( ulint* offsets; ut_ad(page_zip_simple_validate(page_zip)); - UNIV_MEM_ASSERT_W(page, UNIV_PAGE_SIZE); + UNIV_MEM_ASSERT_W(page, srv_page_size); UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip)); /* The dense directory excludes the infimum and supremum records. */ @@ -3060,7 +3062,7 @@ page_zip_decompress_low( return(FALSE); } - heap = mem_heap_create(n_dense * (3 * sizeof *recs) + UNIV_PAGE_SIZE); + heap = mem_heap_create(n_dense * (3 * sizeof *recs) + srv_page_size); recs = static_cast( mem_heap_alloc(heap, n_dense * sizeof *recs)); @@ -3092,9 +3094,9 @@ page_zip_decompress_low( #ifdef UNIV_ZIP_DEBUG /* Clear the uncompressed page, except the header. */ - memset(PAGE_DATA + page, 0x55, UNIV_PAGE_SIZE - PAGE_DATA); + memset(PAGE_DATA + page, 0x55, srv_page_size - PAGE_DATA); #endif /* UNIV_ZIP_DEBUG */ - UNIV_MEM_INVALID(PAGE_DATA + page, UNIV_PAGE_SIZE - PAGE_DATA); + UNIV_MEM_INVALID(PAGE_DATA + page, srv_page_size - PAGE_DATA); /* Copy the page directory. 
*/ if (UNIV_UNLIKELY(!page_zip_dir_decode(page_zip, page, recs, @@ -3127,9 +3129,9 @@ zlib_error: d_stream.avail_in = static_cast( page_zip_get_size(page_zip) - (PAGE_DATA + 1)); d_stream.next_out = page + PAGE_ZIP_START; - d_stream.avail_out = uInt(UNIV_PAGE_SIZE - PAGE_ZIP_START); + d_stream.avail_out = uInt(srv_page_size - PAGE_ZIP_START); - if (UNIV_UNLIKELY(inflateInit2(&d_stream, UNIV_PAGE_SIZE_SHIFT) + if (UNIV_UNLIKELY(inflateInit2(&d_stream, srv_page_size_shift) != Z_OK)) { ut_error; } @@ -3224,7 +3226,7 @@ err_exit: } ut_a(page_is_comp(page)); - UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE); + UNIV_MEM_ASSERT_RW(page, srv_page_size); page_zip_fields_free(index); mem_heap_free(heap); @@ -3313,7 +3315,7 @@ page_zip_hexdump_func( #define page_zip_hexdump(buf, size) page_zip_hexdump_func(#buf, buf, size) /** Flag: make page_zip_validate() compare page headers only */ -ibool page_zip_validate_header_only = FALSE; +bool page_zip_validate_header_only; /**********************************************************************//** Check that the compressed and decompressed pages match. @@ -3340,7 +3342,7 @@ page_zip_validate_low( page_zip_fail(("page_zip_validate: page header\n")); page_zip_hexdump(page_zip, sizeof *page_zip); page_zip_hexdump(page_zip->data, page_zip_get_size(page_zip)); - page_zip_hexdump(page, UNIV_PAGE_SIZE); + page_zip_hexdump(page, srv_page_size); return(FALSE); } @@ -3351,11 +3353,12 @@ page_zip_validate_low( } /* page_zip_decompress() expects the uncompressed page to be - UNIV_PAGE_SIZE aligned. */ - temp_page_buf = static_cast(ut_malloc_nokey(2 * UNIV_PAGE_SIZE)); - temp_page = static_cast(ut_align(temp_page_buf, UNIV_PAGE_SIZE)); + srv_page_size aligned. */ + temp_page_buf = static_cast( + ut_malloc_nokey(2 << srv_page_size_shift)); + temp_page = static_cast(ut_align(temp_page_buf, srv_page_size)); - UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE); + UNIV_MEM_ASSERT_RW(page, srv_page_size); UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip)); temp_page_zip = *page_zip; @@ -3388,7 +3391,7 @@ page_zip_validate_low( valid = FALSE; } if (memcmp(page + PAGE_HEADER, temp_page + PAGE_HEADER, - UNIV_PAGE_SIZE - PAGE_HEADER - FIL_PAGE_DATA_END)) { + srv_page_size - PAGE_HEADER - FIL_PAGE_DATA_END)) { /* In crash recovery, the "minimum record" flag may be set incorrectly until the mini-transaction is @@ -3412,7 +3415,7 @@ page_zip_validate_low( if (!memcmp(page + PAGE_HEADER, temp_page + PAGE_HEADER, - UNIV_PAGE_SIZE - PAGE_HEADER + srv_page_size - PAGE_HEADER - FIL_PAGE_DATA_END)) { /* Only the minimum record flag @@ -3463,7 +3466,7 @@ page_zip_validate_low( page + PAGE_NEW_INFIMUM, TRUE); trec = page_rec_get_next_low( temp_page + PAGE_NEW_INFIMUM, TRUE); - ut_d(const bool is_leaf = page_is_leaf(page)); + const bool is_leaf = page_is_leaf(page); do { if (page_offset(rec) != page_offset(trec)) { @@ -3506,8 +3509,8 @@ func_exit: if (!valid) { page_zip_hexdump(page_zip, sizeof *page_zip); page_zip_hexdump(page_zip->data, page_zip_get_size(page_zip)); - page_zip_hexdump(page, UNIV_PAGE_SIZE); - page_zip_hexdump(temp_page, UNIV_PAGE_SIZE); + page_zip_hexdump(page, srv_page_size); + page_zip_hexdump(temp_page, srv_page_size); } ut_free(temp_page_buf); return(valid); @@ -3605,7 +3608,7 @@ page_zip_write_rec_ext( memmove(ext_end - n_ext * BTR_EXTERN_FIELD_REF_SIZE, ext_end, - externs - ext_end); + ulint(externs - ext_end)); } ut_a(blob_no + n_ext <= page_zip->n_blobs); @@ -3631,7 +3634,7 @@ page_zip_write_rec_ext( /* Log the preceding fields. 
*/ ASSERT_ZERO(data, src - start); - memcpy(data, start, src - start); + memcpy(data, start, ulint(src - start)); data += src - start; start = src + (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN); @@ -3651,7 +3654,7 @@ page_zip_write_rec_ext( src += len - BTR_EXTERN_FIELD_REF_SIZE; ASSERT_ZERO(data, src - start); - memcpy(data, start, src - start); + memcpy(data, start, ulint(src - start)); data += src - start; start = src + BTR_EXTERN_FIELD_REF_SIZE; @@ -3663,7 +3666,7 @@ page_zip_write_rec_ext( } /* Log the last bytes of the record. */ - len = rec_offs_data_size(offsets) - (start - rec); + len = rec_offs_data_size(offsets) - ulint(start - rec); ASSERT_ZERO(data, len); memcpy(data, start, len); @@ -3723,7 +3726,7 @@ page_zip_write_rec( } ut_ad(rec_get_start((rec_t*) rec, offsets) >= page + PAGE_ZIP_START); - ut_ad(rec_get_end((rec_t*) rec, offsets) <= page + UNIV_PAGE_SIZE + ut_ad(rec_get_end((rec_t*) rec, offsets) <= page + srv_page_size - PAGE_DIR - PAGE_DIR_SLOT_SIZE * page_dir_get_n_slots(page)); @@ -3794,7 +3797,7 @@ page_zip_write_rec( /* Log the preceding fields. */ ASSERT_ZERO(data, src - rec); - memcpy(data, rec, src - rec); + memcpy(data, rec, ulint(src - rec)); data += src - rec; /* Store trx_id and roll_ptr. */ @@ -3808,7 +3811,7 @@ page_zip_write_rec( /* Log the last bytes of the record. */ len = rec_offs_data_size(offsets) - - (src - rec); + - ulint(src - rec); ASSERT_ZERO(data, len); memcpy(data, src, len); @@ -3889,8 +3892,8 @@ page_zip_parse_write_blob_ptr( z_offset = mach_read_from_2(ptr + 2); if (offset < PAGE_ZIP_START - || offset >= UNIV_PAGE_SIZE - || z_offset >= UNIV_PAGE_SIZE) { + || offset >= srv_page_size + || z_offset >= srv_page_size) { corrupt: recv_sys->found_corrupt_log = TRUE; @@ -3996,7 +3999,7 @@ page_zip_write_blob_ptr( (byte*) field, MLOG_ZIP_WRITE_BLOB_PTR, log_ptr, mtr); mach_write_to_2(log_ptr, page_offset(field)); log_ptr += 2; - mach_write_to_2(log_ptr, externs - page_zip->data); + mach_write_to_2(log_ptr, ulint(externs - page_zip->data)); log_ptr += 2; memcpy(log_ptr, externs, BTR_EXTERN_FIELD_REF_SIZE); log_ptr += BTR_EXTERN_FIELD_REF_SIZE; @@ -4031,8 +4034,8 @@ page_zip_parse_write_node_ptr( z_offset = mach_read_from_2(ptr + 2); if (offset < PAGE_ZIP_START - || offset >= UNIV_PAGE_SIZE - || z_offset >= UNIV_PAGE_SIZE) { + || offset >= srv_page_size + || z_offset >= srv_page_size) { corrupt: recv_sys->found_corrupt_log = TRUE; @@ -4059,7 +4062,7 @@ corrupt: storage_end = page_zip_dir_start(page_zip); - heap_no = 1 + (storage_end - storage) / REC_NODE_PTR_SIZE; + heap_no = 1 + ulint(storage_end - storage) / REC_NODE_PTR_SIZE; if (UNIV_UNLIKELY((storage_end - storage) % REC_NODE_PTR_SIZE) || UNIV_UNLIKELY(heap_no < PAGE_HEAP_NO_USER_LOW) @@ -4117,9 +4120,7 @@ page_zip_write_node_ptr( #if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG ut_a(!memcmp(storage, field, REC_NODE_PTR_SIZE)); #endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */ -#if REC_NODE_PTR_SIZE != 4 -# error "REC_NODE_PTR_SIZE != 4" -#endif + compile_time_assert(REC_NODE_PTR_SIZE == 4); mach_write_to_4(field, ptr); memcpy(storage, field, REC_NODE_PTR_SIZE); @@ -4134,7 +4135,7 @@ page_zip_write_node_ptr( field, MLOG_ZIP_WRITE_NODE_PTR, log_ptr, mtr); mach_write_to_2(log_ptr, page_offset(field)); log_ptr += 2; - mach_write_to_2(log_ptr, storage - page_zip->data); + mach_write_to_2(log_ptr, ulint(storage - page_zip->data)); log_ptr += 2; memcpy(log_ptr, field, REC_NODE_PTR_SIZE); log_ptr += REC_NODE_PTR_SIZE; @@ -4185,9 +4186,7 @@ page_zip_write_trx_id_and_roll_ptr( - (rec_get_heap_no_new(rec) - 1) * 
(DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN); -#if DATA_TRX_ID + 1 != DATA_ROLL_PTR -# error "DATA_TRX_ID + 1 != DATA_ROLL_PTR" -#endif + compile_time_assert(DATA_TRX_ID + 1 == DATA_ROLL_PTR); field = rec_get_nth_field(rec, offsets, trx_id_col, &len); ut_ad(len == DATA_TRX_ID_LEN); ut_ad(field + DATA_TRX_ID_LEN @@ -4196,13 +4195,9 @@ page_zip_write_trx_id_and_roll_ptr( #if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG ut_a(!memcmp(storage, field, DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN)); #endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */ -#if DATA_TRX_ID_LEN != 6 -# error "DATA_TRX_ID_LEN != 6" -#endif + compile_time_assert(DATA_TRX_ID_LEN == 6); mach_write_to_6(field, trx_id); -#if DATA_ROLL_PTR_LEN != 7 -# error "DATA_ROLL_PTR_LEN != 7" -#endif + compile_time_assert(DATA_ROLL_PTR_LEN == 7); mach_write_to_7(field + DATA_TRX_ID_LEN, roll_ptr); memcpy(storage, field, DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN); @@ -4222,7 +4217,7 @@ page_zip_write_trx_id_and_roll_ptr( (byte*) field, MLOG_ZIP_WRITE_TRX_ID, log_ptr, mtr); mach_write_to_2(log_ptr, page_offset(field)); log_ptr += 2; - mach_write_to_2(log_ptr, storage - page_zip->data); + mach_write_to_2(log_ptr, ulint(storage - page_zip->data)); log_ptr += 2; memcpy(log_ptr, field, DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN); log_ptr += DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN; @@ -4254,8 +4249,8 @@ page_zip_parse_write_trx_id( uint z_offset = mach_read_from_2(ptr + 2); if (offset < PAGE_ZIP_START - || offset >= UNIV_PAGE_SIZE - || z_offset >= UNIV_PAGE_SIZE) { + || offset >= srv_page_size + || z_offset >= srv_page_size) { corrupt: recv_sys->found_corrupt_log = TRUE; @@ -4466,7 +4461,7 @@ page_zip_dir_insert( /* Read the old n_dense (n_heap may have been incremented). */ n_dense = page_dir_get_n_heap(page_zip->data) - - (PAGE_HEAP_NO_USER_LOW + 1); + - (PAGE_HEAP_NO_USER_LOW + 1U); if (UNIV_LIKELY_NULL(free_rec)) { /* The record was allocated from the free list. @@ -4493,7 +4488,7 @@ page_zip_dir_insert( /* Shift the dense directory to allocate place for rec. */ memmove(slot_free - PAGE_ZIP_DIR_SLOT_SIZE, slot_free, - slot_rec - slot_free); + ulint(slot_rec - slot_free)); /* Write the entry for the inserted record. The "owned" and "deleted" flags must be zero. */ @@ -4551,7 +4546,7 @@ page_zip_dir_delete( if (UNIV_LIKELY(slot_rec > slot_free)) { memmove(slot_free + PAGE_ZIP_DIR_SLOT_SIZE, slot_free, - slot_rec - slot_free); + ulint(slot_rec - slot_free)); } /* Write the entry for the deleted record. @@ -4564,7 +4559,7 @@ page_zip_dir_delete( } n_ext = rec_offs_n_extern(offsets); - if (UNIV_UNLIKELY(n_ext)) { + if (UNIV_UNLIKELY(n_ext != 0)) { /* Shift and zero fill the array of BLOB pointers. */ ulint blob_no; byte* externs; @@ -4584,7 +4579,7 @@ page_zip_dir_delete( page_zip->n_blobs -= static_cast(n_ext); /* Shift and zero fill the array. */ memmove(ext_end + n_ext * BTR_EXTERN_FIELD_REF_SIZE, ext_end, - (page_zip->n_blobs - blob_no) + ulint(page_zip->n_blobs - blob_no) * BTR_EXTERN_FIELD_REF_SIZE); memset(ext_end, 0, n_ext * BTR_EXTERN_FIELD_REF_SIZE); } @@ -4615,7 +4610,7 @@ page_zip_dir_add_slot( /* Read the old n_dense (n_heap has already been incremented). 
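The '#if X != Y' / '#error' blocks above become compile_time_assert(), so a changed layout constant still stops the build without relying on the preprocessor. compile_time_assert() already exists in MariaDB; in C++11 an equivalent can be written directly on top of static_assert. A stand-in definition with example constants, not the project's own macro:

#define my_compile_time_assert(expr) static_assert(expr, #expr)

enum { TRX_ID_LEN_SKETCH = 6, ROLL_PTR_LEN_SKETCH = 7 };

my_compile_time_assert(TRX_ID_LEN_SKETCH == 6);
my_compile_time_assert(TRX_ID_LEN_SKETCH + 1 == ROLL_PTR_LEN_SKETCH);
/* my_compile_time_assert(TRX_ID_LEN_SKETCH == 8);  <- would fail to build */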
*/ n_dense = page_dir_get_n_heap(page_zip->data) - - (PAGE_HEAP_NO_USER_LOW + 1); + - (PAGE_HEAP_NO_USER_LOW + 1U); dir = page_zip->data + page_zip_get_size(page_zip) - PAGE_ZIP_DIR_SLOT_SIZE * n_dense; @@ -4635,7 +4630,7 @@ page_zip_dir_add_slot( ASSERT_ZERO(externs - PAGE_ZIP_CLUST_LEAF_SLOT_SIZE, PAGE_ZIP_CLUST_LEAF_SLOT_SIZE); memmove(externs - PAGE_ZIP_CLUST_LEAF_SLOT_SIZE, - externs, stored - externs); + externs, ulint(stored - externs)); } else { stored = dir - page_zip->n_blobs * BTR_EXTERN_FIELD_REF_SIZE; @@ -4645,7 +4640,7 @@ page_zip_dir_add_slot( /* Move the uncompressed area backwards to make space for one directory slot. */ - memmove(stored - PAGE_ZIP_DIR_SLOT_SIZE, stored, dir - stored); + memmove(stored - PAGE_ZIP_DIR_SLOT_SIZE, stored, ulint(dir - stored)); } /***********************************************************//** @@ -4720,9 +4715,7 @@ page_zip_write_header_log( ut_ad(offset < PAGE_DATA); ut_ad(offset + length < PAGE_DATA); -#if PAGE_DATA > 255 -# error "PAGE_DATA > 255" -#endif + compile_time_assert(PAGE_DATA < 256U); ut_ad(length > 0); ut_ad(length < 256); @@ -4771,9 +4764,9 @@ page_zip_reorganize( ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); ut_ad(page_is_comp(page)); ut_ad(!dict_index_is_ibuf(index)); - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); /* Note that page_zip_validate(page_zip, page, index) may fail here. */ - UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE); + UNIV_MEM_ASSERT_RW(page, srv_page_size); UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip)); /* Disable logging */ @@ -4847,7 +4840,7 @@ page_zip_copy_recs( ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX)); ut_ad(mtr_memo_contains_page(mtr, src, MTR_MEMO_PAGE_X_FIX)); ut_ad(!dict_index_is_ibuf(index)); - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); #ifdef UNIV_ZIP_DEBUG /* The B-tree operations that call this function may set FIL_PAGE_PREV or PAGE_LEVEL, causing a temporary min_rec_flag @@ -4861,22 +4854,20 @@ page_zip_copy_recs( ut_a(dict_index_is_clust(index)); } - UNIV_MEM_ASSERT_W(page, UNIV_PAGE_SIZE); + UNIV_MEM_ASSERT_W(page, srv_page_size); UNIV_MEM_ASSERT_W(page_zip->data, page_zip_get_size(page_zip)); - UNIV_MEM_ASSERT_RW(src, UNIV_PAGE_SIZE); + UNIV_MEM_ASSERT_RW(src, srv_page_size); UNIV_MEM_ASSERT_RW(src_zip->data, page_zip_get_size(page_zip)); /* Copy those B-tree page header fields that are related to the records stored in the page. Also copy the field PAGE_MAX_TRX_ID. Skip the rest of the page header and trailer. On the compressed page, there is no trailer. 
*/ -#if PAGE_MAX_TRX_ID + 8 != PAGE_HEADER_PRIV_END -# error "PAGE_MAX_TRX_ID + 8 != PAGE_HEADER_PRIV_END" -#endif + compile_time_assert(PAGE_MAX_TRX_ID + 8 == PAGE_HEADER_PRIV_END); memcpy(PAGE_HEADER + page, PAGE_HEADER + src, PAGE_HEADER_PRIV_END); memcpy(PAGE_DATA + page, PAGE_DATA + src, - UNIV_PAGE_SIZE - PAGE_DATA - FIL_PAGE_DATA_END); + srv_page_size - PAGE_DATA - FIL_PAGE_DATA_END); memcpy(PAGE_HEADER + page_zip->data, PAGE_HEADER + src_zip->data, PAGE_HEADER_PRIV_END); memcpy(PAGE_DATA + page_zip->data, PAGE_DATA + src_zip->data, @@ -5078,9 +5069,7 @@ page_zip_verify_checksum( (data) + FIL_PAGE_SPACE_ID); const page_id_t page_id(space_id, page_no); -#if FIL_PAGE_LSN % 8 -#error "FIL_PAGE_LSN must be 64 bit aligned" -#endif + compile_time_assert(!(FIL_PAGE_LSN % 8)); /* Check if page is empty */ if (stored == 0 diff --git a/storage/innobase/pars/pars0opt.cc b/storage/innobase/pars/pars0opt.cc index bbc8eec0774..0d60dc7bade 100644 --- a/storage/innobase/pars/pars0opt.cc +++ b/storage/innobase/pars/pars0opt.cc @@ -206,7 +206,7 @@ opt_look_for_col_in_comparison_before( if (opt_check_exp_determined_before(exp, sel_node, nth_table)) { - *op = search_cond->func; + *op = ulint(search_cond->func); return(exp); } @@ -225,7 +225,8 @@ opt_look_for_col_in_comparison_before( if (opt_check_exp_determined_before(exp, sel_node, nth_table)) { - *op = opt_invert_cmp_op(search_cond->func); + *op = ulint(opt_invert_cmp_op( + search_cond->func)); return(exp); } diff --git a/storage/innobase/pars/pars0pars.cc b/storage/innobase/pars/pars0pars.cc index fbe55d1df03..8a30e611eb4 100644 --- a/storage/innobase/pars/pars0pars.cc +++ b/storage/innobase/pars/pars0pars.cc @@ -1832,7 +1832,7 @@ pars_column_def( ulint len2; if (len) { - len2 = eval_node_get_int_val(len); + len2 = ulint(eval_node_get_int_val(len)); } else { len2 = 0; } @@ -2074,9 +2074,8 @@ pars_get_lex_chars( { int len; - len = static_cast( - pars_sym_tab_global->string_len - - pars_sym_tab_global->next_char_pos); + len = int(pars_sym_tab_global->string_len) + - pars_sym_tab_global->next_char_pos; if (len == 0) { return(0); } @@ -2085,8 +2084,8 @@ pars_get_lex_chars( len = max_size; } - ut_memcpy(buf, pars_sym_tab_global->sql_string - + pars_sym_tab_global->next_char_pos, len); + memcpy(buf, pars_sym_tab_global->sql_string + + pars_sym_tab_global->next_char_pos, ulint(len)); pars_sym_tab_global->next_char_pos += len; @@ -2345,7 +2344,7 @@ pars_info_add_int4_literal( /*=======================*/ pars_info_t* info, /*!< in: info struct */ const char* name, /*!< in: name */ - lint val) /*!< in: value */ + ulint val) /*!< in: value */ { byte* buf = static_cast(mem_heap_alloc(info->heap, 4)); diff --git a/storage/innobase/que/que0que.cc b/storage/innobase/que/que0que.cc index 937f215dc39..947b281e4b9 100644 --- a/storage/innobase/que/que0que.cc +++ b/storage/innobase/que/que0que.cc @@ -484,7 +484,7 @@ que_graph_free_recursive( if (upd->in_mysql_interface) { btr_pcur_free_for_mysql(upd->pcur); - upd->in_mysql_interface = FALSE; + upd->in_mysql_interface = false; } que_graph_free_recursive(upd->cascade_node); @@ -1005,11 +1005,6 @@ que_thr_step( } else if (type == QUE_NODE_FOR) { for_step(thr); } else if (type == QUE_NODE_PROC) { - - /* We can access trx->undo_no without reserving - trx->undo_mutex, because there cannot be active query - threads doing updating or inserting at the moment! 
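pars_info_add_int4_literal() above now takes a ulint and stores it into a freshly allocated 4-byte buffer, filled in InnoDB's usual most-significant-byte-first (mach_write_to_4) order. A stand-in for that byte-order helper, not the actual definition:

#include <cstdint>

/* Stand-in for mach_write_to_4(): most-significant byte first. */
static void write_be32(unsigned char* b, std::uint32_t v)
{
	b[0] = static_cast<unsigned char>(v >> 24);
	b[1] = static_cast<unsigned char>(v >> 16);
	b[2] = static_cast<unsigned char>(v >> 8);
	b[3] = static_cast<unsigned char>(v);
}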
*/ - if (thr->prev_node == que_node_get_parent(node)) { trx->last_sql_stat_start.least_undo_no = trx->undo_no; diff --git a/storage/innobase/rem/rem0cmp.cc b/storage/innobase/rem/rem0cmp.cc index bfb9e95a5f8..34f71c86bbe 100644 --- a/storage/innobase/rem/rem0cmp.cc +++ b/storage/innobase/rem/rem0cmp.cc @@ -229,7 +229,6 @@ static int cmp_geometry_field( /*===============*/ - ulint mtype, /*!< in: main type */ ulint prtype, /*!< in: precise type */ const byte* a, /*!< in: data field */ unsigned int a_length, /*!< in: data field length, @@ -303,12 +302,10 @@ cmp_gis_field( not UNIV_SQL_NULL */ { if (mode == PAGE_CUR_MBR_EQUAL) { - /* TODO: Since the DATA_GEOMETRY is not used in compare - function, we could pass it instead of a specific type now */ - return(cmp_geometry_field(DATA_GEOMETRY, DATA_GIS_MBR, - a, a_length, b, b_length)); + return cmp_geometry_field(DATA_GIS_MBR, + a, a_length, b, b_length); } else { - return(rtree_key_cmp(mode, a, a_length, b, b_length)); + return rtree_key_cmp(mode, a, int(a_length), b, int(b_length)); } } @@ -379,8 +376,7 @@ cmp_whole_field( return(innobase_mysql_cmp(prtype, a, a_length, b, b_length)); case DATA_GEOMETRY: - return(cmp_geometry_field(mtype, prtype, a, a_length, b, - b_length)); + return cmp_geometry_field(prtype, a, a_length, b, b_length); default: ib::fatal() << "Unknown data type number " << mtype; } diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc index dcc435c091f..7d1a35d82e4 100644 --- a/storage/innobase/rem/rem0rec.cc +++ b/storage/innobase/rem/rem0rec.cc @@ -298,6 +298,7 @@ in ROW_FORMAT=COMPACT,DYNAMIC,COMPRESSED. This is a special case of rec_init_offsets() and rec_get_offsets_func(). @param[in] rec leaf-page record @param[in] index the index that the record belongs in +@param[in] n_core number of core fields (index->n_core_fields) @param[in,out] offsets offsets, with valid rec_offs_n_fields(offsets) @param[in] format record format */ static inline @@ -306,23 +307,25 @@ rec_init_offsets_comp_ordinary( const rec_t* rec, const dict_index_t* index, ulint* offsets, + ulint n_core, rec_leaf_format format) { ulint offs = 0; ulint any = 0; const byte* nulls = rec; const byte* lens = NULL; - ulint n_fields = index->n_core_fields; + ulint n_fields = n_core; ulint null_mask = 1; - ut_ad(index->n_core_fields > 0); - ut_ad(index->n_fields >= index->n_core_fields); + ut_ad(index->n_core_fields >= n_core); + ut_ad(n_core > 0); + ut_ad(index->n_fields >= n_core); ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable)); ut_ad(format == REC_LEAF_TEMP || format == REC_LEAF_TEMP_COLUMNS_ADDED || dict_table_is_comp(index->table)); ut_ad(format != REC_LEAF_TEMP_COLUMNS_ADDED || index->n_fields == rec_offs_n_fields(offsets)); - ut_d(ulint n_null); + ut_d(ulint n_null= 0); switch (format) { case REC_LEAF_TEMP: @@ -344,17 +347,17 @@ ordinary: /* We would have !index->is_instant() when rolling back an instant ADD COLUMN operation. 
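The offsets code above sizes the record header's null-flag bitmap with UT_BITS_IN_BYTES(n_nullable). The macro simply rounds a bit count up to whole bytes; a stand-in for reference (the real macro lives in ut0ut.h):

/* Stand-in for the UT_BITS_IN_BYTES() macro. */
static inline unsigned long bits_in_bytes(unsigned long n_bits)
{
	return (n_bits + 7) / 8;	/* 0 -> 0, 1..8 -> 1, 9..16 -> 2 */
}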
*/ nulls -= REC_N_NEW_EXTRA_BYTES; + ut_ad(index->is_instant()); /* fall through */ case REC_LEAF_TEMP_COLUMNS_ADDED: - ut_ad(index->is_instant()); - n_fields = index->n_core_fields + 1 - + rec_get_n_add_field(nulls); + n_fields = n_core + 1 + rec_get_n_add_field(nulls); ut_ad(n_fields <= index->n_fields); const ulint n_nullable = index->get_n_nullable(n_fields); const ulint n_null_bytes = UT_BITS_IN_BYTES(n_nullable); ut_d(n_null = n_nullable); ut_ad(n_null <= index->n_nullable); - ut_ad(n_null_bytes >= index->n_core_null_bytes); + ut_ad(n_null_bytes >= index->n_core_null_bytes + || n_core < index->n_core_fields); lens = --nulls - n_null_bytes; } @@ -448,7 +451,7 @@ resolved: } while (++i < rec_offs_n_fields(offsets)); *rec_offs_base(offsets) - = (rec - (lens + 1)) | REC_OFFS_COMPACT | any; + = ulint(rec - (lens + 1)) | REC_OFFS_COMPACT | any; } #ifdef UNIV_DEBUG @@ -594,7 +597,7 @@ rec_init_offsets( const byte* lens; dict_field_t* field; ulint null_mask; - ulint status = rec_get_status(rec); + rec_comp_status_t status = rec_get_status(rec); ulint n_node_ptr_field = ULINT_UNDEFINED; switch (UNIV_EXPECT(status, REC_STATUS_ORDINARY)) { @@ -614,11 +617,13 @@ rec_init_offsets( case REC_STATUS_COLUMNS_ADDED: ut_ad(leaf); rec_init_offsets_comp_ordinary(rec, index, offsets, + index->n_core_fields, REC_LEAF_COLUMNS_ADDED); return; case REC_STATUS_ORDINARY: ut_ad(leaf); rec_init_offsets_comp_ordinary(rec, index, offsets, + index->n_core_fields, REC_LEAF_ORDINARY); return; } @@ -710,7 +715,7 @@ resolved: } while (++i < rec_offs_n_fields(offsets)); *rec_offs_base(offsets) - = (rec - (lens + 1)) | REC_OFFS_COMPACT; + = ulint(rec - (lens + 1)) | REC_OFFS_COMPACT; } else { /* Old-style record: determine extra size and end offsets */ offs = REC_N_OLD_EXTRA_BYTES; @@ -910,7 +915,7 @@ rec_get_offsets_reverse( ut_ad(dict_table_is_comp(index->table)); ut_ad(!index->is_instant()); - if (UNIV_UNLIKELY(node_ptr)) { + if (UNIV_UNLIKELY(node_ptr != 0)) { n_node_ptr_field = dict_index_get_n_unique_in_tree_nonleaf(index); n = n_node_ptr_field + 1; @@ -996,7 +1001,7 @@ resolved: } while (++i < rec_offs_n_fields(offsets)); ut_ad(lens >= extra); - *rec_offs_base(offsets) = (lens - extra + REC_N_NEW_EXTRA_BYTES) + *rec_offs_base(offsets) = (ulint(lens - extra) + REC_N_NEW_EXTRA_BYTES) | REC_OFFS_COMPACT | any_ext; } @@ -1048,7 +1053,7 @@ rec_get_nth_field_offs_old( *len = next_os - os; - ut_ad(*len < UNIV_PAGE_SIZE); + ut_ad(*len < srv_page_size); return(os); } @@ -1447,10 +1452,10 @@ rec_convert_dtuple_to_rec_comp( byte* end; byte* nulls = temp ? rec - 1 : rec - (REC_N_NEW_EXTRA_BYTES + 1); - byte* lens; + byte* UNINIT_VAR(lens); ulint len; ulint i; - ulint n_node_ptr_field; + ulint UNINIT_VAR(n_node_ptr_field); ulint fixed_len; ulint null_mask = 1; @@ -1484,7 +1489,8 @@ rec_convert_dtuple_to_rec_comp( lens = nulls - (index->is_instant() ? 
UT_BITS_IN_BYTES(index->get_n_nullable( n_fields)) - : UT_BITS_IN_BYTES(index->n_nullable)); + : UT_BITS_IN_BYTES( + unsigned(index->n_nullable))); break; case REC_STATUS_NODE_PTR: ut_ad(!temp); @@ -1505,7 +1511,7 @@ rec_convert_dtuple_to_rec_comp( end = rec; /* clear the SQL-null flags */ - memset(lens + 1, 0, nulls - lens); + memset(lens + 1, 0, ulint(nulls - lens)); /* Store the data and the offsets */ @@ -1688,25 +1694,44 @@ rec_get_converted_size_temp( @param[in] rec temporary file record @param[in] index index of that the record belongs to @param[in,out] offsets offsets to the fields; in: rec_offs_n_fields(offsets) -@param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED -*/ +@param[in] n_core number of core fields (index->n_core_fields) +@param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED */ void rec_init_offsets_temp( const rec_t* rec, const dict_index_t* index, ulint* offsets, + ulint n_core, rec_comp_status_t status) { ut_ad(status == REC_STATUS_ORDINARY || status == REC_STATUS_COLUMNS_ADDED); - ut_ad(status == REC_STATUS_ORDINARY || index->is_instant()); - - rec_init_offsets_comp_ordinary(rec, index, offsets, + /* The table may have been converted to plain format + if it was emptied during an ALTER TABLE operation. */ + ut_ad(index->n_core_fields == n_core || !index->is_instant()); + ut_ad(index->n_core_fields >= n_core); + rec_init_offsets_comp_ordinary(rec, index, offsets, n_core, status == REC_STATUS_COLUMNS_ADDED ? REC_LEAF_TEMP_COLUMNS_ADDED : REC_LEAF_TEMP); } +/** Determine the offset to each field in temporary file. +@param[in] rec temporary file record +@param[in] index index of that the record belongs to +@param[in,out] offsets offsets to the fields; in: rec_offs_n_fields(offsets) +*/ +void +rec_init_offsets_temp( + const rec_t* rec, + const dict_index_t* index, + ulint* offsets) +{ + ut_ad(!index->is_instant()); + rec_init_offsets_comp_ordinary(rec, index, offsets, + index->n_core_fields, REC_LEAF_TEMP); +} + /** Convert a data tuple prefix to the temporary file format. @param[out] rec record in temporary file format @param[in] index clustered or secondary index @@ -1834,13 +1859,6 @@ rec_copy_prefix_to_buf( or NULL */ ulint* buf_size) /*!< in/out: buffer size */ { - const byte* nulls; - const byte* lens; - ulint i; - ulint prefix_len; - ulint null_mask; - bool is_rtr_node_ptr = false; - ut_ad(n_fields <= index->n_fields || dict_index_is_ibuf(index)); ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable)); UNIV_PREFETCH_RW(*buf); @@ -1853,48 +1871,62 @@ rec_copy_prefix_to_buf( buf, buf_size)); } + ulint prefix_len = 0; + ulint instant_omit = 0; + const byte* nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1); + const byte* nullf = nulls; + const byte* lens = nulls - index->n_core_null_bytes; + switch (rec_get_status(rec)) { - case REC_STATUS_INFIMUM: - case REC_STATUS_SUPREMUM: + default: /* infimum or supremum record: no sense to copy anything */ ut_error; return(NULL); case REC_STATUS_ORDINARY: ut_ad(n_fields <= index->n_core_fields); - nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1); - lens = nulls - index->n_core_null_bytes; break; case REC_STATUS_NODE_PTR: /* For R-tree, we need to copy the child page number field. 
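In rec_convert_dtuple_to_rec_comp() above, lens and n_node_ptr_field are wrapped in UNINIT_VAR() because they are assigned on every reachable path even though some compilers cannot prove it. The macro is commonly defined as a self-assignment under GCC and a zero default elsewhere; this is a sketch of that shape, not the project's exact definition:

#if defined(__GNUC__)
# define MY_UNINIT_VAR(x) x = x		/* hushes -Wmaybe-uninitialized */
#else
# define MY_UNINIT_VAR(x) x = 0
#endif

void uninit_var_example()
{
	unsigned long MY_UNINIT_VAR(n_node_ptr_field);
	n_node_ptr_field = 1;		/* always assigned before use */
	(void) n_node_ptr_field;
}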
*/ + compile_time_assert(DICT_INDEX_SPATIAL_NODEPTR_SIZE == 1); if (dict_index_is_spatial(index)) { + ut_ad(index->n_core_null_bytes == 0); ut_ad(n_fields == DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1); - is_rtr_node_ptr = true; - } else { - /* it doesn't make sense to copy the child page number - field */ - ut_ad(n_fields <= - dict_index_get_n_unique_in_tree_nonleaf(index)); + ut_ad(index->fields[0].col->prtype & DATA_NOT_NULL); + ut_ad(DATA_BIG_COL(index->fields[0].col)); + /* This is a deficiency of the format introduced + in MySQL 5.7. The length in the R-tree index should + always be DATA_MBR_LEN. */ + ut_ad(!index->fields[0].fixed_len); + ut_ad(*lens == DATA_MBR_LEN); + lens--; + prefix_len = DATA_MBR_LEN + REC_NODE_PTR_SIZE; + n_fields = 0; /* skip the "for" loop below */ + break; } - nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1); - lens = nulls - index->n_core_null_bytes; + /* it doesn't make sense to copy the child page number field */ + ut_ad(n_fields + <= dict_index_get_n_unique_in_tree_nonleaf(index)); break; case REC_STATUS_COLUMNS_ADDED: /* We would have !index->is_instant() when rolling back an instant ADD COLUMN operation. */ ut_ad(index->is_instant() || page_rec_is_default_row(rec)); - nulls = &rec[-REC_N_NEW_EXTRA_BYTES]; - const ulint n_rec = index->n_core_fields + 1 + nulls++; + const ulint n_rec = ulint(index->n_core_fields) + 1 + rec_get_n_add_field(nulls); - const uint n_nullable = index->get_n_nullable(n_rec); - lens = --nulls - UT_BITS_IN_BYTES(n_nullable); + instant_omit = ulint(&rec[-REC_N_NEW_EXTRA_BYTES] - nulls); + ut_ad(instant_omit == 1 || instant_omit == 2); + nullf = nulls; + const uint nb = UT_BITS_IN_BYTES(index->get_n_nullable(n_rec)); + instant_omit += nb - index->n_core_null_bytes; + lens = --nulls - nb; } + const byte* const lenf = lens; UNIV_PREFETCH_R(lens); - prefix_len = 0; - null_mask = 1; /* read the lengths of fields 0..n */ - for (i = 0; i < n_fields; i++) { + for (ulint i = 0, null_mask = 1; i < n_fields; i++) { const dict_field_t* field; const dict_col_t* col; @@ -1916,11 +1948,7 @@ rec_copy_prefix_to_buf( null_mask <<= 1; } - if (is_rtr_node_ptr && i == 1) { - /* For rtree node ptr rec, we need to - copy the page no field with 4 bytes len. 
*/ - prefix_len += 4; - } else if (field->fixed_len) { + if (field->fixed_len) { prefix_len += field->fixed_len; } else { ulint len = *lens--; @@ -1946,17 +1974,41 @@ rec_copy_prefix_to_buf( UNIV_PREFETCH_R(rec + prefix_len); - prefix_len += rec - (lens + 1); + ulint size = prefix_len + ulint(rec - (lens + 1)) - instant_omit; - if ((*buf == NULL) || (*buf_size < prefix_len)) { + if (*buf == NULL || *buf_size < size) { ut_free(*buf); - *buf_size = prefix_len; - *buf = static_cast(ut_malloc_nokey(prefix_len)); + *buf_size = size; + *buf = static_cast(ut_malloc_nokey(size)); } - memcpy(*buf, lens + 1, prefix_len); - - return(*buf + (rec - (lens + 1))); + if (instant_omit) { + /* Copy and convert the record header to a format where + instant ADD COLUMN has not been used: + + lengths of variable-length fields in the prefix + - omit any null flag bytes for any instantly added columns + + index->n_core_null_bytes of null flags + - omit the n_add_fields header (1 or 2 bytes) + + REC_N_NEW_EXTRA_BYTES of fixed header */ + byte* b = *buf; + /* copy the lengths of the variable-length fields */ + memcpy(b, lens + 1, ulint(lenf - lens)); + b += ulint(lenf - lens); + /* copy the null flags */ + memcpy(b, nullf - index->n_core_null_bytes, + index->n_core_null_bytes); + b += index->n_core_null_bytes + REC_N_NEW_EXTRA_BYTES; + ut_ad(ulint(b - *buf) + prefix_len == size); + /* copy the fixed-size header and the record prefix */ + memcpy(b - REC_N_NEW_EXTRA_BYTES, rec - REC_N_NEW_EXTRA_BYTES, + prefix_len + REC_N_NEW_EXTRA_BYTES); + ut_ad(rec_get_status(b) == REC_STATUS_COLUMNS_ADDED); + rec_set_status(b, REC_STATUS_ORDINARY); + return b; + } else { + memcpy(*buf, lens + 1, size); + return *buf + (rec - (lens + 1)); + } } /***************************************************************//** @@ -1984,7 +2036,7 @@ rec_validate_old( for (i = 0; i < n_fields; i++) { rec_get_nth_field_offs_old(rec, i, &len); - if (!((len < UNIV_PAGE_SIZE) || (len == UNIV_SQL_NULL))) { + if (!((len < srv_page_size) || (len == UNIV_SQL_NULL))) { ib::error() << "Record field " << i << " len " << len; return(FALSE); } @@ -2035,7 +2087,7 @@ rec_validate( switch (len) { default: - if (len >= UNIV_PAGE_SIZE) { + if (len >= srv_page_size) { ib::error() << "Record field " << i << " len " << len; return(FALSE); @@ -2129,7 +2181,7 @@ rec_print_comp( ulint i; for (i = 0; i < rec_offs_n_fields(offsets); i++) { - const byte* data; + const byte* UNINIT_VAR(data); ulint len; if (rec_offs_nth_default(offsets, i)) { diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc index e6ee5eb70f4..3ca93ba34b9 100644 --- a/storage/innobase/row/row0ftsort.cc +++ b/storage/innobase/row/row0ftsort.cc @@ -108,7 +108,7 @@ row_merge_create_fts_sort_index( ? 
DATA_VARCHAR : DATA_VARMYSQL; field->col->mbminlen = idx_field->col->mbminlen; field->col->mbmaxlen = idx_field->col->mbmaxlen; - field->col->len = HA_FT_MAXCHARLEN * field->col->mbmaxlen; + field->col->len = HA_FT_MAXCHARLEN * unsigned(field->col->mbmaxlen); field->fixed_len = 0; @@ -255,7 +255,7 @@ row_fts_psort_info_init( dup->index); if (row_merge_file_create(psort_info[j].merge_file[i], - path) < 0) { + path) == OS_FILE_CLOSED) { goto func_exit; } @@ -415,9 +415,9 @@ row_merge_fts_doc_add_word_for_parser( ut_ad(t_ctx); str.f_str = (byte*)(word); - str.f_len = word_len; + str.f_len = ulint(word_len); str.f_n_char = fts_get_token_size( - (CHARSET_INFO*)param->cs, word, word_len); + (CHARSET_INFO*)param->cs, word, ulint(word_len)); /* JAN: TODO: MySQL 5.7 FTS ut_ad(boolean_info->position >= 0); @@ -776,7 +776,7 @@ DECLARE_THREAD(fts_parallel_tokenization)( merge_file_t** merge_file; row_merge_block_t** block; row_merge_block_t** crypt_block; - int tmpfd[FTS_NUM_AUX_INDEX]; + pfs_os_file_t tmpfd[FTS_NUM_AUX_INDEX]; ulint mycount[FTS_NUM_AUX_INDEX]; ib_uint64_t total_rec = 0; ulint num_doc_processed = 0; @@ -1029,7 +1029,7 @@ exit: } tmpfd[i] = row_merge_file_create_low(path); - if (tmpfd[i] < 0) { + if (tmpfd[i] == OS_FILE_CLOSED) { error = DB_OUT_OF_MEMORY; goto func_exit; } @@ -1041,12 +1041,12 @@ exit: crypt_block[i], table->space->id); if (error != DB_SUCCESS) { - close(tmpfd[i]); + os_file_close(tmpfd[i]); goto func_exit; } total_rec += merge_file[i]->n_rec; - close(tmpfd[i]); + os_file_close(tmpfd[i]); } func_exit: @@ -1135,7 +1135,7 @@ row_fts_start_parallel_merge( /*=========================*/ fts_psort_t* merge_info) /*!< in: parallel sort info */ { - int i = 0; + ulint i = 0; /* Kick off merge/insert threads */ for (i = 0; i < FTS_NUM_AUX_INDEX; i++) { @@ -1375,10 +1375,10 @@ row_fts_insert_tuple( Propagate a newly added record up one level in the selection tree @return parent where this value propagated to */ static -int +ulint row_fts_sel_tree_propagate( /*=======================*/ - int propogated, /*(parent)); + return parent; } /*********************************************************************//** @@ -1437,8 +1437,8 @@ row_fts_sel_tree_update( ulint i; for (i = 1; i <= height; i++) { - propagated = static_cast(row_fts_sel_tree_propagate( - static_cast(propagated), sel_tree, mrec, offsets, index)); + propagated = row_fts_sel_tree_propagate( + propagated, sel_tree, mrec, offsets, index); } return(sel_tree[0]); @@ -1518,7 +1518,7 @@ row_fts_build_sel_tree( { ulint treelevel = 1; ulint num = 2; - int i = 0; + ulint i = 0; ulint start; /* No need to build selection tree if we only have two merge threads */ @@ -1533,13 +1533,13 @@ row_fts_build_sel_tree( start = (ulint(1) << treelevel) - 1; - for (i = 0; i < (int) fts_sort_pll_degree; i++) { - sel_tree[i + start] = i; + for (i = 0; i < fts_sort_pll_degree; i++) { + sel_tree[i + start] = int(i); } - for (i = static_cast(treelevel) - 1; i >= 0; i--) { + for (i = treelevel; --i; ) { row_fts_build_sel_tree_level( - sel_tree, static_cast(i), mrec, offsets, index); + sel_tree, i, mrec, offsets, index); } return(treelevel); @@ -1570,7 +1570,7 @@ row_fts_merge_insert( ib_alloc_t* heap_alloc; ulint i; mrec_buf_t** buf; - int* fd; + pfs_os_file_t* fd; byte** block; byte** crypt_block; const mrec_t** mrec; @@ -1579,7 +1579,7 @@ row_fts_merge_insert( ulint height; ulint start; fts_psort_insert_t ins_ctx; - ulint count_diag = 0; + uint64_t count_diag = 0; fts_table_t fts_table; char aux_table_name[MAX_FULL_NAME_LEN]; dict_table_t* 
aux_table; @@ -1612,7 +1612,7 @@ row_fts_merge_insert( heap, sizeof(*offsets) * fts_sort_pll_degree); buf = (mrec_buf_t**) mem_heap_alloc( heap, sizeof(*buf) * fts_sort_pll_degree); - fd = (int*) mem_heap_alloc(heap, sizeof(*fd) * fts_sort_pll_degree); + fd = (pfs_os_file_t*) mem_heap_alloc(heap, sizeof(*fd) * fts_sort_pll_degree); block = (byte**) mem_heap_alloc( heap, sizeof(*block) * fts_sort_pll_degree); crypt_block = (byte**) mem_heap_alloc( @@ -1645,7 +1645,7 @@ row_fts_merge_insert( buf[i] = static_cast( mem_heap_alloc(heap, sizeof *buf[i])); - count_diag += (int) psort_info[i].merge_file[id]->n_rec; + count_diag += psort_info[i].merge_file[id]->n_rec; } if (fts_enable_diag_print) { @@ -1737,7 +1737,7 @@ row_fts_merge_insert( height = row_fts_build_sel_tree(sel_tree, (const mrec_t **) mrec, offsets, index); - start = (1 << height) - 1; + start = (1U << height) - 1; /* Fetch sorted records from sort buffer and insert them into corresponding FTS index auxiliary tables */ diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 6ec06ab1933..4746eb0aab8 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2012, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2015, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under @@ -28,6 +28,7 @@ Created 2012-02-08 by Sunny Bains. #include "row0import.h" #include "btr0pcur.h" +#include "btr0sea.h" #include "que0que.h" #include "dict0boot.h" #include "ibuf0ibuf.h" @@ -404,7 +405,7 @@ public: Called for every page in the tablespace. If the page was not updated then its state must be set to BUF_PAGE_NOT_USED. For compressed tables the page descriptor memory will be at offset: - block->frame + UNIV_PAGE_SIZE; + block->frame + srv_page_size; @param offset - physical offset within the file @param block - block read from file, note it is not from the buffer pool @retval DB_SUCCESS or error code. */ @@ -573,7 +574,7 @@ AbstractCallback::init( ib::error() << "Page size " << m_page_size.physical() << " of ibd file is not the same as the server page" - " size " << univ_page_size.physical(); + " size " << srv_page_size; return(DB_CORRUPTION); @@ -886,17 +887,14 @@ private: /** Purge delete-marked records, only if it is possible to do so without re-organising the B+tree. - @param offsets current row offsets. @retval true if purged */ - bool purge(const ulint* offsets) UNIV_NOTHROW; + bool purge() UNIV_NOTHROW; /** Adjust the BLOB references and sys fields for the current record. - @param index the index being converted @param rec record to update @param offsets column offsets for the record @return DB_SUCCESS or error code. */ dberr_t adjust_cluster_record( - const dict_index_t* index, rec_t* rec, const ulint* offsets) UNIV_NOTHROW; @@ -1560,7 +1558,7 @@ PageConverter::adjust_cluster_index_blob_column( return(DB_CORRUPTION); } - field += BTR_EXTERN_SPACE_ID - BTR_EXTERN_FIELD_REF_SIZE + len; + field += len - (BTR_EXTERN_FIELD_REF_SIZE - BTR_EXTERN_SPACE_ID); mach_write_to_4(field, get_space_id()); @@ -1631,11 +1629,8 @@ PageConverter::adjust_cluster_index_blob_ref( /** Purge delete-marked records, only if it is possible to do so without re-organising the B+tree. -@param offsets current row offsets. 
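The FTS sort code above switches its temporary-file descriptors from int to pfs_os_file_t and tests them against OS_FILE_CLOSED rather than fd < 0, which would be meaningless for a HANDLE-backed handle type on Windows. A simplified model of that check; the typedef and sentinel value here are illustrative only:

typedef int os_file_sketch_t;		/* a HANDLE on Windows builds */
static const os_file_sketch_t FILE_CLOSED_SKETCH = -1;	/* assumed sentinel */

static bool merge_file_is_open(os_file_sketch_t fd)
{
	/* Compare with the sentinel instead of testing fd < 0. */
	return fd != FILE_CLOSED_SKETCH;
}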
@return true if purge succeeded */ -inline -bool -PageConverter::purge(const ulint* offsets) UNIV_NOTHROW +inline bool PageConverter::purge() UNIV_NOTHROW { const dict_index_t* index = m_index->m_srv_index; @@ -1659,7 +1654,6 @@ PageConverter::purge(const ulint* offsets) UNIV_NOTHROW inline dberr_t PageConverter::adjust_cluster_record( - const dict_index_t* index, rec_t* rec, const ulint* offsets) UNIV_NOTHROW { @@ -1705,12 +1699,9 @@ PageConverter::update_records( m_rec_iter.open(block); - if (!page_is_leaf(block->frame)) { - return DB_SUCCESS; - } - while (!m_rec_iter.end()) { rec_t* rec = m_rec_iter.current(); + ibool deleted = rec_get_deleted_flag(rec, comp); /* For the clustered index we have to adjust the BLOB @@ -1726,8 +1717,7 @@ PageConverter::update_records( if (clust_index) { - dberr_t err = adjust_cluster_record( - m_index->m_srv_index, rec, m_offsets); + dberr_t err = adjust_cluster_record(rec, m_offsets); if (err != DB_SUCCESS) { return(err); @@ -1741,7 +1731,7 @@ PageConverter::update_records( /* A successful purge will move the cursor to the next record. */ - if (!purge(m_offsets)) { + if (!purge()) { m_rec_iter.next(); } @@ -1839,7 +1829,7 @@ PageConverter::update_index_page( return(DB_SUCCESS); } - return(update_records(block)); + return page_is_leaf(block->frame) ? update_records(block) : DB_SUCCESS; } /** Validate the space flags and update tablespace header page. @@ -2108,8 +2098,6 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_import_adjust_root_pages_of_secondary_indexes( /*==============================================*/ - row_prebuilt_t* prebuilt, /*!< in/out: prebuilt from - handler */ trx_t* trx, /*!< in: transaction used for the import */ dict_table_t* table, /*!< in: table the indexes @@ -2367,8 +2355,7 @@ row_import_cfg_read_index_fields( /*=============================*/ FILE* file, /*!< in: file to write to */ THD* thd, /*!< in/out: session */ - row_index_t* index, /*!< Index being read in */ - row_import* cfg) /*!< in/out: meta-data read */ + row_index_t* index) /*!< Index being read in */ { byte row[sizeof(ib_uint32_t) * 3]; ulint n_fields = index->m_n_fields; @@ -2588,8 +2575,7 @@ row_import_read_index_data( return(err); } - err = row_import_cfg_read_index_fields( - file, thd, cfg_index, cfg); + err = row_import_cfg_read_index_fields(file, thd, cfg_index); if (err != DB_SUCCESS) { return(err); @@ -2919,14 +2905,14 @@ row_import_read_v1( const ulint logical_page_size = mach_read_from_4(ptr); ptr += sizeof(ib_uint32_t); - if (logical_page_size != univ_page_size.logical()) { + if (logical_page_size != srv_page_size) { ib_errf(thd, IB_LOG_LEVEL_ERROR, ER_TABLE_SCHEMA_MISMATCH, "Tablespace to be imported has a different" " page size than this server. 
Server page size" - " is " ULINTPF ", whereas tablespace page size" + " is %lu, whereas tablespace page size" " is " ULINTPF, - univ_page_size.logical(), + srv_page_size, logical_page_size); return(DB_ERROR); @@ -2965,7 +2951,6 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_import_read_meta_data( /*======================*/ - dict_table_t* table, /*!< in: table */ FILE* file, /*!< in: File to read from */ THD* thd, /*!< in: session */ row_import& cfg) /*!< out: contents of the .cfg file */ @@ -3039,7 +3024,7 @@ row_import_read_cfg( cfg.m_missing = false; - err = row_import_read_meta_data(table, file, thd, cfg); + err = row_import_read_meta_data(file, thd, cfg); fclose(file); } @@ -3598,8 +3583,8 @@ fil_tablespace_iterate( We allocate an extra page in case it is a compressed table. One page is to ensure alignement. */ - void* page_ptr = ut_malloc_nokey(3 * UNIV_PAGE_SIZE); - byte* page = static_cast(ut_align(page_ptr, UNIV_PAGE_SIZE)); + void* page_ptr = ut_malloc_nokey(3U << srv_page_size_shift); + byte* page = static_cast(ut_align(page_ptr, srv_page_size)); buf_block_t* block = reinterpret_cast (ut_zalloc_nokey(sizeof *block)); @@ -3615,7 +3600,7 @@ fil_tablespace_iterate( request.disable_partial_io_warnings(); err = os_file_read_no_error_handling(request, file, page, 0, - UNIV_PAGE_SIZE, 0); + srv_page_size, 0); if (err == DB_SUCCESS) { err = callback.init(file_size, block); @@ -3655,23 +3640,24 @@ fil_tablespace_iterate( /* Add an extra page for compressed page scratch area. */ void* io_buffer = ut_malloc_nokey( - (2 + iter.n_io_buffers) * UNIV_PAGE_SIZE); + (2 + iter.n_io_buffers) << srv_page_size_shift); iter.io_buffer = static_cast( - ut_align(io_buffer, UNIV_PAGE_SIZE)); + ut_align(io_buffer, srv_page_size)); void* crypt_io_buffer = NULL; if (iter.crypt_data) { crypt_io_buffer = ut_malloc_nokey( - (2 + iter.n_io_buffers) * UNIV_PAGE_SIZE); + (2 + iter.n_io_buffers) + << srv_page_size_shift); iter.crypt_io_buffer = static_cast( - ut_align(crypt_io_buffer, UNIV_PAGE_SIZE)); + ut_align(crypt_io_buffer, srv_page_size)); } if (block->page.zip.ssize) { ut_ad(iter.n_io_buffers == 1); block->frame = iter.io_buffer; - block->page.zip.data = block->frame + UNIV_PAGE_SIZE; + block->page.zip.data = block->frame + srv_page_size; } err = fil_iterate(iter, block, callback); @@ -3723,7 +3709,7 @@ row_import_for_mysql( /* The caller assured that this is not read_only_mode and that no temorary tablespace is being imported. */ ut_ad(!srv_read_only_mode); - ut_ad(!dict_table_is_temporary(table)); + ut_ad(!table->is_temporary()); ut_ad(table->space_id); ut_ad(table->space_id < SRV_LOG_SPACE_FIRST_ID); @@ -3752,8 +3738,6 @@ row_import_for_mysql( /* Assign an undo segment for the transaction, so that the transaction will be recovered after a crash. */ - mutex_enter(&trx->undo_mutex); - /* TODO: Do not write any undo log for the IMPORT cleanup. */ { mtr_t mtr; @@ -3762,8 +3746,6 @@ row_import_for_mysql( mtr.commit(); } - mutex_exit(&trx->undo_mutex); - DBUG_EXECUTE_IF("ib_import_undo_assign_failure", err = DB_TOO_MANY_CONCURRENT_TRXS;); @@ -3889,6 +3871,17 @@ row_import_for_mysql( return(row_import_cleanup(prebuilt, trx, err)); } + /* On DISCARD TABLESPACE, we did not drop any adaptive hash + index entries. If we replaced the discarded tablespace with a + smaller one here, there could still be some adaptive hash + index entries that point to cached garbage pages in the buffer + pool, because PageConverter::operator() only evicted those + pages that were replaced by the imported pages. 
We must + discard all remaining adaptive hash index entries, because the + adaptive hash index must be a subset of the table contents; + false positives are not tolerated. */ + buf_LRU_drop_page_hash_for_tablespace(table); + row_mysql_lock_data_dictionary(trx); /* If the table is stored in a remote tablespace, we need to @@ -4001,7 +3994,7 @@ row_import_for_mysql( during the page conversion phase. */ err = row_import_adjust_root_pages_of_secondary_indexes( - prebuilt, trx, table, cfg); + trx, table, cfg); DBUG_EXECUTE_IF("ib_import_sec_root_adjust_failure", err = DB_CORRUPTION;); diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index c247c66bd96..97b79b705b2 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -359,7 +359,8 @@ row_ins_clust_index_entry_by_modify( cursor->index, entry, rec, NULL, true, thr_get_trx(thr), heap, mysql_table); if (mode != BTR_MODIFY_TREE) { - ut_ad((mode & ~BTR_ALREADY_S_LATCHED) == BTR_MODIFY_LEAF); + ut_ad((mode & ulint(~BTR_ALREADY_S_LATCHED)) + == BTR_MODIFY_LEAF); /* Try optimistic updating of the record, keeping changes within the page */ @@ -640,7 +641,7 @@ row_ins_cascade_calc_update_vec( && dict_table_is_fts_column( table->fts->indexes, dict_col_get_no(col), - dict_col_is_virtual(col)) + col->is_virtual()) != ULINT_UNDEFINED) { affects_fulltext = true; } @@ -914,8 +915,7 @@ row_ins_invalidate_query_cache( const char* name) /*!< in: table name prefixed with database name and a '/' character */ { - ulint len = strlen(name) + 1; - innobase_invalidate_query_cache(thr_get_trx(thr), name, len); + innobase_invalidate_query_cache(thr_get_trx(thr), name); } @@ -1300,9 +1300,9 @@ row_ins_foreign_check_on_constraint( if (!affects_fulltext && table->fts && dict_table_is_fts_column( table->fts->indexes, - dict_index_get_nth_col_no(index, i), - dict_col_is_virtual( - dict_index_get_nth_col(index, i))) + dict_index_get_nth_col(index, i)->ind, + dict_index_get_nth_col(index, i) + ->is_virtual()) != ULINT_UNDEFINED) { affects_fulltext = true; } @@ -1329,9 +1329,8 @@ row_ins_foreign_check_on_constraint( for (ulint i = 0; i < foreign->n_fields; i++) { if (dict_table_is_fts_column( table->fts->indexes, - dict_index_get_nth_col_no(index, i), - dict_col_is_virtual( - dict_index_get_nth_col(index, i))) + dict_index_get_nth_col(index, i)->ind, + dict_index_get_nth_col(index, i)->is_virtual()) != ULINT_UNDEFINED) { affects_fulltext = true; break; @@ -2577,7 +2576,7 @@ row_ins_clust_index_entry_low( mtr_start(&mtr); - if (dict_table_is_temporary(index->table)) { + if (index->table->is_temporary()) { /* Disable REDO logging as the lifetime of temp-tables is limited to server or connection lifetime and so REDO information is not needed on restart for recovery. 
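The assertion above masks the latch mode with ulint(~BTR_ALREADY_S_LATCHED): negating an int-typed flag and ANDing it into an unsigned mode otherwise draws sign-conversion warnings, so the widening is made explicit. The flag values below are invented; only the bit arithmetic is the point:

#include <cassert>

/* Values invented for illustration. */
enum { MODIFY_LEAF_SKETCH = 2, ALREADY_S_LATCHED_SKETCH = 1 << 14 };

void check_mode(unsigned long mode)
{
	/* Clear the "already S-latched" hint bit, then compare. */
	assert((mode & static_cast<unsigned long>(~ALREADY_S_LATCHED_SKETCH))
	       == MODIFY_LEAF_SKETCH);
	(void) mode;
}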
@@ -2638,7 +2637,7 @@ row_ins_clust_index_entry_low( } #endif /* UNIV_DEBUG */ - if (UNIV_UNLIKELY(entry->info_bits)) { + if (UNIV_UNLIKELY(entry->info_bits != 0)) { ut_ad(entry->info_bits == REC_INFO_DEFAULT_ROW); ut_ad(flags == BTR_NO_LOCKING_FLAG); ut_ad(index->is_instant()); @@ -2739,7 +2738,7 @@ do_insert: rec_t* insert_rec; if (mode != BTR_MODIFY_TREE) { - ut_ad((mode & ~BTR_ALREADY_S_LATCHED) + ut_ad((mode & ulint(~BTR_ALREADY_S_LATCHED)) == BTR_MODIFY_LEAF); err = btr_cur_optimistic_insert( flags, cursor, &offsets, &offsets_heap, @@ -3132,7 +3131,7 @@ row_ins_sec_index_entry_low( if (err == DB_SUCCESS && dict_index_is_spatial(index) && rtr_info.mbr_adj) { - err = rtr_ins_enlarge_mbr(&cursor, thr, &mtr); + err = rtr_ins_enlarge_mbr(&cursor, &mtr); } } else { rec_t* insert_rec; @@ -3146,7 +3145,7 @@ row_ins_sec_index_entry_low( if (err == DB_SUCCESS && dict_index_is_spatial(index) && rtr_info.mbr_adj) { - err = rtr_ins_enlarge_mbr(&cursor, thr, &mtr); + err = rtr_ins_enlarge_mbr(&cursor, &mtr); } } else { ut_ad(mode == BTR_MODIFY_TREE); @@ -3171,7 +3170,7 @@ row_ins_sec_index_entry_low( if (err == DB_SUCCESS && dict_index_is_spatial(index) && rtr_info.mbr_adj) { - err = rtr_ins_enlarge_mbr(&cursor, thr, &mtr); + err = rtr_ins_enlarge_mbr(&cursor, &mtr); } } @@ -3228,7 +3227,7 @@ row_ins_clust_index_entry( n_uniq = dict_index_is_unique(index) ? index->n_uniq : 0; ulint flags = index->table->no_rollback() ? BTR_NO_ROLLBACK - : dict_table_is_temporary(index->table) + : index->table->is_temporary() ? BTR_NO_LOCKING_FLAG : 0; const ulint orig_n_fields = entry->n_fields; @@ -3314,7 +3313,7 @@ row_ins_sec_index_entry( /* Try first optimistic descent to the B-tree */ log_free_check(); - ulint flags = dict_table_is_temporary(index->table) + ulint flags = index->table->is_temporary() ? BTR_NO_LOCKING_FLAG : 0; @@ -3446,7 +3445,7 @@ row_ins_index_entry_set_vals( col = ind_field->col; } - if (dict_col_is_virtual(col)) { + if (col->is_virtual()) { const dict_v_col_t* v_col = reinterpret_cast(col); ut_ad(dtuple_get_n_fields(row) @@ -3754,7 +3753,7 @@ row_ins( } } - if (node->duplicate && dict_table_is_temporary(node->table)) { + if (node->duplicate && node->table->is_temporary()) { ut_ad(thr_get_trx(thr)->error_state == DB_DUPLICATE_KEY); /* For TEMPORARY TABLE, we won't lock anything, @@ -3774,8 +3773,7 @@ row_ins( node->index = NULL; node->entry = NULL; break;); /* Skip corrupted secondary index and its entry */ - while (node->index && dict_index_is_corrupted(node->index)) { - + while (node->index && node->index->is_corrupted()) { node->index = dict_table_get_next_index(node->index); node->entry = UT_LIST_GET_NEXT(tuple_list, node->entry); } diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc index 82f2b740871..bf20debbef6 100644 --- a/storage/innobase/row/row0log.cc +++ b/storage/innobase/row/row0log.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under @@ -171,7 +171,7 @@ When head.blocks == tail.blocks, the reader will access tail.block directly. When also head.bytes == tail.bytes, both counts will be reset to 0 and the file will be truncated. 
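A recurring change in this patch replaces free-function predicates with member functions: dict_table_is_temporary(t) becomes t->is_temporary(), dict_index_is_corrupted(i) becomes i->is_corrupted(), dict_col_is_virtual(c) becomes c->is_virtual(). Such accessors are simple flag tests, roughly as sketched here with invented member and flag names:

/* Member and flag names are invented for the sketch. */
struct table_sketch {
	unsigned		flags2;
	static const unsigned	TEMPORARY_BIT = 1U << 0;

	bool is_temporary() const { return (flags2 & TEMPORARY_BIT) != 0; }
};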
*/ struct row_log_t { - int fd; /*!< file descriptor */ + pfs_os_file_t fd; /*!< file descriptor */ ib_mutex_t mutex; /*!< mutex protecting error, max_trx and tail */ page_no_map* blobs; /*!< map of page numbers of off-page columns @@ -183,8 +183,9 @@ struct row_log_t { index that is being created online */ bool same_pk;/*!< whether the definition of the PRIMARY KEY has remained the same */ - const dtuple_t* add_cols; - /*!< default values of added columns, or NULL */ + const dtuple_t* defaults; + /*!< default values of added, changed columns, + or NULL */ const ulint* col_map;/*!< mapping of old column numbers to new ones, or NULL if !table */ dberr_t error; /*!< error that occurred during online @@ -220,24 +221,42 @@ struct row_log_t { decryption or NULL */ const char* path; /*!< where to create temporary file during log operation */ + /** the number of core fields in the clustered index of the + source table; before row_log_table_apply() completes, the + table could be emptied, so that table->is_instant() no longer holds, + but all log records must be in the "instant" format. */ + unsigned n_core_fields; + bool ignore; /*!< Whether the alter ignore is being used; + if not, NULL values will not be converted to + defaults */ + + /** Determine whether the log should be in the 'instant ADD' format + @param[in] index the clustered index of the source table + @return whether to use the 'instant ADD COLUMN' format */ + bool is_instant(const dict_index_t* index) const + { + ut_ad(table); + ut_ad(n_core_fields <= index->n_fields); + return n_core_fields != index->n_fields; + } }; /** Create the file or online log if it does not exist. @param[in,out] log online rebuild log @return true if success, false if not */ static MY_ATTRIBUTE((warn_unused_result)) -int +pfs_os_file_t row_log_tmpfile( row_log_t* log) { DBUG_ENTER("row_log_tmpfile"); - if (log->fd < 0) { + if (log->fd == OS_FILE_CLOSED) { log->fd = row_merge_file_create_low(log->path); DBUG_EXECUTE_IF("row_log_tmpfile_fail", - if (log->fd > 0) + if (log->fd != OS_FILE_CLOSED) row_merge_file_destroy_low(log->fd); - log->fd = -1;); - if (log->fd >= 0) { + log->fd = OS_FILE_CLOSED;); + if (log->fd != OS_FILE_CLOSED) { MONITOR_ATOMIC_INC(MONITOR_ALTER_TABLE_LOG_FILES); } } @@ -309,7 +328,7 @@ row_log_online_op( ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_S) || rw_lock_own(dict_index_get_lock(index), RW_LOCK_X)); - if (dict_index_is_corrupted(index)) { + if (index->is_corrupted()) { return; } @@ -392,7 +411,7 @@ row_log_online_op( UNIV_MEM_ASSERT_RW(buf, srv_sort_buf_size); - if (row_log_tmpfile(log) < 0) { + if (row_log_tmpfile(log) == OS_FILE_CLOSED) { log->error = DB_OUT_OF_MEMORY; goto err_exit; } @@ -413,7 +432,7 @@ row_log_online_op( } log->tail.blocks++; - if (!os_file_write_int_fd( + if (!os_file_write( request, "(modification log)", log->fd, @@ -485,6 +504,8 @@ err_exit: *avail = srv_sort_buf_size - log->tail.bytes; if (size > *avail) { + /* Make sure log->tail.buf is large enough */ + ut_ad(size <= sizeof log->tail.buf); return(log->tail.buf); } else { return(log->tail.block + log->tail.bytes); @@ -528,7 +549,7 @@ row_log_table_close_func( UNIV_MEM_ASSERT_RW(buf, srv_sort_buf_size); - if (row_log_tmpfile(log) < 0) { + if (row_log_tmpfile(log) == OS_FILE_CLOSED) { log->error = DB_OUT_OF_MEMORY; goto err_exit; } @@ -549,7 +570,7 @@ row_log_table_close_func( } log->tail.blocks++; - if (!os_file_write_int_fd( + if (!os_file_write( request, "(modification log)", log->fd, @@ -614,12 +635,10 @@ row_log_table_delete( { ulint 
old_pk_extra_size; ulint old_pk_size; - ulint ext_size = 0; ulint mrec_size; ulint avail_size; mem_heap_t* heap = NULL; const dtuple_t* old_pk; - row_ext_t* ext; ut_ad(dict_index_is_clust(index)); ut_ad(rec_offs_validate(rec, index, offsets)); @@ -629,8 +648,8 @@ row_log_table_delete( &index->lock, RW_LOCK_FLAG_S | RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX)); - if (dict_index_is_corrupted(index) - || !dict_index_is_online_ddl(index) + if (index->online_status != ONLINE_INDEX_CREATION + || (index->type & DICT_CORRUPT) || index->table->corrupted || index->online_log->error != DB_SUCCESS) { return; } @@ -651,8 +670,9 @@ row_log_table_delete( fields of the record. */ heap = mem_heap_create( DATA_TRX_ID_LEN - + DTUPLE_EST_ALLOC(new_index->n_uniq + 2)); - old_pk = tuple = dtuple_create(heap, new_index->n_uniq + 2); + + DTUPLE_EST_ALLOC(unsigned(new_index->n_uniq) + 2)); + old_pk = tuple = dtuple_create( + heap, unsigned(new_index->n_uniq) + 2); dict_index_copy_types(tuple, new_index, tuple->n_fields); dtuple_set_n_fields_cmp(tuple, new_index->n_uniq); @@ -711,72 +731,20 @@ row_log_table_delete( &old_pk_extra_size); ut_ad(old_pk_extra_size < 0x100); - mrec_size = 6 + old_pk_size; - - /* Log enough prefix of the BLOB unless both the - old and new table are in COMPACT or REDUNDANT format, - which store the prefix in the clustered index record. */ - if (rec_offs_any_extern(offsets) - && (dict_table_has_atomic_blobs(index->table) - || dict_table_has_atomic_blobs(new_table))) { - - /* Build a cache of those off-page column prefixes - that are referenced by secondary indexes. It can be - that none of the off-page columns are needed. */ - row_build(ROW_COPY_DATA, index, rec, - offsets, NULL, NULL, NULL, &ext, heap); - if (ext) { - /* Log the row_ext_t, ext->ext and ext->buf */ - ext_size = ext->n_ext * ext->max_len - + sizeof(*ext) - + ext->n_ext * sizeof(ulint) - + (ext->n_ext - 1) * sizeof ext->len; - mrec_size += ext_size; - } - } + /* 2 = 1 (extra_size) + at least 1 byte payload */ + mrec_size = 2 + old_pk_size; if (byte* b = row_log_table_open(index->online_log, mrec_size, &avail_size)) { *b++ = ROW_T_DELETE; *b++ = static_cast(old_pk_extra_size); - /* Log the size of external prefix we saved */ - mach_write_to_4(b, ext_size); - b += 4; - rec_convert_dtuple_to_temp( b + old_pk_extra_size, new_index, old_pk->fields, old_pk->n_fields); b += old_pk_size; - if (ext_size) { - ulint cur_ext_size = sizeof(*ext) - + (ext->n_ext - 1) * sizeof ext->len; - - memcpy(b, ext, cur_ext_size); - b += cur_ext_size; - - /* Check if we need to col_map to adjust the column - number. If columns were added/removed/reordered, - adjust the column number. */ - if (const ulint* col_map = - index->online_log->col_map) { - for (ulint i = 0; i < ext->n_ext; i++) { - const_cast(ext->ext[i]) = - col_map[ext->ext[i]]; - } - } - - memcpy(b, ext->ext, ext->n_ext * sizeof(*ext->ext)); - b += ext->n_ext * sizeof(*ext->ext); - - ext_size -= cur_ext_size - + ext->n_ext * sizeof(*ext->ext); - memcpy(b, ext->buf, ext_size); - b += ext_size; - } - row_log_table_close(index, b, mrec_size, avail_size); } @@ -867,12 +835,13 @@ row_log_table_low_redundant( DATA_ROLL_PTR_LEN); } - rec_comp_status_t status = index->is_instant() + const bool is_instant = index->online_log->is_instant(index); + rec_comp_status_t status = is_instant ? 
REC_STATUS_COLUMNS_ADDED : REC_STATUS_ORDINARY; size = rec_get_converted_size_temp( index, tuple->fields, tuple->n_fields, &extra_size, status); - if (index->is_instant()) { + if (is_instant) { size++; extra_size++; } @@ -923,8 +892,8 @@ row_log_table_low_redundant( } if (status == REC_STATUS_COLUMNS_ADDED) { - ut_ad(index->is_instant()); - if (n_fields <= index->n_core_fields) { + ut_ad(is_instant); + if (n_fields <= index->online_log->n_core_fields) { status = REC_STATUS_ORDINARY; } *b = status; @@ -995,8 +964,8 @@ row_log_table_low( ut_ad(!old_pk || !insert); ut_ad(!old_pk || old_pk->n_v_fields == 0); - if (dict_index_is_corrupted(index) - || !dict_index_is_online_ddl(index) + if (index->online_status != ONLINE_INDEX_CREATION + || (index->type & DICT_CORRUPT) || index->table->corrupted || index->online_log->error != DB_SUCCESS) { return; } @@ -1014,11 +983,12 @@ row_log_table_low( const ulint omit_size = REC_N_NEW_EXTRA_BYTES; const ulint rec_extra_size = rec_offs_extra_size(offsets) - omit_size; - extra_size = rec_extra_size + index->is_instant(); + const bool is_instant = index->online_log->is_instant(index); + extra_size = rec_extra_size + is_instant; mrec_size = ROW_LOG_HEADER_SIZE + (extra_size >= 0x80) + rec_offs_size(offsets) - omit_size - + index->is_instant(); + + is_instant; if (insert || index->online_log->same_pk) { ut_ad(!old_pk); @@ -1063,7 +1033,7 @@ row_log_table_low( *b++ = static_cast(extra_size); } - if (index->is_instant()) { + if (is_instant) { *b++ = rec_get_status(rec); } else { ut_ad(rec_get_status(rec) == REC_STATUS_ORDINARY); @@ -1127,7 +1097,6 @@ row_log_table_get_pk_old_col( } /** Maps an old table column of a PRIMARY KEY column. -@param[in] col old table column (before ALTER TABLE) @param[in] ifield clustered index field in the new table (after ALTER TABLE) @param[in,out] dfield clustered index tuple field in the new table @@ -1143,7 +1112,6 @@ table static dberr_t row_log_table_get_pk_col( - const dict_col_t* col, const dict_field_t* ifield, dfield_t* dfield, mem_heap_t* heap, @@ -1151,7 +1119,9 @@ row_log_table_get_pk_col( const ulint* offsets, ulint i, const page_size_t& page_size, - ulint max_len) + ulint max_len, + bool ignore, + const dtuple_t* defaults) { const byte* field; ulint len; @@ -1159,7 +1129,12 @@ row_log_table_get_pk_col( field = rec_get_nth_field(rec, offsets, i, &len); if (len == UNIV_SQL_NULL) { - return(DB_INVALID_NULL); + if (!ignore || !defaults->fields[i].data) { + return(DB_INVALID_NULL); + } + + field = static_cast(defaults->fields[i].data); + len = defaults->fields[i].len; } if (rec_offs_nth_extern(offsets, i)) { @@ -1271,7 +1246,7 @@ row_log_table_get_pk( if (!offsets) { size += (1 + REC_OFFS_HEADER_SIZE - + index->n_fields) + + unsigned(index->n_fields)) * sizeof *offsets; } @@ -1322,8 +1297,9 @@ row_log_table_get_pk( } log->error = row_log_table_get_pk_col( - col, ifield, dfield, *heap, - rec, offsets, i, page_size, max_len); + ifield, dfield, *heap, + rec, offsets, i, page_size, max_len, + log->ignore, log->defaults); if (log->error != DB_SUCCESS) { err_exit: @@ -1338,10 +1314,10 @@ err_exit: /* No matching column was found in the old table, so this must be an added column. Copy the default value. 
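row_log_table_get_pk_col() above gains the ignore/defaults pair: during ALTER IGNORE, a NULL read from the old table for a NOT NULL key column is replaced by the new column's default instead of failing with DB_INVALID_NULL. The decision logic, stripped down and with invented names:

/* Struct and function names are invented for the sketch. */
struct field_sketch {
	const void*	data;	/* null means SQL NULL / no default given */
	unsigned long	len;
};

/* Returns false when the conversion must fail with DB_INVALID_NULL. */
static bool pick_not_null_value(const field_sketch& stored,
				const field_sketch& dflt,
				bool ignore, field_sketch* out)
{
	if (stored.data) {
		*out = stored;		/* a real value was stored */
		return true;
	}
	if (!ignore || !dflt.data) {
		return false;		/* NULL in a NOT NULL column */
	}
	*out = dflt;			/* ALTER IGNORE: use the default */
	return true;
}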
*/ - ut_ad(log->add_cols); + ut_ad(log->defaults); dfield_copy(dfield, dtuple_get_nth_field( - log->add_cols, col_no)); + log->defaults, col_no)); mbminlen = dfield->type.mbminlen; mbmaxlen = dfield->type.mbmaxlen; prtype = dfield->type.prtype; @@ -1512,8 +1488,8 @@ row_log_table_apply_convert_mrec( *error = DB_SUCCESS; /* This is based on row_build(). */ - if (log->add_cols) { - row = dtuple_copy(log->add_cols, heap); + if (log->defaults) { + row = dtuple_copy(log->defaults, heap); /* dict_table_copy_types() would set the fields to NULL */ for (ulint i = 0; i < dict_table_get_n_cols(log->table); i++) { dict_col_copy_type( @@ -1634,9 +1610,17 @@ blob_done: if ((new_col->prtype & DATA_NOT_NULL) && dfield_is_null(dfield)) { - /* We got a NULL value for a NOT NULL column. */ - *error = DB_INVALID_NULL; - return(NULL); + + const dfield_t& default_field + = log->defaults->fields[col_no]; + + if (!log->ignore || !default_field.data) { + /* We got a NULL value for a NOT NULL column. */ + *error = DB_INVALID_NULL; + return NULL; + } + + *dfield = default_field; } /* Adjust the DATA_NOT_NULL flag in the parsed row. */ @@ -1778,15 +1762,13 @@ row_log_table_apply_insert( /******************************************************//** Deletes a record from a table that is being rebuilt. @return DB_SUCCESS or error code */ -static MY_ATTRIBUTE((warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_log_table_apply_delete_low( /*===========================*/ btr_pcur_t* pcur, /*!< in/out: B-tree cursor, will be trashed */ const ulint* offsets, /*!< in: offsets on pcur */ - const row_ext_t* save_ext, /*!< in: saved external field - info, or NULL */ mem_heap_t* heap, /*!< in/out: memory heap */ mtr_t* mtr) /*!< in/out: mini-transaction, will be committed */ @@ -1807,12 +1789,7 @@ row_log_table_apply_delete_low( /* Build a row template for purging secondary index entries. */ row = row_build( ROW_COPY_DATA, index, btr_pcur_get_rec(pcur), - offsets, NULL, NULL, NULL, - save_ext ? NULL : &ext, heap); - - if (!save_ext) { - save_ext = ext; - } + offsets, NULL, NULL, NULL, &ext, heap); } else { row = NULL; } @@ -1831,7 +1808,7 @@ row_log_table_apply_delete_low( } const dtuple_t* entry = row_build_index_entry( - row, save_ext, index, heap); + row, ext, index, heap); mtr->start(); index->set_modified(*mtr); btr_pcur_open(index, entry, PAGE_CUR_LE, @@ -1876,11 +1853,10 @@ flag_ok: /******************************************************//** Replays a delete operation on a table that was rebuilt. 
@return DB_SUCCESS or error code */ -static MY_ATTRIBUTE((nonnull(1, 3, 4, 5, 6, 7), warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_log_table_apply_delete( /*=======================*/ - que_thr_t* thr, /*!< in: query graph */ ulint trx_id_col, /*!< in: position of DB_TRX_ID in the new clustered index */ @@ -1889,10 +1865,7 @@ row_log_table_apply_delete( mem_heap_t* offsets_heap, /*!< in/out: memory heap that can be emptied */ mem_heap_t* heap, /*!< in/out: memory heap */ - const row_log_t* log, /*!< in: online log */ - const row_ext_t* save_ext, /*!< in: saved external field - info, or NULL */ - ulint ext_size) /*!< in: external field size */ + const row_log_t* log) /*!< in: online log */ { dict_table_t* new_table = log->table; dict_index_t* index = dict_table_get_first_index(new_table); @@ -1996,9 +1969,7 @@ all_done: } } - return(row_log_table_apply_delete_low(&pcur, - offsets, save_ext, - heap, &mtr)); + return row_log_table_apply_delete_low(&pcur, offsets, heap, &mtr); } /******************************************************//** @@ -2209,7 +2180,7 @@ func_exit_committed: /* Some BLOBs are missing, so we are interpreting this ROW_T_UPDATE as ROW_T_DELETE (see *1). */ error = row_log_table_apply_delete_low( - &pcur, cur_offsets, NULL, heap, &mtr); + &pcur, cur_offsets, heap, &mtr); goto func_exit_committed; } @@ -2247,7 +2218,7 @@ func_exit_committed: } error = row_log_table_apply_delete_low( - &pcur, cur_offsets, NULL, heap, &mtr); + &pcur, cur_offsets, heap, &mtr); ut_ad(mtr.has_committed()); if (error == DB_SUCCESS) { @@ -2393,8 +2364,6 @@ row_log_table_apply_op( ulint extra_size; const mrec_t* next_mrec; dtuple_t* old_pk; - row_ext_t* ext; - ulint ext_size; ut_ad(dict_index_is_clust(dup->index)); ut_ad(dup->index->table != log->table); @@ -2402,11 +2371,12 @@ row_log_table_apply_op( *error = DB_SUCCESS; - /* 3 = 1 (op type) + 1 (ext_size) + at least 1 byte payload */ + /* 3 = 1 (op type) + 1 (extra_size) + at least 1 byte payload */ if (mrec + 3 >= mrec_end) { return(NULL); } + const bool is_instant = log->is_instant(dup->index); const mrec_t* const mrec_start = mrec; switch (*mrec++) { @@ -2426,7 +2396,7 @@ row_log_table_apply_op( mrec += extra_size; - ut_ad(extra_size || !dup->index->is_instant()); + ut_ad(extra_size || !is_instant); if (mrec > mrec_end) { return(NULL); @@ -2434,7 +2404,8 @@ row_log_table_apply_op( rec_offs_set_n_fields(offsets, dup->index->n_fields); rec_init_offsets_temp(mrec, dup->index, offsets, - dup->index->is_instant() + log->n_core_fields, + is_instant ? static_cast( *(mrec - extra_size)) : REC_STATUS_ORDINARY); @@ -2444,7 +2415,7 @@ row_log_table_apply_op( if (next_mrec > mrec_end) { return(NULL); } else { - log->head.total += next_mrec - mrec_start; + log->head.total += ulint(next_mrec - mrec_start); *error = row_log_table_apply_insert( thr, mrec, offsets, offsets_heap, heap, dup); @@ -2452,14 +2423,12 @@ row_log_table_apply_op( break; case ROW_T_DELETE: - /* 1 (extra_size) + 4 (ext_size) + at least 1 (payload) */ - if (mrec + 6 >= mrec_end) { + /* 1 (extra_size) + at least 1 (payload) */ + if (mrec + 2 >= mrec_end) { return(NULL); } extra_size = *mrec++; - ext_size = mach_read_from_4(mrec); - mrec += 4; ut_ad(mrec < mrec_end); /* We assume extra_size < 0x100 for the PRIMARY KEY prefix. @@ -2469,43 +2438,19 @@ row_log_table_apply_op( /* The ROW_T_DELETE record was converted by rec_convert_dtuple_to_temp() using new_index. 
*/ ut_ad(!new_index->is_instant()); - rec_offs_set_n_fields(offsets, new_index->n_uniq + 2); + rec_offs_set_n_fields(offsets, + unsigned(new_index->n_uniq) + 2); rec_init_offsets_temp(mrec, new_index, offsets); - next_mrec = mrec + rec_offs_data_size(offsets) + ext_size; - + next_mrec = mrec + rec_offs_data_size(offsets); if (next_mrec > mrec_end) { return(NULL); } - log->head.total += next_mrec - mrec_start; - - /* If there are external fields, retrieve those logged - prefix info and reconstruct the row_ext_t */ - if (ext_size) { - /* We use memcpy to avoid unaligned - access on some non-x86 platforms.*/ - ext = static_cast( - mem_heap_dup(heap, - mrec + rec_offs_data_size(offsets), - ext_size)); - - byte* ext_start = reinterpret_cast(ext); - - ulint ext_len = sizeof(*ext) - + (ext->n_ext - 1) * sizeof ext->len; - - ext->ext = reinterpret_cast(ext_start + ext_len); - ext_len += ext->n_ext * sizeof(*ext->ext); - - ext->buf = static_cast(ext_start + ext_len); - } else { - ext = NULL; - } + log->head.total += ulint(next_mrec - mrec_start); *error = row_log_table_apply_delete( - thr, new_trx_id_col, - mrec, offsets, offsets_heap, heap, - log, ext, ext_size); + new_trx_id_col, + mrec, offsets, offsets_heap, heap, log); break; case ROW_T_UPDATE: @@ -2517,7 +2462,7 @@ row_log_table_apply_op( is not changed, the log will only contain DB_TRX_ID,new_row. */ - if (dup->index->online_log->same_pk) { + if (log->same_pk) { ut_ad(new_index->n_uniq == dup->index->n_uniq); extra_size = *mrec++; @@ -2531,7 +2476,7 @@ row_log_table_apply_op( mrec += extra_size; - ut_ad(extra_size || !dup->index->is_instant()); + ut_ad(extra_size || !is_instant); if (mrec > mrec_end) { return(NULL); @@ -2539,7 +2484,8 @@ row_log_table_apply_op( rec_offs_set_n_fields(offsets, dup->index->n_fields); rec_init_offsets_temp(mrec, dup->index, offsets, - dup->index->is_instant() + log->n_core_fields, + is_instant ? static_cast( *(mrec - extra_size)) : REC_STATUS_ORDINARY); @@ -2583,7 +2529,8 @@ row_log_table_apply_op( /* The old_pk prefix was converted by rec_convert_dtuple_to_temp() using new_index. */ ut_ad(!new_index->is_instant()); - rec_offs_set_n_fields(offsets, new_index->n_uniq + 2); + rec_offs_set_n_fields(offsets, + unsigned(new_index->n_uniq) + 2); rec_init_offsets_temp(mrec, new_index, offsets); next_mrec = mrec + rec_offs_data_size(offsets); @@ -2593,7 +2540,8 @@ row_log_table_apply_op( /* Copy the PRIMARY KEY fields and DB_TRX_ID, DB_ROLL_PTR from mrec to old_pk. */ - old_pk = dtuple_create(heap, new_index->n_uniq + 2); + old_pk = dtuple_create( + heap, unsigned(new_index->n_uniq) + 2); dict_index_copy_types(old_pk, new_index, old_pk->n_fields); @@ -2629,7 +2577,7 @@ row_log_table_apply_op( mrec += extra_size; - ut_ad(extra_size || !dup->index->is_instant()); + ut_ad(extra_size || !is_instant); if (mrec > mrec_end) { return(NULL); @@ -2637,7 +2585,8 @@ row_log_table_apply_op( rec_offs_set_n_fields(offsets, dup->index->n_fields); rec_init_offsets_temp(mrec, dup->index, offsets, - dup->index->is_instant() + log->n_core_fields, + is_instant ? static_cast( *(mrec - extra_size)) : REC_STATUS_ORDINARY); @@ -2650,7 +2599,7 @@ row_log_table_apply_op( } ut_ad(next_mrec <= mrec_end); - log->head.total += next_mrec - mrec_start; + log->head.total += ulint(next_mrec - mrec_start); dtuple_set_n_fields_cmp(old_pk, new_index->n_uniq); *error = row_log_table_apply_update( @@ -2680,10 +2629,8 @@ row_log_progress_inc_per_block() /* We must increment the progress once per page (as in univ_page_size, usually 16KiB). 
One block here is srv_sort_buf_size (usually 1MiB). */ - const ulint pages_per_block = std::max( - static_cast( - srv_sort_buf_size / univ_page_size.physical()), - 1UL); + const ulint pages_per_block = std::max( + ulint(srv_sort_buf_size >> srv_page_size_shift), 1); /* Multiply by an artificial factor of 6 to even the pace with the rest of the ALTER TABLE phases, they process page_size amount @@ -2772,8 +2719,8 @@ row_log_table_apply_ops( offsets[0] = i; offsets[1] = dict_index_get_n_fields(index); - heap = mem_heap_create(UNIV_PAGE_SIZE); - offsets_heap = mem_heap_create(UNIV_PAGE_SIZE); + heap = mem_heap_create(srv_page_size); + offsets_heap = mem_heap_create(srv_page_size); has_index_lock = true; next_block: @@ -2787,7 +2734,7 @@ next_block: goto interrupted; } - if (dict_index_is_corrupted(index)) { + if (index->is_corrupted()) { error = DB_INDEX_CORRUPT; goto func_exit; } @@ -2865,9 +2812,9 @@ all_done: IORequest request(IORequest::READ); byte* buf = index->online_log->head.block; - if (!os_file_read_no_error_handling_int_fd( + if (!os_file_read_no_error_handling( request, index->online_log->fd, - buf, ofs, srv_sort_buf_size)) { + buf, ofs, srv_sort_buf_size, 0)) { ib::error() << "Unable to read temporary file" " for table " << index->table->name; @@ -2919,7 +2866,7 @@ all_done: ut_ad(mrec_end < (&index->online_log->head.buf)[1]); memcpy((mrec_t*) mrec_end, next_mrec, - (&index->online_log->head.buf)[1] - mrec_end); + ulint((&index->online_log->head.buf)[1] - mrec_end)); mrec = row_log_table_apply_op( thr, new_trx_id_col, dup, &error, offsets_heap, heap, @@ -2936,7 +2883,7 @@ all_done: it should proceed beyond the old end of the buffer. */ ut_a(mrec > mrec_end); - index->online_log->head.bytes = mrec - mrec_end; + index->online_log->head.bytes = ulint(mrec - mrec_end); next_mrec += index->online_log->head.bytes; } @@ -2981,7 +2928,15 @@ all_done: while (!trx_is_interrupted(trx)) { mrec = next_mrec; - ut_ad(mrec < mrec_end); + ut_ad(mrec <= mrec_end); + + if (mrec == mrec_end) { + /* We are at the end of the log. + Mark the replay all_done. 
*/ + if (has_index_lock) { + goto all_done; + } + } if (!has_index_lock) { /* We are applying operations from a different @@ -3044,7 +2999,8 @@ process_next_block: goto next_block; } else if (next_mrec != NULL) { ut_ad(next_mrec < next_mrec_end); - index->online_log->head.bytes += next_mrec - mrec; + index->online_log->head.bytes + += ulint(next_mrec - mrec); } else if (has_index_lock) { /* When mrec is within tail.block, it should be a complete record, because we are holding @@ -3056,8 +3012,8 @@ process_next_block: goto unexpected_eof; } else { memcpy(index->online_log->head.buf, mrec, - mrec_end - mrec); - mrec_end += index->online_log->head.buf - mrec; + ulint(mrec_end - mrec)); + mrec_end += ulint(index->online_log->head.buf - mrec); mrec = index->online_log->head.buf; goto process_next_block; } @@ -3147,12 +3103,13 @@ row_log_allocate( or NULL when creating a secondary index */ bool same_pk,/*!< in: whether the definition of the PRIMARY KEY has remained the same */ - const dtuple_t* add_cols, + const dtuple_t* defaults, /*!< in: default values of - added columns, or NULL */ + added, changed columns, or NULL */ const ulint* col_map,/*!< in: mapping of old column numbers to new ones, or NULL if !table */ - const char* path) /*!< in: where to create temporary file */ + const char* path, /*!< in: where to create temporary file */ + const bool ignore) /*!< in: alter ignore issued */ { row_log_t* log; DBUG_ENTER("row_log_allocate"); @@ -3162,7 +3119,7 @@ row_log_allocate( ut_ad(!table || index->table != table); ut_ad(same_pk || table); ut_ad(!table || col_map); - ut_ad(!add_cols || col_map); + ut_ad(!defaults || col_map); ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_X)); ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE)); ut_ad(trx->id); @@ -3173,13 +3130,13 @@ row_log_allocate( DBUG_RETURN(false); } - log->fd = -1; + log->fd = OS_FILE_CLOSED; mutex_create(LATCH_ID_INDEX_ONLINE_LOG, &log->mutex); log->blobs = NULL; log->table = table; log->same_pk = same_pk; - log->add_cols = add_cols; + log->defaults = defaults; log->col_map = col_map; log->error = DB_SUCCESS; log->min_trx = trx->id; @@ -3191,6 +3148,9 @@ row_log_allocate( log->head.blocks = log->head.bytes = 0; log->head.total = 0; log->path = path; + log->n_core_fields = index->n_core_fields; + ut_ad(!table || log->is_instant(index) == index->is_instant()); + log->ignore=ignore; dict_index_set_online_status(index, ONLINE_INDEX_CREATION); index->online_log = log; @@ -3286,7 +3246,7 @@ row_log_apply_op_low( ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_X) == has_index_lock); - ut_ad(!dict_index_is_corrupted(index)); + ut_ad(!index->is_corrupted()); ut_ad(trx_id != 0 || op == ROW_OP_DELETE); DBUG_LOG("ib_create_index", @@ -3530,7 +3490,7 @@ row_log_apply_op( ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_X) == has_index_lock); - if (dict_index_is_corrupted(index)) { + if (index->is_corrupted()) { *error = DB_INDEX_CORRUPT; return(NULL); } @@ -3647,8 +3607,8 @@ row_log_apply_ops( offsets[0] = i; offsets[1] = dict_index_get_n_fields(index); - offsets_heap = mem_heap_create(UNIV_PAGE_SIZE); - heap = mem_heap_create(UNIV_PAGE_SIZE); + offsets_heap = mem_heap_create(srv_page_size); + heap = mem_heap_create(srv_page_size); has_index_lock = true; next_block: @@ -3667,7 +3627,7 @@ next_block: goto func_exit; } - if (dict_index_is_corrupted(index)) { + if (index->is_corrupted()) { error = DB_INDEX_CORRUPT; goto func_exit; } @@ -3732,9 +3692,9 @@ all_done: byte* buf = index->online_log->head.block; - if 
(!os_file_read_no_error_handling_int_fd( + if (!os_file_read_no_error_handling( request, index->online_log->fd, - buf, ofs, srv_sort_buf_size)) { + buf, ofs, srv_sort_buf_size, 0)) { ib::error() << "Unable to read temporary file" " for index " << index->name; @@ -3776,7 +3736,7 @@ all_done: ut_ad(mrec_end < (&index->online_log->head.buf)[1]); memcpy((mrec_t*) mrec_end, next_mrec, - (&index->online_log->head.buf)[1] - mrec_end); + ulint((&index->online_log->head.buf)[1] - mrec_end)); mrec = row_log_apply_op( index, dup, &error, offsets_heap, heap, has_index_lock, index->online_log->head.buf, @@ -3792,7 +3752,7 @@ all_done: it should proceed beyond the old end of the buffer. */ ut_a(mrec > mrec_end); - index->online_log->head.bytes = mrec - mrec_end; + index->online_log->head.bytes = ulint(mrec - mrec_end); next_mrec += index->online_log->head.bytes; } @@ -3890,7 +3850,8 @@ process_next_block: goto next_block; } else if (next_mrec != NULL) { ut_ad(next_mrec < next_mrec_end); - index->online_log->head.bytes += next_mrec - mrec; + index->online_log->head.bytes + += ulint(next_mrec - mrec); } else if (has_index_lock) { /* When mrec is within tail.block, it should be a complete record, because we are holding @@ -3902,8 +3863,8 @@ process_next_block: goto unexpected_eof; } else { memcpy(index->online_log->head.buf, mrec, - mrec_end - mrec); - mrec_end += index->online_log->head.buf - mrec; + ulint(mrec_end - mrec)); + mrec_end += ulint(index->online_log->head.buf - mrec); mrec = index->online_log->head.buf; goto process_next_block; } @@ -3977,7 +3938,7 @@ row_log_apply( } if (error != DB_SUCCESS) { - ut_a(!dict_table_is_discarded(index->table)); + ut_ad(index->table->space); /* We set the flag directly instead of invoking dict_set_corrupted_index_cache_only(index) here, because the index is not "public" yet. 
*/ diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 394d54d408e..afa4ce208e9 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -144,7 +144,7 @@ public: ut_ad(dict_index_is_spatial(m_index)); DBUG_EXECUTE_IF("row_merge_instrument_log_check_flush", - log_sys->check_flush_or_checkpoint = true; + log_sys.check_flush_or_checkpoint = true; ); for (idx_tuple_vec::iterator it = m_dtuple_vec->begin(); @@ -153,7 +153,7 @@ public: dtuple = *it; ut_ad(dtuple); - if (log_sys->check_flush_or_checkpoint) { + if (log_sys.check_flush_or_checkpoint) { if (!(*mtr_committed)) { btr_pcur_move_to_prev_on_page(pcur); btr_pcur_store_position(pcur, scan_mtr); @@ -232,7 +232,7 @@ public: if (error == DB_SUCCESS) { if (rtr_info.mbr_adj) { error = rtr_ins_enlarge_mbr( - &ins_cur, NULL, &mtr); + &ins_cur, &mtr); } if (error == DB_SUCCESS) { @@ -290,7 +290,7 @@ dberr_t row_merge_insert_index_tuples( dict_index_t* index, const dict_table_t* old_table, - int fd, + const pfs_os_file_t& fd, row_merge_block_t* block, const row_merge_buf_t* row_buf, BtrBulk* btr_bulk, @@ -559,7 +559,7 @@ row_merge_buf_add( mem_heap_alloc(buf->heap, n_fields * sizeof *entry->fields)); data_size = 0; - extra_size = UT_BITS_IN_BYTES(index->n_nullable); + extra_size = UT_BITS_IN_BYTES(unsigned(index->n_nullable)); ifield = dict_index_get_nth_field(index, 0); @@ -572,7 +572,7 @@ row_merge_buf_add( col = ifield->col; const dict_v_col_t* v_col = NULL; - if (dict_col_is_virtual(col)) { + if (col->is_virtual()) { v_col = reinterpret_cast(col); } @@ -581,7 +581,7 @@ row_merge_buf_add( /* Process the Doc ID column */ if (*doc_id > 0 && col_no == index->table->fts->doc_col - && !dict_col_is_virtual(col)) { + && !col->is_virtual()) { fts_write_doc_id((byte*) &write_doc_id, *doc_id); /* Note: field->data now points to a value on the @@ -600,7 +600,7 @@ row_merge_buf_add( field->type.len = ifield->col->len; } else { /* Use callback to get the virtual column value */ - if (dict_col_is_virtual(col)) { + if (col->is_virtual()) { dict_index_t* clust_index = dict_table_get_first_index(new_table); @@ -732,7 +732,7 @@ row_merge_buf_add( len = dfield_get_len(field); } } - } else if (!dict_col_is_virtual(col)) { + } else if (!col->is_virtual()) { /* Only non-virtual column are stored externally */ const byte* buf = row_ext_lookup(ext, col_no, &len); @@ -819,9 +819,9 @@ row_merge_buf_add( /* Record size can exceed page size while converting to redundant row format. But there is assert - ut_ad(size < UNIV_PAGE_SIZE) in rec_offs_data_size(). + ut_ad(size < srv_page_size) in rec_offs_data_size(). It may hit the assert before attempting to insert the row. 
*/ - if (conv_heap != NULL && data_size > UNIV_PAGE_SIZE) { + if (conv_heap != NULL && data_size > srv_page_size) { *err = DB_TOO_BIG_RECORD; } @@ -1069,7 +1069,7 @@ row_merge_heap_create( bool row_merge_read( /*===========*/ - int fd, /*!< in: file descriptor */ + const pfs_os_file_t& fd, /*!< in: file descriptor */ ulint offset, /*!< in: offset where to read in number of row_merge_block_t elements */ @@ -1084,8 +1084,8 @@ row_merge_read( DBUG_EXECUTE_IF("row_merge_read_failure", DBUG_RETURN(FALSE);); IORequest request(IORequest::READ); - const bool success = os_file_read_no_error_handling_int_fd( - request, fd, buf, ofs, srv_sort_buf_size); + const bool success = os_file_read_no_error_handling( + request, fd, buf, ofs, srv_sort_buf_size, 0); /* If encryption is enabled decrypt buffer */ if (success && log_tmp_is_encrypted()) { @@ -1117,7 +1117,7 @@ UNIV_INTERN bool row_merge_write( /*============*/ - int fd, /*!< in: file descriptor */ + const pfs_os_file_t& fd, /*!< in: file descriptor */ ulint offset, /*!< in: offset where to write, in number of row_merge_block_t elements */ const void* buf, /*!< in: data */ @@ -1146,7 +1146,7 @@ row_merge_write( } IORequest request(IORequest::WRITE); - const bool success = os_file_write_int_fd( + const bool success = os_file_write( request, "(merge)", fd, out_buf, ofs, buf_len); #ifdef POSIX_FADV_DONTNEED @@ -1168,7 +1168,7 @@ row_merge_read_rec( mrec_buf_t* buf, /*!< in/out: secondary buffer */ const byte* b, /*!< in: pointer to record */ const dict_index_t* index, /*!< in: index of the record */ - int fd, /*!< in: file descriptor */ + const pfs_os_file_t& fd, /*!< in: file descriptor */ ulint* foffs, /*!< in/out: file offset */ const mrec_t** mrec, /*!< out: pointer to merge record, or NULL on end of list @@ -1232,7 +1232,7 @@ err_exit: to the auxiliary buffer and handle this as a special case. */ - avail_size = &block[srv_sort_buf_size] - b; + avail_size = ulint(&block[srv_sort_buf_size] - b); ut_ad(avail_size < sizeof *buf); memcpy(*buf, b, avail_size); @@ -1287,7 +1287,7 @@ err_exit: /* The record spans two blocks. Copy it to buf. */ b -= extra_size + data_size; - avail_size = &block[srv_sort_buf_size] - b; + avail_size = ulint(&block[srv_sort_buf_size] - b); memcpy(*buf, b, avail_size); *mrec = *buf + extra_size; @@ -1331,7 +1331,7 @@ row_merge_write_rec_low( ulint e, /*!< in: encoded extra_size */ #ifndef DBUG_OFF ulint size, /*!< in: total size to write */ - int fd, /*!< in: file descriptor */ + const pfs_os_file_t& fd, /*!< in: file descriptor */ ulint foffs, /*!< in: file offset */ #endif /* !DBUG_OFF */ const mrec_t* mrec, /*!< in: record to write */ @@ -1374,7 +1374,7 @@ row_merge_write_rec( row_merge_block_t* block, /*!< in/out: file buffer */ mrec_buf_t* buf, /*!< in/out: secondary buffer */ byte* b, /*!< in: pointer to end of block */ - int fd, /*!< in: file descriptor */ + const pfs_os_file_t& fd, /*!< in: file descriptor */ ulint* foffs, /*!< in/out: file offset */ const mrec_t* mrec, /*!< in: record to write */ const ulint* offsets,/*!< in: offsets of mrec */ @@ -1403,7 +1403,7 @@ row_merge_write_rec( if (UNIV_UNLIKELY(b + size >= &block[srv_sort_buf_size])) { /* The record spans two blocks. Copy it to the temporary buffer first. 
*/ - avail_size = &block[srv_sort_buf_size] - b; + avail_size = ulint(&block[srv_sort_buf_size] - b); row_merge_write_rec_low(buf[0], extra_size, size, fd, *foffs, @@ -1444,7 +1444,7 @@ row_merge_write_eof( /*================*/ row_merge_block_t* block, /*!< in/out: file buffer */ byte* b, /*!< in: pointer to end of block */ - int fd, /*!< in: file descriptor */ + const pfs_os_file_t& fd, /*!< in: file descriptor */ ulint* foffs, /*!< in/out: file offset */ row_merge_block_t* crypt_block, /*!< in: crypt buf or NULL */ ulint space) /*!< in: space id */ @@ -1467,7 +1467,7 @@ row_merge_write_eof( #ifdef UNIV_DEBUG_VALGRIND /* The rest of the block is uninitialized. Initialize it to avoid bogus warnings. */ - memset(b, 0xff, &block[srv_sort_buf_size] - b); + memset(b, 0xff, ulint(&block[srv_sort_buf_size] - b)); #endif /* UNIV_DEBUG_VALGRIND */ if (!row_merge_write(fd, (*foffs)++, block, crypt_block, space)) { @@ -1481,48 +1481,48 @@ row_merge_write_eof( /** Create a temporary file if it has not been created already. @param[in,out] tmpfd temporary file handle @param[in] path location for creating temporary file -@return file descriptor, or -1 on failure */ +@return true on success, false on error */ static MY_ATTRIBUTE((warn_unused_result)) -int +bool row_merge_tmpfile_if_needed( - int* tmpfd, + pfs_os_file_t* tmpfd, const char* path) { - if (*tmpfd < 0) { + if (*tmpfd == OS_FILE_CLOSED) { *tmpfd = row_merge_file_create_low(path); - if (*tmpfd >= 0) { + if (*tmpfd != OS_FILE_CLOSED) { MONITOR_ATOMIC_INC(MONITOR_ALTER_TABLE_SORT_FILES); } } - return(*tmpfd); + return(*tmpfd != OS_FILE_CLOSED); } /** Create a temporary file for merge sort if it was not created already. @param[in,out] file merge file structure @param[in] nrec number of records in the file @param[in] path location for creating temporary file -@return file descriptor, or -1 on failure */ +@return true on success, false on error */ static MY_ATTRIBUTE((warn_unused_result)) -int +bool row_merge_file_create_if_needed( merge_file_t* file, - int* tmpfd, + pfs_os_file_t* tmpfd, ulint nrec, const char* path) { - ut_ad(file->fd < 0 || *tmpfd >=0); - if (file->fd < 0 && row_merge_file_create(file, path) >= 0) { + ut_ad(file->fd == OS_FILE_CLOSED || *tmpfd != OS_FILE_CLOSED); + if (file->fd == OS_FILE_CLOSED && row_merge_file_create(file, path)!= OS_FILE_CLOSED) { MONITOR_ATOMIC_INC(MONITOR_ALTER_TABLE_SORT_FILES); - if (row_merge_tmpfile_if_needed(tmpfd, path) < 0) { - return(-1); + if (!row_merge_tmpfile_if_needed(tmpfd, path) ) { + return(false); } file->n_rec = nrec; } - ut_ad(file->fd < 0 || *tmpfd >=0); - return(file->fd); + ut_ad(file->fd == OS_FILE_CLOSED || *tmpfd != OS_FILE_CLOSED); + return(file->fd != OS_FILE_CLOSED); } /** Copy the merge data tuple from another merge data tuple. @@ -1656,7 +1656,7 @@ containing the index entries for the indexes to be built. 
@param[in] files temporary files @param[in] key_numbers MySQL key numbers to create @param[in] n_index number of indexes to create -@param[in] add_cols default values of added columns, or NULL +@param[in] defaults default values of added, changed columns, or NULL @param[in] add_v newly added virtual columns along with indexes @param[in] col_map mapping of old column numbers to new ones, or NULL if old_table == new_table @@ -1689,19 +1689,18 @@ row_merge_read_clustered_index( merge_file_t* files, const ulint* key_numbers, ulint n_index, - const dtuple_t* add_cols, + const dtuple_t* defaults, const dict_add_v_col_t* add_v, const ulint* col_map, ulint add_autoinc, ib_sequence_t& sequence, row_merge_block_t* block, bool skip_pk_sort, - int* tmpfd, + pfs_os_file_t* tmpfd, ut_stage_alter_t* stage, double pct_cost, row_merge_block_t* crypt_block, - struct TABLE* eval_table, - bool drop_historical) + struct TABLE* eval_table) { dict_index_t* clust_index; /* Clustered index */ mem_heap_t* row_heap; /* Heap memory to create @@ -1743,7 +1742,7 @@ row_merge_read_clustered_index( DBUG_ENTER("row_merge_read_clustered_index"); ut_ad((old_table == new_table) == !col_map); - ut_ad(!add_cols || col_map); + ut_ad(!defaults || col_map); ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE)); ut_ad(trx->id); @@ -1842,7 +1841,7 @@ row_merge_read_clustered_index( clust_index = dict_table_get_first_index(old_table); const ulint old_trx_id_col = DATA_TRX_ID - DATA_N_SYS_COLS - + old_table->n_cols; + + ulint(old_table->n_cols); ut_ad(old_table->cols[old_trx_id_col].mtype == DATA_SYS); ut_ad(old_table->cols[old_trx_id_col].prtype == (DATA_TRX_ID | DATA_NOT_NULL)); @@ -1959,15 +1958,6 @@ row_merge_read_clustered_index( } } -#ifdef DBUG_OFF -# define dbug_run_purge false -#else /* DBUG_OFF */ - bool dbug_run_purge = false; -#endif /* DBUG_OFF */ - DBUG_EXECUTE_IF( - "ib_purge_on_create_index_page_switch", - dbug_run_purge = true;); - /* Insert the cached spatial index rows. */ bool mtr_committed = false; @@ -1984,9 +1974,8 @@ row_merge_read_clustered_index( goto scan_next; } - if (dbug_run_purge - || my_atomic_load32_explicit(&clust_index->lock.waiters, - MY_MEMORY_ORDER_RELAXED)) { + if (my_atomic_load32_explicit(&clust_index->lock.waiters, + MY_MEMORY_ORDER_RELAXED)) { /* There are waiters on the clustered index tree lock, likely the purge thread. Store and restore the cursor @@ -2007,18 +1996,6 @@ row_merge_read_clustered_index( btr_pcur_store_position(&pcur, &mtr); mtr_commit(&mtr); - if (dbug_run_purge) { - /* This is for testing - purposes only (see - DBUG_EXECUTE_IF above). We - signal the purge thread and - hope that the purge batch will - complete before we execute - btr_pcur_restore_position(). */ - trx_purge_run(); - os_thread_sleep(1000000); - } - /* Give the waiters a chance to proceed. 
*/ os_thread_yield(); scan_next: @@ -2187,19 +2164,26 @@ end_of_index: row = row_build_w_add_vcol(ROW_COPY_POINTERS, clust_index, rec, offsets, new_table, - add_cols, add_v, col_map, &ext, + defaults, add_v, col_map, &ext, row_heap); ut_ad(row); for (ulint i = 0; i < n_nonnull; i++) { - const dfield_t* field = &row->fields[nonnull[i]]; + dfield_t* field = &row->fields[nonnull[i]]; ut_ad(dfield_get_type(field)->prtype & DATA_NOT_NULL); if (dfield_is_null(field)) { - err = DB_INVALID_NULL; - trx->error_key_num = 0; - goto func_exit; + const dfield_t& default_field + = defaults->fields[nonnull[i]]; + + if (default_field.data == NULL) { + err = DB_INVALID_NULL; + trx->error_key_num = 0; + goto func_exit; + } + + *field = default_field; } } @@ -2313,7 +2297,7 @@ end_of_index: } if (old_table->versioned()) { - if ((!new_table->versioned() || drop_historical) + if (!new_table->versioned() && clust_index->vers_history_row(rec, offsets)) { continue; } @@ -2524,7 +2508,7 @@ write_buffers: err = row_merge_insert_index_tuples( index[i], old_table, - -1, NULL, buf, clust_btr_bulk, + OS_FILE_CLOSED, NULL, buf, clust_btr_bulk, table_total_rows, curr_progress, pct_cost, @@ -2615,7 +2599,7 @@ write_buffers: we can insert directly into the index without temporary file if clustered index does not uses temporary file. */ - if (row == NULL && file->fd == -1 + if (row == NULL && file->fd == OS_FILE_CLOSED && !clust_temp_file) { DBUG_EXECUTE_IF( "row_merge_write_failure", @@ -2635,7 +2619,7 @@ write_buffers: err = row_merge_insert_index_tuples( index[i], old_table, - -1, NULL, buf, &btr_bulk, + OS_FILE_CLOSED, NULL, buf, &btr_bulk, table_total_rows, curr_progress, pct_cost, @@ -2652,9 +2636,9 @@ write_buffers: break; } } else { - if (row_merge_file_create_if_needed( + if (!row_merge_file_create_if_needed( file, tmpfd, - buf->n_tuples, path) < 0) { + buf->n_tuples, path)) { err = DB_OUT_OF_MEMORY; trx->error_key_num = i; goto func_exit; @@ -2933,10 +2917,10 @@ wait_again: @param[in,out] foffs1 offset of second source list in the file @param[in,out] of output file @param[in,out] stage performance schema accounting object, used by -@param[in,out] crypt_block encryption buffer -@param[in] space tablespace ID for encryption ALTER TABLE. If not NULL stage->inc() will be called for each record processed. +@param[in,out] crypt_block encryption buffer +@param[in] space tablespace ID for encryption @return DB_SUCCESS or error code */ static MY_ATTRIBUTE((warn_unused_result)) dberr_t @@ -2947,7 +2931,7 @@ row_merge_blocks( ulint* foffs0, ulint* foffs1, merge_file_t* of, - ut_stage_alter_t* stage, + ut_stage_alter_t* stage MY_ATTRIBUTE((unused)), row_merge_block_t* crypt_block, ulint space) { @@ -3055,10 +3039,10 @@ done1: @param[in,out] foffs0 input file offset @param[in,out] of output file @param[in,out] stage performance schema accounting object, used by -@param[in,out] crypt_block encryption buffer -@param[in] space tablespace ID for encryption ALTER TABLE. If not NULL stage->inc() will be called for each record processed. 
+@param[in,out] crypt_block encryption buffer +@param[in] space tablespace ID for encryption @return TRUE on success, FALSE on failure */ static MY_ATTRIBUTE((warn_unused_result)) ibool @@ -3068,7 +3052,7 @@ row_merge_blocks_copy( row_merge_block_t* block, ulint* foffs0, merge_file_t* of, - ut_stage_alter_t* stage, + ut_stage_alter_t* stage MY_ATTRIBUTE((unused)), row_merge_block_t* crypt_block, ulint space) { @@ -3159,7 +3143,7 @@ row_merge( const row_merge_dup_t* dup, merge_file_t* file, row_merge_block_t* block, - int* tmpfd, + pfs_os_file_t* tmpfd, ulint* num_run, ulint* run_offset, ut_stage_alter_t* stage, @@ -3301,7 +3285,7 @@ row_merge_sort( const row_merge_dup_t* dup, merge_file_t* file, row_merge_block_t* block, - int* tmpfd, + pfs_os_file_t* tmpfd, const bool update_progress, /*!< in: update progress status variable or not */ @@ -3515,7 +3499,7 @@ dberr_t row_merge_insert_index_tuples( dict_index_t* index, const dict_table_t* old_table, - int fd, + const pfs_os_file_t& fd, row_merge_block_t* block, const row_merge_buf_t* row_buf, BtrBulk* btr_bulk, @@ -3568,7 +3552,7 @@ row_merge_insert_index_tuples( } if (row_buf != NULL) { - ut_ad(fd == -1); + ut_ad(fd == OS_FILE_CLOSED); ut_ad(block == NULL); DBUG_EXECUTE_IF("row_merge_read_failure", error = DB_CORRUPTION; @@ -3880,7 +3864,8 @@ row_merge_drop_indexes( A concurrent purge will be prevented by dict_operation_lock. */ - if (!locked && table->get_ref_count() > 1) { + if (!locked && (table->get_ref_count() > 1 + || UT_LIST_GET_FIRST(table->locks))) { /* We will have to drop the indexes later, when the table is guaranteed to be no longer in use. Mark the indexes as incomplete and corrupted, so that other @@ -4097,37 +4082,33 @@ row_merge_drop_temp_indexes(void) UNIV_PFS_IO defined, register the file descriptor with Performance Schema. @param[in] path location for creating temporary merge files, or NULL @return File descriptor */ -int +pfs_os_file_t row_merge_file_create_low( const char* path) { - int fd; + pfs_os_file_t fd; #ifdef UNIV_PFS_IO /* This temp file open does not go through normal file APIs, add instrumentation to register with performance schema */ - struct PSI_file_locker* locker; + struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; - locker = PSI_FILE_CALL(get_thread_file_name_locker)( - &state, innodb_temp_file_key, PSI_FILE_OPEN, - "Innodb Merge Temp File", &locker); - if (locker != NULL) { - PSI_FILE_CALL(start_file_open_wait)(locker, - __FILE__, - __LINE__); - } + + register_pfs_file_open_begin( + &state, locker, innodb_temp_file_key, + PSI_FILE_CREATE, + "Innodb Merge Temp File", + __FILE__, __LINE__); + #endif fd = innobase_mysql_tmpfile(path); #ifdef UNIV_PFS_IO - if (locker != NULL) { - PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)( - locker, fd); - } + register_pfs_file_open_end(locker, fd, + (fd == OS_FILE_CLOSED)?NULL:&fd); #endif - if (fd < 0) { + if (fd == OS_FILE_CLOSED) { ib::error() << "Cannot create temporary merge file"; - return(-1); } return(fd); } @@ -4136,8 +4117,8 @@ row_merge_file_create_low( /** Create a merge file in the given location. 
@param[out] merge_file merge file structure @param[in] path location for creating temporary file, or NULL -@return file descriptor, or -1 on failure */ -int +@return file descriptor, or OS_FILE_CLOSED on error */ +pfs_os_file_t row_merge_file_create( merge_file_t* merge_file, const char* path) @@ -4146,7 +4127,7 @@ row_merge_file_create( merge_file->offset = 0; merge_file->n_rec = 0; - if (merge_file->fd >= 0) { + if (merge_file->fd != OS_FILE_CLOSED) { if (srv_disable_sort_file_cache) { os_file_set_nocache(merge_file->fd, "row0merge.cc", "sort"); @@ -4161,26 +4142,11 @@ if UNIV_PFS_IO is defined. */ void row_merge_file_destroy_low( /*=======================*/ - int fd) /*!< in: merge file descriptor */ + const pfs_os_file_t& fd) /*!< in: merge file descriptor */ { -#ifdef UNIV_PFS_IO - struct PSI_file_locker* locker = NULL; - PSI_file_locker_state state; - locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( - &state, fd, PSI_FILE_CLOSE); - if (locker != NULL) { - PSI_FILE_CALL(start_file_wait)( - locker, 0, __FILE__, __LINE__); + if (fd != OS_FILE_CLOSED) { + os_file_close(fd); } -#endif - if (fd >= 0) { - close(fd); - } -#ifdef UNIV_PFS_IO - if (locker != NULL) { - PSI_FILE_CALL(end_file_wait)(locker, 0); - } -#endif } /*********************************************************************//** Destroy a merge file. */ @@ -4191,9 +4157,9 @@ row_merge_file_destroy( { ut_ad(!srv_read_only_mode); - if (merge_file->fd != -1) { + if (merge_file->fd != OS_FILE_CLOSED) { row_merge_file_destroy_low(merge_file->fd); - merge_file->fd = -1; + merge_file->fd = OS_FILE_CLOSED; } } @@ -4376,7 +4342,7 @@ row_merge_rename_tables_dict( pars_info_add_str_literal(info, "tmp_name", tmp_name); pars_info_add_str_literal(info, "tmp_path", tmp_path); pars_info_add_int4_literal(info, "old_space", - lint(old_table->space_id)); + old_table->space_id); err = que_eval_sql(info, "PROCEDURE RENAME_OLD_SPACE () IS\n" @@ -4407,7 +4373,7 @@ row_merge_rename_tables_dict( old_table->name.m_name); pars_info_add_str_literal(info, "old_path", old_path); pars_info_add_int4_literal(info, "new_space", - lint(new_table->space_id)); + new_table->space_id); err = que_eval_sql(info, "PROCEDURE RENAME_NEW_SPACE () IS\n" @@ -4423,7 +4389,7 @@ row_merge_rename_tables_dict( ut_free(old_path); } - if (err == DB_SUCCESS && dict_table_is_discarded(new_table)) { + if (err == DB_SUCCESS && (new_table->flags2 & DICT_TF2_DISCARDED)) { err = row_import_update_discarded_flag( trx, new_table->id, true); } @@ -4495,14 +4461,14 @@ row_merge_is_index_usable( const trx_t* trx, /*!< in: transaction */ const dict_index_t* index) /*!< in: index to check */ { - if (!dict_index_is_clust(index) + if (!index->is_primary() && dict_index_is_online_ddl(index)) { /* Indexes that are being created are not useable. 
*/ return(false); } - return(!dict_index_is_corrupted(index) - && (dict_table_is_temporary(index->table) + return(!index->is_corrupted() + && (index->table->is_temporary() || index->trx_id == 0 || !trx->read_view.is_open() || trx->read_view.changes_visible( @@ -4567,7 +4533,7 @@ old_table unless creating a PRIMARY KEY @param[in] n_indexes size of indexes[] @param[in,out] table MySQL table, for reporting erroneous key value if applicable -@param[in] add_cols default values of added columns, or NULL +@param[in] defaults default values of added, changed columns, or NULL @param[in] col_map mapping of old column numbers to new ones, or NULL if old_table == new_table @param[in] add_autoinc number of added AUTO_INCREMENT columns, or @@ -4581,7 +4547,6 @@ this function and it will be passed to other functions for further accounting. @param[in] add_v new virtual columns added along with indexes @param[in] eval_table mysql table used to evaluate virtual column value, see innobase_get_computed_value(). -@param[in] drop_historical whether to drop historical system rows @return DB_SUCCESS or error code */ dberr_t row_merge_build_indexes( @@ -4593,15 +4558,14 @@ row_merge_build_indexes( const ulint* key_numbers, ulint n_indexes, struct TABLE* table, - const dtuple_t* add_cols, + const dtuple_t* defaults, const ulint* col_map, ulint add_autoinc, ib_sequence_t& sequence, bool skip_pk_sort, ut_stage_alter_t* stage, const dict_add_v_col_t* add_v, - struct TABLE* eval_table, - bool drop_historical) + struct TABLE* eval_table) { merge_file_t* merge_files; row_merge_block_t* block; @@ -4612,7 +4576,7 @@ row_merge_build_indexes( ulint i; ulint j; dberr_t error; - int tmpfd = -1; + pfs_os_file_t tmpfd = OS_FILE_CLOSED; dict_index_t* fts_sort_idx = NULL; fts_psort_t* psort_info = NULL; fts_psort_t* merge_info = NULL; @@ -4629,7 +4593,7 @@ row_merge_build_indexes( ut_ad(!srv_read_only_mode); ut_ad((old_table == new_table) == !col_map); - ut_ad(!add_cols || col_map); + ut_ad(!defaults || col_map); stage->begin_phase_read_pk(skip_pk_sort && new_table != old_table ? n_indexes - 1 @@ -4697,7 +4661,7 @@ row_merge_build_indexes( merge file descriptor */ for (i = 0; i < n_indexes; i++) { - merge_files[i].fd = -1; + merge_files[i].fd = OS_FILE_CLOSED; merge_files[i].offset = 0; } @@ -4763,9 +4727,9 @@ row_merge_build_indexes( error = row_merge_read_clustered_index( trx, table, old_table, new_table, online, indexes, fts_sort_idx, psort_info, merge_files, key_numbers, - n_indexes, add_cols, add_v, col_map, add_autoinc, + n_indexes, defaults, add_v, col_map, add_autoinc, sequence, block, skip_pk_sort, &tmpfd, stage, - pct_cost, crypt_block, eval_table, drop_historical); + pct_cost, crypt_block, eval_table); stage->end_phase_read_pk(); @@ -4869,7 +4833,7 @@ wait_again: #ifdef FTS_INTERNAL_DIAG_PRINT DEBUG_FTS_SORT_PRINT("FTS_SORT: Complete Insert\n"); #endif - } else if (merge_files[i].fd >= 0) { + } else if (merge_files[i].fd != OS_FILE_CLOSED) { char buf[NAME_LEN + 1]; row_merge_dup_t dup = { sort_idx, table, col_map, 0}; diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index f508a464006..b603292705a 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2000, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2015, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under @@ -477,7 +477,7 @@ row_mysql_store_col_in_innobase_format( case 4: /* space=0x00000020 */ /* Trim "half-chars", just in case. */ - col_len &= ~3; + col_len &= ~3U; while (col_len >= 4 && ptr[col_len - 4] == 0x00 @@ -490,7 +490,7 @@ row_mysql_store_col_in_innobase_format( case 2: /* space=0x0020 */ /* Trim "half-chars", just in case. */ - col_len &= ~1; + col_len &= ~1U; while (col_len >= 2 && ptr[col_len - 2] == 0x00 && ptr[col_len - 1] == 0x20) { @@ -1369,7 +1369,7 @@ row_insert_for_mysql( ut_a(prebuilt->magic_n == ROW_PREBUILT_ALLOCATED); ut_a(prebuilt->magic_n2 == ROW_PREBUILT_ALLOCATED); - if (dict_table_is_discarded(prebuilt->table)) { + if (!prebuilt->table->space) { ib::error() << "The table " << prebuilt->table->name << " doesn't have a corresponding tablespace, it was" @@ -1518,8 +1518,7 @@ error_exit: doc_ids difference should not exceed FTS_DOC_ID_MAX_STEP value. */ - if (next_doc_id > 1 - && doc_id - next_doc_id >= FTS_DOC_ID_MAX_STEP) { + if (doc_id - next_doc_id >= FTS_DOC_ID_MAX_STEP) { ib::error() << "Doc ID " << doc_id << " is too big. Its difference with" " largest used Doc ID " @@ -1621,7 +1620,7 @@ row_create_update_node_for_mysql( node = upd_node_create(heap); - node->in_mysql_interface = TRUE; + node->in_mysql_interface = true; node->is_delete = NO_DELETE; node->searched_update = FALSE; node->select = NULL; @@ -2456,10 +2455,8 @@ err_exit: if (dict_table_is_file_per_table(table) && fil_delete_tablespace(table->space->id) != DB_SUCCESS) { - - ib::error() << "Not able to delete tablespace " - << table->space << " of table " - << table->name << "!"; + ib::error() << "Cannot delete the file of table " + << table->name; } /* fall through */ @@ -2581,7 +2578,7 @@ row_create_index_for_mysql( if (index) { ut_ad(!index->is_instant()); index->n_core_null_bytes = UT_BITS_IN_BYTES( - index->n_nullable); + unsigned(index->n_nullable)); err = dict_create_index_tree_in_mem(index, trx); @@ -2654,12 +2651,6 @@ row_table_add_foreign_constraints( ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); ut_a(sql_string); - trx->op_info = "adding foreign keys"; - - trx_start_if_not_started_xa(trx, true); - - trx_set_dict_operation(trx, TRX_DICT_OP_TABLE); - err = dict_create_foreign_constraints( trx, sql_string, sql_length, name, reject_fks); @@ -3160,11 +3151,7 @@ row_discard_tablespace( } /* Discard the physical file that is used for the tablespace. 
*/ - err = fil_delete_tablespace(table->space_id -#ifdef BTR_CUR_HASH_ADAPT - , true -#endif /* BTR_CUR_HASH_ADAPT */ - ); + err = fil_delete_tablespace(table->space_id); switch (err) { case DB_IO_ERROR: ib::warn() << "ALTER TABLE " << table->name @@ -3221,7 +3208,7 @@ row_discard_tablespace_for_mysql( if (table == 0) { err = DB_TABLE_NOT_FOUND; - } else if (dict_table_is_temporary(table)) { + } else if (table->is_temporary()) { ib_senderrf(trx->mysql_thd, IB_LOG_LEVEL_ERROR, ER_CANNOT_DISCARD_TEMPORARY_TABLE); @@ -3331,16 +3318,15 @@ fil_wait_crypt_bg_threads( if (now >= last + 30) { ib::warn() << "Waited " << now - start - << " seconds for ref-count on table: " - << table->name << " space: " << table->space; + << " seconds for ref-count on table " + << table->name; last = now; } if (now >= start + 300) { ib::warn() << "After " << now - start << " seconds, gave up waiting " - << "for ref-count on table: " << table->name - << " space: " << table->space; + << "for ref-count on table " << table->name; break; } } @@ -3716,6 +3702,21 @@ defer: rw_lock_x_unlock(dict_index_get_lock(index)); } + if (table->space_id != TRX_SYS_SPACE) { + /* On DISCARD TABLESPACE, we would not drop the + adaptive hash index entries. If the tablespace is + missing here, delete-marking the record in SYS_INDEXES + would not free any pages in the buffer pool. Thus, + dict_index_remove_from_cache() would hang due to + adaptive hash index entries existing in the buffer + pool. To prevent this hang, and also to guarantee + that btr_search_drop_page_hash_when_freed() will avoid + calling btr_search_drop_page_hash_index() while we + hold the InnoDB dictionary lock, we will drop any + adaptive hash index entries upfront. */ + buf_LRU_drop_page_hash_for_tablespace(table); + } + /* Deleting a row from SYS_INDEXES table will invoke dict_drop_index_tree(). 
*/ info = pars_info_create(); @@ -4347,7 +4348,7 @@ row_rename_table_for_mysql( goto funct_exit; } else if (!table->is_readable() && !table->space - && !dict_table_is_discarded(table)) { + && !(table->flags2 & DICT_TF2_DISCARDED)) { err = DB_TABLE_NOT_FOUND; @@ -4581,7 +4582,8 @@ row_rename_table_for_mysql( } if (err == DB_SUCCESS - && dict_table_has_fts_index(table) + && (dict_table_has_fts_index(table) + || DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)) && !dict_tables_have_same_db(old_name, new_name)) { err = fts_rename_aux_tables(table, new_name, trx); if (err != DB_TABLE_NOT_FOUND) { @@ -4802,7 +4804,8 @@ row_scan_index_for_mysql( return(DB_SUCCESS); } - ulint bufsize = ut_max(UNIV_PAGE_SIZE, prebuilt->mysql_row_len); + ulint bufsize = std::max(srv_page_size, + prebuilt->mysql_row_len); buf = static_cast(ut_malloc_nokey(bufsize)); heap = mem_heap_create(100); diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc index 13e01f5ebf9..af8cd682288 100644 --- a/storage/innobase/row/row0purge.cc +++ b/storage/innobase/row/row0purge.cc @@ -414,7 +414,7 @@ row_purge_remove_sec_if_poss_leaf( log_free_check(); ut_ad(index->table == node->table); - ut_ad(!dict_table_is_temporary(index->table)); + ut_ad(!index->table->is_temporary()); mtr_start(&mtr); index->set_modified(mtr); @@ -806,12 +806,12 @@ skip_secondaries: can calculate from node->roll_ptr the file address of the new_val data */ - internal_offset - = ((const byte*) - dfield_get_data(&ufield->new_val)) - - undo_rec; + internal_offset = ulint( + static_cast + (dfield_get_data(&ufield->new_val)) + - undo_rec); - ut_a(internal_offset < UNIV_PAGE_SIZE); + ut_a(internal_offset < srv_page_size); trx_undo_decode_roll_ptr(node->roll_ptr, &is_insert, &rseg_id, @@ -937,7 +937,7 @@ try_again: goto err_exit; } - ut_ad(!dict_table_is_temporary(node->table)); + ut_ad(!node->table->is_temporary()); if (!fil_table_accessible(node->table)) { dict_table_close(node->table, FALSE, FALSE); @@ -972,8 +972,7 @@ try_again: clust_index = dict_table_get_first_index(node->table); - if (clust_index == NULL - || dict_index_is_corrupted(clust_index)) { + if (!clust_index || clust_index->is_corrupted()) { /* The table was corrupt in the data dictionary. dict_set_corrupted() works on an index, and we do not have an index to call it with. */ diff --git a/storage/innobase/row/row0quiesce.cc b/storage/innobase/row/row0quiesce.cc index dc0cb5f5424..4bfa7e0760f 100644 --- a/storage/innobase/row/row0quiesce.cc +++ b/storage/innobase/row/row0quiesce.cc @@ -243,7 +243,7 @@ row_quiesce_write_table( This field is also redundant, because the lengths are a property of the character set encoding, which in turn is encodedin prtype above. */ - mach_write_to_4(ptr, col->mbmaxlen * 5 + col->mbminlen); + mach_write_to_4(ptr, ulint(col->mbmaxlen * 5 + col->mbminlen)); ptr += sizeof(ib_uint32_t); mach_write_to_4(ptr, col->ind); @@ -394,7 +394,7 @@ row_quiesce_write_header( byte* ptr = row; /* Write the system page size. */ - mach_write_to_4(ptr, UNIV_PAGE_SIZE); + mach_write_to_4(ptr, srv_page_size); ptr += sizeof(ib_uint32_t); /* Write the table->flags. 
*/ @@ -525,7 +525,7 @@ row_quiesce_table_start( ib::info() << "Sync to disk of " << table->name << " started."; if (srv_undo_sources) { - trx_purge_stop(); + purge_sys.stop(); } for (ulint count = 0; @@ -609,7 +609,7 @@ row_quiesce_table_complete( } if (srv_undo_sources) { - trx_purge_run(); + purge_sys.resume(); } dberr_t err = row_quiesce_set_state(table, QUIESCE_NONE, trx); @@ -635,7 +635,7 @@ row_quiesce_set_state( return(DB_UNSUPPORTED); - } else if (dict_table_is_temporary(table)) { + } else if (table->is_temporary()) { ib_senderrf(trx->mysql_thd, IB_LOG_LEVEL_WARN, ER_CANNOT_DISCARD_TEMPORARY_TABLE); diff --git a/storage/innobase/row/row0row.cc b/storage/innobase/row/row0row.cc index f9aa4fadacc..21a44fc1825 100644 --- a/storage/innobase/row/row0row.cc +++ b/storage/innobase/row/row0row.cc @@ -113,11 +113,10 @@ row_build_index_entry_low( col_no = dict_col_get_no(col); dfield = dtuple_get_nth_field(entry, i); } -#if DATA_MISSING != 0 -# error "DATA_MISSING != 0" -#endif - if (dict_col_is_virtual(col)) { + compile_time_assert(DATA_MISSING == 0); + + if (col->is_virtual()) { const dict_v_col_t* v_col = reinterpret_cast(col); @@ -357,7 +356,7 @@ addition of new virtual columns. of an index, or NULL if index->table should be consulted instead -@param[in] add_cols default values of added columns, or NULL +@param[in] defaults default values of added/changed columns, or NULL @param[in] add_v new virtual columns added along with new indexes @param[in] col_map mapping of old column @@ -375,7 +374,7 @@ row_build_low( const rec_t* rec, const ulint* offsets, const dict_table_t* col_table, - const dtuple_t* add_cols, + const dtuple_t* defaults, const dict_add_v_col_t* add_v, const ulint* col_map, row_ext_t** ext, @@ -441,13 +440,13 @@ row_build_low( if (!col_table) { ut_ad(!col_map); - ut_ad(!add_cols); + ut_ad(!defaults); col_table = index->table; } - if (add_cols) { + if (defaults) { ut_ad(col_map); - row = dtuple_copy(add_cols, heap); + row = dtuple_copy(defaults, heap); /* dict_table_copy_types() would set the fields to NULL */ for (ulint i = 0; i < dict_table_get_n_cols(col_table); i++) { dict_col_copy_type( @@ -594,9 +593,9 @@ row_build( of an index, or NULL if index->table should be consulted instead */ - const dtuple_t* add_cols, + const dtuple_t* defaults, /*!< in: default values of - added columns, or NULL */ + added and changed columns, or NULL */ const ulint* col_map,/*!< in: mapping of old column numbers to new ones, or NULL */ row_ext_t** ext, /*!< out, own: cache of @@ -606,7 +605,7 @@ row_build( the memory needed is allocated */ { return(row_build_low(type, index, rec, offsets, col_table, - add_cols, NULL, col_map, ext, heap)); + defaults, NULL, col_map, ext, heap)); } /** An inverse function to row_build_index_entry. Builds a row from a @@ -622,7 +621,7 @@ addition of new virtual columns. 
of an index, or NULL if index->table should be consulted instead -@param[in] add_cols default values of added columns, or NULL +@param[in] defaults default values of added, changed columns, or NULL @param[in] add_v new virtual columns added along with new indexes @param[in] col_map mapping of old column @@ -639,14 +638,14 @@ row_build_w_add_vcol( const rec_t* rec, const ulint* offsets, const dict_table_t* col_table, - const dtuple_t* add_cols, + const dtuple_t* defaults, const dict_add_v_col_t* add_v, const ulint* col_map, row_ext_t** ext, mem_heap_t* heap) { return(row_build_low(type, index, rec, offsets, col_table, - add_cols, add_v, col_map, ext, heap)); + defaults, add_v, col_map, ext, heap)); } /** Convert an index record to a data tuple. @@ -910,9 +909,8 @@ row_build_row_ref_in_tuple( held as long as the row reference is used! */ const dict_index_t* index, /*!< in: secondary index */ - ulint* offsets,/*!< in: rec_get_offsets(rec, index) + ulint* offsets)/*!< in: rec_get_offsets(rec, index) or NULL */ - trx_t* trx) /*!< in: transaction */ { const dict_index_t* clust_index; dfield_t* dfield; @@ -1014,7 +1012,7 @@ row_search_on_row_ref( index = dict_table_get_first_index(table); - if (UNIV_UNLIKELY(ref->info_bits)) { + if (UNIV_UNLIKELY(ref->info_bits != 0)) { ut_ad(ref->info_bits == REC_INFO_DEFAULT_ROW); ut_ad(ref->n_fields <= index->n_uniq); btr_pcur_open_at_index_side(true, index, mode, pcur, true, 0, @@ -1186,7 +1184,7 @@ row_raw_format_int( value = mach_read_int_type( (const byte*) data, data_len, unsigned_type); - ret = snprintf( + ret = (ulint) snprintf( buf, buf_size, unsigned_type ? "%llu" : "%lld", (longlong) value)+1; } else { diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 55be50ba35b..3c081657e35 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -215,7 +215,7 @@ row_sel_sec_rec_is_for_clust_rec( ifield = dict_index_get_nth_field(sec_index, i); col = dict_field_get_col(ifield); - is_virtual = dict_col_is_virtual(col); + is_virtual = col->is_virtual(); /* For virtual column, its value will need to be reconstructed from base column in cluster index */ @@ -506,8 +506,8 @@ row_sel_fetch_columns( if (field_no != ULINT_UNDEFINED) { - if (UNIV_UNLIKELY(rec_offs_nth_extern(offsets, - field_no))) { + if (UNIV_UNLIKELY(rec_offs_nth_extern( + offsets, field_no) != 0)) { /* Copy an externally stored field to the temporary heap, if possible. */ @@ -2489,8 +2489,7 @@ row_sel_convert_mysql_key_to_innobase( ulint buf_len, /*!< in: buffer length */ dict_index_t* index, /*!< in: index of the key value */ const byte* key_ptr, /*!< in: MySQL key value */ - ulint key_len, /*!< in: MySQL key value length */ - trx_t* trx) /*!< in: transaction */ + ulint key_len) /*!< in: MySQL key value length */ { byte* original_buf = buf; const byte* original_key_ptr = key_ptr; @@ -2577,8 +2576,8 @@ row_sel_convert_mysql_key_to_innobase( even though the actual value only takes data len bytes from the start. 
*/ - data_len = key_ptr[data_offset] - + 256 * key_ptr[data_offset + 1]; + data_len = ulint(key_ptr[data_offset]) + | ulint(key_ptr[data_offset + 1]) << 8; data_field_len = data_offset + 2 + field->prefix_len; @@ -2817,7 +2816,8 @@ row_sel_field_store_in_mysql_format_func( } } - row_mysql_pad_col(templ->mbminlen, pad, field_end - pad); + row_mysql_pad_col(templ->mbminlen, pad, + ulint(field_end - pad)); break; case DATA_BLOB: @@ -2943,7 +2943,7 @@ row_sel_store_mysql_field_func( || field_no == templ->icp_rec_field_no); ut_ad(rec_offs_validate(rec, index, offsets)); - if (UNIV_UNLIKELY(rec_offs_nth_extern(offsets, field_no))) { + if (UNIV_UNLIKELY(rec_offs_nth_extern(offsets, field_no) != 0)) { mem_heap_t* heap; /* Copy an externally stored field to a temporary heap */ @@ -2953,12 +2953,12 @@ row_sel_store_mysql_field_func( if (DATA_LARGE_MTYPE(templ->type)) { if (prebuilt->blob_heap == NULL) { prebuilt->blob_heap = mem_heap_create( - UNIV_PAGE_SIZE); + srv_page_size); } heap = prebuilt->blob_heap; } else { - heap = mem_heap_create(UNIV_PAGE_SIZE); + heap = mem_heap_create(srv_page_size); } /* NOTE: if we are retrieving a big BLOB, we may @@ -3041,7 +3041,7 @@ row_sel_store_mysql_field_func( if (prebuilt->blob_heap == NULL) { prebuilt->blob_heap = mem_heap_create( - UNIV_PAGE_SIZE); + srv_page_size); DBUG_PRINT("anna", ("blob_heap allocated: %p", prebuilt->blob_heap)); } @@ -3287,7 +3287,7 @@ row_sel_get_clust_rec_for_mysql( thd_get_thread_id(trx->mysql_thd)); row_build_row_ref_in_tuple(prebuilt->clust_ref, rec, - sec_index, *offsets, trx); + sec_index, *offsets); clust_index = dict_table_get_first_index(sec_index->table); @@ -3504,10 +3504,10 @@ err_exit: Restores cursor position after it has been stored. We have to take into account that the record cursor was positioned on may have been deleted. Then we may have to move the cursor one step up or down. -@return TRUE if we may need to process the record the cursor is now +@return true if we may need to process the record the cursor is now positioned on (i.e. we should not go to the next record yet) */ static -ibool +bool sel_restore_position_for_mysql( /*===========================*/ ibool* same_user_rec, /*!< out: TRUE if we were able to restore @@ -3553,12 +3553,12 @@ next: btr_pcur_move_to_next(pcur, mtr); } - return(TRUE); + return true; } return(!success); case BTR_PCUR_AFTER_LAST_IN_TREE: case BTR_PCUR_BEFORE_FIRST_IN_TREE: - return(TRUE); + return true; case BTR_PCUR_AFTER: /* positioned to record after pcur->old_rec. */ pcur->pos_state = BTR_PCUR_IS_POSITIONED; @@ -3568,7 +3568,7 @@ prev: pcur->btr_cur.index)) { btr_pcur_move_to_prev(pcur, mtr); } - return(TRUE); + return true; case BTR_PCUR_BEFORE: /* For non optimistic restoration: The position is now set to the record before pcur->old_rec. 
@@ -3590,19 +3590,19 @@ prev: HANDLER READ idx PREV; */ goto prev; } - return(TRUE); + return true; case BTR_PCUR_IS_POSITIONED: if (moves_up && btr_pcur_is_on_user_rec(pcur)) { goto next; } - return(TRUE); + return true; case BTR_PCUR_WAS_POSITIONED: case BTR_PCUR_NOT_POSITIONED: break; } } ut_ad(0); - return(TRUE); + return true; } /********************************************************************//** @@ -4011,7 +4011,7 @@ row_sel_fill_vrow( field = dict_index_get_nth_field(index, i); col = dict_field_get_col(field); - if (dict_col_is_virtual(col)) { + if (col->is_virtual()) { const byte* data; ulint len; @@ -4177,7 +4177,7 @@ row_search_mvcc( trx_t* trx = prebuilt->trx; dict_index_t* clust_index; que_thr_t* thr; - const rec_t* rec; + const rec_t* UNINIT_VAR(rec); const dtuple_t* vrow = NULL; const rec_t* result_rec = NULL; const rec_t* clust_rec; @@ -4218,19 +4218,15 @@ row_search_mvcc( ut_ad(!sync_check_iterate(sync_check())); - if (dict_table_is_discarded(prebuilt->table)) { - + if (!prebuilt->table->space) { DBUG_RETURN(DB_TABLESPACE_DELETED); - } else if (!prebuilt->table->is_readable()) { DBUG_RETURN(prebuilt->table->space ? DB_DECRYPTION_FAILED : DB_TABLESPACE_NOT_FOUND); } else if (!prebuilt->index_usable) { DBUG_RETURN(DB_MISSING_HISTORY); - - } else if (dict_index_is_corrupted(prebuilt->index)) { - + } else if (prebuilt->index->is_corrupted()) { DBUG_RETURN(DB_CORRUPTION); } @@ -4374,7 +4370,7 @@ row_search_mvcc( && dict_index_is_clust(index) && !prebuilt->templ_contains_blob && !prebuilt->used_in_HANDLER - && (prebuilt->mysql_row_len < UNIV_PAGE_SIZE / 8)) { + && (prebuilt->mysql_row_len < srv_page_size / 8)) { mode = PAGE_CUR_GE; @@ -4520,39 +4516,26 @@ row_search_mvcc( prebuilt->sql_stat_start = FALSE; } else if (!prebuilt->sql_stat_start) { /* No need to set an intention lock or assign a read view */ - - if (!trx->read_view.is_open() - && !srv_read_only_mode - && prebuilt->select_lock_type == LOCK_NONE) { - - ib::error() << "MySQL is trying to perform a" - " consistent read but the read view is not" - " assigned!"; - trx_print(stderr, trx, 600); - fputc('\n', stderr); - ut_error; - } - } else if (prebuilt->select_lock_type == LOCK_NONE) { - /* This is a consistent read */ - /* Assign a read view for the query */ - trx_start_if_not_started(trx, false); - - trx->read_view.open(trx); - - prebuilt->sql_stat_start = FALSE; + ut_a(prebuilt->select_lock_type != LOCK_NONE + || srv_read_only_mode || trx->read_view.is_open()); } else { - trx_start_if_not_started(trx, false); -wait_table_again: - err = lock_table(0, prebuilt->table, - prebuilt->select_lock_type == LOCK_S - ? LOCK_IS : LOCK_IX, thr); - - if (err != DB_SUCCESS) { - - table_lock_waited = TRUE; - goto lock_table_wait; - } prebuilt->sql_stat_start = FALSE; + trx_start_if_not_started(trx, false); + + if (prebuilt->select_lock_type == LOCK_NONE) { + trx->read_view.open(trx); + } else { +wait_table_again: + err = lock_table(0, prebuilt->table, + prebuilt->select_lock_type == LOCK_S + ? 
LOCK_IS : LOCK_IX, thr); + + if (err != DB_SUCCESS) { + + table_lock_waited = TRUE; + goto lock_table_wait; + } + } } /* Open or restore index cursor position */ @@ -4564,7 +4547,7 @@ wait_table_again: goto next_rec; } - ibool need_to_process = sel_restore_position_for_mysql( + bool need_to_process = sel_restore_position_for_mysql( &same_user_rec, BTR_SEARCH_LEAF, pcur, moves_up, &mtr); @@ -4784,7 +4767,7 @@ rec_loop: } } - if (UNIV_UNLIKELY(next_offs >= UNIV_PAGE_SIZE - PAGE_DIR)) { + if (UNIV_UNLIKELY(next_offs >= srv_page_size - PAGE_DIR)) { wrong_offs: if (srv_force_recovery == 0 || moves_up == FALSE) { @@ -5551,25 +5534,25 @@ next_rec: For R-tree spatial search, we also commit the mini-transaction each time */ - if (mtr_has_extra_clust_latch || spatial_search) { + if (spatial_search) { + /* No need to do store restore for R-tree */ + mtr.commit(); + mtr.start(); + mtr_has_extra_clust_latch = FALSE; + } else if (mtr_has_extra_clust_latch) { /* If we have extra cluster latch, we must commit mtr if we are moving to the next non-clustered index record, because we could break the latching order if we would access a different clustered index page right away without releasing the previous. */ - /* No need to do store restore for R-tree */ - if (!spatial_search) { - btr_pcur_store_position(pcur, &mtr); - } - + btr_pcur_store_position(pcur, &mtr); mtr.commit(); mtr_has_extra_clust_latch = FALSE; mtr.start(); - if (!spatial_search - && sel_restore_position_for_mysql(&same_user_rec, + if (sel_restore_position_for_mysql(&same_user_rec, BTR_SEARCH_LEAF, pcur, moves_up, &mtr)) { goto rec_loop; @@ -5835,7 +5818,8 @@ row_count_rtree_recs( prebuilt->search_tuple = entry; - ulint bufsize = ut_max(UNIV_PAGE_SIZE, prebuilt->mysql_row_len); + ulint bufsize = std::max(srv_page_size, + prebuilt->mysql_row_len); buf = static_cast(ut_malloc_nokey(bufsize)); ulint cnt = 1000; diff --git a/storage/innobase/row/row0trunc.cc b/storage/innobase/row/row0trunc.cc index 305aa26a827..4e7b7bd9b73 100644 --- a/storage/innobase/row/row0trunc.cc +++ b/storage/innobase/row/row0trunc.cc @@ -96,7 +96,7 @@ public: for (;;) { if (!btr_pcur_is_on_user_rec(&m_pcur) - || !callback.match(&m_mtr, &m_pcur)) { + || !callback.match(&m_pcur)) { /* The end of of the index has been reached. */ err = DB_END_OF_INDEX; @@ -195,10 +195,9 @@ public: } /** - @param mtr mini-transaction covering the iteration @param pcur persistent cursor used for iteration @return true if the table id column matches. 
*/ - bool match(mtr_t* mtr, btr_pcur_t* pcur) const + bool match(btr_pcur_t* pcur) const { ulint len; const byte* field; @@ -352,8 +351,8 @@ public: } - ulint sz = UNIV_PAGE_SIZE; - void* buf = ut_zalloc_nokey(sz + UNIV_PAGE_SIZE); + ulint sz = srv_page_size; + void* buf = ut_zalloc_nokey(sz + srv_page_size); if (buf == 0) { os_file_close(handle); return(DB_OUT_OF_MEMORY); @@ -361,7 +360,7 @@ public: /* Align the memory for file i/o if we might have O_DIRECT set*/ byte* log_buf = static_cast<byte*>( - ut_align(buf, UNIV_PAGE_SIZE)); + ut_align(buf, srv_page_size)); lsn_t lsn = log_get_lsn(); @@ -383,7 +382,7 @@ public: ut_ad(err == DB_FAIL); ut_free(buf); sz *= 2; - buf = ut_zalloc_nokey(sz + UNIV_PAGE_SIZE); + buf = ut_zalloc_nokey(sz + srv_page_size); DBUG_EXECUTE_IF("ib_err_trunc_oom_logging", ut_free(buf); buf = 0;); @@ -392,7 +391,7 @@ public: return(DB_OUT_OF_MEMORY); } log_buf = static_cast<byte*>( - ut_align(buf, UNIV_PAGE_SIZE)); + ut_align(buf, srv_page_size)); } } while (err != DB_SUCCESS); @@ -664,8 +663,8 @@ TruncateLogParser::parse( return(DB_IO_ERROR); } - ulint sz = UNIV_PAGE_SIZE; - void* buf = ut_zalloc_nokey(sz + UNIV_PAGE_SIZE); + ulint sz = srv_page_size; + void* buf = ut_zalloc_nokey(sz + srv_page_size); if (buf == 0) { os_file_close(handle); return(DB_OUT_OF_MEMORY); @@ -674,7 +673,7 @@ TruncateLogParser::parse( IORequest request(IORequest::READ); /* Align the memory for file i/o if we might have O_DIRECT set*/ - byte* log_buf = static_cast<byte*>(ut_align(buf, UNIV_PAGE_SIZE)); + byte* log_buf = static_cast<byte*>(ut_align(buf, srv_page_size)); do { err = os_file_read(request, handle, log_buf, 0, sz); @@ -714,7 +713,7 @@ TruncateLogParser::parse( sz *= 2; - buf = ut_zalloc_nokey(sz + UNIV_PAGE_SIZE); + buf = ut_zalloc_nokey(sz + srv_page_size); if (buf == 0) { os_file_close(handle); @@ -725,7 +724,7 @@ TruncateLogParser::parse( } log_buf = static_cast<byte*>( - ut_align(buf, UNIV_PAGE_SIZE)); + ut_align(buf, srv_page_size)); } } while (err != DB_SUCCESS); @@ -866,15 +865,13 @@ public: /** Look for table-id in SYS_XXXX tables without loading the table. - @param mtr mini-transaction covering the read @param pcur persistent cursor used for reading - @return DB_SUCCESS or error code */ - dberr_t operator()(mtr_t* mtr, btr_pcur_t* pcur); - -private: - // Disably copying - TableLocator(const TableLocator&); - TableLocator& operator=(const TableLocator&); + @return DB_SUCCESS */ + dberr_t operator()(mtr_t*, btr_pcur_t*) + { + m_table_found = true; + return(DB_SUCCESS); + } private: /** Set to true if table is present */ @@ -882,11 +879,10 @@ private: }; /** -@param mtr mini-transaction covering the read @param pcur persistent cursor used for reading @return DB_SUCCESS or error code */ dberr_t -TruncateLogger::operator()(mtr_t* mtr, btr_pcur_t* pcur) +TruncateLogger::operator()(mtr_t*, btr_pcur_t* pcur) { ulint len; const byte* field; @@ -1088,20 +1084,6 @@ CreateIndex::operator()(mtr_t* mtr, btr_pcur_t* pcur) const return(DB_SUCCESS); } -/** -Look for table-id in SYS_XXXX tables without loading the table. - -@param mtr mini-transaction covering the read -@param pcur persistent cursor used for reading -@return DB_SUCCESS */ -dberr_t -TableLocator::operator()(mtr_t* mtr, btr_pcur_t* pcur) -{ - m_table_found = true; - - return(DB_SUCCESS); -} - /** Rollback the transaction and release the index locks. 
Drop indexes if table is corrupted so that drop/create @@ -1203,7 +1185,7 @@ row_truncate_complete( DEBUG_SYNC_C("ib_trunc_table_trunc_completing"); - if (!dict_table_is_temporary(table)) { + if (!table->is_temporary()) { DBUG_EXECUTE_IF("ib_trunc_crash_before_log_removal", log_buffer_flush_to_disk(); @@ -1224,7 +1206,7 @@ row_truncate_complete( /* If non-temp file-per-table tablespace... */ if (is_file_per_table - && !dict_table_is_temporary(table) + && !table->is_temporary() && fsp_flags != ULINT_UNDEFINED) { /* This function will reset back the stop_new_ops @@ -1484,7 +1466,7 @@ row_truncate_update_system_tables( { dberr_t err = DB_SUCCESS; - ut_a(!dict_table_is_temporary(table)); + ut_a(!table->is_temporary()); err = row_truncate_update_table_id(table->id, new_id, FALSE, trx); @@ -1598,7 +1580,7 @@ dberr_t row_truncate_sanity_checks( const dict_table_t* table) { - if (dict_table_is_discarded(table)) { + if (!table->space) { return(DB_TABLESPACE_DELETED); @@ -1771,7 +1753,7 @@ row_truncate_table_for_mysql( } - if (!dict_table_is_temporary(table)) { + if (!table->is_temporary()) { trx_set_dict_operation(trx, TRX_DICT_OP_TABLE); } @@ -1804,12 +1786,10 @@ row_truncate_table_for_mysql( /* Step-6: Truncate operation can be rolled back in case of error till some point. Associate rollback segment to record undo log. */ if (!table->is_temporary()) { - mutex_enter(&trx->undo_mutex); mtr_t mtr; mtr.start(); trx_undo_assign(trx, &err, &mtr); mtr.commit(); - mutex_exit(&trx->undo_mutex); DBUG_EXECUTE_IF("ib_err_trunc_assigning_undo_log", err = DB_ERROR;); @@ -1848,7 +1828,7 @@ row_truncate_table_for_mysql( we need to use index locks to sync up */ dict_table_x_lock_indexes(table); - if (!dict_table_is_temporary(table)) { + if (!table->is_temporary()) { fsp_flags = table->space ? table->space->flags : ULINT_UNDEFINED; @@ -2028,7 +2008,7 @@ row_truncate_table_for_mysql( DBUG_SUICIDE();); /* Step-10: Re-create new indexes. */ - if (!dict_table_is_temporary(table)) { + if (!table->is_temporary()) { CreateIndex createIndex(table, no_redo); @@ -2068,7 +2048,7 @@ row_truncate_table_for_mysql( on-disk (INNODB_SYS_TABLES). INNODB_SYS_INDEXES also needs to be updated to reflect updated root-page-no of new index created and updated table-id. */ - if (dict_table_is_temporary(table)) { + if (table->is_temporary()) { dict_table_change_id_in_cache(table, new_id); err = DB_SUCCESS; @@ -2201,10 +2181,11 @@ fil_recreate_tablespace( byte* buf; page_t* page; - buf = static_cast<byte*>(ut_zalloc_nokey(3 * UNIV_PAGE_SIZE)); + buf = static_cast<byte*>( + ut_zalloc_nokey(3U << srv_page_size_shift)); /* Align the memory for file i/o */ - page = static_cast<byte*>(ut_align(buf, UNIV_PAGE_SIZE)); + page = static_cast<byte*>(ut_align(buf, srv_page_size)); flags |= FSP_FLAGS_PAGE_SSIZE(); @@ -2215,7 +2196,7 @@ fil_recreate_tablespace( page_zip_des_t page_zip; page_zip_set_size(&page_zip, page_size.physical()); - page_zip.data = page + UNIV_PAGE_SIZE; + page_zip.data = page + srv_page_size; #ifdef UNIV_DEBUG page_zip.m_start = @@ -2324,7 +2305,7 @@ fil_recreate_tablespace( truncate_t::s_fix_up_active = false; func_exit: - fil_space_release(space); + space->release(); return(err); } @@ -2617,7 +2598,7 @@ truncate_t::update_root_page_no( pars_info_add_ull_literal( info, "index_id", - (mark_index_corrupted ? 
IB_ID_MAX : it->m_id)); err = que_eval_sql( info, diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc index 0e7acecd77e..cbcb4eb8d1d 100644 --- a/storage/innobase/row/row0uins.cc +++ b/storage/innobase/row/row0uins.cc @@ -433,7 +433,7 @@ row_undo_ins_parse_undo_rec( ut_ad(dict_table_is_file_per_table(table) == !is_system_tablespace(table->space->id)); size_t len = mach_read_from_2(node->undo_rec) - + node->undo_rec - ptr - 2; + + size_t(node->undo_rec - ptr) - 2; ptr[len] = 0; const char* name = reinterpret_cast<char*>(ptr); if (strcmp(table->name.m_name, name)) { diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc index fd9ff75943e..47c515f70cf 100644 --- a/storage/innobase/row/row0umod.cc +++ b/storage/innobase/row/row0umod.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -32,6 +32,7 @@ Created 2/27/1997 Heikki Tuuri #include "dict0boot.h" #include "trx0undo.h" #include "trx0roll.h" +#include "trx0purge.h" #include "btr0btr.h" #include "mach0data.h" #include "ibuf0ibuf.h" @@ -123,7 +124,8 @@ row_undo_mod_clust_low( } if (mode != BTR_MODIFY_TREE) { - ut_ad((mode & ~BTR_ALREADY_S_LATCHED) == BTR_MODIFY_LEAF); + ut_ad((mode & ulint(~BTR_ALREADY_S_LATCHED)) + == BTR_MODIFY_LEAF); err = btr_cur_optimistic_update( BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG @@ -148,101 +150,56 @@ row_undo_mod_clust_low( return(err); } -/***********************************************************//** -Purges a clustered index record after undo if possible. -This is attempted when the record was inserted by updating a -delete-marked record and there no longer exist transactions -that would see the delete-marked record. -@return DB_SUCCESS, DB_FAIL, or error code: we may run out of file space */ -static MY_ATTRIBUTE((nonnull, warn_unused_result)) -dberr_t -row_undo_mod_remove_clust_low( -/*==========================*/ - undo_node_t* node, /*!< in: row undo node */ - mtr_t* mtr, /*!< in/out: mini-transaction */ - ulint mode) /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE */ +/** Get the byte offset of the DB_TRX_ID column +@param[in] rec clustered index record +@param[in] index clustered index +@return the byte offset of DB_TRX_ID, from the start of rec */ +static ulint row_trx_id_offset(const rec_t* rec, const dict_index_t* index) { - btr_cur_t* btr_cur; - dberr_t err; - ulint trx_id_offset; - - ut_ad(node->rec_type == TRX_UNDO_UPD_DEL_REC); - - /* Find out if the record has been purged already - or if we can remove it. 
*/ - - if (!btr_pcur_restore_position(mode, &node->pcur, mtr) - || row_vers_must_preserve_del_marked(node->new_trx_id, - node->table->name, - mtr)) { - - return(DB_SUCCESS); - } - - btr_cur = btr_pcur_get_btr_cur(&node->pcur); - - trx_id_offset = btr_cur_get_index(btr_cur)->trx_id_offset; - + ut_ad(index->n_uniq <= MAX_REF_PARTS); + ulint trx_id_offset = index->trx_id_offset; if (!trx_id_offset) { - mem_heap_t* heap = NULL; - ulint trx_id_col; - const ulint* offsets; - ulint len; - - trx_id_col = dict_index_get_sys_col_pos( - btr_cur_get_index(btr_cur), DATA_TRX_ID); - ut_ad(trx_id_col > 0); - ut_ad(trx_id_col != ULINT_UNDEFINED); - - offsets = rec_get_offsets( - btr_cur_get_rec(btr_cur), btr_cur_get_index(btr_cur), - NULL, true, trx_id_col + 1, &heap); - + /* Reserve enough offsets for the PRIMARY KEY and 2 columns + so that we can access DB_TRX_ID, DB_ROLL_PTR. */ + ulint offsets_[REC_OFFS_HEADER_SIZE + MAX_REF_PARTS + 2]; + rec_offs_init(offsets_); + mem_heap_t* heap = NULL; + const ulint trx_id_pos = index->n_uniq ? index->n_uniq : 1; + ulint* offsets = rec_get_offsets(rec, index, offsets_, true, + trx_id_pos + 1, &heap); + ut_ad(!heap); + ulint len; trx_id_offset = rec_get_nth_field_offs( - offsets, trx_id_col, &len); + offsets, trx_id_pos, &len); ut_ad(len == DATA_TRX_ID_LEN); - mem_heap_free(heap); } - if (trx_read_trx_id(btr_cur_get_rec(btr_cur) + trx_id_offset) - != node->new_trx_id) { - /* The record must have been purged and then replaced - with a different one. */ - return(DB_SUCCESS); + return trx_id_offset; +} + +/** Determine if rollback must execute a purge-like operation. +@param[in,out] node row undo +@param[in,out] mtr mini-transaction +@return whether the record should be purged */ +static bool row_undo_mod_must_purge(undo_node_t* node, mtr_t* mtr) +{ + ut_ad(node->rec_type == TRX_UNDO_UPD_DEL_REC); + ut_ad(!node->table->is_temporary()); + + btr_cur_t* btr_cur = btr_pcur_get_btr_cur(&node->pcur); + ut_ad(btr_cur->index->is_primary()); + + mtr_s_lock(&purge_sys.latch, mtr); + + if (!purge_sys.view.changes_visible(node->new_trx_id, + node->table->name)) { + return false; } - /* We are about to remove an old, delete-marked version of the - record that may have been delete-marked by a different transaction - than the rolling-back one. */ - ut_ad(rec_get_deleted_flag(btr_cur_get_rec(btr_cur), - dict_table_is_comp(node->table))); - /* In delete-marked records, DB_TRX_ID must - always refer to an existing update_undo log record. */ - ut_ad(rec_get_trx_id(btr_cur_get_rec(btr_cur), btr_cur->index)); + const rec_t* rec = btr_cur_get_rec(btr_cur); - if (mode == BTR_MODIFY_LEAF) { - err = btr_cur_optimistic_delete(btr_cur, 0, mtr) - ? DB_SUCCESS - : DB_FAIL; - } else { - ut_ad(mode == (BTR_MODIFY_TREE | BTR_LATCH_FOR_DELETE)); - - /* This operation is analogous to purge, we can free also - inherited externally stored fields. - We can also assume that the record was complete - (including BLOBs), because it had been delete-marked - after it had been completely inserted. Therefore, we - are passing rollback=false, just like purge does. 
*/ - - btr_cur_pessimistic_delete(&err, FALSE, btr_cur, 0, - false, mtr); - - /* The delete operation may fail if we have little - file space left: TODO: easiest to crash the database - and restart with more file space */ - } - - return(err); + return trx_read_trx_id(rec + row_trx_id_offset(rec, btr_cur->index)) + == node->new_trx_id; } /***********************************************************//** @@ -271,6 +228,7 @@ row_undo_mod_clust( log_free_check(); pcur = &node->pcur; index = btr_cur_get_index(btr_pcur_get_btr_cur(pcur)); + ut_ad(index->is_primary()); mtr.start(); if (index->table->is_temporary()) { @@ -364,44 +322,122 @@ row_undo_mod_clust( btr_pcur_commit_specify_mtr(pcur, &mtr); - if (err == DB_SUCCESS && node->rec_type == TRX_UNDO_UPD_DEL_REC) { + if (err != DB_SUCCESS) { + goto func_exit; + } + + /* FIXME: Perform the below operations in the above + mini-transaction when possible. */ + + if (node->rec_type == TRX_UNDO_UPD_DEL_REC) { + /* In delete-marked records, DB_TRX_ID must + always refer to an existing update_undo log record. */ + ut_ad(node->new_trx_id); mtr.start(); + if (!btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, &mtr)) { + goto mtr_commit_exit; + } + if (index->table->is_temporary()) { mtr.set_log_mode(MTR_LOG_NO_REDO); } else { + if (!row_undo_mod_must_purge(node, &mtr)) { + goto mtr_commit_exit; + } index->set_modified(mtr); } - /* It is not necessary to call row_log_table, - because the record is delete-marked and would thus - be omitted from the rebuilt copy of the table. */ - err = row_undo_mod_remove_clust_low( - node, &mtr, BTR_MODIFY_LEAF); - if (err != DB_SUCCESS) { - btr_pcur_commit_specify_mtr(pcur, &mtr); - - /* We may have to modify tree structure: do a - pessimistic descent down the index tree */ - - mtr.start(); - if (index->table->is_temporary()) { - mtr.set_log_mode(MTR_LOG_NO_REDO); - } else { - index->set_modified(mtr); - } - - err = row_undo_mod_remove_clust_low( - node, &mtr, - BTR_MODIFY_TREE | BTR_LATCH_FOR_DELETE); - - ut_ad(err == DB_SUCCESS - || err == DB_OUT_OF_FILE_SPACE); + ut_ad(rec_get_deleted_flag(btr_pcur_get_rec(pcur), + dict_table_is_comp(node->table))); + if (btr_cur_optimistic_delete(&pcur->btr_cur, 0, &mtr)) { + goto mtr_commit_exit; } btr_pcur_commit_specify_mtr(pcur, &mtr); + + mtr.start(); + if (!btr_pcur_restore_position( + BTR_MODIFY_TREE | BTR_LATCH_FOR_DELETE, + pcur, &mtr)) { + goto mtr_commit_exit; + } + + if (index->table->is_temporary()) { + mtr.set_log_mode(MTR_LOG_NO_REDO); + } else { + if (!row_undo_mod_must_purge(node, &mtr)) { + goto mtr_commit_exit; + } + index->set_modified(mtr); + } + + ut_ad(rec_get_deleted_flag(btr_pcur_get_rec(pcur), + dict_table_is_comp(node->table))); + + /* This operation is analogous to purge, we can free + also inherited externally stored fields. We can also + assume that the record was complete (including BLOBs), + because it had been delete-marked after it had been + completely inserted. Therefore, we are passing + rollback=false, just like purge does. */ + btr_cur_pessimistic_delete(&err, FALSE, &pcur->btr_cur, 0, + false, &mtr); + ut_ad(err == DB_SUCCESS + || err == DB_OUT_OF_FILE_SPACE); + } else if (!index->table->is_temporary() && node->new_trx_id) { + /* We rolled back a record so that it still exists. + We must reset the DB_TRX_ID if the history is no + longer accessible by any active read view. 
*/ + + mtr.start(); + if (!btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, &mtr)) { + goto mtr_commit_exit; + } + rec_t* rec = btr_pcur_get_rec(pcur); + mtr_s_lock(&purge_sys.latch, &mtr); + if (!purge_sys.view.changes_visible(node->new_trx_id, + node->table->name)) { + goto mtr_commit_exit; + } + + ulint trx_id_pos = index->n_uniq ? index->n_uniq : 1; + ut_ad(index->n_uniq <= MAX_REF_PARTS); + /* Reserve enough offsets for the PRIMARY KEY and 2 columns + so that we can access DB_TRX_ID, DB_ROLL_PTR. */ + ulint offsets_[REC_OFFS_HEADER_SIZE + MAX_REF_PARTS + 2]; + rec_offs_init(offsets_); + offsets = rec_get_offsets( + rec, index, offsets_, true, trx_id_pos + 2, &heap); + ulint len; + ulint trx_id_offset = rec_get_nth_field_offs( + offsets, trx_id_pos, &len); + ut_ad(len == DATA_TRX_ID_LEN); + + if (trx_read_trx_id(rec + trx_id_offset) == node->new_trx_id) { + ut_ad(!rec_get_deleted_flag( + rec, dict_table_is_comp(node->table))); + index->set_modified(mtr); + if (page_zip_des_t* page_zip = buf_block_get_page_zip( + btr_pcur_get_block(&node->pcur))) { + page_zip_write_trx_id_and_roll_ptr( + page_zip, rec, offsets, trx_id_pos, + 0, 1ULL << ROLL_PTR_INSERT_FLAG_POS, + &mtr); + } else { + mlog_write_string(rec + trx_id_offset, + reset_trx_id, + sizeof reset_trx_id, &mtr); + } + } + } else { + goto func_exit; } +mtr_commit_exit: + btr_pcur_commit_specify_mtr(pcur, &mtr); + +func_exit: node->state = UNDO_NODE_FETCH_NEXT; if (offsets_heap) { @@ -428,7 +464,6 @@ row_undo_mod_del_mark_or_remove_sec_low( btr_pcur_t pcur; btr_cur_t* btr_cur; ibool success; - ibool old_has; dberr_t err = DB_SUCCESS; mtr_t mtr; mtr_t mtr_vers; @@ -504,11 +539,12 @@ row_undo_mod_del_mark_or_remove_sec_low( &mtr_vers); ut_a(success); - old_has = row_vers_old_has_index_entry(FALSE, - btr_pcur_get_rec(&(node->pcur)), - &mtr_vers, index, entry, - 0, 0); - if (old_has) { + /* For temporary table, we can skip to check older version of + clustered index entry, because there is no MVCC or purge. */ + if (node->table->is_temporary() + || row_vers_old_has_index_entry( + FALSE, btr_pcur_get_rec(&node->pcur), + &mtr_vers, index, entry, 0, 0)) { err = btr_cur_del_mark_set_sec_rec(BTR_NO_LOCKING_FLAG, btr_cur, TRUE, thr, &mtr); ut_ad(err == DB_SUCCESS); @@ -527,18 +563,14 @@ row_undo_mod_del_mark_or_remove_sec_low( } if (modify_leaf) { - success = btr_cur_optimistic_delete(btr_cur, 0, &mtr); - if (success) { - err = DB_SUCCESS; - } else { - err = DB_FAIL; - } + err = btr_cur_optimistic_delete(btr_cur, 0, &mtr) + ? DB_SUCCESS : DB_FAIL; } else { /* Passing rollback=false, because we are deleting a secondary index record: the distinction only matters when deleting a record that contains externally stored columns. */ - ut_ad(!dict_index_is_clust(index)); + ut_ad(!index->is_primary()); btr_cur_pessimistic_delete(&err, FALSE, btr_cur, 0, false, &mtr); @@ -862,8 +894,8 @@ row_undo_mod_upd_del_sec( } /* During online index creation, - HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE should - guarantee that any active transaction has not modified + HA_ALTER_INPLACE_COPY_NO_LOCK or HA_ALTER_INPLACE_NOCOPY_NO_LOCk + should guarantee that any active transaction has not modified indexed columns such that col->ord_part was 0 at the time when the undo log record was written. 
When we get to roll back an undo log entry TRX_UNDO_DEL_MARK_REC, @@ -928,8 +960,8 @@ row_undo_mod_del_mark_sec( } /* During online index creation, - HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE should - guarantee that any active transaction has not modified + HA_ALTER_INPLACE_COPY_NO_LOCK or HA_ALTER_INPLACE_NOCOPY_NO_LOCK + should guarantee that any active transaction has not modified indexed columns such that col->ord_part was 0 at the time when the undo log record was written. When we get to roll back an undo log entry TRX_UNDO_DEL_MARK_REC, diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc index e9706a3393f..e2197f845f3 100644 --- a/storage/innobase/row/row0upd.cc +++ b/storage/innobase/row/row0upd.cc @@ -456,17 +456,15 @@ func_exit: @param[in] node query node @param[in] trx transaction @return whether the node cannot be ignored */ -inline -bool -wsrep_must_process_fk(const upd_node_t* node, const trx_t* trx) + +inline bool wsrep_must_process_fk(const upd_node_t* node, const trx_t* trx) { - if (que_node_get_type(node->common.parent) != QUE_NODE_UPDATE - || !wsrep_on_trx(trx)) { + if (!wsrep_on_trx(trx)) { return false; } - - return static_cast(node->common.parent)->cascade_node - == node; + return que_node_get_type(node->common.parent) != QUE_NODE_UPDATE + || static_cast(node->common.parent)->cascade_node + != node; } #endif /* WITH_WSREP */ @@ -515,9 +513,7 @@ row_upd_rec_sys_fields_in_recovery( field = rec_get_nth_field(rec, offsets, pos, &len); ut_ad(len == DATA_TRX_ID_LEN); -#if DATA_TRX_ID + 1 != DATA_ROLL_PTR -# error "DATA_TRX_ID + 1 != DATA_ROLL_PTR" -#endif + compile_time_assert(DATA_TRX_ID + 1 == DATA_ROLL_PTR); trx_write_trx_id(field, trx_id); trx_write_roll_ptr(field + DATA_TRX_ID_LEN, roll_ptr); } @@ -847,10 +843,7 @@ row_upd_index_write_log( log_ptr += mach_write_compressed(log_ptr, n_fields); for (i = 0; i < n_fields; i++) { - -#if MLOG_BUF_MARGIN <= 30 -# error "MLOG_BUF_MARGIN <= 30" -#endif + compile_time_assert(MLOG_BUF_MARGIN > 30); if (log_ptr + 30 > buf_end) { mlog_close(mtr, log_ptr); @@ -868,8 +861,8 @@ row_upd_index_write_log( /* If this is a virtual column, mark it using special field_no */ ulint field_no = upd_fld_is_virtual_col(upd_field) - ? REC_MAX_N_FIELDS + upd_field->field_no - : upd_field->field_no; + ? REC_MAX_N_FIELDS + unsigned(upd_field->field_no) + : unsigned(upd_field->field_no); log_ptr += mach_write_compressed(log_ptr, field_no); log_ptr += mach_write_compressed(log_ptr, len); @@ -1328,7 +1321,7 @@ row_upd_index_replace_new_col_val( /* Copy the locally stored prefix. */ memcpy(buf, data, - uf->orig_len - BTR_EXTERN_FIELD_REF_SIZE); + unsigned(uf->orig_len) - BTR_EXTERN_FIELD_REF_SIZE); /* Copy the BLOB pointer. 
*/ memcpy(buf + uf->orig_len - BTR_EXTERN_FIELD_REF_SIZE, @@ -1368,7 +1361,7 @@ row_upd_index_replace_new_col_vals_index_pos( field = dict_index_get_nth_field(index, i); col = dict_field_get_col(field); - if (dict_col_is_virtual(col)) { + if (col->is_virtual()) { const dict_v_col_t* vcol = reinterpret_cast< const dict_v_col_t*>( col); @@ -1422,7 +1415,7 @@ row_upd_index_replace_new_col_vals( field = dict_index_get_nth_field(index, i); col = dict_field_get_col(field); - if (dict_col_is_virtual(col)) { + if (col->is_virtual()) { const dict_v_col_t* vcol = reinterpret_cast< const dict_v_col_t*>( col); @@ -1727,7 +1720,7 @@ row_upd_changes_ord_field_binary_func( ind_field = dict_index_get_nth_field(index, i); col = dict_field_get_col(ind_field); col_no = dict_col_get_no(col); - is_virtual = dict_col_is_virtual(col); + is_virtual = col->is_virtual(); if (is_virtual) { vcol = reinterpret_cast(col); @@ -2355,7 +2348,7 @@ row_upd_sec_index_entry( are no foreign key constraints referring to the index. Change buffering is disabled for temporary tables and spatial index. */ - mode = (referenced || dict_table_is_temporary(index->table) + mode = (referenced || index->table->is_temporary() || dict_index_is_spatial(index)) ? BTR_MODIFY_LEAF_ALREADY_S_LATCHED : BTR_DELETE_MARK_LEAF_ALREADY_S_LATCHED; @@ -2369,7 +2362,7 @@ row_upd_sec_index_entry( are no foreign key constraints referring to the index. Change buffering is disabled for temporary tables and spatial index. */ - mode = (referenced || dict_table_is_temporary(index->table) + mode = (referenced || index->table->is_temporary() || dict_index_is_spatial(index)) ? BTR_MODIFY_LEAF : BTR_DELETE_MARK_LEAF; @@ -3056,7 +3049,7 @@ row_upd_clust_step( mtr.start(); - if (dict_table_is_temporary(node->table)) { + if (node->table->is_temporary()) { /* Disable locking, because temporary tables are private to the connection (no concurrent access). */ flags = node->table->no_rollback() diff --git a/storage/innobase/row/row0vers.cc b/storage/innobase/row/row0vers.cc index eed40ec30df..d585ef3a9d3 100644 --- a/storage/innobase/row/row0vers.cc +++ b/storage/innobase/row/row0vers.cc @@ -60,7 +60,7 @@ row_vers_non_virtual_fields_equal( for (const dict_field_t* ifield = index->fields; ifield != end; ifield++) { - if (!dict_col_is_virtual(ifield->col) + if (!ifield->col->is_virtual() && cmp_dfield_dfield(a++, b++)) { return false; } @@ -420,29 +420,6 @@ row_vers_impl_x_locked( return(trx); } -/*****************************************************************//** -Finds out if we must preserve a delete marked earlier version of a clustered -index record, because it is >= the purge view. 
-@param[in] trx_id transaction id in the version -@param[in] name table name -@param[in,out] mtr mini transaction holding the latch on the - clustered index record; it will also hold - the latch on purge_view -@return TRUE if earlier version should be preserved */ -ibool -row_vers_must_preserve_del_marked( -/*==============================*/ - trx_id_t trx_id, - const table_name_t& name, - mtr_t* mtr) -{ - ut_ad(!rw_lock_own(&(purge_sys.latch), RW_LOCK_S)); - - mtr_s_lock(&purge_sys.latch, mtr); - - return(!purge_sys.view.changes_visible(trx_id, name)); -} - /** build virtual column value from current cluster index record data @param[in,out] row the cluster index row in dtuple form @param[in] clust_index clustered index @@ -461,7 +438,7 @@ row_vers_build_clust_v_col( const dict_field_t* ind_field = dict_index_get_nth_field( index, i); - if (dict_col_is_virtual(ind_field->col)) { + if (ind_field->col->is_virtual()) { const dict_v_col_t* col; col = reinterpret_cast( @@ -560,7 +537,7 @@ row_vers_build_cur_vrow_low( = dict_index_get_nth_field(index, i); const dict_col_t* col = ind_field->col; - if (!dict_col_is_virtual(col)) { + if (!col->is_virtual()) { continue; } @@ -644,7 +621,7 @@ row_vers_vc_matches_cluster( for (const dict_field_t *ifield = index->fields, *const end = &index->fields[index->n_fields]; ifield != end; ifield++, a++, b++) { - if (!dict_col_is_virtual(ifield->col)) { + if (!ifield->col->is_virtual()) { if (cmp_dfield_dfield(a, b)) { return false; } @@ -708,7 +685,7 @@ row_vers_vc_matches_cluster( const dict_col_t* col = ind_field->col; field1 = dtuple_get_nth_field(ientry, i); - if (!dict_col_is_virtual(col)) { + if (!col->is_virtual()) { continue; } @@ -773,7 +750,6 @@ func_exit: @param[in] clust_index cluster index @param[in] clust_offsets cluster rec offset @param[in] index secondary index -@param[in] ientry secondary index rec @param[in] roll_ptr roll_ptr for the purge record @param[in] trx_id transaction ID on the purging record @param[in,out] heap heap memory @@ -788,7 +764,6 @@ row_vers_build_cur_vrow( dict_index_t* clust_index, ulint** clust_offsets, dict_index_t* index, - const dtuple_t* ientry, roll_ptr_t roll_ptr, trx_id_t trx_id, mem_heap_t* heap, @@ -993,7 +968,7 @@ safe_to_purge: associated with current cluster index */ cur_vrow = row_vers_build_cur_vrow( also_curr, rec, clust_index, &clust_offsets, - index, ientry, roll_ptr, trx_id, heap, v_heap, mtr); + index, roll_ptr, trx_id, heap, v_heap, mtr); } version = rec; diff --git a/storage/innobase/srv/srv0conc.cc b/storage/innobase/srv/srv0conc.cc index a1ffa8986a8..d26e61253cd 100644 --- a/storage/innobase/srv/srv0conc.cc +++ b/storage/innobase/srv/srv0conc.cc @@ -58,10 +58,8 @@ ulong srv_thread_sleep_delay = 10000; /** We are prepared for a situation that we have this many threads waiting for -a semaphore inside InnoDB. innobase_start_or_create_for_mysql() sets the -value. */ - -ulint srv_max_n_threads = 0; +a semaphore inside InnoDB. srv_start() sets the value. 
*/ +ulint srv_max_n_threads; /** The following controls how many threads we let inside InnoDB concurrently: threads waiting for locks are not counted into the number because otherwise @@ -136,12 +134,9 @@ srv_conc_enter_innodb_with_atomics( #endif /* WITH_WSREP */ if (srv_thread_concurrency == 0) { - if (notified_mysql) { - - (void) my_atomic_addlint( - &srv_conc.n_waiting, -1); - + my_atomic_addlint(&srv_conc.n_waiting, + ulint(-1)); thd_wait_end(trx->mysql_thd); } @@ -160,10 +155,8 @@ srv_conc_enter_innodb_with_atomics( srv_enter_innodb_with_tickets(trx); if (notified_mysql) { - - (void) my_atomic_addlint( - &srv_conc.n_waiting, -1); - + my_atomic_addlint(&srv_conc.n_waiting, + ulint(-1)); thd_wait_end(trx->mysql_thd); } @@ -185,13 +178,11 @@ srv_conc_enter_innodb_with_atomics( /* Since there were no free seats, we relinquish the overbooked ticket. */ - (void) my_atomic_addlint( - &srv_conc.n_active, -1); + my_atomic_addlint(&srv_conc.n_active, ulint(-1)); } if (!notified_mysql) { - (void) my_atomic_addlint( - &srv_conc.n_waiting, 1); + my_atomic_addlint(&srv_conc.n_waiting, 1); thd_wait_begin(trx->mysql_thd, THD_WAIT_USER_LOCK); @@ -235,7 +226,7 @@ srv_conc_exit_innodb_with_atomics( trx->n_tickets_to_enter_innodb = 0; trx->declared_to_be_inside_innodb = FALSE; - (void) my_atomic_addlint(&srv_conc.n_active, -1); + my_atomic_addlint(&srv_conc.n_active, ulint(-1)); } /*********************************************************************//** diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc index e51b948e6d9..8c62d61423f 100644 --- a/storage/innobase/srv/srv0mon.cc +++ b/storage/innobase/srv/srv0mon.cc @@ -1479,8 +1479,8 @@ srv_mon_set_module_control( mon_option_t set_option) /*!< in: Turn on/off reset the counter */ { - ulint ix; - ulint start_id; + lint ix; + lint start_id; ibool set_current_module = FALSE; ut_a(module_id <= NUM_MONITOR); @@ -1827,7 +1827,7 @@ srv_mon_process_existing_counter( /* innodb_page_size */ case MONITOR_OVLD_SRV_PAGE_SIZE: - value = UNIV_PAGE_SIZE; + value = srv_page_size; break; case MONITOR_OVLD_RWLOCK_S_SPIN_WAITS: @@ -1989,11 +1989,11 @@ srv_mon_process_existing_counter( break; case MONITOR_OVLD_LSN_FLUSHDISK: - value = (mon_type_t) log_sys->flushed_to_disk_lsn; + value = (mon_type_t) log_sys.flushed_to_disk_lsn; break; case MONITOR_OVLD_LSN_CURRENT: - value = (mon_type_t) log_sys->lsn; + value = (mon_type_t) log_sys.lsn; break; case MONITOR_OVLD_BUF_OLDEST_LSN: @@ -2001,15 +2001,15 @@ srv_mon_process_existing_counter( break; case MONITOR_OVLD_LSN_CHECKPOINT: - value = (mon_type_t) log_sys->last_checkpoint_lsn; + value = (mon_type_t) log_sys.last_checkpoint_lsn; break; case MONITOR_OVLD_MAX_AGE_ASYNC: - value = log_sys->max_modified_age_async; + value = log_sys.max_modified_age_async; break; case MONITOR_OVLD_MAX_AGE_SYNC: - value = log_sys->max_modified_age_sync; + value = log_sys.max_modified_age_sync; break; #ifdef BTR_CUR_HASH_ADAPT diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index d83e2276a58..ca60949ad5f 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -170,7 +170,7 @@ use simulated aio we build below with threads. 
Currently we support native aio on windows and linux */ my_bool srv_use_native_aio; my_bool srv_numa_interleave; -/** copy of innodb_use_atomic_writes; @see innobase_init() */ +/** copy of innodb_use_atomic_writes; @see innodb_init_params() */ my_bool srv_use_atomic_writes; /** innodb_compression_algorithm; used with page compression */ ulong innodb_compression_algorithm; @@ -189,15 +189,15 @@ ulong srv_n_log_files; /** The InnoDB redo log file size, or 0 when changing the redo log format at startup (while disallowing writes to the redo log). */ ulonglong srv_log_file_size; -/** copy of innodb_log_buffer_size, but in database pages */ -ulint srv_log_buffer_size; +/** innodb_log_buffer_size, in bytes */ +ulong srv_log_buffer_size; /** innodb_flush_log_at_trx_commit */ ulong srv_flush_log_at_trx_commit; /** innodb_flush_log_at_timeout */ uint srv_flush_log_at_timeout; /** innodb_page_size */ ulong srv_page_size; -/** log2 of innodb_page_size; @see innobase_init() */ +/** log2 of innodb_page_size; @see innodb_init_params() */ ulong srv_page_size_shift; /** innodb_log_write_ahead_size */ ulong srv_log_write_ahead_size; @@ -262,10 +262,10 @@ ulint srv_lock_table_size = ULINT_MAX; /** innodb_idle_flush_pct */ ulong srv_idle_flush_pct; -/** copy of innodb_read_io_threads */ -ulint srv_n_read_io_threads; -/** copy of innodb_write_io_threads */ -ulint srv_n_write_io_threads; +/** innodb_read_io_threads */ +ulong srv_n_read_io_threads; +/** innodb_write_io_threads */ +ulong srv_n_write_io_threads; /** innodb_random_read_ahead */ my_bool srv_random_read_ahead; @@ -278,13 +278,10 @@ ulong srv_read_ahead_threshold; buffer in terms of percentage of the buffer pool. */ uint srv_change_buffer_max_size; -char* srv_file_flush_method_str; +ulong srv_file_flush_method; -enum srv_flush_t srv_file_flush_method = IF_WIN(SRV_ALL_O_DIRECT_FSYNC,SRV_FSYNC); - - -/** copy of innodb_open_files, initialized by innobase_init() */ +/** copy of innodb_open_files; @see innodb_init_params() */ ulint srv_max_n_open_files; /** innodb_io_capacity */ @@ -381,8 +378,7 @@ unsigned long long srv_stats_modified_counter; based on number of configured pages */ my_bool srv_stats_sample_traditional; -/** copy of innodb_doublewrite */ -ibool srv_use_doublewrite_buf; +my_bool srv_use_doublewrite_buf; /** innodb_doublewrite_batch_size (a debug parameter) specifies the number of pages to use in LRU and flush_list batch flushing. @@ -617,6 +613,12 @@ struct srv_sys_t{ static srv_sys_t srv_sys; +/** @return whether the purge coordinator thread is active */ +bool purge_sys_t::running() +{ + return my_atomic_loadlint(&srv_sys.n_threads_active[SRV_PURGE]); +} + /** Event to signal srv_monitor_thread. Not protected by a mutex. Set after setting srv_print_innodb_monitor. */ os_event_t srv_monitor_event; @@ -860,7 +862,8 @@ srv_suspend_thread_low( ut_a(!slot->suspended); slot->suspended = TRUE; - if ((lint)my_atomic_addlint(&srv_sys.n_threads_active[type], -1) < 0) { + if (lint(my_atomic_addlint(&srv_sys.n_threads_active[type], ulint(-1))) + < 0) { ut_error; } @@ -1111,41 +1114,16 @@ srv_free(void) trx_i_s_cache_free(trx_i_s_cache); } -/*********************************************************************//** -Normalizes init parameter values to use units we use inside InnoDB. 
*/ -static -void -srv_normalize_init_values(void) -/*===========================*/ -{ - srv_sys_space.normalize(); - - srv_tmp_space.normalize(); - - srv_log_buffer_size /= UNIV_PAGE_SIZE; - - srv_lock_table_size = 5 * (srv_buf_pool_size / UNIV_PAGE_SIZE); -} - /*********************************************************************//** Boots the InnoDB server. */ void srv_boot(void) /*==========*/ { - /* Transform the init parameter values given by MySQL to - use units we use inside InnoDB: */ - - srv_normalize_init_values(); - sync_check_init(); - /* Reset the system variables in the recovery module. */ recv_sys_var_init(); trx_pool_init(); row_mysql_init(); - - /* Initialize this module */ - srv_init(); } @@ -1537,7 +1515,7 @@ srv_export_innodb_status(void) export_vars.innodb_have_atomic_builtins = 0; #endif - export_vars.innodb_page_size = UNIV_PAGE_SIZE; + export_vars.innodb_page_size = srv_page_size; export_vars.innodb_log_waits = srv_stats.log_waits; @@ -1923,19 +1901,8 @@ srv_get_active_thread_type(void) srv_sys_mutex_exit(); - if (ret == SRV_NONE && srv_shutdown_state != SRV_SHUTDOWN_NONE - && purge_sys.is_initialised()) { - /* Check only on shutdown. */ - switch (trx_purge_state()) { - case PURGE_STATE_RUN: - case PURGE_STATE_STOP: - ret = SRV_PURGE; - break; - case PURGE_STATE_INIT: - case PURGE_STATE_DISABLED: - case PURGE_STATE_EXIT: - break; - } + if (ret == SRV_NONE && purge_sys.enabled()) { + ret = SRV_PURGE; } return(ret); @@ -1974,7 +1941,7 @@ srv_wake_purge_thread_if_not_active() { ut_ad(!srv_sys_mutex_own()); - if (purge_sys.state == PURGE_STATE_RUN + if (purge_sys.enabled() && !purge_sys.paused() && !my_atomic_loadlint(&srv_sys.n_threads_active[SRV_PURGE]) && trx_sys.history_size()) { @@ -2122,16 +2089,10 @@ srv_master_do_disabled_loop(void) /** Disables master thread. It's used by: SET GLOBAL innodb_master_thread_disabled_debug = 1 (0). -@param[in] thd thread handle -@param[in] var pointer to system variable -@param[out] var_ptr where the formal string goes @param[in] save immediate result from check function */ void -srv_master_thread_disabled_debug_update( - THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save) +srv_master_thread_disabled_debug_update(THD*, st_mysql_sys_var*, void*, + const void* save) { /* This method is protected by mutex, as every SET GLOBAL .. */ ut_ad(srv_master_thread_disabled_event != NULL); @@ -2399,10 +2360,6 @@ DECLARE_THREAD(srv_master_thread)( ut_a(slot == srv_sys.sys_threads); loop: - if (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND) { - goto suspend_thread; - } - while (srv_shutdown_state == SRV_SHUTDOWN_NONE) { srv_master_sleep(); @@ -2417,7 +2374,6 @@ loop: } } -suspend_thread: switch (srv_shutdown_state) { case SRV_SHUTDOWN_NONE: break; @@ -2441,10 +2397,6 @@ suspend_thread: srv_suspend_thread(slot); - /* DO NOT CHANGE THIS STRING. innobase_start_or_create_for_mysql() - waits for database activity to die down when converting < 4.1.x - databases, and relies on this string being exactly as it is. InnoDB - manual also mentions this string in several places. */ srv_main_thread_op_info = "waiting for server activity"; srv_resume_thread(slot); @@ -2536,19 +2488,11 @@ DECLARE_THREAD(srv_worker_thread)( srv_wake_purge_thread_if_not_active(); } - - /* Note: we are checking the state without holding the - purge_sys.latch here. 
*/ - } while (purge_sys.state != PURGE_STATE_EXIT); + } while (purge_sys.enabled()); srv_free_slot(slot); - rw_lock_x_lock(&purge_sys.latch); - - ut_a(!purge_sys.running); - ut_a(purge_sys.state == PURGE_STATE_EXIT); - - rw_lock_x_unlock(&purge_sys.latch); + ut_ad(!purge_sys.enabled()); #ifdef UNIV_DEBUG_THREAD_CREATION ib::info() << "Purge worker thread exiting, id " @@ -2646,8 +2590,7 @@ srv_do_purge(ulint* n_total_purged) /* The previous round still did some work. */ continue; } - } while (n_pages_purged > 0 - && purge_sys.state == PURGE_STATE_RUN + } while (n_pages_purged > 0 && !purge_sys.paused() && !srv_purge_should_exit()); return(rseg_history_len); @@ -2675,12 +2618,6 @@ srv_purge_coordinator_suspend( int64_t sig_count = srv_suspend_thread(slot); do { - rw_lock_x_lock(&purge_sys.latch); - - purge_sys.running = false; - - rw_lock_x_unlock(&purge_sys.latch); - /* We don't wait right away on the the non-timed wait because we want to signal the thread that wants to suspend purge. */ const bool wait = stop @@ -2693,13 +2630,10 @@ srv_purge_coordinator_suspend( rw_lock_x_lock(&purge_sys.latch); - stop = (srv_shutdown_state == SRV_SHUTDOWN_NONE - && purge_sys.state == PURGE_STATE_STOP); + stop = srv_shutdown_state == SRV_SHUTDOWN_NONE + && purge_sys.paused_latched(); if (!stop) { - ut_a(purge_sys.n_stop == 0); - purge_sys.running = true; - if (timeout && rseg_history_len < 5000 && rseg_history_len == trx_sys.history_size()) { @@ -2713,8 +2647,6 @@ srv_purge_coordinator_suspend( stop = true; } } else { - ut_a(purge_sys.n_stop > 0); - /* Signal that we are suspended. */ os_event_set(purge_sys.event); } @@ -2742,15 +2674,9 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( ut_ad(!srv_read_only_mode); ut_a(srv_n_purge_threads >= 1); - ut_a(trx_purge_state() == PURGE_STATE_INIT); ut_a(srv_force_recovery < SRV_FORCE_NO_BACKGROUND); - rw_lock_x_lock(&purge_sys.latch); - - purge_sys.running = true; - purge_sys.state = PURGE_STATE_RUN; - - rw_lock_x_unlock(&purge_sys.latch); + purge_sys.coordinator_startup(); #ifdef UNIV_PFS_THREAD pfs_register_thread(srv_purge_thread_key); @@ -2771,8 +2697,7 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( if (srv_shutdown_state == SRV_SHUTDOWN_NONE && srv_undo_sources - && (n_total_purged == 0 - || purge_sys.state == PURGE_STATE_STOP)) { + && (n_total_purged == 0 || purge_sys.paused())) { srv_purge_coordinator_suspend(slot, rseg_history_len); } @@ -2796,16 +2721,13 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( /* Note that we are shutting down. */ rw_lock_x_lock(&purge_sys.latch); - - purge_sys.state = PURGE_STATE_EXIT; + purge_sys.coordinator_shutdown(); /* If there are any pending undo-tablespace truncate then clear it off as we plan to shutdown the purge thread. */ purge_sys.undo_trunc.clear(); - purge_sys.running = false; - - /* Ensure that the wait in trx_purge_stop() will terminate. */ + /* Ensure that the wait in purge_sys_t::stop() will terminate. */ os_event_set(purge_sys.event); rw_lock_x_unlock(&purge_sys.latch); @@ -2816,8 +2738,18 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( #endif /* UNIV_DEBUG_THREAD_CREATION */ /* Ensure that all the worker threads quit. 
*/ - if (srv_n_purge_threads > 1) { - srv_release_threads(SRV_WORKER, srv_n_purge_threads - 1); + if (ulint n_workers = srv_n_purge_threads - 1) { + const srv_slot_t* slot; + const srv_slot_t* const end = &srv_sys.sys_threads[ + srv_sys.n_sys_threads]; + + do { + srv_release_threads(SRV_WORKER, n_workers); + srv_sys_mutex_enter(); + for (slot = &srv_sys.sys_threads[2]; + !slot++->in_use && slot < end; ); + srv_sys_mutex_exit(); + } while (slot < end); } innobase_destroy_background_thd(thd); @@ -2872,6 +2804,7 @@ void srv_purge_wakeup() { ut_ad(!srv_read_only_mode); + ut_ad(!sync_check_iterate(sync_check())); if (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND) { return; @@ -2892,6 +2825,15 @@ srv_purge_wakeup() || srv_sys.n_threads_active[SRV_PURGE])); } +/** Shut down the purge threads. */ +void srv_purge_shutdown() +{ + do { + ut_ad(!srv_undo_sources); + srv_purge_wakeup(); + } while (srv_sys.sys_threads[SRV_PURGE_SLOT].in_use); +} + /** Check if tablespace is being truncated. (Ignore system-tablespace as we don't re-create the tablespace and so some of the action that are suppressed by this function diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index e20b02faf1b..89070214a1b 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -108,11 +108,6 @@ Created 2/16/1996 Heikki Tuuri #include "btr0scrub.h" #include "ut0new.h" -#ifdef HAVE_LZO1X -#include -extern bool srv_lzo_disabled; -#endif /* HAVE_LZO1X */ - /** Log sequence number immediately after startup */ lsn_t srv_start_lsn; /** Log sequence number at shutdown */ @@ -138,7 +133,7 @@ bool srv_sys_tablespaces_open; bool srv_was_started; /** The original value of srv_log_file_size (innodb_log_file_size) */ static ulonglong srv_log_file_size_requested; -/** TRUE if innobase_start_or_create_for_mysql() has been called */ +/** whether srv_start() has been called */ static bool srv_start_has_been_called; /** Whether any undo log records can be generated */ @@ -200,9 +195,6 @@ static bool thread_started[SRV_MAX_N_IO_THREADS + 6 + 32] = {false}; /** Name of srv_monitor_file */ static char* srv_monitor_file_name; -/** Minimum expected tablespace size. (10M) */ -static const ulint MIN_EXPECTED_TABLESPACE_SIZE = 5 * 1024 * 1024; - /** */ #define SRV_MAX_N_PENDING_SYNC_IOS 100 @@ -310,7 +302,7 @@ DECLARE_THREAD(io_handler_thread)( #endif /* For read only mode, we don't need ibuf and log I/O thread. - Please see innobase_start_or_create_for_mysql() */ + Please see srv_start() */ ulint start = (srv_read_only_mode) ? 0 : 2; if (segment < start) { @@ -495,7 +487,7 @@ create_log_files( } } - log_init(srv_n_log_files); + log_sys.log.create(srv_n_log_files); if (!log_set_capacity(srv_log_file_size_requested)) { return(DB_ERROR); } @@ -504,7 +496,7 @@ create_log_files( /* Create a log checkpoint. 
*/ log_mutex_enter(); - if (log_sys->is_encrypted() && !log_crypt_init()) { + if (log_sys.is_encrypted() && !log_crypt_init()) { return(DB_ERROR); } ut_d(recv_no_log_write = false); @@ -648,13 +640,13 @@ srv_undo_tablespace_create( " be created"; ib::info() << "Setting file " << name << " size to " - << (size >> (20 - UNIV_PAGE_SIZE_SHIFT)) << " MB"; + << (size >> (20 - srv_page_size_shift)) << " MB"; ib::info() << "Database physically writes the file full: " << "wait..."; ret = os_file_set_size( - name, fh, os_offset_t(size) << UNIV_PAGE_SIZE_SHIFT); + name, fh, os_offset_t(size) << srv_page_size_shift); if (!ret) { ib::info() << "Error in creating " << name @@ -731,7 +723,7 @@ srv_undo_tablespace_open( ut_a(fil_validate()); ut_a(space); - os_offset_t n_pages = size / UNIV_PAGE_SIZE; + os_offset_t n_pages = size >> srv_page_size_shift; /* On 32-bit platforms, ulint is 32 bits and os_offset_t is 64 bits. It is OK to cast the n_pages to ulint because @@ -756,7 +748,7 @@ dberr_t srv_check_undo_redo_logs_exists() { bool ret; - os_file_t fh; + pfs_os_file_t fh; char name[OS_FILE_MAX_PATH]; /* Check if any undo tablespaces exist */ @@ -1110,41 +1102,6 @@ srv_undo_tablespaces_init(bool create_new_db) return(DB_SUCCESS); } -/******************************************************************** -Wait for the purge thread(s) to start up. */ -static -void -srv_start_wait_for_purge_to_start() -/*===============================*/ -{ - /* Wait for the purge coordinator and master thread to startup. */ - - purge_state_t state = trx_purge_state(); - - ut_a(state != PURGE_STATE_DISABLED); - - while (srv_shutdown_state == SRV_SHUTDOWN_NONE - && srv_force_recovery < SRV_FORCE_NO_BACKGROUND - && state == PURGE_STATE_INIT) { - - switch (state = trx_purge_state()) { - case PURGE_STATE_RUN: - case PURGE_STATE_STOP: - break; - - case PURGE_STATE_INIT: - ib::info() << "Waiting for purge to start"; - - os_thread_sleep(50000); - break; - - case PURGE_STATE_EXIT: - case PURGE_STATE_DISABLED: - ut_error; - } - } -} - /** Create the temporary file tablespace. @param[in] create_new_db whether we are creating a new database @return DB_SUCCESS or error code. */ @@ -1208,7 +1165,7 @@ srv_start_state_set( srv_start_state_t state) /*!< in: indicate current state of thread startup */ { - srv_start_state |= state; + srv_start_state |= ulint(state); } /****************************************************************//** @@ -1220,7 +1177,7 @@ srv_start_state_is_set( /*===================*/ srv_start_state_t state) /*!< in: state to check for */ { - return(srv_start_state & state); + return(srv_start_state & ulint(state)); } /** @@ -1378,14 +1335,14 @@ srv_prepare_to_delete_redo_log_files( log_mutex_enter(); - fil_names_clear(log_sys->lsn, false); + fil_names_clear(log_sys.lsn, false); - flushed_lsn = log_sys->lsn; + flushed_lsn = log_sys.lsn; { ib::info info; if (srv_log_file_size == 0 - || (log_sys->log.format + || (log_sys.log.format & ~LOG_HEADER_FORMAT_ENCRYPTED) != LOG_HEADER_FORMAT_CURRENT) { info << "Upgrading redo log: "; @@ -1393,7 +1350,7 @@ srv_prepare_to_delete_redo_log_files( || srv_log_file_size != srv_log_file_size_requested) { if (srv_encrypt_log - == (my_bool)log_sys->is_encrypted()) { + == (my_bool)log_sys.is_encrypted()) { info << (srv_encrypt_log ? 
"Resizing encrypted" : "Resizing"); @@ -1451,14 +1408,11 @@ srv_prepare_to_delete_redo_log_files( DBUG_RETURN(flushed_lsn); } -/******************************************************************** -Starts InnoDB and creates a new database if database files -are not found and the user wants. +/** Start InnoDB. +@param[in] create_new_db whether to create a new database @return DB_SUCCESS or error code */ -dberr_t -innobase_start_or_create_for_mysql() +dberr_t srv_start(bool create_new_db) { - bool create_new_db = false; lsn_t flushed_lsn; dberr_t err = DB_SUCCESS; ulint srv_n_log_files_found = srv_n_log_files; @@ -1472,6 +1426,7 @@ innobase_start_or_create_for_mysql() || srv_operation == SRV_OPERATION_RESTORE || srv_operation == SRV_OPERATION_RESTORE_EXPORT); + if (srv_force_recovery == SRV_FORCE_NO_LOG_REDO) { srv_read_only_mode = true; } @@ -1483,25 +1438,6 @@ innobase_start_or_create_for_mysql() /* Reset the start state. */ srv_start_state = SRV_START_STATE_NONE; - if (srv_read_only_mode) { - ib::info() << "Started in read only mode"; - - /* There is no write to InnoDB tablespaces (not even - temporary ones, because also CREATE TEMPORARY TABLE is - refused in read-only mode). */ - srv_use_doublewrite_buf = FALSE; - } - -#ifdef HAVE_LZO1X - if (lzo_init() != LZO_E_OK) { - ib::warn() << "lzo_init() failed, support disabled"; - srv_lzo_disabled = true; - } else { - ib::info() << "LZO1X support available"; - srv_lzo_disabled = false; - } -#endif /* HAVE_LZO1X */ - compile_time_assert(sizeof(ulint) == sizeof(void*)); #ifdef UNIV_DEBUG @@ -1557,62 +1493,10 @@ innobase_start_or_create_for_mysql() srv_is_being_started = true; -#ifdef _WIN32 - srv_use_native_aio = TRUE; - -#elif defined(LINUX_NATIVE_AIO) - - if (srv_use_native_aio) { - ib::info() << "Using Linux native AIO"; - } -#else - /* Currently native AIO is supported only on windows and linux - and that also when the support is compiled in. In all other - cases, we ignore the setting of innodb_use_native_aio. */ - srv_use_native_aio = FALSE; -#endif /* _WIN32 */ - /* Register performance schema stages before any real work has been started which may need to be instrumented. 
*/ mysql_stage_register("innodb", srv_stages, UT_ARR_SIZE(srv_stages)); - if (srv_file_flush_method_str == NULL) { - /* These are the default options */ - srv_file_flush_method = IF_WIN(SRV_ALL_O_DIRECT_FSYNC,SRV_FSYNC); - } else if (0 == ut_strcmp(srv_file_flush_method_str, "fsync")) { - srv_file_flush_method = SRV_FSYNC; - - } else if (0 == ut_strcmp(srv_file_flush_method_str, "O_DSYNC")) { - srv_file_flush_method = SRV_O_DSYNC; - - } else if (0 == ut_strcmp(srv_file_flush_method_str, "O_DIRECT")) { - srv_file_flush_method = SRV_O_DIRECT; - - } else if (0 == ut_strcmp(srv_file_flush_method_str, "O_DIRECT_NO_FSYNC")) { - srv_file_flush_method = SRV_O_DIRECT_NO_FSYNC; - - } else if (0 == ut_strcmp(srv_file_flush_method_str, "littlesync")) { - srv_file_flush_method = SRV_LITTLESYNC; - - } else if (0 == ut_strcmp(srv_file_flush_method_str, "nosync")) { - srv_file_flush_method = SRV_NOSYNC; -#ifdef _WIN32 - } else if (0 == ut_strcmp(srv_file_flush_method_str, "normal")) { - srv_file_flush_method = SRV_FSYNC; - } else if (0 == ut_strcmp(srv_file_flush_method_str, "unbuffered")) { - } else if (0 == ut_strcmp(srv_file_flush_method_str, - "async_unbuffered")) { -#endif /* _WIN32 */ - } else { - ib::error() << "Unrecognized value " - << srv_file_flush_method_str - << " for innodb_flush_method"; - err = DB_ERROR; - } - - /* Note that the call srv_boot() also changes the values of - some variables to the units used by InnoDB internally */ - /* Set the maximum number of threads which can wait for a semaphore inside InnoDB: this is the 'sync wait array' size, as well as the maximum number of threads that can wait in the 'srv_conc array' for @@ -1641,65 +1525,6 @@ innobase_start_or_create_for_mysql() + fts_sort_pll_degree * FTS_NUM_AUX_INDEX * max_connections; - if (srv_buf_pool_size >= BUF_POOL_SIZE_THRESHOLD) { - - if (srv_buf_pool_instances == srv_buf_pool_instances_default) { -#if defined(_WIN32) && !defined(_WIN64) - /* Do not allocate too large of a buffer pool on - Windows 32-bit systems, which can have trouble - allocating larger single contiguous memory blocks. */ - srv_buf_pool_size = static_cast(ut_uint64_align_up(srv_buf_pool_size, srv_buf_pool_chunk_unit)); - srv_buf_pool_instances = ut_min( - static_cast(MAX_BUFFER_POOLS), - static_cast(srv_buf_pool_size / srv_buf_pool_chunk_unit)); -#else /* defined(_WIN32) && !defined(_WIN64) */ - /* Default to 8 instances when size > 1GB. */ - srv_buf_pool_instances = 8; -#endif /* defined(_WIN32) && !defined(_WIN64) */ - } - } else { - /* If buffer pool is less than 1 GiB, assume fewer - threads. Also use only one buffer pool instance. */ - if (srv_buf_pool_instances != srv_buf_pool_instances_default - && srv_buf_pool_instances != 1) { - /* We can't distinguish whether the user has explicitly - started mysqld with --innodb-buffer-pool-instances=0, - (srv_buf_pool_instances_default is 0) or has not - specified that option at all. Thus we have the - limitation that if the user started with =0, we - will not emit a warning here, but we should actually - do so. */ - ib::info() - << "Adjusting innodb_buffer_pool_instances" - " from " << srv_buf_pool_instances << " to 1" - " since innodb_buffer_pool_size is less than " - << BUF_POOL_SIZE_THRESHOLD / (1024 * 1024) - << " MiB"; - } - - srv_buf_pool_instances = 1; - } - - if (srv_buf_pool_chunk_unit * srv_buf_pool_instances - > srv_buf_pool_size) { - /* Size unit of buffer pool is larger than srv_buf_pool_size. - adjust srv_buf_pool_chunk_unit for srv_buf_pool_size. 
*/ - srv_buf_pool_chunk_unit - = static_cast(srv_buf_pool_size) - / srv_buf_pool_instances; - if (srv_buf_pool_size % srv_buf_pool_instances != 0) { - ++srv_buf_pool_chunk_unit; - } - } - - srv_buf_pool_size = buf_pool_size_align(srv_buf_pool_size); - - if (srv_n_page_cleaners > srv_buf_pool_instances) { - /* limit of page_cleaner parallelizability - is number of buffer pool instances. */ - srv_n_page_cleaners = srv_buf_pool_instances; - } - srv_boot(); ib::info() << ut_crc32_implementation; @@ -1829,8 +1654,7 @@ innobase_start_or_create_for_mysql() } #endif /* UNIV_DEBUG */ - log_sys_init(); - + log_sys.create(); recv_sys_init(); lock_sys.create(srv_lock_table_size); @@ -1864,27 +1688,6 @@ innobase_start_or_create_for_mysql() srv_start_state_set(SRV_START_STATE_IO); } - if (srv_n_log_files * srv_log_file_size >= 512ULL << 30) { - /* log_block_convert_lsn_to_no() limits the returned block - number to 1G and given that OS_FILE_LOG_BLOCK_SIZE is 512 - bytes, then we have a limit of 512 GB. If that limit is to - be raised, then log_block_convert_lsn_to_no() must be - modified. */ - ib::error() << "Combined size of log files must be < 512 GB"; - - return(srv_init_abort(DB_ERROR)); - } - - os_normalize_path(srv_data_home); - - /* Check if the data files exist or not. */ - err = srv_sys_space.check_file_spec( - &create_new_db, MIN_EXPECTED_TABLESPACE_SIZE); - - if (err != DB_SUCCESS) { - return(srv_init_abort(DB_ERROR)); - } - srv_startup_is_before_trx_rollback_phase = !create_new_db; /* Check if undo tablespaces and redo log files exist before creating @@ -2088,7 +1891,7 @@ innobase_start_or_create_for_mysql() } } - log_init(srv_n_log_files_found); + log_sys.log.create(srv_n_log_files_found); if (!log_set_capacity(srv_log_file_size_requested)) { return(srv_init_abort(DB_ERROR)); @@ -2338,7 +2141,6 @@ files_checked: err = fil_write_flushed_lsn(log_get_lsn()); ut_ad(!buf_pool_check_no_pending_io()); fil_close_log_files(true); - log_group_close_all(); if (err == DB_SUCCESS) { bool trunc = srv_operation == SRV_OPERATION_RESTORE; @@ -2366,7 +2168,7 @@ files_checked: /* Leave the redo log alone. */ } else if (srv_log_file_size_requested == srv_log_file_size && srv_n_log_files_found == srv_n_log_files - && log_sys->log.format + && log_sys.log.format == (srv_encrypt_log ? LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED @@ -2406,9 +2208,6 @@ files_checked: return(srv_init_abort(DB_ERROR));); DBUG_PRINT("ib_log", ("After innodb_log_abort_5")); - /* Free the old log file space. 
*/ - log_group_close_all(); - ib::info() << "Starting to delete and rewrite log" " files."; @@ -2583,7 +2382,7 @@ files_checked: | SRV_START_STATE_MONITOR; ut_ad(srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN - || trx_purge_state() == PURGE_STATE_INIT); + || !purge_sys.enabled()); if (srv_force_recovery < SRV_FORCE_NO_BACKGROUND) { srv_undo_sources = true; @@ -2630,11 +2429,14 @@ files_checked: trx_temp_rseg_create(); - thread_handles[1 + SRV_MAX_N_IO_THREADS] = os_thread_create( - srv_master_thread, - NULL, thread_ids + (1 + SRV_MAX_N_IO_THREADS)); - thread_started[1 + SRV_MAX_N_IO_THREADS] = true; - srv_start_state_set(SRV_START_STATE_MASTER); + if (srv_force_recovery < SRV_FORCE_NO_BACKGROUND) { + thread_handles[1 + SRV_MAX_N_IO_THREADS] + = os_thread_create(srv_master_thread, NULL, + (1 + SRV_MAX_N_IO_THREADS) + + thread_ids); + thread_started[1 + SRV_MAX_N_IO_THREADS] = true; + srv_start_state_set(SRV_START_STATE_MASTER); + } } if (!srv_read_only_mode && srv_operation == SRV_OPERATION_NORMAL @@ -2657,11 +2459,14 @@ files_checked: thread_started[5 + i + SRV_MAX_N_IO_THREADS] = true; } - srv_start_wait_for_purge_to_start(); + while (srv_shutdown_state == SRV_SHUTDOWN_NONE + && srv_force_recovery < SRV_FORCE_NO_BACKGROUND + && !purge_sys.enabled()) { + ib::info() << "Waiting for purge to start"; + os_thread_sleep(50000); + } srv_start_state_set(SRV_START_STATE_PURGE); - } else { - purge_sys.state = PURGE_STATE_DISABLED; } srv_is_being_started = false; @@ -2773,8 +2578,7 @@ srv_fts_close(void) #endif /** Shut down background threads that can generate undo log. */ -void -srv_shutdown_bg_undo_sources() +void srv_shutdown_bg_undo_sources() { if (srv_undo_sources) { ut_ad(!srv_read_only_mode); @@ -2789,8 +2593,7 @@ srv_shutdown_bg_undo_sources() } /** Shut down InnoDB. */ -void -innodb_shutdown() +void innodb_shutdown() { ut_ad(!my_atomic_loadptr_explicit(reinterpret_cast (&srv_running), @@ -2838,11 +2641,11 @@ innodb_shutdown() ut_ad(buf_dblwr || !srv_was_started || srv_read_only_mode || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO); ut_ad(lock_sys.is_initialised() || !srv_was_started); + ut_ad(log_sys.is_initialised() || !srv_was_started); #ifdef BTR_CUR_HASH_ADAPT ut_ad(btr_search_sys || !srv_was_started); #endif /* BTR_CUR_HASH_ADAPT */ ut_ad(ibuf || !srv_was_started); - ut_ad(log_sys || !srv_was_started); if (dict_stats_event) { dict_stats_thread_deinit(); @@ -2869,9 +2672,7 @@ innodb_shutdown() if (ibuf) { ibuf_close(); } - if (log_sys) { - log_shutdown(); - } + log_sys.close(); purge_sys.close(); trx_sys.close(); if (buf_dblwr) { @@ -2880,22 +2681,13 @@ innodb_shutdown() lock_sys.close(); trx_pool_close(); - /* We don't create these mutexes in RO mode because we don't create - the temp files that the cover. */ if (!srv_read_only_mode) { mutex_free(&srv_monitor_file_mutex); mutex_free(&srv_misc_tmpfile_mutex); } - if (dict_sys) { - dict_close(); - } - -#ifdef BTR_CUR_HASH_ADAPT - if (btr_search_sys) { - btr_search_sys_free(); - } -#endif /* BTR_CUR_HASH_ADAPT */ + dict_close(); + btr_search_sys_free(); /* 3. 
Free all InnoDB's own mutexes and the os_fast_mutexes inside them */ @@ -2916,10 +2708,6 @@ innodb_shutdown() sync_check_close(); - if (dict_foreign_err_file) { - fclose(dict_foreign_err_file); - } - if (srv_was_started && srv_print_verbose_log) { ib::info() << "Shutdown completed; log sequence number " << srv_shutdown_lsn diff --git a/storage/innobase/sync/sync0arr.cc b/storage/innobase/sync/sync0arr.cc index b56a5c73a8d..7c0c6503b21 100644 --- a/storage/innobase/sync/sync0arr.cc +++ b/storage/innobase/sync/sync0arr.cc @@ -2,7 +2,7 @@ Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. -Copyright (c) 2013, 2017, MariaDB Corporation. +Copyright (c) 2013, 2018, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -1080,13 +1080,11 @@ sync_array_print_long_waits( } if (noticed) { - ibool old_val; - fprintf(stderr, "InnoDB: ###### Starts InnoDB Monitor" " for 30 secs to print diagnostic info:\n"); - old_val = srv_print_innodb_monitor; + my_bool old_val = srv_print_innodb_monitor; /* If some crucial semaphore is reserved, then also the InnoDB Monitor can hang, and we do not get diagnostics. Since in @@ -1159,23 +1157,18 @@ sync_array_print_info( sync_array_exit(arr); } -/**********************************************************************//** -Create the primary system wait array(s), they are protected by an OS mutex */ -void -sync_array_init( -/*============*/ - ulint n_threads) /*!< in: Number of slots to - create in all arrays */ +/** Create the primary system wait arrays */ +void sync_array_init() { ut_a(sync_wait_array == NULL); ut_a(srv_sync_array_size > 0); - ut_a(n_threads > 0); + ut_a(srv_max_n_threads > 0); sync_array_size = srv_sync_array_size; sync_wait_array = UT_NEW_ARRAY_NOKEY(sync_array_t*, sync_array_size); - ulint n_slots = 1 + (n_threads - 1) / sync_array_size; + ulint n_slots = 1 + (srv_max_n_threads - 1) / sync_array_size; for (ulint i = 0; i < sync_array_size; ++i) { @@ -1183,11 +1176,8 @@ sync_array_init( } } -/**********************************************************************//** -Close sync array wait sub-system. */ -void -sync_array_close(void) -/*==================*/ +/** Destroy the sync array wait sub-system. */ +void sync_array_close() { for (ulint i = 0; i < sync_array_size; ++i) { sync_array_free(sync_wait_array[i]); diff --git a/storage/innobase/sync/sync0debug.cc b/storage/innobase/sync/sync0debug.cc index 75e6f0b39ca..2053b36e61d 100644 --- a/storage/innobase/sync/sync0debug.cc +++ b/storage/innobase/sync/sync0debug.cc @@ -191,10 +191,10 @@ struct LatchDebug { latch that the thread is trying to acquire @return true if passes, else crash with error message. */ - bool basic_check( + inline bool basic_check( const Latches* latches, latch_level_t requested_level, - ulint level) const + lint level) const UNIV_NOTHROW; /** Adds a latch and its level in the thread level array. Allocates @@ -499,7 +499,6 @@ LatchDebug::LatchDebug() LEVEL_MAP_INSERT(SYNC_RSEG_HEADER_NEW); LEVEL_MAP_INSERT(SYNC_NOREDO_RSEG); LEVEL_MAP_INSERT(SYNC_REDO_RSEG); - LEVEL_MAP_INSERT(SYNC_TRX_UNDO); LEVEL_MAP_INSERT(SYNC_PURGE_LATCH); LEVEL_MAP_INSERT(SYNC_TREE_NODE); LEVEL_MAP_INSERT(SYNC_TREE_NODE_FROM_HASH); @@ -607,11 +606,11 @@ LatchDebug::less( The level of the latch that the thread is trying to acquire @return true if passes, else crash with error message. 
*/ -bool +inline bool LatchDebug::basic_check( const Latches* latches, latch_level_t requested_level, - ulint in_level) const + lint in_level) const UNIV_NOTHROW { latch_level_t level = latch_level_t(in_level); @@ -739,7 +738,7 @@ LatchDebug::check_order( if (srv_is_being_started) { /* This is violated during trx_sys_create_rsegs() when creating additional rollback segments when - upgrading in innobase_start_or_create_for_mysql(). */ + upgrading in srv_start(). */ break; } @@ -767,7 +766,6 @@ LatchDebug::check_order( case SYNC_IBUF_BITMAP_MUTEX: case SYNC_REDO_RSEG: case SYNC_NOREDO_RSEG: - case SYNC_TRX_UNDO: case SYNC_PURGE_LATCH: case SYNC_PURGE_QUEUE: case SYNC_DICT_AUTOINC_MUTEX: @@ -894,8 +892,7 @@ LatchDebug::check_order( The purge thread can read the UNDO pages without any covering mutex. */ - ut_a(find(latches, SYNC_TRX_UNDO) != 0 - || find(latches, SYNC_REDO_RSEG) != 0 + ut_a(find(latches, SYNC_REDO_RSEG) != 0 || find(latches, SYNC_NOREDO_RSEG) != 0 || basic_check(latches, level, level - 1)); break; @@ -1400,8 +1397,6 @@ sync_latch_meta_init() LATCH_ADD_MUTEX(BUF_DBLWR, SYNC_DOUBLEWRITE, buf_dblwr_mutex_key); - LATCH_ADD_MUTEX(TRX_UNDO, SYNC_TRX_UNDO, trx_undo_mutex_key); - LATCH_ADD_MUTEX(TRX_POOL, SYNC_POOL, trx_pool_mutex_key); LATCH_ADD_MUTEX(TRX_POOL_MANAGER, SYNC_POOL_MANAGER, @@ -1744,7 +1739,7 @@ sync_check_init() ut_d(LatchDebug::init()); - sync_array_init(OS_THREAD_MAX_N); + sync_array_init(); } /** Free the InnoDB synchronization data structures. */ diff --git a/storage/innobase/sync/sync0rw.cc b/storage/innobase/sync/sync0rw.cc index c3a0ed2284b..528b15f4e79 100644 --- a/storage/innobase/sync/sync0rw.cc +++ b/storage/innobase/sync/sync0rw.cc @@ -302,8 +302,8 @@ rw_lock_s_lock_spin( { ulint i = 0; /* spin round count */ sync_array_t* sync_arr; - ulint spin_count = 0; - uint64_t count_os_wait = 0; + lint spin_count = 0; + int64_t count_os_wait = 0; /* We reuse the thread id to index into the counter, cache it here for efficiency. 
*/ @@ -430,9 +430,9 @@ rw_lock_x_lock_wait_func( unsigned line) /*!< in: line where requested */ { ulint i = 0; - ulint n_spins = 0; + lint n_spins = 0; sync_array_t* sync_arr; - uint64_t count_os_wait = 0; + int64_t count_os_wait = 0; ut_ad(my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) <= threshold); @@ -682,8 +682,8 @@ rw_lock_x_lock_func( { ulint i = 0; sync_array_t* sync_arr; - ulint spin_count = 0; - uint64_t count_os_wait = 0; + lint spin_count = 0; + int64_t count_os_wait = 0; ut_ad(rw_lock_validate(lock)); ut_ad(!rw_lock_own(lock, RW_LOCK_S)); @@ -714,7 +714,7 @@ lock_loop: } HMT_medium(); - spin_count += i; + spin_count += lint(i); if (i >= srv_n_spin_wait_rounds) { @@ -780,9 +780,9 @@ rw_lock_sx_lock_func( { ulint i = 0; sync_array_t* sync_arr; - ulint spin_count = 0; - uint64_t count_os_wait = 0; - ulint spin_wait_count = 0; + lint spin_count = 0; + int64_t count_os_wait = 0; + lint spin_wait_count = 0; ut_ad(rw_lock_validate(lock)); ut_ad(!rw_lock_own(lock, RW_LOCK_S)); @@ -814,7 +814,7 @@ lock_loop: i++; } - spin_count += i; + spin_count += lint(i); if (i >= srv_n_spin_wait_rounds) { diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc index 9716d573a63..a64290fda4a 100644 --- a/storage/innobase/sync/sync0sync.cc +++ b/storage/innobase/sync/sync0sync.cc @@ -80,7 +80,6 @@ mysql_pfs_key_t srv_innodb_monitor_mutex_key; mysql_pfs_key_t srv_misc_tmpfile_mutex_key; mysql_pfs_key_t srv_monitor_file_mutex_key; mysql_pfs_key_t buf_dblwr_mutex_key; -mysql_pfs_key_t trx_undo_mutex_key; mysql_pfs_key_t trx_mutex_key; mysql_pfs_key_t trx_pool_mutex_key; mysql_pfs_key_t trx_pool_manager_mutex_key; diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index 0abef1cb6ee..32e050e9b91 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -161,11 +161,11 @@ purge_graph_build() void purge_sys_t::create() { ut_ad(this == &purge_sys); - ut_ad(!is_initialised()); + ut_ad(!enabled()); + ut_ad(!event); event= os_event_create(0); - n_stop= 0; - running= false; - state= PURGE_STATE_INIT; + ut_ad(event); + m_paused= 0; query= purge_graph_build(); n_submitted= 0; n_completed= 0; @@ -178,29 +178,28 @@ void purge_sys_t::create() rw_lock_create(trx_purge_latch_key, &latch, SYNC_PURGE_LATCH); mutex_create(LATCH_ID_PURGE_SYS_PQ, &pq_mutex); undo_trunc.create(); - m_initialised = true; } /** Close the purge subsystem on shutdown. */ void purge_sys_t::close() { - ut_ad(this == &purge_sys); - if (!is_initialised()) return; + ut_ad(this == &purge_sys); + if (!event) return; - m_initialised = false; - trx_t* trx = query->trx; - que_graph_free(query); - ut_ad(!trx->id); - ut_ad(trx->state == TRX_STATE_ACTIVE); - trx->state = TRX_STATE_NOT_STARTED; - trx_free(trx); - rw_lock_free(&latch); - /* rw_lock_free() already called latch.~rw_lock_t(); tame the - debug assertions when the destructor will be called once more. */ - ut_ad(latch.magic_n == 0); - ut_d(latch.magic_n = RW_LOCK_MAGIC_N); - mutex_free(&pq_mutex); - os_event_destroy(event); + m_enabled= false; + trx_t* trx = query->trx; + que_graph_free(query); + ut_ad(!trx->id); + ut_ad(trx->state == TRX_STATE_ACTIVE); + trx->state= TRX_STATE_NOT_STARTED; + trx_free(trx); + rw_lock_free(&latch); + /* rw_lock_free() already called latch.~rw_lock_t(); tame the + debug assertions when the destructor will be called once more. 
*/ + ut_ad(latch.magic_n == 0); + ut_d(latch.magic_n= RW_LOCK_MAGIC_N); + mutex_free(&pq_mutex); + os_event_destroy(event); } /*================ UNDO LOG HISTORY LIST =============================*/ @@ -274,11 +273,10 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr) in THD::cleanup() invoked from unlink_thd(), and we may also continue to execute user transactions. */ ut_ad(srv_undo_sources - || ((srv_startup_is_before_trx_rollback_phase - || trx_rollback_is_active) - && purge_sys.state == PURGE_STATE_INIT) - || (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND - && purge_sys.state == PURGE_STATE_DISABLED) + || (!purge_sys.enabled() + && (srv_startup_is_before_trx_rollback_phase + || trx_rollback_is_active + || srv_force_recovery >= SRV_FORCE_NO_BACKGROUND)) || ((trx->undo_no == 0 || trx->mysql_thd || trx->internal) && srv_fast_shutdown)); @@ -583,8 +581,8 @@ namespace undo { return(DB_IO_ERROR); } - ulint sz = UNIV_PAGE_SIZE; - void* buf = ut_zalloc_nokey(sz + UNIV_PAGE_SIZE); + ulint sz = srv_page_size; + void* buf = ut_zalloc_nokey(sz + srv_page_size); if (buf == NULL) { os_file_close(handle); delete[] log_file_name; @@ -592,7 +590,7 @@ namespace undo { } byte* log_buf = static_cast( - ut_align(buf, UNIV_PAGE_SIZE)); + ut_align(buf, srv_page_size)); IORequest request(IORequest::WRITE); @@ -643,8 +641,8 @@ namespace undo { return; } - ulint sz = UNIV_PAGE_SIZE; - void* buf = ut_zalloc_nokey(sz + UNIV_PAGE_SIZE); + ulint sz = srv_page_size; + void* buf = ut_zalloc_nokey(sz + srv_page_size); if (buf == NULL) { os_file_close(handle); os_file_delete(innodb_log_file_key, log_file_name); @@ -653,7 +651,7 @@ namespace undo { } byte* log_buf = static_cast( - ut_align(buf, UNIV_PAGE_SIZE)); + ut_align(buf, srv_page_size)); mach_write_to_4(log_buf, undo::s_magic); @@ -711,8 +709,8 @@ namespace undo { return(false); } - ulint sz = UNIV_PAGE_SIZE; - void* buf = ut_zalloc_nokey(sz + UNIV_PAGE_SIZE); + ulint sz = srv_page_size; + void* buf = ut_zalloc_nokey(sz + srv_page_size); if (buf == NULL) { os_file_close(handle); os_file_delete(innodb_log_file_key, @@ -722,7 +720,7 @@ namespace undo { } byte* log_buf = static_cast( - ut_align(buf, UNIV_PAGE_SIZE)); + ut_align(buf, srv_page_size)); IORequest request(IORequest::READ); @@ -800,7 +798,7 @@ trx_purge_mark_undo_for_truncate( for (ulint i = 1; i <= srv_undo_tablespaces_active; i++) { if (fil_space_get_size(space_id) - > (srv_max_undo_log_size / srv_page_size)) { + > (srv_max_undo_log_size >> srv_page_size_shift)) { /* Tablespace qualifies for truncate. */ undo_trunc->mark(space_id); undo::Truncate::add_space_to_trunc_list(space_id); @@ -1296,7 +1294,7 @@ trx_purge_get_next_rec( } else { page = page_align(rec2); - purge_sys.offset = rec2 - page; + purge_sys.offset = ulint(rec2 - page); purge_sys.page_no = page_get_page_no(page); purge_sys.tail.undo_no = trx_undo_rec_get_undo_no(rec2); @@ -1591,110 +1589,63 @@ trx_purge( return(n_pages_handled); } -/*******************************************************************//** -Get the purge state. -@return purge state. */ -purge_state_t -trx_purge_state(void) -/*=================*/ +/** Stop purge during FLUSH TABLES FOR EXPORT */ +void purge_sys_t::stop() { - purge_state_t state; + rw_lock_x_lock(&latch); - rw_lock_x_lock(&purge_sys.latch); + if (!enabled_latched()) + { + /* Shutdown must have been initiated during FLUSH TABLES FOR EXPORT. 
*/ + ut_ad(!srv_undo_sources); + rw_lock_x_unlock(&latch); + return; + } - state = purge_sys.state; + ut_ad(srv_n_purge_threads > 0); - rw_lock_x_unlock(&purge_sys.latch); + if (0 == my_atomic_add32_explicit(&m_paused, 1, MY_MEMORY_ORDER_RELAXED)) + { + /* We need to wakeup the purge thread in case it is suspended, so + that it can acknowledge the state change. */ + const int64_t sig_count = os_event_reset(event); + rw_lock_x_unlock(&latch); + ib::info() << "Stopping purge"; + srv_purge_wakeup(); + /* Wait for purge coordinator to signal that it is suspended. */ + os_event_wait_low(event, sig_count); + MONITOR_ATOMIC_INC(MONITOR_PURGE_STOP_COUNT); + return; + } - return(state); + rw_lock_x_unlock(&latch); + + if (running()) + { + ib::info() << "Waiting for purge to stop"; + while (running()) + os_thread_sleep(10000); + } } -/*******************************************************************//** -Stop purge and wait for it to stop, move to PURGE_STATE_STOP. */ -void -trx_purge_stop(void) -/*================*/ +/** Resume purge at UNLOCK TABLES after FLUSH TABLES FOR EXPORT */ +void purge_sys_t::resume() { - rw_lock_x_lock(&purge_sys.latch); + if (!enabled()) + { + /* Shutdown must have been initiated during FLUSH TABLES FOR EXPORT. */ + ut_ad(!srv_undo_sources); + return; + } - switch (purge_sys.state) { - case PURGE_STATE_INIT: - case PURGE_STATE_DISABLED: - ut_error; - case PURGE_STATE_EXIT: - /* Shutdown must have been initiated during - FLUSH TABLES FOR EXPORT. */ - ut_ad(!srv_undo_sources); -unlock: - rw_lock_x_unlock(&purge_sys.latch); - break; - case PURGE_STATE_STOP: - ut_ad(srv_n_purge_threads > 0); - ++purge_sys.n_stop; - if (!purge_sys.running) { - goto unlock; - } - ib::info() << "Waiting for purge to stop"; - do { - rw_lock_x_unlock(&purge_sys.latch); - os_thread_sleep(10000); - rw_lock_x_lock(&purge_sys.latch); - } while (purge_sys.running); - goto unlock; - case PURGE_STATE_RUN: - ut_ad(srv_n_purge_threads > 0); - ++purge_sys.n_stop; - ib::info() << "Stopping purge"; + int32_t paused= my_atomic_add32_explicit(&m_paused, -1, + MY_MEMORY_ORDER_RELAXED); + ut_a(paused); - /* We need to wakeup the purge thread in case it is suspended, - so that it can acknowledge the state change. */ - - const int64_t sig_count = os_event_reset(purge_sys.event); - purge_sys.state = PURGE_STATE_STOP; - srv_purge_wakeup(); - rw_lock_x_unlock(&purge_sys.latch); - /* Wait for purge coordinator to signal that it - is suspended. */ - os_event_wait_low(purge_sys.event, sig_count); - } - - MONITOR_INC_VALUE(MONITOR_PURGE_STOP_COUNT, 1); -} - -/*******************************************************************//** -Resume purge, move to PURGE_STATE_RUN. */ -void -trx_purge_run(void) -/*===============*/ -{ - rw_lock_x_lock(&purge_sys.latch); - - switch (purge_sys.state) { - case PURGE_STATE_EXIT: - /* Shutdown must have been initiated during - FLUSH TABLES FOR EXPORT. 
*/ - ut_ad(!srv_undo_sources); - break; - case PURGE_STATE_INIT: - case PURGE_STATE_DISABLED: - ut_error; - - case PURGE_STATE_RUN: - ut_a(!purge_sys.n_stop); - break; - case PURGE_STATE_STOP: - ut_a(purge_sys.n_stop); - if (--purge_sys.n_stop == 0) { - - ib::info() << "Resuming purge"; - - purge_sys.state = PURGE_STATE_RUN; - } - - MONITOR_INC_VALUE(MONITOR_PURGE_RESUME_COUNT, 1); - } - - rw_lock_x_unlock(&purge_sys.latch); - - srv_purge_wakeup(); + if (paused == 1) + { + ib::info() << "Resuming purge"; + srv_purge_wakeup(); + MONITOR_ATOMIC_INC(MONITOR_PURGE_RESUME_COUNT); + } } diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc index 31d9ce096a2..90681f20cab 100644 --- a/storage/innobase/trx/trx0rec.cc +++ b/storage/innobase/trx/trx0rec.cc @@ -52,30 +52,54 @@ const dtuple_t trx_undo_default_rec = { /*=========== UNDO LOG RECORD CREATION AND DECODING ====================*/ -/**********************************************************************//** -Writes the mtr log entry of the inserted undo log record on the undo log -page. */ -UNIV_INLINE -void -trx_undof_page_add_undo_rec_log( -/*============================*/ - page_t* undo_page, /*!< in: undo log page */ - ulint old_free, /*!< in: start offset of the inserted entry */ - ulint new_free, /*!< in: end offset of the entry */ - mtr_t* mtr) /*!< in: mtr */ +/** Write redo log of writing an undo log record. +@param[in] undo_block undo log page +@param[in] old_free start offset of the undo log record +@param[in] new_free end offset of the undo log record +@param[in,out] mtr mini-transaction */ +static void trx_undof_page_add_undo_rec_log(const buf_block_t* undo_block, + ulint old_free, ulint new_free, + mtr_t* mtr) { ut_ad(old_free >= TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE); ut_ad(new_free >= old_free); - ut_ad(new_free < UNIV_PAGE_SIZE); - ut_ad(mach_read_from_2(undo_page - + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE) + ut_ad(new_free < srv_page_size); + ut_ad(mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + + undo_block->frame) == new_free); - mlog_write_ulint(undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE, - new_free, MLOG_2BYTES, mtr); - mlog_log_string(undo_page + old_free, new_free - old_free, mtr); + mtr->set_modified(); + switch (mtr->get_log_mode()) { + case MTR_LOG_NONE: + case MTR_LOG_NO_REDO: + return; + case MTR_LOG_SHORT_INSERTS: + ut_ad(0); + /* fall through */ + case MTR_LOG_ALL: + break; + } + + const uint32_t + len = uint32_t(new_free - old_free - 4), + reserved = std::min(11 + 13 + len, + mtr->get_log()->MAX_DATA_SIZE); + byte* log_ptr = mtr->get_log()->open(reserved); + const byte* log_end = log_ptr + reserved; + log_ptr = mlog_write_initial_log_record_low( + MLOG_UNDO_INSERT, + undo_block->page.id.space(), undo_block->page.id.page_no(), + log_ptr, mtr); + mach_write_to_2(log_ptr, len); + if (log_ptr + 2 + len <= log_end) { + memcpy(log_ptr + 2, undo_block->frame + old_free + 2, len); + mlog_close(mtr, log_ptr + 2 + len); + } else { + mlog_close(mtr, log_ptr + 2); + mtr->get_log()->push(undo_block->frame + old_free + 2, len); + } } -/** Parse MLOG_UNDO_INSERT for crash-upgrade from MariaDB 10.2. +/** Parse MLOG_UNDO_INSERT. @param[in] ptr log record @param[in] end_ptr end of log record buffer @param[in,out] page page or NULL @@ -118,20 +142,16 @@ trx_undo_parse_add_undo_rec( return(const_cast(ptr + len)); } -/**********************************************************************//** -Calculates the free space left for extending an undo log record. 
+/** Calculate the free space left for extending an undo log record. +@param[in] undo_block undo log page +@param[in] ptr current end of the undo page @return bytes left */ -UNIV_INLINE -ulint -trx_undo_left( -/*==========*/ - const page_t* page, /*!< in: undo log page */ - const byte* ptr) /*!< in: pointer to page */ +static ulint trx_undo_left(const buf_block_t* undo_block, const byte* ptr) { - /* The '- 10' is a safety margin, in case we have some small + /* The 10 is a safety margin, in case we have some small calculation error below */ - - return(UNIV_PAGE_SIZE - (ptr - page) - 10 - FIL_PAGE_DATA_END); + return srv_page_size - ulint(ptr - undo_block->frame) + - (10 + FIL_PAGE_DATA_END); } /**********************************************************************//** @@ -143,7 +163,7 @@ static ulint trx_undo_page_set_next_prev_and_add( /*================================*/ - page_t* undo_page, /*!< in/out: undo log page */ + buf_block_t* undo_block, /*!< in/out: undo log page */ byte* ptr, /*!< in: ptr up to where data has been written on this undo page. */ mtr_t* mtr) /*!< in: mtr */ @@ -155,15 +175,15 @@ trx_undo_page_set_next_prev_and_add( that points to the next free offset value within undo_page.*/ - ut_ad(ptr > undo_page); - ut_ad(ptr < undo_page + UNIV_PAGE_SIZE); - - if (UNIV_UNLIKELY(trx_undo_left(undo_page, ptr) < 2)) { + ut_ad(ptr > undo_block->frame); + ut_ad(ptr < undo_block->frame + srv_page_size); + if (UNIV_UNLIKELY(trx_undo_left(undo_block, ptr) < 2)) { return(0); } - ptr_to_first_free = undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE; + ptr_to_first_free = TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + + undo_block->frame; first_free = mach_read_from_2(ptr_to_first_free); @@ -171,16 +191,16 @@ trx_undo_page_set_next_prev_and_add( mach_write_to_2(ptr, first_free); ptr += 2; - end_of_rec = ptr - undo_page; + end_of_rec = ulint(ptr - undo_block->frame); /* Write offset of the next undo log record */ - mach_write_to_2(undo_page + first_free, end_of_rec); + mach_write_to_2(undo_block->frame + first_free, end_of_rec); /* Update the offset to first free undo record */ mach_write_to_2(ptr_to_first_free, end_of_rec); /* Write this log entry to the UNDO log */ - trx_undof_page_add_undo_rec_log(undo_page, first_free, + trx_undof_page_add_undo_rec_log(undo_block, first_free, end_of_rec, mtr); return(first_free); @@ -192,7 +212,7 @@ static const ulint VIRTUAL_COL_UNDO_FORMAT_1 = 0xF1; /** Write virtual column index info (index id and column position in index) to the undo log -@param[in,out] undo_page undo log page +@param[in,out] undo_block undo log page @param[in] table the table @param[in] pos the virtual column position @param[in] ptr undo log record being written @@ -202,7 +222,7 @@ to the undo log static byte* trx_undo_log_v_idx( - page_t* undo_page, + buf_block_t* undo_block, const dict_table_t* table, ulint pos, byte* ptr, @@ -221,7 +241,7 @@ trx_undo_log_v_idx( 1 byte for undo log record format version marker */ ulint size = n_idx * (5 + 5) + 5 + 2 + (first_v_col ? 
1 : 0); - if (trx_undo_left(undo_page, ptr) < size) { + if (trx_undo_left(undo_block, ptr) < size) { return(NULL); } @@ -250,7 +270,7 @@ trx_undo_log_v_idx( ptr += mach_write_compressed(ptr, v_index.nth_field); } - mach_write_to_2(old_ptr, ptr - old_ptr); + mach_write_to_2(old_ptr, ulint(ptr - old_ptr)); return(ptr); } @@ -294,7 +314,7 @@ trx_undo_read_v_idx_low( if (index->id == id) { const dict_col_t* col = dict_index_get_nth_col( index, pos); - ut_ad(dict_col_is_virtual(col)); + ut_ad(col->is_virtual()); const dict_v_col_t* vcol = reinterpret_cast< const dict_v_col_t*>(col); *col_pos = vcol->v_pos; @@ -351,7 +371,7 @@ trx_undo_read_v_idx( } /** Reports in the undo log of an insert of virtual columns. -@param[in] undo_page undo log page +@param[in] undo_block undo log page @param[in] table the table @param[in] row dtuple contains the virtual columns @param[in,out] ptr log ptr @@ -359,7 +379,7 @@ trx_undo_read_v_idx( static bool trx_undo_report_insert_virtual( - page_t* undo_page, + buf_block_t* undo_block, dict_table_t* table, const dtuple_t* row, byte** ptr) @@ -367,7 +387,7 @@ trx_undo_report_insert_virtual( byte* start = *ptr; bool first_v_col = true; - if (trx_undo_left(undo_page, *ptr) < 2) { + if (trx_undo_left(undo_block, *ptr) < 2) { return(false); } @@ -386,7 +406,7 @@ trx_undo_report_insert_virtual( if (col->m_col.ord_part) { /* make sure enought space to write the length */ - if (trx_undo_left(undo_page, *ptr) < 5) { + if (trx_undo_left(undo_block, *ptr) < 5) { return(false); } @@ -394,7 +414,7 @@ trx_undo_report_insert_virtual( pos += REC_MAX_N_FIELDS; *ptr += mach_write_compressed(*ptr, pos); - *ptr = trx_undo_log_v_idx(undo_page, table, + *ptr = trx_undo_log_v_idx(undo_block, table, col_no, *ptr, first_v_col); first_v_col = false; @@ -414,8 +434,8 @@ trx_undo_report_insert_virtual( flen = max_len; } - if (trx_undo_left(undo_page, *ptr) < flen + 5) { - + if (trx_undo_left(undo_block, *ptr) + < flen + 5) { return(false); } *ptr += mach_write_compressed(*ptr, flen); @@ -423,8 +443,7 @@ trx_undo_report_insert_virtual( ut_memcpy(*ptr, vfield->data, flen); *ptr += flen; } else { - if (trx_undo_left(undo_page, *ptr) < 5) { - + if (trx_undo_left(undo_block, *ptr) < 5) { return(false); } @@ -434,7 +453,7 @@ trx_undo_report_insert_virtual( } /* Always mark the end of the log with 2 bytes length field */ - mach_write_to_2(start, *ptr - start); + mach_write_to_2(start, ulint(*ptr - start)); return(true); } @@ -446,7 +465,7 @@ static ulint trx_undo_page_report_insert( /*========================*/ - page_t* undo_page, /*!< in: undo log page */ + buf_block_t* undo_block, /*!< in: undo log page */ trx_t* trx, /*!< in: transaction */ dict_index_t* index, /*!< in: clustered index */ const dtuple_t* clust_entry, /*!< in: index entry which will be @@ -463,18 +482,16 @@ trx_undo_page_report_insert( TRX_UNDO_INSERT == 1 into insert_undo pages, or TRX_UNDO_UPDATE == 2 into update_undo pages. 
*/ ut_ad(mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE - + undo_page) <= 2); + + undo_block->frame) <= 2); - first_free = mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR - + TRX_UNDO_PAGE_FREE); - ptr = undo_page + first_free; + first_free = mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + + undo_block->frame); + ptr = undo_block->frame + first_free; - ut_ad(first_free <= UNIV_PAGE_SIZE); - - if (trx_undo_left(undo_page, ptr) < 2 + 1 + 11 + 11) { + ut_ad(first_free <= srv_page_size); + if (trx_undo_left(undo_block, ptr) < 2 + 1 + 11 + 11) { /* Not enough space for writing the general parameters */ - return(0); } @@ -488,11 +505,12 @@ trx_undo_page_report_insert( /*----------------------------------------*/ /* Store then the fields required to uniquely determine the record to be inserted in the clustered index */ - if (UNIV_UNLIKELY(clust_entry->info_bits)) { + if (UNIV_UNLIKELY(clust_entry->info_bits != 0)) { ut_ad(clust_entry->info_bits == REC_INFO_DEFAULT_ROW); ut_ad(index->is_instant()); - ut_ad(undo_page[first_free + 2] == TRX_UNDO_INSERT_REC); - undo_page[first_free + 2] = TRX_UNDO_INSERT_DEFAULT; + ut_ad(undo_block->frame[first_free + 2] + == TRX_UNDO_INSERT_REC); + undo_block->frame[first_free + 2] = TRX_UNDO_INSERT_DEFAULT; goto done; } @@ -501,7 +519,7 @@ trx_undo_page_report_insert( const dfield_t* field = dtuple_get_nth_field(clust_entry, i); ulint flen = dfield_get_len(field); - if (trx_undo_left(undo_page, ptr) < 5) { + if (trx_undo_left(undo_block, ptr) < 5) { return(0); } @@ -509,7 +527,7 @@ trx_undo_page_report_insert( ptr += mach_write_compressed(ptr, flen); if (flen != UNIV_SQL_NULL) { - if (trx_undo_left(undo_page, ptr) < flen) { + if (trx_undo_left(undo_block, ptr) < flen) { return(0); } @@ -521,13 +539,13 @@ trx_undo_page_report_insert( if (index->table->n_v_cols) { if (!trx_undo_report_insert_virtual( - undo_page, index->table, clust_entry, &ptr)) { + undo_block, index->table, clust_entry, &ptr)) { return(0); } } done: - return(trx_undo_page_set_next_prev_and_add(undo_page, ptr, mtr)); + return(trx_undo_page_set_next_prev_and_add(undo_block, ptr, mtr)); } /**********************************************************************//** @@ -764,7 +782,7 @@ trx_undo_page_report_modify_ext( } /* Encode spatial status into length. */ - spatial_len |= spatial_status << SPATIAL_STATUS_SHIFT; + spatial_len |= ulint(spatial_status) << SPATIAL_STATUS_SHIFT; if (spatial_status == SPATIAL_ONLY) { /* If the column is only used by gis index, log its @@ -843,7 +861,7 @@ static ulint trx_undo_page_report_modify( /*========================*/ - page_t* undo_page, /*!< in: undo log page */ + buf_block_t* undo_block, /*!< in: undo log page */ trx_t* trx, /*!< in: transaction */ dict_index_t* index, /*!< in: clustered index where update or delete marking is done */ @@ -859,9 +877,34 @@ trx_undo_page_report_modify( virtual column info */ mtr_t* mtr) /*!< in: mtr */ { - dict_table_t* table = index->table; ulint first_free; byte* ptr; + + ut_ad(index->is_primary()); + ut_ad(rec_offs_validate(rec, index, offsets)); + /* MariaDB 10.3.1+ in trx_undo_page_init() always initializes + TRX_UNDO_PAGE_TYPE as 0, but previous versions wrote + TRX_UNDO_INSERT == 1 into insert_undo pages, + or TRX_UNDO_UPDATE == 2 into update_undo pages. 
*/ + ut_ad(mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE + + undo_block->frame) <= 2); + + first_free = mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + + undo_block->frame); + ptr = undo_block->frame + first_free; + + ut_ad(first_free <= srv_page_size); + + if (trx_undo_left(undo_block, ptr) < 50) { + /* NOTE: the value 50 must be big enough so that the general + fields written below fit on the undo log page */ + return 0; + } + + /* Reserve 2 bytes for the pointer to the next undo log record */ + ptr += 2; + + dict_table_t* table = index->table; const byte* field; ulint flen; ulint col_no; @@ -874,32 +917,6 @@ trx_undo_page_report_modify( + BTR_EXTERN_FIELD_REF_SIZE]; bool first_v_col = true; - ut_a(dict_index_is_clust(index)); - ut_ad(rec_offs_validate(rec, index, offsets)); - /* MariaDB 10.3.1+ in trx_undo_page_init() always initializes - TRX_UNDO_PAGE_TYPE as 0, but previous versions wrote - TRX_UNDO_INSERT == 1 into insert_undo pages, - or TRX_UNDO_UPDATE == 2 into update_undo pages. */ - ut_ad(mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE - + undo_page) <= 2); - - first_free = mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR - + TRX_UNDO_PAGE_FREE); - ptr = undo_page + first_free; - - ut_ad(first_free <= UNIV_PAGE_SIZE); - - if (trx_undo_left(undo_page, ptr) < 50) { - - /* NOTE: the value 50 must be big enough so that the general - fields written below fit on the undo log page */ - - return(0); - } - - /* Reserve 2 bytes for the pointer to the next undo log record */ - ptr += 2; - /* Store first some general parameters to the undo log */ if (!update) { @@ -945,8 +962,8 @@ trx_undo_page_report_modify( allowed to ignore blob prefixes if the delete marking was done by some other trx as it must have committed by now for us to allow an over-write. */ - if (ignore_prefix) { - ignore_prefix = (trx_id != trx->id); + if (trx_id == trx->id) { + ignore_prefix = false; } ptr += mach_u64_write_compressed(ptr, trx_id); @@ -972,16 +989,14 @@ trx_undo_page_report_modify( ut_ad(!rec_offs_nth_extern(offsets, i)); ut_ad(dict_index_get_nth_col(index, i)->ord_part); - if (trx_undo_left(undo_page, ptr) < 5) { - + if (trx_undo_left(undo_block, ptr) < 5) { return(0); } ptr += mach_write_compressed(ptr, flen); if (flen != UNIV_SQL_NULL) { - if (trx_undo_left(undo_page, ptr) < flen) { - + if (trx_undo_left(undo_block, ptr) < flen) { return(0); } @@ -994,8 +1009,7 @@ trx_undo_page_report_modify( /* Save to the undo log the old values of the columns to be updated. 
*/ if (update) { - if (trx_undo_left(undo_page, ptr) < 5) { - + if (trx_undo_left(undo_block, ptr) < 5) { return(0); } @@ -1033,8 +1047,7 @@ trx_undo_page_report_modify( ulint pos = fld->field_no; /* Write field number to undo log */ - if (trx_undo_left(undo_page, ptr) < 5) { - + if (trx_undo_left(undo_block, ptr) < 5) { return(0); } @@ -1058,7 +1071,7 @@ trx_undo_page_report_modify( if (is_virtual) { ut_ad(fld->field_no < table->n_v_def); - ptr = trx_undo_log_v_idx(undo_page, table, + ptr = trx_undo_log_v_idx(undo_block, table, fld->field_no, ptr, first_v_col); if (ptr == NULL) { @@ -1085,8 +1098,7 @@ trx_undo_page_report_modify( rec, index, offsets, pos, &flen); } - if (trx_undo_left(undo_page, ptr) < 15) { - + if (trx_undo_left(undo_block, ptr) < 15) { return(0); } @@ -1115,8 +1127,7 @@ trx_undo_page_report_modify( } if (flen != UNIV_SQL_NULL) { - if (trx_undo_left(undo_page, ptr) < flen) { - + if (trx_undo_left(undo_block, ptr) < flen) { return(0); } @@ -1133,16 +1144,15 @@ trx_undo_page_report_modify( flen, max_v_log_len); } - if (trx_undo_left(undo_page, ptr) < 15) { - + if (trx_undo_left(undo_block, ptr) < 15) { return(0); } ptr += mach_write_compressed(ptr, flen); if (flen != UNIV_SQL_NULL) { - if (trx_undo_left(undo_page, ptr) < flen) { - + if (trx_undo_left(undo_block, ptr) + < flen) { return(0); } @@ -1176,8 +1186,7 @@ trx_undo_page_report_modify( double mbr[SPDIMS * 2]; mem_heap_t* row_heap = NULL; - if (trx_undo_left(undo_page, ptr) < 5) { - + if (trx_undo_left(undo_block, ptr) < 5) { return(0); } @@ -1243,8 +1252,7 @@ trx_undo_page_report_modify( if (true) { /* Write field number to undo log */ - if (trx_undo_left(undo_page, ptr) < 5 + 15) { - + if (trx_undo_left(undo_block, ptr) < 5 + 15) { return(0); } @@ -1292,9 +1300,8 @@ trx_undo_page_report_modify( if (flen != UNIV_SQL_NULL && spatial_status != SPATIAL_ONLY) { - if (trx_undo_left(undo_page, ptr) + if (trx_undo_left(undo_block, ptr) < flen) { - return(0); } @@ -1303,7 +1310,7 @@ trx_undo_page_report_modify( } if (spatial_status != SPATIAL_NONE) { - if (trx_undo_left(undo_page, ptr) + if (trx_undo_left(undo_block, ptr) < DATA_MBR_LEN) { return(0); } @@ -1336,8 +1343,7 @@ already_logged: /* Write field number to undo log. 
Make sure there is enought space in log */ - if (trx_undo_left(undo_page, ptr) < 5) { - + if (trx_undo_left(undo_block, ptr) < 5) { return(0); } @@ -1345,7 +1351,7 @@ already_logged: ptr += mach_write_compressed(ptr, pos); ut_ad(col_no < table->n_v_def); - ptr = trx_undo_log_v_idx(undo_page, table, + ptr = trx_undo_log_v_idx(undo_block, table, col_no, ptr, first_v_col); first_v_col = false; @@ -1385,9 +1391,8 @@ already_logged: ptr += mach_write_compressed(ptr, flen); if (flen != UNIV_SQL_NULL) { - if (trx_undo_left(undo_page, ptr) + if (trx_undo_left(undo_block, ptr) < flen) { - return(0); } @@ -1397,7 +1402,7 @@ already_logged: } } - mach_write_to_2(old_ptr, ptr - old_ptr); + mach_write_to_2(old_ptr, ulint(ptr - old_ptr)); if (row_heap) { mem_heap_free(row_heap); @@ -1406,22 +1411,20 @@ already_logged: /*----------------------------------------*/ /* Write pointers to the previous and the next undo log records */ - if (trx_undo_left(undo_page, ptr) < 2) { - + if (trx_undo_left(undo_block, ptr) < 2) { return(0); } mach_write_to_2(ptr, first_free); ptr += 2; - mach_write_to_2(undo_page + first_free, ptr - undo_page); + const ulint new_free = ulint(ptr - undo_block->frame); + mach_write_to_2(undo_block->frame + first_free, new_free); - mach_write_to_2(undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE, - ptr - undo_page); + mach_write_to_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + + undo_block->frame, new_free); /* Write to the REDO log about this change in the UNDO log */ - - trx_undof_page_add_undo_rec_log(undo_page, first_free, - ptr - undo_page, mtr); + trx_undof_page_add_undo_rec_log(undo_block, first_free, new_free, mtr); return(first_free); } @@ -1837,7 +1840,7 @@ trx_undo_erase_page_end(page_t* undo_page) first_free = mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE); memset(undo_page + first_free, 0, - (UNIV_PAGE_SIZE - FIL_PAGE_DATA_END) - first_free); + (srv_page_size - FIL_PAGE_DATA_END) - first_free); return(first_free != TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE); } @@ -1858,7 +1861,7 @@ trx_undo_page_report_rename(trx_t* trx, const dict_table_t* table, + block->frame; ulint first_free = mach_read_from_2(ptr_first_free); ut_ad(first_free >= TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE); - ut_ad(first_free <= UNIV_PAGE_SIZE); + ut_ad(first_free <= srv_page_size); byte* start = block->frame + first_free; size_t len = strlen(table->name.m_name); const size_t fixed = 2 + 1 + 11 + 11 + 2; @@ -1868,7 +1871,7 @@ trx_undo_page_report_rename(trx_t* trx, const dict_table_t* table, + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE < UNIV_PAGE_SIZE_MIN - 10 - FIL_PAGE_DATA_END); - if (trx_undo_left(block->frame, start) < fixed + len) { + if (trx_undo_left(block, start) < fixed + len) { ut_ad(first_free > TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE); return 0; @@ -1886,7 +1889,7 @@ trx_undo_page_report_rename(trx_t* trx, const dict_table_t* table, mach_write_to_2(start, offset); mach_write_to_2(ptr_first_free, offset); - trx_undof_page_add_undo_rec_log(block->frame, first_free, offset, mtr); + trx_undof_page_add_undo_rec_log(block, first_free, offset, mtr); return first_free; } @@ -1904,7 +1907,6 @@ trx_undo_report_rename(trx_t* trx, const dict_table_t* table) mtr_t mtr; dberr_t err; mtr.start(); - mutex_enter(&trx->undo_mutex); if (buf_block_t* block = trx_undo_assign(trx, &err, &mtr)) { trx_undo_t* undo = trx->rsegs.m_redo.undo; ut_ad(err == DB_SUCCESS); @@ -1916,20 +1918,18 @@ trx_undo_report_rename(trx_t* trx, const dict_table_t* table) if (ulint offset = 
trx_undo_page_report_rename( trx, table, block, &mtr)) { undo->withdraw_clock = buf_withdraw_clock; - undo->empty = FALSE; undo->top_page_no = undo->last_page_no; undo->top_offset = offset; undo->top_undo_no = trx->undo_no++; undo->guess_block = block; + ut_ad(!undo->empty()); - trx->undo_rseg_space - = trx->rsegs.m_redo.rseg->space->id; err = DB_SUCCESS; break; } else { mtr.commit(); mtr.start(); - block = trx_undo_add_page(trx, undo, &mtr); + block = trx_undo_add_page(undo, &mtr); if (!block) { err = DB_OUT_OF_FILE_SPACE; break; @@ -1940,7 +1940,6 @@ trx_undo_report_rename(trx_t* trx, const dict_table_t* table) mtr.commit(); } - mutex_exit(&trx->undo_mutex); return err; } @@ -2007,30 +2006,28 @@ trx_undo_report_row_operation( rseg = trx->rsegs.m_redo.rseg; } - mutex_enter(&trx->undo_mutex); dberr_t err; buf_block_t* undo_block = trx_undo_assign_low(trx, rseg, pundo, &err, &mtr); trx_undo_t* undo = *pundo; ut_ad((err == DB_SUCCESS) == (undo_block != NULL)); - if (undo_block == NULL) { + if (UNIV_UNLIKELY(undo_block == NULL)) { goto err_exit; } ut_ad(undo != NULL); do { - page_t* undo_page = buf_block_get_frame(undo_block); ulint offset = !rec ? trx_undo_page_report_insert( - undo_page, trx, index, clust_entry, &mtr) + undo_block, trx, index, clust_entry, &mtr) : trx_undo_page_report_modify( - undo_page, trx, index, rec, offsets, update, + undo_block, trx, index, rec, offsets, update, cmpl_info, clust_entry, &mtr); if (UNIV_UNLIKELY(offset == 0)) { - if (!trx_undo_erase_page_end(undo_page)) { + if (!trx_undo_erase_page_end(undo_block->frame)) { /* The record did not fit on an empty undo page. Discard the freshly allocated page and return an error. */ @@ -2064,15 +2061,11 @@ trx_undo_report_row_operation( undo->withdraw_clock = buf_withdraw_clock; mtr_commit(&mtr); - undo->empty = FALSE; undo->top_page_no = undo_block->page.id.page_no(); undo->top_offset = offset; undo->top_undo_no = trx->undo_no++; undo->guess_block = undo_block; - - trx->undo_rseg_space = rseg->space->id; - - mutex_exit(&trx->undo_mutex); + ut_ad(!undo->empty()); if (!is_temp) { const undo_no_t limit = undo->top_undo_no; @@ -2111,11 +2104,11 @@ trx_undo_report_row_operation( mtr.set_log_mode(MTR_LOG_NO_REDO); } - undo_block = trx_undo_add_page(trx, undo, &mtr); + undo_block = trx_undo_add_page(undo, &mtr); DBUG_EXECUTE_IF("ib_err_ins_undo_page_add_failure", undo_block = NULL;); - } while (undo_block != NULL); + } while (UNIV_LIKELY(undo_block != NULL)); ib_errf(trx->mysql_thd, IB_LOG_LEVEL_ERROR, DB_OUT_OF_FILE_SPACE, @@ -2131,7 +2124,6 @@ trx_undo_report_row_operation( err = DB_OUT_OF_FILE_SPACE; err_exit: - mutex_exit(&trx->undo_mutex); mtr_commit(&mtr); return(err); } @@ -2140,13 +2132,11 @@ err_exit: /** Copy an undo record to heap. @param[in] roll_ptr roll pointer to a record that exists -@param[in] is_temp whether this is a temporary table @param[in,out] heap memory heap where copied */ static trx_undo_rec_t* trx_undo_get_undo_rec_low( roll_ptr_t roll_ptr, - bool is_temp, mem_heap_t* heap) { trx_undo_rec_t* undo_rec; @@ -2162,10 +2152,8 @@ trx_undo_get_undo_rec_low( &offset); ut_ad(page_no > FSP_FIRST_INODE_PAGE_NO); ut_ad(offset >= TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE); - rseg = is_temp - ? trx_sys.temp_rsegs[rseg_id] - : trx_sys.rseg_array[rseg_id]; - ut_ad(is_temp == !rseg->is_persistent()); + rseg = trx_sys.rseg_array[rseg_id]; + ut_ad(rseg->is_persistent()); mtr_start(&mtr); @@ -2181,7 +2169,6 @@ trx_undo_get_undo_rec_low( /** Copy an undo record to heap. 
@param[in] roll_ptr roll pointer to record -@param[in] is_temp whether this is a temporary table @param[in,out] heap memory heap where copied @param[in] trx_id id of the trx that generated the roll pointer: it points to an @@ -2196,7 +2183,6 @@ static MY_ATTRIBUTE((warn_unused_result)) bool trx_undo_get_undo_rec( roll_ptr_t roll_ptr, - bool is_temp, mem_heap_t* heap, trx_id_t trx_id, const table_name_t& name, @@ -2208,7 +2194,7 @@ trx_undo_get_undo_rec( missing_history = purge_sys.view.changes_visible(trx_id, name); if (!missing_history) { - *undo_rec = trx_undo_get_undo_rec_low(roll_ptr, is_temp, heap); + *undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap); } rw_lock_s_unlock(&purge_sys.latch); @@ -2273,12 +2259,13 @@ trx_undo_prev_version_build( bool dummy_extern; byte* buf; + ut_ad(!index->table->is_temporary()); ut_ad(!rw_lock_own(&purge_sys.latch, RW_LOCK_S)); ut_ad(mtr_memo_contains_page_flagged(index_mtr, index_rec, MTR_MEMO_PAGE_S_FIX | MTR_MEMO_PAGE_X_FIX)); ut_ad(rec_offs_validate(rec, index, offsets)); - ut_a(dict_index_is_clust(index)); + ut_a(index->is_primary()); roll_ptr = row_get_rec_roll_ptr(rec, index, offsets); @@ -2289,19 +2276,16 @@ trx_undo_prev_version_build( return(true); } - const bool is_temp = dict_table_is_temporary(index->table); rec_trx_id = row_get_rec_trx_id(rec, index, offsets); ut_ad(!index->table->skip_alter_undo); if (trx_undo_get_undo_rec( - roll_ptr, is_temp, heap, rec_trx_id, index->table->name, + roll_ptr, heap, rec_trx_id, index->table->name, &undo_rec)) { if (v_status & TRX_UNDO_PREV_IN_PURGE) { /* We are fetching the record being purged */ - ut_ad(!is_temp); - undo_rec = trx_undo_get_undo_rec_low( - roll_ptr, is_temp, heap); + undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap); } else { /* The undo record may already have been purged, during purge or semi-consistent read. */ diff --git a/storage/innobase/trx/trx0roll.cc b/storage/innobase/trx/trx0roll.cc index 10f2288635e..c0864e40622 100644 --- a/storage/innobase/trx/trx0roll.cc +++ b/storage/innobase/trx/trx0roll.cc @@ -635,8 +635,6 @@ trx_rollback_active( que_fork_t* fork; que_thr_t* thr; roll_node_t* roll_node; - dict_table_t* table; - ibool dictionary_locked = FALSE; const trx_id_t trx_id = trx->id; ut_ad(trx_id); @@ -659,9 +657,11 @@ trx_rollback_active( trx_roll_crash_recv_trx = trx; - if (trx_get_dict_operation(trx) != TRX_DICT_OP_NONE) { + const bool dictionary_locked = trx_get_dict_operation(trx) + != TRX_DICT_OP_NONE; + + if (dictionary_locked) { row_mysql_lock_data_dictionary(trx); - dictionary_locked = TRUE; } que_run_threads(thr); @@ -679,27 +679,16 @@ trx_rollback_active( ut_a(trx->lock.que_state == TRX_QUE_RUNNING); - if (trx_get_dict_operation(trx) != TRX_DICT_OP_NONE - && trx->table_id != 0) { + if (!dictionary_locked || !trx->table_id) { + } else if (dict_table_t* table = dict_table_open_on_id( + trx->table_id, TRUE, DICT_TABLE_OP_NORMAL)) { + ib::info() << "Dropping table " << table->name + << ", with id " << trx->table_id + << " in recovery"; - ut_ad(dictionary_locked); + dict_table_close_and_drop(trx, table); - /* If the transaction was for a dictionary operation, - we drop the relevant table only if it is not flagged - as DISCARDED. If it still exists. 
*/ - - table = dict_table_open_on_id( - trx->table_id, TRUE, DICT_TABLE_OP_NORMAL); - - if (table && !dict_table_is_discarded(table)) { - ib::warn() << "Dropping table '" << table->name - << "', with id " << trx->table_id - << " in recovery"; - - dict_table_close_and_drop(trx, table); - - trx_commit_for_mysql(trx); - } + trx_commit_for_mysql(trx); } ib::info() << "Rolled back recovered transaction " << trx_id; @@ -895,8 +884,6 @@ static void trx_roll_try_truncate(trx_t* trx) { - ut_ad(mutex_own(&trx->undo_mutex)); - trx->pages_undone = 0; undo_no_t undo_no = trx->undo_no; @@ -934,8 +921,6 @@ trx_roll_pop_top_rec( trx_undo_t* undo, /*!< in: undo log */ mtr_t* mtr) /*!< in: mtr */ { - ut_ad(mutex_own(&trx->undo_mutex)); - page_t* undo_page = trx_undo_page_get_s_latched( page_id_t(undo->rseg->space->id, undo->top_page_no), mtr); @@ -946,8 +931,8 @@ trx_roll_pop_top_rec( true, mtr); if (prev_rec == NULL) { - - undo->empty = TRUE; + undo->top_undo_no = IB_ID_MAX; + ut_ad(undo->empty()); } else { page_t* prev_rec_page = page_align(prev_rec); @@ -957,8 +942,9 @@ trx_roll_pop_top_rec( } undo->top_page_no = page_get_page_no(prev_rec_page); - undo->top_offset = prev_rec - prev_rec_page; + undo->top_offset = ulint(prev_rec - prev_rec_page); undo->top_undo_no = trx_undo_rec_get_undo_no(prev_rec); + ut_ad(!undo->empty()); } return(undo_page + offset); @@ -973,49 +959,55 @@ trx_roll_pop_top_rec( trx_undo_rec_t* trx_roll_pop_top_rec_of_trx(trx_t* trx, roll_ptr_t* roll_ptr, mem_heap_t* heap) { - mutex_enter(&trx->undo_mutex); - if (trx->pages_undone >= TRX_ROLL_TRUNC_THRESHOLD) { trx_roll_try_truncate(trx); } - trx_undo_t* undo; + trx_undo_t* undo = NULL; trx_undo_t* insert = trx->rsegs.m_redo.old_insert; trx_undo_t* update = trx->rsegs.m_redo.undo; trx_undo_t* temp = trx->rsegs.m_noredo.undo; const undo_no_t limit = trx->roll_limit; - ut_ad(!insert || !update || insert->empty || update->empty + ut_ad(!insert || !update || insert->empty() || update->empty() || insert->top_undo_no != update->top_undo_no); - ut_ad(!insert || !temp || insert->empty || temp->empty + ut_ad(!insert || !temp || insert->empty() || temp->empty() || insert->top_undo_no != temp->top_undo_no); - ut_ad(!update || !temp || update->empty || temp->empty + ut_ad(!update || !temp || update->empty() || temp->empty() || update->top_undo_no != temp->top_undo_no); if (UNIV_LIKELY_NULL(insert) - && !insert->empty && limit <= insert->top_undo_no) { - if (update && !update->empty - && update->top_undo_no > insert->top_undo_no) { + && !insert->empty() && limit <= insert->top_undo_no) { + undo = insert; + } + + if (update && !update->empty() && update->top_undo_no >= limit) { + if (!undo) { + undo = update; + } else if (undo->top_undo_no < update->top_undo_no) { undo = update; - } else { - undo = insert; } - } else if (update && !update->empty && limit <= update->top_undo_no) { - undo = update; - } else if (temp && !temp->empty && limit <= temp->top_undo_no) { - undo = temp; - } else { + } + + if (temp && !temp->empty() && temp->top_undo_no >= limit) { + if (!undo) { + undo = temp; + } else if (undo->top_undo_no < temp->top_undo_no) { + undo = temp; + } + } + + if (undo == NULL) { trx_roll_try_truncate(trx); /* Mark any ROLLBACK TO SAVEPOINT completed, so that if the transaction object is committed and reused later, we will default to a full ROLLBACK. 
*/ trx->roll_limit = 0; trx->in_rollback = false; - mutex_exit(&trx->undo_mutex); return(NULL); } - ut_ad(!undo->empty); + ut_ad(!undo->empty()); ut_ad(limit <= undo->top_undo_no); *roll_ptr = trx_undo_build_roll_ptr( @@ -1047,12 +1039,7 @@ trx_roll_pop_top_rec_of_trx(trx_t* trx, roll_ptr_t* roll_ptr, mem_heap_t* heap) break; } - ut_ad(trx_roll_check_undo_rec_ordering( - undo_no, undo->rseg->space->id, trx)); - trx->undo_no = undo_no; - trx->undo_rseg_space = undo->rseg->space->id; - mutex_exit(&trx->undo_mutex); trx_undo_rec_t* undo_rec_copy = trx_undo_rec_copy(undo_rec, heap); mtr.commit(); diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc index d5058809703..81ad85dca7d 100644 --- a/storage/innobase/trx/trx0rseg.cc +++ b/storage/innobase/trx/trx0rseg.cc @@ -254,7 +254,7 @@ void trx_rseg_format_upgrade(trx_rsegf_t* rseg_header, mtr_t* mtr) /* Clear also possible garbage at the end of the page. Old InnoDB versions did not initialize unused parts of pages. */ byte* b = rseg_header + TRX_RSEG_MAX_TRX_ID + 8; - ulint len = UNIV_PAGE_SIZE + ulint len = srv_page_size - (FIL_PAGE_DATA_END + TRX_RSEG + TRX_RSEG_MAX_TRX_ID + 8); memset(b, 0, len); diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc index a2bbde6555b..b01595e313d 100644 --- a/storage/innobase/trx/trx0sys.cc +++ b/storage/innobase/trx/trx0sys.cc @@ -192,12 +192,12 @@ trx_sysf_create( compile_time_assert(256 >= TRX_SYS_N_RSEGS); memset(ptr, 0xff, 256 * TRX_SYS_RSEG_SLOT_SIZE); ptr += 256 * TRX_SYS_RSEG_SLOT_SIZE; - ut_a(ptr <= page + (UNIV_PAGE_SIZE - FIL_PAGE_DATA_END)); + ut_a(ptr <= page + (srv_page_size - FIL_PAGE_DATA_END)); /* Initialize all of the page. This part used to be uninitialized. */ - memset(ptr, 0, UNIV_PAGE_SIZE - FIL_PAGE_DATA_END + page - ptr); + memset(ptr, 0, srv_page_size - FIL_PAGE_DATA_END + size_t(page - ptr)); - mlog_log_string(TRX_SYS + page, UNIV_PAGE_SIZE - FIL_PAGE_DATA_END + mlog_log_string(TRX_SYS + page, srv_page_size - FIL_PAGE_DATA_END - TRX_SYS, mtr); /* Create the first rollback segment in the SYSTEM tablespace */ diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index 9b3e67a9043..11d83c96a92 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -225,7 +225,6 @@ struct TrxFactory { &trx_named_savept_t::trx_savepoints); mutex_create(LATCH_ID_TRX, &trx->mutex); - mutex_create(LATCH_ID_TRX_UNDO, &trx->undo_mutex); lock_trx_alloc_locks(trx); } @@ -252,7 +251,6 @@ struct TrxFactory { ut_free(trx->detailed_error); mutex_free(&trx->mutex); - mutex_free(&trx->undo_mutex); trx->mod_tables.~trx_mod_tables_t(); @@ -490,6 +488,12 @@ void trx_free(trx_t*& trx) ut_ad(trx->will_lock == 0); trx_pools->mem_free(trx); + /* Unpoison the memory for innodb_monitor_set_option; + it is operating also on the freed transaction objects. */ + MEM_UNDEFINED(&trx->mutex, sizeof trx->mutex); + /* Declare the contents as initialized for Valgrind; + we checked that it was initialized in trx_pools->mem_free(trx). 
*/ + UNIV_MEM_VALID(&trx->mutex, sizeof trx->mutex); trx = NULL; } @@ -563,8 +567,7 @@ trx_resurrect_table_locks( trx_state_eq(trx, TRX_STATE_PREPARED)); ut_ad(undo->rseg == trx->rsegs.m_redo.rseg); - if (undo->empty) { - + if (undo->empty()) { return; } @@ -679,12 +682,7 @@ static void trx_resurrect(trx_undo_t *undo, trx_rseg_t *rseg, else trx->rsegs.m_redo.undo= undo; - if (!undo->empty) - { - trx->undo_no= undo->top_undo_no + 1; - trx->undo_rseg_space= undo->rseg->space->id; - } - + trx->undo_no= undo->top_undo_no + 1; trx->rsegs.m_redo.rseg= rseg; /* For transactions with active data will not have rseg size = 1 @@ -717,7 +715,6 @@ trx_lists_init_at_db_start() { ut_a(srv_is_being_started); ut_ad(!srv_was_started); - ut_ad(!purge_sys.is_initialised()); if (srv_operation == SRV_OPERATION_RESTORE) { /* mariabackup --prepare only deals with @@ -777,8 +774,7 @@ trx_lists_init_at_db_start() ut_ad(trx->rsegs.m_redo.rseg->trx_ref_count); trx->rsegs.m_redo.undo = undo; - if (!undo->empty - && undo->top_undo_no >= trx->undo_no) { + if (undo->top_undo_no >= trx->undo_no) { if (trx_state_eq(trx, TRX_STATE_ACTIVE)) { rows_to_undo -= trx->undo_no; @@ -787,7 +783,6 @@ trx_lists_init_at_db_start() } trx->undo_no = undo->top_undo_no + 1; - trx->undo_rseg_space = rseg->space->id; } trx_resurrect_table_locks(trx, undo); } @@ -1114,9 +1109,6 @@ trx_write_serialisation_history( undo log to the purge queue. */ trx_serialise(trx); - /* It is not necessary to acquire trx->undo_mutex here because - only a single OS thread is allowed to commit this transaction. - The undo logs will be processed and purged later. */ if (UNIV_LIKELY_NULL(old_insert)) { UT_LIST_REMOVE(rseg->old_insert_list, old_insert); trx_purge_add_undo_to_history(trx, old_insert, mtr); @@ -1746,7 +1738,6 @@ trx_mark_sql_stat_end( break; case TRX_STATE_NOT_STARTED: trx->undo_no = 0; - trx->undo_rseg_space = 0; /* fall through */ case TRX_STATE_ACTIVE: trx->last_sql_stat_start.least_undo_no = trx->undo_no; @@ -1969,10 +1960,6 @@ trx_prepare_low(trx_t* trx) mtr_t mtr; - /* It is not necessary to acquire trx->undo_mutex here because - only the owning (connection) thread of the transaction is - allowed to perform XA PREPARE. */ - if (trx_undo_t* undo = trx->rsegs.m_noredo.undo) { ut_ad(undo->rseg == trx->rsegs.m_noredo.rseg); @@ -2127,7 +2114,7 @@ int trx_recover_for_mysql(XID *xid_list, uint len) if (arg.count) ib::info() << arg.count << " transactions in prepared state after recovery"; - return(arg.count); + return int(arg.count); } diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc index f5984de02b9..18755d0457a 100644 --- a/storage/innobase/trx/trx0undo.cc +++ b/storage/innobase/trx/trx0undo.cc @@ -77,31 +77,25 @@ can still remove old versions from the bottom of the stack. */ ------------------------------------------------------------------- latches? ------- -The contention of the trx_sys_t::mutex should be minimized. When a transaction +The contention of the trx_sys.mutex should be minimized. When a transaction does its first insert or modify in an index, an undo log is assigned for it. Then we must have an x-latch to the rollback segment header. - When the transaction does more modifys or rolls back, the undo log is -protected with undo_mutex in the transaction. - When the transaction commits, its insert undo log is either reset and -cached for a fast reuse, or freed. In these cases we must have an x-latch on -the rollback segment page. The update undo log is put to the history list. 
If -it is not suitable for reuse, its slot in the rollback segment is reset. In -both cases, an x-latch must be acquired on the rollback segment. + When the transaction performs modifications or rolls back, its +undo log is protected by undo page latches. +Only the thread that is associated with the transaction may hold multiple +undo page latches at a time. Undo pages are always private to a single +transaction. Other threads that are performing MVCC reads +or checking for implicit locks will lock at most one undo page at a time +in trx_undo_get_undo_rec_low(). + When the transaction commits, its persistent undo log is added +to the history list. If it is not suitable for reuse, its slot is reset. +In both cases, an x-latch must be acquired on the rollback segment header page. The purge operation steps through the history list without modifying it until a truncate operation occurs, which can remove undo logs from the end of the list and release undo log segments. In stepping through the list, s-latches on the undo log pages are enough, but in a truncate, x-latches must be obtained on the rollback segment and individual pages. */ -/********************************************************************//** -Initializes the fields in an undo log segment page. */ -static -void -trx_undo_page_init( -/*===============*/ - page_t* undo_page, /*!< in: undo log segment page */ - mtr_t* mtr); /*!< in: mtr */ - /********************************************************************//** Creates and initializes an undo log memory object. @return own: the undo log memory object */ @@ -219,7 +213,7 @@ trx_undo_page_get_prev_rec(trx_undo_rec_t* rec, ulint page_no, ulint offset) page_t* undo_page; ulint start; - undo_page = (page_t*) ut_align_down(rec, UNIV_PAGE_SIZE); + undo_page = (page_t*) ut_align_down(rec, srv_page_size); start = trx_undo_page_get_start(undo_page, page_no, offset); @@ -381,26 +375,28 @@ trx_undo_get_first_rec( /*============== UNDO LOG FILE COPY CREATION AND FREEING ==================*/ -/** Parse MLOG_UNDO_INIT for crash-upgrade from MariaDB 10.2. +/** Parse MLOG_UNDO_INIT. @param[in] ptr log record @param[in] end_ptr end of log record buffer @param[in,out] page page or NULL -@param[in,out] mtr mini-transaction @return end of log record @retval NULL if the log record is incomplete */ byte* -trx_undo_parse_page_init( - const byte* ptr, - const byte* end_ptr, - page_t* page, - mtr_t* mtr) +trx_undo_parse_page_init(const byte* ptr, const byte* end_ptr, page_t* page) { - ulint type = mach_parse_compressed(&ptr, end_ptr); + if (end_ptr <= ptr) { + return NULL; + } - if (!ptr) { - } else if (type != 1 && type != 2) { + const ulint type = *ptr++; + + if (type > TRX_UNDO_UPDATE) { recv_sys->found_corrupt_log = true; } else if (page) { + /* Starting with MDEV-12288 in MariaDB 10.3.1, we use + type=0 for the combined insert/update undo log + pages. MariaDB 10.2 would use TRX_UNDO_INSERT or + TRX_UNDO_UPDATE. 
*/ mach_write_to_2(FIL_PAGE_TYPE + page, FIL_PAGE_UNDO_LOG); mach_write_to_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE + page, type); @@ -417,14 +413,12 @@ trx_undo_parse_page_init( @param[in] ptr redo log record @param[in] end_ptr end of log buffer @param[in,out] page undo log page or NULL -@param[in,out] mtr mini-transaction @return end of log record or NULL */ byte* trx_undo_parse_page_header_reuse( const byte* ptr, const byte* end_ptr, - page_t* undo_page, - mtr_t* mtr) + page_t* undo_page) { trx_id_t trx_id = mach_u64_parse_compressed(&ptr, end_ptr); @@ -463,29 +457,39 @@ trx_undo_parse_page_header_reuse( return(const_cast(ptr)); } -/********************************************************************//** -Initializes the fields in an undo log segment page. */ -static -void -trx_undo_page_init( -/*===============*/ - page_t* undo_page, /*!< in: undo log segment page */ - mtr_t* mtr) /*!< in: mtr */ +/** Initialize the fields in an undo log segment page. +@param[in,out] undo_block undo page +@param[in,out] mtr mini-transaction */ +static void trx_undo_page_init(buf_block_t* undo_block, mtr_t* mtr) { - trx_upagef_t* page_hdr; + page_t* page = undo_block->frame; + mach_write_to_2(FIL_PAGE_TYPE + page, FIL_PAGE_UNDO_LOG); + mach_write_to_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE + page, 0); + mach_write_to_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_START + page, + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE); + mach_write_to_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + page, + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE); - mlog_write_ulint(undo_page + FIL_PAGE_TYPE, - FIL_PAGE_UNDO_LOG, MLOG_2BYTES, mtr); - compile_time_assert(TRX_UNDO_PAGE_TYPE == 0); - compile_time_assert(TRX_UNDO_PAGE_START == 2); - compile_time_assert(TRX_UNDO_PAGE_NODE == TRX_UNDO_PAGE_FREE + 2); + mtr->set_modified(); + switch (mtr->get_log_mode()) { + case MTR_LOG_NONE: + case MTR_LOG_NO_REDO: + return; + case MTR_LOG_SHORT_INSERTS: + ut_ad(0); + /* fall through */ + case MTR_LOG_ALL: + break; + } - page_hdr = undo_page + TRX_UNDO_PAGE_HDR; - mlog_write_ulint(page_hdr, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE, - MLOG_4BYTES, mtr); - mlog_write_ulint(page_hdr + TRX_UNDO_PAGE_FREE, - TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE, - MLOG_2BYTES, mtr); + byte* log_ptr = mtr->get_log()->open(11 + 1); + log_ptr = mlog_write_initial_log_record_low( + MLOG_UNDO_INIT, + undo_block->page.id.space(), + undo_block->page.id.page_no(), + log_ptr, mtr); + *log_ptr++ = 0; + mlog_close(mtr, log_ptr); } /** Create an undo log segment. @@ -537,7 +541,7 @@ trx_undo_seg_create(fil_space_t* space, trx_rsegf_t* rseg_hdr, ulint* id, buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE); - trx_undo_page_init(block->frame, mtr); + trx_undo_page_init(block, mtr); mlog_write_ulint(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + block->frame, TRX_UNDO_SEG_HDR + TRX_UNDO_SEG_HDR_SIZE, @@ -612,7 +616,7 @@ trx_undo_header_create( new_free = free + TRX_UNDO_LOG_OLD_HDR_SIZE; - ut_a(free + TRX_UNDO_LOG_XA_HDR_SIZE < UNIV_PAGE_SIZE - 100); + ut_a(free + TRX_UNDO_LOG_XA_HDR_SIZE < srv_page_size - 100); mach_write_to_2(page_hdr + TRX_UNDO_PAGE_START, new_free); @@ -758,16 +762,12 @@ trx_undo_parse_page_header( } /** Allocate an undo log page. 
-@param[in,out] trx transaction @param[in,out] undo undo log @param[in,out] mtr mini-transaction that does not hold any page latch @return X-latched block if success @retval NULL on failure */ -buf_block_t* -trx_undo_add_page(trx_t* trx, trx_undo_t* undo, mtr_t* mtr) +buf_block_t* trx_undo_add_page(trx_undo_t* undo, mtr_t* mtr) { - ut_ad(mutex_own(&trx->undo_mutex)); - trx_rseg_t* rseg = undo->rseg; buf_block_t* new_block = NULL; ulint n_reserved; @@ -802,7 +802,7 @@ trx_undo_add_page(trx_t* trx, trx_undo_t* undo, mtr_t* mtr) buf_block_dbg_add_level(new_block, SYNC_TRX_UNDO_PAGE); undo->last_page_no = new_block->page.id.page_no(); - trx_undo_page_init(new_block->frame, mtr); + trx_undo_page_init(new_block, mtr); flst_add_last(TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + header_page, @@ -929,7 +929,7 @@ function_exit: if (trunc_here) { mlog_write_ulint(undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE, - trunc_here - undo_page, + ulint(trunc_here - undo_page), MLOG_2BYTES, &mtr); } @@ -1150,13 +1150,14 @@ trx_undo_mem_create_at_db_start(trx_rseg_t* rseg, ulint id, ulint page_no, page_t* last_page = trx_undo_page_get( page_id_t(rseg->space->id, undo->last_page_no), &mtr); - const trx_undo_rec_t* rec = trx_undo_page_get_last_rec( - last_page, page_no, offset); - - undo->empty = !rec; - if (rec) { - undo->top_offset = rec - last_page; + if (const trx_undo_rec_t* rec = trx_undo_page_get_last_rec( + last_page, page_no, offset)) { + undo->top_offset = ulint(rec - last_page); undo->top_undo_no = trx_undo_rec_get_undo_no(rec); + ut_ad(!undo->empty()); + } else { + undo->top_undo_no = IB_ID_MAX; + ut_ad(undo->empty()); } } @@ -1217,10 +1218,11 @@ trx_undo_mem_create( undo->last_page_no = page_no; undo->size = 1; - undo->empty = TRUE; + undo->top_undo_no = IB_ID_MAX; undo->top_page_no = page_no; undo->guess_block = NULL; undo->withdraw_clock = 0; + ut_ad(undo->empty()); return(undo); } @@ -1248,7 +1250,8 @@ trx_undo_mem_init_for_reuse( undo->dict_operation = FALSE; undo->hdr_offset = offset; - undo->empty = TRUE; + undo->top_undo_no = IB_ID_MAX; + ut_ad(undo->empty()); } /** Create an undo log. @@ -1391,7 +1394,6 @@ A new undo log is created or a cached undo log reused. buf_block_t* trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr) { - ut_ad(mutex_own(&trx->undo_mutex)); ut_ad(mtr->get_log_mode() == MTR_LOG_ALL); trx_undo_t* undo = trx->rsegs.m_redo.undo; @@ -1444,7 +1446,6 @@ trx_undo_assign_low(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo, { const bool is_temp = rseg == trx->rsegs.m_noredo.rseg; - ut_ad(mutex_own(&trx->undo_mutex)); ut_ad(rseg == trx->rsegs.m_redo.rseg || rseg == trx->rsegs.m_noredo.rseg); ut_ad(undo == (is_temp @@ -1699,7 +1700,7 @@ trx_undo_truncate_tablespace( /* Step-1: Truncate tablespace. 
*/ if (!fil_truncate_tablespace( space, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES)) { - fil_space_release(space); + space->release(); return false; } @@ -1763,7 +1764,7 @@ trx_undo_truncate_tablespace( rseg->needs_purge = false; } mtr_commit(&mtr); - fil_space_release(space); + space->release(); return true; } diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc index 7bc51b801c5..c2a103313d7 100644 --- a/storage/innobase/ut/ut0ut.cc +++ b/storage/innobase/ut/ut0ut.cc @@ -160,7 +160,7 @@ ut_time_us( ut_gettimeofday(&tv, NULL); - us = static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; + us = uintmax_t(tv.tv_sec) * 1000000 + uintmax_t(tv.tv_usec); if (tloc != NULL) { *tloc = us; @@ -182,7 +182,7 @@ ut_time_ms(void) ut_gettimeofday(&tv, NULL); - return((ulint) tv.tv_sec * 1000 + tv.tv_usec / 1000); + return(ulint(tv.tv_sec) * 1000 + ulint(tv.tv_usec / 1000)); } /**********************************************************//** @@ -429,7 +429,7 @@ ut_get_name( name, strlen(name), trx ? trx->mysql_thd : NULL); buf[bufend - buf] = '\0'; - return(std::string(buf, 0, bufend - buf)); + return(std::string(buf, 0, size_t(bufend - buf))); } /**********************************************************************//** @@ -453,7 +453,7 @@ ut_print_name( name, strlen(name), trx ? trx->mysql_thd : NULL); - if (fwrite(buf, 1, bufend - buf, f) != (size_t) (bufend - buf)) { + if (fwrite(buf, 1, size_t(bufend - buf), f) != size_t(bufend - buf)) { perror("fwrite"); } } @@ -526,32 +526,6 @@ ut_copy_file( } while (len > 0); } -/** Convert an error number to a human readable text message. -The returned string is static and should not be freed or modified. -@param[in] num InnoDB internal error number -@return string, describing the error */ -std::string -ut_get_name( -/*=========*/ - const trx_t* trx, /*!< in: transaction (NULL=no quotes) */ - ibool table_id,/*!< in: TRUE=print a table name, - FALSE=print other identifier */ - const char* name) /*!< in: name to print */ -{ - /* 2 * NAME_LEN for database and table name, - and some slack for the #mysql50# prefix and quotes */ - char buf[3 * NAME_LEN]; - const char* bufend; - ulint namelen = strlen(name); - - bufend = innobase_convert_name(buf, sizeof buf, - name, namelen, - trx ? trx->mysql_thd : NULL); - buf[bufend-buf]='\0'; - std::string str(buf); - return str; -} - /** Convert an error number to a human readable text message. The returned string is static and should not be freed or modified. @param[in] num InnoDB internal error number diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc index f3376ebc584..94f9ade9d65 100644 --- a/storage/maria/ha_maria.cc +++ b/storage/maria/ha_maria.cc @@ -37,6 +37,7 @@ C_MODE_START #include "ma_checkpoint.h" #include "ma_recovery.h" C_MODE_END +#include "ma_trnman.h" //#include "sql_priv.h" #include "protocol.h" @@ -1335,6 +1336,7 @@ int ha_maria::check(THD * thd, HA_CHECK_OPT * check_opt) old_proc_info= thd_proc_info(thd, "Checking status"); thd_progress_init(thd, 3); error= maria_chk_status(param, file); // Not fatal + /* maria_chk_size() will flush the page cache for this file */ if (maria_chk_size(param, file)) error= 1; if (!error) @@ -1393,7 +1395,8 @@ int ha_maria::check(THD * thd, HA_CHECK_OPT * check_opt) } /* Reset trn, that may have been set by repair */ - _ma_set_trn_for_table(file, old_trn); + if (old_trn && old_trn != file->trn) + _ma_set_trn_for_table(file, old_trn); thd_proc_info(thd, old_proc_info); thd_progress_end(thd); return error ? 
HA_ADMIN_CORRUPT : HA_ADMIN_OK; @@ -1527,7 +1530,8 @@ int ha_maria::zerofill(THD * thd, HA_CHECK_OPT *check_opt) error=maria_zerofill(param, file, share->open_file_name.str); /* Reset trn, that may have been set by repair */ - _ma_set_trn_for_table(file, old_trn); + if (old_trn && old_trn != file->trn) + _ma_set_trn_for_table(file, old_trn); if (!error) { @@ -1770,7 +1774,8 @@ int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize) maria_lock_database(file, F_UNLCK); /* Reset trn, that may have been set by repair */ - _ma_set_trn_for_table(file, old_trn); + if (old_trn && old_trn != file->trn) + _ma_set_trn_for_table(file, old_trn); error= error ? HA_ADMIN_FAILED : (optimize_done ? (write_log_record_for_repair(param, file) ? HA_ADMIN_FAILED : @@ -2246,6 +2251,7 @@ int ha_maria::end_bulk_insert() bulk_insert_single_undo == BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR))) first_error= first_error ? first_error : error; + bulk_insert_single_undo= BULK_INSERT_NONE; // Safety } DBUG_RETURN(first_error); } @@ -2589,9 +2595,12 @@ int ha_maria::extra(enum ha_extra_function operation) without calling commit/rollback in between. If file->trn is not set we can't remove file->share from the transaction list in the extra() call. - We also ensure that we set file->trn to 0 if THD_TRN is 0 as in - this case we have already freed the trn. This can happen when one - implicit_commit() is called as part of alter table. + In current code we don't have to do this for HA_EXTRA_PREPARE_FOR_RENAME + as this is only used the intermediate table used by ALTER TABLE which + is not part of the transaction (it's not in the TRN list). Better to + keep this for now, to not break anything in a stable release. + When HA_EXTRA_PREPARE_FOR_RENAME is not handled below, we can change + the warnings in _ma_remove_table_from_trnman() to asserts. table->in_use is not set in the case this is a done as part of closefrm() as part of drop table. @@ -2604,7 +2613,7 @@ int ha_maria::extra(enum ha_extra_function operation) { THD *thd= table->in_use; TRN *trn= THD_TRN; - _ma_set_trn_for_table(file, trn); + _ma_set_tmp_trn_for_table(file, trn); } DBUG_ASSERT(file->s->base.born_transactional || file->trn == 0 || file->trn == &dummy_transaction_object); @@ -2720,6 +2729,7 @@ int ha_maria::external_lock(THD *thd, int lock_type) if (file->trn) { /* This can only happen with tables created with clone() */ + DBUG_PRINT("info",("file->trn: %p", file->trn)); trnman_increment_locked_tables(file->trn); } @@ -2740,7 +2750,7 @@ int ha_maria::external_lock(THD *thd, int lock_type) } else { - TRN *trn= THD_TRN; + TRN *trn= (file->trn != &dummy_transaction_object ? file->trn : 0); /* End of transaction */ /* @@ -2755,8 +2765,7 @@ int ha_maria::external_lock(THD *thd, int lock_type) */ if (_ma_reenable_logging_for_table(file, TRUE)) DBUG_RETURN(1); - /** @todo zero file->trn also in commit and rollback */ - _ma_set_trn_for_table(file, NULL); // Safety + _ma_reset_trn_for_table(file); /* Ensure that file->state points to the current number of rows. This is needed if someone calls maria_info() without first doing an @@ -2812,13 +2821,6 @@ int ha_maria::start_stmt(THD *thd, thr_lock_type lock_type) DBUG_ASSERT(lock_type != TL_UNLOCK); DBUG_ASSERT(file->trn == trn); - /* - If there was an implicit commit under this LOCK TABLES by a previous - statement (like a DDL), at least if that previous statement was about a - different ha_maria than 'this' then this->file->trn is a stale - pointer. 
We fix it: - */ - _ma_set_trn_for_table(file, trn); /* As external_lock() was already called, don't increment locked_tables. Note that we call the function below possibly several times when @@ -2843,6 +2845,23 @@ int ha_maria::start_stmt(THD *thd, thr_lock_type lock_type) } +/* + Reset THD_TRN and all file->trn related to the transaction + This is needed as some calls, like extra() or external_lock() may access + it before next transaction is started +*/ + +static void reset_thd_trn(THD *thd, MARIA_HA *first_table) +{ + DBUG_ENTER("reset_thd_trn"); + THD_TRN= NULL; + for (MARIA_HA *table= first_table; table ; + table= table->trn_next) + _ma_reset_trn_for_table(table); + DBUG_VOID_RETURN; +} + + /** Performs an implicit commit of the Maria transaction and creates a new one. @@ -2866,10 +2885,10 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) TRN *trn; int error; uint locked_tables; - DYNAMIC_ARRAY used_tables; extern my_bool plugins_are_initialized; - + MARIA_HA *used_tables, *trn_next; DBUG_ENTER("ha_maria::implicit_commit"); + if (!maria_hton || !plugins_are_initialized || !(trn= THD_TRN)) DBUG_RETURN(0); if (!new_trn && (thd->locked_tables_mode == LTM_LOCK_TABLES || @@ -2887,48 +2906,16 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) locked_tables= trnman_has_locked_tables(trn); - if (new_trn && trn && trn->used_tables) - { - MARIA_USED_TABLES *tables; - /* - Save locked tables so that we can move them to another transaction - We are using a dynamic array as locked_tables in some cases can be - smaller than the used_tables list (for example when the server does - early unlock of tables. - */ - - my_init_dynamic_array2(&used_tables, sizeof(MARIA_SHARE*), (void*) 0, - locked_tables, 8, MYF(MY_THREAD_SPECIFIC)); - for (tables= (MARIA_USED_TABLES*) trn->used_tables; - tables; - tables= tables->next) - { - if (tables->share->base.born_transactional) - { - if (insert_dynamic(&used_tables, (uchar*) &tables->share)) - { - error= HA_ERR_OUT_OF_MEM; - goto end_and_free; - } - } - } - } - else - bzero(&used_tables, sizeof(used_tables)); - + used_tables= (MARIA_HA*) trn->used_instances; error= 0; if (unlikely(ma_commit(trn))) error= 1; if (!new_trn) { - /* - To be extra safe, we should also reset file->trn for all open - tables as some calls, like extra() may access it. We take care - of this in extra() by resetting file->trn if THD_TRN is 0. - */ - THD_TRN= NULL; + reset_thd_trn(thd, used_tables); goto end; } + /* We need to create a new transaction and put it in THD_TRN. Indeed, tables may be under LOCK TABLES, and so they will start the next @@ -2938,8 +2925,9 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) THD_TRN= trn; if (unlikely(trn == NULL)) { + reset_thd_trn(thd, used_tables); error= HA_ERR_OUT_OF_MEM; - goto end_and_free; + goto end; } /* Move all locked tables to the new transaction @@ -2949,35 +2937,25 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) in check table, we use the table without calling start_stmt(). 
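reset_thd_trn() above walks the new TRN::used_instances list through the trn_next pointers that this patch adds to MARIA_HA; the matching push happens in _ma_set_trn_for_table() in the new ma_trnman.h further below. A simplified, self-contained sketch of that intrusive singly-linked list (names are simplified for illustration; the real code also maintains used_tables and clears used_instances in the end-of-transaction hook):

struct handler_sketch
{
  struct trn_sketch *trn;           /* transaction this handler belongs to */
  struct handler_sketch *trn_next;  /* next handler in the same transaction */
};

struct trn_sketch
{
  struct handler_sketch *used_instances;  /* head of the handler list */
};

/* _ma_set_trn_for_table(): push the handler onto the transaction's list */
static void link_handler(struct trn_sketch *trn, struct handler_sketch *h)
{
  h->trn= trn;
  h->trn_next= trn->used_instances;
  trn->used_instances= h;
}

/* reset_thd_trn(): detach every handler so a later extra() or
   external_lock() call does not see a stale TRN pointer */
static void unlink_all_handlers(struct trn_sketch *trn)
{
  struct handler_sketch *h, *next;
  for (h= trn->used_instances; h; h= next)
  {
    next= h->trn_next;
    h->trn= 0;
    h->trn_next= 0;
  }
  trn->used_instances= 0;
}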
*/ - uint i; - for (i= 0 ; i < used_tables.elements ; i++) + for (MARIA_HA *handler= used_tables; handler ; + handler= trn_next) { - MARIA_SHARE *share; - LIST *handlers; + trn_next= handler->trn_next; + DBUG_ASSERT(handler->s->base.born_transactional); - share= *(dynamic_element(&used_tables, i, MARIA_SHARE**)); - /* Find table instances that was used in this transaction */ - for (handlers= share->open_list; handlers; handlers= handlers->next) + /* If handler uses versioning */ + if (handler->s->lock_key_trees) { - MARIA_HA *handler= (MARIA_HA*) handlers->data; - if (handler->external_ref && - ((TABLE*) handler->external_ref)->in_use == thd) - { - _ma_set_trn_for_table(handler, trn); - /* If handler uses versioning */ - if (handler->s->lock_key_trees) - { - if (_ma_setup_live_state(handler)) - error= HA_ERR_OUT_OF_MEM; - } - } + /* _ma_set_trn_for_table() will be called indirectly */ + if (_ma_setup_live_state(handler)) + error= HA_ERR_OUT_OF_MEM; } + else + _ma_set_trn_for_table(handler, trn); } /* This is just a commit, tables stay locked if they were: */ trnman_reset_locked_tables(trn, locked_tables); -end_and_free: - delete_dynamic(&used_tables); end: DBUG_RETURN(error); } @@ -3087,11 +3065,11 @@ static enum data_file_type maria_row_type(HA_CREATE_INFO *info) } -int ha_maria::create(const char *name, register TABLE *table_arg, +int ha_maria::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *ha_create_info) { int error; - uint create_flags= 0, record_count, i; + uint create_flags= 0, record_count= 0, i; char buff[FN_REFLEN]; MARIA_KEYDEF *keydef; MARIA_COLUMNDEF *recinfo; @@ -3357,10 +3335,10 @@ static int maria_commit(handlerton *hton __attribute__ ((unused)), trnman_set_flags(trn, trnman_get_flags(trn) & ~TRN_STATE_INFO_LOGGED); /* statement or transaction ? */ - if ((thd->variables.option_bits & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) && !all) + if ((thd->variables.option_bits & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) && + !all) DBUG_RETURN(0); // end of statement - DBUG_PRINT("info", ("THD_TRN set to 0x0")); - THD_TRN= 0; + reset_thd_trn(thd, (MARIA_HA*) trn->used_instances); DBUG_RETURN(ma_commit(trn)); // end of transaction } @@ -3377,8 +3355,7 @@ static int maria_rollback(handlerton *hton __attribute__ ((unused)), trnman_rollback_statement(trn); DBUG_RETURN(0); // end of statement } - DBUG_PRINT("info", ("THD_TRN set to 0x0")); - THD_TRN= 0; + reset_thd_trn(thd, (MARIA_HA*) trn->used_instances); DBUG_RETURN(trnman_rollback_trn(trn) ? 
HA_ERR_OUT_OF_MEM : 0); // end of transaction } diff --git a/storage/maria/ha_maria.h b/storage/maria/ha_maria.h index 51438462787..e67907039a1 100644 --- a/storage/maria/ha_maria.h +++ b/storage/maria/ha_maria.h @@ -195,6 +195,7 @@ public: private: DsMrr_impl ds_mrr; friend ICP_RESULT index_cond_func_maria(void *arg); + friend void reset_thd_trn(THD *thd); }; #endif /* HA_MARIA_INCLUDED */ diff --git a/storage/maria/ma_blockrec.c b/storage/maria/ma_blockrec.c index 8eaac990741..ae9f2fae889 100644 --- a/storage/maria/ma_blockrec.c +++ b/storage/maria/ma_blockrec.c @@ -271,6 +271,7 @@ #include "maria_def.h" #include "ma_blockrec.h" #include "trnman.h" +#include "ma_trnman.h" #include "ma_key_recover.h" #include "ma_recovery_util.h" #include @@ -7525,7 +7526,7 @@ void maria_ignore_trids(MARIA_HA *info) if (info->s->base.born_transactional) { if (!info->trn) - _ma_set_trn_for_table(info, &dummy_transaction_object); + _ma_set_tmp_trn_for_table(info, &dummy_transaction_object); /* Ignore transaction id when row is read */ info->trn->min_read_from= ~(TrID) 0; } diff --git a/storage/maria/ma_blockrec.h b/storage/maria/ma_blockrec.h index 8e5b4bc42de..6437672933b 100644 --- a/storage/maria/ma_blockrec.h +++ b/storage/maria/ma_blockrec.h @@ -190,8 +190,7 @@ MARIA_RECORD_POS _ma_write_init_block_record(MARIA_HA *info, const uchar *record); my_bool _ma_write_block_record(MARIA_HA *info, const uchar *record); my_bool _ma_write_abort_block_record(MARIA_HA *info); -my_bool _ma_compare_block_record(register MARIA_HA *info, - register const uchar *record); +my_bool _ma_compare_block_record(MARIA_HA *info, const uchar *record); void _ma_compact_block_page(MARIA_SHARE *share, uchar *buff, uint rownr, my_bool extend_block, TrID min_read_from, uint min_row_length); diff --git a/storage/maria/ma_close.c b/storage/maria/ma_close.c index 882e9f585f1..6e85551c24f 100644 --- a/storage/maria/ma_close.c +++ b/storage/maria/ma_close.c @@ -37,6 +37,8 @@ int maria_close(register MARIA_HA *info) /* Check that we have unlocked key delete-links properly */ DBUG_ASSERT(info->key_del_used == 0); + /* Check that file is not part of any uncommited transactions */ + DBUG_ASSERT(info->trn == 0 || info->trn == &dummy_transaction_object); if (share->reopen == 1) { diff --git a/storage/maria/ma_commit.c b/storage/maria/ma_commit.c index 68435a45c0a..0ae3868dbf6 100644 --- a/storage/maria/ma_commit.c +++ b/storage/maria/ma_commit.c @@ -15,6 +15,7 @@ #include "maria_def.h" #include "trnman.h" +#include "ma_trnman.h" /** writes a COMMIT record to log and commits transaction in memory @@ -43,9 +44,9 @@ int ma_commit(TRN *trn) COMMIT record) and this is not an issue as * transaction's updates were not made visible to other transactions * "commit ok" was not sent to client - Alternatively, Recovery might commit trn (if MY_MIN(rec_lsn) is before COMMIT - record), which is ok too. All in all it means that "trn committed" is not - 100% equal to "COMMIT record written". + Alternatively, Recovery might commit trn (if MY_MIN(rec_lsn) is before + COMMIT record), which is ok too. All in all it means that "trn committed" + is not 100% equal to "COMMIT record written". - if COMMIT record is written after trnman_commit_trn(): if crash happens between the two, trn will be rolled back which is an issue (transaction's updates were made visible to other transactions). @@ -93,7 +94,12 @@ int ma_commit(TRN *trn) int maria_commit(MARIA_HA *info) { - return info->s->now_transactional ? 
ma_commit(info->trn) : 0; + TRN *trn; + if (!info->s->now_transactional) + return 0; + trn= info->trn; + info->trn= 0; /* checked in maria_close() */ + return ma_commit(trn); } @@ -120,10 +126,7 @@ int maria_begin(MARIA_HA *info) TRN *trn= trnman_new_trn(0); if (unlikely(!trn)) DBUG_RETURN(HA_ERR_OUT_OF_MEM); - - DBUG_PRINT("info", ("TRN set to %p", trn)); _ma_set_trn_for_table(info, trn); } DBUG_RETURN(0); } - diff --git a/storage/maria/ma_create.c b/storage/maria/ma_create.c index d7dcbf387c8..503d2420c41 100644 --- a/storage/maria/ma_create.c +++ b/storage/maria/ma_create.c @@ -1432,7 +1432,7 @@ int _ma_update_state_lsns(MARIA_SHARE *share, LSN lsn, TrID create_trid, @retval 1 error (disk problem) */ -#if (_MSC_VER == 1310) +#if defined(_MSC_VER) && (_MSC_VER == 1310) /* Visual Studio 2003 compiler produces internal compiler error in this function. Disable optimizations to workaround. @@ -1505,6 +1505,6 @@ int _ma_update_state_lsns_sub(MARIA_SHARE *share, LSN lsn, TrID create_trid, MARIA_FILE_CREATE_TRID_OFFSET, MYF(MY_NABP)) || (do_sync && mysql_file_sync(file, MYF(0)))); } -#if (_MSC_VER == 1310) +#if defined(_MSC_VER) && (_MSC_VER == 1310) #pragma optimize("",on) #endif /*VS2003 compiler bug workaround*/ diff --git a/storage/maria/ma_delete_all.c b/storage/maria/ma_delete_all.c index a14603b24a5..ee75218e2b6 100644 --- a/storage/maria/ma_delete_all.c +++ b/storage/maria/ma_delete_all.c @@ -135,6 +135,9 @@ int maria_delete_all_rows(MARIA_HA *info) goto err; } + if (info->opt_flag & WRITE_CACHE_USED) + reinit_io_cache(&info->rec_cache, WRITE_CACHE, 0, 1, 1); + _ma_writeinfo(info, WRITEINFO_UPDATE_KEYFILE); #ifdef HAVE_MMAP /* Map again */ diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c index 41261b1422e..9feead42cf7 100644 --- a/storage/maria/ma_extra.c +++ b/storage/maria/ma_extra.c @@ -346,7 +346,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function, _ma_decrement_open_count(info, 0); if (info->trn) { - _ma_remove_table_from_trnman(share, info->trn); + _ma_remove_table_from_trnman(info); /* Ensure we don't point to the deleted data in trn */ info->state= info->state_start= &share->state.state; } @@ -409,7 +409,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function, if (info->trn) { mysql_mutex_lock(&share->intern_lock); - _ma_remove_table_from_trnman(share, info->trn); + _ma_remove_table_from_trnman(info); /* Ensure we don't point to the deleted data in trn */ info->state= info->state_start= &share->state.state; mysql_mutex_unlock(&share->intern_lock); diff --git a/storage/maria/ma_ft_nlq_search.c b/storage/maria/ma_ft_nlq_search.c index 3252b95e89e..3b0ea0baab7 100644 --- a/storage/maria/ma_ft_nlq_search.c +++ b/storage/maria/ma_ft_nlq_search.c @@ -77,11 +77,7 @@ static int walk_and_match(FT_WORD *word, uint32 count, ALL_IN_ONE *aio) my_off_t key_root; uint extra=HA_FT_WLEN+share->rec_reflength; MARIA_KEY key; -#if HA_FT_WTYPE == HA_KEYTYPE_FLOAT - float tmp_weight; -#else -#error -#endif + float tmp_weight; DBUG_ENTER("walk_and_match"); LINT_INIT_STRUCT(subkeys); @@ -139,12 +135,8 @@ static int walk_and_match(FT_WORD *word, uint32 count, ALL_IN_ONE *aio) r= _ma_search_first(info, keyinfo, key_root); goto do_skip; } -#if HA_FT_WTYPE == HA_KEYTYPE_FLOAT /* The weight we read was actually a float */ tmp_weight= subkeys.f; -#else -#error -#endif /* The following should be safe, even if we compare doubles */ if (tmp_weight==0) DBUG_RETURN(doc_cnt); /* stopword, doc_cnt should be 0 */ diff --git a/storage/maria/ma_ft_update.c 
b/storage/maria/ma_ft_update.c index ddf2a7251ab..1b4f018779a 100644 --- a/storage/maria/ma_ft_update.c +++ b/storage/maria/ma_ft_update.c @@ -289,17 +289,10 @@ MARIA_KEY *_ma_ft_make_key(MARIA_HA *info, MARIA_KEY *key, uint keynr, FT_WORD *wptr, my_off_t filepos) { uchar buf[HA_FT_MAXBYTELEN+16]; + float weight=(float) ((filepos==HA_OFFSET_ERROR) ? 0 : wptr->weight); DBUG_ENTER("_ma_ft_make_key"); -#if HA_FT_WTYPE == HA_KEYTYPE_FLOAT - { - float weight=(float) ((filepos==HA_OFFSET_ERROR) ? 0 : wptr->weight); - mi_float4store(buf,weight); - } -#else -#error -#endif - + mi_float4store(buf,weight); int2store(buf+HA_FT_WLEN,wptr->len); memcpy(buf+HA_FT_WLEN+2,wptr->pos,wptr->len); /* Can't be spatial so it's ok to call _ma_make_key directly here */ diff --git a/storage/maria/ma_fulltext.h b/storage/maria/ma_fulltext.h index 89f7268974c..dc663806d34 100644 --- a/storage/maria/ma_fulltext.h +++ b/storage/maria/ma_fulltext.h @@ -20,6 +20,11 @@ #include "maria_def.h" #include "ft_global.h" +/* If HA_FT_MAXLEN is change to 127 or over, it must be tested properly as + it may cause different representation on disk for full text indexes +*/ +#define HA_FT_MAXLEN 126 + int _ma_ft_cmp(MARIA_HA *, uint, const uchar *, const uchar *); int _ma_ft_add(MARIA_HA *, uint, uchar *, const uchar *, my_off_t); int _ma_ft_del(MARIA_HA *, uint, uchar *, const uchar *, my_off_t); diff --git a/storage/maria/ma_key.c b/storage/maria/ma_key.c index 6f3e17ed80d..703ce118843 100644 --- a/storage/maria/ma_key.c +++ b/storage/maria/ma_key.c @@ -279,7 +279,6 @@ MARIA_KEY *_ma_make_key(MARIA_HA *info, MARIA_KEY *int_key, uint keynr, } else if (keyseg->flag & HA_SWAP_KEY) { /* Numerical column */ -#ifdef HAVE_ISNAN if (type == HA_KEYTYPE_FLOAT) { float nr; @@ -303,7 +302,6 @@ MARIA_KEY *_ma_make_key(MARIA_HA *info, MARIA_KEY *int_key, uint keynr, continue; } } -#endif pos+=length; while (length--) { diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c index 3febf879ec6..4139409d477 100644 --- a/storage/maria/ma_open.c +++ b/storage/maria/ma_open.c @@ -19,6 +19,8 @@ #include "ma_sp_defs.h" #include "ma_rt_index.h" #include "ma_blockrec.h" +#include "trnman.h" +#include "ma_trnman.h" #include #include "ma_crypt.h" @@ -184,7 +186,7 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, if (!share->base.born_transactional) /* For transactional ones ... */ { /* ... force crash if no trn given */ - _ma_set_trn_for_table(&info, &dummy_transaction_object); + _ma_set_tmp_trn_for_table(&info, &dummy_transaction_object); info.state= &share->state.state; /* Change global values by default */ } else diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c index fd5995515c1..d10595fffd9 100644 --- a/storage/maria/ma_pagecache.c +++ b/storage/maria/ma_pagecache.c @@ -985,7 +985,7 @@ static int flush_all_key_blocks(PAGECACHE *pagecache) resizing, due to the page locking specific to this page cache. So we disable it for now. */ -#if NOT_USED /* keep disabled until code is fixed see above !! */ +#ifdef NOT_USED /* keep disabled until code is fixed see above !! 
*/ size_t resize_pagecache(PAGECACHE *pagecache, size_t use_mem, uint division_limit, uint age_threshold, uint changed_blocks_hash_size) diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c index 9f436f3d8e5..38200104538 100644 --- a/storage/maria/ma_recovery.c +++ b/storage/maria/ma_recovery.c @@ -3520,6 +3520,14 @@ void _ma_tmp_disable_logging_for_table(MARIA_HA *info, { MARIA_SHARE *share= info->s; DBUG_ENTER("_ma_tmp_disable_logging_for_table"); + + /* + We have to ensure that bitmap is flushed, as it's checking + that share->now_transactional is set + */ + if (share->now_transactional && share->data_file_type == BLOCK_RECORD) + _ma_bitmap_flush_all(share); + if (log_incomplete) { uchar log_data[FILEID_STORE_SIZE]; @@ -3583,7 +3591,10 @@ my_bool _ma_reenable_logging_for_table(MARIA_HA *info, my_bool flush_pages) if (share->now_transactional == share->base.born_transactional || !info->switched_transactional) + { + info->switched_transactional= FALSE; DBUG_RETURN(0); + } info->switched_transactional= FALSE; if ((share->now_transactional= share->base.born_transactional)) diff --git a/storage/maria/ma_sp_key.c b/storage/maria/ma_sp_key.c index 2a663c22ee2..87502aff5a9 100644 --- a/storage/maria/ma_sp_key.c +++ b/storage/maria/ma_sp_key.c @@ -77,7 +77,6 @@ MARIA_KEY *_ma_sp_make_key(MARIA_HA *info, MARIA_KEY *ret_key, uint keynr, DBUG_ASSERT(keyseg->type == HA_KEYTYPE_DOUBLE); val= mbr[start / sizeof (double)]; -#ifdef HAVE_ISNAN if (isnan(val)) { bzero(key, length); @@ -85,7 +84,6 @@ MARIA_KEY *_ma_sp_make_key(MARIA_HA *info, MARIA_KEY *ret_key, uint keynr, len+= length; continue; } -#endif if (keyseg->flag & HA_SWAP_KEY) { diff --git a/storage/maria/ma_state.c b/storage/maria/ma_state.c index a3c2d50bdc6..23cb625fc58 100644 --- a/storage/maria/ma_state.c +++ b/storage/maria/ma_state.c @@ -66,7 +66,7 @@ my_bool _ma_setup_live_state(MARIA_HA *info) DBUG_RETURN(1); trn= info->trn; - for (tables= (MARIA_USED_TABLES*) info->trn->used_tables; + for (tables= (MARIA_USED_TABLES*) trn->used_tables; tables; tables= tables->next) { @@ -551,6 +551,7 @@ my_bool _ma_trnman_end_trans_hook(TRN *trn, my_bool commit, my_free(tables); } trn->used_tables= 0; + trn->used_instances= 0; DBUG_RETURN(error); } @@ -565,18 +566,25 @@ my_bool _ma_trnman_end_trans_hook(TRN *trn, my_bool commit, share->internal_lock must be locked when function is called */ -void _ma_remove_table_from_trnman(MARIA_SHARE *share, TRN *trn) +void _ma_remove_table_from_trnman(MARIA_HA *info) { + MARIA_SHARE *share= info->s; + TRN *trn= info->trn; MARIA_USED_TABLES *tables, **prev; + MARIA_HA *handler, **prev_file; DBUG_ENTER("_ma_remove_table_from_trnman"); DBUG_PRINT("enter", ("trn: %p used_tables: %p share: %p in_trans: %d", trn, trn->used_tables, share, share->in_trans)); mysql_mutex_assert_owner(&share->intern_lock); + + if (trn == &dummy_transaction_object) + DBUG_VOID_RETURN; - for (prev= (MARIA_USED_TABLES**) (char*) &trn->used_tables, tables= *prev; - tables; - tables= *prev) + /* First remove share from used_tables */ + for (prev= (MARIA_USED_TABLES**) (char*) &trn->used_tables; + (tables= *prev); + prev= &tables->next) { if (tables->share == share) { @@ -585,8 +593,36 @@ void _ma_remove_table_from_trnman(MARIA_SHARE *share, TRN *trn) my_free(tables); break; } - prev= &tables->next; } + if (tables != 0) + { + /* + This can only happens in case of rename of intermediate table as + part of alter table + */ + DBUG_PRINT("warning", ("share: %p where not in used_tables_list", share)); + } + + /* unlink table from 
used_instances */ + for (prev_file= (MARIA_HA**) &trn->used_instances; + (handler= *prev_file); + prev_file= &handler->trn_next) + { + if (handler == info) + { + *prev_file= info->trn_next; + break; + } + } + if (handler != 0) + { + /* + This can only happens in case of rename of intermediate table as + part of alter table + */ + DBUG_PRINT("warning", ("table: %p where not in used_instances", info)); + } + info->trn= 0; /* Not part of trans anymore */ DBUG_VOID_RETURN; } diff --git a/storage/maria/ma_state.h b/storage/maria/ma_state.h index a86aada94fd..8728a2113e5 100644 --- a/storage/maria/ma_state.h +++ b/storage/maria/ma_state.h @@ -84,5 +84,5 @@ my_bool _ma_row_visible_non_transactional_table(MARIA_HA *info); my_bool _ma_row_visible_transactional_table(MARIA_HA *info); void _ma_remove_not_visible_states_with_lock(struct st_maria_share *share, my_bool all); -void _ma_remove_table_from_trnman(struct st_maria_share *share, TRN *trn); +void _ma_remove_table_from_trnman(MARIA_HA *info); void _ma_reset_history(struct st_maria_share *share); diff --git a/storage/maria/ma_trnman.h b/storage/maria/ma_trnman.h new file mode 100644 index 00000000000..9bfd1f0d047 --- /dev/null +++ b/storage/maria/ma_trnman.h @@ -0,0 +1,65 @@ +/* Copyright (C) 2006-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _ma_trnman_h +#define _ma_trnman_h + +/** + Sets table's trn and prints debug information + Links table into used_instances if new_trn is not 0 + + @param tbl MARIA_HA of table + @param newtrn what to put into tbl->trn +*/ + +static inline void _ma_set_trn_for_table(MARIA_HA *tbl, TRN *newtrn) +{ + DBUG_PRINT("info",("table: %p trn: %p -> %p", + tbl, tbl->trn, newtrn)); + + /* check that we are not calling this twice in a row */ + DBUG_ASSERT(newtrn->used_instances != (void*) tbl); + + tbl->trn= newtrn; + /* Link into used list */ + tbl->trn_next= (MARIA_HA*) newtrn->used_instances; + newtrn->used_instances= tbl; +} + + +/* + Same as _ma_set_trn_for_table(), but don't link table into used_instance list + Used when we want to temporary set trn for a table in extra() +*/ + +static inline void _ma_set_tmp_trn_for_table(MARIA_HA *tbl, TRN *newtrn) +{ + DBUG_PRINT("info",("table: %p trn: %p -> %p", + tbl, tbl->trn, newtrn)); + tbl->trn= newtrn; +} + + +/* + Reset TRN in table +*/ + +static inline void _ma_reset_trn_for_table(MARIA_HA *tbl) +{ + DBUG_PRINT("info",("table: %p trn: %p -> NULL", tbl, tbl->trn)); + tbl->trn= 0; +} + +#endif /* _ma_trnman_h */ diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h index 51cb9574af2..76233ef4a94 100644 --- a/storage/maria/maria_def.h +++ b/storage/maria/maria_def.h @@ -110,7 +110,7 @@ typedef struct st_maria_sort_param int (*key_read)(struct st_maria_sort_param *, uchar *); int (*key_write)(struct st_maria_sort_param *, const uchar *); void (*lock_in_memory)(HA_CHECK *); - int (*write_keys)(struct 
st_maria_sort_param *, register uchar **, + int (*write_keys)(struct st_maria_sort_param *, uchar **, ulonglong , struct st_buffpek *, IO_CACHE *); my_off_t (*read_to_buffer)(IO_CACHE *,struct st_buffpek *, uint); int (*write_key)(struct st_maria_sort_param *, IO_CACHE *,uchar *, @@ -602,6 +602,7 @@ struct st_maria_handler { MARIA_SHARE *s; /* Shared between open:s */ struct st_ma_transaction *trn; /* Pointer to active transaction */ + struct st_maria_handler *trn_next; MARIA_STATUS_INFO *state, state_save; MARIA_STATUS_INFO *state_start; /* State at start of transaction */ MARIA_USED_TABLES *used_tables; @@ -862,19 +863,6 @@ struct st_maria_handler #define get_pack_length(length) ((length) >= 255 ? 3 : 1) #define _ma_have_versioning(info) ((info)->row_flag & ROW_FLAG_TRANSID) -/** - Sets table's trn and prints debug information - @param tbl MARIA_HA of table - @param newtrn what to put into tbl->trn - @note cast of newtrn is because %p of NULL gives warning (NULL is int) -*/ -#define _ma_set_trn_for_table(tbl, newtrn) do { \ - DBUG_PRINT("info",("table: %p trn: %p -> %p", \ - (tbl), (tbl)->trn, (void *)(newtrn))); \ - (tbl)->trn= (newtrn); \ - } while (0) - - #define MARIA_MIN_BLOCK_LENGTH 20 /* Because of delete-link */ /* Don't use to small record-blocks */ #define MARIA_EXTEND_BLOCK_LENGTH 20 @@ -1052,7 +1040,7 @@ my_off_t _ma_no_keypos_to_recpos(MARIA_SHARE *share, my_off_t pos); extern my_bool _ma_ck_write(MARIA_HA *info, MARIA_KEY *key); extern my_bool _ma_enlarge_root(MARIA_HA *info, MARIA_KEY *key, MARIA_RECORD_POS *root); -int _ma_insert(register MARIA_HA *info, MARIA_KEY *key, +int _ma_insert(MARIA_HA *info, MARIA_KEY *key, MARIA_PAGE *anc_page, uchar *key_pos, uchar *key_buff, MARIA_PAGE *father_page, uchar *father_key_pos, my_bool insert_last); @@ -1094,7 +1082,7 @@ extern void _ma_store_bin_pack_key(MARIA_KEYDEF *keyinfo, uchar *key_pos, MARIA_KEY_PARAM *s_temp); extern my_bool _ma_ck_delete(MARIA_HA *info, MARIA_KEY *key); -extern my_bool _ma_ck_real_delete(register MARIA_HA *info, MARIA_KEY *key, +extern my_bool _ma_ck_real_delete(MARIA_HA *info, MARIA_KEY *key, my_off_t *root); extern int _ma_readinfo(MARIA_HA *info, int lock_flag, int check_keybuffer); extern int _ma_writeinfo(MARIA_HA *info, uint options); @@ -1166,7 +1154,7 @@ extern my_bool _ma_fetch_keypage(MARIA_PAGE *page, MARIA_HA *info, extern my_bool _ma_write_keypage(MARIA_PAGE *page, enum pagecache_page_lock lock, int level); extern int _ma_dispose(MARIA_HA *info, my_off_t pos, my_bool page_not_read); -extern my_off_t _ma_new(register MARIA_HA *info, int level, +extern my_off_t _ma_new(MARIA_HA *info, int level, MARIA_PINNED_PAGE **page_link); extern my_bool _ma_compact_keypage(MARIA_PAGE *page, TrID min_read_from); extern uint transid_store_packed(MARIA_HA *info, uchar *to, ulonglong trid); @@ -1354,7 +1342,7 @@ extern MARIA_HA *_ma_test_if_reopen(const char *filename); my_bool _ma_check_table_is_closed(const char *name, const char *where); int _ma_open_datafile(MARIA_HA *info, MARIA_SHARE *share); int _ma_open_keyfile(MARIA_SHARE *share); -void _ma_setup_functions(register MARIA_SHARE *share); +void _ma_setup_functions(MARIA_SHARE *share); my_bool _ma_dynmap_file(MARIA_HA *info, my_off_t size); void _ma_remap_file(MARIA_HA *info, my_off_t size); @@ -1436,7 +1424,7 @@ extern my_bool maria_flush_log_for_page_none(PAGECACHE_IO_HOOK_ARGS *args); extern PAGECACHE *maria_log_pagecache; extern void ma_set_index_cond_func(MARIA_HA *info, index_cond_func_t func, void *func_arg); -ICP_RESULT 
ma_check_index_cond(register MARIA_HA *info, uint keynr, uchar *record); +ICP_RESULT ma_check_index_cond(MARIA_HA *info, uint keynr, uchar *record); extern my_bool ma_yield_and_check_if_killed(MARIA_HA *info, int inx); extern my_bool ma_killed_standalone(MARIA_HA *); diff --git a/storage/maria/trnman.c b/storage/maria/trnman.c index bc48d39baaa..5b3c9f0287a 100644 --- a/storage/maria/trnman.c +++ b/storage/maria/trnman.c @@ -357,6 +357,7 @@ TRN *trnman_new_trn(WT_THD *wt) trn->commit_trid= MAX_TRID; trn->rec_lsn= trn->undo_lsn= trn->first_undo_lsn= 0; trn->used_tables= 0; + trn->used_instances= 0; trn->locked_tables= 0; trn->flags= 0; diff --git a/storage/maria/trnman.h b/storage/maria/trnman.h index 66139a31230..11c73797766 100644 --- a/storage/maria/trnman.h +++ b/storage/maria/trnman.h @@ -46,7 +46,8 @@ struct st_ma_transaction LF_PINS *pins; WT_THD *wt; mysql_mutex_t state_lock; - void *used_tables; /**< Tables used by transaction */ + void *used_tables; /**< Table shares used by transaction */ + void *used_instances; /* table files used by transaction */ TRN *next, *prev; TrID trid, min_read_from, commit_trid; LSN rec_lsn, undo_lsn; diff --git a/storage/mroonga/CMakeLists.txt b/storage/mroonga/CMakeLists.txt index 5d8e8c1eeb8..5a7d4699386 100644 --- a/storage/mroonga/CMakeLists.txt +++ b/storage/mroonga/CMakeLists.txt @@ -315,6 +315,9 @@ if(MRN_BUNDLED) ${MRN_ALL_SOURCES} STORAGE_ENGINE MODULE_ONLY LINK_LIBRARIES ${MRN_LIBRARIES}) + if(NOT TARGET mroonga) + return() + endif() else() add_library(mroonga MODULE ${MRN_ALL_SOURCES}) diff --git a/storage/mroonga/vendor/groonga/CMakeLists.txt b/storage/mroonga/vendor/groonga/CMakeLists.txt index e27070f9e0c..ee526646c09 100644 --- a/storage/mroonga/vendor/groonga/CMakeLists.txt +++ b/storage/mroonga/vendor/groonga/CMakeLists.txt @@ -361,10 +361,18 @@ if(NOT ${GRN_WITH_LZ4} STREQUAL "no") pkg_check_modules(LIBLZ4 liblz4) endif() if(LIBLZ4_FOUND) + # According to CMake documentation, this is the recommended way to force + # looking in LIBRARY_DIRS first and in regular system paths otherwise. + # + # pkg_check_modules does not guarantee that LIBLZ4_LIBRARY_DIRS will be + # set. If it's not set we won't find the library without looking through + # the regular system paths. 
find_library(LZ4_LIBS - NAMES ${LIBLZ4_LIBRARIES} - PATHS ${LIBLZ4_LIBRARY_DIRS} - NO_DEFAULT_PATH) + NAMES ${LIBLZ4_LIBRARIES} + PATHS ${LIBLZ4_LIBRARY_DIRS} + NO_DEFAULT_PATH) + find_library(LZ4_LIBS + NAMES ${LIBLZ4_LIBRARIES}) set(GRN_WITH_LZ4 TRUE) else() if(${GRN_WITH_LZ4} STREQUAL "yes") diff --git a/storage/mroonga/vendor/groonga/lib/CMakeLists.txt b/storage/mroonga/vendor/groonga/lib/CMakeLists.txt index 2274e95aa24..64fadb8bbdd 100644 --- a/storage/mroonga/vendor/groonga/lib/CMakeLists.txt +++ b/storage/mroonga/vendor/groonga/lib/CMakeLists.txt @@ -179,6 +179,7 @@ if(GRN_WITH_MRUBY) endif() # Workaround GCC ICE on ARM64 -IF(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") +IF(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64" AND + CMAKE_C_COMPILER_VERSION VERSION_GREATER "5") ADD_COMPILE_FLAGS(ts/ts_expr_node.c COMPILE_FLAGS "-fno-tree-loop-vectorize") ENDIF() diff --git a/storage/mroonga/vendor/groonga/lib/alloc.c b/storage/mroonga/vendor/groonga/lib/alloc.c index 5d77c19e74c..face9a3dde1 100644 --- a/storage/mroonga/vendor/groonga/lib/alloc.c +++ b/storage/mroonga/vendor/groonga/lib/alloc.c @@ -644,7 +644,7 @@ grn_ctx_free_lifo(grn_ctx *ctx, void *ptr, } } -#if USE_DYNAMIC_MALLOC_CHANGE +#if defined(USE_DYNAMIC_MALLOC_CHANGE) grn_malloc_func grn_ctx_get_malloc(grn_ctx *ctx) { diff --git a/storage/mroonga/vendor/groonga/lib/grn.h b/storage/mroonga/vendor/groonga/lib/grn.h index 0d0768eba41..541c19d3e21 100644 --- a/storage/mroonga/vendor/groonga/lib/grn.h +++ b/storage/mroonga/vendor/groonga/lib/grn.h @@ -259,7 +259,7 @@ typedef pthread_key_t grn_thread_key; # define THREAD_SETSPECIFIC(key, value) pthread_setspecific(key, value) # define THREAD_GETSPECIFIC(key) pthread_getspecific(key) -#if USE_UYIELD +#if defined(USE_UYIELD) extern int grn_uyield_count; #define GRN_TEST_YIELD() do {\ if (((++grn_uyield_count) & (0x20 - 1)) == 0) {\ diff --git a/storage/myisam/ft_nlq_search.c b/storage/myisam/ft_nlq_search.c index 3945484c8eb..55fae97bb3c 100644 --- a/storage/myisam/ft_nlq_search.c +++ b/storage/myisam/ft_nlq_search.c @@ -75,11 +75,7 @@ static int walk_and_match(FT_WORD *word, uint32 count, ALL_IN_ONE *aio) MI_KEYDEF *keyinfo=info->s->keyinfo+aio->keynr; my_off_t key_root; uint extra= HA_FT_WLEN + info->s->rec_reflength; -#if HA_FT_WTYPE == HA_KEYTYPE_FLOAT float tmp_weight; -#else -#error -#endif DBUG_ENTER("walk_and_match"); LINT_INIT_STRUCT(subkeys); @@ -134,12 +130,8 @@ static int walk_and_match(FT_WORD *word, uint32 count, ALL_IN_ONE *aio) r=_mi_search_first(info, keyinfo, key_root); goto do_skip; } -#if HA_FT_WTYPE == HA_KEYTYPE_FLOAT /* The weight we read was actually a float */ tmp_weight= subkeys.f; -#else -#error -#endif /* The following should be safe, even if we compare doubles */ if (tmp_weight==0) DBUG_RETURN(doc_cnt); /* stopword, doc_cnt should be 0 */ diff --git a/storage/myisam/ft_update.c b/storage/myisam/ft_update.c index f851c0236ae..51d8ee81339 100644 --- a/storage/myisam/ft_update.c +++ b/storage/myisam/ft_update.c @@ -281,17 +281,10 @@ uint _ft_make_key(MI_INFO *info, uint keynr, uchar *keybuf, FT_WORD *wptr, my_off_t filepos) { uchar buf[HA_FT_MAXBYTELEN+16]; + float weight=(float) ((filepos==HA_OFFSET_ERROR) ? 0 : wptr->weight); DBUG_ENTER("_ft_make_key"); -#if HA_FT_WTYPE == HA_KEYTYPE_FLOAT - { - float weight=(float) ((filepos==HA_OFFSET_ERROR) ? 
0 : wptr->weight); - mi_float4store(buf,weight); - } -#else -#error -#endif - + mi_float4store(buf,weight); int2store(buf+HA_FT_WLEN,wptr->len); memcpy(buf+HA_FT_WLEN+2,wptr->pos,wptr->len); DBUG_RETURN(_mi_make_key(info,keynr,(uchar*) keybuf,buf,filepos)); diff --git a/storage/myisam/fulltext.h b/storage/myisam/fulltext.h index 98b3247ba19..05e0dd9c30e 100644 --- a/storage/myisam/fulltext.h +++ b/storage/myisam/fulltext.h @@ -21,6 +21,11 @@ #include "myisamdef.h" #include "ft_global.h" +/* If HA_FT_MAXLEN is change to 127 or over, it must be tested properly as + it may cause different representation on disk for full text indexes +*/ +#define HA_FT_MAXLEN 126 + int _mi_ft_cmp(MI_INFO *, uint, const uchar *, const uchar *); int _mi_ft_add(MI_INFO *, uint, uchar *, const uchar *, my_off_t); int _mi_ft_del(MI_INFO *, uint, uchar *, const uchar *, my_off_t); diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index c540f7df14b..164851974d6 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -2118,7 +2118,7 @@ void ha_myisam::update_create_info(HA_CREATE_INFO *create_info) } -int ha_myisam::create(const char *name, register TABLE *table_arg, +int ha_myisam::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *ha_create_info) { int error; diff --git a/storage/myisam/mi_delete_all.c b/storage/myisam/mi_delete_all.c index 37fdf2dcb04..c772e843ecf 100644 --- a/storage/myisam/mi_delete_all.c +++ b/storage/myisam/mi_delete_all.c @@ -62,6 +62,10 @@ int mi_delete_all_rows(MI_INFO *info) if (mysql_file_chsize(info->dfile, 0, 0, MYF(MY_WME)) || mysql_file_chsize(share->kfile, share->base.keystart, 0, MYF(MY_WME))) goto err; + + if (info->opt_flag & WRITE_CACHE_USED) + reinit_io_cache(&info->rec_cache, WRITE_CACHE, 0, 1, 1); + (void) _mi_writeinfo(info,WRITEINFO_UPDATE_KEYFILE); DBUG_RETURN(0); diff --git a/storage/myisam/mi_key.c b/storage/myisam/mi_key.c index 52066c0033d..43babb2968b 100644 --- a/storage/myisam/mi_key.c +++ b/storage/myisam/mi_key.c @@ -150,7 +150,6 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, } else if (keyseg->flag & HA_SWAP_KEY) { /* Numerical column */ -#ifdef HAVE_ISNAN if (type == HA_KEYTYPE_FLOAT) { float nr; @@ -174,7 +173,6 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, continue; } } -#endif pos+=length; while (length--) { diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h index 41ad0e8dd7f..e350626f192 100644 --- a/storage/myisam/myisamdef.h +++ b/storage/myisam/myisamdef.h @@ -539,8 +539,7 @@ extern uchar *_mi_get_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page, uchar *key, uchar *keypos, uint *return_key_length); extern uint _mi_keylength(MI_KEYDEF *keyinfo, uchar *key); -extern uint _mi_keylength_part(MI_KEYDEF *keyinfo, register uchar *key, - HA_KEYSEG *end); +extern uint _mi_keylength_part(MI_KEYDEF *keyinfo, uchar *key, HA_KEYSEG *end); extern uchar *_mi_move_key(MI_KEYDEF *keyinfo, uchar *to, uchar *from); extern int _mi_search_next(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key, uint key_length, uint nextflag, my_off_t pos); @@ -719,12 +718,12 @@ my_bool check_table_is_closed(const char *name, const char *where); int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share); int mi_open_keyfile(MYISAM_SHARE *share); -void mi_setup_functions(register MYISAM_SHARE *share); +void mi_setup_functions(MYISAM_SHARE *share); my_bool mi_dynmap_file(MI_INFO *info, my_off_t size); int mi_munmap_file(MI_INFO *info); void mi_remap_file(MI_INFO *info, my_off_t size); 
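The ft_update.c hunk just above (like the earlier ma_ft_update.c one) flattens the old #if HA_FT_WTYPE == HA_KEYTYPE_FLOAT blocks into straight-line code that always stores the word weight as a float. A sketch of the key image those functions build, assuming HA_FT_WLEN is the 4-byte size of the stored weight (the offsets match the int2store/memcpy calls in the hunks); plain memcpy stands in here for the byte-order-aware mi_float4store/int2store macros:

#include <string.h>

/* layout: [ weight : 4 bytes ][ word length : 2 bytes ][ word bytes ] */
static size_t ft_key_image_sketch(unsigned char *buf, float weight,
                                  const unsigned char *word,
                                  unsigned short len)
{
  memcpy(buf, &weight, 4);          /* mi_float4store(buf, weight)      */
  memcpy(buf + 4, &len, 2);         /* int2store(buf + HA_FT_WLEN, len) */
  memcpy(buf + 4 + 2, word, len);   /* the word bytes themselves        */
  return 4 + 2 + (size_t) len;
}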
-ICP_RESULT mi_check_index_cond(register MI_INFO *info, uint keynr, uchar *record); +ICP_RESULT mi_check_index_cond(MI_INFO *info, uint keynr, uchar *record); /* Functions needed by mi_check */ int killed_ptr(HA_CHECK *param); void mi_check_print_error(HA_CHECK *param, const char *fmt, ...); diff --git a/storage/myisam/mysql-test/storage_engine/trx/xa_recovery.rdiff b/storage/myisam/mysql-test/storage_engine/trx/xa_recovery.rdiff index e637643d59d..4b057019bbd 100644 --- a/storage/myisam/mysql-test/storage_engine/trx/xa_recovery.rdiff +++ b/storage/myisam/mysql-test/storage_engine/trx/xa_recovery.rdiff @@ -10,7 +10,7 @@ call mtr.add_suppression("Found 2 prepared XA transactions"); FLUSH TABLES; DROP TABLE IF EXISTS t1; -@@ -18,12 +24,18 @@ +@@ -18,12 +24,17 @@ connection default; XA RECOVER; formatID gtrid_length bqual_length data @@ -28,6 +28,5 @@ 4 +Warnings: +Error 145 Table './test/t1' is marked as crashed and should be repaired -+Error 1194 Table 't1' is marked as crashed and should be repaired +Error 1034 1 client is using or hasn't closed the table properly DROP TABLE t1; diff --git a/storage/myisam/sp_key.c b/storage/myisam/sp_key.c index 3837ed01bd2..2f8a723accf 100644 --- a/storage/myisam/sp_key.c +++ b/storage/myisam/sp_key.c @@ -66,7 +66,6 @@ uint sp_make_key(register MI_INFO *info, uint keynr, uchar *key, DBUG_ASSERT(keyseg->type == HA_KEYTYPE_DOUBLE); val= mbr[start / sizeof (double)]; -#ifdef HAVE_ISNAN if (isnan(val)) { bzero(key, length); @@ -74,7 +73,6 @@ uint sp_make_key(register MI_INFO *info, uint keynr, uchar *key, len+= length; continue; } -#endif if (keyseg->flag & HA_SWAP_KEY) { diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc index 4a35f3666d0..18860b31839 100644 --- a/storage/myisammrg/ha_myisammrg.cc +++ b/storage/myisammrg/ha_myisammrg.cc @@ -1575,7 +1575,7 @@ int ha_myisammrg::create_mrg(const char *name, HA_CREATE_INFO *create_info) } -int ha_myisammrg::create(const char *name, register TABLE *form, +int ha_myisammrg::create(const char *name, TABLE *form, HA_CREATE_INFO *create_info) { char buff[FN_REFLEN]; diff --git a/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff index 35d4f6b63d6..eddb6872ec3 100644 --- a/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff @@ -1,6 +1,6 @@ --- suite/storage_engine/parts/repair_table.result 2017-08-28 19:29:20.491633306 +0300 +++ suite/storage_engine/parts/repair_table.reject 2017-08-28 19:34:41.723633059 +0300 -@@ -1,235 +1,115 @@ +@@ -1,232 +1,116 @@ call mtr.add_suppression("Table '.*t1.*' is marked as crashed and should be repaired"); DROP TABLE IF EXISTS t1, t2; CREATE TABLE t1 (a , b ) ENGINE= PARTITION BY HASH(a) PARTITIONS 2; @@ -165,6 +165,9 @@ REPAIR TABLE t1 USE_FRM; Table Op Msg_type Msg_text -test.t1 repair status OK ++test.t1 repair Error Table 'test.t1' doesn't exist ++test.t1 repair status Operation failed + db.opt -t1#P#p0.MYD -t1#P#p0.MYI -t1#P#p1.MYD @@ -189,7 +192,6 @@ -15 o -Warnings: -Error 145 Table './test/t1#P#p0' is marked as crashed and should be repaired --Error 1194 Table 't1' is marked as crashed and should be repaired -Error 1034 Number of rows changed from 3 to 2 -# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). 
-# If you got a difference in error message, just add it to rdiff file @@ -215,7 +217,6 @@ -15 o -Warnings: -Error 145 Table './test/t1#P#p0' is marked as crashed and should be repaired --Error 1194 Table 't1' is marked as crashed and should be repaired -Error 1034 Number of rows changed from 2 to 3 -# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). -# If you got a difference in error message, just add it to rdiff file @@ -240,7 +241,6 @@ -15 o -Warnings: -Error 145 Table './test/t1#P#p1' is marked as crashed and should be repaired --Error 1194 Table 't1' is marked as crashed and should be repaired -Error 1034 Number of rows changed from 4 to 3 -# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). -# If you got a difference in error message, just add it to rdiff file @@ -269,7 +269,6 @@ -15 o -Warnings: -Error 145 Table './test/t1#P#p1' is marked as crashed and should be repaired --Error 1194 Table 't1' is marked as crashed and should be repaired -Error 1034 Number of rows changed from 3 to 4 -# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). -# If you got a difference in error message, just add it to rdiff file @@ -296,7 +295,5 @@ -15 o -# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). -# If you got a difference in error message, just add it to rdiff file -+test.t1 repair Error Table 'test.t1' doesn't exist -+test.t1 repair status Operation failed DROP TABLE t1; +ERROR 42S02: Unknown table 'test.t1' diff --git a/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff index 79f6c7040e0..d6c46b8c5b8 100644 --- a/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff @@ -71,7 +71,7 @@ DROP TABLE t1, t2; call mtr.add_suppression("Got an error from thread_id=.*"); call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table"); -@@ -63,45 +57,32 @@ +@@ -63,46 +57,33 @@ CREATE TABLE t1 (a , b , (a)) ENGINE= ; REPAIR TABLE t1; Table Op Msg_type Msg_text @@ -87,9 +87,10 @@ Table Op Msg_type Msg_text -test.t1 repair warning Number of rows changed from 0 to 3 -test.t1 repair status OK ++test.t1 repair note The storage engine for the table doesn't support repair + db.opt -t1.MYD -t1.MYI -+test.t1 repair note The storage engine for the table doesn't support repair +t1.MRG t1.frm INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/xa_recovery.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/xa_recovery.rdiff index 8dc888a1d58..fa920abefc8 100644 --- a/storage/myisammrg/mysql-test/storage_engine/trx/xa_recovery.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/trx/xa_recovery.rdiff @@ -10,7 +10,7 @@ call mtr.add_suppression("Found 2 prepared XA transactions"); FLUSH TABLES; DROP TABLE IF EXISTS t1; -@@ -18,12 +24,18 @@ +@@ -18,12 +24,17 @@ connection default; XA RECOVER; formatID gtrid_length bqual_length data @@ -28,6 +28,5 @@ 4 +Warnings: +Error 145 Table './mrg/t1' is marked as crashed and should be repaired -+Error 1194 Table 't1' is marked as crashed and should be repaired +Error 1034 1 client is using or hasn't closed the table properly DROP TABLE t1; diff --git a/storage/oqgraph/graphcore-config.h b/storage/oqgraph/graphcore-config.h index 2afb7dfbcd6..3ef9da152ad 100644 --- a/storage/oqgraph/graphcore-config.h +++ b/storage/oqgraph/graphcore-config.h @@ -27,6 +27,8 @@ 
#define BOOST_ALL_NO_LIB 1 #define BOOST_NO_RTTI 1 #define BOOST_NO_TYPEID 1 +#define BOOST_NO_HASH 1 +#define BOOST_NO_SLIST 1 #ifdef DBUG_OFF #define NDEBUG 1 diff --git a/storage/oqgraph/oqgraph_shim.h b/storage/oqgraph/oqgraph_shim.h index df578c9e4d0..aab6e797306 100644 --- a/storage/oqgraph/oqgraph_shim.h +++ b/storage/oqgraph/oqgraph_shim.h @@ -27,9 +27,6 @@ #include "oqgraph_judy.h" #include "oqgraph_thunk.h" -#define BOOST_NO_HASH 1 -#define BOOST_NO_SLIST 1 - #include #include diff --git a/storage/perfschema/ha_perfschema.cc b/storage/perfschema/ha_perfschema.cc index e8e9581d54b..78bd2d34c32 100644 --- a/storage/perfschema/ha_perfschema.cc +++ b/storage/perfschema/ha_perfschema.cc @@ -214,7 +214,7 @@ maria_declare_plugin(perfschema) 0x0001, pfs_status_vars, NULL, - "5.6.36", + "5.6.40", MariaDB_PLUGIN_MATURITY_STABLE } maria_declare_plugin_end; diff --git a/storage/perfschema/pfs.cc b/storage/perfschema/pfs.cc index 1b57a3d38ab..5bdc973341f 100644 --- a/storage/perfschema/pfs.cc +++ b/storage/perfschema/pfs.cc @@ -1615,6 +1615,14 @@ open_table_v1(PSI_table_share *share, const void *identity) { PFS_table_share *pfs_table_share= reinterpret_cast (share); + /* + When the performance schema is off, do not instrument anything. + Table handles have short life cycle, instrumentation will happen + again if needed during the next open(). + */ + if (psi_unlikely(! flag_global_instrumentation)) + return NULL; + if (unlikely(pfs_table_share == NULL)) return NULL; @@ -1626,14 +1634,6 @@ open_table_v1(PSI_table_share *share, const void *identity) if (! global_table_io_class.m_enabled && ! global_table_lock_class.m_enabled) return NULL; - /* - When the performance schema is off, do not instrument anything. - Table handles have short life cycle, instrumentation will happen - again if needed during the next open(). - */ - if (! flag_global_instrumentation) - return NULL; - PFS_thread *thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS); if (unlikely(thread == NULL)) return NULL; @@ -1668,6 +1668,12 @@ rebind_table_v1(PSI_table_share *share, const void *identity, PSI_table *table) PFS_thread *thread; DBUG_ASSERT(pfs->m_thread_owner == NULL); + if (psi_unlikely(! flag_global_instrumentation)) + { + destroy_table(pfs); + return NULL; + } + /* The table handle was already instrumented, reuse it for this thread. */ thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS); @@ -1683,16 +1689,13 @@ rebind_table_v1(PSI_table_share *share, const void *identity, PSI_table *table) return NULL; } - if (unlikely(! flag_global_instrumentation)) - { - destroy_table(pfs); - return NULL; - } - pfs->m_thread_owner= thread; return table; } + if (psi_unlikely(! flag_global_instrumentation)) + return NULL; + /* See open_table_v1() */ PFS_table_share *pfs_table_share= reinterpret_cast (share); @@ -1706,9 +1709,6 @@ rebind_table_v1(PSI_table_share *share, const void *identity, PSI_table *table) if (! global_table_io_class.m_enabled && ! global_table_lock_class.m_enabled) return NULL; - if (! flag_global_instrumentation) - return NULL; - PFS_thread *thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS); if (unlikely(thread == NULL)) return NULL; @@ -1760,7 +1760,7 @@ static void destroy_socket_v1(PSI_socket *socket) */ static void create_file_v1(PSI_file_key key, const char *name, File file) { - if (! flag_global_instrumentation) + if (psi_unlikely(! flag_global_instrumentation)) return; int index= (int) file; if (unlikely(index < 0)) @@ -2215,7 +2215,7 @@ start_mutex_wait_v1(PSI_mutex_locker_state *state, if (! 
pfs_mutex->m_enabled) return NULL; - register uint flags; + uint flags; ulonglong timer_start= 0; if (flag_thread_instrumentation) @@ -2313,7 +2313,7 @@ start_rwlock_wait_v1(PSI_rwlock_locker_state *state, if (! pfs_rwlock->m_enabled) return NULL; - register uint flags; + uint flags; ulonglong timer_start= 0; if (flag_thread_instrumentation) @@ -2421,7 +2421,7 @@ start_cond_wait_v1(PSI_cond_locker_state *state, if (! pfs_cond->m_enabled) return NULL; - register uint flags; + uint flags; ulonglong timer_start= 0; if (flag_thread_instrumentation) @@ -2565,7 +2565,7 @@ start_table_io_wait_v1(PSI_table_locker_state *state, PFS_thread *pfs_thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS); - register uint flags; + uint flags; ulonglong timer_start= 0; if (flag_thread_instrumentation) @@ -2691,7 +2691,7 @@ start_table_lock_wait_v1(PSI_table_locker_state *state, DBUG_ASSERT((uint) lock_type < array_elements(table_lock_operation_map)); - register uint flags; + uint flags; ulonglong timer_start= 0; if (flag_thread_instrumentation) @@ -2783,7 +2783,7 @@ get_thread_file_name_locker_v1(PSI_file_locker_state *state, DBUG_ASSERT(static_cast (op) < array_elements(file_operation_map)); DBUG_ASSERT(state != NULL); - if (! flag_global_instrumentation) + if (psi_unlikely(! flag_global_instrumentation)) return NULL; PFS_file_class *klass= find_file_class(key); if (unlikely(klass == NULL)) @@ -2799,7 +2799,7 @@ get_thread_file_name_locker_v1(PSI_file_locker_state *state, if (flag_thread_instrumentation && ! pfs_thread->m_enabled) return NULL; - register uint flags; + uint flags; state->m_thread= reinterpret_cast (pfs_thread); flags= STATE_FLAG_THREAD; @@ -2868,7 +2868,7 @@ get_thread_file_stream_locker_v1(PSI_file_locker_state *state, if (! pfs_file->m_enabled) return NULL; - register uint flags; + uint flags; if (flag_thread_instrumentation) { @@ -2974,7 +2974,7 @@ get_thread_file_descriptor_locker_v1(PSI_file_locker_state *state, DBUG_ASSERT(pfs_file->m_class != NULL); PFS_file_class *klass= pfs_file->m_class; - register uint flags; + uint flags; if (flag_thread_instrumentation) { @@ -3063,7 +3063,7 @@ start_socket_wait_v1(PSI_socket_locker_state *state, if (!pfs_socket->m_enabled || pfs_socket->m_idle) return NULL; - register uint flags= 0; + uint flags= 0; ulonglong timer_start= 0; if (flag_thread_instrumentation) @@ -3316,13 +3316,13 @@ start_idle_wait_v1(PSI_idle_locker_state* state, const char *src_file, uint src_ { DBUG_ASSERT(state != NULL); - if (!flag_global_instrumentation) + if (psi_unlikely(! 
flag_global_instrumentation)) return NULL; if (!global_idle_class.m_enabled) return NULL; - register uint flags= 0; + uint flags= 0; ulonglong timer_start= 0; if (flag_thread_instrumentation) @@ -3404,7 +3404,7 @@ static void end_idle_wait_v1(PSI_idle_locker* locker) ulonglong timer_end= 0; ulonglong wait_time= 0; - register uint flags= state->m_flags; + uint flags= state->m_flags; if (flags & STATE_FLAG_TIMED) { @@ -3474,7 +3474,7 @@ static void end_mutex_wait_v1(PSI_mutex_locker* locker, int rc) DBUG_ASSERT(mutex != NULL); PFS_thread *thread= reinterpret_cast (state->m_thread); - register uint flags= state->m_flags; + uint flags= state->m_flags; if (flags & STATE_FLAG_TIMED) { @@ -3795,7 +3795,7 @@ static void end_table_io_wait_v1(PSI_table_locker* locker) break; } - register uint flags= state->m_flags; + uint flags= state->m_flags; if (flags & STATE_FLAG_TIMED) { @@ -3866,7 +3866,7 @@ static void end_table_lock_wait_v1(PSI_table_locker* locker) PFS_single_stat *stat= & table->m_table_stat.m_lock_stat.m_stat[state->m_index]; - register uint flags= state->m_flags; + uint flags= state->m_flags; if (flags & STATE_FLAG_TIMED) { @@ -4029,7 +4029,7 @@ static void start_file_wait_v1(PSI_file_locker *locker, PSI_file_locker_state *state= reinterpret_cast (locker); DBUG_ASSERT(state != NULL); - register uint flags= state->m_flags; + uint flags= state->m_flags; if (flags & STATE_FLAG_TIMED) { @@ -4065,7 +4065,7 @@ static void end_file_wait_v1(PSI_file_locker *locker, ulonglong timer_end= 0; ulonglong wait_time= 0; PFS_byte_stat *byte_stat; - register uint flags= state->m_flags; + uint flags= state->m_flags; size_t bytes= ((int)byte_count > -1 ? byte_count : 0); PFS_file_stat *file_stat; @@ -4253,7 +4253,7 @@ static void start_stage_v1(PSI_stage_key key, const char *src_file, int src_line /* Always update column threads.processlist_state. */ pfs_thread->m_stage= key; - if (! flag_global_instrumentation) + if (psi_unlikely(! flag_global_instrumentation)) return; if (flag_thread_instrumentation && ! pfs_thread->m_enabled) @@ -4353,7 +4353,7 @@ static void end_stage_v1() pfs_thread->m_stage= 0; - if (! flag_global_instrumentation) + if (psi_unlikely(! flag_global_instrumentation)) return; if (flag_thread_instrumentation && ! pfs_thread->m_enabled) @@ -4412,7 +4412,7 @@ get_thread_statement_locker_v1(PSI_statement_locker_state *state, DBUG_ASSERT(state != NULL); DBUG_ASSERT(charset != NULL); - if (! flag_global_instrumentation) + if (psi_unlikely(! flag_global_instrumentation)) return NULL; PFS_statement_class *klass= find_statement_class(key); if (unlikely(klass == NULL)) @@ -4420,7 +4420,7 @@ get_thread_statement_locker_v1(PSI_statement_locker_state *state, if (! 
klass->m_enabled) return NULL; - register uint flags; + uint flags; if (flag_thread_instrumentation) { @@ -4597,7 +4597,7 @@ static void start_statement_v1(PSI_statement_locker *locker, PSI_statement_locker_state *state= reinterpret_cast (locker); DBUG_ASSERT(state != NULL); - register uint flags= state->m_flags; + uint flags= state->m_flags; ulonglong timer_start= 0; if (flags & STATE_FLAG_TIMED) @@ -4799,7 +4799,7 @@ static void end_statement_v1(PSI_statement_locker *locker, void *stmt_da) ulonglong timer_end= 0; ulonglong wait_time= 0; - register uint flags= state->m_flags; + uint flags= state->m_flags; if (flags & STATE_FLAG_TIMED) { @@ -5027,7 +5027,7 @@ static void end_socket_wait_v1(PSI_socket_locker *locker, size_t byte_count) ulonglong timer_end= 0; ulonglong wait_time= 0; PFS_byte_stat *byte_stat; - register uint flags= state->m_flags; + uint flags= state->m_flags; size_t bytes= ((int)byte_count > -1 ? byte_count : 0); switch (state->m_operation) diff --git a/storage/perfschema/pfs_global.h b/storage/perfschema/pfs_global.h index c6ff2dce361..fb7a2a60707 100644 --- a/storage/perfschema/pfs_global.h +++ b/storage/perfschema/pfs_global.h @@ -79,7 +79,7 @@ inline uint randomized_index(const void *ptr, uint max_size) static uint seed1= 0; static uint seed2= 0; uint result; - register intptr value; + intptr value; if (unlikely(max_size == 0)) return 0; diff --git a/storage/perfschema/pfs_instr_class.cc b/storage/perfschema/pfs_instr_class.cc index 3f5fce9e7a2..d2202190461 100644 --- a/storage/perfschema/pfs_instr_class.cc +++ b/storage/perfschema/pfs_instr_class.cc @@ -40,13 +40,6 @@ @{ */ -/** - Global performance schema flag. - Indicate if the performance schema is enabled. - This flag is set at startup, and never changes. -*/ -my_bool pfs_enabled= TRUE; - /** PFS_INSTRUMENT option settings array and associated state variable to serialize access during shutdown. diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 7a6d8ef8765..d37f7e8bf0f 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -19,6 +19,9 @@ IF(HAVE_SCHED_GETCPU) ADD_DEFINITIONS(-DHAVE_SCHED_GETCPU=1 -DROCKSDB_SCHED_GETCPU_PRESENT) ENDIF() +IF(WITH_VALGRIND) + ADD_DEFINITIONS(-DROCKSDB_VALGRIND_RUN=1) +ENDIF() # We've had our builders hang during the build process. This prevents MariaRocks # to be built on 32 bit intel OS kernels. 
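The pfs.cc hunks above hoist the flag_global_instrumentation check to the very top of open_table_v1()/rebind_table_v1() and wrap it in psi_unlikely, so that a disabled performance schema costs only one predicted-not-taken branch. Below is a minimal, hypothetical sketch of that early-exit pattern; it assumes psi_unlikely reduces to a __builtin_expect-style branch hint and uses simplified stand-in types (PSI_table, open_table_sketch, flag_global_instrumentation are placeholders, not the real server definitions):

#include <cstdio>

// Hypothetical stand-in for the real psi_unlikely macro: hint the compiler
// that the branch is rarely taken, so the common path stays fast.
#if defined(__GNUC__)
#define psi_unlikely(x) __builtin_expect(!!(x), 0)
#else
#define psi_unlikely(x) (x)
#endif

static bool flag_global_instrumentation = false;  // simplified global switch

struct PSI_table {};  // stand-in for the real instrumented table handle

// Sketch of the hoisted check: when the performance schema is globally off,
// return before touching any share or class state. Table handles are short
// lived, so instrumentation can be re-created on the next open().
PSI_table *open_table_sketch(void *share) {
  if (psi_unlikely(!flag_global_instrumentation))
    return nullptr;
  if (share == nullptr)
    return nullptr;
  // ... the real code would look up the share, check per-class enable
  // flags, and create an instrumented table handle here ...
  return new PSI_table();
}

int main() {
  int dummy;
  std::printf("instrumented: %s\n",
              open_table_sketch(&dummy) ? "yes" : "no");
  return 0;
}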
@@ -99,6 +102,8 @@ SET(ROCKSDB_SE_SOURCES rdb_threads.h rdb_psi.h rdb_psi.cc + rdb_sst_info.cc + rdb_sst_info.h ) # MariaDB: the following is added in build_rocksdb.cmake, when appropriate: @@ -106,7 +111,7 @@ SET(ROCKSDB_SE_SOURCES #ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX #) -MYSQL_ADD_PLUGIN(rocksdb ${ROCKSDB_SE_SOURCES} STORAGE_ENGINE +MYSQL_ADD_PLUGIN(rocksdb ${ROCKSDB_SE_SOURCES} MODULE_ONLY STORAGE_ENGINE MODULE_OUTPUT_NAME ha_rocksdb COMPONENT rocksdb-engine) @@ -141,8 +146,6 @@ ADD_CONVENIENCE_LIBRARY(rocksdb_aux_lib event_listener.h rdb_perf_context.cc rdb_perf_context.h - rdb_sst_info.cc - rdb_sst_info.h rdb_buff.h rdb_mariadb_port.h ) @@ -182,9 +185,9 @@ IF(HAVE_SCHED_GETCPU) ADD_DEFINITIONS(-DHAVE_SCHED_GETCPU=1) ENDIF() -IF (NOT "$ENV{WITH_TBB}" STREQUAL "") +IF (WITH_TBB) SET(rocksdb_static_libs ${rocksdb_static_libs} - $ENV{WITH_TBB}/libtbb${PIC_EXT}.a) + ${WITH_TBB}/lib/libtbb${PIC_EXT}.a) ADD_DEFINITIONS(-DTBB) ENDIF() @@ -210,7 +213,7 @@ MYSQL_ADD_EXECUTABLE(sst_dump rocksdb/tools/sst_dump.cc COMPONENT rocksdb-engine TARGET_LINK_LIBRARIES(sst_dump rocksdblib) MYSQL_ADD_EXECUTABLE(mysql_ldb tools/mysql_ldb.cc COMPONENT rocksdb-engine) -TARGET_LINK_LIBRARIES(mysql_ldb rocksdb_tools rocksdb_aux_lib) +TARGET_LINK_LIBRARIES(mysql_ldb rocksdb_tools rocksdb_aux_lib dbug) INSTALL_SCRIPT(myrocks_hotbackup COMPONENT rocksdb-engine) @@ -231,6 +234,13 @@ IF(MSVC) ENDIF() ENDIF() +# Enable ZSTD if available. Upstream rocksdb cmake will use WITH_ZSTD and set +# defines within their code. +FIND_PACKAGE(zstd) +IF (ZSTD_FOUND) + SET(WITH_ZSTD ON) +ENDIF() + IF(GIT_EXECUTABLE) EXECUTE_PROCESS( COMMAND ${GIT_EXECUTABLE} rev-parse HEAD diff --git a/storage/rocksdb/build_rocksdb.cmake b/storage/rocksdb/build_rocksdb.cmake index 5810412f566..c76f711463e 100644 --- a/storage/rocksdb/build_rocksdb.cmake +++ b/storage/rocksdb/build_rocksdb.cmake @@ -12,11 +12,6 @@ INCLUDE_DIRECTORIES( ${ROCKSDB_SOURCE_DIR}/third-party/gtest-1.7.0/fused-src ) -IF(WIN32) - INCLUDE_DIRECTORIES(BEFORE - ${CMAKE_CURRENT_SOURCE_DIR}/patch) -ENDIF() - list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/") if(WIN32) @@ -245,6 +240,7 @@ set(ROCKSDB_SOURCES table/block_based_table_factory.cc table/block_based_table_reader.cc table/block_builder.cc + table/block_fetcher.cc table/block_prefix_index.cc table/bloom_block.cc table/cuckoo_table_builder.cc @@ -340,15 +336,16 @@ set(ROCKSDB_SOURCES utilities/transactions/optimistic_transaction_db_impl.cc utilities/transactions/pessimistic_transaction.cc utilities/transactions/pessimistic_transaction_db.cc + utilities/transactions/snapshot_checker.cc utilities/transactions/transaction_base.cc utilities/transactions/transaction_db_mutex_impl.cc utilities/transactions/transaction_lock_mgr.cc utilities/transactions/transaction_util.cc utilities/transactions/write_prepared_txn.cc + utilities/transactions/write_prepared_txn_db.cc utilities/ttl/db_ttl_impl.cc utilities/write_batch_with_index/write_batch_with_index.cc utilities/write_batch_with_index/write_batch_with_index_internal.cc - ) if(WIN32) diff --git a/storage/rocksdb/event_listener.cc b/storage/rocksdb/event_listener.cc index f949f166b2e..85c23f47c62 100644 --- a/storage/rocksdb/event_listener.cc +++ b/storage/rocksdb/event_listener.cc @@ -82,4 +82,14 @@ void Rdb_event_listener::OnExternalFileIngested( DBUG_ASSERT(db != nullptr); update_index_stats(info.table_properties); } + +void Rdb_event_listener::OnBackgroundError( + rocksdb::BackgroundErrorReason reason, rocksdb::Status *status) { + 
rdb_log_status_error(*status, "Error detected in background"); + sql_print_error("RocksDB: BackgroundErrorReason: %d", (int)reason); + if (status->IsCorruption()) { + rdb_persist_corruption_marker(); + abort(); + } +} } // namespace myrocks diff --git a/storage/rocksdb/event_listener.h b/storage/rocksdb/event_listener.h index d535031644b..8772105de36 100644 --- a/storage/rocksdb/event_listener.h +++ b/storage/rocksdb/event_listener.h @@ -37,6 +37,9 @@ public: rocksdb::DB *db, const rocksdb::ExternalFileIngestionInfo &ingestion_info) override; + void OnBackgroundError(rocksdb::BackgroundErrorReason reason, + rocksdb::Status *status) override; + private: Rdb_ddl_manager *m_ddl_manager; diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index bd28b27576e..48297aa38f0 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -144,10 +144,6 @@ static handler *rocksdb_create_handler(my_core::handlerton *hton, my_core::TABLE_SHARE *table_arg, my_core::MEM_ROOT *mem_root); -bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd, - const rocksdb::Slice &eq_cond, - const bool use_all_keys); - static rocksdb::CompactRangeOptions getCompactRangeOptions() { rocksdb::CompactRangeOptions compact_range_options; compact_range_options.bottommost_level_compaction = @@ -220,8 +216,8 @@ static int rocksdb_compact_column_family(THD *const thd, if (const char *const cf = value->val_str(value, buff, &len)) { auto cfh = cf_manager.get_cf(cf); if (cfh != nullptr && rdb != nullptr) { - sql_print_information("RocksDB: Manual compaction of column family: %s\n", - cf); + sql_print_verbose_info("RocksDB: Manual compaction of column family: %s\n", + cf); rdb->CompactRange(getCompactRangeOptions(), cfh, nullptr, nullptr); } } @@ -423,24 +419,37 @@ static void rocksdb_set_collation_exception_list(THD *thd, void *var_ptr, const void *save); -void rocksdb_set_update_cf_options(THD *thd, - struct st_mysql_sys_var *var, - void *var_ptr, - const void *save); +static int rocksdb_validate_update_cf_options(THD *thd, + struct st_mysql_sys_var *var, + void *save, + st_mysql_value *value); -static void -rocksdb_set_bulk_load(THD *thd, - struct st_mysql_sys_var *var MY_ATTRIBUTE((__unused__)), - void *var_ptr, const void *save); +static void rocksdb_set_update_cf_options(THD *thd, + struct st_mysql_sys_var *var, + void *var_ptr, const void *save); -static void rocksdb_set_bulk_load_allow_unsorted( - THD *thd, struct st_mysql_sys_var *var MY_ATTRIBUTE((__unused__)), - void *var_ptr, const void *save); +static int rocksdb_check_bulk_load(THD *const thd, + struct st_mysql_sys_var *var + MY_ATTRIBUTE((__unused__)), + void *save, + struct st_mysql_value *value); + +static int rocksdb_check_bulk_load_allow_unsorted( + THD *const thd, struct st_mysql_sys_var *var MY_ATTRIBUTE((__unused__)), + void *save, struct st_mysql_value *value); static void rocksdb_set_max_background_jobs(THD *thd, struct st_mysql_sys_var *const var, void *const var_ptr, const void *const save); +static void rocksdb_set_bytes_per_sync(THD *thd, + struct st_mysql_sys_var *const var, + void *const var_ptr, + const void *const save); +static void rocksdb_set_wal_bytes_per_sync(THD *thd, + struct st_mysql_sys_var *const var, + void *const var_ptr, + const void *const save); ////////////////////////////////////////////////////////////////////////////// // Options definitions ////////////////////////////////////////////////////////////////////////////// @@ -471,6 +480,7 @@ static char *rocksdb_compact_cf_name; static char 
*rocksdb_checkpoint_name; static my_bool rocksdb_signal_drop_index_thread; static my_bool rocksdb_strict_collation_check = 1; +static my_bool rocksdb_ignore_unknown_options = 1; static my_bool rocksdb_enable_2pc = 0; static char *rocksdb_strict_collation_exceptions; static my_bool rocksdb_collect_sst_properties = 1; @@ -484,7 +494,6 @@ static int rocksdb_debug_ttl_read_filter_ts = 0; static my_bool rocksdb_debug_ttl_ignore_pk = 0; static my_bool rocksdb_reset_stats = 0; static uint32_t rocksdb_io_write_timeout_secs = 0; -static uint64_t rocksdb_number_stat_computes = 0; static uint32_t rocksdb_seconds_between_stat_computes = 3600; static long long rocksdb_compaction_sequential_deletes = 0l; static long long rocksdb_compaction_sequential_deletes_window = 0l; @@ -495,11 +504,14 @@ static uint32_t rocksdb_table_stats_sampling_pct; static my_bool rocksdb_enable_bulk_load_api = 1; static my_bool rocksdb_print_snapshot_conflict_queries = 0; static my_bool rocksdb_large_prefix = 0; +static my_bool rocksdb_allow_to_start_after_corruption = 0; static char* rocksdb_git_hash; char *compression_types_val= const_cast(get_rocksdb_supported_compression_types()); +std::atomic rocksdb_row_lock_deadlocks(0); +std::atomic rocksdb_row_lock_wait_timeouts(0); std::atomic rocksdb_snapshot_conflict_errors(0); std::atomic rocksdb_wal_group_syncs(0); @@ -510,8 +522,9 @@ static std::unique_ptr rdb_init_rocksdb_db_options(void) { o->listeners.push_back(std::make_shared(&ddl_manager)); o->info_log_level = rocksdb::InfoLogLevel::INFO_LEVEL; o->max_subcompactions = DEFAULT_SUBCOMPACTIONS; + o->max_open_files = -2; // auto-tune to 50% open_files_limit - o->concurrent_prepare = true; + o->two_write_queues = true; o->manual_wal_flush = true; return o; } @@ -593,6 +606,33 @@ static void rocksdb_set_io_write_timeout( RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } +enum rocksdb_flush_log_at_trx_commit_type : unsigned int { + FLUSH_LOG_NEVER = 0, + FLUSH_LOG_SYNC, + FLUSH_LOG_BACKGROUND, + FLUSH_LOG_MAX /* must be last */ +}; + +static int rocksdb_validate_flush_log_at_trx_commit( + THD *const thd, + struct st_mysql_sys_var *const var, /* in: pointer to system variable */ + void *var_ptr, /* out: immediate result for update function */ + struct st_mysql_value *const value /* in: incoming value */) { + long long new_value; + + /* value is NULL */ + if (value->val_int(value, &new_value)) { + return HA_EXIT_FAILURE; + } + + if (rocksdb_db_options->allow_mmap_writes && new_value != FLUSH_LOG_NEVER) { + return HA_EXIT_FAILURE; + } + + *static_cast(var_ptr) = static_cast(new_value); + return HA_EXIT_SUCCESS; +} + static const char *index_type_names[] = {"kBinarySearch", "kHashSearch", NullS}; static TYPELIB index_type_typelib = {array_elements(index_type_names) - 1, @@ -600,7 +640,7 @@ static TYPELIB index_type_typelib = {array_elements(index_type_names) - 1, nullptr}; const ulong RDB_MAX_LOCK_WAIT_SECONDS = 1024 * 1024 * 1024; -const ulong RDB_MAX_ROW_LOCKS = 1024 * 1024 * 1024; +const ulong RDB_MAX_ROW_LOCKS = 1024 * 1024; const ulong RDB_DEFAULT_BULK_LOAD_SIZE = 1000; const ulong RDB_MAX_BULK_LOAD_SIZE = 1024 * 1024 * 1024; const size_t RDB_DEFAULT_MERGE_BUF_SIZE = 64 * 1024 * 1024; @@ -640,12 +680,13 @@ static MYSQL_THDVAR_BOOL( bulk_load, PLUGIN_VAR_RQCMDARG, "Use bulk-load mode for inserts. 
This disables " "unique_checks and enables rocksdb_commit_in_the_middle.", - nullptr, rocksdb_set_bulk_load, FALSE); + rocksdb_check_bulk_load, nullptr, FALSE); static MYSQL_THDVAR_BOOL(bulk_load_allow_unsorted, PLUGIN_VAR_RQCMDARG, "Allow unsorted input during bulk-load. " "Can be changed only when bulk load is disabled.", - nullptr, rocksdb_set_bulk_load_allow_unsorted, FALSE); + rocksdb_check_bulk_load_allow_unsorted, nullptr, + FALSE); static MYSQL_SYSVAR_BOOL(enable_bulk_load_api, rocksdb_enable_bulk_load_api, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, @@ -751,11 +792,11 @@ static MYSQL_SYSVAR_BOOL( rocksdb_db_options->create_if_missing); static MYSQL_SYSVAR_BOOL( - concurrent_prepare, - *reinterpret_cast(&rocksdb_db_options->concurrent_prepare), + two_write_queues, + *reinterpret_cast(&rocksdb_db_options->two_write_queues), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::concurrent_prepare for RocksDB", nullptr, nullptr, - rocksdb_db_options->concurrent_prepare); + "DBOptions::two_write_queues for RocksDB", nullptr, nullptr, + rocksdb_db_options->two_write_queues); static MYSQL_SYSVAR_BOOL( manual_wal_flush, @@ -882,7 +923,7 @@ static MYSQL_SYSVAR_INT(max_open_files, rocksdb_db_options->max_open_files, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_open_files for RocksDB", nullptr, nullptr, rocksdb_db_options->max_open_files, - /* min */ -1, /* max */ INT_MAX, 0); + /* min */ -2, /* max */ INT_MAX, 0); static MYSQL_SYSVAR_UINT64_T(max_total_wal_size, rocksdb_db_options->max_total_wal_size, @@ -1063,16 +1104,18 @@ static MYSQL_SYSVAR_BOOL( rocksdb_db_options->use_adaptive_mutex); static MYSQL_SYSVAR_UINT64_T(bytes_per_sync, rocksdb_db_options->bytes_per_sync, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + PLUGIN_VAR_RQCMDARG, "DBOptions::bytes_per_sync for RocksDB", nullptr, - nullptr, rocksdb_db_options->bytes_per_sync, + rocksdb_set_bytes_per_sync, + rocksdb_db_options->bytes_per_sync, /* min */ 0L, /* max */ ULONGLONG_MAX, 0); static MYSQL_SYSVAR_UINT64_T(wal_bytes_per_sync, rocksdb_db_options->wal_bytes_per_sync, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + PLUGIN_VAR_RQCMDARG, "DBOptions::wal_bytes_per_sync for RocksDB", nullptr, - nullptr, rocksdb_db_options->wal_bytes_per_sync, + rocksdb_set_wal_bytes_per_sync, + rocksdb_db_options->wal_bytes_per_sync, /* min */ 0L, /* max */ ULONGLONG_MAX, 0); static MYSQL_SYSVAR_BOOL( @@ -1190,22 +1233,17 @@ static MYSQL_SYSVAR_STR(override_cf_options, rocksdb_override_cf_options, static MYSQL_SYSVAR_STR(update_cf_options, rocksdb_update_cf_options, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC /* psergey-merge: need this? : PLUGIN_VAR_ALLOCATED*/, - "Option updates per column family for RocksDB", nullptr, + "Option updates per column family for RocksDB", + rocksdb_validate_update_cf_options, rocksdb_set_update_cf_options, nullptr); -enum rocksdb_flush_log_at_trx_commit_type : unsigned int { - FLUSH_LOG_NEVER = 0, - FLUSH_LOG_SYNC, - FLUSH_LOG_BACKGROUND, - FLUSH_LOG_MAX /* must be last */ -}; - static MYSQL_SYSVAR_UINT(flush_log_at_trx_commit, rocksdb_flush_log_at_trx_commit, PLUGIN_VAR_RQCMDARG, "Sync on transaction commit. Similar to " "innodb_flush_log_at_trx_commit. 
1: sync on commit, " "0,2: not sync on commit", - nullptr, nullptr, /* default */ FLUSH_LOG_SYNC, + rocksdb_validate_flush_log_at_trx_commit, nullptr, + /* default */ FLUSH_LOG_SYNC, /* min */ FLUSH_LOG_NEVER, /* max */ FLUSH_LOG_BACKGROUND, 0); @@ -1346,6 +1384,11 @@ static MYSQL_SYSVAR_BOOL(enable_2pc, rocksdb_enable_2pc, PLUGIN_VAR_RQCMDARG, "Enable two phase commit for MyRocks", nullptr, nullptr, TRUE); +static MYSQL_SYSVAR_BOOL(ignore_unknown_options, rocksdb_ignore_unknown_options, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, + "Enable ignoring unknown options passed to RocksDB", + nullptr, nullptr, TRUE); + static MYSQL_SYSVAR_BOOL(strict_collation_check, rocksdb_strict_collation_check, PLUGIN_VAR_RQCMDARG, "Enforce case sensitive collation for MyRocks indexes", @@ -1377,11 +1420,6 @@ static MYSQL_SYSVAR_BOOL( rocksdb_force_flush_memtable_and_lzero_now, rocksdb_force_flush_memtable_and_lzero_now_stub, FALSE); -static MYSQL_THDVAR_BOOL( - flush_memtable_on_analyze, PLUGIN_VAR_RQCMDARG, - "Forces memtable flush on ANALZYE table to get accurate cardinality", - nullptr, nullptr, true); - static MYSQL_SYSVAR_UINT( seconds_between_stat_computes, rocksdb_seconds_between_stat_computes, PLUGIN_VAR_RQCMDARG, @@ -1498,6 +1536,13 @@ "index prefix length is 767.", nullptr, nullptr, FALSE); +static MYSQL_SYSVAR_BOOL( + allow_to_start_after_corruption, rocksdb_allow_to_start_after_corruption, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, + "Allow the server to start successfully even if RocksDB corruption is " + "detected.", + nullptr, nullptr, FALSE); + static const int ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE = 100; static struct st_mysql_sys_var *rocksdb_system_variables[] = { @@ -1523,7 +1568,7 @@ MYSQL_SYSVAR(skip_bloom_filter_on_read), MYSQL_SYSVAR(create_if_missing), - MYSQL_SYSVAR(concurrent_prepare), + MYSQL_SYSVAR(two_write_queues), MYSQL_SYSVAR(manual_wal_flush), MYSQL_SYSVAR(create_missing_column_families), MYSQL_SYSVAR(error_if_exists), @@ -1605,6 +1650,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(signal_drop_index_thread), MYSQL_SYSVAR(pause_background_work), MYSQL_SYSVAR(enable_2pc), + MYSQL_SYSVAR(ignore_unknown_options), MYSQL_SYSVAR(strict_collation_check), MYSQL_SYSVAR(strict_collation_exceptions), MYSQL_SYSVAR(collect_sst_properties), @@ -1618,7 +1664,6 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(debug_ttl_ignore_pk), MYSQL_SYSVAR(reset_stats), MYSQL_SYSVAR(io_write_timeout), - MYSQL_SYSVAR(flush_memtable_on_analyze), MYSQL_SYSVAR(seconds_between_stat_computes), MYSQL_SYSVAR(compaction_sequential_deletes), @@ -1640,6 +1685,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(table_stats_sampling_pct), MYSQL_SYSVAR(large_prefix), + MYSQL_SYSVAR(allow_to_start_after_corruption), MYSQL_SYSVAR(git_hash), nullptr}; @@ -1768,10 +1814,22 @@ protected: ulonglong m_update_count = 0; ulonglong m_delete_count = 0; ulonglong m_lock_count = 0; + std::unordered_map m_auto_incr_map; bool m_is_delayed_snapshot = false; bool m_is_two_phase = false; +private: + /* Number of RocksDB savepoints taken */ + int m_n_savepoints; + /* + Number of write operations this transaction had when we took the last + savepoint (the idea is not to take another savepoint if we haven't made + any changes) + */ + ulonglong m_writes_at_last_savepoint; + +protected: THD *m_thd = nullptr; rocksdb::ReadOptions m_read_opts; @@ -1799,8 
+1857,39 @@ protected: virtual rocksdb::Iterator * get_iterator(const rocksdb::ReadOptions &options, rocksdb::ColumnFamilyHandle *column_family) = 0; + +protected: + /* + The following two are helper functions to be overloaded by child classes. + They should provide RocksDB's savepoint semantics. + */ + virtual void do_set_savepoint() = 0; + virtual void do_rollback_to_savepoint() = 0; -public: + /* + @detail + This function takes in the WriteBatch of the transaction to add + all the AUTO_INCREMENT merges. It does so by iterating through + m_auto_incr_map and then constructing key/value pairs to call merge upon. + + @param wb + */ + rocksdb::Status merge_auto_incr_map(rocksdb::WriteBatchBase *const wb) { + DBUG_EXECUTE_IF("myrocks_autoinc_upgrade", return rocksdb::Status::OK();); + + // Iterate through the merge map merging all keys into data dictionary. + rocksdb::Status s; + for (auto &it : m_auto_incr_map) { + s = dict_manager.put_auto_incr_val(wb, it.first, it.second); + if (!s.ok()) { + return s; + } + } + m_auto_incr_map.clear(); + return s; + } + + public: const char *m_mysql_log_file_name; my_off_t m_mysql_log_offset; #ifdef MARIAROCKS_NOT_YET @@ -1869,6 +1958,7 @@ public: m_detailed_error.copy(timeout_message( "index", tbl_def->full_tablename().c_str(), kd.get_name().c_str())); table_handler->m_lock_wait_timeout_counter.inc(); + rocksdb_row_lock_wait_timeouts++; return HA_ERR_LOCK_WAIT_TIMEOUT; } @@ -1878,6 +1968,7 @@ public: false /* just statement */); m_detailed_error = String(); table_handler->m_deadlock_counter.inc(); + rocksdb_row_lock_deadlocks++; return HA_ERR_LOCK_DEADLOCK; } else if (s.IsBusy()) { rocksdb_snapshot_conflict_errors++; @@ -2007,10 +2098,16 @@ public: rollback(); return true; } else { +#ifdef MARIAROCKS_NOT_YET + /* + Storing binlog position inside MyRocks is needed only for restoring + MyRocks from backups. This feature is not supported yet. + */ mysql_bin_log_commit_pos(m_thd, &m_mysql_log_offset, &m_mysql_log_file_name); binlog_manager.update(m_mysql_log_file_name, m_mysql_log_offset, get_write_batch()); +#endif return commit_no_binlog(); } } @@ -2031,28 +2128,110 @@ public: bool has_snapshot() const { return m_read_opts.snapshot != nullptr; } private: - // The tables we are currently loading. In a partitioned table this can - // have more than one entry - std::vector m_curr_bulk_load; + // The Rdb_sst_info structures we are currently loading. 
In a partitioned + // table this can have more than one entry + std::vector> m_curr_bulk_load; + std::string m_curr_bulk_load_tablename; + + /* External merge sorts for bulk load: key ID -> merge sort instance */ + std::unordered_map m_key_merge; public: - int finish_bulk_load() { - int rc = 0; + int get_key_merge(GL_INDEX_ID kd_gl_id, rocksdb::ColumnFamilyHandle *cf, + Rdb_index_merge **key_merge) { + int res; + auto it = m_key_merge.find(kd_gl_id); + if (it == m_key_merge.end()) { + m_key_merge.emplace( + std::piecewise_construct, std::make_tuple(kd_gl_id), + std::make_tuple( + get_rocksdb_tmpdir(), THDVAR(get_thd(), merge_buf_size), + THDVAR(get_thd(), merge_combine_read_size), + THDVAR(get_thd(), merge_tmp_file_removal_delay_ms), cf)); + it = m_key_merge.find(kd_gl_id); + if ((res = it->second.init()) != 0) { + return res; + } + } + *key_merge = &it->second; + return HA_EXIT_SUCCESS; + } - std::vector::iterator it; - while ((it = m_curr_bulk_load.begin()) != m_curr_bulk_load.end()) { - int rc2 = (*it)->finalize_bulk_load(); + int finish_bulk_load(int print_client_error = true) { + int rc = 0, rc2; + + std::vector>::iterator it; + for (it = m_curr_bulk_load.begin(); it != m_curr_bulk_load.end(); it++) { + rc2 = (*it)->commit(print_client_error); if (rc2 != 0 && rc == 0) { rc = rc2; } } - + m_curr_bulk_load.clear(); + m_curr_bulk_load_tablename.clear(); DBUG_ASSERT(m_curr_bulk_load.size() == 0); + // Flush the index_merge sort buffers + if (!m_key_merge.empty()) { + rocksdb::Slice merge_key; + rocksdb::Slice merge_val; + for (auto it = m_key_merge.begin(); it != m_key_merge.end(); it++) { + GL_INDEX_ID index_id = it->first; + std::shared_ptr keydef = + ddl_manager.safe_find(index_id); + std::string table_name = ddl_manager.safe_get_table_name(index_id); + + // Unable to find key definition or table name since the + // table could have been dropped. + // TODO(herman): there is a race here between dropping the table + // and detecting a drop here. If the table is dropped while bulk + // loading is finishing, these keys being added here may + // be missed by the compaction filter and not be marked for + // removal. It is unclear how to lock the sql table from the storage + // engine to prevent modifications to it while bulk load is occurring. + if (keydef == nullptr || table_name.empty()) { + rc2 = HA_ERR_ROCKSDB_BULK_LOAD; + break; + } + const std::string &index_name = keydef->get_name(); + Rdb_index_merge &rdb_merge = it->second; + + // Rdb_sst_info expects a denormalized table name in the form of + // "./database/table" + std::replace(table_name.begin(), table_name.end(), '.', '/'); + table_name = "./" + table_name; + Rdb_sst_info sst_info(rdb, table_name, index_name, rdb_merge.get_cf(), + *rocksdb_db_options, + THDVAR(get_thd(), trace_sst_api)); + + while ((rc2 = rdb_merge.next(&merge_key, &merge_val)) == 0) { + if ((rc2 = sst_info.put(merge_key, merge_val)) != 0) { + break; + } + } + + // rc2 == -1 => finished ok; rc2 > 0 => error + if (rc2 > 0 || (rc2 = sst_info.commit(print_client_error)) != 0) { + if (rc == 0) { + rc = rc2; + } + break; + } + } + m_key_merge.clear(); + + /* + Explicitly tell jemalloc to clean up any unused dirty pages at this + point. + See https://reviews.facebook.net/D63723 for more details. 
+ */ + purge_all_jemalloc_arenas(); + } return rc; } - void start_bulk_load(ha_rocksdb *const bulk_load) { + int start_bulk_load(ha_rocksdb *const bulk_load, + std::shared_ptr sst_info) { /* If we already have an open bulk load of a table and the name doesn't match the current one, close out the currently running one. This allows @@ -2062,29 +2241,46 @@ public: DBUG_ASSERT(bulk_load != nullptr); if (!m_curr_bulk_load.empty() && - !bulk_load->same_table(*m_curr_bulk_load[0])) { + bulk_load->get_table_basename() != m_curr_bulk_load_tablename) { const auto res = finish_bulk_load(); - SHIP_ASSERT(res == 0); - } - - m_curr_bulk_load.push_back(bulk_load); - } - - void end_bulk_load(ha_rocksdb *const bulk_load) { - for (auto it = m_curr_bulk_load.begin(); it != m_curr_bulk_load.end(); - it++) { - if (*it == bulk_load) { - m_curr_bulk_load.erase(it); - return; + if (res != HA_EXIT_SUCCESS) { + m_curr_bulk_load.clear(); + m_curr_bulk_load_tablename.clear(); + return res; } } - // Should not reach here - SHIP_ASSERT(0); + /* + This used to track ha_rocksdb handler objects, but those can be + freed by the table cache while this was referencing them. Instead + of tracking ha_rocksdb handler objects, this now tracks the + Rdb_sst_info allocated, and both the ha_rocksdb handler and the + Rdb_transaction both have shared pointers to them. + + On transaction complete, it will commit each Rdb_sst_info structure found. + If the ha_rocksdb object is freed, etc., it will also commit + the Rdb_sst_info. The Rdb_sst_info commit path needs to be idempotent. + */ + m_curr_bulk_load.push_back(sst_info); + m_curr_bulk_load_tablename = bulk_load->get_table_basename(); + return HA_EXIT_SUCCESS; } int num_ongoing_bulk_load() const { return m_curr_bulk_load.size(); } + const char *get_rocksdb_tmpdir() const { + const char *tmp_dir = THDVAR(get_thd(), tmpdir); + + /* + We want to treat an empty string as nullptr, in these cases DDL operations + will use the default --tmpdir passed to mysql instead. + */ + if (tmp_dir != nullptr && *tmp_dir == '\0') { + tmp_dir = nullptr; + } + return (tmp_dir); + } + /* Flush the data accumulated so far. This assumes we're doing a bulk insert. 
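The get_key_merge()/finish_bulk_load() changes above keep one external merge-sort instance per index id in an unordered_map and construct it in place on first use, since the merge object is not copyable. Below is a minimal sketch of that lazy, in-place construction pattern; Merger and get_merger are hypothetical simplified stand-ins, not the real Rdb_index_merge API:

#include <cstdio>
#include <string>
#include <tuple>
#include <unordered_map>

// Hypothetical stand-in for Rdb_index_merge: non-copyable and requiring
// constructor arguments, so it cannot be created via operator[].
struct Merger {
  std::string tmpdir;
  explicit Merger(std::string dir) : tmpdir(std::move(dir)) {}
  Merger(const Merger &) = delete;
  Merger &operator=(const Merger &) = delete;
  int init() { return 0; }  // 0 == success, mirroring the HA_EXIT_SUCCESS style
};

// Look up the merger for an index id, creating and initializing it on first
// use; returns 0 on success and a non-zero error code otherwise.
static int get_merger(std::unordered_map<int, Merger> &mergers, int index_id,
                      Merger **out) {
  auto it = mergers.find(index_id);
  if (it == mergers.end()) {
    // piecewise_construct builds the value in place from its constructor
    // arguments; no copy or move of Merger is required.
    it = mergers.emplace(std::piecewise_construct,
                         std::forward_as_tuple(index_id),
                         std::forward_as_tuple("/tmp")).first;
    if (int err = it->second.init()) return err;
  }
  *out = &it->second;
  return 0;
}

int main() {
  std::unordered_map<int, Merger> mergers;
  Merger *m = nullptr;
  if (get_merger(mergers, 42, &m) == 0)
    std::printf("merger tmpdir: %s\n", m->tmpdir.c_str());
  return 0;
}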
@@ -2111,6 +2307,20 @@ public: return false; } + void set_auto_incr(const GL_INDEX_ID &gl_index_id, ulonglong curr_id) { + m_auto_incr_map[gl_index_id] = + std::max(m_auto_incr_map[gl_index_id], curr_id); + } + +#ifndef NDEBUG + ulonglong get_auto_incr(const GL_INDEX_ID &gl_index_id) { + if (m_auto_incr_map.count(gl_index_id) > 0) { + return m_auto_incr_map[gl_index_id]; + } + return 0; + } +#endif + virtual rocksdb::Status put(rocksdb::ColumnFamilyHandle *const column_family, const rocksdb::Slice &key, const rocksdb::Slice &value) = 0; @@ -2134,15 +2344,17 @@ public: virtual rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family, const rocksdb::Slice &key, - std::string *value) const = 0; + rocksdb::PinnableSlice *const value) const = 0; virtual rocksdb::Status get_for_update(rocksdb::ColumnFamilyHandle *const column_family, - const rocksdb::Slice &key, std::string *const value, + const rocksdb::Slice &key, rocksdb::PinnableSlice *const value, bool exclusive) = 0; rocksdb::Iterator * get_iterator(rocksdb::ColumnFamilyHandle *const column_family, bool skip_bloom_filter, bool fill_cache, + const rocksdb::Slice &eq_cond_lower_bound, + const rocksdb::Slice &eq_cond_upper_bound, bool read_current = false, bool create_snapshot = true) { // Make sure we are not doing both read_current (which implies we don't // want a snapshot) and create_snapshot which makes sure we create @@ -2157,6 +2369,8 @@ public: if (skip_bloom_filter) { options.total_order_seek = true; + options.iterate_lower_bound = &eq_cond_lower_bound; + options.iterate_upper_bound = &eq_cond_upper_bound; } else { // With this option, Iterator::Valid() returns false if key // is outside of the prefix bloom filter range set at Seek(). @@ -2173,6 +2387,50 @@ public: virtual bool is_tx_started() const = 0; virtual void start_tx() = 0; virtual void start_stmt() = 0; + + void set_initial_savepoint() { + /* + Set the initial savepoint. If the first statement in the transaction + fails, we need something to roll back to, without rolling back the + entire transaction. + */ + do_set_savepoint(); + m_n_savepoints= 1; + m_writes_at_last_savepoint= m_write_count; + } + + /* + Called when a "top-level" statement inside a transaction completes + successfully and its changes become part of the transaction's changes. + */ + void make_stmt_savepoint_permanent() { + + // Take another RocksDB savepoint only if we had changes since the last + // one. This is very important for long transactions doing lots of + // SELECTs. 
+ if (m_writes_at_last_savepoint != m_write_count) + { + do_set_savepoint(); + m_writes_at_last_savepoint= m_write_count; + m_n_savepoints++; + } + } + + + /* + Rollback to the savepoint we've set before the last statement + */ + void rollback_to_stmt_savepoint() { + if (m_writes_at_last_savepoint != m_write_count) { + do_rollback_to_savepoint(); + if (!--m_n_savepoints) { + do_set_savepoint(); + m_n_savepoints= 1; + } + m_writes_at_last_savepoint= m_write_count; + } + } + virtual void rollback_stmt() = 0; void set_tx_failed(bool failed_arg) { m_is_tx_failed = failed_arg; } @@ -2270,6 +2528,12 @@ private: return false; } + s = merge_auto_incr_map(m_rocksdb_tx->GetWriteBatch()->GetWriteBatch()); + if (!s.ok()) { + rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); + return false; + } + s = m_rocksdb_tx->Prepare(); if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); @@ -2280,13 +2544,24 @@ private: bool commit_no_binlog() override { bool res = false; - release_snapshot(); - const rocksdb::Status s = m_rocksdb_tx->Commit(); + rocksdb::Status s; + + s = merge_auto_incr_map(m_rocksdb_tx->GetWriteBatch()->GetWriteBatch()); if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); res = true; + goto error; } + release_snapshot(); + s = m_rocksdb_tx->Commit(); + if (!s.ok()) { + rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); + res = true; + goto error; + } + +error: /* Save the transaction object to be reused */ release_tx(); @@ -2307,6 +2582,7 @@ public: m_update_count = 0; m_delete_count = 0; m_lock_count = 0; + m_auto_incr_map.clear(); m_ddl_transaction = false; if (m_rocksdb_tx) { release_snapshot(); @@ -2408,18 +2684,25 @@ public: rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family, const rocksdb::Slice &key, - std::string *value) const override { + rocksdb::PinnableSlice *const value) const override { + // clean PinnableSlice right before Get() for multiple gets per statement + // the resources after the last Get in a statement are cleared in + // handler::reset call + value->Reset(); global_stats.queries[QUERIES_POINT].inc(); return m_rocksdb_tx->Get(m_read_opts, column_family, key, value); } rocksdb::Status get_for_update(rocksdb::ColumnFamilyHandle *const column_family, - const rocksdb::Slice &key, std::string *const value, + const rocksdb::Slice &key, rocksdb::PinnableSlice *const value, bool exclusive) override { if (++m_lock_count > m_max_row_locks) return rocksdb::Status::Aborted(rocksdb::Status::kLockLimit); + if (value != nullptr) { + value->Reset(); + } return m_rocksdb_tx->GetForUpdate(m_read_opts, column_family, key, value, exclusive); } @@ -2462,9 +2745,20 @@ public: m_read_opts = rocksdb::ReadOptions(); + set_initial_savepoint(); + m_ddl_transaction = false; } + /* Implementations of do_*savepoint based on rocksdb::Transaction savepoints */ + void do_set_savepoint() override { + m_rocksdb_tx->SetSavePoint(); + } + + void do_rollback_to_savepoint() override { + m_rocksdb_tx->RollbackToSavePoint(); + } + /* Start a statement inside a multi-statement transaction. 
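The savepoint helpers above take a new RocksDB savepoint only when the statement actually wrote something, and rollback_to_stmt_savepoint() re-establishes a savepoint after popping the last one so there is always one left to roll back to. Below is a minimal standalone sketch of that bookkeeping; TxSketch and its toy write log are stand-ins for the real Rdb_transaction and rocksdb::Transaction savepoint API:

#include <cstdio>
#include <vector>

// Toy model of the savepoint bookkeeping in Rdb_transaction; the vector of
// write counts stands in for SetSavePoint()/RollbackToSavePoint().
struct TxSketch {
  std::vector<int> writes;            // all writes made so far
  std::vector<size_t> savepoints;     // write count recorded at each savepoint
  unsigned long long writes_at_last_savepoint = 0;

  void do_set_savepoint() { savepoints.push_back(writes.size()); }
  void do_rollback_to_savepoint() {
    writes.resize(savepoints.back());
    savepoints.pop_back();
  }

  void set_initial_savepoint() {
    do_set_savepoint();
    writes_at_last_savepoint = writes.size();
  }

  // Take another savepoint only if this statement made changes; long
  // transactions running many SELECTs then take no extra savepoints.
  void make_stmt_savepoint_permanent() {
    if (writes_at_last_savepoint != writes.size()) {
      do_set_savepoint();
      writes_at_last_savepoint = writes.size();
    }
  }

  // Undo the last statement; keep at least one savepoint alive afterwards.
  void rollback_to_stmt_savepoint() {
    if (writes_at_last_savepoint != writes.size()) {
      do_rollback_to_savepoint();
      if (savepoints.empty()) do_set_savepoint();
      writes_at_last_savepoint = writes.size();
    }
  }
};

int main() {
  TxSketch tx;
  tx.set_initial_savepoint();
  tx.writes.push_back(1);              // statement 1 writes a row
  tx.make_stmt_savepoint_permanent();  // keep it
  tx.writes.push_back(2);              // statement 2 writes a row
  tx.rollback_to_stmt_savepoint();     // undo statement 2 only
  std::printf("rows kept: %zu\n", tx.writes.size());  // prints 1
  return 0;
}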
@@ -2477,7 +2771,6 @@ public: void start_stmt() override { // Set the snapshot to delayed acquisition (SetSnapshotOnNextOperation) acquire_snapshot(false); - m_rocksdb_tx->SetSavePoint(); } /* @@ -2488,7 +2781,7 @@ public: /* TODO: here we must release the locks taken since the start_stmt() call */ if (m_rocksdb_tx) { const rocksdb::Snapshot *const org_snapshot = m_rocksdb_tx->GetSnapshot(); - m_rocksdb_tx->RollbackToSavePoint(); + rollback_to_stmt_savepoint(); const rocksdb::Snapshot *const cur_snapshot = m_rocksdb_tx->GetSnapshot(); if (org_snapshot != cur_snapshot) { @@ -2547,13 +2840,24 @@ private: bool commit_no_binlog() override { bool res = false; - release_snapshot(); - const rocksdb::Status s = - rdb->GetBaseDB()->Write(write_opts, m_batch->GetWriteBatch()); + rocksdb::Status s; + + s = merge_auto_incr_map(m_batch->GetWriteBatch()); if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); res = true; + goto error; } + + release_snapshot(); + + s = rdb->GetBaseDB()->Write(write_opts, m_batch->GetWriteBatch()); + if (!s.ok()) { + rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); + res = true; + goto error; + } +error: reset(); m_write_count = 0; @@ -2565,6 +2869,16 @@ private: return res; } +protected: + /* Implementations of do_*savepoint based on rocksdb::WriteBatch savepoints */ + void do_set_savepoint() override { + m_batch->SetSavePoint(); + } + + void do_rollback_to_savepoint() override { + m_batch->RollbackToSavePoint(); + } + public: bool is_writebatch_trx() const override { return true; } @@ -2642,14 +2956,15 @@ public: rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family, const rocksdb::Slice &key, - std::string *const value) const override { + rocksdb::PinnableSlice *const value) const override { + value->Reset(); return m_batch->GetFromBatchAndDB(rdb, m_read_opts, column_family, key, value); } rocksdb::Status get_for_update(rocksdb::ColumnFamilyHandle *const column_family, - const rocksdb::Slice &key, std::string *const value, + const rocksdb::Slice &key, rocksdb::PinnableSlice *const value, bool exclusive) override { return get(column_family, key, value); } @@ -2670,13 +2985,15 @@ public: write_opts.disableWAL = THDVAR(m_thd, write_disable_wal); write_opts.ignore_missing_column_families = THDVAR(m_thd, write_ignore_missing_column_families); + + set_initial_savepoint(); } - void start_stmt() override { m_batch->SetSavePoint(); } + void start_stmt() override {} void rollback_stmt() override { if (m_batch) - m_batch->RollbackToSavePoint(); + rollback_to_stmt_savepoint(); } explicit Rdb_writebatch_impl(THD *const thd) @@ -2778,13 +3095,12 @@ static Rdb_transaction *get_or_create_tx(THD *const thd) { static int rocksdb_close_connection(handlerton *const hton, THD *const thd) { Rdb_transaction *&tx = get_tx_from_thd(thd); if (tx != nullptr) { - int rc = tx->finish_bulk_load(); + int rc = tx->finish_bulk_load(false); if (rc != 0) { // NO_LINT_DEBUG sql_print_error("RocksDB: Error %d finalizing last SST file while " "disconnecting", rc); - abort_with_stack_traces(); } delete tx; @@ -2833,7 +3149,8 @@ static bool rocksdb_flush_wal(handlerton* hton __attribute__((__unused__))) /* target_lsn is set to 0 when MySQL wants to sync the wal files */ - if (target_lsn == 0 || rocksdb_flush_log_at_trx_commit != FLUSH_LOG_NEVER) { + if ((target_lsn == 0 && !rocksdb_db_options->allow_mmap_writes) || + rocksdb_flush_log_at_trx_commit != FLUSH_LOG_NEVER) { rocksdb_wal_group_syncs++; s = rdb->FlushWAL(target_lsn == 0 || rocksdb_flush_log_at_trx_commit == FLUSH_LOG_SYNC); @@ 
-2864,7 +3181,11 @@ static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx) /* We were instructed to prepare the whole transaction, or this is an SQL statement end and autocommit is on */ -#ifdef MARIAROCKS_NOT_YET // Crash-safe slave does not work yet +#ifdef MARIAROCKS_NOT_YET + /* + Storing binlog position inside MyRocks is needed only for restoring + MyRocks from backups. This feature is not supported yet. + */ std::vector slave_gtid_info; my_core::thd_slave_gtid_info(thd, &slave_gtid_info); for (const auto &it : slave_gtid_info) { @@ -2922,6 +3243,8 @@ static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx) DEBUG_SYNC(thd, "rocksdb.prepared"); } + else + tx->make_stmt_savepoint_permanent(); return HA_EXIT_SUCCESS; } @@ -3172,11 +3495,8 @@ static int rocksdb_commit(handlerton* hton, THD* thd, bool commit_tx) } else { /* We get here when committing a statement within a transaction. - - We don't need to do anything here. tx->start_stmt() will notify - Rdb_transaction_impl that another statement has started. */ - tx->set_tx_failed(false); + tx->make_stmt_savepoint_permanent(); } if (my_core::thd_tx_isolation(thd) <= ISO_READ_COMMITTED) { @@ -3319,79 +3639,54 @@ private: "=========================================\n"; } - static std::string get_dlock_txn_info(const rocksdb::DeadlockInfo &txn, - const GL_INDEX_ID &gl_index_id, - bool is_last_path = false) { - std::string txn_data; + static Rdb_deadlock_info::Rdb_dl_trx_info + get_dl_txn_info(const rocksdb::DeadlockInfo &txn, + const GL_INDEX_ID &gl_index_id) { + Rdb_deadlock_info::Rdb_dl_trx_info txn_data; - /* extract table name and index names using the index id */ - std::string table_name = ddl_manager.safe_get_table_name(gl_index_id); - if (table_name.empty()) { - table_name = + txn_data.trx_id = txn.m_txn_id; + + txn_data.table_name = ddl_manager.safe_get_table_name(gl_index_id); + if (txn_data.table_name.empty()) { + txn_data.table_name = "NOT FOUND; INDEX_ID: " + std::to_string(gl_index_id.index_id); } + auto kd = ddl_manager.safe_find(gl_index_id); - std::string idx_name = + txn_data.index_name = (kd) ? kd->get_name() : "NOT FOUND; INDEX_ID: " + std::to_string(gl_index_id.index_id); - /* get the name of the column family */ rocksdb::ColumnFamilyHandle *cfh = cf_manager.get_cf(txn.m_cf_id); - std::string cf_name = cfh->GetName(); + txn_data.cf_name = cfh->GetName(); + + txn_data.waiting_key = + rdb_hexdump(txn.m_waiting_key.c_str(), txn.m_waiting_key.length()); + + txn_data.exclusive_lock = txn.m_exclusive; - txn_data += format_string( - "TRANSACTIONID: %u\n" - "COLUMN FAMILY NAME: %s\n" - "WAITING KEY: %s\n" - "LOCK TYPE: %s\n" - "INDEX NAME: %s\n" - "TABLE NAME: %s\n", - txn.m_txn_id, cf_name.c_str(), - rdb_hexdump(txn.m_waiting_key.c_str(), txn.m_waiting_key.length()) - .c_str(), - txn.m_exclusive ? 
"EXCLUSIVE" : "SHARED", idx_name.c_str(), - table_name.c_str()); - if (!is_last_path) { - txn_data += "---------------WAITING FOR---------------\n"; - } return txn_data; } - static std::string - get_dlock_path_info(const rocksdb::DeadlockPath &path_entry) { - std::string path_data; - if (path_entry.limit_exceeded) { - path_data += "\n-------DEADLOCK EXCEEDED MAX DEPTH-------\n"; - } else { - path_data += "\n*** DEADLOCK PATH\n" - "=========================================\n"; - for (auto it = path_entry.path.begin(); it != path_entry.path.end(); + static Rdb_deadlock_info + get_dl_path_trx_info(const rocksdb::DeadlockPath &path_entry) { + Rdb_deadlock_info deadlock_info; + + for (auto it = path_entry.path.begin(); it != path_entry.path.end(); it++) { - auto txn = *it; - const GL_INDEX_ID gl_index_id = { - txn.m_cf_id, rdb_netbuf_to_uint32(reinterpret_cast( - txn.m_waiting_key.c_str()))}; - path_data += get_dlock_txn_info(txn, gl_index_id); - } - - DBUG_ASSERT_IFF(path_entry.limit_exceeded, path_entry.path.empty()); - /* print the first txn in the path to display the full deadlock cycle */ - if (!path_entry.path.empty() && !path_entry.limit_exceeded) { - auto txn = path_entry.path[0]; - const GL_INDEX_ID gl_index_id = { - txn.m_cf_id, rdb_netbuf_to_uint32(reinterpret_cast( - txn.m_waiting_key.c_str()))}; - path_data += get_dlock_txn_info(txn, gl_index_id, true); - - /* prints the txn id of the transaction that caused the deadlock */ - auto deadlocking_txn = *(path_entry.path.end() - 1); - path_data += - format_string("\n--------TRANSACTIONID: %u GOT DEADLOCK---------\n", - deadlocking_txn.m_txn_id); - } + auto txn = *it; + const GL_INDEX_ID gl_index_id = { + txn.m_cf_id, rdb_netbuf_to_uint32(reinterpret_cast( + txn.m_waiting_key.c_str()))}; + deadlock_info.path.push_back(get_dl_txn_info(txn, gl_index_id)); } - - return path_data; + DBUG_ASSERT_IFF(path_entry.limit_exceeded, path_entry.path.empty()); + /* print the first txn in the path to display the full deadlock cycle */ + if (!path_entry.path.empty() && !path_entry.limit_exceeded) { + auto deadlocking_txn = *(path_entry.path.end() - 1); + deadlock_info.victim_trx_id = deadlocking_txn.m_txn_id; + } + return deadlock_info; } public: @@ -3430,9 +3725,48 @@ private: m_data += "----------LATEST DETECTED DEADLOCKS----------\n"; for (auto path_entry : dlock_buffer) { - m_data += get_dlock_path_info(path_entry); + std::string path_data; + if (path_entry.limit_exceeded) { + path_data += "\n-------DEADLOCK EXCEEDED MAX DEPTH-------\n"; + } else { + path_data += "\n*** DEADLOCK PATH\n" + "=========================================\n"; + const auto dl_info = get_dl_path_trx_info(path_entry); + for (auto it = dl_info.path.begin(); it != dl_info.path.end(); it++) { + const auto trx_info = *it; + path_data += format_string( + "TRANSACTION ID: %u\n" + "COLUMN FAMILY NAME: %s\n" + "WAITING KEY: %s\n" + "LOCK TYPE: %s\n" + "INDEX NAME: %s\n" + "TABLE NAME: %s\n", + trx_info.trx_id, trx_info.cf_name.c_str(), + trx_info.waiting_key.c_str(), + trx_info.exclusive_lock ? 
"EXCLUSIVE" : "SHARED", + trx_info.index_name.c_str(), trx_info.table_name.c_str()); + if (it != dl_info.path.end() - 1) { + path_data += "---------------WAITING FOR---------------\n"; + } + } + path_data += + format_string("\n--------TRANSACTION ID: %u GOT DEADLOCK---------\n", + dl_info.victim_trx_id); + } + m_data += path_data; } } + + std::vector get_deadlock_info() { + std::vector deadlock_info; + auto dlock_buffer = rdb->GetDeadlockInfoBuffer(); + for (auto path_entry : dlock_buffer) { + if (!path_entry.limit_exceeded) { + deadlock_info.push_back(get_dl_path_trx_info(path_entry)); + } + } + return deadlock_info; + } }; /** @@ -3521,6 +3855,17 @@ std::vector rdb_get_all_trx_info() { return trx_info; } + +/* + returns a vector of info of recent deadlocks + for use by information_schema.rocksdb_deadlock +*/ +std::vector rdb_get_deadlock_info() { + Rdb_snapshot_status showStatus; + Rdb_transaction::walk_tx_list(&showStatus); + return showStatus.get_deadlock_info(); +} + #ifdef MARIAROCKS_NOT_YET /* Generate the snapshot status table */ static bool rocksdb_show_snapshot_status(handlerton *const hton, THD *const thd, @@ -3827,6 +4172,7 @@ static void rocksdb_update_table_stats( comp_stats_t comp_stats; uint lock_wait_timeout_stats; uint deadlock_stats; + uint lock_wait_stats; std::vector tablenames; /* @@ -3873,6 +4219,9 @@ static void rocksdb_update_table_stats( io_perf_write.requests = table_handler->m_io_perf_write.requests.load(); lock_wait_timeout_stats = table_handler->m_lock_wait_timeout_counter.load(); deadlock_stats = table_handler->m_deadlock_counter.load(); + lock_wait_stats = + table_handler->m_table_perf_context.m_value[PC_KEY_LOCK_WAIT_COUNT] + .load(); /* Convert from rocksdb timer to mysql timer. RocksDB values are @@ -3900,7 +4249,7 @@ static void rocksdb_update_table_stats( sizeof(tablename_sys)); (*cb)(dbname_sys, tablename_sys, is_partition, &io_perf_read, &io_perf_write, &io_perf, &io_perf, &io_perf, &page_stats, - &comp_stats, 0, lock_wait_timeout_stats, deadlock_stats, + &comp_stats, lock_wait_stats, lock_wait_timeout_stats, deadlock_stats, rocksdb_hton_name); } } @@ -3912,8 +4261,9 @@ static rocksdb::Status check_rocksdb_options_compatibility( rocksdb::DBOptions loaded_db_opt; std::vector loaded_cf_descs; - rocksdb::Status status = LoadLatestOptions(dbpath, rocksdb::Env::Default(), - &loaded_db_opt, &loaded_cf_descs); + rocksdb::Status status = + LoadLatestOptions(dbpath, rocksdb::Env::Default(), &loaded_db_opt, + &loaded_cf_descs, rocksdb_ignore_unknown_options); // If we're starting from scratch and there are no options saved yet then this // is a valid case. Therefore we can't compare the current set of options to @@ -3952,7 +4302,8 @@ static rocksdb::Status check_rocksdb_options_compatibility( // This is the essence of the function - determine if it's safe to open the // database or not. status = CheckOptionsCompatibility(dbpath, rocksdb::Env::Default(), main_opts, - loaded_cf_descs); + loaded_cf_descs, + rocksdb_ignore_unknown_options); return status; } @@ -3976,6 +4327,22 @@ static int rocksdb_init_func(void *const p) { DBUG_RETURN(1); } + if (rdb_check_rocksdb_corruption()) { + sql_print_error("RocksDB: There was a corruption detected in RockDB files. 
" + "Check error log emitted earlier for more details."); + if (rocksdb_allow_to_start_after_corruption) { + sql_print_information( + "RocksDB: Remove rocksdb_allow_to_start_after_corruption to prevent " + "server operating if RocksDB corruption is detected."); + } else { + sql_print_error("RocksDB: The server will exit normally and stop restart " + "attempts. Remove %s file from data directory and " + "start mysqld manually.", + rdb_corruption_marker_file_name().c_str()); + exit(0); + } + } + // Validate the assumption about the size of ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN. static_assert(sizeof(longlong) == 8, "Assuming that longlong is 8 bytes."); @@ -4052,6 +4419,16 @@ static int rocksdb_init_func(void *const p) { rocksdb_hton->tablefile_extensions= ha_rocksdb_exts; DBUG_ASSERT(!mysqld_embedded); + if (rocksdb_db_options->max_open_files > (long)open_files_limit) { + sql_print_information("RocksDB: rocksdb_max_open_files should not be " + "greater than the open_files_limit, effective value " + "of rocksdb_max_open_files is being set to " + "open_files_limit / 2."); + rocksdb_db_options->max_open_files = open_files_limit / 2; + } else if (rocksdb_db_options->max_open_files == -2) { + rocksdb_db_options->max_open_files = open_files_limit / 2; + } + rocksdb_stats = rocksdb::CreateDBStatistics(); rocksdb_db_options->statistics = rocksdb_stats; @@ -4100,14 +4477,20 @@ static int rocksdb_init_func(void *const p) { DBUG_RETURN(HA_EXIT_FAILURE); } + if (rocksdb_db_options->allow_mmap_writes && + rocksdb_flush_log_at_trx_commit != FLUSH_LOG_NEVER) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: rocksdb_flush_log_at_trx_commit needs to be 0 " + "to use allow_mmap_writes"); + DBUG_RETURN(HA_EXIT_FAILURE); + } + // sst_file_manager will move deleted rocksdb sst files to trash_dir // to be deleted in a background thread. std::string trash_dir = std::string(rocksdb_datadir) + "/trash"; - rocksdb_db_options->sst_file_manager.reset( - NewSstFileManager(rocksdb_db_options->env, myrocks_logger, trash_dir)); - - rocksdb_db_options->sst_file_manager->SetDeleteRateBytesPerSecond( - rocksdb_sst_mgr_rate_bytes_per_sec); + rocksdb_db_options->sst_file_manager.reset(NewSstFileManager( + rocksdb_db_options->env, myrocks_logger, trash_dir, + rocksdb_sst_mgr_rate_bytes_per_sec, true /* delete_existing_trash */)); std::vector cf_names; rocksdb::Status status; @@ -4180,9 +4563,15 @@ static int rocksdb_init_func(void *const p) { if (rocksdb_persistent_cache_size_mb > 0) { std::shared_ptr pcache; uint64_t cache_size_bytes= rocksdb_persistent_cache_size_mb * 1024 * 1024; - rocksdb::NewPersistentCache( + status = rocksdb::NewPersistentCache( rocksdb::Env::Default(), std::string(rocksdb_persistent_cache_path), cache_size_bytes, myrocks_logger, true, &pcache); + if (!status.ok()) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Persistent cache returned error: (%s)", + status.getState()); + DBUG_RETURN(HA_EXIT_FAILURE); + } rocksdb_tbl_options->persistent_cache = pcache; } else if (strlen(rocksdb_persistent_cache_path)) { sql_print_error("RocksDB: Must specify rocksdb_persistent_cache_size_mb"); @@ -4559,6 +4948,33 @@ static inline void rocksdb_smart_next(bool seek_backward, } } +#ifndef NDEBUG +// simulate that RocksDB has reported corrupted data +static void dbug_change_status_to_corrupted(rocksdb::Status *status) { + *status = rocksdb::Status::Corruption(); +} +#endif + +// If the iterator is not valid it might be because of EOF but might be due +// to IOError or corruption. The good practice is always check it. 
+// https://github.com/facebook/rocksdb/wiki/Iterator#error-handling +static inline bool is_valid(rocksdb::Iterator *scan_it) { + if (scan_it->Valid()) { + return true; + } else { + rocksdb::Status s = scan_it->status(); + DBUG_EXECUTE_IF("rocksdb_return_status_corrupted", + dbug_change_status_to_corrupted(&s);); + if (s.IsIOError() || s.IsCorruption()) { + if (s.IsCorruption()) { + rdb_persist_corruption_marker(); + } + rdb_handle_io_error(s, RDB_IO_ERROR_GENERAL); + } + return false; + } +} + /** @brief Example of simple lock controls. The "table_handler" it creates is a @@ -4635,28 +5051,130 @@ std::vector Rdb_open_tables_map::get_table_names(void) const { return names; } +/* + Inspired by innobase_get_int_col_max_value from InnoDB. This returns the + maximum value a type can take on. +*/ +static ulonglong rdb_get_int_col_max_value(const Field *field) { + ulonglong max_value = 0; + switch (field->key_type()) { + case HA_KEYTYPE_BINARY: + max_value = 0xFFULL; + break; + case HA_KEYTYPE_INT8: + max_value = 0x7FULL; + break; + case HA_KEYTYPE_USHORT_INT: + max_value = 0xFFFFULL; + break; + case HA_KEYTYPE_SHORT_INT: + max_value = 0x7FFFULL; + break; + case HA_KEYTYPE_UINT24: + max_value = 0xFFFFFFULL; + break; + case HA_KEYTYPE_INT24: + max_value = 0x7FFFFFULL; + break; + case HA_KEYTYPE_ULONG_INT: + max_value = 0xFFFFFFFFULL; + break; + case HA_KEYTYPE_LONG_INT: + max_value = 0x7FFFFFFFULL; + break; + case HA_KEYTYPE_ULONGLONG: + max_value = 0xFFFFFFFFFFFFFFFFULL; + break; + case HA_KEYTYPE_LONGLONG: + max_value = 0x7FFFFFFFFFFFFFFFULL; + break; + case HA_KEYTYPE_FLOAT: + max_value = 0x1000000ULL; + break; + case HA_KEYTYPE_DOUBLE: + max_value = 0x20000000000000ULL; + break; + default: + abort(); + } + + return max_value; +} + void ha_rocksdb::load_auto_incr_value() { + ulonglong auto_incr = 0; + bool validate_last = false, use_datadic = true; +#ifndef NDEBUG + DBUG_EXECUTE_IF("myrocks_autoinc_upgrade", use_datadic = false;); + validate_last = true; +#endif + + if (use_datadic && dict_manager.get_auto_incr_val( + m_tbl_def->get_autoincr_gl_index_id(), &auto_incr)) { + update_auto_incr_val(auto_incr); + } + + // If we find nothing in the data dictionary, or if we are in debug mode, + // then call index_last to get the last value. + // + // This is needed when upgrading from a server that did not support + // persistent auto_increment, of if the table is empty. + // + // For debug mode, we are just verifying that the data dictionary value is + // greater than or equal to the maximum value in the table. + if (auto_incr == 0 || validate_last) { + auto_incr = load_auto_incr_value_from_index(); + update_auto_incr_val(auto_incr); + } + + // If we failed to find anything from the data dictionary and index, then + // initialize auto_increment to 1. + if (m_tbl_def->m_auto_incr_val == 0) { + update_auto_incr_val(1); + } +} + +ulonglong ha_rocksdb::load_auto_incr_value_from_index() { const int save_active_index = active_index; active_index = table->s->next_number_index; const uint8 save_table_status = table->status; + ulonglong last_val = 0; - /* - load_auto_incr_value() may be called by statements that - do not execute implicit commits (i.e. SHOW CREATE TABLE). - index_last() creates a snapshot. When a snapshot is created - here, it has to be released as well. (GitHub issue#189) - */ Rdb_transaction *const tx = get_or_create_tx(table->in_use); const bool is_new_snapshot = !tx->has_snapshot(); + if (is_new_snapshot) { + tx->acquire_snapshot(true); + } // Do a lookup. 
We only need index column, so it should be index-only. - // (another reason to make it index-only is that table->read_set is - // not set appropriately and non-index-only lookup will not read the value) + // (another reason to make it index-only is that table->read_set is not set + // appropriately and non-index-only lookup will not read the value) const bool save_keyread_only = m_keyread_only; m_keyread_only = true; + m_key_requested = true; - if (!index_last(table->record[0])) - update_auto_incr_val(); + if (!index_last(table->record[0])) { + Field *field = + table->key_info[table->s->next_number_index].key_part[0].field; + ulonglong max_val = rdb_get_int_col_max_value(field); + my_bitmap_map *const old_map = + dbug_tmp_use_all_columns(table, table->read_set); + last_val = field->val_int(); + if (last_val != max_val) { + last_val++; + } +#ifndef NDEBUG + ulonglong dd_val; + if (last_val <= max_val) { + const auto &gl_index_id = m_tbl_def->get_autoincr_gl_index_id(); + if (dict_manager.get_auto_incr_val(gl_index_id, &dd_val) && + tx->get_auto_incr(gl_index_id) == 0) { + DBUG_ASSERT(dd_val >= last_val); + } + } +#endif + dbug_tmp_restore_column_map(table->read_set, old_map); + } m_keyread_only = save_keyread_only; if (is_new_snapshot) { @@ -4669,38 +5187,46 @@ void ha_rocksdb::load_auto_incr_value() { /* Do what ha_rocksdb::index_end() does. (Why don't we use index_init/index_end? class handler defines index_init - as private, for some reason). - */ + as private, for some reason). + */ release_scan_iterator(); + + return last_val; } -/* Get PK value from table->record[0]. */ -/* - TODO(alexyang): No existing support for auto_increment on non-pk columns, see - end of ha_rocksdb::create. Also see opened issue here: - https://github.com/facebook/mysql-5.6/issues/153 -*/ -void ha_rocksdb::update_auto_incr_val() { +void ha_rocksdb::update_auto_incr_val(ulonglong val) { + ulonglong auto_incr_val = m_tbl_def->m_auto_incr_val; + while ( + auto_incr_val < val && + !m_tbl_def->m_auto_incr_val.compare_exchange_weak(auto_incr_val, val)) { + // Do nothing - just loop until auto_incr_val is >= val or we successfully + // set it + } +} + +void ha_rocksdb::update_auto_incr_val_from_field() { Field *field; - longlong new_val; + ulonglong new_val, max_val; field = table->key_info[table->s->next_number_index].key_part[0].field; + max_val = rdb_get_int_col_max_value(field); my_bitmap_map *const old_map = dbug_tmp_use_all_columns(table, table->read_set); new_val = field->val_int(); // don't increment if we would wrap around - if (new_val != std::numeric_limits::max()) { + if (new_val != max_val) { new_val++; } dbug_tmp_restore_column_map(table->read_set, old_map); - longlong auto_incr_val = m_tbl_def->m_auto_incr_val; - while (auto_incr_val < new_val && - !m_tbl_def->m_auto_incr_val.compare_exchange_weak(auto_incr_val, - new_val)) { - // Do nothing - just loop until auto_incr_val is >= new_val or - // we successfully set it + // Only update if positive value was set for auto_incr column. + if (new_val <= max_val) { + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + tx->set_auto_incr(m_tbl_def->get_autoincr_gl_index_id(), new_val); + + // Update the in memory auto_incr value in m_tbl_def. 
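update_auto_incr_val() above is the usual lock-free "raise an atomic counter to at least this value" loop: compare_exchange_weak reloads the expected value whenever it fails, so concurrent writers can only move the counter forward. A self-contained sketch of the same pattern (names are illustrative, not MyRocks code):

    #include <atomic>
    #include <cassert>

    // Raise 'target' to at least 'val'. Returns true if this call performed
    // the update, false if the stored value was already large enough.
    static bool update_to_at_least(std::atomic<unsigned long long> &target,
                                   unsigned long long val) {
      unsigned long long cur = target.load();
      while (cur < val) {
        // On failure compare_exchange_weak refreshes 'cur' with the latest
        // stored value and the loop re-evaluates the condition.
        if (target.compare_exchange_weak(cur, val)) {
          return true;
        }
      }
      return false;
    }

    int main() {
      std::atomic<unsigned long long> auto_incr{10};
      assert(update_to_at_least(auto_incr, 42));   // raised to 42
      assert(!update_to_at_least(auto_incr, 7));   // already larger, unchanged
      assert(auto_incr.load() == 42);
      return 0;
    }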
+ update_auto_incr_val(new_val); } } @@ -4709,35 +5235,42 @@ int ha_rocksdb::load_hidden_pk_value() { active_index = m_tbl_def->m_key_count - 1; const uint8 save_table_status = table->status; - Rdb_transaction *const tx = get_or_create_tx(table->in_use); - const bool is_new_snapshot = !tx->has_snapshot(); + /* + We should read the latest committed value in the database. + That is, if we have an open transaction with a snapshot, we should not use + it as we may get old data. Start a new transaction to read the latest + value. + */ + Rdb_transaction *const temp_tx = new Rdb_transaction_impl(table->in_use); + temp_tx->start_tx(); + Rdb_transaction *&tx = get_tx_from_thd(table->in_use); + Rdb_transaction *save_tx= tx; + tx= temp_tx; + longlong hidden_pk_id = 1; // Do a lookup. if (!index_last(table->record[0])) { /* Decode PK field from the key */ - longlong hidden_pk_id = 0; auto err = read_hidden_pk_id_from_rowkey(&hidden_pk_id); if (err) { - if (is_new_snapshot) { - tx->release_snapshot(); - } + delete tx; + tx= save_tx; return err; } hidden_pk_id++; - longlong old = m_tbl_def->m_hidden_pk_val; - while ( - old < hidden_pk_id && - !m_tbl_def->m_hidden_pk_val.compare_exchange_weak(old, hidden_pk_id)) { - } } - if (is_new_snapshot) { - tx->release_snapshot(); + longlong old = m_tbl_def->m_hidden_pk_val; + while (old < hidden_pk_id && + !m_tbl_def->m_hidden_pk_val.compare_exchange_weak(old, hidden_pk_id)) { } + delete tx; + tx= save_tx; + table->status = save_table_status; active_index = save_active_index; @@ -4817,18 +5350,15 @@ ha_rocksdb::ha_rocksdb(my_core::handlerton *const hton, m_sk_packed_tuple(nullptr), m_end_key_packed_tuple(nullptr), m_sk_match_prefix(nullptr), m_sk_match_prefix_buf(nullptr), m_sk_packed_tuple_old(nullptr), m_dup_sk_packed_tuple(nullptr), - m_dup_sk_packed_tuple_old(nullptr), m_pack_buffer(nullptr), - m_lock_rows(RDB_LOCK_NONE), m_keyread_only(FALSE), - m_bulk_load_tx(nullptr), m_encoder_arr(nullptr), + m_dup_sk_packed_tuple_old(nullptr), m_eq_cond_lower_bound(nullptr), + m_eq_cond_upper_bound(nullptr), m_pack_buffer(nullptr), + m_lock_rows(RDB_LOCK_NONE), m_keyread_only(FALSE), m_encoder_arr(nullptr), m_row_checksums_checked(0), m_in_rpl_delete_rows(false), - m_in_rpl_update_rows(false), m_force_skip_unique_check(false) { - // TODO(alexyang): create a valid PSI_mutex_key for this mutex - mysql_mutex_init(0, &m_bulk_load_mutex, MY_MUTEX_INIT_FAST); -} + m_in_rpl_update_rows(false), m_force_skip_unique_check(false) {} -bool ha_rocksdb::same_table(const ha_rocksdb &other) const { - return m_tbl_def->base_tablename() == other.m_tbl_def->base_tablename(); +const std::string &ha_rocksdb::get_table_basename() const { + return m_tbl_def->base_tablename(); } /** @@ -4914,8 +5444,12 @@ bool ha_rocksdb::should_hide_ttl_rec(const Rdb_key_def &kd, #ifndef NDEBUG read_filter_ts += rdb_dbug_set_ttl_read_filter_ts(); #endif - return ts + kd.m_ttl_duration + read_filter_ts <= - static_cast(curr_ts); + bool is_hide_ttl = + ts + kd.m_ttl_duration + read_filter_ts <= static_cast(curr_ts); + if (is_hide_ttl) { + update_row_stats(ROWS_FILTERED); + } + return is_hide_ttl; } void ha_rocksdb::rocksdb_skip_expired_records(const Rdb_key_def &kd, @@ -5129,12 +5663,12 @@ int ha_rocksdb::convert_record_to_storage_format( Setup which fields will be unpacked when reading rows @detail - Two special cases when we still unpack all fields: + Three special cases when we still unpack all fields: - When this table is being updated (m_lock_rows==RDB_LOCK_WRITE). 
- When @@rocksdb_verify_row_debug_checksums is ON (In this mode, we need to - read all - fields to find whether there is a row checksum at the end. We could skip - the fields instead of decoding them, but currently we do decoding.) + read all fields to find whether there is a row checksum at the end. We could + skip the fields instead of decoding them, but currently we do decoding.) + - On index merge as bitmap is cleared during that operation @seealso ha_rocksdb::setup_field_converters() @@ -5142,20 +5676,29 @@ int ha_rocksdb::convert_record_to_storage_format( */ void ha_rocksdb::setup_read_decoders() { m_decoders_vect.clear(); + m_key_requested = false; int last_useful = 0; int skip_size = 0; for (uint i = 0; i < table->s->fields; i++) { + // bitmap is cleared on index merge, but it still needs to decode columns + const bool field_requested = + m_lock_rows == RDB_LOCK_WRITE || m_verify_row_debug_checksums || + bitmap_is_clear_all(table->read_set) || + bitmap_is_set(table->read_set, table->field[i]->field_index); + // We only need the decoder if the whole record is stored. if (m_encoder_arr[i].m_storage_type != Rdb_field_encoder::STORE_ALL) { + // the field potentially needs unpacking + if (field_requested) { + // the field is in the read set + m_key_requested = true; + } continue; } - // bitmap is cleared on index merge, but it still needs to decode columns - if (m_lock_rows == RDB_LOCK_WRITE || m_verify_row_debug_checksums || - bitmap_is_clear_all(table->read_set) || - bitmap_is_set(table->read_set, table->field[i]->field_index)) { + if (field_requested) { // We will need to decode this field m_decoders_vect.push_back({&m_encoder_arr[i], true, skip_size}); last_useful = m_decoders_vect.size(); @@ -5181,13 +5724,18 @@ void ha_rocksdb::setup_read_decoders() { } #ifndef NDEBUG -void dbug_append_garbage_at_end(std::string &on_disk_rec) { - on_disk_rec.append("abc"); +void dbug_append_garbage_at_end(rocksdb::PinnableSlice *on_disk_rec) { + std::string str(on_disk_rec->data(), on_disk_rec->size()); + on_disk_rec->Reset(); + str.append("abc"); + on_disk_rec->PinSelf(rocksdb::Slice(str)); } -void dbug_truncate_record(std::string &on_disk_rec) { on_disk_rec.resize(0); } +void dbug_truncate_record(rocksdb::PinnableSlice *on_disk_rec) { + on_disk_rec->remove_suffix(on_disk_rec->size()); +} -void dbug_modify_rec_varchar12(std::string &on_disk_rec) { +void dbug_modify_rec_varchar12(rocksdb::PinnableSlice *on_disk_rec) { std::string res; // The record is NULL-byte followed by VARCHAR(10). 
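Stepping back to should_hide_ttl_rec() above, which now also bumps the rows_filtered counter: a row is hidden once its stored timestamp plus the index TTL, plus an optional debug-only offset, has been reached by the current snapshot time. A standalone sketch of that predicate with made-up names (not patch code):

    #include <cstdint>
    #include <iostream>

    // A record written at 'write_ts' with a TTL of 'ttl_duration' seconds is
    // expired once 'curr_ts' reaches write_ts + ttl_duration.
    // 'read_filter_offset' stands in for the debug knob that shifts the
    // filtering point in tests.
    static bool is_ttl_expired(uint64_t write_ts, uint64_t ttl_duration,
                               uint64_t curr_ts, uint64_t read_filter_offset = 0) {
      return write_ts + ttl_duration + read_filter_offset <= curr_ts;
    }

    int main() {
      // Written at t=1000 with a one-hour TTL: visible at t=4599, hidden at t=4600.
      std::cout << is_ttl_expired(1000, 3600, 4599) << "\n";  // 0
      std::cout << is_ttl_expired(1000, 3600, 4600) << "\n";  // 1
      return 0;
    }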
// Put the NULL-byte @@ -5196,7 +5744,8 @@ void dbug_modify_rec_varchar12(std::string &on_disk_rec) { res.append("\xC", 1); res.append("123456789ab", 12); - on_disk_rec.assign(res); + on_disk_rec->Reset(); + on_disk_rec->PinSelf(rocksdb::Slice(res)); } void dbug_modify_key_varchar8(String &on_disk_rec) { @@ -5218,16 +5767,15 @@ void dbug_create_err_inplace_alter() { int ha_rocksdb::convert_record_from_storage_format( const rocksdb::Slice *const key, uchar *const buf) { - DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read1", - dbug_append_garbage_at_end(m_retrieved_record);); - DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read2", - dbug_truncate_record(m_retrieved_record);); - DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read3", - dbug_modify_rec_varchar12(m_retrieved_record);); - const rocksdb::Slice retrieved_rec_slice(&m_retrieved_record.front(), - m_retrieved_record.size()); - return convert_record_from_storage_format(key, &retrieved_rec_slice, buf); + DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read1", + dbug_append_garbage_at_end(&m_retrieved_record);); + DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read2", + dbug_truncate_record(&m_retrieved_record);); + DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read3", + dbug_modify_rec_varchar12(&m_retrieved_record);); + + return convert_record_from_storage_format(key, &m_retrieved_record, buf); } int ha_rocksdb::convert_blob_from_storage_format( @@ -5390,9 +5938,13 @@ int ha_rocksdb::convert_record_from_storage_format( Rdb_key_def::get_unpack_header_size(unpack_info[0])); } - int err = m_pk_descr->unpack_record(table, buf, &rowkey_slice, - unpack_info ? &unpack_slice : nullptr, - false /* verify_checksum */); + int err = HA_EXIT_SUCCESS; + if (m_key_requested) { + err = m_pk_descr->unpack_record(table, buf, &rowkey_slice, + unpack_info ? &unpack_slice : nullptr, + false /* verify_checksum */); + } + if (err != HA_EXIT_SUCCESS) { return err; } @@ -5636,6 +6188,11 @@ int ha_rocksdb::alloc_key_buffers(const TABLE *const table_arg, m_pack_buffer = reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0))); + m_eq_cond_upper_bound = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0))); + m_eq_cond_lower_bound = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0))); + /* If inplace alter is happening, allocate special buffers for unique secondary index duplicate checking. @@ -5650,6 +6207,7 @@ int ha_rocksdb::alloc_key_buffers(const TABLE *const table_arg, if (m_pk_tuple == nullptr || m_pk_packed_tuple == nullptr || m_sk_packed_tuple == nullptr || m_sk_packed_tuple_old == nullptr || m_end_key_packed_tuple == nullptr || m_pack_buffer == nullptr || + m_eq_cond_upper_bound == nullptr || m_eq_cond_lower_bound == nullptr || (alloc_alter_buffers && (m_dup_sk_packed_tuple == nullptr || m_dup_sk_packed_tuple_old == nullptr))) { // One or more of the above allocations failed. Clean up and exit @@ -5688,6 +6246,12 @@ void ha_rocksdb::free_key_buffers() { my_free(m_dup_sk_packed_tuple_old); m_dup_sk_packed_tuple_old = nullptr; + + my_free(m_eq_cond_upper_bound); + m_eq_cond_upper_bound = nullptr; + + my_free(m_eq_cond_lower_bound); + m_eq_cond_lower_bound = nullptr; } #ifdef MARIAROCKS_NOT_YET @@ -5843,11 +6407,13 @@ int ha_rocksdb::open(const char *const name, int mode, uint test_if_locked) { */ m_verify_row_debug_checksums = false; - /* TODO: move the following to where TABLE_SHARE is opened: */ - if (table->found_next_number_field) + /* Load auto_increment value only once on first use. 
*/ + if (table->found_next_number_field && m_tbl_def->m_auto_incr_val == 0) { load_auto_incr_value(); + } - if (has_hidden_pk(table) && + /* Load hidden pk only once on first use. */ + if (has_hidden_pk(table) && m_tbl_def->m_hidden_pk_val == 0 && (err = load_hidden_pk_value()) != HA_EXIT_SUCCESS) { free_key_buffers(); DBUG_RETURN(err); @@ -6588,12 +7154,17 @@ int rdb_normalize_tablename(const std::string &tablename, std::string *const strbuf) { DBUG_ASSERT(strbuf != nullptr); - if (tablename.size() < 2 || tablename[0] != '.' || tablename[1] != FN_LIBCHAR) { + if (tablename.size() < 2 || tablename[0] != '.' || + (tablename[1] != FN_LIBCHAR && tablename[1] != FN_LIBCHAR2)) { DBUG_ASSERT(0); // We were not passed table name? return HA_ERR_ROCKSDB_INVALID_TABLE; } size_t pos = tablename.find_first_of(FN_LIBCHAR, 2); + if (pos == std::string::npos) { + pos = tablename.find_first_of(FN_LIBCHAR2, 2); + } + if (pos == std::string::npos) { DBUG_ASSERT(0); // We were not passed table name? return HA_ERR_ROCKSDB_INVALID_TABLE; @@ -6820,6 +7391,20 @@ int ha_rocksdb::create(const char *const name, TABLE *const table_arg, m_pk_descr = m_key_descr_arr[pk_index(table_arg, m_tbl_def)]; + if (create_info->auto_increment_value) { + bool autoinc_upgrade_test = false; + m_tbl_def->m_auto_incr_val = create_info->auto_increment_value; + DBUG_EXECUTE_IF("myrocks_autoinc_upgrade", autoinc_upgrade_test = true;); + if (!autoinc_upgrade_test) { + auto s = dict_manager.put_auto_incr_val( + batch, m_tbl_def->get_autoincr_gl_index_id(), + m_tbl_def->m_auto_incr_val); + if (!s.ok()) { + goto error; + } + } + } + dict_manager.lock(); err = ddl_manager.put_and_write(m_tbl_def, batch); if (err != HA_EXIT_SUCCESS) { @@ -6835,23 +7420,6 @@ int ha_rocksdb::create(const char *const name, TABLE *const table_arg, dict_manager.unlock(); - if (create_info->auto_increment_value) - m_tbl_def->m_auto_incr_val = create_info->auto_increment_value; - - /* - We only support auto_increment at start of the PRIMARY KEY. - */ - // Field *field; - // if ((field= table_arg->next_number_field)) - /* TODO mdcallag: disable this for now to let UNIQUE indexes kind of work - if ((field= table_arg->found_next_number_field)) - { - int pk= table_arg->s->primary_key; - Field *pk_field= table_arg->key_info[pk].key_part[0].field; - if (field->field_index != pk_field->field_index) - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); - } - */ DBUG_RETURN(HA_EXIT_SUCCESS); error: @@ -6949,7 +7517,7 @@ int ha_rocksdb::read_before_key(const Rdb_key_def &kd, */ rocksdb_smart_seek(!kd.m_is_reverse_cf, m_scan_it, key_slice); - while (m_scan_it->Valid()) { + while (is_valid(m_scan_it)) { /* We are using full key and we've hit an exact match, or... @@ -6989,12 +7557,12 @@ int ha_rocksdb::read_after_key(const Rdb_key_def &kd, from the POV of the current transaction. If it has, try going to the next key. */ - while (m_scan_it->Valid() && kd.has_ttl() && + while (is_valid(m_scan_it) && kd.has_ttl() && should_hide_ttl_rec(kd, m_scan_it->value(), ttl_filter_ts)) { rocksdb_smart_next(kd.m_is_reverse_cf, m_scan_it); } - return m_scan_it->Valid() ? HA_EXIT_SUCCESS : HA_ERR_KEY_NOT_FOUND; + return is_valid(m_scan_it) ? 
HA_EXIT_SUCCESS : HA_ERR_KEY_NOT_FOUND; } int ha_rocksdb::position_to_correct_key( @@ -7154,7 +7722,7 @@ int ha_rocksdb::read_row_from_secondary_key(uchar *const buf, bool covered_lookup = m_keyread_only || kd.covers_lookup(table, &value, &m_lookup_bitmap); - if (covered_lookup && m_lock_rows == RDB_LOCK_NONE && !has_hidden_pk(table)) { + if (covered_lookup && m_lock_rows == RDB_LOCK_NONE) { pk_size = kd.get_primary_key_tuple(table, *m_pk_descr, &rkey, m_pk_packed_tuple); if (pk_size == RDB_INVALID_KEY_LEN) { @@ -7255,7 +7823,7 @@ int ha_rocksdb::secondary_index_read(const int keyno, uchar *const buf) { /* Use STATUS_NOT_FOUND when record not found or some error occurred */ table->status = STATUS_NOT_FOUND; - if (m_scan_it->Valid()) { + if (is_valid(m_scan_it)) { rocksdb::Slice key = m_scan_it->key(); /* Check if we've ran out of records of this index */ @@ -7276,8 +7844,7 @@ int ha_rocksdb::secondary_index_read(const int keyno, uchar *const buf) { bool covered_lookup = m_keyread_only || m_key_descr_arr[keyno]->covers_lookup( table, &value, &m_lookup_bitmap); - if (covered_lookup && m_lock_rows == RDB_LOCK_NONE && - !has_hidden_pk(table)) { + if (covered_lookup && m_lock_rows == RDB_LOCK_NONE) { rc = m_key_descr_arr[keyno]->unpack_record( table, buf, &key, &value, m_verify_row_debug_checksums); global_stats.covered_secondary_key_lookups.inc(); @@ -7376,6 +7943,15 @@ int ha_rocksdb::prepare_range_scan(const key_range *start_key, const key_range *end_key) { range_key_part= table->key_info[active_index].key_part; + + if (start_key) + { + m_save_start_range= *start_key; + m_start_range= &m_save_start_range; + } + else + m_start_range= NULL; + set_end_range(end_key); return 0; } @@ -7505,8 +8081,12 @@ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key, packed_size); uint end_key_packed_size = 0; + const key_range *cur_end_key= end_key; + if (find_flag == HA_READ_PREFIX_LAST_OR_PREV) + cur_end_key= m_start_range; + const uint eq_cond_len = - calc_eq_cond_len(kd, find_flag, slice, bytes_changed_by_succ, end_key, + calc_eq_cond_len(kd, find_flag, slice, bytes_changed_by_succ, cur_end_key, &end_key_packed_size); bool use_all_keys = false; @@ -7605,7 +8185,7 @@ int ha_rocksdb::find_icp_matching_index_rec(const bool &move_forward, while (1) { rocksdb_skip_expired_records(kd, m_scan_it, !move_forward); - if (!m_scan_it->Valid()) { + if (!is_valid(m_scan_it)) { table->status = STATUS_NOT_FOUND; return HA_ERR_END_OF_FILE; } @@ -7692,8 +8272,8 @@ int ha_rocksdb::check(THD *const thd, HA_CHECK_OPT *const check_opt) { /* For each secondary index, check that we can get a PK value from it */ // NO_LINT_DEBUG - sql_print_information("CHECKTABLE %s: Checking table %s", table_name, - table_name); + sql_print_verbose_info("CHECKTABLE %s: Checking table %s", table_name, + table_name); ha_rows UNINIT_VAR(row_checksums_at_start); // set/used iff first_index==true ha_rows row_checksums = ha_rows(-1); bool first_index = true; @@ -7708,8 +8288,8 @@ int ha_rocksdb::check(THD *const thd, HA_CHECK_OPT *const check_opt) { row_checksums_at_start = m_row_checksums_checked; int res; // NO_LINT_DEBUG - sql_print_information("CHECKTABLE %s: Checking index %s", table_name, - table->key_info[keyno].name); + sql_print_verbose_info("CHECKTABLE %s: Checking index %s", table_name, + table->key_info[keyno].name); while (1) { if (!rows) res = index_first(table->record[0]); @@ -7797,9 +8377,9 @@ int ha_rocksdb::check(THD *const thd, HA_CHECK_OPT *const check_opt) { } } // NO_LINT_DEBUG - 
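An aside on rdb_normalize_tablename() earlier in this hunk series (illustrative only, not patch code): it now accepts both path separators, so a name such as ".\db\table" normalizes the same way as "./db/table". A minimal standalone sketch of the parsing, with literal '/' and '\\' standing in for the FN_LIBCHAR macros:

    #include <iostream>
    #include <string>

    // Turn "./<db><sep><table>" into "<db>.<table>", accepting either
    // separator. Returns an empty string on malformed input where the real
    // function returns HA_ERR_ROCKSDB_INVALID_TABLE.
    static std::string normalize_tablename(const std::string &name) {
      if (name.size() < 2 || name[0] != '.' ||
          (name[1] != '/' && name[1] != '\\')) {
        return "";
      }
      size_t pos = name.find_first_of('/', 2);
      if (pos == std::string::npos) {
        pos = name.find_first_of('\\', 2);
      }
      if (pos == std::string::npos) {
        return "";
      }
      return name.substr(2, pos - 2) + "." + name.substr(pos + 1);
    }

    int main() {
      std::cout << normalize_tablename("./test/t1") << "\n";    // test.t1
      std::cout << normalize_tablename(".\\test\\t1") << "\n";  // test.t1
      std::cout << normalize_tablename("test/t1") << "\n";      // (empty: malformed)
      return 0;
    }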
sql_print_information("CHECKTABLE %s: ... %lld index entries checked " - "(%lld had checksums)", - table_name, rows, checksums); + sql_print_verbose_info("CHECKTABLE %s: ... %lld index entries checked " + "(%lld had checksums)", + table_name, rows, checksums); if (first_index) { row_checksums = m_row_checksums_checked - row_checksums_at_start; @@ -7810,8 +8390,8 @@ int ha_rocksdb::check(THD *const thd, HA_CHECK_OPT *const check_opt) { } if (row_checksums != ha_rows(-1)) { // NO_LINT_DEBUG - sql_print_information("CHECKTABLE %s: %lld table records had checksums", - table_name, row_checksums); + sql_print_verbose_info("CHECKTABLE %s: %lld table records had checksums", + table_name, row_checksums); } extra(HA_EXTRA_NO_KEYREAD); @@ -7869,7 +8449,7 @@ void dbug_dump_database(rocksdb::DB *const db) { rocksdb::Status ha_rocksdb::get_for_update( Rdb_transaction *const tx, rocksdb::ColumnFamilyHandle *const column_family, - const rocksdb::Slice &key, std::string *const value) const { + const rocksdb::Slice &key, rocksdb::PinnableSlice *const value) const { DBUG_ASSERT(m_lock_rows != RDB_LOCK_NONE); const bool exclusive = m_lock_rows != RDB_LOCK_READ; @@ -7955,6 +8535,9 @@ int ha_rocksdb::get_row_by_rowid(uchar *const buf, const char *const rowid, &m_retrieved_record); } + DBUG_EXECUTE_IF("rocksdb_return_status_corrupted", + dbug_change_status_to_corrupted(&s);); + if (!s.IsNotFound() && !s.ok()) { DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def, m_table_handler)); @@ -7965,9 +8548,7 @@ int ha_rocksdb::get_row_by_rowid(uchar *const buf, const char *const rowid, if (found) { /* If we found the record, but it's expired, pretend we didn't find it. */ if (!skip_ttl_check && m_pk_descr->has_ttl() && - should_hide_ttl_rec(*m_pk_descr, - rocksdb::Slice(&m_retrieved_record.front(), - m_retrieved_record.size()), + should_hide_ttl_rec(*m_pk_descr, m_retrieved_record, tx->m_snapshot_timestamp)) { DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); } @@ -8140,11 +8721,7 @@ int ha_rocksdb::index_first_intern(uchar *const buf) { DBUG_ASSERT(key != nullptr); const Rdb_key_def &kd = *m_key_descr_arr[active_index]; - if (kd.m_is_reverse_cf) { - kd.get_supremum_key(key, &key_size); - } else { - kd.get_infimum_key(key, &key_size); - } + int key_start_matching_bytes = kd.get_first_key(key, &key_size); rocksdb::Slice index_key((const char *)key, key_size); @@ -8155,7 +8732,7 @@ int ha_rocksdb::index_first_intern(uchar *const buf) { // Loop as long as we get a deadlock error AND we end up creating the // snapshot here (i.e. it did not exist prior to this) for (;;) { - setup_scan_iterator(kd, &index_key, false, Rdb_key_def::INDEX_NUMBER_SIZE); + setup_scan_iterator(kd, &index_key, false, key_start_matching_bytes); m_scan_it->Seek(index_key); m_skip_scan_it_next_call = true; @@ -8732,23 +9309,29 @@ int ha_rocksdb::check_and_lock_sk(const uint &key_id, The bloom filter may need to be disabled for this lookup. 
*/ - const bool total_order_seek = !can_use_bloom_filter( - ha_thd(), kd, new_slice, all_parts_used); + uchar min_bound_buf[MAX_KEY_LENGTH]; + uchar max_bound_buf[MAX_KEY_LENGTH]; + rocksdb::Slice min_bound_slice; + rocksdb::Slice max_bound_slice; + const bool total_order_seek = !check_bloom_and_set_bounds( + ha_thd(), kd, new_slice, all_parts_used, + min_bound_buf, + max_bound_buf, + &min_bound_slice, + &max_bound_slice); const bool fill_cache = !THDVAR(ha_thd(), skip_fill_cache); - /* - psergey-todo: we just need to take lock, lookups not needed: - */ - std::string dummy_value; const rocksdb::Status s = - get_for_update(row_info.tx, kd.get_cf(), new_slice, &dummy_value); + get_for_update(row_info.tx, kd.get_cf(), new_slice, nullptr); if (!s.ok() && !s.IsNotFound()) { return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def, m_table_handler); } rocksdb::Iterator *const iter = row_info.tx->get_iterator( - kd.get_cf(), total_order_seek, fill_cache, true /* read current data */, + kd.get_cf(), total_order_seek, fill_cache, + min_bound_slice, max_bound_slice, + true /* read current data */, false /* acquire snapshot */); /* Need to scan the transaction to see if there is a duplicate key. @@ -8791,9 +9374,7 @@ int ha_rocksdb::check_uniqueness_and_lock( m_retrieved_record by check_and_lock_unique_pk(). */ if (is_pk(key_id, table, m_tbl_def) && found && m_pk_descr->has_ttl() && - should_hide_ttl_rec(*m_pk_descr, - rocksdb::Slice(&m_retrieved_record.front(), - m_retrieved_record.size()), + should_hide_ttl_rec(*m_pk_descr, m_retrieved_record, (row_info.tx->m_snapshot_timestamp ? row_info.tx->m_snapshot_timestamp : static_cast(std::time(nullptr))))) { @@ -8842,109 +9423,48 @@ int ha_rocksdb::bulk_load_key(Rdb_transaction *const tx, const Rdb_key_def &kd, const rocksdb::Slice &key, const rocksdb::Slice &value, bool sort) { DBUG_ENTER_FUNC(); - + int res; rocksdb::ColumnFamilyHandle *cf = kd.get_cf(); - DBUG_ASSERT(cf != nullptr); - int res = HA_EXIT_SUCCESS; + // In the case of unsorted inserts, m_sst_info allocated here is not + // used to store the keys. It is still used to indicate when tables + // are switched. 
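For context on the bulk-load path reworked above and below (a hedged sketch, not MyRocks code): Rdb_sst_info ultimately builds SST files outside the memtable and ingests them into the database. The underlying RocksDB primitive is rocksdb::SstFileWriter followed by DB::IngestExternalFile; the file naming and loop below are placeholders, and keys must be added in the column family's comparator order:

    #include <cstdio>
    #include <string>
    #include <vector>

    #include <rocksdb/db.h>
    #include <rocksdb/env.h>
    #include <rocksdb/options.h>
    #include <rocksdb/sst_file_writer.h>

    // Write pre-sorted key/value pairs into one SST file and ingest it.
    // The real code adds per-index file naming, tmp directories and commit
    // bookkeeping on top of this.
    rocksdb::Status bulk_load_sorted(rocksdb::DB *db, const rocksdb::Options &opts,
                                     const std::string &sst_path) {
      rocksdb::SstFileWriter writer(rocksdb::EnvOptions(), opts);
      rocksdb::Status s = writer.Open(sst_path);
      if (!s.ok()) return s;

      // Keys must be strictly increasing in comparator order.
      for (int i = 0; i < 1000 && s.ok(); i++) {
        char key[16], val[16];
        snprintf(key, sizeof(key), "k%08d", i);
        snprintf(val, sizeof(val), "v%08d", i);
        s = writer.Put(key, val);
      }
      if (s.ok()) s = writer.Finish();
      if (!s.ok()) return s;

      // RocksDB moves or copies the finished file into the DB at an
      // appropriate level.
      return db->IngestExternalFile({sst_path},
                                    rocksdb::IngestExternalFileOptions());
    }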
+ if (m_sst_info == nullptr || m_sst_info->is_committed()) { + m_sst_info.reset(new Rdb_sst_info(rdb, m_table_handler->m_table_name, + kd.get_name(), cf, *rocksdb_db_options, + THDVAR(ha_thd(), trace_sst_api))); + res = tx->start_bulk_load(this, m_sst_info); + if (res != HA_EXIT_SUCCESS) { + DBUG_RETURN(res); + } + } + DBUG_ASSERT(m_sst_info); if (sort) { - GL_INDEX_ID kd_gl_id = kd.get_gl_index_id(); - auto it = m_key_merge.find(kd_gl_id); - if (it == m_key_merge.end()) { - m_key_merge.emplace( - std::piecewise_construct, std::make_tuple(kd_gl_id), - std::make_tuple( - thd_rocksdb_tmpdir(), THDVAR(ha_thd(), merge_buf_size), - THDVAR(ha_thd(), merge_combine_read_size), - THDVAR(ha_thd(), merge_tmp_file_removal_delay_ms), cf)); - it = m_key_merge.find(kd_gl_id); - if ((res = it->second.init()) != 0) { - DBUG_RETURN(res); - } + Rdb_index_merge *key_merge; + DBUG_ASSERT(cf != nullptr); - if (m_bulk_load_tx == nullptr) { - tx->start_bulk_load(this); - m_bulk_load_tx = tx; - } + res = tx->get_key_merge(kd.get_gl_index_id(), cf, &key_merge); + if (res == HA_EXIT_SUCCESS) { + res = key_merge->add(key, value); } - res = it->second.add(key, value); } else { - if (!m_sst_info) { - m_sst_info.reset(new Rdb_sst_info(rdb, m_table_handler->m_table_name, - kd.get_name(), cf, *rocksdb_db_options, - THDVAR(ha_thd(), trace_sst_api))); - tx->start_bulk_load(this); - m_bulk_load_tx = tx; - } - - DBUG_ASSERT(m_sst_info); - res = m_sst_info->put(key, value); } DBUG_RETURN(res); } -int ha_rocksdb::finalize_bulk_load() { +int ha_rocksdb::finalize_bulk_load(bool print_client_error) { DBUG_ENTER_FUNC(); - DBUG_ASSERT_IMP(!m_key_merge.empty() || m_sst_info, - m_bulk_load_tx != nullptr); - - /* Skip if there are no possible ongoing bulk loads */ - if (m_key_merge.empty() && !m_sst_info && m_bulk_load_tx == nullptr) { - DBUG_RETURN(HA_EXIT_SUCCESS); - } - int res = HA_EXIT_SUCCESS; - RDB_MUTEX_LOCK_CHECK(m_bulk_load_mutex); - + /* Skip if there are no possible ongoing bulk loads */ if (m_sst_info) { - res = m_sst_info->commit(); + res = m_sst_info->commit(print_client_error); m_sst_info.reset(); } - - if (!m_key_merge.empty()) { - rocksdb::Slice merge_key; - rocksdb::Slice merge_val; - for (auto it = m_key_merge.begin(); it != m_key_merge.end(); it++) { - const std::string &index_name = - ddl_manager.safe_find(it->first)->get_name(); - Rdb_index_merge &rdb_merge = it->second; - Rdb_sst_info sst_info(rdb, m_table_handler->m_table_name, index_name, - rdb_merge.get_cf(), *rocksdb_db_options, - THDVAR(ha_thd(), trace_sst_api)); - - while ((res = rdb_merge.next(&merge_key, &merge_val)) == 0) { - if ((res = sst_info.put(merge_key, merge_val)) != 0) { - break; - } - } - // res == -1 => finished ok; res > 0 => error - if (res <= 0) { - if ((res = sst_info.commit()) != 0) { - break; - } - } - } - m_key_merge.clear(); - - /* - Explicitly tell jemalloc to clean up any unused dirty pages at this point. - See https://reviews.facebook.net/D63723 for more details. 
- */ - purge_all_jemalloc_arenas(); - } - - if (m_bulk_load_tx != nullptr) { - m_bulk_load_tx->end_bulk_load(this); - m_bulk_load_tx = nullptr; - } - - RDB_MUTEX_UNLOCK_CHECK(m_bulk_load_mutex); - DBUG_RETURN(res); } @@ -8974,7 +9494,7 @@ int ha_rocksdb::update_pk(const Rdb_key_def &kd, } if (table->next_number_field) { - update_auto_incr_val(); + update_auto_incr_val_from_field(); } int rc = HA_EXIT_SUCCESS; @@ -9205,6 +9725,50 @@ int ha_rocksdb::update_write_row(const uchar *const old_data, DBUG_RETURN(HA_EXIT_SUCCESS); } +/* + Setting iterator upper/lower bounds for Seek/SeekForPrev. + This makes RocksDB to avoid scanning tombstones outside of + the given key ranges, when prefix_same_as_start=true was not passed + (when prefix bloom filter can not be used). + Inversing upper/lower bound is necessary on reverse order CF. + This covers HA_READ_PREFIX_LAST* case as well. For example, + if given query eq condition was 12 bytes and condition was + 0x0000b3eb003f65c5e78858b8, and if doing HA_READ_PREFIX_LAST, + eq_cond_len was 11 (see calc_eq_cond_len() for details). + If the index was reverse order, upper bound would be + 0x0000b3eb003f65c5e78857, and lower bound would be + 0x0000b3eb003f65c5e78859. These cover given eq condition range. + + @param lower_bound_buf IN Buffer for lower bound + @param upper_bound_buf IN Buffer for upper bound + + @param outer_u +*/ +void ha_rocksdb::setup_iterator_bounds(const Rdb_key_def &kd, + const rocksdb::Slice &eq_cond, + uchar *lower_bound_buf, + uchar *upper_bound_buf, + rocksdb::Slice *out_lower_bound, + rocksdb::Slice *out_upper_bound) { + uint eq_cond_len = eq_cond.size(); + memcpy(upper_bound_buf, eq_cond.data(), eq_cond_len); + kd.successor(upper_bound_buf, eq_cond_len); + memcpy(lower_bound_buf, eq_cond.data(), eq_cond_len); + kd.predecessor(lower_bound_buf, eq_cond_len); + + if (kd.m_is_reverse_cf) { + *out_upper_bound = + rocksdb::Slice((const char *)lower_bound_buf, eq_cond_len); + *out_lower_bound = + rocksdb::Slice((const char *)upper_bound_buf, eq_cond_len); + } else { + *out_upper_bound = + rocksdb::Slice((const char *)upper_bound_buf, eq_cond_len); + *out_lower_bound = + rocksdb::Slice((const char *)lower_bound_buf, eq_cond_len); + } +} + /* Open a cursor */ @@ -9221,7 +9785,11 @@ void ha_rocksdb::setup_scan_iterator(const Rdb_key_def &kd, bool skip_bloom = true; const rocksdb::Slice eq_cond(slice->data(), eq_cond_len); - if (can_use_bloom_filter(ha_thd(), kd, eq_cond, use_all_keys)) { + if (check_bloom_and_set_bounds(ha_thd(), kd, eq_cond, use_all_keys, + m_eq_cond_lower_bound, + m_eq_cond_upper_bound, + &m_eq_cond_lower_bound_slice, + &m_eq_cond_upper_bound_slice)) { skip_bloom = false; } @@ -9261,7 +9829,9 @@ void ha_rocksdb::setup_scan_iterator(const Rdb_key_def &kd, read_opts.snapshot = m_scan_it_snapshot; m_scan_it = rdb->NewIterator(read_opts, kd.get_cf()); } else { - m_scan_it = tx->get_iterator(kd.get_cf(), skip_bloom, fill_cache); + m_scan_it = tx->get_iterator(kd.get_cf(), skip_bloom, fill_cache, + m_eq_cond_lower_bound_slice, + m_eq_cond_upper_bound_slice); } m_scan_it_skips_bloom = skip_bloom; } @@ -9280,14 +9850,12 @@ void ha_rocksdb::release_scan_iterator() { void ha_rocksdb::setup_iterator_for_rnd_scan() { uint key_size; - if (m_pk_descr->m_is_reverse_cf) - m_pk_descr->get_supremum_key(m_pk_packed_tuple, &key_size); - else - m_pk_descr->get_infimum_key(m_pk_packed_tuple, &key_size); + int key_start_matching_bytes = m_pk_descr->get_first_key(m_pk_packed_tuple, &key_size); rocksdb::Slice table_key((const char 
*)m_pk_packed_tuple, key_size); - setup_scan_iterator(*m_pk_descr, &table_key); + setup_scan_iterator(*m_pk_descr, &table_key, false, + key_start_matching_bytes); m_scan_it->Seek(table_key); m_skip_scan_it_next_call = true; } @@ -9359,7 +9927,7 @@ int ha_rocksdb::rnd_next_with_direction(uchar *const buf, bool move_forward) { #ifdef MARIAROCKS_NOT_YET stats.rows_requested++; #endif - if (!m_scan_it || !m_scan_it->Valid()) { + if (!m_scan_it || !is_valid(m_scan_it)) { /* We can get here when SQL layer has called @@ -9381,7 +9949,7 @@ int ha_rocksdb::rnd_next_with_direction(uchar *const buf, bool move_forward) { m_scan_it->Prev(); /* this call cannot fail */ } - if (!m_scan_it->Valid()) { + if (!is_valid(m_scan_it)) { rc = HA_ERR_END_OF_FILE; break; } @@ -9480,6 +10048,7 @@ int ha_rocksdb::index_init(uint idx, bool sorted) { DBUG_ASSERT(tx != nullptr); setup_read_decoders(); + m_start_range= NULL; if (!m_keyread_only) { m_key_descr_arr[idx]->get_lookup_bitmap(table, &m_lookup_bitmap); @@ -9508,6 +10077,7 @@ int ha_rocksdb::index_end() { active_index = MAX_KEY; in_range_check_pushed_down = FALSE; + m_start_range= NULL; DBUG_RETURN(HA_EXIT_SUCCESS); } @@ -9690,6 +10260,10 @@ int ha_rocksdb::info(uint flag) { uint64_t memtableCount; uint64_t memtableSize; + // the stats below are calculated from skiplist wich is a probablistic + // data structure, so the results vary between test runs + // it also can return 0 for quite a large tables which means that + // cardinality for memtable only indxes will be reported as 0 rdb->GetApproximateMemTableStats(m_pk_descr->get_cf(), r, &memtableCount, &memtableSize); @@ -9723,7 +10297,6 @@ int ha_rocksdb::info(uint flag) { if (flag & HA_STATUS_CONST) { ref_length = m_pk_descr->max_storage_fmt_length(); - // TODO: Needs to reimplement after having real index statistics for (uint i = 0; i < m_tbl_def->m_key_count; i++) { if (is_hidden_pk(i, table, m_tbl_def)) { continue; @@ -10008,20 +10581,6 @@ void ha_rocksdb::read_thd_vars(THD *const thd) { m_checksums_pct = THDVAR(thd, checksums_pct); } -const char *ha_rocksdb::thd_rocksdb_tmpdir() { - const char *tmp_dir = THDVAR(ha_thd(), tmpdir); - - /* - We want to treat an empty string as nullptr, in these cases DDL operations - will use the default --tmpdir passed to mysql instead. - */ - if (tmp_dir != nullptr && *tmp_dir == '\0') { - tmp_dir = nullptr; - } - - return (tmp_dir); -} - /** @return @@ -10064,22 +10623,24 @@ int ha_rocksdb::external_lock(THD *const thd, int lock_type) { } if (lock_type == F_UNLCK) { - Rdb_transaction *const tx = get_or_create_tx(thd); + Rdb_transaction *const tx = get_tx_from_thd(thd); - tx->io_perf_end_and_record(&m_io_perf); - tx->m_n_mysql_tables_in_use--; - if (tx->m_n_mysql_tables_in_use == 0 && - !my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { - /* - Do like InnoDB: when we get here, it's time to commit a - single-statement transaction. + if (tx) { + tx->io_perf_end_and_record(&m_io_perf); + tx->m_n_mysql_tables_in_use--; + if (tx->m_n_mysql_tables_in_use == 0 && + !my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + /* + Do like InnoDB: when we get here, it's time to commit a + single-statement transaction. - If the statement involved multiple tables, this code will be executed - for each of them, but that's ok because non-first tx->commit() calls - will be no-ops. 
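Earlier in this hunk series, info() notes that GetApproximateMemTableStats() is backed by the skip list and is therefore probabilistic: estimates vary between runs and can come back as 0 even for sizeable tables. A hedged sketch of the call itself (range endpoints and printing are placeholders, not patch code):

    #include <cstdint>
    #include <cstdio>

    #include <rocksdb/db.h>

    // Rough count of keys/bytes of [start, end) currently held in the
    // memtables of the default column family. The numbers are derived from
    // skip-list structure, not from an exact scan.
    void print_memtable_estimate(rocksdb::DB *db, const rocksdb::Slice &start,
                                 const rocksdb::Slice &end) {
      uint64_t count = 0;
      uint64_t size = 0;
      rocksdb::Range r(start, end);
      db->GetApproximateMemTableStats(db->DefaultColumnFamily(), r, &count, &size);
      printf("~%llu keys, ~%llu bytes in memtables\n",
             (unsigned long long)count, (unsigned long long)size);
    }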
- */ - if (tx->commit_or_rollback()) { - res = HA_ERR_INTERNAL_ERROR; + If the statement involved multiple tables, this code will be executed + for each of them, but that's ok because non-first tx->commit() calls + will be no-ops. + */ + if (tx->commit_or_rollback()) { + res = HA_ERR_INTERNAL_ERROR; + } } } } else { @@ -10213,6 +10774,13 @@ ha_rocksdb::get_range(const int &i, return myrocks::get_range(*m_key_descr_arr[i], buf); } +/* + This function is called with total_order_seek=true, but + upper/lower bound setting is not necessary. + Boundary set is useful when there is no matching key, + but in drop_index_thread's case, it means index is marked as removed, + so no further seek will happen for the index id. +*/ static bool is_myrocks_index_empty( rocksdb::ColumnFamilyHandle *cfh, const bool is_reverse_cf, const rocksdb::ReadOptions &read_opts, @@ -10283,7 +10851,7 @@ void Rdb_drop_index_thread::run() { "from cf id %u. MyRocks data dictionary may " "get corrupted.", d.cf_id); - abort_with_stack_traces(); + abort(); } rocksdb::ColumnFamilyHandle *cfh = cf_manager.get_cf(d.cf_id); DBUG_ASSERT(cfh); @@ -10372,6 +10940,7 @@ int ha_rocksdb::delete_table(const char *const tablename) { the persistent data dictionary). */ ddl_manager.remove(tbl, batch, true); + int err = dict_manager.commit(batch); if (err) { DBUG_RETURN(err); @@ -10402,10 +10971,16 @@ int ha_rocksdb::remove_rows(Rdb_tbl_def *const tbl) { const Rdb_key_def &kd = *tbl->m_key_descr_arr[i]; kd.get_infimum_key(reinterpret_cast(key_buf), &key_len); rocksdb::ColumnFamilyHandle *cf = kd.get_cf(); - + const rocksdb::Slice table_key(key_buf, key_len); + setup_iterator_bounds(kd, table_key, + m_eq_cond_lower_bound, + m_eq_cond_upper_bound, + &m_eq_cond_lower_bound_slice, + &m_eq_cond_upper_bound_slice); + opts.iterate_lower_bound = &m_eq_cond_lower_bound_slice; + opts.iterate_upper_bound = &m_eq_cond_upper_bound_slice; std::unique_ptr it(rdb->NewIterator(opts, cf)); - const rocksdb::Slice table_key(key_buf, key_len); it->Seek(table_key); while (it->Valid()) { const rocksdb::Slice key = it->key(); @@ -10484,6 +11059,7 @@ int ha_rocksdb::rename_table(const char *const from, const char *const to) { const std::unique_ptr wb = dict_manager.begin(); rocksdb::WriteBatch *const batch = wb.get(); dict_manager.lock(); + if (ddl_manager.rename(from_str, to_str, batch)) { rc = HA_ERR_NO_SUCH_TABLE; } else { @@ -10534,7 +11110,7 @@ int ha_rocksdb::extra(enum ha_extra_function operation) { If the table has blobs, then they are part of m_retrieved_record. This call invalidates them. 
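On setup_iterator_bounds() and the remove_rows() change above (illustrative, not patch code): the bounds are just the equal-condition prefix advanced by one for the upper bound and decremented by one for the lower bound, byte-wise with carry and borrow, swapped for reverse-ordered column families, and then passed to ReadOptions::iterate_lower_bound / iterate_upper_bound so the iterator never walks tombstones outside the range. A standalone sketch of the byte arithmetic, assuming Rdb_key_def::successor/predecessor behave this way on the prefix:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // In-place "prefix + 1": increment the last byte, carrying into earlier
    // bytes (an all-0xFF prefix wraps to zeroes, a case the bounds code does
    // not rely on for a meaningful limit).
    static void successor(std::vector<uint8_t> &key) {
      for (size_t i = key.size(); i-- > 0;) {
        if (++key[i] != 0x00) return;  // no carry needed
      }
    }

    // In-place "prefix - 1": decrement the last byte, borrowing as needed.
    static void predecessor(std::vector<uint8_t> &key) {
      for (size_t i = key.size(); i-- > 0;) {
        if (key[i]-- != 0x00) return;  // no borrow needed
      }
    }

    int main() {
      std::vector<uint8_t> upper = {0x00, 0xb3, 0xeb, 0xff};
      std::vector<uint8_t> lower = upper;
      successor(upper);    // 00 b3 ec 00: every key with the prefix sorts below it
      predecessor(lower);  // 00 b3 eb fe: every key with the prefix sorts above it
      for (uint8_t b : upper) printf("%02x ", b);
      printf("\n");
      for (uint8_t b : lower) printf("%02x ", b);
      printf("\n");
      return 0;
    }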
*/ - m_retrieved_record.clear(); + m_retrieved_record.Reset(); break; default: break; @@ -10702,24 +11278,21 @@ int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd, std::unordered_map> ranges; std::unordered_set ids_to_check; - std::unordered_map ids_to_keyparts; std::vector buf(table_arg->s->keys * 2 * Rdb_key_def::INDEX_NUMBER_SIZE); + std::unordered_map stats; for (uint i = 0; i < table_arg->s->keys; i++) { const auto bufp = &buf[i * 2 * Rdb_key_def::INDEX_NUMBER_SIZE]; const Rdb_key_def &kd = *m_key_descr_arr[i]; + const GL_INDEX_ID index_id = kd.get_gl_index_id(); ranges[kd.get_cf()].push_back(get_range(i, bufp)); - ids_to_check.insert(kd.get_gl_index_id()); - ids_to_keyparts[kd.get_gl_index_id()] = kd.get_key_parts(); - } - // for analyze statements, force flush on memtable to get accurate cardinality - Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); - if (thd != nullptr && THDVAR(thd, flush_memtable_on_analyze) && - !rocksdb_pause_background_work) { - for (auto it : ids_to_check) { - rdb->Flush(rocksdb::FlushOptions(), cf_manager.get_cf(it.cf_id)); - } + ids_to_check.insert(index_id); + // Initialize the stats to 0. If there are no files that contain + // this gl_index_id, then 0 should be stored for the cached stats. + stats[index_id] = Rdb_index_stats(index_id); + DBUG_ASSERT(kd.get_key_parts() > 0); + stats[index_id].m_distinct_keys_per_prefix.resize(kd.get_key_parts()); } // get RocksDB table properties for these ranges @@ -10736,15 +11309,6 @@ int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd, } int num_sst = 0; - // group stats per index id - std::unordered_map stats; - for (const auto &it : ids_to_check) { - // Initialize the stats to 0. If there are no files that contain - // this gl_index_id, then 0 should be stored for the cached stats. 
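As an illustration of what calculate_stats() above is accumulating (standalone sketch, not MyRocks code): m_distinct_keys_per_prefix records, for every prefix length of a composite index, how many distinct prefixes exist; rows divided by that count gives the records-per-key estimate the optimizer consumes. Over an already sorted key list the counting reduces to spotting where a prefix changes:

    #include <cstdio>
    #include <string>
    #include <vector>

    // For keys of a composite index in sorted order, count the distinct
    // prefixes of each length (1..key_parts key parts).
    static std::vector<size_t>
    distinct_prefix_counts(const std::vector<std::vector<std::string>> &keys,
                           size_t key_parts) {
      std::vector<size_t> distinct(key_parts, 0);
      for (size_t i = 0; i < keys.size(); i++) {
        for (size_t p = 0; p < key_parts; p++) {
          // A new distinct prefix starts whenever any of the first p+1 parts
          // differs from the previous key.
          bool changed = (i == 0);
          for (size_t q = 0; !changed && q <= p; q++) {
            changed = keys[i][q] != keys[i - 1][q];
          }
          if (changed) distinct[p]++;
        }
      }
      return distinct;
    }

    int main() {
      // Keys (a,1) (a,2) (b,1): two distinct first parts, three distinct pairs.
      std::vector<std::vector<std::string>> keys = {{"a", "1"}, {"a", "2"}, {"b", "1"}};
      for (size_t n : distinct_prefix_counts(keys, 2)) printf("%zu ", n);
      printf("\n");  // prints: 2 3
      return 0;
    }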
- stats[it] = Rdb_index_stats(it); - DBUG_ASSERT(ids_to_keyparts.count(it) > 0); - stats[it].m_distinct_keys_per_prefix.resize(ids_to_keyparts[it]); - } for (const auto &it : props) { std::vector sst_stats; Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats); @@ -10771,6 +11335,53 @@ int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd, num_sst++; } + // calculate memtable cardinality + Rdb_tbl_card_coll cardinality_collector(rocksdb_table_stats_sampling_pct); + auto read_opts = rocksdb::ReadOptions(); + read_opts.read_tier = rocksdb::ReadTier::kMemtableTier; + for (uint i = 0; i < table_arg->s->keys; i++) { + const Rdb_key_def &kd = *m_key_descr_arr[i]; + Rdb_index_stats &stat = stats[kd.get_gl_index_id()]; + + uchar r_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]; + auto r = get_range(i, r_buf); + uint64_t memtableCount; + uint64_t memtableSize; + rdb->GetApproximateMemTableStats(kd.get_cf(), r, &memtableCount, + &memtableSize); + if (memtableCount < (uint64_t)stat.m_rows / 10) { + // skip tables that already have enough stats from SST files to reduce + // overhead and avoid degradation of big tables stats by sampling from + // relatively tiny (less than 10% of full data set) memtable dataset + continue; + } + + std::unique_ptr it = std::unique_ptr( + rdb->NewIterator(read_opts, kd.get_cf())); + + uchar *first_key; + uint key_size; + if (is_pk(i, table, m_tbl_def)) { + first_key = m_pk_packed_tuple; + } else { + first_key = m_sk_packed_tuple; + } + kd.get_first_key(first_key, &key_size); + rocksdb::Slice first_index_key((const char *)first_key, key_size); + + cardinality_collector.Reset(); + for (it->Seek(first_index_key); is_valid(it.get()); it->Next()) { + const rocksdb::Slice key = it->key(); + if (!kd.covers_key(key)) { + break; // end of this index + } + stat.m_rows++; + + cardinality_collector.ProcessKey(key, &kd, &stat); + } + cardinality_collector.AdjustStats(&stat); + } + // set and persist new stats ddl_manager.set_stats(stats); ddl_manager.persist_stats(true); @@ -10818,32 +11429,73 @@ void ha_rocksdb::get_auto_increment(ulonglong off, ulonglong inc, off = 1; } - longlong new_val; + Field *field; + ulonglong new_val, max_val; + field = table->key_info[table->s->next_number_index].key_part[0].field; + max_val = rdb_get_int_col_max_value(field); // Local variable reference to simplify code below - std::atomic &auto_incr = m_tbl_def->m_auto_incr_val; + auto &auto_incr = m_tbl_def->m_auto_incr_val; - if (inc == 1 && off == 1) { + if (inc == 1) { + DBUG_ASSERT(off == 1); // Optimization for the standard case where we are always simply // incrementing from the last position // Use CAS operation in a loop to make sure automically get the next auto - // increment value while ensuring tha we don't wrap around to a negative + // increment value while ensuring that we don't wrap around to a negative // number. + // + // We set auto_incr to the min of max_val and new_val + 1. This means that + // if we're at the maximum, we should be returning the same value for + // multiple rows, resulting in duplicate key errors (as expected). + // + // If we return values greater than the max, the SQL layer will "truncate" + // the value anyway, but it means that we store invalid values into + // auto_incr that will be visible in SHOW CREATE TABLE. 
new_val = auto_incr; - while (new_val != std::numeric_limits::max()) { - if (auto_incr.compare_exchange_weak(new_val, new_val + 1)) { + while (new_val != std::numeric_limits::max()) { + if (auto_incr.compare_exchange_weak(new_val, + std::min(new_val + 1, max_val))) { break; } } } else { - // The next value can be more complicated if either `inc` or 'off' is not 1 - longlong last_val = auto_incr; + // The next value can be more complicated if either 'inc' or 'off' is not 1 + ulonglong last_val = auto_incr; // Loop until we can correctly update the atomic value do { - if (((last_val - off) / inc) == - (std::numeric_limits::max() - off) / inc) { + DBUG_ASSERT(last_val > 0); + // Calculate the next value in the auto increment series: offset + // + N * increment where N is 0, 1, 2, ... + // + // For further information please visit: + // http://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html + // + // The following is confusing so here is an explanation: + // To get the next number in the sequence above you subtract out the + // offset, calculate the next sequence (N * increment) and then add the + // offset back in. + // + // The additions are rearranged to avoid overflow. The following is + // equivalent to (last_val - 1 + inc - off) / inc. This uses the fact + // that (a+b)/c = a/c + b/c + (a%c + b%c)/c. To show why: + // + // (a+b)/c + // = (a - a%c + a%c + b - b%c + b%c) / c + // = (a - a%c) / c + (b - b%c) / c + (a%c + b%c) / c + // = a/c + b/c + (a%c + b%c) / c + // + // Now, substitute a = last_val - 1, b = inc - off, c = inc to get the + // following statement. + ulonglong n = + (last_val - 1) / inc + ((last_val - 1) % inc + inc - off) / inc; + + // Check if n * inc + off will overflow. This can only happen if we have + // an UNSIGNED BIGINT field. + if (n > (std::numeric_limits::max() - off) / inc) { + DBUG_ASSERT(max_val == std::numeric_limits::max()); // The 'last_val' value is already equal to or larger than the largest // value in the sequence. Continuing would wrap around (technically // the behavior would be undefined). What should we do? @@ -10855,31 +11507,30 @@ void ha_rocksdb::get_auto_increment(ulonglong off, ulonglong inc, // may not be in our sequence, but it is guaranteed to be equal // to or larger than any other value already inserted. // - // For now I'm going to take option @2. - new_val = std::numeric_limits::max(); + // For now I'm going to take option 2. + // + // Returning ULLONG_MAX from get_auto_increment will cause the SQL + // layer to fail with ER_AUTOINC_READ_FAILED. This means that due to + // the SE API for get_auto_increment, inserts will fail with + // ER_AUTOINC_READ_FAILED if the column is UNSIGNED BIGINT, but + // inserts will fail with ER_DUP_ENTRY for other types (or no failure + // if the column is in a non-unique SK). + new_val = std::numeric_limits::max(); auto_incr = new_val; // Store the largest value into auto_incr break; } - // Calculate the next value in the auto increment series: - // offset + N * increment - // where N is 0, 1, 2, ... - // - // For further information please visit: - // http://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html - // - // The following is confusing so here is an explanation: - // To get the next number in the sequence above you subtract out - // the offset, calculate the next sequence (N * increment) and then add - // the offset back in. 
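A standalone worked example of the rewritten arithmetic above (the function name is made up): it yields the smallest member of the series off + N*inc that is >= the current counter, using the rearranged division so the intermediate last_val - 1 + inc - off is never formed in one step, which near the top of an unsigned 64-bit range could overflow; the separate n > (max_val - off) / inc test in the patch guards the final multiplication.

    #include <cassert>
    #include <cstdint>

    // Smallest value of the form off + N*inc (N = 0, 1, 2, ...) that is
    // >= last_val, assuming 1 <= off <= inc and last_val >= 1. Equivalent to
    // (last_val - 1 + inc - off) / inc * inc + off, written so that the sum is
    // never formed directly. Overflow of n*inc + off is checked by the caller
    // in the real code.
    static uint64_t next_in_series(uint64_t last_val, uint64_t inc, uint64_t off) {
      uint64_t n = (last_val - 1) / inc + ((last_val - 1) % inc + inc - off) / inc;
      return n * inc + off;
    }

    int main() {
      // Series 3, 8, 13, ... (inc=5, off=3): the first member >= 7 is 8.
      assert(next_in_series(7, 5, 3) == 8);
      // Values already on the series stay put.
      assert(next_in_series(13, 5, 3) == 13);
      // inc == 1, off == 1 degenerates to "return last_val" (the fast path).
      assert(next_in_series(42, 1, 1) == 42);
      return 0;
    }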
- new_val = (((last_val - off) + (inc - 1)) / inc) * inc + off; + new_val = n * inc + off; // Attempt to store the new value (plus 1 since m_auto_incr_val contains // the next available value) into the atomic value. If the current // value no longer matches what we have in 'last_val' this will fail and // we will repeat the loop (`last_val` will automatically get updated // with the current value). - } while (!auto_incr.compare_exchange_weak(last_val, new_val + 1)); + // + // See above explanation for inc == 1 for why we use std::min. + } while (!auto_incr.compare_exchange_weak(last_val, + std::min(new_val + 1, max_val))); } *first_value = new_val; @@ -10956,7 +11607,8 @@ my_core::enum_alter_inplace_result ha_rocksdb::check_if_supported_inplace_alter( ALTER_DROP_UNIQUE_INDEX | ALTER_ADD_NON_UNIQUE_NON_PRIM_INDEX | ALTER_PARTITIONED | - ALTER_ADD_UNIQUE_INDEX)) { + ALTER_ADD_UNIQUE_INDEX | + ALTER_CHANGE_CREATE_OPTION)) { DBUG_RETURN(my_core::HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -10967,16 +11619,23 @@ my_core::enum_alter_inplace_result ha_rocksdb::check_if_supported_inplace_alter( DBUG_RETURN(my_core::HA_ALTER_INPLACE_NOT_SUPPORTED); } - DBUG_RETURN(my_core::HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE); + /* We only support changing auto_increment for table options. */ + if ((ha_alter_info->handler_flags & ALTER_CHANGE_CREATE_OPTION) && + !(ha_alter_info->create_info->used_fields & HA_CREATE_USED_AUTO)) { + DBUG_RETURN(my_core::HA_ALTER_INPLACE_NOT_SUPPORTED); + } + + /* FIXME: MDEV-16099 Use alter algorithm=nocopy or algorithm=instant + for non-InnoDB engine */ + DBUG_RETURN(my_core::HA_ALTER_INPLACE_COPY_LOCK); } /** Allows the storage engine to update internal structures with concurrent writes blocked. If check_if_supported_inplace_alter() returns - HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE or - HA_ALTER_INPLACE_SHARED_AFTER_PREPARE, this function is called with - exclusive lock otherwise the same level of locking as for - inplace_alter_table() will be used. + HA_ALTER_INPLACE_COPY_NO_LOCK or HA_ALTER_INPLACE_COPY_LOCK. + this function is called with exclusive lock otherwise the same level of + locking as for inplace_alter_table() will be used. 
@note Storage engines are responsible for reporting any errors by calling my_error()/print_error() @@ -11006,111 +11665,122 @@ bool ha_rocksdb::prepare_inplace_alter_table( DBUG_ASSERT(altered_table != nullptr); DBUG_ASSERT(ha_alter_info != nullptr); - const uint old_n_keys = m_tbl_def->m_key_count; + Rdb_tbl_def *new_tdef = nullptr; + std::shared_ptr *old_key_descr = nullptr; + std::shared_ptr *new_key_descr = nullptr; + uint old_n_keys = m_tbl_def->m_key_count; uint new_n_keys = altered_table->s->keys; - - if (has_hidden_pk(altered_table)) { - new_n_keys += 1; - } - - const TABLE *const old_table = table; - std::shared_ptr *const old_key_descr = - m_tbl_def->m_key_descr_arr; - std::shared_ptr *const new_key_descr = - new std::shared_ptr[new_n_keys]; - - Rdb_tbl_def *const new_tdef = new Rdb_tbl_def(m_tbl_def->full_tablename()); - new_tdef->m_key_descr_arr = new_key_descr; - new_tdef->m_key_count = new_n_keys; - new_tdef->m_auto_incr_val = - m_tbl_def->m_auto_incr_val.load(std::memory_order_relaxed); - new_tdef->m_hidden_pk_val = - m_tbl_def->m_hidden_pk_val.load(std::memory_order_relaxed); - - if (ha_alter_info->handler_flags & - (ALTER_DROP_NON_UNIQUE_NON_PRIM_INDEX | - ALTER_DROP_UNIQUE_INDEX | - ALTER_ADD_NON_UNIQUE_NON_PRIM_INDEX | - ALTER_ADD_UNIQUE_INDEX) && - create_key_defs(altered_table, new_tdef, table, m_tbl_def)) { - /* Delete the new key descriptors */ - delete[] new_key_descr; - - /* - Explicitly mark as nullptr so we don't accidentally remove entries - from data dictionary on cleanup (or cause double delete[]). - */ - new_tdef->m_key_descr_arr = nullptr; - delete new_tdef; - - my_error(ER_KEY_CREATE_DURING_ALTER, MYF(0)); - DBUG_RETURN(HA_EXIT_FAILURE); - } - std::unordered_set> added_indexes; std::unordered_set dropped_index_ids; + uint n_dropped_keys = 0; + uint n_added_keys = 0; + ulonglong max_auto_incr = 0; - uint i; - uint j; + if (ha_alter_info->handler_flags & + (ALTER_DROP_NON_UNIQUE_NON_PRIM_INDEX | + ALTER_DROP_UNIQUE_INDEX | + ALTER_ADD_NON_UNIQUE_NON_PRIM_INDEX | + ALTER_ADD_UNIQUE_INDEX)) { - /* Determine which(if any) key definition(s) need to be dropped */ - for (i = 0; i < ha_alter_info->index_drop_count; i++) { - const KEY *const dropped_key = ha_alter_info->index_drop_buffer[i]; - for (j = 0; j < old_n_keys; j++) { - const KEY *const old_key = - &old_table->key_info[old_key_descr[j]->get_keyno()]; - - if (!compare_keys(old_key, dropped_key)) { - dropped_index_ids.insert(old_key_descr[j]->get_gl_index_id()); - break; - } + if (has_hidden_pk(altered_table)) { + new_n_keys += 1; } - } - /* Determine which(if any) key definitions(s) need to be added */ - int identical_indexes_found = 0; - for (i = 0; i < ha_alter_info->index_add_count; i++) { - const KEY *const added_key = - &ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]]; - for (j = 0; j < new_n_keys; j++) { - const KEY *const new_key = - &altered_table->key_info[new_key_descr[j]->get_keyno()]; - if (!compare_keys(new_key, added_key)) { - /* - Check for cases where an 'identical' index is being dropped and - re-added in a single ALTER statement. Turn this into a no-op as the - index has not changed. + const TABLE *const old_table = table; + old_key_descr = m_tbl_def->m_key_descr_arr; + new_key_descr = new std::shared_ptr[new_n_keys]; - E.G. 
Unique index -> non-unique index requires no change + new_tdef = new Rdb_tbl_def(m_tbl_def->full_tablename()); + new_tdef->m_key_descr_arr = new_key_descr; + new_tdef->m_key_count = new_n_keys; + new_tdef->m_auto_incr_val = + m_tbl_def->m_auto_incr_val.load(std::memory_order_relaxed); + new_tdef->m_hidden_pk_val = + m_tbl_def->m_hidden_pk_val.load(std::memory_order_relaxed); - Note that cases where the index name remains the same but the - key-parts are changed is already handled in create_inplace_key_defs. - In these cases the index needs to be rebuilt. + if (create_key_defs(altered_table, new_tdef, table, m_tbl_def)) { + /* Delete the new key descriptors */ + delete[] new_key_descr; + + /* + Explicitly mark as nullptr so we don't accidentally remove entries + from data dictionary on cleanup (or cause double delete[]). */ - if (dropped_index_ids.count(new_key_descr[j]->get_gl_index_id())) { - dropped_index_ids.erase(new_key_descr[j]->get_gl_index_id()); - identical_indexes_found++; - } else { - added_indexes.insert(new_key_descr[j]); - } + new_tdef->m_key_descr_arr = nullptr; + delete new_tdef; - break; + my_error(ER_KEY_CREATE_DURING_ALTER, MYF(0)); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + uint i; + uint j; + + /* Determine which(if any) key definition(s) need to be dropped */ + for (i = 0; i < ha_alter_info->index_drop_count; i++) { + const KEY *const dropped_key = ha_alter_info->index_drop_buffer[i]; + for (j = 0; j < old_n_keys; j++) { + const KEY *const old_key = + &old_table->key_info[old_key_descr[j]->get_keyno()]; + + if (!compare_keys(old_key, dropped_key)) { + dropped_index_ids.insert(old_key_descr[j]->get_gl_index_id()); + break; + } } } - } - const uint n_dropped_keys = - ha_alter_info->index_drop_count - identical_indexes_found; - const uint n_added_keys = - ha_alter_info->index_add_count - identical_indexes_found; - DBUG_ASSERT(dropped_index_ids.size() == n_dropped_keys); - DBUG_ASSERT(added_indexes.size() == n_added_keys); - DBUG_ASSERT(new_n_keys == (old_n_keys - n_dropped_keys + n_added_keys)); + /* Determine which(if any) key definitions(s) need to be added */ + int identical_indexes_found = 0; + for (i = 0; i < ha_alter_info->index_add_count; i++) { + const KEY *const added_key = + &ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]]; + for (j = 0; j < new_n_keys; j++) { + const KEY *const new_key = + &altered_table->key_info[new_key_descr[j]->get_keyno()]; + if (!compare_keys(new_key, added_key)) { + /* + Check for cases where an 'identical' index is being dropped and + re-added in a single ALTER statement. Turn this into a no-op as the + index has not changed. + + E.G. Unique index -> non-unique index requires no change + + Note that cases where the index name remains the same but the + key-parts are changed is already handled in create_inplace_key_defs. + In these cases the index needs to be rebuilt. 
+ */ + if (dropped_index_ids.count(new_key_descr[j]->get_gl_index_id())) { + dropped_index_ids.erase(new_key_descr[j]->get_gl_index_id()); + identical_indexes_found++; + } else { + added_indexes.insert(new_key_descr[j]); + } + + break; + } + } + } + + n_dropped_keys = ha_alter_info->index_drop_count - identical_indexes_found; + n_added_keys = ha_alter_info->index_add_count - identical_indexes_found; + DBUG_ASSERT(dropped_index_ids.size() == n_dropped_keys); + DBUG_ASSERT(added_indexes.size() == n_added_keys); + DBUG_ASSERT(new_n_keys == (old_n_keys - n_dropped_keys + n_added_keys)); + } + if (ha_alter_info->handler_flags & ALTER_CHANGE_CREATE_OPTION) { + if (!new_tdef) { + new_tdef = m_tbl_def; + } + if (table->found_next_number_field) { + max_auto_incr = load_auto_incr_value_from_index(); + } + } ha_alter_info->handler_ctx = new Rdb_inplace_alter_ctx( new_tdef, old_key_descr, new_key_descr, old_n_keys, new_n_keys, - added_indexes, dropped_index_ids, n_added_keys, n_dropped_keys); - + added_indexes, dropped_index_ids, n_added_keys, n_dropped_keys, + max_auto_incr); DBUG_RETURN(HA_EXIT_SUCCESS); } @@ -11264,9 +11934,10 @@ int ha_rocksdb::inplace_populate_sk( bool is_unique_index = new_table_arg->key_info[index->get_keyno()].flags & HA_NOSAME; - Rdb_index_merge rdb_merge( - thd_rocksdb_tmpdir(), rdb_merge_buf_size, rdb_merge_combine_read_size, - rdb_merge_tmp_file_removal_delay, index->get_cf()); + Rdb_index_merge rdb_merge(tx->get_rocksdb_tmpdir(), rdb_merge_buf_size, + rdb_merge_combine_read_size, + rdb_merge_tmp_file_removal_delay, + index->get_cf()); if ((res = rdb_merge.init())) { DBUG_RETURN(res); @@ -11570,6 +12241,28 @@ bool ha_rocksdb::commit_inplace_alter_table( rdb_drop_idx_thread.signal(); } + if (ha_alter_info->handler_flags & ALTER_CHANGE_CREATE_OPTION) { + const std::unique_ptr wb = dict_manager.begin(); + rocksdb::WriteBatch *const batch = wb.get(); + std::unordered_set create_index_ids; + + ulonglong auto_incr_val = ha_alter_info->create_info->auto_increment_value; + + for (inplace_alter_handler_ctx **pctx = ctx_array; *pctx; pctx++) { + Rdb_inplace_alter_ctx *const ctx = + static_cast(*pctx); + auto_incr_val = std::max(auto_incr_val, ctx->m_max_auto_incr); + dict_manager.put_auto_incr_val( + batch, ctx->m_new_tdef->get_autoincr_gl_index_id(), auto_incr_val, + true /* overwrite */); + ctx->m_new_tdef->m_auto_incr_val = auto_incr_val; + } + + if (dict_manager.commit(batch)) { + DBUG_ASSERT(0); + } + } + DBUG_RETURN(HA_EXIT_SUCCESS); } @@ -11597,15 +12290,29 @@ struct rocksdb_status_counters_t { uint64_t block_cache_miss; uint64_t block_cache_hit; uint64_t block_cache_add; + uint64_t block_cache_add_failures; uint64_t block_cache_index_miss; uint64_t block_cache_index_hit; + uint64_t block_cache_index_add; + uint64_t block_cache_index_bytes_insert; + uint64_t block_cache_index_bytes_evict; uint64_t block_cache_filter_miss; uint64_t block_cache_filter_hit; + uint64_t block_cache_filter_add; + uint64_t block_cache_filter_bytes_insert; + uint64_t block_cache_filter_bytes_evict; + uint64_t block_cache_bytes_read; + uint64_t block_cache_bytes_write; + uint64_t block_cache_data_bytes_insert; uint64_t block_cache_data_miss; uint64_t block_cache_data_hit; + uint64_t block_cache_data_add; uint64_t bloom_filter_useful; uint64_t memtable_hit; uint64_t memtable_miss; + uint64_t get_hit_l0; + uint64_t get_hit_l1; + uint64_t get_hit_l2_and_up; uint64_t compaction_key_drop_new; uint64_t compaction_key_drop_obsolete; uint64_t compaction_key_drop_user; @@ -11614,11 +12321,17 @@ struct 
rocksdb_status_counters_t { uint64_t number_keys_updated; uint64_t bytes_written; uint64_t bytes_read; + uint64_t number_db_seek; + uint64_t number_db_seek_found; + uint64_t number_db_next; + uint64_t number_db_next_found; + uint64_t number_db_prev; + uint64_t number_db_prev_found; + uint64_t iter_bytes_read; uint64_t no_file_closes; uint64_t no_file_opens; uint64_t no_file_errors; uint64_t stall_micros; - uint64_t rate_limit_delay_millis; uint64_t num_iterators; uint64_t number_multiget_get; uint64_t number_multiget_keys_read; @@ -11651,15 +12364,29 @@ static rocksdb_status_counters_t rocksdb_status_counters; DEF_SHOW_FUNC(block_cache_miss, BLOCK_CACHE_MISS) DEF_SHOW_FUNC(block_cache_hit, BLOCK_CACHE_HIT) DEF_SHOW_FUNC(block_cache_add, BLOCK_CACHE_ADD) +DEF_SHOW_FUNC(block_cache_add_failures, BLOCK_CACHE_ADD_FAILURES) DEF_SHOW_FUNC(block_cache_index_miss, BLOCK_CACHE_INDEX_MISS) DEF_SHOW_FUNC(block_cache_index_hit, BLOCK_CACHE_INDEX_HIT) +DEF_SHOW_FUNC(block_cache_index_add, BLOCK_CACHE_INDEX_ADD) +DEF_SHOW_FUNC(block_cache_index_bytes_insert, BLOCK_CACHE_INDEX_BYTES_INSERT) +DEF_SHOW_FUNC(block_cache_index_bytes_evict, BLOCK_CACHE_INDEX_BYTES_EVICT) DEF_SHOW_FUNC(block_cache_filter_miss, BLOCK_CACHE_FILTER_MISS) DEF_SHOW_FUNC(block_cache_filter_hit, BLOCK_CACHE_FILTER_HIT) +DEF_SHOW_FUNC(block_cache_filter_add, BLOCK_CACHE_FILTER_ADD) +DEF_SHOW_FUNC(block_cache_filter_bytes_insert, BLOCK_CACHE_FILTER_BYTES_INSERT) +DEF_SHOW_FUNC(block_cache_filter_bytes_evict, BLOCK_CACHE_FILTER_BYTES_EVICT) +DEF_SHOW_FUNC(block_cache_bytes_read, BLOCK_CACHE_BYTES_READ) +DEF_SHOW_FUNC(block_cache_bytes_write, BLOCK_CACHE_BYTES_WRITE) +DEF_SHOW_FUNC(block_cache_data_bytes_insert, BLOCK_CACHE_DATA_BYTES_INSERT) DEF_SHOW_FUNC(block_cache_data_miss, BLOCK_CACHE_DATA_MISS) DEF_SHOW_FUNC(block_cache_data_hit, BLOCK_CACHE_DATA_HIT) +DEF_SHOW_FUNC(block_cache_data_add, BLOCK_CACHE_DATA_ADD) DEF_SHOW_FUNC(bloom_filter_useful, BLOOM_FILTER_USEFUL) DEF_SHOW_FUNC(memtable_hit, MEMTABLE_HIT) DEF_SHOW_FUNC(memtable_miss, MEMTABLE_MISS) +DEF_SHOW_FUNC(get_hit_l0, GET_HIT_L0) +DEF_SHOW_FUNC(get_hit_l1, GET_HIT_L1) +DEF_SHOW_FUNC(get_hit_l2_and_up, GET_HIT_L2_AND_UP) DEF_SHOW_FUNC(compaction_key_drop_new, COMPACTION_KEY_DROP_NEWER_ENTRY) DEF_SHOW_FUNC(compaction_key_drop_obsolete, COMPACTION_KEY_DROP_OBSOLETE) DEF_SHOW_FUNC(compaction_key_drop_user, COMPACTION_KEY_DROP_USER) @@ -11668,11 +12395,17 @@ DEF_SHOW_FUNC(number_keys_read, NUMBER_KEYS_READ) DEF_SHOW_FUNC(number_keys_updated, NUMBER_KEYS_UPDATED) DEF_SHOW_FUNC(bytes_written, BYTES_WRITTEN) DEF_SHOW_FUNC(bytes_read, BYTES_READ) +DEF_SHOW_FUNC(number_db_seek, NUMBER_DB_SEEK) +DEF_SHOW_FUNC(number_db_seek_found, NUMBER_DB_SEEK_FOUND) +DEF_SHOW_FUNC(number_db_next, NUMBER_DB_NEXT) +DEF_SHOW_FUNC(number_db_next_found, NUMBER_DB_NEXT_FOUND) +DEF_SHOW_FUNC(number_db_prev, NUMBER_DB_PREV) +DEF_SHOW_FUNC(number_db_prev_found, NUMBER_DB_PREV_FOUND) +DEF_SHOW_FUNC(iter_bytes_read, ITER_BYTES_READ) DEF_SHOW_FUNC(no_file_closes, NO_FILE_CLOSES) DEF_SHOW_FUNC(no_file_opens, NO_FILE_OPENS) DEF_SHOW_FUNC(no_file_errors, NO_FILE_ERRORS) DEF_SHOW_FUNC(stall_micros, STALL_MICROS) -DEF_SHOW_FUNC(rate_limit_delay_millis, RATE_LIMIT_DELAY_MILLIS) DEF_SHOW_FUNC(num_iterators, NO_ITERATORS) DEF_SHOW_FUNC(number_multiget_get, NUMBER_MULTIGET_CALLS) DEF_SHOW_FUNC(number_multiget_keys_read, NUMBER_MULTIGET_KEYS_READ) @@ -11706,6 +12439,7 @@ static void myrocks_update_status() { export_stats.rows_updated = global_stats.rows[ROWS_UPDATED]; export_stats.rows_deleted_blind = 
global_stats.rows[ROWS_DELETED_BLIND]; export_stats.rows_expired = global_stats.rows[ROWS_EXPIRED]; + export_stats.rows_filtered = global_stats.rows[ROWS_FILTERED]; export_stats.system_rows_deleted = global_stats.system_rows[ROWS_DELETED]; export_stats.system_rows_inserted = global_stats.system_rows[ROWS_INSERTED]; @@ -11744,6 +12478,8 @@ static SHOW_VAR myrocks_status_variables[] = { SHOW_LONGLONG), DEF_STATUS_VAR_FUNC("rows_expired", &export_stats.rows_expired, SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_filtered", &export_stats.rows_filtered, + SHOW_LONGLONG), DEF_STATUS_VAR_FUNC("system_rows_deleted", &export_stats.system_rows_deleted, SHOW_LONGLONG), DEF_STATUS_VAR_FUNC("system_rows_inserted", @@ -11862,15 +12598,29 @@ static SHOW_VAR rocksdb_status_vars[] = { DEF_STATUS_VAR(block_cache_miss), DEF_STATUS_VAR(block_cache_hit), DEF_STATUS_VAR(block_cache_add), + DEF_STATUS_VAR(block_cache_add_failures), DEF_STATUS_VAR(block_cache_index_miss), DEF_STATUS_VAR(block_cache_index_hit), + DEF_STATUS_VAR(block_cache_index_add), + DEF_STATUS_VAR(block_cache_index_bytes_insert), + DEF_STATUS_VAR(block_cache_index_bytes_evict), DEF_STATUS_VAR(block_cache_filter_miss), DEF_STATUS_VAR(block_cache_filter_hit), + DEF_STATUS_VAR(block_cache_filter_add), + DEF_STATUS_VAR(block_cache_filter_bytes_insert), + DEF_STATUS_VAR(block_cache_filter_bytes_evict), + DEF_STATUS_VAR(block_cache_bytes_read), + DEF_STATUS_VAR(block_cache_bytes_write), + DEF_STATUS_VAR(block_cache_data_bytes_insert), DEF_STATUS_VAR(block_cache_data_miss), DEF_STATUS_VAR(block_cache_data_hit), + DEF_STATUS_VAR(block_cache_data_add), DEF_STATUS_VAR(bloom_filter_useful), DEF_STATUS_VAR(memtable_hit), DEF_STATUS_VAR(memtable_miss), + DEF_STATUS_VAR(get_hit_l0), + DEF_STATUS_VAR(get_hit_l1), + DEF_STATUS_VAR(get_hit_l2_and_up), DEF_STATUS_VAR(compaction_key_drop_new), DEF_STATUS_VAR(compaction_key_drop_obsolete), DEF_STATUS_VAR(compaction_key_drop_user), @@ -11879,11 +12629,17 @@ static SHOW_VAR rocksdb_status_vars[] = { DEF_STATUS_VAR(number_keys_updated), DEF_STATUS_VAR(bytes_written), DEF_STATUS_VAR(bytes_read), + DEF_STATUS_VAR(number_db_seek), + DEF_STATUS_VAR(number_db_seek_found), + DEF_STATUS_VAR(number_db_next), + DEF_STATUS_VAR(number_db_next_found), + DEF_STATUS_VAR(number_db_prev), + DEF_STATUS_VAR(number_db_prev_found), + DEF_STATUS_VAR(iter_bytes_read), DEF_STATUS_VAR(no_file_closes), DEF_STATUS_VAR(no_file_opens), DEF_STATUS_VAR(no_file_errors), DEF_STATUS_VAR(stall_micros), - DEF_STATUS_VAR(rate_limit_delay_millis), DEF_STATUS_VAR(num_iterators), DEF_STATUS_VAR(number_multiget_get), DEF_STATUS_VAR(number_multiget_keys_read), @@ -11909,12 +12665,14 @@ static SHOW_VAR rocksdb_status_vars[] = { DEF_STATUS_VAR(number_superversion_releases), DEF_STATUS_VAR(number_superversion_cleanups), DEF_STATUS_VAR(number_block_not_compressed), + DEF_STATUS_VAR_PTR("row_lock_deadlocks", &rocksdb_row_lock_deadlocks, + SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("row_lock_wait_timeouts", + &rocksdb_row_lock_wait_timeouts, SHOW_LONGLONG), DEF_STATUS_VAR_PTR("snapshot_conflict_errors", &rocksdb_snapshot_conflict_errors, SHOW_LONGLONG), DEF_STATUS_VAR_PTR("wal_group_syncs", &rocksdb_wal_group_syncs, SHOW_LONGLONG), - DEF_STATUS_VAR_PTR("number_stat_computes", &rocksdb_number_stat_computes, - SHOW_LONGLONG), DEF_STATUS_VAR_PTR("number_sst_entry_put", &rocksdb_num_sst_entry_put, SHOW_LONGLONG), DEF_STATUS_VAR_PTR("number_sst_entry_delete", &rocksdb_num_sst_entry_delete, @@ -11981,8 +12739,8 @@ void Rdb_background_thread::run() { // InnoDB's behavior. 
For mode never, the wal file isn't even written, // whereas background writes to the wal file, but issues the syncs in a // background thread. - if (rdb && (rocksdb_flush_log_at_trx_commit != FLUSH_LOG_SYNC)) { - DBUG_ASSERT(!rocksdb_db_options->allow_mmap_writes); + if (rdb && (rocksdb_flush_log_at_trx_commit != FLUSH_LOG_SYNC) && + !rocksdb_db_options->allow_mmap_writes) { const rocksdb::Status s = rdb->FlushWAL(true); if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_BG_THREAD); @@ -11994,6 +12752,22 @@ void Rdb_background_thread::run() { ddl_manager.persist_stats(); } +bool ha_rocksdb::check_bloom_and_set_bounds(THD *thd, const Rdb_key_def &kd, + const rocksdb::Slice &eq_cond, + const bool use_all_keys, + uchar *lower_bound_buf, + uchar *upper_bound_buf, + rocksdb::Slice *out_lower_bound, + rocksdb::Slice *out_upper_bound) { + bool can_use_bloom = can_use_bloom_filter(thd, kd, eq_cond, use_all_keys); + if (!can_use_bloom) { + setup_iterator_bounds(kd, eq_cond, + lower_bound_buf, upper_bound_buf, + out_lower_bound, out_upper_bound); + } + return can_use_bloom; +} + /** Deciding if it is possible to use bloom filter or not. @@ -12012,9 +12786,9 @@ void Rdb_background_thread::run() { @param use_all_keys True if all key parts are set with equal conditions. This is aware of extended keys. */ -bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd, - const rocksdb::Slice &eq_cond, - const bool use_all_keys) { +bool ha_rocksdb::can_use_bloom_filter(THD *thd, const Rdb_key_def &kd, + const rocksdb::Slice &eq_cond, + const bool use_all_keys) { bool can_use = false; if (THDVAR(thd, skip_bloom_filter_on_read)) { @@ -12152,7 +12926,7 @@ void rdb_handle_io_error(const rocksdb::Status status, rdb_log_status_error(status, "failed to write to WAL"); /* NO_LINT_DEBUG */ sql_print_error("MyRocks: aborting on WAL write error."); - abort_with_stack_traces(); + abort(); break; } case RDB_IO_ERROR_BG_THREAD: { @@ -12163,7 +12937,7 @@ void rdb_handle_io_error(const rocksdb::Status status, rdb_log_status_error(status, "failed on I/O"); /* NO_LINT_DEBUG */ sql_print_error("MyRocks: aborting on I/O error."); - abort_with_stack_traces(); + abort(); break; } default: @@ -12172,16 +12946,17 @@ void rdb_handle_io_error(const rocksdb::Status status, } } else if (status.IsCorruption()) { rdb_log_status_error(status, "data corruption detected!"); + rdb_persist_corruption_marker(); /* NO_LINT_DEBUG */ sql_print_error("MyRocks: aborting because of data corruption."); - abort_with_stack_traces(); + abort(); } else if (!status.ok()) { switch (err_type) { case RDB_IO_ERROR_DICT_COMMIT: { rdb_log_status_error(status, "Failed to write to WAL (dictionary)"); /* NO_LINT_DEBUG */ sql_print_error("MyRocks: aborting on WAL write error."); - abort_with_stack_traces(); + abort(); break; } default: @@ -12338,11 +13113,43 @@ void rocksdb_set_collation_exception_list(THD *const thd, *static_cast(var_ptr) = val_copy; } -void rocksdb_set_bulk_load(THD *const thd, struct st_mysql_sys_var *const var - MY_ATTRIBUTE((__unused__)), - void *const var_ptr, const void *const save) { - Rdb_transaction *&tx = get_tx_from_thd(thd); +int mysql_value_to_bool(struct st_mysql_value *value, my_bool *return_value) { + int new_value_type = value->value_type(value); + if (new_value_type == MYSQL_VALUE_TYPE_STRING) { + char buf[16]; + int len = sizeof(buf); + const char *str = value->val_str(value, buf, &len); + if (str && (my_strcasecmp(system_charset_info, "true", str) == 0 || + my_strcasecmp(system_charset_info, "on", str) == 0)) { + *return_value = 
TRUE; + } else if (str && (my_strcasecmp(system_charset_info, "false", str) == 0 || + my_strcasecmp(system_charset_info, "off", str) == 0)) { + *return_value = FALSE; + } else { + return 1; + } + } else if (new_value_type == MYSQL_VALUE_TYPE_INT) { + long long intbuf; + value->val_int(value, &intbuf); + if (intbuf > 1) + return 1; + *return_value = intbuf > 0 ? TRUE : FALSE; + } else { + return 1; + } + return 0; +} + +int rocksdb_check_bulk_load( + THD *const thd, struct st_mysql_sys_var *var MY_ATTRIBUTE((__unused__)), + void *save, struct st_mysql_value *value) { + my_bool new_value; + if (mysql_value_to_bool(value, &new_value) != 0) { + return 1; + } + + Rdb_transaction *&tx = get_tx_from_thd(thd); if (tx != nullptr) { const int rc = tx->finish_bulk_load(); if (rc != 0) { @@ -12350,30 +13157,32 @@ void rocksdb_set_bulk_load(THD *const thd, struct st_mysql_sys_var *const var sql_print_error("RocksDB: Error %d finalizing last SST file while " "setting bulk loading variable", rc); - /* - MariaDB doesn't do the following: - abort_with_stack_traces(); - because it doesn't seem a good idea to crash a server when a user makes - a mistake. - Instead, we return an error to the user. The error has already been - produced inside ha_rocksdb::finalize_bulk_load(). - */ + THDVAR(thd, bulk_load) = 0; + return 1; } } - *static_cast(var_ptr) = *static_cast(save); + *static_cast(save) = new_value; + return 0; } -void rocksdb_set_bulk_load_allow_unsorted( - THD *const thd, - struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), - void *const var_ptr, const void *const save) { +int rocksdb_check_bulk_load_allow_unsorted( + THD *const thd, struct st_mysql_sys_var *var MY_ATTRIBUTE((__unused__)), + void *save, struct st_mysql_value *value) { + my_bool new_value; + if (mysql_value_to_bool(value, &new_value) != 0) { + return 1; + } + if (THDVAR(thd, bulk_load)) { my_error(ER_ERROR_WHEN_EXECUTING_COMMAND, MYF(0), "SET", "Cannot change this setting while bulk load is enabled"); - } else { - *static_cast(var_ptr) = *static_cast(save); + + return 1; } + + *static_cast(save) = new_value; + return 0; } static void rocksdb_set_max_background_jobs(THD *thd, @@ -12404,35 +13213,116 @@ static void rocksdb_set_max_background_jobs(THD *thd, RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } -void rocksdb_set_update_cf_options(THD *const /* unused */, - struct st_mysql_sys_var *const /* unused */, - void *const var_ptr, - const void *const save) { - const char *const val = *static_cast(save); - - if (!val) { - // NO_LINT_DEBUG - sql_print_warning("MyRocks: NULL is not a valid option for updates to " - "column family settings."); - return; - } +static void rocksdb_set_bytes_per_sync( + THD *thd MY_ATTRIBUTE((__unused__)), + struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { + DBUG_ASSERT(save != nullptr); + DBUG_ASSERT(rocksdb_db_options != nullptr); + DBUG_ASSERT(rocksdb_db_options->env != nullptr); RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); - DBUG_ASSERT(val != nullptr); + const ulonglong new_val = *static_cast(save); + + if (rocksdb_db_options->bytes_per_sync != new_val) { + rocksdb_db_options->bytes_per_sync = new_val; + rocksdb::Status s = + rdb->SetDBOptions({{"bytes_per_sync", std::to_string(new_val)}}); + + if (!s.ok()) { + /* NO_LINT_DEBUG */ + sql_print_warning("MyRocks: failed to update max_background_jobs. 
" + "Status code = %d, status = %s.", + s.code(), s.ToString().c_str()); + } + } + + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); +} + +static void rocksdb_set_wal_bytes_per_sync( + THD *thd MY_ATTRIBUTE((__unused__)), + struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { + DBUG_ASSERT(save != nullptr); + DBUG_ASSERT(rocksdb_db_options != nullptr); + DBUG_ASSERT(rocksdb_db_options->env != nullptr); + + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); + + const ulonglong new_val = *static_cast(save); + + if (rocksdb_db_options->wal_bytes_per_sync != new_val) { + rocksdb_db_options->wal_bytes_per_sync = new_val; + rocksdb::Status s = + rdb->SetDBOptions({{"wal_bytes_per_sync", std::to_string(new_val)}}); + + if (!s.ok()) { + /* NO_LINT_DEBUG */ + sql_print_warning("MyRocks: failed to update max_background_jobs. " + "Status code = %d, status = %s.", + s.code(), s.ToString().c_str()); + } + } + + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); +} + +static int +rocksdb_validate_update_cf_options(THD * /* unused */, + struct st_mysql_sys_var * /*unused*/, + void *save, struct st_mysql_value *value) { + + char buff[STRING_BUFFER_USUAL_SIZE]; + const char *str; + int length; + length = sizeof(buff); + str = value->val_str(value, buff, &length); + *(const char **)save = str; + + if (str == nullptr) { + return HA_EXIT_SUCCESS; + } - // Do the real work of applying the changes. Rdb_cf_options::Name_to_config_t option_map; // Basic sanity checking and parsing the options into a map. If this fails // then there's no point to proceed. + if (!Rdb_cf_options::parse_cf_options(str, &option_map)) { + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "rocksdb_update_cf_options", str); + return HA_EXIT_FAILURE; + } + return HA_EXIT_SUCCESS; +} + +static void +rocksdb_set_update_cf_options(THD *const /* unused */, + struct st_mysql_sys_var *const /* unused */, + void *const var_ptr, const void *const save) { + const char *const val = *static_cast(save); + + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); + + if (!val) { + *reinterpret_cast(var_ptr) = nullptr; + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); + return; + } + + DBUG_ASSERT(val != nullptr); + + // Reset the pointers regardless of how much success we had with updating + // the CF options. This will results in consistent behavior and avoids + // dealing with cases when only a subset of CF-s was successfully updated. + *reinterpret_cast(var_ptr) = my_strdup(val, MYF(0)); + + // Do the real work of applying the changes. + Rdb_cf_options::Name_to_config_t option_map; + + // This should never fail, because of rocksdb_validate_update_cf_options if (!Rdb_cf_options::parse_cf_options(val, &option_map)) { my_free(*reinterpret_cast(var_ptr)); - *reinterpret_cast(var_ptr) = nullptr; - - // NO_LINT_DEBUG - sql_print_warning("MyRocks: failed to parse the updated column family " - "options = '%s'.", val); RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); return; } @@ -12492,16 +13382,6 @@ void rocksdb_set_update_cf_options(THD *const /* unused */, } } - // Reset the pointers regardless of how much success we had with updating - // the CF options. This will results in consistent behavior and avoids - // dealing with cases when only a subset of CF-s was successfully updated. 
- if (val) { - my_free(*reinterpret_cast(var_ptr)); - *reinterpret_cast(var_ptr) = my_strdup(val, MYF(0)); - } else { - *reinterpret_cast(var_ptr) = nullptr; - } - // Our caller (`plugin_var_memalloc_global_update`) will call `my_free` to // free up resources used before. @@ -12570,8 +13450,26 @@ double ha_rocksdb::read_time(uint index, uint ranges, ha_rows rows) { DBUG_RETURN((rows / 20.0) + 1); } +std::string rdb_corruption_marker_file_name() { + std::string ret(rocksdb_datadir); + ret.append("/ROCKSDB_CORRUPTED"); + return ret; +} + +void sql_print_verbose_info(const char *format, ...) +{ + va_list args; + + if (global_system_variables.log_warnings > 2) { + va_start(args, format); + sql_print_information_v(format, args); + va_end(args); + } +} + } // namespace myrocks + /** Construct and emit duplicate key error message using information from table's record buffer. @@ -12615,5 +13513,6 @@ maria_declare_plugin(rocksdb_se){ myrocks::rdb_i_s_cfoptions, myrocks::rdb_i_s_compact_stats, myrocks::rdb_i_s_global_info, myrocks::rdb_i_s_ddl, myrocks::rdb_i_s_index_file_map, myrocks::rdb_i_s_lock_info, - myrocks::rdb_i_s_trx_info + myrocks::rdb_i_s_trx_info, + myrocks::rdb_i_s_deadlock_info maria_declare_plugin_end; diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index af5cc5c3024..5bf473a2fb0 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -41,6 +41,7 @@ /* RocksDB header files */ #include "rocksdb/cache.h" +#include "rocksdb/merge_operator.h" #include "rocksdb/perf_context.h" #include "rocksdb/sst_file_manager.h" #include "rocksdb/statistics.h" @@ -92,6 +93,25 @@ struct Rdb_trx_info { std::vector rdb_get_all_trx_info(); +/* + * class for exporting deadlock transaction information for + * information_schema.rocksdb_deadlock + */ +struct Rdb_deadlock_info { + struct Rdb_dl_trx_info { + ulonglong trx_id; + std::string cf_name; + std::string waiting_key; + bool exclusive_lock; + std::string index_name; + std::string table_name; + }; + std::vector path; + ulonglong victim_trx_id; +}; + +std::vector rdb_get_deadlock_info(); + /* This is - the name of the default Column Family (the CF which stores indexes which @@ -250,6 +270,8 @@ enum collations_used { */ #define ROCKSDB_SIZEOF_TTL_RECORD sizeof(longlong) +#define ROCKSDB_SIZEOF_AUTOINC_VALUE sizeof(longlong) + /* Maximum index prefix length in bytes. */ @@ -367,6 +389,7 @@ enum operation_type : int { ROWS_UPDATED, ROWS_DELETED_BLIND, ROWS_EXPIRED, + ROWS_FILTERED, ROWS_HIDDEN_NO_SNAPSHOT, ROWS_MAX }; @@ -400,6 +423,7 @@ struct st_export_stats { ulonglong rows_updated; ulonglong rows_deleted_blind; ulonglong rows_expired; + ulonglong rows_filtered; ulonglong rows_hidden_no_snapshot; ulonglong system_rows_deleted; @@ -469,6 +493,10 @@ class ha_rocksdb : public my_core::handler { /* Iterator used for range scans and for full table/index scans */ rocksdb::Iterator *m_scan_it; + + /* Same as handler::end_key but for start. Reverse-ordered scans need it */ + key_range m_save_start_range; + const key_range *m_start_range; /* Whether m_scan_it was created with skip_bloom=true */ bool m_scan_it_skips_bloom; @@ -538,6 +566,12 @@ class ha_rocksdb : public my_core::handler { uchar *m_dup_sk_packed_tuple; uchar *m_dup_sk_packed_tuple_old; + /* Buffers used for passing upper/bound eq conditions. 
*/ + uchar *m_eq_cond_lower_bound; + uchar *m_eq_cond_upper_bound; + rocksdb::Slice m_eq_cond_lower_bound_slice; + rocksdb::Slice m_eq_cond_upper_bound_slice; + /* Temporary space for packing VARCHARs (we provide it to pack_record()/pack_index_tuple() calls). @@ -567,7 +601,7 @@ class ha_rocksdb : public my_core::handler { This is used only when we get the record with rocksdb's Get() call (The other option is when we get a rocksdb::Slice from an iterator) */ - std::string m_retrieved_record; + rocksdb::PinnableSlice m_retrieved_record; /* Type of locking to apply to rows */ enum { RDB_LOCK_NONE, RDB_LOCK_READ, RDB_LOCK_WRITE } m_lock_rows; @@ -600,12 +634,7 @@ class ha_rocksdb : public my_core::handler { bool m_update_scope_is_valid; /* SST information used for bulk loading the primary key */ - std::unique_ptr m_sst_info; - /* External merge sorts for bulk load: key ID -> merge sort instance */ - std::unordered_map m_key_merge; - Rdb_transaction *m_bulk_load_tx; - /* Mutex to protect finalizing bulk load */ - mysql_mutex_t m_bulk_load_mutex; + std::shared_ptr m_sst_info; /* MySQL index number for duplicate key error @@ -620,13 +649,25 @@ class ha_rocksdb : public my_core::handler { int secondary_index_read(const int keyno, uchar *const buf) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); void setup_iterator_for_rnd_scan(); - void setup_scan_iterator(const Rdb_key_def &kd, rocksdb::Slice *const slice) - MY_ATTRIBUTE((__nonnull__)) { - setup_scan_iterator(kd, slice, false, 0); - } bool is_ascending(const Rdb_key_def &keydef, enum ha_rkey_function find_flag) const MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + void setup_iterator_bounds(const Rdb_key_def &kd, + const rocksdb::Slice &eq_cond, + uchar *lower_bound_buf, + uchar *upper_bound_buf, + rocksdb::Slice *out_lower_bound, + rocksdb::Slice *out_upper_bound); + bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd, + const rocksdb::Slice &eq_cond, + const bool use_all_keys); + bool check_bloom_and_set_bounds(THD *thd, const Rdb_key_def &kd, + const rocksdb::Slice &eq_cond, + const bool use_all_keys, + uchar *lower_bound_buf, + uchar *upper_bound_buf, + rocksdb::Slice *out_lower_bound, + rocksdb::Slice *out_upper_bound); void setup_scan_iterator(const Rdb_key_def &kd, rocksdb::Slice *slice, const bool use_all_keys, const uint eq_cond_len) MY_ATTRIBUTE((__nonnull__)); @@ -635,7 +676,8 @@ class ha_rocksdb : public my_core::handler { rocksdb::Status get_for_update(Rdb_transaction *const tx, rocksdb::ColumnFamilyHandle *const column_family, - const rocksdb::Slice &key, std::string *const value) const; + const rocksdb::Slice &key, + rocksdb::PinnableSlice *value) const; int get_row_by_rowid(uchar *const buf, const char *const rowid, const uint rowid_size, const bool skip_lookup = false, @@ -649,8 +691,13 @@ class ha_rocksdb : public my_core::handler { rowid_size, skip_lookup, skip_ttl_check); } - void update_auto_incr_val(); void load_auto_incr_value(); + ulonglong load_auto_incr_value_from_index(); + void update_auto_incr_val(ulonglong val); + void update_auto_incr_val_from_field(); + rocksdb::Status get_datadic_auto_incr(Rdb_transaction *const tx, + const GL_INDEX_ID &gl_index_id, + ulonglong *new_val) const; longlong update_hidden_pk_val(); int load_hidden_pk_value() MY_ATTRIBUTE((__warn_unused_result__)); int read_hidden_pk_id_from_rowkey(longlong *const hidden_pk_id) @@ -696,6 +743,12 @@ class ha_rocksdb : public my_core::handler { */ std::vector m_decoders_vect; + /* + This tells if any field which is part of the key needs 
to be unpacked and + decoded. + */ + bool m_key_requested = false; + /* Setup field_decoders based on type of scan and table->read_set */ void setup_read_decoders(); @@ -766,9 +819,12 @@ public: my_core::TABLE_SHARE *const table_arg); ~ha_rocksdb() { int err MY_ATTRIBUTE((__unused__)); - err = finalize_bulk_load(); - DBUG_ASSERT(err == 0); - mysql_mutex_destroy(&m_bulk_load_mutex); + err = finalize_bulk_load(false); + if (err != 0) { + sql_print_error("RocksDB: Error %d finalizing bulk load while closing " + "handler.", + err); + } } /** @brief @@ -794,10 +850,9 @@ public: */ /* - See if this is the same base table - this should only be true for different - partitions of the same table. + Returns the name of the table's base name */ - bool same_table(const ha_rocksdb &other) const; + const std::string &get_table_basename() const; /** @brief This is a list of flags that indicate what functionality the storage engine @@ -1210,8 +1265,6 @@ private: Rdb_tbl_def *get_table_if_exists(const char *const tablename) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); void read_thd_vars(THD *const thd) MY_ATTRIBUTE((__nonnull__)); - const char *thd_rocksdb_tmpdir() - MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); bool contains_foreign_key(THD *const thd) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); @@ -1221,6 +1274,9 @@ private: const std::unordered_set> &indexes) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int finalize_bulk_load(bool print_client_error = true) + MY_ATTRIBUTE((__warn_unused_result__)); + public: int index_init(uint idx, bool sorted) override MY_ATTRIBUTE((__warn_unused_result__)); @@ -1264,7 +1320,7 @@ public: DBUG_ENTER_FUNC(); /* Free blob data */ - m_retrieved_record.clear(); + m_retrieved_record.Reset(); DBUG_RETURN(HA_EXIT_SUCCESS); } @@ -1335,8 +1391,6 @@ public: my_core::Alter_inplace_info *const ha_alter_info, bool commit) override; - int finalize_bulk_load() MY_ATTRIBUTE((__warn_unused_result__)); - #ifdef MARIAROCKS_NOT_YET // MDEV-10976 void set_use_read_free_rpl(const char *const whitelist); #endif @@ -1391,18 +1445,22 @@ struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx { /* Stores number of keys to drop */ const uint m_n_dropped_keys; + /* Stores the largest current auto increment value in the index */ + const ulonglong m_max_auto_incr; + Rdb_inplace_alter_ctx( Rdb_tbl_def *new_tdef, std::shared_ptr *old_key_descr, std::shared_ptr *new_key_descr, uint old_n_keys, uint new_n_keys, std::unordered_set> added_indexes, std::unordered_set dropped_index_ids, uint n_added_keys, - uint n_dropped_keys) + uint n_dropped_keys, ulonglong max_auto_incr) : my_core::inplace_alter_handler_ctx(), m_new_tdef(new_tdef), m_old_key_descr(old_key_descr), m_new_key_descr(new_key_descr), m_old_n_keys(old_n_keys), m_new_n_keys(new_n_keys), m_added_indexes(added_indexes), m_dropped_index_ids(dropped_index_ids), - m_n_added_keys(n_added_keys), m_n_dropped_keys(n_dropped_keys) {} + m_n_added_keys(n_added_keys), m_n_dropped_keys(n_dropped_keys), + m_max_auto_incr(max_auto_incr) {} ~Rdb_inplace_alter_ctx() {} @@ -1412,7 +1470,14 @@ private: Rdb_inplace_alter_ctx &operator=(const Rdb_inplace_alter_ctx &); }; -const int MYROCKS_MARIADB_PLUGIN_MATURITY_LEVEL= MariaDB_PLUGIN_MATURITY_GAMMA; +// file name indicating RocksDB data corruption +std::string rdb_corruption_marker_file_name(); + +const int MYROCKS_MARIADB_PLUGIN_MATURITY_LEVEL= MariaDB_PLUGIN_MATURITY_STABLE; extern bool prevent_myrocks_loading; + +void sql_print_verbose_info(const char *format, 
...); + } // namespace myrocks + diff --git a/storage/rocksdb/mysql-test/rocksdb/include/autoinc_crash_safe.inc b/storage/rocksdb/mysql-test/rocksdb/include/autoinc_crash_safe.inc new file mode 100644 index 00000000000..ba2e7ace0c5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/autoinc_crash_safe.inc @@ -0,0 +1,150 @@ +--echo # +--echo # Testing concurrent transactions. +--echo # + +--source include/count_sessions.inc +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); +connect (con3,localhost,root,,); + +connection con1; +begin; +insert into t values (); # 1 + +connection con2; +begin; +insert into t values (); # 2 + +connection con3; +begin; +insert into t values (); # 3 + +connection con1; +insert into t values (); # 4 + +connection con2; +insert into t values (); # 5 + +connection con3; +insert into t values (); # 6 + +connection con2; +commit; + +connection con3; +rollback; + +connection con1; +commit; + +delete from t; + +--echo # Master value before restart +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +--echo # Slave value before restart +sync_slave_with_master; +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +connection slave; +--source include/stop_slave.inc +--let $rpl_server_number = 1 +--source include/rpl_restart_server.inc + +connection default; +--echo # Master value after restart +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +--let $rpl_server_number = 2 +--source include/rpl_restart_server.inc + +connection slave; +--source include/start_slave.inc +--echo # Slave value after restart +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +disconnect con1; +disconnect con2; +disconnect con3; +--source include/wait_until_count_sessions.inc + +--echo # +--echo # Testing interaction of merge markers with various DDL statements. +--echo # +connection slave; +--source include/stop_slave.inc + +connection default; + +--echo # Drop and add primary key. +alter table t modify i int; +alter table t drop primary key; +alter table t add primary key (i); +alter table t modify i int auto_increment; + +--let $rpl_server_number = 1 +--source include/rpl_restart_server.inc +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +--echo # Remove auto_increment property. +alter table t modify i int; +--let $rpl_server_number = 1 +--source include/rpl_restart_server.inc +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +--echo # Add auto_increment property. +insert into t values (123); +alter table t modify i int auto_increment; +--let $rpl_server_number = 1 +--source include/rpl_restart_server.inc +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +--echo # Add column j. +alter table t add column j int; +--let $rpl_server_number = 1 +--source include/rpl_restart_server.inc +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +--echo # Rename tables. 
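The concurrent-transaction part of this test boils down to the following behaviour (a minimal sketch, assuming a MyRocks build with the persisted auto-increment added by this patch; the table definition is the test's own):

  CREATE TABLE t (i INT PRIMARY KEY AUTO_INCREMENT) ENGINE=ROCKSDB;
  INSERT INTO t VALUES (), (), ();   -- counter is now 4
  DELETE FROM t;
  -- After a server restart the counter is reloaded from the data dictionary
  -- rather than recomputed from the (now empty) table, so it stays at 4:
  INSERT INTO t VALUES ();
  SELECT i FROM t;                   -- expected to return 4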
+rename table t to t2; +rename table t2 to t; + +--let $rpl_server_number = 1 +--source include/rpl_restart_server.inc +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +--echo # Change auto_increment property +alter table t auto_increment = 1000; +--let $rpl_server_number = 1 +--source include/rpl_restart_server.inc +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +alter table t auto_increment = 1; +--let $rpl_server_number = 1 +--source include/rpl_restart_server.inc +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +alter table t drop primary key, add key (i), auto_increment = 1; +--let $rpl_server_number = 1 +--source include/rpl_restart_server.inc +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +alter table t add key (j), auto_increment = 1; +--let $rpl_server_number = 1 +--source include/rpl_restart_server.inc +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +alter table t modify i int; +alter table t add column (k int auto_increment), add key(k), auto_increment=15; +--let $rpl_server_number = 1 +--source include/rpl_restart_server.inc +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; + +--echo # Drop table. +drop table t; + +--let $rpl_server_number = 1 +--source include/rpl_restart_server.inc + +connection slave; +--source include/start_slave.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.inc b/storage/rocksdb/mysql-test/rocksdb/include/bulk_load.inc similarity index 88% rename from storage/rocksdb/mysql-test/rocksdb/t/bulk_load.inc rename to storage/rocksdb/mysql-test/rocksdb/include/bulk_load.inc index 87cb1f70f32..8ec97510dbd 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.inc +++ b/storage/rocksdb/mysql-test/rocksdb/include/bulk_load.inc @@ -1,6 +1,4 @@ ---disable_warnings -DROP TABLE IF EXISTS t1, t2, t3; ---enable_warnings +--source include/count_sessions.inc if ($data_order_desc) { @@ -20,7 +18,7 @@ eval CREATE TABLE t1( b CHAR(30), PRIMARY KEY(pk) COMMENT "$pk_cf", KEY(a) -) COLLATE 'latin1_bin'; +) ENGINE=ROCKSDB COLLATE 'latin1_bin'; # Create a second identical table to validate that bulk loading different # tables in the same session works @@ -30,7 +28,7 @@ eval CREATE TABLE t2( b CHAR(30), PRIMARY KEY(pk) COMMENT "$pk_cf", KEY(a) -) COLLATE 'latin1_bin'; +) ENGINE=ROCKSDB COLLATE 'latin1_bin'; # Create a third table using partitions to validate that bulk loading works # across a partitioned table @@ -40,7 +38,7 @@ eval CREATE TABLE t3( b CHAR(30), PRIMARY KEY(pk) COMMENT "$pk_cf", KEY(a) -) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4; +) ENGINE=ROCKSDB COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4; --let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")` @@ -52,7 +50,7 @@ eval CREATE TABLE t3( perl; my $fn = $ENV{'ROCKSDB_INFILE'}; open(my $fh, '>', $fn) || die "perl open($fn): $!"; -my $max = 5000000; +my $max = 2500000; my $desc = $ENV{'MTR_DATA_ORDER_DESC'}; my @chars = ("A".."Z", "a".."z", "0".."9"); my @powers_of_26 = (26 * 26 * 26 * 26, 26 * 26 * 26, 26 * 26, 26, 1); @@ -120,15 +118,12 @@ ANALYZE TABLE t1, t2, t3; SHOW TABLE STATUS WHERE name LIKE 't%'; # Make sure all the data is there. 
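For reference, the core of what this include exercises (a hedged sketch; the file name and table are the test's own): bulk load is toggled per session, and with the rocksdb_check_bulk_load change in ha_rocksdb.cc, turning it off now returns an error to the client if finalizing the last SST file fails, instead of only logging the failure.

  SET rocksdb_bulk_load = 1;
  LOAD DATA INFILE 'test_loadfile.txt' INTO TABLE t1;
  SET rocksdb_bulk_load = 0;   -- finalizes the SST files; fails with an error
                               -- (and leaves bulk load off) if that step fails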
-select count(pk) from t1; -select count(a) from t1; -select count(b) from t1; -select count(pk) from t2; -select count(a) from t2; -select count(b) from t2; -select count(pk) from t3; -select count(a) from t3; -select count(b) from t3; +select count(pk),count(a) from t1; +select count(b) from t1; +select count(pk),count(a) from t2; +select count(b) from t2; +select count(pk),count(a) from t3; +select count(b) from t3; # Create a dummy file with a bulk load extesion. It should be removed when # the server starts @@ -154,3 +149,5 @@ EOF # Cleanup disconnect other; DROP TABLE t1, t2, t3; + +--source include/wait_until_count_sessions.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/include/bulk_load_unsorted.inc b/storage/rocksdb/mysql-test/rocksdb/include/bulk_load_unsorted.inc new file mode 100644 index 00000000000..5f808087e3e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/bulk_load_unsorted.inc @@ -0,0 +1,142 @@ +--source include/have_partition.inc +--source include/count_sessions.inc + +SET rocksdb_bulk_load_size=3; +SET rocksdb_bulk_load_allow_unsorted=1; + +### Test individual INSERTs ### + +# A table with only a PK won't have rows until the bulk load is finished +eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf") + ENGINE=ROCKSDB; +SET rocksdb_bulk_load=1; +--disable_query_log +let $sign = 1; +let $max = 5; +let $i = 1; +while ($i <= $max) { + let $a = 1 + $sign * $i; + let $b = 1 - $sign * $i; + let $sign = -$sign; + let $insert = INSERT INTO t1 VALUES ($a, $b); + eval $insert; + inc $i; +} +--enable_query_log +SELECT * FROM t1 FORCE INDEX (PRIMARY); +SET rocksdb_bulk_load=0; +SELECT * FROM t1 FORCE INDEX (PRIMARY); +DROP TABLE t1; + +# A table with a PK and a SK shows rows immediately +eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf", KEY(b)) + ENGINE=ROCKSDB; +SET rocksdb_bulk_load=1; +--disable_query_log +let $sign = 1; +let $max = 5; +let $i = 1; +while ($i <= $max) { + let $a = 1 + $sign * $i; + let $b = 1 - $sign * $i; + let $sign = -$sign; + let $insert = INSERT INTO t1 VALUES ($a, $b); + eval $insert; + inc $i; +} +--enable_query_log + +SELECT * FROM t1 FORCE INDEX (PRIMARY); +SET rocksdb_bulk_load=0; +SELECT * FROM t1 FORCE INDEX (PRIMARY); +DROP TABLE t1; + +# Inserting into another table finishes bulk load to the previous table +eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf") + ENGINE=ROCKSDB; +eval CREATE TABLE t2(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf") + ENGINE=ROCKSDB; + +SET rocksdb_bulk_load=1; +INSERT INTO t1 VALUES (1,1); +INSERT INTO t2 VALUES (1,1); +SELECT * FROM t1 FORCE INDEX (PRIMARY); +INSERT INTO t1 VALUES (2,2); +SELECT * FROM t2 FORCE INDEX (PRIMARY); +SELECT * FROM t1 FORCE INDEX (PRIMARY); +SET rocksdb_bulk_load=0; +SELECT * FROM t1 FORCE INDEX (PRIMARY); +DROP TABLE t1, t2; + +### Test bulk load from a file ### +eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf") + ENGINE=ROCKSDB; +eval CREATE TABLE t2(a INT, b INT, PRIMARY KEY(b) COMMENT "$pk_cf") + ENGINE=ROCKSDB; +eval CREATE TABLE t3(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf") + ENGINE=ROCKSDB PARTITION BY KEY() PARTITIONS 4; + +--let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")` +# Create a text file with data to import into the table. 
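The unsorted variant used above relaxes the usual requirement that bulk-loaded rows arrive in primary-key order; what stays enforced (see rocksdb_check_bulk_load_allow_unsorted in ha_rocksdb.cc) is that the setting itself cannot be flipped while a bulk load is already active. Roughly:

  SET rocksdb_bulk_load_allow_unsorted = 1;
  SET rocksdb_bulk_load = 1;
  INSERT INTO t1 VALUES (5,1),(3,2),(1,3);    -- PK values need not be sorted
  SET rocksdb_bulk_load_allow_unsorted = 0;   -- rejected: cannot change while
                                              -- bulk load is enabled
  SET rocksdb_bulk_load = 0;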
+# PK and SK are not in any order +--let ROCKSDB_INFILE = $file +perl; +my $fn = $ENV{'ROCKSDB_INFILE'}; +open(my $fh, '>', $fn) || die "perl open($fn): $!"; +binmode $fh; +my $max = 2500000; +my $sign = 1; +for (my $ii = 0; $ii < $max; $ii++) +{ + my $a = 1 + $sign * $ii; + my $b = 1 - $sign * $ii; + $sign = -$sign; + print $fh "$a\t$b\n"; +} +close($fh); +EOF +--file_exists $file + +# Make sure a snapshot held by another user doesn't block the bulk load +connect (other,localhost,root,,); +set session transaction isolation level repeatable read; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +start transaction with consistent snapshot; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; + +connection default; +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +--disable_query_log +--echo LOAD DATA INFILE INTO TABLE t1; +eval LOAD DATA INFILE '$file' INTO TABLE t1; +--echo LOAD DATA INFILE INTO TABLE t2; +eval LOAD DATA INFILE '$file' INTO TABLE t2; +--echo LOAD DATA INFILE INTO TABLE t3; +eval LOAD DATA INFILE '$file' INTO TABLE t3; +--enable_query_log +set rocksdb_bulk_load=0; + +--remove_file $file + +# Make sure row count index stats are correct +--replace_column 6 # 7 # 8 # 9 # +SHOW TABLE STATUS WHERE name LIKE 't%'; + +ANALYZE TABLE t1, t2, t3; + +--replace_column 6 # 7 # 8 # 9 # +SHOW TABLE STATUS WHERE name LIKE 't%'; + +# Make sure all the data is there. +select count(a),count(b) from t1; +select count(a),count(b) from t2; +select count(a),count(b) from t3; + +SELECT * FROM t1 FORCE INDEX (PRIMARY) LIMIT 3; +SELECT * FROM t2 FORCE INDEX (PRIMARY) LIMIT 3; + +disconnect other; +DROP TABLE t1, t2, t3; + +--source include/wait_until_count_sessions.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_invalid_option.inc b/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_invalid_option.inc new file mode 100644 index 00000000000..8eef7ed2162 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_invalid_option.inc @@ -0,0 +1,8 @@ +--source include/shutdown_mysqld.inc + +# Expect the server to fail to come up with these options +--error 1 +--exec $MYSQLD_CMD --plugin_load=$HA_ROCKSDB_SO $_mysqld_option + +# Restart the server with the default options +--source include/start_mysqld.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/include/start_mysqld_with_option.inc b/storage/rocksdb/mysql-test/rocksdb/include/start_mysqld_with_option.inc new file mode 100644 index 00000000000..73e30b3e46c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/start_mysqld_with_option.inc @@ -0,0 +1,14 @@ +# Include this script only after using shutdown_mysqld.inc +# where $_expect_file_name was initialized. 
+# Write file to make mysql-test-run.pl start up the server again +--exec echo "restart:$_mysqld_option" > $_expect_file_name + +# Turn on reconnect +--enable_reconnect + +# Call script that will poll the server waiting for it to be back online again +--source include/wait_until_connected_again.inc + +# Turn off reconnect again +--disable_reconnect + diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result index 0c3ad720194..08f2329f688 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result @@ -14,7 +14,14 @@ select count(b) from t1; count(b) 300000 ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +set @tmp= @@rocksdb_max_row_locks; +set session rocksdb_max_row_locks=1000; ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY; +ERROR HY000: Status error 10 received from RocksDB: Operation aborted: Failed to acquire lock due to max_num_locks limit +set session rocksdb_bulk_load=1; +ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY; +set session rocksdb_bulk_load=0; +set session rocksdb_max_row_locks=@tmp; SELECT COUNT(*) as c FROM (SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#', `b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE INDEX(`kb`) UNION DISTINCT diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result index 5bffab74917..a8d5c07072c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result @@ -262,3 +262,34 @@ SELECT * FROM t1; a b 36 foo DROP TABLE t1; +# +# Issue #834/MDEV-15304 ALTER TABLE table_with_hidden_pk causes Can't +# write; duplicate key in table error and/or crash +# +CREATE TABLE t1 (a INT, KEY(a)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1),(1+1); +create table t2 (a int); +insert into t2 values (10),(20),(30); +BEGIN; +select * from t2; +a +10 +20 +30 +connect con1,localhost,root,,; +connection con1; +alter table t1 force; +connection default; +select * from t1; +a +connection con1; +insert into t1 values (100); +select * from t1; +a +1 +2 +100 +disconnect con1; +connection default; +rollback; +drop table t1,t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result index f8508febb01..5d947603ec5 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result @@ -778,3 +778,20 @@ set global rocksdb_force_flush_memtable_now = true; select * from t1; col1 col2 extra DROP TABLE t1; +create table t1 (i int auto_increment, key(i)) engine=rocksdb; +insert into t1 values(); +insert into t1 values(); +insert into t1 values(); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL AUTO_INCREMENT, + KEY `i` (`i`) +) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1 +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL AUTO_INCREMENT, + KEY `i` (`i`) +) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_to_start_after_corruption.result 
b/storage/rocksdb/mysql-test/rocksdb/r/allow_to_start_after_corruption.result new file mode 100644 index 00000000000..9b5a335b6f8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_to_start_after_corruption.result @@ -0,0 +1,38 @@ +# +# Test how MyRocks behaves when RocksDB reports corrupted data. +# +# +# Test server crashes on corrupted data and restarts +# +create table t1 ( +pk int not null primary key, +col1 varchar(10) +) engine=rocksdb; +insert into t1 values (1,1),(2,2),(3,3); +select * from t1 where pk=1; +pk col1 +1 1 +set session debug_dbug= "+d,rocksdb_return_status_corrupted"; +select * from t1 where pk=1; +ERROR HY000: Lost connection to MySQL server during query +FOUND 1 /data corruption detected/ in allow_to_start_after_corruption_debug.err +# +# The same for scan queries +# +select * from t1; +pk col1 +1 1 +2 2 +3 3 +set session debug_dbug= "+d,rocksdb_return_status_corrupted"; +select * from t1; +ERROR HY000: Lost connection to MySQL server during query +FOUND 1 /data corruption detected/ in allow_to_start_after_corruption_debug.err +# +# Test restart failure. The server is shutdown at this point. +# +FOUND 1 /The server will exit normally and stop restart attempts/ in allow_to_start_after_corruption_debug.err +# +# Remove corruption file and restart cleanly +# +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result b/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result index ff2973230db..b666a17c81c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result @@ -27,3 +27,29 @@ ANALYZE TABLE t1; Table Op Msg_type Msg_text test.t1 analyze status OK DROP TABLE t1; +# +# MDEV-12465: Server crashes in my_scan_weight_utf8_bin upon +# collecting stats for RocksDB table +# +CREATE TABLE t1 ( +pk INT, +f1 CHAR(255), +f2 TEXT, +f3 VARCHAR(255), +f4 TEXT, +PRIMARY KEY (pk), +KEY (f4(255)) +) ENGINE=RocksDB +CHARSET utf8 +COLLATE utf8_bin +PARTITION BY KEY (pk) PARTITIONS 2; +INSERT INTO t1 VALUES +(1,'foo','bar','foo','bar'), (2,'bar','foo','bar','foo'); +ANALYZE TABLE t1 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze Warning Engine-independent statistics are not collected for column 'f2' +test.t1 analyze Warning Engine-independent statistics are not collected for column 'f4' +test.t1 analyze status OK +drop table t1; +# End of 10.2 tests diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_crash_safe.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_crash_safe.result new file mode 100644 index 00000000000..60395eced7e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_crash_safe.result @@ -0,0 +1,132 @@ +include/master-slave.inc +[connection master] +create table t (i int primary key auto_increment) engine=rocksdb; +# +# Testing concurrent transactions. 
+# +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connect con3,localhost,root,,; +connection con1; +begin; +insert into t values (); +connection con2; +begin; +insert into t values (); +connection con3; +begin; +insert into t values (); +connection con1; +insert into t values (); +connection con2; +insert into t values (); +connection con3; +insert into t values (); +connection con2; +commit; +connection con3; +rollback; +connection con1; +commit; +delete from t; +# Master value before restart +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 7 +# Slave value before restart +connection slave; +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 6 +connection slave; +include/stop_slave.inc +include/rpl_restart_server.inc [server_number=1] +connection default; +# Master value after restart +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 6 +include/rpl_restart_server.inc [server_number=2] +connection slave; +include/start_slave.inc +# Slave value after restart +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 6 +disconnect con1; +disconnect con2; +disconnect con3; +# +# Testing interaction of merge markers with various DDL statements. +# +connection slave; +include/stop_slave.inc +connection default; +# Drop and add primary key. +alter table t modify i int; +alter table t drop primary key; +alter table t add primary key (i); +alter table t modify i int auto_increment; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 6 +# Remove auto_increment property. +alter table t modify i int; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t NULL +# Add auto_increment property. +insert into t values (123); +alter table t modify i int auto_increment; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 124 +# Add column j. +alter table t add column j int; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 124 +# Rename tables. 
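Condensed from the expected output above (the numbers are the test's own): the persisted counter tracks the data, not the DDL history. Re-adding the AUTO_INCREMENT property recomputes it from MAX(i)+1, renames carry it along, and an explicit AUTO_INCREMENT clause can raise it but never push it below the existing maximum.

  ALTER TABLE t MODIFY i INT;                  -- property removed, counter becomes NULL
  INSERT INTO t VALUES (123);
  ALTER TABLE t MODIFY i INT AUTO_INCREMENT;   -- recomputed: 124
  RENAME TABLE t TO t2;  RENAME TABLE t2 TO t; -- still 124 after restart
  ALTER TABLE t AUTO_INCREMENT = 1000;         -- raised to 1000
  ALTER TABLE t AUTO_INCREMENT = 1;            -- clamped back to 124 after restart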
+rename table t to t2; +rename table t2 to t; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 124 +# Change auto_increment property +alter table t auto_increment = 1000; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 1000 +alter table t auto_increment = 1; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 124 +alter table t drop primary key, add key (i), auto_increment = 1; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 124 +alter table t add key (j), auto_increment = 1; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 124 +alter table t modify i int; +alter table t add column (k int auto_increment), add key(k), auto_increment=15; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 16 +# Drop table. +drop table t; +include/rpl_restart_server.inc [server_number=1] +connection slave; +include/start_slave.inc +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_crash_safe_partition.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_crash_safe_partition.result new file mode 100644 index 00000000000..c837fb7c77d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_crash_safe_partition.result @@ -0,0 +1,132 @@ +include/master-slave.inc +[connection master] +create table t (i int primary key auto_increment) engine=rocksdb partition by key (i) partitions 3; +# +# Testing concurrent transactions. 
+# +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connect con3,localhost,root,,; +connection con1; +begin; +insert into t values (); +connection con2; +begin; +insert into t values (); +connection con3; +begin; +insert into t values (); +connection con1; +insert into t values (); +connection con2; +insert into t values (); +connection con3; +insert into t values (); +connection con2; +commit; +connection con3; +rollback; +connection con1; +commit; +delete from t; +# Master value before restart +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 7 +# Slave value before restart +connection slave; +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 6 +connection slave; +include/stop_slave.inc +include/rpl_restart_server.inc [server_number=1] +connection default; +# Master value after restart +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 6 +include/rpl_restart_server.inc [server_number=2] +connection slave; +include/start_slave.inc +# Slave value after restart +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 6 +disconnect con1; +disconnect con2; +disconnect con3; +# +# Testing interaction of merge markers with various DDL statements. +# +connection slave; +include/stop_slave.inc +connection default; +# Drop and add primary key. +alter table t modify i int; +alter table t drop primary key; +alter table t add primary key (i); +alter table t modify i int auto_increment; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 1 +# Remove auto_increment property. +alter table t modify i int; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t NULL +# Add auto_increment property. +insert into t values (123); +alter table t modify i int auto_increment; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 124 +# Add column j. +alter table t add column j int; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 124 +# Rename tables. 
+rename table t to t2; +rename table t2 to t; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 124 +# Change auto_increment property +alter table t auto_increment = 1000; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 1000 +alter table t auto_increment = 1; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 124 +alter table t drop primary key, add key (i), auto_increment = 1; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 124 +alter table t add key (j), auto_increment = 1; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 124 +alter table t modify i int; +alter table t add column (k int auto_increment), add key(k), auto_increment=15; +include/rpl_restart_server.inc [server_number=1] +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 16 +# Drop table. +drop table t; +include/rpl_restart_server.inc [server_number=1] +connection slave; +include/start_slave.inc +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_debug.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_debug.result new file mode 100644 index 00000000000..fe08cd7c361 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_debug.result @@ -0,0 +1,107 @@ +# +# Testing upgrading from server without merges for auto_increment +# to new server with such support. +# +set debug_dbug='+d,myrocks_autoinc_upgrade'; +create table t (i int primary key auto_increment); +insert into t values (); +insert into t values (); +insert into t values (); +select * from t; +i +1 +2 +3 +delete from t where i > 1; +select * from t; +i +1 +select table_name, index_name, auto_increment +from information_schema.rocksdb_ddl where table_name = 't'; +table_name index_name auto_increment +t PRIMARY NULL +set debug_dbug='-d,myrocks_autoinc_upgrade'; +insert into t values (); +insert into t values (); +insert into t values (); +select * from t; +i +1 +2 +3 +4 +select table_name, index_name, auto_increment +from information_schema.rocksdb_ddl where table_name = 't'; +table_name index_name auto_increment +t PRIMARY 5 +delete from t where i > 1; +insert into t values (); +insert into t values (); +insert into t values (); +select * from t; +i +1 +5 +6 +7 +drop table t; +# +# Testing crash safety of transactions. 
+# +create table t (i int primary key auto_increment); +insert into t values (); +insert into t values (); +insert into t values (); +# Before anything +begin; +insert into t values (); +insert into t values (); +set debug_dbug="+d,crash_commit_before"; +commit; +ERROR HY000: Lost connection to MySQL server during query +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 4 +select max(i) from t; +max(i) +3 +# After engine prepare +begin; +insert into t values (); +insert into t values (); +set debug_dbug="+d,crash_commit_after_prepare"; +commit; +ERROR HY000: Lost connection to MySQL server during query +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 4 +select max(i) from t; +max(i) +3 +# After binlog +begin; +insert into t values (); +insert into t values (); +set debug_dbug="+d,crash_commit_after_log"; +commit; +ERROR HY000: Lost connection to MySQL server during query +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 6 +select max(i) from t; +max(i) +5 +# After everything +begin; +insert into t values (); +insert into t values (); +set debug_dbug="+d,crash_commit_after"; +commit; +ERROR HY000: Lost connection to MySQL server during query +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +table_schema table_name auto_increment +test t 8 +select max(i) from t; +max(i) +7 +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result index 0fb3d96c58f..8cc9b070a70 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result @@ -61,3 +61,92 @@ LAST_INSERT_ID() SELECT a FROM t1 ORDER BY a; a DROP TABLE t1; +#--------------------------- +# test large autoincrement values +#--------------------------- +SET auto_increment_increment = 1; +SET auto_increment_offset = 1; +CREATE TABLE t1 (a BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (18446744073709551613, 'a'); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=18446744073709551614 DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (NULL, 'b'); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=18446744073709551615 DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (NULL, 'c'); +ERROR HY000: Failed to read auto-increment value from storage engine +SELECT * FROM t1; +a b +18446744073709551613 a +18446744073709551614 b +DROP TABLE t1; +SET auto_increment_increment = 300; +CREATE TABLE t1 (a BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (18446744073709551613, 'a'); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=18446744073709551614 DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (NULL, 'b'); +ERROR HY000: Failed to read 
auto-increment value from storage engine +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=18446744073709551615 DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (NULL, 'c'); +ERROR HY000: Failed to read auto-increment value from storage engine +SELECT * FROM t1; +a b +18446744073709551613 a +DROP TABLE t1; +SET auto_increment_offset = 200; +CREATE TABLE t1 (a BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (18446744073709551613, 'a'); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=18446744073709551614 DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (NULL, 'b'); +ERROR HY000: Failed to read auto-increment value from storage engine +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=18446744073709551615 DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (NULL, 'c'); +ERROR HY000: Failed to read auto-increment value from storage engine +SELECT * FROM t1; +a b +18446744073709551613 a +DROP TABLE t1; +#---------------------------------- +# Issue #792 Crash in autoincrement +#---------------------------------- +CREATE TABLE t1(C1 DOUBLE AUTO_INCREMENT KEY,C2 CHAR) ENGINE=ROCKSDB; +INSERT INTO t1 VALUES(2177,0); +DROP TABLE t1; +CREATE TABLE t0(c0 BLOB) ENGINE=ROCKSDB; +INSERT INTO t0 VALUES(0); +ALTER TABLE t0 AUTO_INCREMENT=0; +DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result b/storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result deleted file mode 100644 index 28b5b6cd070..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result +++ /dev/null @@ -1 +0,0 @@ -# The test checks AUTO_INCREMENT capabilities that are not supported by RocksDB-SE. 
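The autoinc_debug.result hunks above only record the expected output; the driving .test file is not part of this excerpt. As a minimal sketch of how such a crash-point test is typically written, assuming the standard mtr restart convention (writing "restart" to the server's .expect file, error 2013 for the lost connection, and include/wait_until_connected_again.inc) rather than the exact helpers used by this patch:

# Illustrative sketch only -- not the patch's actual .test file.
# Tell mtr to restart the server when it goes away at the crash point.
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
CREATE TABLE t (i INT PRIMARY KEY AUTO_INCREMENT) ENGINE=ROCKSDB;
INSERT INTO t VALUES (), (), ();
BEGIN;
INSERT INTO t VALUES (), ();
# Same debug crash point as in the recorded results above.
SET debug_dbug="+d,crash_commit_after_prepare";
--error 2013
COMMIT;
--enable_reconnect
--source include/wait_until_connected_again.inc
# Per the recorded results, crash points before the binlog write leave the
# persisted counter at 4 and max(i) at 3; later crash points show 6/5 and 8/7.
SELECT table_schema, table_name, auto_increment
  FROM information_schema.tables WHERE table_name = 't';
SELECT MAX(i) FROM t;
DROP TABLE t;

The point of checking both the information_schema counter and MAX(i) is to confirm that the auto_increment value MyRocks persists at each commit stage is never behind the rows that actually survived the crash.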
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result index 5f1083a1bb2..bc5d685f89b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result @@ -43,6 +43,7 @@ insert t1 select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" from seq_1_to_10000; insert t2 select * from t1; +set global rocksdb_force_flush_memtable_now=1; call bloom_start(); select count(*) from t1; count(*) @@ -443,6 +444,7 @@ insert t1 select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" from seq_1_to_10000; insert t2 select * from t1; +set global rocksdb_force_flush_memtable_now=1; call bloom_start(); select count(*) from t1; count(*) @@ -843,6 +845,7 @@ insert t1 select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" from seq_1_to_10000; insert t2 select * from t1; +set global rocksdb_force_flush_memtable_now=1; call bloom_start(); select count(*) from t1; count(*) @@ -1243,6 +1246,7 @@ insert t1 select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" from seq_1_to_10000; insert t2 select * from t1; +set global rocksdb_force_flush_memtable_now=1; call bloom_start(); select count(*) from t1; count(*) @@ -1643,6 +1647,7 @@ insert t1 select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" from seq_1_to_10000; insert t2 select * from t1; +set global rocksdb_force_flush_memtable_now=1; call bloom_start(); select count(*) from t1; count(*) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter5.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter5.result new file mode 100644 index 00000000000..4f6702b85a7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter5.result @@ -0,0 +1,62 @@ +# +# Issue #809: Wrong query result with bloom filters +# +create table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4) COMMENT 'rev:bf5_1' +) engine=ROCKSDB; +create table t2(a int); +insert into t2 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t3(seq int); +insert into t3 +select +1+ A.a + B.a* 10 + C.a * 100 + D.a * 1000 +from t2 A, t2 B, t2 C, t2 D; +insert t1 +select +(seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" +from t3; +set global rocksdb_force_flush_memtable_now=1; +# Full table scan +explain +select * from t1 limit 10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10000 +select * from t1 limit 10; +id1 id2 id3 id4 id5 value value2 +1000 2000 2000 10000 10000 1000 aaabbbccc +1000 2000 2000 9999 9999 1000 aaabbbccc +1000 2000 2000 9998 9998 1000 aaabbbccc +1000 2000 2000 9997 9997 1000 aaabbbccc +1000 2000 2000 9996 9996 1000 aaabbbccc +1000 1999 1999 9995 9995 1000 aaabbbccc +1000 1999 1999 9994 9994 1000 aaabbbccc +1000 1999 1999 9993 9993 1000 aaabbbccc +1000 1999 1999 9992 9992 1000 aaabbbccc +1000 1999 1999 9991 9991 1000 aaabbbccc +# An index scan starting from the end of the table: +explain +select * from t1 order by id1 desc,id2 desc, id3 desc, id4 desc limit 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 122 NULL 1 +select * from t1 order by id1 desc,id2 desc, id3 desc, id4 desc limit 1; +id1 id2 id3 id4 id5 value value2 +1000 2000 2000 
10000 10000 1000 aaabbbccc +create table t4 ( +pk int unsigned not null primary key, +kp1 int unsigned not null, +kp2 int unsigned not null, +col1 int unsigned, +key(kp1, kp2) comment 'rev:bf5_2' +) engine=rocksdb; +insert into t4 values (1, 0xFFFF, 0xFFF, 12345); +# This must not fail an assert: +select * from t4 force index(kp1) where kp1=0xFFFFFFFF and kp2<=0xFFFFFFFF order by kp2 desc; +pk kp1 kp2 col1 +drop table t1,t2,t3,t4; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result index 2496f349427..28475630564 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result @@ -43,6 +43,7 @@ insert t1 select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" from seq_1_to_10000; insert t2 select * from t1; +set global rocksdb_force_flush_memtable_now=1; call bloom_start(); select count(*) from t1; count(*) @@ -443,6 +444,7 @@ insert t1 select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" from seq_1_to_10000; insert t2 select * from t1; +set global rocksdb_force_flush_memtable_now=1; call bloom_start(); select count(*) from t1; count(*) @@ -843,6 +845,7 @@ insert t1 select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" from seq_1_to_10000; insert t2 select * from t1; +set global rocksdb_force_flush_memtable_now=1; call bloom_start(); select count(*) from t1; count(*) @@ -1243,6 +1246,7 @@ insert t1 select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" from seq_1_to_10000; insert t2 select * from t1; +set global rocksdb_force_flush_memtable_now=1; call bloom_start(); select count(*) from t1; count(*) @@ -1643,6 +1647,7 @@ insert t1 select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" from seq_1_to_10000; insert t2 select * from t1; +set global rocksdb_force_flush_memtable_now=1; call bloom_start(); select count(*) from t1; count(*) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result index 90f28929db6..21417caf760 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result @@ -1,4 +1,3 @@ -DROP TABLE IF EXISTS t1, t2, t3; Data will be ordered in ascending order CREATE TABLE t1( pk CHAR(5), @@ -6,21 +5,21 @@ a CHAR(30), b CHAR(30), PRIMARY KEY(pk) COMMENT "cf1", KEY(a) -) COLLATE 'latin1_bin'; +) ENGINE=ROCKSDB COLLATE 'latin1_bin'; CREATE TABLE t2( pk CHAR(5), a CHAR(30), b CHAR(30), PRIMARY KEY(pk) COMMENT "cf1", KEY(a) -) COLLATE 'latin1_bin'; +) ENGINE=ROCKSDB COLLATE 'latin1_bin'; CREATE TABLE t3( pk CHAR(5), a CHAR(30), b CHAR(30), PRIMARY KEY(pk) COMMENT "cf1", KEY(a) -) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4; +) ENGINE=ROCKSDB COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4; connect other,localhost,root,,; set session transaction isolation level repeatable read; select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; @@ -39,9 +38,9 @@ LOAD DATA INFILE INTO TABLE t3; set rocksdb_bulk_load=0; SHOW TABLE STATUS WHERE name LIKE 't%'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary -t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL 
latin1_bin NULL 0 N -t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N +t1 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N ANALYZE TABLE t1, t2, t3; Table Op Msg_type Msg_text test.t1 analyze status OK @@ -49,36 +48,27 @@ test.t2 analyze status OK test.t3 analyze status OK SHOW TABLE STATUS WHERE name LIKE 't%'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary -t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N -select count(pk) from t1; -count(pk) -5000000 -select count(a) from t1; -count(a) -5000000 -select count(b) from t1; +t1 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N +select count(pk),count(a) from t1; +count(pk) count(a) +2500000 2500000 +select count(b) from t1; count(b) -5000000 -select count(pk) from t2; -count(pk) -5000000 -select count(a) from t2; -count(a) -5000000 -select count(b) from t2; +2500000 +select count(pk),count(a) from t2; +count(pk) count(a) +2500000 2500000 +select count(b) from t2; count(b) -5000000 -select count(pk) from t3; -count(pk) -5000000 -select count(a) from t3; -count(a) -5000000 -select count(b) from t3; +2500000 +select count(pk),count(a) from t3; +count(pk) count(a) +2500000 2500000 +select count(b) from t3; count(b) -5000000 +2500000 longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp test.bulk_load.tmp disconnect other; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_drop_table.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_drop_table.result new file mode 100644 index 00000000000..4e79d82810e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_drop_table.result @@ -0,0 +1,11 @@ +CREATE TABLE t1 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB; +SET rocksdb_bulk_load_allow_unsorted=1; +SET rocksdb_bulk_load=1; +INSERT INTO t1 VALUES (1); +connect con1,localhost,root,,; +DROP TABLE t1; +connection default; +disconnect con1; +SET rocksdb_bulk_load=0; +SELECT * FROM t1; +ERROR 42S02: Table 'test.t1' doesn't exist diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result index f230b173892..3703c208d0b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result @@ -1,4 +1,4 @@ -CREATE TABLE t1(pk INT, PRIMARY KEY(pk)); +CREATE TABLE t1(pk INT, PRIMARY KEY(pk)) ENGINE=ROCKSDB; SET rocksdb_bulk_load=1; INSERT INTO t1 VALUES(10); INSERT INTO t1 VALUES(11); @@ -14,18 +14,30 @@ INSERT INTO t1 VALUES(1); INSERT INTO t1 VALUES(2); INSERT INTO t1 VALUES(20); INSERT INTO t1 VALUES(21); -# -# In MyRocks, the following statement will intentionally crash the server. 
-# In MariaDB, it will cause an error SET rocksdb_bulk_load=0; ERROR HY000: Rows inserted during bulk load must not overlap existing rows -# -# Despite the error, bulk load operation is over so the variable value -# will be 0: -select @@rocksdb_bulk_load; -@@rocksdb_bulk_load -0 +SHOW VARIABLES LIKE 'rocksdb_bulk_load'; +Variable_name Value +rocksdb_bulk_load OFF call mtr.add_suppression('finalizing last SST file while setting bulk loading variable'); +SELECT * FROM t1; +pk +10 +11 +FOUND 1 /RocksDB: Error [0-9]+ finalizing last SST file while setting bulk loading variable/ in rocksdb.bulk_load_errors.1.err +connect con1,localhost,root,,; +SET rocksdb_bulk_load=1; +INSERT INTO t1 VALUES(1); +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(20); +INSERT INTO t1 VALUES(21); +connection default; +disconnect con1; +SELECT * FROM t1; +pk +10 +11 +FOUND 1 /RocksDB: Error [0-9]+ finalizing last SST file while disconnecting/ in rocksdb.bulk_load_errors.2.err TRUNCATE TABLE t1; SET rocksdb_bulk_load_allow_unsorted=1; SET rocksdb_bulk_load=1; @@ -53,3 +65,35 @@ pk 202 SET rocksdb_bulk_load_allow_unsorted=DEFAULT; DROP TABLE t1; +CREATE TABLE t1(c1 INT KEY) ENGINE=ROCKSDB; +SET rocksdb_bulk_load=1; +INSERT INTO t1 VALUES (),(),(); +ERROR HY000: Rows must be inserted in primary key order during bulk load operation +SET rocksdb_bulk_load=0; +DROP TABLE t1; +SET @orig_table_open_cache=@@global.table_open_cache; +CREATE TABLE t1(a INT AUTO_INCREMENT, b INT, PRIMARY KEY (a)) ENGINE=ROCKSDB DEFAULT CHARSET=latin1; +SET rocksdb_bulk_load=1; +INSERT INTO t1 VALUES(13, 0); +INSERT INTO t1 VALUES(2, 'test 2'); +Warnings: +Warning 1366 Incorrect integer value: 'test 2' for column 'b' at row 1 +INSERT INTO t1 VALUES(@id, @arg04); +SET @@global.table_open_cache=FALSE; +Warnings: +Warning 1292 Truncated incorrect table_open_cache value: '0' +INSERT INTO t1 VALUES(51479+0.333333333,1); +DROP TABLE t1; +SET @@global.table_open_cache=@orig_table_open_cache; +FOUND 1 /RocksDB: Error [0-9]+ finalizing bulk load while closing handler/ in rocksdb.bulk_load_errors.3.err +CREATE TABLE t1 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB; +CREATE TABLE t2 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB; +SET rocksdb_bulk_load=1; +INSERT INTO t1 VALUES (1), (2); +INSERT INTO t2 VALUES (1), (2); +INSERT INTO t1 VALUES (1); +INSERT INTO t2 VALUES (3); +ERROR HY000: Rows inserted during bulk load must not overlap existing rows +SET rocksdb_bulk_load=0; +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result index c24d987a906..484c2a89c3a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result @@ -1,4 +1,3 @@ -DROP TABLE IF EXISTS t1, t2, t3; Data will be ordered in ascending order CREATE TABLE t1( pk CHAR(5), @@ -6,21 +5,21 @@ a CHAR(30), b CHAR(30), PRIMARY KEY(pk) COMMENT "rev:cf1", KEY(a) -) COLLATE 'latin1_bin'; +) ENGINE=ROCKSDB COLLATE 'latin1_bin'; CREATE TABLE t2( pk CHAR(5), a CHAR(30), b CHAR(30), PRIMARY KEY(pk) COMMENT "rev:cf1", KEY(a) -) COLLATE 'latin1_bin'; +) ENGINE=ROCKSDB COLLATE 'latin1_bin'; CREATE TABLE t3( pk CHAR(5), a CHAR(30), b CHAR(30), PRIMARY KEY(pk) COMMENT "rev:cf1", KEY(a) -) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4; +) ENGINE=ROCKSDB COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4; connect other,localhost,root,,; set session transaction isolation level repeatable read; select * from 
information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; @@ -39,9 +38,9 @@ LOAD DATA INFILE INTO TABLE t3; set rocksdb_bulk_load=0; SHOW TABLE STATUS WHERE name LIKE 't%'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary -t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N +t1 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N ANALYZE TABLE t1, t2, t3; Table Op Msg_type Msg_text test.t1 analyze status OK @@ -49,36 +48,27 @@ test.t2 analyze status OK test.t3 analyze status OK SHOW TABLE STATUS WHERE name LIKE 't%'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary -t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N -select count(pk) from t1; -count(pk) -5000000 -select count(a) from t1; -count(a) -5000000 -select count(b) from t1; +t1 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N +select count(pk),count(a) from t1; +count(pk) count(a) +2500000 2500000 +select count(b) from t1; count(b) -5000000 -select count(pk) from t2; -count(pk) -5000000 -select count(a) from t2; -count(a) -5000000 -select count(b) from t2; +2500000 +select count(pk),count(a) from t2; +count(pk) count(a) +2500000 2500000 +select count(b) from t2; count(b) -5000000 -select count(pk) from t3; -count(pk) -5000000 -select count(a) from t3; -count(a) -5000000 -select count(b) from t3; +2500000 +select count(pk),count(a) from t3; +count(pk) count(a) +2500000 2500000 +select count(b) from t3; count(b) -5000000 +2500000 longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp test.bulk_load.tmp disconnect other; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result index b851133ab18..35a2845cb42 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result @@ -1,4 +1,3 @@ -DROP TABLE IF EXISTS t1, t2, t3; Data will be ordered in descending order CREATE TABLE t1( pk CHAR(5), @@ -6,21 +5,21 @@ a CHAR(30), b CHAR(30), PRIMARY KEY(pk) COMMENT "rev:cf1", KEY(a) -) COLLATE 'latin1_bin'; +) ENGINE=ROCKSDB COLLATE 'latin1_bin'; CREATE TABLE t2( pk CHAR(5), a CHAR(30), b CHAR(30), PRIMARY KEY(pk) COMMENT "rev:cf1", KEY(a) -) COLLATE 'latin1_bin'; +) ENGINE=ROCKSDB COLLATE 'latin1_bin'; CREATE TABLE t3( pk CHAR(5), a CHAR(30), b CHAR(30), PRIMARY KEY(pk) COMMENT "rev:cf1", KEY(a) -) COLLATE 'latin1_bin' PARTITION BY 
KEY() PARTITIONS 4; +) ENGINE=ROCKSDB COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4; connect other,localhost,root,,; set session transaction isolation level repeatable read; select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; @@ -39,9 +38,9 @@ LOAD DATA INFILE INTO TABLE t3; set rocksdb_bulk_load=0; SHOW TABLE STATUS WHERE name LIKE 't%'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary -t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N +t1 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N ANALYZE TABLE t1, t2, t3; Table Op Msg_type Msg_text test.t1 analyze status OK @@ -49,36 +48,27 @@ test.t2 analyze status OK test.t3 analyze status OK SHOW TABLE STATUS WHERE name LIKE 't%'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary -t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N -select count(pk) from t1; -count(pk) -5000000 -select count(a) from t1; -count(a) -5000000 -select count(b) from t1; +t1 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N +select count(pk),count(a) from t1; +count(pk) count(a) +2500000 2500000 +select count(b) from t1; count(b) -5000000 -select count(pk) from t2; -count(pk) -5000000 -select count(a) from t2; -count(a) -5000000 -select count(b) from t2; +2500000 +select count(pk),count(a) from t2; +count(pk) count(a) +2500000 2500000 +select count(b) from t2; count(b) -5000000 -select count(pk) from t3; -count(pk) -5000000 -select count(a) from t3; -count(a) -5000000 -select count(b) from t3; +2500000 +select count(pk),count(a) from t3; +count(pk) count(a) +2500000 2500000 +select count(b) from t3; count(b) -5000000 +2500000 longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp test.bulk_load.tmp disconnect other; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result index efd7c40ed69..12013539017 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result @@ -1,4 +1,3 @@ -DROP TABLE IF EXISTS t1, t2, t3; Data will be ordered in descending order CREATE TABLE t1( pk CHAR(5), @@ -6,21 +5,21 @@ a CHAR(30), b CHAR(30), PRIMARY KEY(pk) COMMENT "cf1", KEY(a) -) COLLATE 'latin1_bin'; +) ENGINE=ROCKSDB COLLATE 'latin1_bin'; CREATE TABLE t2( pk CHAR(5), a CHAR(30), b CHAR(30), PRIMARY KEY(pk) COMMENT "cf1", KEY(a) -) COLLATE 'latin1_bin'; +) 
ENGINE=ROCKSDB COLLATE 'latin1_bin'; CREATE TABLE t3( pk CHAR(5), a CHAR(30), b CHAR(30), PRIMARY KEY(pk) COMMENT "cf1", KEY(a) -) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4; +) ENGINE=ROCKSDB COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4; connect other,localhost,root,,; set session transaction isolation level repeatable read; select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; @@ -39,9 +38,9 @@ LOAD DATA INFILE INTO TABLE t3; set rocksdb_bulk_load=0; SHOW TABLE STATUS WHERE name LIKE 't%'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary -t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N +t1 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N ANALYZE TABLE t1, t2, t3; Table Op Msg_type Msg_text test.t1 analyze status OK @@ -49,36 +48,27 @@ test.t2 analyze status OK test.t3 analyze status OK SHOW TABLE STATUS WHERE name LIKE 't%'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary -t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N -t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N -select count(pk) from t1; -count(pk) -5000000 -select count(a) from t1; -count(a) -5000000 -select count(b) from t1; +t1 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N +t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N +select count(pk),count(a) from t1; +count(pk) count(a) +2500000 2500000 +select count(b) from t1; count(b) -5000000 -select count(pk) from t2; -count(pk) -5000000 -select count(a) from t2; -count(a) -5000000 -select count(b) from t2; +2500000 +select count(pk),count(a) from t2; +count(pk) count(a) +2500000 2500000 +select count(b) from t2; count(b) -5000000 -select count(pk) from t3; -count(pk) -5000000 -select count(a) from t3; -count(a) -5000000 -select count(b) from t3; +2500000 +select count(pk),count(a) from t3; +count(pk) count(a) +2500000 2500000 +select count(b) from t3; count(b) -5000000 +2500000 longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp test.bulk_load.tmp disconnect other; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result index 2bc8193e94f..444f997bf48 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result @@ -1,12 +1,12 @@ -DROP TABLE IF EXISTS t1; SET rocksdb_bulk_load_size=3; SET rocksdb_bulk_load_allow_unsorted=1; -CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1"); +CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT 
"cf1") +ENGINE=ROCKSDB; SET rocksdb_bulk_load=1; -SELECT * FROM t1; +SELECT * FROM t1 FORCE INDEX (PRIMARY); a b SET rocksdb_bulk_load=0; -SELECT * FROM t1; +SELECT * FROM t1 FORCE INDEX (PRIMARY); a b -3 5 -1 3 @@ -14,42 +14,49 @@ a b 4 -2 6 -4 DROP TABLE t1; -CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1", KEY(b)); +CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1", KEY(b)) +ENGINE=ROCKSDB; SET rocksdb_bulk_load=1; -SELECT * FROM t1; +SELECT * FROM t1 FORCE INDEX (PRIMARY); a b -6 -4 -4 -2 -2 0 --1 3 --3 5 SET rocksdb_bulk_load=0; +SELECT * FROM t1 FORCE INDEX (PRIMARY); +a b +-3 5 +-1 3 +2 0 +4 -2 +6 -4 DROP TABLE t1; -CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1"); -CREATE TABLE t2(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1"); +CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1") +ENGINE=ROCKSDB; +CREATE TABLE t2(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1") +ENGINE=ROCKSDB; SET rocksdb_bulk_load=1; INSERT INTO t1 VALUES (1,1); INSERT INTO t2 VALUES (1,1); -SELECT * FROM t1; +SELECT * FROM t1 FORCE INDEX (PRIMARY); a b 1 1 INSERT INTO t1 VALUES (2,2); -SELECT * FROM t2; +SELECT * FROM t2 FORCE INDEX (PRIMARY); a b 1 1 -SELECT * FROM t1; +SELECT * FROM t1 FORCE INDEX (PRIMARY); a b 1 1 SET rocksdb_bulk_load=0; -SELECT * FROM t1; +SELECT * FROM t1 FORCE INDEX (PRIMARY); a b 1 1 2 2 DROP TABLE t1, t2; -CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1"); -CREATE TABLE t2(a INT, b INT, PRIMARY KEY(b) COMMENT "cf1"); +CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1") +ENGINE=ROCKSDB; +CREATE TABLE t2(a INT, b INT, PRIMARY KEY(b) COMMENT "cf1") +ENGINE=ROCKSDB; CREATE TABLE t3(a INT, b INT, PRIMARY KEY(a) COMMENT "cf1") -PARTITION BY KEY() PARTITIONS 4; +ENGINE=ROCKSDB PARTITION BY KEY() PARTITIONS 4; connect other,localhost,root,,; set session transaction isolation level repeatable read; select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; @@ -68,9 +75,9 @@ LOAD DATA INFILE INTO TABLE t3; set rocksdb_bulk_load=0; SHOW TABLE STATUS WHERE name LIKE 't%'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary -t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N -t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N -t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned 0 N +t1 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N +t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N +t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned 0 N ANALYZE TABLE t1, t2, t3; Table Op Msg_type Msg_text test.t1 analyze status OK @@ -78,26 +85,27 @@ test.t2 analyze status OK test.t3 analyze status OK SHOW TABLE STATUS WHERE name LIKE 't%'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary -t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N -t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N -t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned 0 N -select count(a) from t1; -count(a) 
-5000000 -select count(b) from t1; -count(b) -5000000 -select count(a) from t2; -count(a) -5000000 -select count(b) from t2; -count(b) -5000000 -select count(a) from t3; -count(a) -5000000 -select count(b) from t3; -count(b) -5000000 +t1 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N +t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N +t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned 0 N +select count(a),count(b) from t1; +count(a) count(b) +2500000 2500000 +select count(a),count(b) from t2; +count(a) count(b) +2500000 2500000 +select count(a),count(b) from t3; +count(a) count(b) +2500000 2500000 +SELECT * FROM t1 FORCE INDEX (PRIMARY) LIMIT 3; +a b +-2499998 2500000 +-2499996 2499998 +-2499994 2499996 +SELECT * FROM t2 FORCE INDEX (PRIMARY) LIMIT 3; +a b +2499999 -2499997 +2499997 -2499995 +2499995 -2499993 +disconnect other; DROP TABLE t1, t2, t3; -SET rocksdb_bulk_load_allow_unsorted=0; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted_rev.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted_rev.result new file mode 100644 index 00000000000..dea69b3b089 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted_rev.result @@ -0,0 +1,111 @@ +SET rocksdb_bulk_load_size=3; +SET rocksdb_bulk_load_allow_unsorted=1; +CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "rev:cf1") +ENGINE=ROCKSDB; +SET rocksdb_bulk_load=1; +SELECT * FROM t1 FORCE INDEX (PRIMARY); +a b +SET rocksdb_bulk_load=0; +SELECT * FROM t1 FORCE INDEX (PRIMARY); +a b +6 -4 +4 -2 +2 0 +-1 3 +-3 5 +DROP TABLE t1; +CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "rev:cf1", KEY(b)) +ENGINE=ROCKSDB; +SET rocksdb_bulk_load=1; +SELECT * FROM t1 FORCE INDEX (PRIMARY); +a b +SET rocksdb_bulk_load=0; +SELECT * FROM t1 FORCE INDEX (PRIMARY); +a b +6 -4 +4 -2 +2 0 +-1 3 +-3 5 +DROP TABLE t1; +CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "rev:cf1") +ENGINE=ROCKSDB; +CREATE TABLE t2(a INT, b INT, PRIMARY KEY(a) COMMENT "rev:cf1") +ENGINE=ROCKSDB; +SET rocksdb_bulk_load=1; +INSERT INTO t1 VALUES (1,1); +INSERT INTO t2 VALUES (1,1); +SELECT * FROM t1 FORCE INDEX (PRIMARY); +a b +1 1 +INSERT INTO t1 VALUES (2,2); +SELECT * FROM t2 FORCE INDEX (PRIMARY); +a b +1 1 +SELECT * FROM t1 FORCE INDEX (PRIMARY); +a b +1 1 +SET rocksdb_bulk_load=0; +SELECT * FROM t1 FORCE INDEX (PRIMARY); +a b +2 2 +1 1 +DROP TABLE t1, t2; +CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "rev:cf1") +ENGINE=ROCKSDB; +CREATE TABLE t2(a INT, b INT, PRIMARY KEY(b) COMMENT "rev:cf1") +ENGINE=ROCKSDB; +CREATE TABLE t3(a INT, b INT, PRIMARY KEY(a) COMMENT "rev:cf1") +ENGINE=ROCKSDB PARTITION BY KEY() PARTITIONS 4; +connect other,localhost,root,,; +set session transaction isolation level repeatable read; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 0 +start transaction with consistent snapshot; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 1 +connection default; +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t2; +LOAD DATA INFILE INTO TABLE t3; +set rocksdb_bulk_load=0; +SHOW TABLE STATUS WHERE name LIKE 't%'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation 
Checksum Create_options Comment Max_index_length Temporary +t1 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N +t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N +t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned 0 N +ANALYZE TABLE t1, t2, t3; +Table Op Msg_type Msg_text +test.t1 analyze status OK +test.t2 analyze status OK +test.t3 analyze status OK +SHOW TABLE STATUS WHERE name LIKE 't%'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary +t1 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N +t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL 0 N +t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned 0 N +select count(a),count(b) from t1; +count(a) count(b) +2500000 2500000 +select count(a),count(b) from t2; +count(a) count(b) +2500000 2500000 +select count(a),count(b) from t3; +count(a) count(b) +2500000 2500000 +SELECT * FROM t1 FORCE INDEX (PRIMARY) LIMIT 3; +a b +2499999 -2499997 +2499997 -2499995 +2499995 -2499993 +SELECT * FROM t2 FORCE INDEX (PRIMARY) LIMIT 3; +a b +-2499998 2500000 +-2499996 2499998 +-2499994 2499996 +disconnect other; +DROP TABLE t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result b/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result index 3bd87e9ffd6..4b201d523d9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result @@ -1,3 +1,38 @@ +CREATE TABLE t0 (id int PRIMARY KEY, a int, INDEX ix_a (a)) engine=rocksdb; +insert into t0 values (0, 0),(1, 1),(2, 2),(3, 3),(4, 4), +(5, 4),(6, 4),(7, 4),(8, 4),(9, 4); +SELECT cardinality FROM information_schema.statistics where table_name="t0" and +column_name="id"; +cardinality +NULL +SELECT cardinality FROM information_schema.statistics where table_name="t0" and +column_name="a"; +cardinality +NULL +ANALYZE TABLE t0; +SELECT table_rows into @N FROM information_schema.tables +WHERE table_name = "t0"; +SELECT FLOOR(@N/cardinality) FROM +information_schema.statistics where table_name="t0" and column_name="id"; +FLOOR(@N/cardinality) +1 +SELECT FLOOR(@N/cardinality) FROM +information_schema.statistics where table_name="t0" and column_name="a"; +FLOOR(@N/cardinality) +2 +SET GLOBAL rocksdb_force_flush_memtable_now = 1; +ANALYZE TABLE t0; +SELECT table_rows into @N FROM information_schema.tables +WHERE table_name = "t0"; +SELECT FLOOR(@N/cardinality) FROM +information_schema.statistics where table_name="t0" and column_name="id"; +FLOOR(@N/cardinality) +1 +SELECT FLOOR(@N/cardinality) FROM +information_schema.statistics where table_name="t0" and column_name="a"; +FLOOR(@N/cardinality) +2 +drop table t0; DROP TABLE IF EXISTS t1,t10,t11; create table t1( id bigint not null primary key, diff --git a/storage/rocksdb/mysql-test/rocksdb/r/check_ignore_unknown_options.result b/storage/rocksdb/mysql-test/rocksdb/r/check_ignore_unknown_options.result new file mode 100644 index 00000000000..6ff49908a51 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/check_ignore_unknown_options.result @@ -0,0 +1,7 @@ +select variable_name, variable_value from information_schema.global_variables where variable_name="rocksdb_ignore_unknown_options"; +variable_name 
variable_value +ROCKSDB_IGNORE_UNKNOWN_OPTIONS ON +FOUND 1 /RocksDB: Compatibility check against existing database options failed/ in my_restart.err +select variable_name, variable_value from information_schema.global_variables where variable_name="rocksdb_ignore_unknown_options"; +variable_name variable_value +ROCKSDB_IGNORE_UNKNOWN_OPTIONS ON diff --git a/storage/rocksdb/mysql-test/rocksdb/r/deadlock_tracking.result b/storage/rocksdb/mysql-test/rocksdb/r/deadlock_tracking.result index d7cb89becb7..1e7509172cb 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/deadlock_tracking.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/deadlock_tracking.result @@ -66,13 +66,6 @@ KEY LOCK TYPE: EXCLUSIVE INDEX NAME: PRIMARY TABLE NAME: test.t ----------------WAITING FOR--------------- -TXN_ID -COLUMN FAMILY NAME: default -KEY -LOCK TYPE: EXCLUSIVE -INDEX NAME: PRIMARY -TABLE NAME: test.t --------TXN_ID GOT DEADLOCK--------- ----------------------------------------- @@ -122,13 +115,6 @@ KEY LOCK TYPE: EXCLUSIVE INDEX NAME: PRIMARY TABLE NAME: test.t ----------------WAITING FOR--------------- -TXN_ID -COLUMN FAMILY NAME: default -KEY -LOCK TYPE: EXCLUSIVE -INDEX NAME: PRIMARY -TABLE NAME: test.t --------TXN_ID GOT DEADLOCK--------- @@ -147,13 +133,6 @@ KEY LOCK TYPE: EXCLUSIVE INDEX NAME: PRIMARY TABLE NAME: test.t ----------------WAITING FOR--------------- -TXN_ID -COLUMN FAMILY NAME: default -KEY -LOCK TYPE: EXCLUSIVE -INDEX NAME: PRIMARY -TABLE NAME: test.t --------TXN_ID GOT DEADLOCK--------- ----------------------------------------- @@ -204,13 +183,6 @@ KEY LOCK TYPE: EXCLUSIVE INDEX NAME: PRIMARY TABLE NAME: test.t ----------------WAITING FOR--------------- -TXN_ID -COLUMN FAMILY NAME: default -KEY -LOCK TYPE: EXCLUSIVE -INDEX NAME: PRIMARY -TABLE NAME: test.t --------TXN_ID GOT DEADLOCK--------- @@ -229,13 +201,6 @@ KEY LOCK TYPE: EXCLUSIVE INDEX NAME: PRIMARY TABLE NAME: test.t ----------------WAITING FOR--------------- -TXN_ID -COLUMN FAMILY NAME: default -KEY -LOCK TYPE: EXCLUSIVE -INDEX NAME: PRIMARY -TABLE NAME: test.t --------TXN_ID GOT DEADLOCK--------- @@ -254,13 +219,6 @@ KEY LOCK TYPE: EXCLUSIVE INDEX NAME: PRIMARY TABLE NAME: test.t ----------------WAITING FOR--------------- -TXN_ID -COLUMN FAMILY NAME: default -KEY -LOCK TYPE: EXCLUSIVE -INDEX NAME: PRIMARY -TABLE NAME: test.t --------TXN_ID GOT DEADLOCK--------- ----------------------------------------- @@ -295,13 +253,6 @@ KEY LOCK TYPE: EXCLUSIVE INDEX NAME: PRIMARY TABLE NAME: test.t ----------------WAITING FOR--------------- -TXN_ID -COLUMN FAMILY NAME: default -KEY -LOCK TYPE: EXCLUSIVE -INDEX NAME: PRIMARY -TABLE NAME: test.t --------TXN_ID GOT DEADLOCK--------- ----------------------------------------- @@ -324,8 +275,12 @@ i 3 select * from t where i=2 for update; select * from t where i=3 for update; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_row_lock_deadlocks'; select * from t where i=1 for update; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +select case when variable_value-@a = 1 then 'true' else 'false' end as deadlocks from information_schema.global_status where variable_name='rocksdb_row_lock_deadlocks'; +deadlocks +true rollback; i 3 @@ -410,13 +365,6 @@ KEY LOCK TYPE: SHARED INDEX NAME: PRIMARY TABLE NAME: test.t ----------------WAITING FOR--------------- -TXN_ID -COLUMN FAMILY NAME: default -KEY -LOCK TYPE: EXCLUSIVE -INDEX NAME: PRIMARY -TABLE NAME: test.t --------TXN_ID GOT DEADLOCK--------- @@ -455,13 +403,6 @@ KEY LOCK 
TYPE: SHARED INDEX NAME: NOT FOUND; IDX_ID TABLE NAME: NOT FOUND; IDX_ID ----------------WAITING FOR--------------- -TXN_ID -COLUMN FAMILY NAME: default -KEY -LOCK TYPE: EXCLUSIVE -INDEX NAME: NOT FOUND; IDX_ID -TABLE NAME: NOT FOUND; IDX_ID --------TXN_ID GOT DEADLOCK--------- diff --git a/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result index a39f2d8c0d6..6bca2cbad2d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result @@ -1,17 +1,22 @@ DROP TABLE IF EXISTS is_ddl_t1; DROP TABLE IF EXISTS is_ddl_t2; +DROP TABLE IF EXISTS is_ddl_t3; CREATE TABLE is_ddl_t1 (i INT, j INT, k INT, l INT, PRIMARY KEY (i), KEY (j), KEY (k, l) COMMENT 'kl_cf') ENGINE = ROCKSDB; CREATE TABLE is_ddl_t2 (x INT, y INT, z INT, PRIMARY KEY (z, y) COMMENT 'zy_cf', KEY (x)) ENGINE = ROCKSDB; -SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%'; -TABLE_SCHEMA TABLE_NAME PARTITION_NAME INDEX_NAME INDEX_TYPE KV_FORMAT_VERSION CF -test is_ddl_t1 NULL PRIMARY 1 13 default -test is_ddl_t1 NULL j 2 13 default -test is_ddl_t1 NULL k 2 13 kl_cf -test is_ddl_t2 NULL PRIMARY 1 13 zy_cf -test is_ddl_t2 NULL x 2 13 default +CREATE TABLE is_ddl_t3 (a INT, b INT, c INT, PRIMARY KEY (a)) ENGINE = ROCKSDB +COMMENT "ttl_duration=3600;"; +SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF,TTL_DURATION,INDEX_FLAGS FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%'; +TABLE_SCHEMA TABLE_NAME PARTITION_NAME INDEX_NAME INDEX_TYPE KV_FORMAT_VERSION CF TTL_DURATION INDEX_FLAGS +test is_ddl_t1 NULL PRIMARY 1 13 default 0 0 +test is_ddl_t1 NULL j 2 13 default 0 0 +test is_ddl_t1 NULL k 2 13 kl_cf 0 0 +test is_ddl_t2 NULL PRIMARY 1 13 zy_cf 0 0 +test is_ddl_t2 NULL x 2 13 default 0 0 +test is_ddl_t3 NULL PRIMARY 1 13 default 3600 1 DROP TABLE is_ddl_t1; DROP TABLE is_ddl_t2; +DROP TABLE is_ddl_t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/i_s_deadlock.result b/storage/rocksdb/mysql-test/rocksdb/r/i_s_deadlock.result new file mode 100644 index 00000000000..36db92095e9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/i_s_deadlock.result @@ -0,0 +1,215 @@ +set @prior_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_deadlock_detect = @@rocksdb_deadlock_detect; +set @prior_max_latest_deadlocks = @@rocksdb_max_latest_deadlocks; +set global rocksdb_deadlock_detect = on; +set global rocksdb_lock_wait_timeout = 10000; +# Clears deadlock buffer of any prior deadlocks. 
+set global rocksdb_max_latest_deadlocks = 0; +set global rocksdb_max_latest_deadlocks = @prior_max_latest_deadlocks; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connect con3,localhost,root,,; +connection default; +show create table information_schema.rocksdb_deadlock; +Table Create Table +ROCKSDB_DEADLOCK CREATE TEMPORARY TABLE `ROCKSDB_DEADLOCK` ( + `DEADLOCK_ID` bigint(8) NOT NULL DEFAULT 0, + `TRANSACTION_ID` bigint(8) NOT NULL DEFAULT 0, + `CF_NAME` varchar(193) NOT NULL DEFAULT '', + `WAITING_KEY` varchar(513) NOT NULL DEFAULT '', + `LOCK_TYPE` varchar(193) NOT NULL DEFAULT '', + `INDEX_NAME` varchar(193) NOT NULL DEFAULT '', + `TABLE_NAME` varchar(193) NOT NULL DEFAULT '', + `ROLLED_BACK` bigint(8) NOT NULL DEFAULT 0 +) ENGINE=MEMORY DEFAULT CHARSET=utf8 +create table t (i int primary key) engine=rocksdb; +insert into t values (1), (2), (3); +select * from information_schema.rocksdb_deadlock; +DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK +Deadlock #1 +connection con1; +begin; +select * from t where i=1 for update; +i +1 +connection con2; +begin; +select * from t where i=2 for update; +i +2 +connection con1; +select * from t where i=2 for update; +connection con2; +select * from t where i=1 for update; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +rollback; +connection con1; +i +2 +rollback; +connection default; +select * from information_schema.rocksdb_deadlock; +DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0 +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1 +Deadlock #2 +connection con1; +begin; +select * from t where i=1 for update; +i +1 +connection con2; +begin; +select * from t where i=2 for update; +i +2 +connection con1; +select * from t where i=2 for update; +connection con2; +select * from t where i=1 for update; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +rollback; +connection con1; +i +2 +rollback; +connection default; +select * from information_schema.rocksdb_deadlock; +DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0 +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1 +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0 +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1 +set global rocksdb_max_latest_deadlocks = 10; +Deadlock #3 +connection con1; +begin; +select * from t where i=1 for update; +i +1 +connection con2; +begin; +select * from t where i=2 for update; +i +2 +connection con1; +select * from t where i=2 for update; +connection con2; +select * from t where i=1 for update; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +rollback; +connection con1; +i +2 +rollback; +connection default; +select * from information_schema.rocksdb_deadlock; +DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0 +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1 +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0 +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1 +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY 
EXCLUSIVE PRIMARY test.t 0 +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1 +set global rocksdb_max_latest_deadlocks = 1; +select * from information_schema.rocksdb_deadlock; +DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0 +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1 +connection con3; +set rocksdb_deadlock_detect_depth = 2; +Deadlock #4 +connection con1; +begin; +select * from t where i=1 for update; +i +1 +connection con2; +begin; +select * from t where i=2 for update; +i +2 +connection con3; +begin; +select * from t where i=3 for update; +i +3 +connection con1; +select * from t where i=2 for update; +connection con2; +select * from t where i=3 for update; +connection con3; +select * from t where i=1 for update; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +rollback; +connection con2; +i +3 +rollback; +connection con1; +i +2 +rollback; +connection default; +set global rocksdb_max_latest_deadlocks = 5; +select * from information_schema.rocksdb_deadlock; +DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK +Deadlock #5 +connection con1; +begin; +select * from t where i=1 for update; +i +1 +connection con2; +begin; +select * from t where i=2 for update; +i +2 +connection con3; +begin; +select * from t where i=3 lock in share mode; +i +3 +connection con1; +select * from t where i=100 for update; +i +select * from t where i=101 for update; +i +select * from t where i=2 for update; +connection con2; +select * from t where i=3 lock in share mode; +i +3 +select * from t where i=200 for update; +i +select * from t where i=201 for update; +i +select * from t where i=1 lock in share mode; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +rollback; +connection con1; +i +2 +rollback; +connection con3; +rollback; +connection default; +select * from information_schema.rocksdb_deadlock; +DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0 +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY SHARED PRIMARY test.t 1 +disconnect con1; +disconnect con2; +disconnect con3; +set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_deadlock_detect; +drop table t; +select * from information_schema.rocksdb_deadlock; +DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE INDEX_NAME TABLE_NAME 0 +DEADLOCK_ID TRANSACTION_ID default WAITING_KEY SHARED INDEX_NAME TABLE_NAME 1 +set global rocksdb_max_latest_deadlocks = 0; +# Clears deadlock buffer of any existent deadlocks. 
+set global rocksdb_max_latest_deadlocks = @prior_max_latest_deadlocks; +select * from information_schema.rocksdb_deadlock; +DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result index f63a271cdce..7fb9055083b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result @@ -25,10 +25,10 @@ UPDATE t1 SET filler1='to be deleted' WHERE key1=100 and key2=100; DROP TABLE t0, t1; create table t1 (key1 int, key2 int, key3 int, key (key1), key (key2), key(key3)) engine=rocksdb; insert into t1 values (1, 100, 100), (1, 200, 200), (1, 300, 300); +set global rocksdb_force_flush_memtable_now=1; analyze table t1; Table Op Msg_type Msg_text test.t1 analyze status OK -set global rocksdb_force_flush_memtable_now=1; explain select * from t1 where key1 = 1; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ref key1 key1 5 const # diff --git a/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result b/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result index 6850d8dff16..aba14e3c076 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result @@ -4,15 +4,14 @@ DROP TABLE IF EXISTS t3; SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1; create table t1 (a int) engine=rocksdb; drop table t1; -select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type<>'DDL_DROP_INDEX_ONGOING'; TYPE NAME VALUE MAX_INDEX_ID MAX_INDEX_ID max_index_id CF_FLAGS 0 default [0] CF_FLAGS 1 __system__ [0] -DDL_DROP_INDEX_ONGOING cf_id:0,index_id:max_index_id -select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type<>'DDL_DROP_INDEX_ONGOING'; count(*) -4 +3 SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0; select VALUE into @keysIn from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn'; CREATE TABLE t1 (i1 INT, i2 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue255.result b/storage/rocksdb/mysql-test/rocksdb/r/issue255.result index be9e6d1167a..c1d9ef4574c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/issue255.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue255.result @@ -6,6 +6,19 @@ t1 ROCKSDB # Fixed 1 # # # # # 6 NULL NULL NULL latin1_swedish_ci NULL 0 N INSERT INTO t1 VALUES ('538647864786478647864'); Warnings: Warning 1264 Out of range value for column 'pk' at row 1 +SELECT * FROM t1; +pk +5 +9223372036854775807 +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary +t1 ROCKSDB 10 Fixed 2 22 44 0 0 0 9223372036854775807 NULL NULL NULL latin1_swedish_ci NULL 0 N +INSERT INTO t1 VALUES (); +ERROR 23000: Duplicate entry '9223372036854775807' for key 'PRIMARY' +SELECT * FROM t1; +pk +5 +9223372036854775807 SHOW TABLE STATUS LIKE 't1'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options 
Comment Max_index_length Temporary t1 ROCKSDB # Fixed 2 # # # # # 9223372036854775807 NULL NULL NULL latin1_swedish_ci NULL 0 N @@ -19,3 +32,37 @@ SHOW TABLE STATUS LIKE 't1'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary t1 ROCKSDB # Fixed 2 # # # # # 9223372036854775807 NULL NULL NULL latin1_swedish_ci NULL 0 N DROP TABLE t1; +CREATE TABLE t1 (pk TINYINT NOT NULL PRIMARY KEY AUTO_INCREMENT); +INSERT INTO t1 VALUES (5); +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary +t1 ROCKSDB # Fixed 1 # # # # # 6 NULL NULL NULL latin1_swedish_ci NULL 0 N +INSERT INTO t1 VALUES (1000); +Warnings: +Warning 1264 Out of range value for column 'pk' at row 1 +SELECT * FROM t1; +pk +5 +127 +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary +t1 ROCKSDB # Fixed 2 # # # # # 127 NULL NULL NULL latin1_swedish_ci NULL 0 N +INSERT INTO t1 VALUES (); +ERROR 23000: Duplicate entry '127' for key 'PRIMARY' +SELECT * FROM t1; +pk +5 +127 +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary +t1 ROCKSDB # Fixed 2 # # # # # 127 NULL NULL NULL latin1_swedish_ci NULL 0 N +INSERT INTO t1 VALUES (); +ERROR 23000: Duplicate entry '127' for key 'PRIMARY' +SELECT * FROM t1; +pk +5 +127 +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary +t1 ROCKSDB # Fixed 2 # # # # # 127 NULL NULL NULL latin1_swedish_ci NULL 0 N +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/lock_wait_timeout_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/lock_wait_timeout_stats.result index d0bfb05fd1b..96efca6e2b7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/lock_wait_timeout_stats.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/lock_wait_timeout_stats.result @@ -8,6 +8,7 @@ ROW_LOCK_WAIT_TIMEOUTS begin; set @@rocksdb_lock_wait_timeout=1; begin; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts'; insert into t values(0); ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; @@ -16,6 +17,10 @@ ROW_LOCK_WAIT_TIMEOUTS select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; ROW_LOCK_WAIT_TIMEOUTS 1 +select case when variable_value-@a = 1 then 'true' else 'false' end as waits from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts'; +waits +true +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts'; insert into t values(0); ERROR 
HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; @@ -24,4 +29,7 @@ ROW_LOCK_WAIT_TIMEOUTS select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; ROW_LOCK_WAIT_TIMEOUTS 2 +select case when variable_value-@a = 1 then 'true' else 'false' end as waits from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts'; +waits +true drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mariadb_port_fixes.result b/storage/rocksdb/mysql-test/rocksdb/r/mariadb_port_fixes.result index 9674b2b0c15..d9e2bf5eea5 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/mariadb_port_fixes.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/mariadb_port_fixes.result @@ -69,18 +69,19 @@ set global rocksdb_strict_collation_check=@tmp_rscc; # select plugin_name, plugin_maturity from information_schema.plugins where plugin_name like '%rocksdb%'; plugin_name plugin_maturity -ROCKSDB Gamma -ROCKSDB_CFSTATS Gamma -ROCKSDB_DBSTATS Gamma -ROCKSDB_PERF_CONTEXT Gamma -ROCKSDB_PERF_CONTEXT_GLOBAL Gamma -ROCKSDB_CF_OPTIONS Gamma -ROCKSDB_COMPACTION_STATS Gamma -ROCKSDB_GLOBAL_INFO Gamma -ROCKSDB_DDL Gamma -ROCKSDB_INDEX_FILE_MAP Gamma -ROCKSDB_LOCKS Gamma -ROCKSDB_TRX Gamma +ROCKSDB Stable +ROCKSDB_CFSTATS Stable +ROCKSDB_DBSTATS Stable +ROCKSDB_PERF_CONTEXT Stable +ROCKSDB_PERF_CONTEXT_GLOBAL Stable +ROCKSDB_CF_OPTIONS Stable +ROCKSDB_COMPACTION_STATS Stable +ROCKSDB_GLOBAL_INFO Stable +ROCKSDB_DDL Stable +ROCKSDB_INDEX_FILE_MAP Stable +ROCKSDB_LOCKS Stable +ROCKSDB_TRX Stable +ROCKSDB_DEADLOCK Stable # # MDEV-12466 : Assertion `thd->transaction.stmt.is_empty() || thd->in_sub_stmt || ... 
# @@ -88,3 +89,21 @@ CREATE TABLE t1 (i INT) ENGINE=RocksDB; FLUSH TABLE t1 FOR EXPORT; ERROR HY000: Storage engine ROCKSDB of the table `test`.`t1` doesn't have this option DROP TABLE t1; +# +# MDEV-16154 Server crashes in in myrocks::ha_rocksdb::load_auto_incr_value_from_inde +# +CREATE TABLE t1 (a INT) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1); +ALTER TABLE t1 AUTO_INCREMENT 10; +DROP TABLE t1; +# +# MDEV-16155: UPDATE on RocksDB table with unique constraint does not work +# +CREATE TABLE t1 (a INT, b CHAR(8), UNIQUE INDEX(a)) ENGINE=RocksDB; +INSERT INTO t1 (a,b) VALUES (1,'foo'),(2,'bar'); +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +101 foo +102 bar +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/max_open_files.result b/storage/rocksdb/mysql-test/rocksdb/r/max_open_files.result new file mode 100644 index 00000000000..5d34f4e9640 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/max_open_files.result @@ -0,0 +1,21 @@ +CALL mtr.add_suppression("RocksDB: rocksdb_max_open_files should not be greater than the open_files_limit*"); +FOUND 1 /RocksDB: rocksdb_max_open_files should not be greater than the open_files_limit/ in rocksdb.max_open_files.err +SELECT FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files; +FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files +1 +SELECT @@global.open_files_limit - 1 = @@global.rocksdb_max_open_files; +@@global.open_files_limit - 1 = @@global.rocksdb_max_open_files +1 +SELECT @@global.rocksdb_max_open_files; +@@global.rocksdb_max_open_files +0 +CREATE TABLE t1(a INT) ENGINE=ROCKSDB; +INSERT INTO t1 VALUES(0),(1),(2),(3),(4); +SET GLOBAL rocksdb_force_flush_memtable_and_lzero_now=1; +DROP TABLE t1; +SELECT @@global.rocksdb_max_open_files; +@@global.rocksdb_max_open_files +-1 +SELECT FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files; +FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files +1 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/optimizer_loose_index_scans.result b/storage/rocksdb/mysql-test/rocksdb/r/optimizer_loose_index_scans.result index 27b1779627b..1fe61fe9fc5 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/optimizer_loose_index_scans.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/optimizer_loose_index_scans.result @@ -36,7 +36,7 @@ explain select b, d from t where d > 4; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan rows_read -1509 +1505 include/diff_tables.inc [temp_orig, temp_skip] set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; set optimizer_switch = 'skip_scan=off'; @@ -44,7 +44,7 @@ explain select a, b, c, d from t where a = 5 and d <= 3; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index rows_read -251 +250 set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; explain select a, b, c, d from t where a = 5 and d <= 3; id select_type table type possible_keys key key_len ref rows Extra @@ -58,13 +58,13 @@ explain select e from t where a = 5 and d <= 3; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where rows_read -251 +250 set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; explain select e from t where a = 5 and d <= 3; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where 
rows_read -251 +250 include/diff_tables.inc [temp_orig, temp_skip] set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; set optimizer_switch = 'skip_scan=off'; @@ -72,13 +72,13 @@ explain select a, b, c, d from t where a = 5 and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index rows_read -251 +250 set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; explain select a, b, c, d from t where a = 5 and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan rows_read -51 +26 include/diff_tables.inc [temp_orig, temp_skip] set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; set optimizer_switch = 'skip_scan=off'; @@ -86,13 +86,13 @@ explain select e from t where a = 5 and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where rows_read -251 +250 set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; explain select e from t where a = 5 and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where rows_read -251 +250 include/diff_tables.inc [temp_orig, temp_skip] set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; set optimizer_switch = 'skip_scan=off'; @@ -100,13 +100,13 @@ explain select a, b, c, d from t where a in (1, 5) and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t range PRIMARY PRIMARY 4 NULL # Using where; Using index rows_read -502 +500 set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; explain select a, b, c, d from t where a in (1, 5) and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan rows_read -102 +52 include/diff_tables.inc [temp_orig, temp_skip] set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; set optimizer_switch = 'skip_scan=off'; @@ -114,13 +114,13 @@ explain select a, b, c, d from t where a in (1, 3, 5) and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t range PRIMARY PRIMARY 4 NULL # Using where; Using index rows_read -753 +750 set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; explain select a, b, c, d from t where a in (1, 3, 5) and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan rows_read -153 +78 include/diff_tables.inc [temp_orig, temp_skip] set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; set optimizer_switch = 'skip_scan=off'; @@ -128,13 +128,13 @@ explain select a, b, c, d from t where a in (1, 5) and b in (1, 2) and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t range PRIMARY,b PRIMARY 8 NULL # Using where; Using index rows_read -204 +200 set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; explain select a, b, c, d from t where a in (1, 5) and b in (1, 2) and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan rows_read -44 +24 include/diff_tables.inc [temp_orig, temp_skip] set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; set optimizer_switch = 
'skip_scan=off'; @@ -142,13 +142,13 @@ explain select a, b, c, d from t where a in (1, 2, 3, 4, 5) and b in (1, 2, 3) a id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t range PRIMARY,b PRIMARY 8 NULL # Using where; Using index rows_read -765 +750 set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; explain select a, b, c, d from t where a in (1, 2, 3, 4, 5) and b in (1, 2, 3) and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan rows_read -165 +90 include/diff_tables.inc [temp_orig, temp_skip] set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; set optimizer_switch = 'skip_scan=off'; @@ -156,13 +156,13 @@ explain select a, b, c, d from t where a = 5 and b = 2 and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t ref PRIMARY,b PRIMARY 8 const,const # Using where; Using index rows_read -51 +50 set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; explain select a, b, c, d from t where a = 5 and b = 2 and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan rows_read -11 +6 include/diff_tables.inc [temp_orig, temp_skip] set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; set optimizer_switch = 'skip_scan=off'; @@ -170,7 +170,7 @@ explain select a+1, b, c, d from t where a = 5 and d < 3; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index rows_read -251 +250 set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; explain select a+1, b, c, d from t where a = 5 and d < 3; id select_type table type possible_keys key key_len ref rows Extra @@ -184,7 +184,7 @@ explain select b, c, d from t where a = 5 and d < 3; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index rows_read -251 +250 set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; explain select b, c, d from t where a = 5 and d < 3; id select_type table type possible_keys key key_len ref rows Extra @@ -204,7 +204,7 @@ explain select a, b, c, d from t where a = b and d >= 98; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan rows_read -9 +5 include/diff_tables.inc [temp_orig, temp_skip] set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; set optimizer_switch = 'skip_scan=on'; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/perf_context.result b/storage/rocksdb/mysql-test/rocksdb/r/perf_context.result index 6586b92d129..28f965843aa 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/perf_context.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/perf_context.result @@ -14,8 +14,13 @@ test t1 NULL BLOCK_READ_BYTE # test t1 NULL BLOCK_READ_TIME # test t1 NULL BLOCK_CHECKSUM_TIME # test t1 NULL BLOCK_DECOMPRESS_TIME # +test t1 NULL GET_READ_BYTES # +test t1 NULL MULTIGET_READ_BYTES # +test t1 NULL ITER_READ_BYTES # test t1 NULL INTERNAL_KEY_SKIPPED_COUNT # test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT # +test t1 NULL INTERNAL_RECENT_SKIPPED_COUNT # +test t1 NULL INTERNAL_MERGE_COUNT # test t1 NULL GET_SNAPSHOT_TIME # test t1 NULL GET_FROM_MEMTABLE_TIME # test t1 NULL GET_FROM_MEMTABLE_COUNT # @@ -23,9 +28,12 @@ test t1 NULL GET_POST_PROCESS_TIME # 
test t1 NULL GET_FROM_OUTPUT_FILES_TIME # test t1 NULL SEEK_ON_MEMTABLE_TIME # test t1 NULL SEEK_ON_MEMTABLE_COUNT # +test t1 NULL NEXT_ON_MEMTABLE_COUNT # +test t1 NULL PREV_ON_MEMTABLE_COUNT # test t1 NULL SEEK_CHILD_SEEK_TIME # test t1 NULL SEEK_CHILD_SEEK_COUNT # -test t1 NULL SEEK_IN_HEAP_TIME # +test t1 NULL SEEK_MIN_HEAP_TIME # +test t1 NULL SEEK_MAX_HEAP_TIME # test t1 NULL SEEK_INTERNAL_SEEK_TIME # test t1 NULL FIND_NEXT_USER_ENTRY_TIME # test t1 NULL WRITE_WAL_TIME # @@ -41,6 +49,12 @@ test t1 NULL NEW_TABLE_BLOCK_ITER_NANOS # test t1 NULL NEW_TABLE_ITERATOR_NANOS # test t1 NULL BLOCK_SEEK_NANOS # test t1 NULL FIND_TABLE_NANOS # +test t1 NULL BLOOM_MEMTABLE_HIT_COUNT # +test t1 NULL BLOOM_MEMTABLE_MISS_COUNT # +test t1 NULL BLOOM_SST_HIT_COUNT # +test t1 NULL BLOOM_SST_MISS_COUNT # +test t1 NULL KEY_LOCK_WAIT_TIME # +test t1 NULL KEY_LOCK_WAIT_COUNT # test t1 NULL IO_THREAD_POOL_ID # test t1 NULL IO_BYTES_WRITTEN # test t1 NULL IO_BYTES_READ # @@ -59,8 +73,13 @@ BLOCK_READ_BYTE # BLOCK_READ_TIME # BLOCK_CHECKSUM_TIME # BLOCK_DECOMPRESS_TIME # +GET_READ_BYTES # +MULTIGET_READ_BYTES # +ITER_READ_BYTES # INTERNAL_KEY_SKIPPED_COUNT # INTERNAL_DELETE_SKIPPED_COUNT # +INTERNAL_RECENT_SKIPPED_COUNT # +INTERNAL_MERGE_COUNT # GET_SNAPSHOT_TIME # GET_FROM_MEMTABLE_TIME # GET_FROM_MEMTABLE_COUNT # @@ -68,9 +87,12 @@ GET_POST_PROCESS_TIME # GET_FROM_OUTPUT_FILES_TIME # SEEK_ON_MEMTABLE_TIME # SEEK_ON_MEMTABLE_COUNT # +NEXT_ON_MEMTABLE_COUNT # +PREV_ON_MEMTABLE_COUNT # SEEK_CHILD_SEEK_TIME # SEEK_CHILD_SEEK_COUNT # -SEEK_IN_HEAP_TIME # +SEEK_MIN_HEAP_TIME # +SEEK_MAX_HEAP_TIME # SEEK_INTERNAL_SEEK_TIME # FIND_NEXT_USER_ENTRY_TIME # WRITE_WAL_TIME # @@ -86,6 +108,12 @@ NEW_TABLE_BLOCK_ITER_NANOS # NEW_TABLE_ITERATOR_NANOS # BLOCK_SEEK_NANOS # FIND_TABLE_NANOS # +BLOOM_MEMTABLE_HIT_COUNT # +BLOOM_MEMTABLE_MISS_COUNT # +BLOOM_SST_HIT_COUNT # +BLOOM_SST_MISS_COUNT # +KEY_LOCK_WAIT_TIME # +KEY_LOCK_WAIT_COUNT # IO_THREAD_POOL_ID # IO_BYTES_WRITTEN # IO_BYTES_READ # diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result index 875950336b6..f616b786f7c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -573,9 +573,6 @@ insert into t30 values ('row3', 'row3-key', 'row3-data'), ('row4', 'row4-key', 'row4-data'), ('row5', 'row5-key', 'row5-data'); -analyze table t30; -Table Op Msg_type Msg_text -test.t30 analyze status OK explain select * from t30 where key1 <='row3-key'; id select_type table type possible_keys key key_len ref rows Extra @@ -868,6 +865,7 @@ ERROR 42S02: Unknown table 'test.t45' show variables where variable_name like 'rocksdb%' and +variable_name not like 'rocksdb_max_open_files' and variable_name not like 'rocksdb_supported_compression_types'; Variable_name Value rocksdb_access_hint_on_compaction_start 1 @@ -875,6 +873,7 @@ rocksdb_advise_random_on_open ON rocksdb_allow_concurrent_memtable_write OFF rocksdb_allow_mmap_reads OFF rocksdb_allow_mmap_writes OFF +rocksdb_allow_to_start_after_corruption OFF rocksdb_blind_delete_primary_key OFF rocksdb_block_cache_size 536870912 rocksdb_block_restart_interval 16 @@ -894,7 +893,6 @@ rocksdb_compaction_sequential_deletes 0 rocksdb_compaction_sequential_deletes_count_sd OFF rocksdb_compaction_sequential_deletes_file_size 0 rocksdb_compaction_sequential_deletes_window 0 -rocksdb_concurrent_prepare ON rocksdb_create_checkpoint rocksdb_create_if_missing ON rocksdb_create_missing_column_families OFF @@ -918,7 
+916,6 @@ rocksdb_enable_ttl_read_filtering ON rocksdb_enable_write_thread_adaptive_yield OFF rocksdb_error_if_exists OFF rocksdb_flush_log_at_trx_commit 0 -rocksdb_flush_memtable_on_analyze ON rocksdb_force_compute_memtable_stats ON rocksdb_force_compute_memtable_stats_cachetime 0 rocksdb_force_flush_memtable_and_lzero_now OFF @@ -926,6 +923,7 @@ rocksdb_force_flush_memtable_now OFF rocksdb_force_index_records_in_range 0 rocksdb_git_hash # rocksdb_hash_index_allow_collision ON +rocksdb_ignore_unknown_options ON rocksdb_index_type kBinarySearch rocksdb_info_log_level error_level rocksdb_io_write_timeout 0 @@ -942,8 +940,7 @@ rocksdb_max_background_jobs 2 rocksdb_max_latest_deadlocks 5 rocksdb_max_log_file_size 0 rocksdb_max_manifest_file_size 18446744073709551615 -rocksdb_max_open_files -1 -rocksdb_max_row_locks 1073741824 +rocksdb_max_row_locks 1048576 rocksdb_max_subcompactions 1 rocksdb_max_total_wal_size 0 rocksdb_merge_buf_size 67108864 @@ -978,6 +975,7 @@ rocksdb_table_cache_numshardbits 6 rocksdb_table_stats_sampling_pct 10 rocksdb_tmpdir rocksdb_trace_sst_api OFF +rocksdb_two_write_queues ON rocksdb_unsafe_for_binlog OFF rocksdb_update_cf_options rocksdb_use_adaptive_mutex OFF @@ -1464,6 +1462,7 @@ Rocksdb_rows_read # Rocksdb_rows_updated # Rocksdb_rows_deleted_blind # Rocksdb_rows_expired # +Rocksdb_rows_filtered # Rocksdb_system_rows_deleted # Rocksdb_system_rows_inserted # Rocksdb_system_rows_read # @@ -1474,11 +1473,22 @@ Rocksdb_queries_point # Rocksdb_queries_range # Rocksdb_covered_secondary_key_lookups # Rocksdb_block_cache_add # +Rocksdb_block_cache_add_failures # +Rocksdb_block_cache_bytes_read # +Rocksdb_block_cache_bytes_write # +Rocksdb_block_cache_data_add # +Rocksdb_block_cache_data_bytes_insert # Rocksdb_block_cache_data_hit # Rocksdb_block_cache_data_miss # +Rocksdb_block_cache_filter_add # +Rocksdb_block_cache_filter_bytes_evict # +Rocksdb_block_cache_filter_bytes_insert # Rocksdb_block_cache_filter_hit # Rocksdb_block_cache_filter_miss # Rocksdb_block_cache_hit # +Rocksdb_block_cache_index_add # +Rocksdb_block_cache_index_bytes_evict # +Rocksdb_block_cache_index_bytes_insert # Rocksdb_block_cache_index_hit # Rocksdb_block_cache_index_miss # Rocksdb_block_cache_miss # @@ -1495,7 +1505,11 @@ Rocksdb_compaction_key_drop_new # Rocksdb_compaction_key_drop_obsolete # Rocksdb_compaction_key_drop_user # Rocksdb_flush_write_bytes # +Rocksdb_get_hit_l0 # +Rocksdb_get_hit_l1 # +Rocksdb_get_hit_l2_and_up # Rocksdb_getupdatessince_calls # +Rocksdb_iter_bytes_read # Rocksdb_memtable_hit # Rocksdb_memtable_miss # Rocksdb_no_file_closes # @@ -1503,6 +1517,12 @@ Rocksdb_no_file_errors # Rocksdb_no_file_opens # Rocksdb_num_iterators # Rocksdb_number_block_not_compressed # +Rocksdb_number_db_next # +Rocksdb_number_db_next_found # +Rocksdb_number_db_prev # +Rocksdb_number_db_prev_found # +Rocksdb_number_db_seek # +Rocksdb_number_db_seek_found # Rocksdb_number_deletes_filtered # Rocksdb_number_keys_read # Rocksdb_number_keys_updated # @@ -1517,11 +1537,11 @@ Rocksdb_number_sst_entry_merge # Rocksdb_number_sst_entry_other # Rocksdb_number_sst_entry_put # Rocksdb_number_sst_entry_singledelete # -Rocksdb_number_stat_computes # Rocksdb_number_superversion_acquires # Rocksdb_number_superversion_cleanups # Rocksdb_number_superversion_releases # -Rocksdb_rate_limit_delay_millis # +Rocksdb_row_lock_deadlocks # +Rocksdb_row_lock_wait_timeouts # Rocksdb_snapshot_conflict_errors # Rocksdb_stall_l0_file_count_limit_slowdowns # Rocksdb_stall_locked_l0_file_count_limit_slowdowns # @@ -1549,6 
+1569,7 @@ ROCKSDB_ROWS_READ ROCKSDB_ROWS_UPDATED ROCKSDB_ROWS_DELETED_BLIND ROCKSDB_ROWS_EXPIRED +ROCKSDB_ROWS_FILTERED ROCKSDB_SYSTEM_ROWS_DELETED ROCKSDB_SYSTEM_ROWS_INSERTED ROCKSDB_SYSTEM_ROWS_READ @@ -1559,11 +1580,22 @@ ROCKSDB_QUERIES_POINT ROCKSDB_QUERIES_RANGE ROCKSDB_COVERED_SECONDARY_KEY_LOOKUPS ROCKSDB_BLOCK_CACHE_ADD +ROCKSDB_BLOCK_CACHE_ADD_FAILURES +ROCKSDB_BLOCK_CACHE_BYTES_READ +ROCKSDB_BLOCK_CACHE_BYTES_WRITE +ROCKSDB_BLOCK_CACHE_DATA_ADD +ROCKSDB_BLOCK_CACHE_DATA_BYTES_INSERT ROCKSDB_BLOCK_CACHE_DATA_HIT ROCKSDB_BLOCK_CACHE_DATA_MISS +ROCKSDB_BLOCK_CACHE_FILTER_ADD +ROCKSDB_BLOCK_CACHE_FILTER_BYTES_EVICT +ROCKSDB_BLOCK_CACHE_FILTER_BYTES_INSERT ROCKSDB_BLOCK_CACHE_FILTER_HIT ROCKSDB_BLOCK_CACHE_FILTER_MISS ROCKSDB_BLOCK_CACHE_HIT +ROCKSDB_BLOCK_CACHE_INDEX_ADD +ROCKSDB_BLOCK_CACHE_INDEX_BYTES_EVICT +ROCKSDB_BLOCK_CACHE_INDEX_BYTES_INSERT ROCKSDB_BLOCK_CACHE_INDEX_HIT ROCKSDB_BLOCK_CACHE_INDEX_MISS ROCKSDB_BLOCK_CACHE_MISS @@ -1580,7 +1612,11 @@ ROCKSDB_COMPACTION_KEY_DROP_NEW ROCKSDB_COMPACTION_KEY_DROP_OBSOLETE ROCKSDB_COMPACTION_KEY_DROP_USER ROCKSDB_FLUSH_WRITE_BYTES +ROCKSDB_GET_HIT_L0 +ROCKSDB_GET_HIT_L1 +ROCKSDB_GET_HIT_L2_AND_UP ROCKSDB_GETUPDATESSINCE_CALLS +ROCKSDB_ITER_BYTES_READ ROCKSDB_MEMTABLE_HIT ROCKSDB_MEMTABLE_MISS ROCKSDB_NO_FILE_CLOSES @@ -1588,6 +1624,12 @@ ROCKSDB_NO_FILE_ERRORS ROCKSDB_NO_FILE_OPENS ROCKSDB_NUM_ITERATORS ROCKSDB_NUMBER_BLOCK_NOT_COMPRESSED +ROCKSDB_NUMBER_DB_NEXT +ROCKSDB_NUMBER_DB_NEXT_FOUND +ROCKSDB_NUMBER_DB_PREV +ROCKSDB_NUMBER_DB_PREV_FOUND +ROCKSDB_NUMBER_DB_SEEK +ROCKSDB_NUMBER_DB_SEEK_FOUND ROCKSDB_NUMBER_DELETES_FILTERED ROCKSDB_NUMBER_KEYS_READ ROCKSDB_NUMBER_KEYS_UPDATED @@ -1602,11 +1644,11 @@ ROCKSDB_NUMBER_SST_ENTRY_MERGE ROCKSDB_NUMBER_SST_ENTRY_OTHER ROCKSDB_NUMBER_SST_ENTRY_PUT ROCKSDB_NUMBER_SST_ENTRY_SINGLEDELETE -ROCKSDB_NUMBER_STAT_COMPUTES ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS ROCKSDB_NUMBER_SUPERVERSION_RELEASES -ROCKSDB_RATE_LIMIT_DELAY_MILLIS +ROCKSDB_ROW_LOCK_DEADLOCKS +ROCKSDB_ROW_LOCK_WAIT_TIMEOUTS ROCKSDB_SNAPSHOT_CONFLICT_ERRORS ROCKSDB_STALL_L0_FILE_COUNT_LIMIT_SLOWDOWNS ROCKSDB_STALL_LOCKED_L0_FILE_COUNT_LIMIT_SLOWDOWNS @@ -1636,6 +1678,7 @@ ROCKSDB_ROWS_READ ROCKSDB_ROWS_UPDATED ROCKSDB_ROWS_DELETED_BLIND ROCKSDB_ROWS_EXPIRED +ROCKSDB_ROWS_FILTERED ROCKSDB_SYSTEM_ROWS_DELETED ROCKSDB_SYSTEM_ROWS_INSERTED ROCKSDB_SYSTEM_ROWS_READ @@ -1646,11 +1689,22 @@ ROCKSDB_QUERIES_POINT ROCKSDB_QUERIES_RANGE ROCKSDB_COVERED_SECONDARY_KEY_LOOKUPS ROCKSDB_BLOCK_CACHE_ADD +ROCKSDB_BLOCK_CACHE_ADD_FAILURES +ROCKSDB_BLOCK_CACHE_BYTES_READ +ROCKSDB_BLOCK_CACHE_BYTES_WRITE +ROCKSDB_BLOCK_CACHE_DATA_ADD +ROCKSDB_BLOCK_CACHE_DATA_BYTES_INSERT ROCKSDB_BLOCK_CACHE_DATA_HIT ROCKSDB_BLOCK_CACHE_DATA_MISS +ROCKSDB_BLOCK_CACHE_FILTER_ADD +ROCKSDB_BLOCK_CACHE_FILTER_BYTES_EVICT +ROCKSDB_BLOCK_CACHE_FILTER_BYTES_INSERT ROCKSDB_BLOCK_CACHE_FILTER_HIT ROCKSDB_BLOCK_CACHE_FILTER_MISS ROCKSDB_BLOCK_CACHE_HIT +ROCKSDB_BLOCK_CACHE_INDEX_ADD +ROCKSDB_BLOCK_CACHE_INDEX_BYTES_EVICT +ROCKSDB_BLOCK_CACHE_INDEX_BYTES_INSERT ROCKSDB_BLOCK_CACHE_INDEX_HIT ROCKSDB_BLOCK_CACHE_INDEX_MISS ROCKSDB_BLOCK_CACHE_MISS @@ -1667,7 +1721,11 @@ ROCKSDB_COMPACTION_KEY_DROP_NEW ROCKSDB_COMPACTION_KEY_DROP_OBSOLETE ROCKSDB_COMPACTION_KEY_DROP_USER ROCKSDB_FLUSH_WRITE_BYTES +ROCKSDB_GET_HIT_L0 +ROCKSDB_GET_HIT_L1 +ROCKSDB_GET_HIT_L2_AND_UP ROCKSDB_GETUPDATESSINCE_CALLS +ROCKSDB_ITER_BYTES_READ ROCKSDB_MEMTABLE_HIT ROCKSDB_MEMTABLE_MISS ROCKSDB_NO_FILE_CLOSES @@ -1675,6 +1733,12 @@ ROCKSDB_NO_FILE_ERRORS 
ROCKSDB_NO_FILE_OPENS ROCKSDB_NUM_ITERATORS ROCKSDB_NUMBER_BLOCK_NOT_COMPRESSED +ROCKSDB_NUMBER_DB_NEXT +ROCKSDB_NUMBER_DB_NEXT_FOUND +ROCKSDB_NUMBER_DB_PREV +ROCKSDB_NUMBER_DB_PREV_FOUND +ROCKSDB_NUMBER_DB_SEEK +ROCKSDB_NUMBER_DB_SEEK_FOUND ROCKSDB_NUMBER_DELETES_FILTERED ROCKSDB_NUMBER_KEYS_READ ROCKSDB_NUMBER_KEYS_UPDATED @@ -1689,11 +1753,11 @@ ROCKSDB_NUMBER_SST_ENTRY_MERGE ROCKSDB_NUMBER_SST_ENTRY_OTHER ROCKSDB_NUMBER_SST_ENTRY_PUT ROCKSDB_NUMBER_SST_ENTRY_SINGLEDELETE -ROCKSDB_NUMBER_STAT_COMPUTES ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS ROCKSDB_NUMBER_SUPERVERSION_RELEASES -ROCKSDB_RATE_LIMIT_DELAY_MILLIS +ROCKSDB_ROW_LOCK_DEADLOCKS +ROCKSDB_ROW_LOCK_WAIT_TIMEOUTS ROCKSDB_SNAPSHOT_CONFLICT_ERRORS ROCKSDB_STALL_L0_FILE_COUNT_LIMIT_SLOWDOWNS ROCKSDB_STALL_LOCKED_L0_FILE_COUNT_LIMIT_SLOWDOWNS diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_debug.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_debug.result new file mode 100644 index 00000000000..a245fa851de --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_debug.result @@ -0,0 +1,11 @@ +# +# Issue #728: Assertion `covers_key(b)' failed in int +# myrocks::Rdb_key_def::cmp_full_keys(const rocks db::Slice&, +# const rocksdb::Slice&) +# +CREATE TABLE t2(c1 TINYINT SIGNED KEY,c2 TINYINT UNSIGNED,c3 INT); +INSERT INTO t2(c1)VALUES(0); +SELECT * FROM t2 WHERE c1<=127 ORDER BY c1 DESC; +c1 c2 c3 +0 NULL NULL +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result index d7a4f9dd065..10a6a02008e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result @@ -7,5 +7,5 @@ count(*) 10000 explain select c1 from t1 where c1 > 5 limit 10; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range i i 9 NULL 9900 Using where; Using index +1 SIMPLE t1 range i i 9 NULL # Using where; Using index drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result index 1bcd3692b4a..9fc5db98d7d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result @@ -83,12 +83,12 @@ FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT WHERE TABLE_SCHEMA = 'test' GROUP BY TABLE_NAME, PARTITION_NAME; TABLE_SCHEMA TABLE_NAME PARTITION_NAME COUNT(STAT_TYPE) -test t1 NULL 43 -test t2 NULL 43 -test t4 p0 43 -test t4 p1 43 -test t4 p2 43 -test t4 p3 43 +test t1 NULL 57 +test t2 NULL 57 +test t4 p0 57 +test t4 p1 57 +test t4 p2 57 +test t4 p3 57 SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CF_OPTIONS; CF_NAME OPTION_TYPE VALUE __system__ COMPARATOR # @@ -153,9 +153,15 @@ __system__ TABLE_FACTORY::BLOCK_SIZE # __system__ TABLE_FACTORY::BLOCK_SIZE_DEVIATION # __system__ TABLE_FACTORY::BLOCK_RESTART_INTERVAL # __system__ TABLE_FACTORY::INDEX_BLOCK_RESTART_INTERVAL # +__system__ TABLE_FACTORY::METADATA_BLOCK_SIZE # +__system__ TABLE_FACTORY::PARTITION_FILTERS # +__system__ TABLE_FACTORY::USE_DELTA_ENCODING # __system__ TABLE_FACTORY::FILTER_POLICY # __system__ TABLE_FACTORY::WHOLE_KEY_FILTERING # +__system__ TABLE_FACTORY::VERIFY_COMPRESSION # +__system__ TABLE_FACTORY::READ_AMP_BYTES_PER_BIT # __system__ TABLE_FACTORY::FORMAT_VERSION # +__system__ TABLE_FACTORY::ENABLE_INDEX_COMPRESSION # cf_t1 COMPARATOR # cf_t1 MERGE_OPERATOR # cf_t1 COMPACTION_FILTER # @@ -218,9 +224,15 @@ cf_t1 
TABLE_FACTORY::BLOCK_SIZE # cf_t1 TABLE_FACTORY::BLOCK_SIZE_DEVIATION # cf_t1 TABLE_FACTORY::BLOCK_RESTART_INTERVAL # cf_t1 TABLE_FACTORY::INDEX_BLOCK_RESTART_INTERVAL # +cf_t1 TABLE_FACTORY::METADATA_BLOCK_SIZE # +cf_t1 TABLE_FACTORY::PARTITION_FILTERS # +cf_t1 TABLE_FACTORY::USE_DELTA_ENCODING # cf_t1 TABLE_FACTORY::FILTER_POLICY # cf_t1 TABLE_FACTORY::WHOLE_KEY_FILTERING # +cf_t1 TABLE_FACTORY::VERIFY_COMPRESSION # +cf_t1 TABLE_FACTORY::READ_AMP_BYTES_PER_BIT # cf_t1 TABLE_FACTORY::FORMAT_VERSION # +cf_t1 TABLE_FACTORY::ENABLE_INDEX_COMPRESSION # default COMPARATOR # default MERGE_OPERATOR # default COMPACTION_FILTER # @@ -283,9 +295,15 @@ default TABLE_FACTORY::BLOCK_SIZE # default TABLE_FACTORY::BLOCK_SIZE_DEVIATION # default TABLE_FACTORY::BLOCK_RESTART_INTERVAL # default TABLE_FACTORY::INDEX_BLOCK_RESTART_INTERVAL # +default TABLE_FACTORY::METADATA_BLOCK_SIZE # +default TABLE_FACTORY::PARTITION_FILTERS # +default TABLE_FACTORY::USE_DELTA_ENCODING # default TABLE_FACTORY::FILTER_POLICY # default TABLE_FACTORY::WHOLE_KEY_FILTERING # +default TABLE_FACTORY::VERIFY_COMPRESSION # +default TABLE_FACTORY::READ_AMP_BYTES_PER_BIT # default TABLE_FACTORY::FORMAT_VERSION # +default TABLE_FACTORY::ENABLE_INDEX_COMPRESSION # rev:cf_t2 COMPARATOR # rev:cf_t2 MERGE_OPERATOR # rev:cf_t2 COMPACTION_FILTER # @@ -348,9 +366,15 @@ rev:cf_t2 TABLE_FACTORY::BLOCK_SIZE # rev:cf_t2 TABLE_FACTORY::BLOCK_SIZE_DEVIATION # rev:cf_t2 TABLE_FACTORY::BLOCK_RESTART_INTERVAL # rev:cf_t2 TABLE_FACTORY::INDEX_BLOCK_RESTART_INTERVAL # +rev:cf_t2 TABLE_FACTORY::METADATA_BLOCK_SIZE # +rev:cf_t2 TABLE_FACTORY::PARTITION_FILTERS # +rev:cf_t2 TABLE_FACTORY::USE_DELTA_ENCODING # rev:cf_t2 TABLE_FACTORY::FILTER_POLICY # rev:cf_t2 TABLE_FACTORY::WHOLE_KEY_FILTERING # +rev:cf_t2 TABLE_FACTORY::VERIFY_COMPRESSION # +rev:cf_t2 TABLE_FACTORY::READ_AMP_BYTES_PER_BIT # rev:cf_t2 TABLE_FACTORY::FORMAT_VERSION # +rev:cf_t2 TABLE_FACTORY::ENABLE_INDEX_COMPRESSION # DROP TABLE t1; DROP TABLE t2; DROP TABLE t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/skip_validate_tmp_table.result b/storage/rocksdb/mysql-test/rocksdb/r/skip_validate_tmp_table.result index 7642dcda43f..92906f22b1e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/skip_validate_tmp_table.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/skip_validate_tmp_table.result @@ -1,4 +1,20 @@ -CREATE TABLE t1 (pk int primary key) ENGINE=ROCKSDB; +create table t1 (pk int primary key) engine=rocksdb; +show tables; +Tables_in_test +#mysql50#t1#sql-test +t1 +call mtr.add_suppression('Invalid .old.. 
table or database name .t1#sql-test.'); set session debug_dbug="+d,gen_sql_table_name"; rename table t1 to t2; set session debug_dbug= "-d,gen_sql_table_name"; +show tables; +Tables_in_test +#mysql50#t1#sql-test +t2 +show tables; +Tables_in_test +create table t2 (pk int primary key) engine=rocksdb; +show tables; +Tables_in_test +t2 +drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/transaction.result b/storage/rocksdb/mysql-test/rocksdb/r/transaction.result index fe13c1633a8..006baaf1339 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/transaction.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/transaction.result @@ -934,3 +934,27 @@ value 3 rollback; drop table t1; +# +# #802: MyRocks: Statement rollback doesnt work correctly for nested statements +# +create table t1 (a varchar(100)) engine=rocksdb; +create table t2(a int) engine=rocksdb; +insert into t2 values (1), (2); +create table t3(a varchar(100)) engine=rocksdb; +create function func() returns varchar(100) deterministic +begin +insert into t3 values ('func-called'); +set @a= (select a from t2); +return 'func-returned'; +end;// +begin; +insert into t1 values (func()); +ERROR 21000: Subquery returns more than 1 row +select * from t1; +a +# The following must not produce 'func-called': +select * from t3; +a +rollback; +drop function func; +drop table t1,t2,t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_read_filtering.result b/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_read_filtering.result index 1df6e838bcd..c66b17926b7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_read_filtering.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_read_filtering.result @@ -51,18 +51,30 @@ INSERT INTO t1 values (3); INSERT INTO t1 values (5); INSERT INTO t1 values (7); set global rocksdb_debug_ttl_rec_ts = 0; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; SELECT * FROM t1; a +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; +variable_value-@a +4 set global rocksdb_enable_ttl_read_filtering=0; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; SELECT * FROM t1; a 1 3 5 7 +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; +variable_value-@a +0 set global rocksdb_enable_ttl_read_filtering=1; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; SELECT * FROM t1; a +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; +variable_value-@a +4 DROP TABLE t1; CREATE TABLE t1 ( a int, @@ -191,20 +203,36 @@ a connection con2; set global rocksdb_force_flush_memtable_now=1; set global rocksdb_compact_cf='default'; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; SELECT * FROM t1; a +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; +variable_value-@a +1 # Switching to connection 1 connection con1; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; SELECT * FROM t1; a 1 +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; +variable_value-@a +0 UPDATE t1 set a = a + 1; +select variable_value into @a from 
information_schema.global_status where variable_name='rocksdb_rows_filtered'; SELECT * FROM t1; a 2 +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; +variable_value-@a +0 COMMIT; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; SELECT * FROM t1; a +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; +variable_value-@a +1 DROP TABLE t1; disconnect con1; disconnect con2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result index c9fa716dffc..a7e086fde66 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result @@ -1,4 +1,3 @@ -drop table if exists t1,t2; # # A basic test whether endspace-aware variable length encoding # works when in PK @@ -756,3 +755,16 @@ email_i 1 drop table t; set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct; set session rocksdb_verify_row_debug_checksums = @save_rocksdb_verify_row_debug_checksums; +drop table if exists t; +Warnings: +Note 1051 Unknown table 'test.t' +create table t (h varchar(31) character set utf8 collate utf8_bin not null, i varchar(19) collate latin1_bin not null, primary key(i), key(h)) engine=rocksdb; +insert into t(i,h) values('a','b'); +check table t; +Table Op Msg_type Msg_text +test.t check status OK +alter table t modify h varchar(31) character set cp1257 collate cp1257_bin not null; +check table t; +Table Op Msg_type Msg_text +test.t check status OK +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result b/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result index 3291826b290..e8456457cdd 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result @@ -4,23 +4,15 @@ call mtr.add_suppression("Aborting"); select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB'; plugin_name plugin_type ROCKSDB STORAGE ENGINE -# Check that ROCKSDB plugin is not loaded: -select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB'; -plugin_name plugin_type -# Check that MyRocks has printed an error message into server error log: -FOUND 1 /enable both use_direct_reads/ in mysqld.1.err -# Now, restart the server back with regular settings -select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB'; -plugin_name plugin_type -ROCKSDB STORAGE ENGINE -# -# Now, repeat the same with another set of invalid arguments -# -# Check that ROCKSDB plugin is not loaded: -select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB'; -plugin_name plugin_type -FOUND 1 /enable both use_direct_io_for_flush_and_compaction/ in mysqld.1.err -# Now, restart the server back with regular settings -select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB'; -plugin_name plugin_type -ROCKSDB STORAGE ENGINE +Checking direct reads +FOUND 1 /enable both use_direct_reads/ in use_direct_reads_writes.err +Checking direct writes +FOUND 1 /enable both use_direct_io_for_flush_and_compaction/ in use_direct_reads_writes.err +Checking rocksdb_flush_log_at_trx_commit +FOUND 1 /rocksdb_flush_log_at_trx_commit needs to be/ in use_direct_reads_writes.err 
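Several of the updated results here (lock_wait_timeout_stats, the TTL read-filtering results above, and write_sync further down) measure behaviour the same way: snapshot a global status counter into a user variable, run the statement under test, then report the delta. A self-contained sketch of the idiom, with the counter name taken from the TTL tests:

  SELECT variable_value INTO @a FROM information_schema.global_status
   WHERE variable_name = 'rocksdb_rows_filtered';
  SELECT * FROM t1;                                   -- statement whose effect is being measured
  SELECT variable_value - @a AS filtered FROM information_schema.global_status
   WHERE variable_name = 'rocksdb_rows_filtered';     -- rows dropped by TTL read filtering since the snapshot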
+Validate flush_log settings when direct writes is enabled +set global rocksdb_flush_log_at_trx_commit=0; +set global rocksdb_flush_log_at_trx_commit=1; +ERROR 42000: Variable 'rocksdb_flush_log_at_trx_commit' can't be set to the value of '1' +set global rocksdb_flush_log_at_trx_commit=2; +ERROR 42000: Variable 'rocksdb_flush_log_at_trx_commit' can't be set to the value of '2' diff --git a/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result index ee23446eec0..d0a9b034927 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result @@ -3,6 +3,7 @@ SET GLOBAL rocksdb_write_ignore_missing_column_families=true; create table aaa (id int primary key, i int) engine rocksdb; set @save_rocksdb_flush_log_at_trx_commit=@@global.rocksdb_flush_log_at_trx_commit; SET GLOBAL rocksdb_flush_log_at_trx_commit=1; +insert aaa(id, i) values(0,1); select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; insert aaa(id, i) values(1,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; @@ -16,11 +17,11 @@ insert aaa(id, i) values(3,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; variable_value-@a 3 +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; SET GLOBAL rocksdb_flush_log_at_trx_commit=0; -select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; insert aaa(id, i) values(4,1); -SET GLOBAL rocksdb_flush_log_at_trx_commit=2; select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +SET GLOBAL rocksdb_flush_log_at_trx_commit=2; insert aaa(id, i) values(5,1); truncate table aaa; drop table aaa; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test index 78ddbe60da5..5eac8595f7b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test @@ -65,8 +65,16 @@ ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; # disable duplicate index warning --disable_warnings # now do same index using copy algorithm +# hitting max row locks (1M) +set @tmp= @@rocksdb_max_row_locks; +set session rocksdb_max_row_locks=1000; +--error ER_RDB_STATUS_GENERAL ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY; +set session rocksdb_bulk_load=1; +ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY; +set session rocksdb_bulk_load=0; --enable_warnings +set session rocksdb_max_row_locks=@tmp; # checksum testing SELECT COUNT(*) as c FROM diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test index 2a064dc3b00..5f2a37f235a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test @@ -96,3 +96,31 @@ DELETE FROM t1 WHERE a = 35 AND b = 'foo'; --sorted_result SELECT * FROM t1; DROP TABLE t1; + +--echo # +--echo # Issue #834/MDEV-15304 ALTER TABLE table_with_hidden_pk causes Can't +--echo # write; duplicate key in table error and/or crash +--echo # +CREATE TABLE t1 (a INT, KEY(a)) ENGINE=RocksDB; +INSERT INTO t1 
VALUES (1),(1+1); +create table t2 (a int); +insert into t2 values (10),(20),(30); + +BEGIN; +select * from t2; + +connect (con1,localhost,root,,); +connection con1; +alter table t1 force; + +connection default; +select * from t1; + +connection con1; +insert into t1 values (100); +select * from t1; + +disconnect con1; +connection default; +rollback; +drop table t1,t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test index 1f3ef49e534..18ccf2e39f6 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test @@ -135,3 +135,15 @@ set global rocksdb_force_flush_memtable_now = true; select * from t1; DROP TABLE t1; + +## https://github.com/facebook/mysql-5.6/issues/736 +create table t1 (i int auto_increment, key(i)) engine=rocksdb; +insert into t1 values(); +insert into t1 values(); +insert into t1 values(); + +show create table t1; +--source include/restart_mysqld.inc +show create table t1; + +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_to_start_after_corruption.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_to_start_after_corruption.test new file mode 100644 index 00000000000..67b2d5f96d7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_to_start_after_corruption.test @@ -0,0 +1,75 @@ +--source include/have_rocksdb.inc +--source include/not_valgrind.inc + +--echo # +--echo # Test how MyRocks behaves when RocksDB reports corrupted data. +--echo # + +--source include/have_debug.inc + +# use custom error log to assert on error message in search_pattern_in_file.inc +--let LOG=$MYSQLTEST_VARDIR/tmp/allow_to_start_after_corruption_debug.err +--let SEARCH_FILE=$LOG + +# restart server to change error log and ignore corruptopn on startup +--let $_mysqld_option=--log-error=$LOG --rocksdb_allow_to_start_after_corruption=1 +--source include/restart_mysqld_with_option.inc + +--echo # +--echo # Test server crashes on corrupted data and restarts +--echo # +create table t1 ( + pk int not null primary key, + col1 varchar(10) +) engine=rocksdb; + +insert into t1 values (1,1),(2,2),(3,3); + +select * from t1 where pk=1; +set session debug_dbug= "+d,rocksdb_return_status_corrupted"; +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--error 2013 +select * from t1 where pk=1; +--source include/wait_until_disconnected.inc +--let SEARCH_PATTERN=data corruption detected +--source include/search_pattern_in_file.inc +--remove_file $LOG + +--echo # +--echo # The same for scan queries +--echo # + +--source include/start_mysqld_with_option.inc +select * from t1; +set session debug_dbug= "+d,rocksdb_return_status_corrupted"; +--exec echo "wait" > $_expect_file_name +--error 2013 +select * from t1; +--source include/wait_until_disconnected.inc +--let SEARCH_PATTERN=data corruption detected +--source include/search_pattern_in_file.inc +--remove_file $LOG + +--echo # +--echo # Test restart failure. The server is shutdown at this point. 
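The new allow_to_start_after_corruption test above drives recovery through a startup flag rather than SQL, so only a rough sketch is possible here; the fault injection is debug-build only and the file location follows the test's own layout:

  -- server started with --rocksdb_allow_to_start_after_corruption=1 and a dedicated --log-error file
  SET SESSION debug_dbug = '+d,rocksdb_return_status_corrupted';  -- debug-only fault injection used by the test
  SELECT * FROM t1 WHERE pk = 1;   -- RocksDB reports corruption; the server aborts and the client sees error 2013
  -- before a clean restart without the flag, the marker file
  -- data/#rocksdb/ROCKSDB_CORRUPTED has to be removed, as the test does with --exec rm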
+--echo # + +# remove flag to ignore corruption +--let $_mysqld_option=--log-error=$LOG +--error 0 +--exec $MYSQLD_CMD --plugin_load=$HA_ROCKSDB_SO $_mysqld_option +--let SEARCH_PATTERN=The server will exit normally and stop restart attempts +--source include/search_pattern_in_file.inc +--remove_file $LOG + +--echo # +--echo # Remove corruption file and restart cleanly +--echo # + +--exec rm $MYSQLTEST_VARDIR/mysqld.$_server_id/data/#rocksdb/ROCKSDB_CORRUPTED +--source include/start_mysqld_with_option.inc + +drop table t1; + +# Restart mysqld with default options +--source include/restart_mysqld.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test b/storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test index 10722194121..b24398b1fe2 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc # # ANALYZE TABLE statements @@ -29,3 +30,28 @@ INSERT INTO t1 VALUES (5,8),(6,10),(7,11),(8,12); ANALYZE TABLE t1; DROP TABLE t1; +--echo # +--echo # MDEV-12465: Server crashes in my_scan_weight_utf8_bin upon +--echo # collecting stats for RocksDB table +--echo # + +CREATE TABLE t1 ( + pk INT, + f1 CHAR(255), + f2 TEXT, + f3 VARCHAR(255), + f4 TEXT, + PRIMARY KEY (pk), + KEY (f4(255)) +) ENGINE=RocksDB + CHARSET utf8 + COLLATE utf8_bin + PARTITION BY KEY (pk) PARTITIONS 2; +INSERT INTO t1 VALUES +(1,'foo','bar','foo','bar'), (2,'bar','foo','bar','foo'); + +ANALYZE TABLE t1 PERSISTENT FOR ALL; + +drop table t1; + +--echo # End of 10.2 tests diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_crash_safe.cnf b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_crash_safe.cnf new file mode 100644 index 00000000000..a43c4617b96 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_crash_safe.cnf @@ -0,0 +1,8 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=row +[mysqld.2] +binlog_format=row +slave_parallel_workers=1 +#rpl_skip_tx_api=ON diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_crash_safe.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_crash_safe.test new file mode 100644 index 00000000000..e61ba720aaf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_crash_safe.test @@ -0,0 +1,9 @@ +--source include/have_rocksdb.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +create table t (i int primary key auto_increment) engine=rocksdb; + +--source include/autoinc_crash_safe.inc + +--source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_crash_safe_partition.cnf b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_crash_safe_partition.cnf new file mode 100644 index 00000000000..0c0b614039e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_crash_safe_partition.cnf @@ -0,0 +1,8 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=row +[mysqld.2] +binlog_format=row +#slave_parallel_workers=1 +#rpl_skip_tx_api=ON diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_crash_safe_partition.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_crash_safe_partition.test new file mode 100644 index 00000000000..56cf93db9d9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_crash_safe_partition.test @@ -0,0 +1,10 @@ +--source include/have_rocksdb.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc +--source include/have_partition.inc + +create table t (i int primary 
key auto_increment) engine=rocksdb partition by key (i) partitions 3; + +--source include/autoinc_crash_safe.inc + +--source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_debug-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_debug-master.opt new file mode 100644 index 00000000000..83ed8522e72 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_debug-master.opt @@ -0,0 +1 @@ +--binlog-format=row diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_debug.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_debug.test new file mode 100644 index 00000000000..abcae8d98a5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_debug.test @@ -0,0 +1,118 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc +--source include/have_log_bin.inc + +--echo # +--echo # Testing upgrading from server without merges for auto_increment +--echo # to new server with such support. +--echo # + +set debug_dbug='+d,myrocks_autoinc_upgrade'; +create table t (i int primary key auto_increment); +insert into t values (); +insert into t values (); +insert into t values (); +select * from t; + +delete from t where i > 1; +select * from t; + +select table_name, index_name, auto_increment + from information_schema.rocksdb_ddl where table_name = 't'; + +set debug_dbug='-d,myrocks_autoinc_upgrade'; + +--source include/restart_mysqld.inc + +insert into t values (); +insert into t values (); +insert into t values (); +select * from t; + +select table_name, index_name, auto_increment + from information_schema.rocksdb_ddl where table_name = 't'; + +delete from t where i > 1; + +--source include/restart_mysqld.inc + +insert into t values (); +insert into t values (); +insert into t values (); +select * from t; + +drop table t; + +--echo # +--echo # Testing crash safety of transactions. 
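The autoinc_debug test introduced above exercises the commit pipeline with debug crash points; the rounds recorded next all follow the same shape. A sketch of one round, valid only on debug builds and assuming the table t created by the test:

  BEGIN;
  INSERT INTO t VALUES ();
  INSERT INTO t VALUES ();
  SET debug_dbug = '+d,crash_commit_after_prepare';   -- one of the crash points the test cycles through
  COMMIT;                                             -- expected to bring the server down
  -- after restart, the persisted counter must still cover every committed value:
  SELECT table_schema, table_name, auto_increment FROM information_schema.tables WHERE table_name = 't';
  SELECT MAX(i) FROM t;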
+--echo # +create table t (i int primary key auto_increment); +insert into t values (); +insert into t values (); +insert into t values (); + +--echo # Before anything +begin; +insert into t values (); +insert into t values (); +set debug_dbug="+d,crash_commit_before"; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--error 2013 +commit; +--source include/wait_until_disconnected.inc +--enable_reconnect +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--source include/wait_until_connected_again.inc +--disable_reconnect +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +select max(i) from t; + +--echo # After engine prepare +begin; +insert into t values (); +insert into t values (); +set debug_dbug="+d,crash_commit_after_prepare"; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--error 2013 +commit; +--source include/wait_until_disconnected.inc +--enable_reconnect +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--source include/wait_until_connected_again.inc +--disable_reconnect +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +select max(i) from t; + +--echo # After binlog +begin; +insert into t values (); +insert into t values (); +set debug_dbug="+d,crash_commit_after_log"; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--error 2013 +commit; +--source include/wait_until_disconnected.inc +--enable_reconnect +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--source include/wait_until_connected_again.inc +--disable_reconnect +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +select max(i) from t; + +--echo # After everything +begin; +insert into t values (); +insert into t values (); +set debug_dbug="+d,crash_commit_after"; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--error 2013 +commit; +--source include/wait_until_disconnected.inc +--enable_reconnect +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--source include/wait_until_connected_again.inc +--disable_reconnect +select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't'; +select max(i) from t; + +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test index 2fe0a2e3c08..9d7f0365d07 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test @@ -64,4 +64,55 @@ SELECT LAST_INSERT_ID(); SELECT a FROM t1 ORDER BY a; DROP TABLE t1; +--echo #--------------------------- +--echo # test large autoincrement values +--echo #--------------------------- +SET auto_increment_increment = 1; +SET auto_increment_offset = 1; +CREATE TABLE t1 (a BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (18446744073709551613, 'a'); +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES (NULL, 'b'); +SHOW CREATE TABLE t1; +--error ER_AUTOINC_READ_FAILED +INSERT INTO t1 VALUES (NULL, 'c'); +SELECT * FROM t1; +DROP TABLE t1; + +SET auto_increment_increment = 300; +CREATE TABLE t1 (a BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (18446744073709551613, 'a'); +SHOW CREATE TABLE t1; +--error ER_AUTOINC_READ_FAILED +INSERT INTO t1 VALUES (NULL, 'b'); +SHOW CREATE TABLE t1; +--error ER_AUTOINC_READ_FAILED 
+INSERT INTO t1 VALUES (NULL, 'c'); +SELECT * FROM t1; +DROP TABLE t1; + +SET auto_increment_offset = 200; +CREATE TABLE t1 (a BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (18446744073709551613, 'a'); +SHOW CREATE TABLE t1; +--error ER_AUTOINC_READ_FAILED +INSERT INTO t1 VALUES (NULL, 'b'); +SHOW CREATE TABLE t1; +--error ER_AUTOINC_READ_FAILED +INSERT INTO t1 VALUES (NULL, 'c'); +SELECT * FROM t1; +DROP TABLE t1; + +--echo #---------------------------------- +--echo # Issue #792 Crash in autoincrement +--echo #---------------------------------- + +CREATE TABLE t1(C1 DOUBLE AUTO_INCREMENT KEY,C2 CHAR) ENGINE=ROCKSDB; +INSERT INTO t1 VALUES(2177,0); +DROP TABLE t1; + +CREATE TABLE t0(c0 BLOB) ENGINE=ROCKSDB; +INSERT INTO t0 VALUES(0); +ALTER TABLE t0 AUTO_INCREMENT=0; +DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test b/storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test deleted file mode 100644 index 375571f705d..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test +++ /dev/null @@ -1,3 +0,0 @@ ---source include/have_rocksdb.inc - ---echo # The test checks AUTO_INCREMENT capabilities that are not supported by RocksDB-SE. diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter5-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter5-master.opt new file mode 100644 index 00000000000..7d63dc74bb8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter5-master.opt @@ -0,0 +1 @@ +--rocksdb_override_cf_options=rev:bf5_1={prefix_extractor=capped:4;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;}}; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter5.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter5.test new file mode 100644 index 00000000000..00968aebb62 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter5.test @@ -0,0 +1,61 @@ + +--echo # +--echo # Issue #809: Wrong query result with bloom filters +--echo # + +create table t1 ( + id1 bigint not null, + id2 bigint not null, + id3 varchar(100) not null, + id4 int not null, + id5 int not null, + value bigint, + value2 varchar(100), + primary key (id1, id2, id3, id4) COMMENT 'rev:bf5_1' +) engine=ROCKSDB; + + +create table t2(a int); +insert into t2 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t3(seq int); +insert into t3 +select + 1+ A.a + B.a* 10 + C.a * 100 + D.a * 1000 +from t2 A, t2 B, t2 C, t2 D; + +insert t1 +select + (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" +from t3; + +set global rocksdb_force_flush_memtable_now=1; + +--echo # Full table scan +explain +select * from t1 limit 10; +select * from t1 limit 10; + +--echo # An index scan starting from the end of the table: +explain +select * from t1 order by id1 desc,id2 desc, id3 desc, id4 desc limit 1; +select * from t1 order by id1 desc,id2 desc, id3 desc, id4 desc limit 1; + +# A testcase for an assertion that the fix is removing +# The only requirement for the used column family is that it is reverse-ordered +create table t4 ( + pk int unsigned not null primary key, + kp1 int unsigned not null, + kp2 int unsigned not null, + col1 int unsigned, + key(kp1, kp2) comment 'rev:bf5_2' +) engine=rocksdb; + +insert into t4 values (1, 0xFFFF, 0xFFF, 12345); + +--echo # This must not fail an assert: +select * from t4 force index(kp1) where kp1=0xFFFFFFFF and kp2<=0xFFFFFFFF order by kp2 desc; + +drop table t1,t2,t3,t4; + + diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc index 5c122d6bd19..cf8b26847f0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc @@ -5,6 +5,7 @@ insert t1 select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" from seq_1_to_10000; insert t2 select * from t1; +set global rocksdb_force_flush_memtable_now=1; # BF conditions (prefix short(4B)|medium(20B)|long(240B)) #0 no eq condition (o, x, x) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test index 6c6c75dd37e..0db5e6d9cc4 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test @@ -7,4 +7,4 @@ --let pk_cf=cf1 --let data_order_desc=0 ---source bulk_load.inc +--source ../include/bulk_load.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_drop_table.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_drop_table.test new file mode 100644 index 00000000000..18e40fbf4ab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_drop_table.test @@ -0,0 +1,19 @@ +--source include/have_rocksdb.inc + +CREATE TABLE t1 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB; + +SET rocksdb_bulk_load_allow_unsorted=1; +SET rocksdb_bulk_load=1; + +INSERT INTO t1 VALUES (1); + +--connect (con1,localhost,root,,) +DROP TABLE t1; + +--connection default +--disconnect con1 + +# This would have crashed the server prior to the fix +SET rocksdb_bulk_load=0; +--error ER_NO_SUCH_TABLE +SELECT * FROM t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_errors.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_errors.test index 4a4c42d1fcd..1e349d0ff18 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_errors.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_errors.test @@ -1,7 +1,13 @@ --source include/have_rocksdb.inc +--source include/count_sessions.inc + +--let LOG1=$MYSQLTEST_VARDIR/tmp/rocksdb.bulk_load_errors.1.err +--let $_mysqld_option=--log-error=$LOG1 +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--source include/restart_mysqld_with_option.inc ### Bulk load ### -CREATE TABLE t1(pk INT, PRIMARY KEY(pk)); +CREATE TABLE t1(pk INT, PRIMARY KEY(pk)) ENGINE=ROCKSDB; # Make sure we get an error with out of order keys during bulk load SET rocksdb_bulk_load=1; @@ -21,19 +27,49 @@ INSERT INTO t1 VALUES(2); INSERT INTO t1 VALUES(20); INSERT INTO t1 VALUES(21); ---echo # ---echo # In MyRocks, the following statement will intentionally crash the server. 
---echo # In MariaDB, it will cause an error --error ER_OVERLAPPING_KEYS SET rocksdb_bulk_load=0; ---echo # ---echo # Despite the error, bulk load operation is over so the variable value ---echo # will be 0: -select @@rocksdb_bulk_load; - +SHOW VARIABLES LIKE 'rocksdb_bulk_load'; call mtr.add_suppression('finalizing last SST file while setting bulk loading variable'); +SELECT * FROM t1; + +--let SEARCH_FILE=$LOG1 +--let SEARCH_PATTERN=RocksDB: Error [0-9]+ finalizing last SST file while setting bulk loading variable +--source include/search_pattern_in_file.inc + +--let LOG2=$MYSQLTEST_VARDIR/tmp/rocksdb.bulk_load_errors.2.err +--let $_mysqld_option=--log-error=$LOG2 +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--source include/restart_mysqld_with_option.inc +--remove_file $LOG1 + + +# Make sure we get an error in log when we disconnect and do not assert the server +--connect (con1,localhost,root,,) +SET rocksdb_bulk_load=1; +INSERT INTO t1 VALUES(1); +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(20); +INSERT INTO t1 VALUES(21); +--connection default +--disconnect con1 + +SELECT * FROM t1; + +--source include/wait_until_count_sessions.inc + +--let SEARCH_FILE=$LOG2 +--let SEARCH_PATTERN=RocksDB: Error [0-9]+ finalizing last SST file while disconnecting +--source include/search_pattern_in_file.inc + +--let LOG3=$MYSQLTEST_VARDIR/tmp/rocksdb.bulk_load_errors.3.err +--let $_mysqld_option=--log-error=$LOG3 +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--source include/restart_mysqld_with_option.inc +--remove_file $LOG2 + TRUNCATE TABLE t1; ### Bulk load with unsorted PKs ### @@ -60,3 +96,46 @@ SELECT * FROM t1; SET rocksdb_bulk_load_allow_unsorted=DEFAULT; DROP TABLE t1; + +# This would trigger a debug assertion that is just an error in release builds +CREATE TABLE t1(c1 INT KEY) ENGINE=ROCKSDB; +SET rocksdb_bulk_load=1; +--error ER_KEYS_OUT_OF_ORDER +INSERT INTO t1 VALUES (),(),(); +SET rocksdb_bulk_load=0; +DROP TABLE t1; + +# Crash when table open cache closes handler with bulk load operation not finalized +SET @orig_table_open_cache=@@global.table_open_cache; +CREATE TABLE t1(a INT AUTO_INCREMENT, b INT, PRIMARY KEY (a)) ENGINE=ROCKSDB DEFAULT CHARSET=latin1; +SET rocksdb_bulk_load=1; +INSERT INTO t1 VALUES(13, 0); +INSERT INTO t1 VALUES(2, 'test 2'); +INSERT INTO t1 VALUES(@id, @arg04); +SET @@global.table_open_cache=FALSE; +INSERT INTO t1 VALUES(51479+0.333333333,1); +DROP TABLE t1; +SET @@global.table_open_cache=@orig_table_open_cache; + +--let SEARCH_FILE=$LOG3 +--let SEARCH_PATTERN=RocksDB: Error [0-9]+ finalizing bulk load while closing handler +--source include/search_pattern_in_file.inc + +--source include/restart_mysqld.inc + +--remove_file $LOG3 + +# Switch between tables, but also introduce duplicate key errors +CREATE TABLE t1 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB; +CREATE TABLE t2 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB; +SET rocksdb_bulk_load=1; +INSERT INTO t1 VALUES (1), (2); +INSERT INTO t2 VALUES (1), (2); +INSERT INTO t1 VALUES (1); +--error ER_OVERLAPPING_KEYS +INSERT INTO t2 VALUES (3); +SET rocksdb_bulk_load=0; +DROP TABLE t1; +DROP TABLE t2; + +--source include/wait_until_count_sessions.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test index 7c4d7aef0e5..67d68ac7a2d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test @@ -6,4 +6,4 @@ --let pk_cf=rev:cf1 --let 
data_order_desc=0 ---source bulk_load.inc +--source ../include/bulk_load.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test index a31e86753f3..7110fe5f1d7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test @@ -6,4 +6,4 @@ --let pk_cf=rev:cf1 --let data_order_desc=1 ---source bulk_load.inc +--source ../include/bulk_load.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test index f36990ed567..6c6e51a2a51 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test @@ -6,4 +6,4 @@ --let pk_cf=cf1 --let data_order_desc=1 ---source bulk_load.inc +--source ../include/bulk_load.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_unsorted.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_unsorted.test index 78bb9312ca5..2abeae343c9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_unsorted.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_unsorted.test @@ -3,136 +3,4 @@ --let pk_cf=cf1 ---disable_warnings -DROP TABLE IF EXISTS t1; ---enable_warnings - -SET rocksdb_bulk_load_size=3; -SET rocksdb_bulk_load_allow_unsorted=1; - -### Test individual INSERTs ### - -# A table with only a PK won't have rows until the bulk load is finished -eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf"); -SET rocksdb_bulk_load=1; ---disable_query_log -let $sign = 1; -let $max = 5; -let $i = 1; -while ($i <= $max) { - let $a = 1 + $sign * $i; - let $b = 1 - $sign * $i; - let $sign = -$sign; - let $insert = INSERT INTO t1 VALUES ($a, $b); - eval $insert; - inc $i; -} ---enable_query_log -SELECT * FROM t1; -SET rocksdb_bulk_load=0; -SELECT * FROM t1; -DROP TABLE t1; - -# A table with a PK and a SK shows rows immediately -eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf", KEY(b)); -SET rocksdb_bulk_load=1; ---disable_query_log -let $sign = 1; -let $max = 5; -let $i = 1; -while ($i <= $max) { - let $a = 1 + $sign * $i; - let $b = 1 - $sign * $i; - let $sign = -$sign; - let $insert = INSERT INTO t1 VALUES ($a, $b); - eval $insert; - inc $i; -} ---enable_query_log - -SELECT * FROM t1; -SET rocksdb_bulk_load=0; -DROP TABLE t1; - -# Inserting into another table finishes bulk load to the previous table -eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf"); -eval CREATE TABLE t2(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf"); - -SET rocksdb_bulk_load=1; -INSERT INTO t1 VALUES (1,1); -INSERT INTO t2 VALUES (1,1); -SELECT * FROM t1; -INSERT INTO t1 VALUES (2,2); -SELECT * FROM t2; -SELECT * FROM t1; -SET rocksdb_bulk_load=0; -SELECT * FROM t1; -DROP TABLE t1, t2; - -### Test bulk load from a file ### -eval CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf"); -eval CREATE TABLE t2(a INT, b INT, PRIMARY KEY(b) COMMENT "$pk_cf"); -eval CREATE TABLE t3(a INT, b INT, PRIMARY KEY(a) COMMENT "$pk_cf") - PARTITION BY KEY() PARTITIONS 4; - ---let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")` -# Create a text file with data to import into the table. 
-# PK and SK are not in any order ---let ROCKSDB_INFILE = $file -perl; -my $fn = $ENV{'ROCKSDB_INFILE'}; -open(my $fh, '>', $fn) || die "perl open($fn): $!"; -binmode $fh; -my $max = 5000000; -my $sign = 1; -for (my $ii = 0; $ii < $max; $ii++) -{ - my $a = 1 + $sign * $ii; - my $b = 1 - $sign * $ii; - print $fh "$a\t$b\n"; -} -close($fh); -EOF ---file_exists $file - -# Make sure a snapshot held by another user doesn't block the bulk load -connect (other,localhost,root,,); -set session transaction isolation level repeatable read; -select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; -start transaction with consistent snapshot; -select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; - -connection default; -set rocksdb_bulk_load=1; -set rocksdb_bulk_load_size=100000; ---disable_query_log ---echo LOAD DATA INFILE INTO TABLE t1; -eval LOAD DATA INFILE '$file' INTO TABLE t1; ---echo LOAD DATA INFILE INTO TABLE t2; -eval LOAD DATA INFILE '$file' INTO TABLE t2; ---echo LOAD DATA INFILE INTO TABLE t3; -eval LOAD DATA INFILE '$file' INTO TABLE t3; ---enable_query_log -set rocksdb_bulk_load=0; - ---remove_file $file - -# Make sure row count index stats are correct ---replace_column 6 # 7 # 8 # 9 # -SHOW TABLE STATUS WHERE name LIKE 't%'; - -ANALYZE TABLE t1, t2, t3; - ---replace_column 6 # 7 # 8 # 9 # -SHOW TABLE STATUS WHERE name LIKE 't%'; - -# Make sure all the data is there. -select count(a) from t1; -select count(b) from t1; -select count(a) from t2; -select count(b) from t2; -select count(a) from t3; -select count(b) from t3; - -DROP TABLE t1, t2, t3; -SET rocksdb_bulk_load_allow_unsorted=0; +--source ../include/bulk_load_unsorted.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_unsorted_rev.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_unsorted_rev.test new file mode 100644 index 00000000000..de9a5c26424 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_unsorted_rev.test @@ -0,0 +1,5 @@ +--source include/have_rocksdb.inc + +--let pk_cf=rev:cf1 + +--source ../include/bulk_load_unsorted.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test b/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test index 689753faf8d..14a82d7e462 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test @@ -2,6 +2,48 @@ --source include/restart_mysqld.inc +# Test memtable cardinality statistics +CREATE TABLE t0 (id int PRIMARY KEY, a int, INDEX ix_a (a)) engine=rocksdb; + +# populate the table with 10 reconds where cardinality of id is N and a is N/2. 
+insert into t0 values (0, 0),(1, 1),(2, 2),(3, 3),(4, 4), +(5, 4),(6, 4),(7, 4),(8, 4),(9, 4); + +# Assert no cardinality data exists before ANALYZE TABLE is done +SELECT cardinality FROM information_schema.statistics where table_name="t0" and +column_name="id"; +SELECT cardinality FROM information_schema.statistics where table_name="t0" and +column_name="a"; + +--disable_result_log +ANALYZE TABLE t0; +--enable_result_log + +SELECT table_rows into @N FROM information_schema.tables +WHERE table_name = "t0"; +SELECT FLOOR(@N/cardinality) FROM +information_schema.statistics where table_name="t0" and column_name="id"; +SELECT FLOOR(@N/cardinality) FROM +information_schema.statistics where table_name="t0" and column_name="a"; + +# Flush the table and re-run the test as statistics is calculated a bit +# differently for memtable and SST files +SET GLOBAL rocksdb_force_flush_memtable_now = 1; +--disable_result_log +ANALYZE TABLE t0; +--enable_result_log + +SELECT table_rows into @N FROM information_schema.tables +WHERE table_name = "t0"; +SELECT FLOOR(@N/cardinality) FROM +information_schema.statistics where table_name="t0" and column_name="id"; +SELECT FLOOR(@N/cardinality) FROM +information_schema.statistics where table_name="t0" and column_name="a"; + +drop table t0; + +# Test big table on SST + --disable_warnings DROP TABLE IF EXISTS t1,t10,t11; --enable_warnings diff --git a/storage/rocksdb/mysql-test/rocksdb/t/check_ignore_unknown_options.test b/storage/rocksdb/mysql-test/rocksdb/t/check_ignore_unknown_options.test new file mode 100644 index 00000000000..9e7c816ae6e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/check_ignore_unknown_options.test @@ -0,0 +1,26 @@ +# MariaDB: "xargs" is not present on windows builders. +# we could work around this but this is not a priority. 
+--source include/not_windows.inc + +--disable_warnings +let $MYSQLD_DATADIR= `select @@datadir`; +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +let $error_log= $MYSQLTEST_VARDIR/log/my_restart.err; +select variable_name, variable_value from information_schema.global_variables where variable_name="rocksdb_ignore_unknown_options"; + +--exec find $MYSQLD_DATADIR/#rocksdb/OPTIONS* | sort -n | tail -1 | xargs -0 -I {} -t sh -c "echo hello=world>>{}" + +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--shutdown_server 10 + +--error 1 +--exec $MYSQLD_CMD --plugin_load=$HA_ROCKSDB_SO --rocksdb_ignore_unknown_options=0 --loose-console --log-error=$error_log + +let SEARCH_FILE= $error_log; +let SEARCH_PATTERN= RocksDB: Compatibility check against existing database options failed; +--source include/search_pattern_in_file.inc +--enable_reconnect +--exec echo "restart" > $restart_file +--source include/wait_until_connected_again.inc +--exec find $MYSQLD_DATADIR/#rocksdb/OPTIONS* | sort -n | tail -1 | xargs -0 -I {} -t sh -c "sed -i'' -e '/hello=world/d' {}" +select variable_name, variable_value from information_schema.global_variables where variable_name="rocksdb_ignore_unknown_options"; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/deadlock_tracking.test b/storage/rocksdb/mysql-test/rocksdb/t/deadlock_tracking.test index d2abcb3b63b..9677d2dbbaa 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/deadlock_tracking.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/deadlock_tracking.test @@ -21,29 +21,29 @@ let $con3= `SELECT CONNECTION_ID()`; connection default; eval create table t (i int primary key) engine=$engine; insert into t values (1), (2), (3); ---replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/ +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ show engine rocksdb transaction status; echo Deadlock #1; --source include/simple_deadlock.inc connection default; ---replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/ +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ show engine rocksdb transaction status; echo Deadlock #2; --source include/simple_deadlock.inc connection default; ---replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/ +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ show engine rocksdb transaction status; set global rocksdb_max_latest_deadlocks = 10; echo Deadlock #3; --source include/simple_deadlock.inc connection default; ---replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/ +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ show engine rocksdb transaction status; set global rocksdb_max_latest_deadlocks = 1; ---replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/ +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} 
[0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ show engine rocksdb transaction status; connection con3; @@ -77,8 +77,10 @@ let $wait_condition = select count(*) = 1 from information_schema.rocksdb_trx where thread_id = $con2 and waiting_key != ""; --source include/wait_condition.inc +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_row_lock_deadlocks'; --error ER_LOCK_DEADLOCK select * from t where i=1 for update; +select case when variable_value-@a = 1 then 'true' else 'false' end as deadlocks from information_schema.global_status where variable_name='rocksdb_row_lock_deadlocks'; rollback; connection con2; @@ -91,7 +93,7 @@ rollback; connection default; set global rocksdb_max_latest_deadlocks = 5; ---replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/ +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ show engine rocksdb transaction status; echo Deadlock #5; @@ -133,7 +135,7 @@ connection con3; rollback; connection default; ---replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/ +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ show engine rocksdb transaction status; disconnect con1; @@ -143,11 +145,11 @@ disconnect con3; set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout; set global rocksdb_deadlock_detect = @prior_deadlock_detect; drop table t; ---replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/ +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/ show engine rocksdb transaction status; set global rocksdb_max_latest_deadlocks = 0; --echo # Clears deadlock buffer of any existent deadlocks. 
set global rocksdb_max_latest_deadlocks = @prior_max_latest_deadlocks; ---replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTIONID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/ +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/ show engine rocksdb transaction status; --source include/wait_until_count_sessions.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index 47005c1baff..3b9726986f0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -70,7 +70,6 @@ blind_delete_without_tx_api: MDEV-12286: rocksdb.blind_delete_without_tx_api tes unique_check: wrong error number autoinc_vars_thread: debug sync point wait timed out information_schema: MDEV-14372: unstable testcase -bloomfilter: MDEV-14562 ## ## Tests that fail for some other reason diff --git a/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test b/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test index 7dc3c207ecc..716f372067b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test @@ -7,6 +7,7 @@ --disable_warnings DROP TABLE IF EXISTS is_ddl_t1; DROP TABLE IF EXISTS is_ddl_t2; +DROP TABLE IF EXISTS is_ddl_t3; --enable_warnings CREATE TABLE is_ddl_t1 (i INT, j INT, k INT, l INT, @@ -16,9 +17,13 @@ CREATE TABLE is_ddl_t1 (i INT, j INT, k INT, l INT, CREATE TABLE is_ddl_t2 (x INT, y INT, z INT, PRIMARY KEY (z, y) COMMENT 'zy_cf', KEY (x)) ENGINE = ROCKSDB; +CREATE TABLE is_ddl_t3 (a INT, b INT, c INT, PRIMARY KEY (a)) ENGINE = ROCKSDB + COMMENT "ttl_duration=3600;"; + --sorted_result -SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%'; +SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF,TTL_DURATION,INDEX_FLAGS FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%'; # cleanup DROP TABLE is_ddl_t1; DROP TABLE is_ddl_t2; +DROP TABLE is_ddl_t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/i_s_deadlock.test b/storage/rocksdb/mysql-test/rocksdb/t/i_s_deadlock.test new file mode 100644 index 00000000000..21558899782 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/i_s_deadlock.test @@ -0,0 +1,158 @@ +--source include/have_rocksdb.inc + +set @prior_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_deadlock_detect = @@rocksdb_deadlock_detect; +set @prior_max_latest_deadlocks = @@rocksdb_max_latest_deadlocks; +set global rocksdb_deadlock_detect = on; +set global rocksdb_lock_wait_timeout = 10000; +--echo # Clears deadlock buffer of any prior deadlocks. 
+set global rocksdb_max_latest_deadlocks = 0; +set global rocksdb_max_latest_deadlocks = @prior_max_latest_deadlocks; + +# needed by simple_deadlock.inc +let $engine = rocksdb; + +--source include/count_sessions.inc +connect (con1,localhost,root,,); +let $con1= `SELECT CONNECTION_ID()`; + +connect (con2,localhost,root,,); +let $con2= `SELECT CONNECTION_ID()`; + +connect (con3,localhost,root,,); +let $con3= `SELECT CONNECTION_ID()`; + +connection default; +show create table information_schema.rocksdb_deadlock; + +create table t (i int primary key) engine=rocksdb; +insert into t values (1), (2), (3); +select * from information_schema.rocksdb_deadlock; + +echo Deadlock #1; +--source include/simple_deadlock.inc +connection default; +--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY +select * from information_schema.rocksdb_deadlock; + +echo Deadlock #2; +--source include/simple_deadlock.inc +connection default; +--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY +select * from information_schema.rocksdb_deadlock; +set global rocksdb_max_latest_deadlocks = 10; + +echo Deadlock #3; +--source include/simple_deadlock.inc +connection default; +--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY +select * from information_schema.rocksdb_deadlock; +set global rocksdb_max_latest_deadlocks = 1; +--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY +select * from information_schema.rocksdb_deadlock; + +connection con3; +set rocksdb_deadlock_detect_depth = 2; + +echo Deadlock #4; +connection con1; +begin; +select * from t where i=1 for update; + +connection con2; +begin; +select * from t where i=2 for update; + +connection con3; +begin; +select * from t where i=3 for update; + +connection con1; +send select * from t where i=2 for update; + +connection con2; +let $wait_condition = select count(*) = 1 from information_schema.rocksdb_trx +where thread_id = $con1 and waiting_key != ""; +--source include/wait_condition.inc + +send select * from t where i=3 for update; + +connection con3; +let $wait_condition = select count(*) = 1 from information_schema.rocksdb_trx +where thread_id = $con2 and waiting_key != ""; +--source include/wait_condition.inc + +--error ER_LOCK_DEADLOCK +select * from t where i=1 for update; +rollback; + +connection con2; +reap; +rollback; + +connection con1; +reap; +rollback; + +connection default; +set global rocksdb_max_latest_deadlocks = 5; +--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY +select * from information_schema.rocksdb_deadlock; + +echo Deadlock #5; +connection con1; +begin; +select * from t where i=1 for update; + +connection con2; +begin; +select * from t where i=2 for update; + +connection con3; +begin; +select * from t where i=3 lock in share mode; + +connection con1; +select * from t where i=100 for update; +select * from t where i=101 for update; +send select * from t where i=2 for update; + +connection con2; +let $wait_condition = select count(*) = 1 from information_schema.rocksdb_trx +where thread_id = $con1 and waiting_key != ""; +--source include/wait_condition.inc + +select * from t where i=3 lock in share mode; +select * from t where i=200 for update; +select * from t where i=201 for update; + +--error ER_LOCK_DEADLOCK +select * from t where i=1 lock in share mode; +rollback; + +connection con1; +reap; +rollback; + +connection con3; +rollback; + +connection default; +--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY +select * from information_schema.rocksdb_deadlock; + +disconnect con1; 
+disconnect con2; +disconnect con3; + +set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_deadlock_detect; +drop table t; +--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY 6 INDEX_NAME 7 TABLE_NAME +select * from information_schema.rocksdb_deadlock; +set global rocksdb_max_latest_deadlocks = 0; +--echo # Clears deadlock buffer of any existent deadlocks. +set global rocksdb_max_latest_deadlocks = @prior_max_latest_deadlocks; +--replace_column 1 DEADLOCK_ID 2 TRANSACTION_ID 4 WAITING_KEY +select * from information_schema.rocksdb_deadlock; +--source include/wait_until_count_sessions.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb.test index abf8d71911b..887b4dd6a65 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb.test @@ -70,14 +70,15 @@ while ($cnt) SELECT COUNT(*) FROM t1; +# flush the table first as statistics is calculated a bit differently for memtable and SST files +SET GLOBAL rocksdb_force_flush_memtable_now = 1; + -- disable_query_log -- disable_result_log ANALYZE TABLE t1; -- enable_result_log -- enable_query_log -SET GLOBAL rocksdb_force_flush_memtable_now = 1; - --replace_column 9 # EXPLAIN UPDATE t1 SET filler1='to be deleted' WHERE key1=100 AND key2=100; UPDATE t1 SET filler1='to be deleted' WHERE key1=100 and key2=100; @@ -95,8 +96,8 @@ while ($i <= 1000) { eval $insert; } --enable_query_log -analyze table t1; set global rocksdb_force_flush_memtable_now=1; +analyze table t1; --replace_column 9 # explain select * from t1 where key1 = 1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb2.test b/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb2.test index a4d26cf7739..2306558ff41 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb2.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb2.test @@ -5,7 +5,8 @@ # t/index_merge_innodb.test # -# Index merge tests +# Index merge tests (the test is called 'index_merge_rocksdb2' because +# 'index_merge_rocksdb' has already existed before copying 'index_merge_innodb') # # Last update: # 2006-08-07 ML test refactored (MySQL 5.1) @@ -61,6 +62,7 @@ INSERT INTO t1 SELECT id + 16, 7, 0 FROM t1; -- disable_query_log -- disable_result_log +set global rocksdb_force_flush_memtable_now=1; analyze table t1; -- enable_result_log -- enable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test index 2ffc186dd8f..67dae5d6263 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test @@ -19,8 +19,8 @@ drop table t1; --let $max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1) --replace_result $max_index_id max_index_id -select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; -select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type<>'DDL_DROP_INDEX_ONGOING'; +select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type<>'DDL_DROP_INDEX_ONGOING'; SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0; select VALUE into @keysIn from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn'; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt index a0bf5759ec4..3b7d80662db 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt @@ -1,4 +1,5 @@ --rocksdb_write_disable_wal=1 +--rocksdb_flush_log_at_trx_commit=0 --rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k;level0_file_num_compaction_trigger=4;level0_slowdown_writes_trigger=256;level0_stop_writes_trigger=256;max_write_buffer_number=16;compression_per_level=kNoCompression;memtable=vector:1024 --rocksdb_override_cf_options=__system__={memtable=skip_list:16} --rocksdb_compaction_sequential_deletes=0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue255.test b/storage/rocksdb/mysql-test/rocksdb/t/issue255.test index 4f1927d366c..588b28fbee6 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/issue255.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue255.test @@ -8,6 +8,38 @@ SHOW TABLE STATUS LIKE 't1'; INSERT INTO t1 VALUES ('538647864786478647864'); --replace_column 3 # 6 # 7 # 8 # 9 # 10 # +SELECT * FROM t1; +SHOW TABLE STATUS LIKE 't1'; + +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (); +SELECT * FROM t1; +--replace_column 3 # 6 # 7 # 8 # 9 # 10 # +SHOW TABLE STATUS LIKE 't1'; + +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (); +SELECT * FROM t1; +--replace_column 3 # 6 # 7 # 8 # 9 # 10 # +SHOW TABLE STATUS LIKE 't1'; + +DROP TABLE t1; + +CREATE TABLE t1 (pk TINYINT NOT NULL PRIMARY KEY AUTO_INCREMENT); + +INSERT INTO t1 VALUES (5); +--replace_column 3 # 6 # 7 # 8 # 9 # 10 # +SHOW TABLE STATUS LIKE 't1'; + +INSERT INTO t1 VALUES (1000); +SELECT * FROM t1; +--replace_column 3 # 6 # 7 # 8 # 9 # 10 # +SHOW TABLE STATUS LIKE 't1'; + +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (); +SELECT * FROM t1; +--replace_column 3 # 6 # 7 # 8 # 9 # 10 # SHOW TABLE STATUS LIKE 't1'; --error ER_DUP_ENTRY diff --git a/storage/rocksdb/mysql-test/rocksdb/t/lock_wait_timeout_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/lock_wait_timeout_stats.test index f1777ea3e93..5288680c3bd 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/lock_wait_timeout_stats.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/lock_wait_timeout_stats.test @@ -16,16 +16,20 @@ set @@rocksdb_lock_wait_timeout=1; begin; --connection con1 +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts'; --error ER_LOCK_WAIT_TIMEOUT insert into t values(0); select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; +select case when variable_value-@a = 1 then 'true' else 'false' end as waits from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts'; --connection con2 +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts'; --error ER_LOCK_WAIT_TIMEOUT insert into t values(0); select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; +select case when variable_value-@a = 1 then 'true' else 'false' end as waits from information_schema.global_status where variable_name='rocksdb_row_lock_wait_timeouts'; --disconnect con1 --connection default diff 
--git a/storage/rocksdb/mysql-test/rocksdb/t/mariadb_port_fixes.test b/storage/rocksdb/mysql-test/rocksdb/t/mariadb_port_fixes.test index 681e3d2d63f..7282ec237c2 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/mariadb_port_fixes.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/mariadb_port_fixes.test @@ -83,3 +83,21 @@ CREATE TABLE t1 (i INT) ENGINE=RocksDB; FLUSH TABLE t1 FOR EXPORT; DROP TABLE t1; +--echo # +--echo # MDEV-16154 Server crashes in in myrocks::ha_rocksdb::load_auto_incr_value_from_inde +--echo # +CREATE TABLE t1 (a INT) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1); +ALTER TABLE t1 AUTO_INCREMENT 10; + +DROP TABLE t1; + +--echo # +--echo # MDEV-16155: UPDATE on RocksDB table with unique constraint does not work +--echo # +CREATE TABLE t1 (a INT, b CHAR(8), UNIQUE INDEX(a)) ENGINE=RocksDB; +INSERT INTO t1 (a,b) VALUES (1,'foo'),(2,'bar'); +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/max_open_files.test b/storage/rocksdb/mysql-test/rocksdb/t/max_open_files.test new file mode 100644 index 00000000000..c7c5e7b2ef3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/max_open_files.test @@ -0,0 +1,53 @@ +--source include/have_rocksdb.inc + +# Basic Sysbench run fails with basic MyROCKS install due to lack of open files + +# test for over limit +CALL mtr.add_suppression("RocksDB: rocksdb_max_open_files should not be greater than the open_files_limit*"); + +--let $over_rocksdb_max_open_files=`SELECT @@global.open_files_limit + 100` +--let $under_rocksdb_max_open_files=`SELECT @@global.open_files_limit -1` +--let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/rocksdb.max_open_files.err +--let SEARCH_PATTERN=RocksDB: rocksdb_max_open_files should not be greater than the open_files_limit + +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR $over_rocksdb_max_open_files over_rocksdb_max_open_files +--let $_mysqld_option=--log-error=$SEARCH_FILE --rocksdb_max_open_files=$over_rocksdb_max_open_files +--source include/restart_mysqld_with_option.inc +--source include/search_pattern_in_file.inc + +SELECT FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files; + +# test for within limit +--let $_mysqld_option=--rocksdb_max_open_files=$under_rocksdb_max_open_files +--source include/restart_mysqld_with_option.inc + +SELECT @@global.open_files_limit - 1 = @@global.rocksdb_max_open_files; + +# test for minimal value +--let $_mysqld_option=--rocksdb_max_open_files=0 +--source include/restart_mysqld_with_option.inc + +SELECT @@global.rocksdb_max_open_files; + +# verify that we can still do work with no descriptor cache +CREATE TABLE t1(a INT) ENGINE=ROCKSDB; +INSERT INTO t1 VALUES(0),(1),(2),(3),(4); +SET GLOBAL rocksdb_force_flush_memtable_and_lzero_now=1; +DROP TABLE t1; + +# test for unlimited +--let $_mysqld_option=--rocksdb_max_open_files=-1 +--source include/restart_mysqld_with_option.inc + +SELECT @@global.rocksdb_max_open_files; + +# test for auto-tune +--let $_mysqld_option=--rocksdb_max_open_files=-2 +--source include/restart_mysqld_with_option.inc + +SELECT FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files; + +# cleanup +--let _$mysqld_option= +--source include/restart_mysqld.inc +--remove_file $SEARCH_FILE diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test index 9e904908330..4947ffb59b8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test @@ -32,7 +32,7 @@ 
BEGIN; insert into r1 values (5,5,5,5,5,5,5,5); update r1 set value1=value1+100 where id1=1 and id2=1 and id3='1'; ---exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --rocksdb --order-by-primary-desc --rocksdb_bulk_load test +--exec ASAN_OPTIONS="detect_leaks=0" $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --rocksdb --order-by-primary-desc --rocksdb_bulk_load test rollback; @@ -44,16 +44,16 @@ source include/search_pattern_in_file.inc; set @save_default_storage_engine=@@global.default_storage_engine; SET GLOBAL default_storage_engine=rocksdb; ---exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test +--exec ASAN_OPTIONS="detect_leaks=0" $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test source include/search_pattern_in_file.inc; # Sanity test mysqldump when the --innodb-stats-on-metadata is specified (no effect) --echo ==== mysqldump with --innodb-stats-on-metadata ==== ---exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --innodb-stats-on-metadata test +--exec ASAN_OPTIONS="detect_leaks=0" $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --innodb-stats-on-metadata test # testing mysqldump work with statement based binary logging SET GLOBAL binlog_format=statement; ---exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test > /dev/null +--exec ASAN_OPTIONS="detect_leaks=0" $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test > /dev/null SET GLOBAL binlog_format=row; drop table r1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test index 3631e703de6..ca9eb5d2ecf 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test @@ -29,7 +29,7 @@ let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; select variable_value into @a from information_schema.global_status where variable_name='rocksdb_block_cache_add'; ---exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --rocksdb test > /dev/null +--exec ASAN_OPTIONS="detect_leaks=0" $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --rocksdb test > /dev/null # verifying block cache was not filled select case when variable_value - @a > 20 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add'; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test index 9199c572933..b884738424f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test @@ -574,7 +574,6 @@ insert into t30 values ('row3', 'row3-key', 'row3-data'), ('row4', 'row4-key', 'row4-data'), ('row5', 'row5-key', 'row5-data'); -analyze table t30; --replace_column 9 # explain @@ -786,11 +785,15 @@ drop table t45; --echo # Now it fails if there is data overlap with what --echo # already exists --echo # +# We exclude rocksdb_max_open_files here because it value is dependent on +# the value of the servers open_file_limit and is expected to be different +# across distros and installs --replace_regex /[a-f0-9]{40}/#/ show variables where variable_name like 'rocksdb%' and + variable_name not like 
'rocksdb_max_open_files' and variable_name not like 'rocksdb_supported_compression_types'; create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt index 792e3808f1e..320c48e5563 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt @@ -1,2 +1,3 @@ --rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50 +--log_warnings=3 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_debug.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_debug.test new file mode 100644 index 00000000000..7cd4e09e946 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_debug.test @@ -0,0 +1,14 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +--echo # +--echo # Issue #728: Assertion `covers_key(b)' failed in int +--echo # myrocks::Rdb_key_def::cmp_full_keys(const rocks db::Slice&, +--echo # const rocksdb::Slice&) +--echo # + +CREATE TABLE t2(c1 TINYINT SIGNED KEY,c2 TINYINT UNSIGNED,c3 INT); +INSERT INTO t2(c1)VALUES(0); +SELECT * FROM t2 WHERE c1<=127 ORDER BY c1 DESC; +DROP TABLE t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test index 6b8d0b90e90..a7ac236451e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test @@ -15,6 +15,7 @@ while ($i<10000) --enable_query_log analyze table t1; select count(*) from t1; +--replace_column 9 # explain select c1 from t1 where c1 > 5 limit 10; drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/skip_validate_tmp_table.test b/storage/rocksdb/mysql-test/rocksdb/t/skip_validate_tmp_table.test index 945b0079cce..c4321462dfd 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/skip_validate_tmp_table.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/skip_validate_tmp_table.test @@ -1,39 +1,39 @@ --source include/have_rocksdb.inc --source include/have_debug.inc -# Write file to make mysql-test-run.pl expect the "crash", but don't restart the -# server until it is told to --let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect -CREATE TABLE t1 (pk int primary key) ENGINE=ROCKSDB; +create table t1 (pk int primary key) engine=rocksdb; # Create a .frm file without a matching table --exec cp $MYSQLTEST_VARDIR/mysqld.$_server_id/data/test/t1.frm $MYSQLTEST_VARDIR/mysqld.$_server_id/data/test/t1#sql-test.frm -# Restart the server with a .frm file exist but that table is not registered in RocksDB ---exec echo "wait" >$_expect_file_name -shutdown_server 10; ---exec echo "restart" >$_expect_file_name ---sleep 5 ---enable_reconnect ---source include/wait_until_connected_again.inc ---disable_reconnect +--source include/restart_mysqld.inc + +show tables; + +# MariaDB produces a warning: +call mtr.add_suppression('Invalid .old.. 
table or database name .t1#sql-test.'); # This will append '#sql-test' to the end of new name set session debug_dbug="+d,gen_sql_table_name"; rename table t1 to t2; set session debug_dbug= "-d,gen_sql_table_name"; +show tables; + # Remove the corresponding .frm files --remove_files_wildcard $MYSQLTEST_VARDIR/mysqld.$_server_id/data/test *t1*.frm --remove_files_wildcard $MYSQLTEST_VARDIR/mysqld.$_server_id/data/test *t2*.frm # Restart the server with a table registered in RocksDB but does not have a .frm file ---exec echo "wait" >$_expect_file_name -shutdown_server 10; ---exec echo "restart" >$_expect_file_name ---sleep 5 ---enable_reconnect ---source include/wait_until_connected_again.inc ---disable_reconnect +--source include/restart_mysqld.inc + +show tables; + +# try to recreate a table with the same name +create table t2 (pk int primary key) engine=rocksdb; + +show tables; + +drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/transaction.test b/storage/rocksdb/mysql-test/rocksdb/t/transaction.test index a76fa8f9871..3350db99dab 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/transaction.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/transaction.test @@ -103,3 +103,33 @@ update t1 set id=115 where id=3; rollback; drop table t1; + +--echo # +--echo # #802: MyRocks: Statement rollback doesnt work correctly for nested statements +--echo # +create table t1 (a varchar(100)) engine=rocksdb; +create table t2(a int) engine=rocksdb; +insert into t2 values (1), (2); + +create table t3(a varchar(100)) engine=rocksdb; + +delimiter //; +create function func() returns varchar(100) deterministic +begin + insert into t3 values ('func-called'); + set @a= (select a from t2); + return 'func-returned'; +end;// +delimiter ;// + +begin; +--error ER_SUBQUERY_NO_1_ROW +insert into t1 values (func()); +select * from t1; +--echo # The following must not produce 'func-called': +select * from t3; + +rollback; +drop function func; +drop table t1,t2,t3; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_read_filtering.test b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_read_filtering.test index 5a694b7b222..4e8b081c4d5 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_read_filtering.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_read_filtering.test @@ -78,22 +78,28 @@ INSERT INTO t1 values (7); set global rocksdb_debug_ttl_rec_ts = 0; # should return nothing. +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; --sorted_result SELECT * FROM t1; +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; # disable filtering set global rocksdb_enable_ttl_read_filtering=0; # should return everything +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; --sorted_result SELECT * FROM t1; +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; -# disable filtering +# enable filtering set global rocksdb_enable_ttl_read_filtering=1; # should return nothing. 
+select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; --sorted_result SELECT * FROM t1; +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; DROP TABLE t1; @@ -286,28 +292,37 @@ SELECT * FROM t1; # <= shouldn't be filtered out here --echo # Switching to connection 2 connection con2; -# compaction doesn't do anythign since con1 snapshot is still open +# compaction doesn't do anything since con1 snapshot is still open set global rocksdb_force_flush_memtable_now=1; set global rocksdb_compact_cf='default'; # read filtered out, because on a different connection, on # this connection the records have 'expired' already so they are filtered out # even though they have not yet been removed by compaction + +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; --sorted_result SELECT * FROM t1; +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; --echo # Switching to connection 1 connection con1; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; --sorted_result SELECT * FROM t1; # <= shouldn't be filtered out here +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; UPDATE t1 set a = a + 1; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; --sorted_result SELECT * FROM t1; # <= shouldn't be filtered out here +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; COMMIT; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; --sorted_result # <= filtered out here because time has passed. 
SELECT * FROM t1; +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_rows_filtered'; DROP TABLE t1; disconnect con1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test index e45b6836f67..b631615c266 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test @@ -1,9 +1,5 @@ --source include/have_rocksdb.inc ---disable_warnings -drop table if exists t1,t2; ---enable_warnings - # # VARCHAR column types # @@ -73,3 +69,14 @@ select 'email_i' as index_name, count(*) AS count from t force index(email_i); drop table t; set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct; set session rocksdb_verify_row_debug_checksums = @save_rocksdb_verify_row_debug_checksums; + +# Issue #784 - Skip trailing space bytes for non-unpackable fields + +drop table if exists t; +create table t (h varchar(31) character set utf8 collate utf8_bin not null, i varchar(19) collate latin1_bin not null, primary key(i), key(h)) engine=rocksdb; +insert into t(i,h) values('a','b'); +check table t; +alter table t modify h varchar(31) character set cp1257 collate cp1257_bin not null; +check table t; +drop table t; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test b/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test index 7a053c659b2..8dfbe312ea8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test @@ -11,37 +11,51 @@ select plugin_name, plugin_type from information_schema.plugins where plugin_nam # caused an assertion in RocksDB. Now it should not be allowed and ROCKSDB # plugin will not load in such configuration. 
# -# We want the server to still start, so we specify default-storage-engine=myisam +--let LOG=$MYSQLTEST_VARDIR/tmp/use_direct_reads_writes.err +--let SEARCH_FILE=$LOG ---let $_mysqld_option=--rocksdb_use_direct_reads=1 --rocksdb_allow_mmap_reads=1 --default-storage-engine=myisam +--echo Checking direct reads +--let $_mysqld_option=--log-error=$LOG --rocksdb_use_direct_reads=1 --rocksdb_allow_mmap_reads=1 +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--source include/restart_mysqld_with_invalid_option.inc + +--let SEARCH_PATTERN=enable both use_direct_reads +--source include/search_pattern_in_file.inc +--remove_file $LOG + + +# Repeat with direct-writes +--echo Checking direct writes +--let $_mysqld_option=--log-error=$LOG --rocksdb_use_direct_io_for_flush_and_compaction=1 --rocksdb_allow_mmap_writes=1 +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--source include/restart_mysqld_with_invalid_option.inc + +--let SEARCH_PATTERN=enable both use_direct_io_for_flush_and_compaction +--source include/search_pattern_in_file.inc +--remove_file $LOG + + +# Verify invalid direct-writes and --rocksdb_flush_log_at_trx_commit combination at startup fails +--echo Checking rocksdb_flush_log_at_trx_commit +--let $_mysqld_option=--log-error=$LOG --rocksdb_flush_log_at_trx_commit=1 --rocksdb_allow_mmap_writes=1 +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--source include/restart_mysqld_with_invalid_option.inc + +--let SEARCH_PATTERN=rocksdb_flush_log_at_trx_commit needs to be +--source include/search_pattern_in_file.inc +--remove_file $LOG + + +# Verify rocksdb_flush_log_at_trx_commit cannot be changed if direct writes are used +--echo Validate flush_log settings when direct writes is enabled +--let $_mysqld_option=--rocksdb_flush_log_at_trx_commit=0 --rocksdb_allow_mmap_writes=1 --source include/restart_mysqld_with_option.inc ---echo # Check that ROCKSDB plugin is not loaded: -select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB'; +set global rocksdb_flush_log_at_trx_commit=0; +--error ER_WRONG_VALUE_FOR_VAR +set global rocksdb_flush_log_at_trx_commit=1; +--error ER_WRONG_VALUE_FOR_VAR +set global rocksdb_flush_log_at_trx_commit=2; ---echo # Check that MyRocks has printed an error message into server error log: -let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err; -let SEARCH_PATTERN=enable both use_direct_reads; -source include/search_pattern_in_file.inc; - ---echo # Now, restart the server back with regular settings +# Cleanup --source include/restart_mysqld.inc -select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB'; - ---echo # ---echo # Now, repeat the same with another set of invalid arguments ---echo # ---let $_mysqld_option=--rocksdb_use_direct_io_for_flush_and_compaction=1 --rocksdb_allow_mmap_writes=1 --default-storage-engine=myisam ---source include/restart_mysqld_with_option.inc - ---echo # Check that ROCKSDB plugin is not loaded: -select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB'; - -let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err; -let SEARCH_PATTERN=enable both use_direct_io_for_flush_and_compaction; -source include/search_pattern_in_file.inc; - ---echo # Now, restart the server back with regular settings ---source include/restart_mysqld.inc -select plugin_name, plugin_type from information_schema.plugins where plugin_name='RocksDB'; - diff --git a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test 
b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test index c20bb1fc89c..7c30d4fcbdb 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test @@ -1,5 +1,4 @@ --source include/have_rocksdb.inc ---source include/not_windows.inc # MDEV-12427 SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=true; @@ -7,7 +6,8 @@ SET GLOBAL rocksdb_write_ignore_missing_column_families=true; create table aaa (id int primary key, i int) engine rocksdb; set @save_rocksdb_flush_log_at_trx_commit=@@global.rocksdb_flush_log_at_trx_commit; SET GLOBAL rocksdb_flush_log_at_trx_commit=1; ---exec sleep 5 +insert aaa(id, i) values(0,1); + select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; insert aaa(id, i) values(1,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; @@ -16,18 +16,16 @@ select variable_value-@a from information_schema.global_status where variable_na insert aaa(id, i) values(3,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; -SET GLOBAL rocksdb_flush_log_at_trx_commit=0; ---exec sleep 5 select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +SET GLOBAL rocksdb_flush_log_at_trx_commit=0; insert aaa(id, i) values(4,1); let $status_var=rocksdb_wal_synced; let $status_var_value=`select @a+1`; source include/wait_for_status_var.inc; -SET GLOBAL rocksdb_flush_log_at_trx_commit=2; ---exec sleep 5 select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +SET GLOBAL rocksdb_flush_log_at_trx_commit=2; insert aaa(id, i) values(5,1); let $status_var=rocksdb_wal_synced; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc b/storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc index 8f03c16e2f1..d983bdf8b58 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc @@ -30,6 +30,7 @@ INSERT INTO t1 VALUES(1, 1); connection slave; --let $slave_sql_errno= 1062 --let $not_switch_connection= 0 +--let $slave_timeout= 120 --source include/wait_for_slave_sql_error_and_skip.inc set global reset_seconds_behind_master=0; --source include/stop_slave_io.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/singledelete_idempotent_recovery.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/singledelete_idempotent_recovery.result index 3d734c9498d..89e93f6b8f0 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/r/singledelete_idempotent_recovery.result +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/singledelete_idempotent_recovery.result @@ -10,6 +10,7 @@ insert into r1 values (1, 1000); set global rocksdb_force_flush_memtable_now=1; include/rpl_start_server.inc [server_number=2] include/start_slave.inc +insert into r1 values (2,2000); delete r1 from r1 force index (i) where id2=1000; select id1,id2 from r1 force index (primary) where id1=1 and id2=1000; id1 id2 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test index 6143824eea6..ff484171213 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test +++ 
b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test @@ -62,6 +62,7 @@ SET GLOBAL SYNC_BINLOG = 1; insert into t1 values (1000000, 1, "i_am_just_here_to_trigger_a_flush"); +--error 0,2013 SET DEBUG_SYNC='now SIGNAL go'; --source include/wait_until_disconnected.inc --enable_reconnect diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_recovery.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_recovery.test index 9180afa881f..6d953ead4e9 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_recovery.test +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_recovery.test @@ -53,8 +53,14 @@ EOF --source include/rpl_start_server.inc --source include/start_slave.inc + +# Due to the binlogs being truncated, the slave may still think it's processed up to +# the truncated binlog and select master_pos_wait() can return prematurely. Add +# a new transaction to the master to force master_pos_wait() to wait. connection master; +insert into r1 values (2,2000); sync_slave_with_master; + connection slave; delete r1 from r1 force index (i) where id2=1000; select id1,id2 from r1 force index (primary) where id1=1 and id2=1000; diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result index 3d76e035e05..9f161b18c05 100644 --- a/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result +++ b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result @@ -9,6 +9,8 @@ zero_sum INT DEFAULT 0, msg VARCHAR(1024), msg_length int, msg_checksum varchar(128), +auto_inc BIGINT UNSIGNED NOT NULL AUTO_INCREMENT, +KEY(auto_inc), KEY msg_i(msg(255), zero_sum)) ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; stop slave; diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result index 3d76e035e05..9f161b18c05 100644 --- a/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result +++ b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result @@ -9,6 +9,8 @@ zero_sum INT DEFAULT 0, msg VARCHAR(1024), msg_length int, msg_checksum varchar(128), +auto_inc BIGINT UNSIGNED NOT NULL AUTO_INCREMENT, +KEY(auto_inc), KEY msg_i(msg(255), zero_sum)) ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; stop slave; diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py b/storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py index 20098f49b42..c1d3e7fb81c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py +++ b/storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py @@ -95,6 +95,8 @@ LOADERS_READY = 0 REQUEST_ID = 1 REQUEST_ID_LOCK = threading.Lock() +INSERT_ID_SET = set() + def get_next_request_id(): global REQUEST_ID with REQUEST_ID_LOCK: @@ -302,10 +304,19 @@ class PopulateWorker(WorkerThread): execute(self.cur, stmt) if i % 101 == 0: self.con.commit() + check_id(self.con.insert_id()) self.con.commit() + check_id(self.con.insert_id()) logging.info("Inserted %d rows starting at id %d" % (self.num_to_add, self.start_id)) +def check_id(id): + if id == 0: + return + if id in INSERT_ID_SET: + raise Exception("Duplicate auto_inc id %d" % id) + INSERT_ID_SET.add(id) + def populate_table(num_records): logging.info("Populate_table started for %d records" % num_records) @@ -422,6 +433,7 @@ class LoadGenWorker(WorkerThread): execute(self.cur, 
gen_insert(self.table, idx, self.thread_id, request_id, 0)) self.con.commit() + check_id(self.con.insert_id()) self.id_map.append(request_id) @@ -687,6 +699,7 @@ class LoadGenWorker(WorkerThread): else: self.cur_txn_state = self.TXN_COMMIT_STARTED self.con.commit() + check_id(self.con.insert_id()) if not self.con.get_server_info(): raise MySQLdb.OperationalError(MySQLdb.constants.CR.CONNECTION_ERROR, "Possible connection error on commit") diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test index 7d92bb3f83a..307211a124d 100644 --- a/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test +++ b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test @@ -17,6 +17,8 @@ CREATE TABLE t1(id INT PRIMARY KEY, msg VARCHAR(1024), msg_length int, msg_checksum varchar(128), + auto_inc BIGINT UNSIGNED NOT NULL AUTO_INCREMENT, + KEY(auto_inc), KEY msg_i(msg(255), zero_sum)) ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test index 6f6128579b5..8ef4c73c3b0 100644 --- a/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test +++ b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test @@ -18,6 +18,8 @@ CREATE TABLE t1(id INT PRIMARY KEY, msg VARCHAR(1024), msg_length int, msg_checksum varchar(128), + auto_inc BIGINT UNSIGNED NOT NULL AUTO_INCREMENT, + KEY(auto_inc), KEY msg_i(msg(255), zero_sum)) ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_to_start_after_corruption_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_to_start_after_corruption_basic.result new file mode 100644 index 00000000000..086010dc79e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_to_start_after_corruption_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_ALLOW_TO_START_AFTER_CORRUPTION; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_ALLOW_TO_START_AFTER_CORRUPTION to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_ALLOW_TO_START_AFTER_CORRUPTION = 444; +ERROR HY000: Variable 'rocksdb_allow_to_start_after_corruption' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result index ede02afcb60..9af4f730a21 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result @@ -1,7 +1,85 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); SET @start_global_value = @@global.ROCKSDB_BYTES_PER_SYNC; SELECT @start_global_value; @start_global_value 0 -"Trying to set variable @@global.ROCKSDB_BYTES_PER_SYNC to 444. 
It should fail because it is readonly." -SET @@global.ROCKSDB_BYTES_PER_SYNC = 444; -ERROR HY000: Variable 'rocksdb_bytes_per_sync' is a read only variable +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_BYTES_PER_SYNC to 100" +SET @@global.ROCKSDB_BYTES_PER_SYNC = 100; +SELECT @@global.ROCKSDB_BYTES_PER_SYNC; +@@global.ROCKSDB_BYTES_PER_SYNC +100 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BYTES_PER_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_BYTES_PER_SYNC; +@@global.ROCKSDB_BYTES_PER_SYNC +0 +"Trying to set variable @@global.ROCKSDB_BYTES_PER_SYNC to 1" +SET @@global.ROCKSDB_BYTES_PER_SYNC = 1; +SELECT @@global.ROCKSDB_BYTES_PER_SYNC; +@@global.ROCKSDB_BYTES_PER_SYNC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BYTES_PER_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_BYTES_PER_SYNC; +@@global.ROCKSDB_BYTES_PER_SYNC +0 +"Trying to set variable @@global.ROCKSDB_BYTES_PER_SYNC to 0" +SET @@global.ROCKSDB_BYTES_PER_SYNC = 0; +SELECT @@global.ROCKSDB_BYTES_PER_SYNC; +@@global.ROCKSDB_BYTES_PER_SYNC +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BYTES_PER_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_BYTES_PER_SYNC; +@@global.ROCKSDB_BYTES_PER_SYNC +0 +"Trying to set variable @@session.ROCKSDB_BYTES_PER_SYNC to 444. It should fail because it is not session." +SET @@session.ROCKSDB_BYTES_PER_SYNC = 444; +ERROR HY000: Variable 'rocksdb_bytes_per_sync' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_BYTES_PER_SYNC to 'aaa'" +SET @@global.ROCKSDB_BYTES_PER_SYNC = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BYTES_PER_SYNC; +@@global.ROCKSDB_BYTES_PER_SYNC +0 +"Trying to set variable @@global.ROCKSDB_BYTES_PER_SYNC to 'bbb'" +SET @@global.ROCKSDB_BYTES_PER_SYNC = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BYTES_PER_SYNC; +@@global.ROCKSDB_BYTES_PER_SYNC +0 +"Trying to set variable @@global.ROCKSDB_BYTES_PER_SYNC to '-1'" +SET @@global.ROCKSDB_BYTES_PER_SYNC = '-1'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BYTES_PER_SYNC; +@@global.ROCKSDB_BYTES_PER_SYNC +0 +"Trying to set variable @@global.ROCKSDB_BYTES_PER_SYNC to '101'" +SET @@global.ROCKSDB_BYTES_PER_SYNC = '101'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BYTES_PER_SYNC; +@@global.ROCKSDB_BYTES_PER_SYNC +0 +"Trying to set variable @@global.ROCKSDB_BYTES_PER_SYNC to '484436'" +SET @@global.ROCKSDB_BYTES_PER_SYNC = '484436'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BYTES_PER_SYNC; +@@global.ROCKSDB_BYTES_PER_SYNC +0 +SET @@global.ROCKSDB_BYTES_PER_SYNC = @start_global_value; +SELECT @@global.ROCKSDB_BYTES_PER_SYNC; +@@global.ROCKSDB_BYTES_PER_SYNC +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result deleted file mode 100644 index 010ba954366..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result +++ /dev/null @@ -1,58 +0,0 @@ -drop table if exists t1; -CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb; -SHOW CREATE TABLE t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a` int(11) NOT NULL AUTO_INCREMENT, - `b` int(11) DEFAULT NULL, - PRIMARY KEY (`a`) -) ENGINE=ROCKSDB 
DEFAULT CHARSET=latin1 -INSERT INTO t1 (b) VALUES (1); -INSERT INTO t1 (b) VALUES (2); -INSERT INTO t1 (b) VALUES (3); -SELECT * FROM t1; -a b -1 1 -2 2 -3 3 -set session rocksdb_flush_memtable_on_analyze=off; -ANALYZE TABLE t1; -Table Op Msg_type Msg_text -test.t1 analyze status OK -SHOW INDEXES FROM t1; -Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment -t1 0 PRIMARY 1 a A 3 NULL NULL LSMTREE -set session rocksdb_flush_memtable_on_analyze=on; -ANALYZE TABLE t1; -Table Op Msg_type Msg_text -test.t1 analyze status OK -SHOW INDEXES FROM t1; -Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment -t1 0 PRIMARY 1 a A 3 NULL NULL LSMTREE -DROP TABLE t1; -CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb; -SHOW CREATE TABLE t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a` int(11) NOT NULL AUTO_INCREMENT, - `b` int(11) DEFAULT NULL, - PRIMARY KEY (`a`) -) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 -INSERT INTO t1 (b) VALUES (1); -INSERT INTO t1 (b) VALUES (2); -INSERT INTO t1 (b) VALUES (3); -SELECT * FROM t1; -a b -1 1 -2 2 -3 3 -SHOW TABLE STATUS LIKE 't1'; -Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary -t1 ROCKSDB 10 Fixed # # # 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL 0 N -ANALYZE TABLE t1; -Table Op Msg_type Msg_text -test.t1 analyze status OK -SHOW TABLE STATUS LIKE 't1'; -Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary -t1 ROCKSDB 10 Fixed # # # 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL 0 N -DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_ignore_unknown_options_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_ignore_unknown_options_basic.result new file mode 100644 index 00000000000..621213cd79b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_ignore_unknown_options_basic.result @@ -0,0 +1,14 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_IGNORE_UNKNOWN_OPTIONS; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_IGNORE_UNKNOWN_OPTIONS to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_IGNORE_UNKNOWN_OPTIONS = 444; +ERROR HY000: Variable 'rocksdb_ignore_unknown_options' is a read only variable +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result index b058ebf05f8..60f505310c6 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result @@ -1,7 +1,3 @@ -SET @start_global_value = @@global.ROCKSDB_MAX_OPEN_FILES; -SELECT @start_global_value; -@start_global_value --1 -"Trying to set variable @@global.ROCKSDB_MAX_OPEN_FILES to 444. It should fail because it is readonly." -SET @@global.ROCKSDB_MAX_OPEN_FILES = 444; -ERROR HY000: Variable 'rocksdb_max_open_files' is a read only variable +show variables like 'rocksdb_max_open_files'; +Variable_name Value +rocksdb_max_open_files # diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result index e417e4d5c4e..c925a68d4ed 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result @@ -6,11 +6,11 @@ INSERT INTO invalid_values VALUES('\'aaa\''); SET @start_global_value = @@global.ROCKSDB_MAX_ROW_LOCKS; SELECT @start_global_value; @start_global_value -1073741824 +1048576 SET @start_session_value = @@session.ROCKSDB_MAX_ROW_LOCKS; SELECT @start_session_value; @start_session_value -1073741824 +1048576 '# Setting to valid values in global scope#' "Trying to set variable @@global.ROCKSDB_MAX_ROW_LOCKS to 1" SET @@global.ROCKSDB_MAX_ROW_LOCKS = 1; @@ -21,7 +21,7 @@ SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; SET @@global.ROCKSDB_MAX_ROW_LOCKS = DEFAULT; SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; @@global.ROCKSDB_MAX_ROW_LOCKS -1073741824 +1048576 "Trying to set variable @@global.ROCKSDB_MAX_ROW_LOCKS to 1024" SET @@global.ROCKSDB_MAX_ROW_LOCKS = 1024; SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; @@ -31,7 +31,7 @@ SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; SET @@global.ROCKSDB_MAX_ROW_LOCKS = DEFAULT; SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; @@global.ROCKSDB_MAX_ROW_LOCKS -1073741824 +1048576 '# Setting to valid values in session scope#' "Trying to set variable @@session.ROCKSDB_MAX_ROW_LOCKS to 1" SET @@session.ROCKSDB_MAX_ROW_LOCKS = 1; @@ -42,7 +42,7 @@ SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; SET @@session.ROCKSDB_MAX_ROW_LOCKS = DEFAULT; SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; @@session.ROCKSDB_MAX_ROW_LOCKS -1073741824 +1048576 "Trying to set variable @@session.ROCKSDB_MAX_ROW_LOCKS to 1024" SET @@session.ROCKSDB_MAX_ROW_LOCKS = 1024; SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; @@ -52,21 +52,21 @@ SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; SET @@session.ROCKSDB_MAX_ROW_LOCKS = DEFAULT; SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; @@session.ROCKSDB_MAX_ROW_LOCKS -1073741824 +1048576 '# Testing with invalid values in global scope #' "Trying to set variable @@global.ROCKSDB_MAX_ROW_LOCKS to 'aaa'" SET @@global.ROCKSDB_MAX_ROW_LOCKS = 'aaa'; Got one of the listed errors SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; @@global.ROCKSDB_MAX_ROW_LOCKS -1073741824 +1048576 SET @@global.ROCKSDB_MAX_ROW_LOCKS = @start_global_value; SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; @@global.ROCKSDB_MAX_ROW_LOCKS -1073741824 +1048576 SET 
@@session.ROCKSDB_MAX_ROW_LOCKS = @start_session_value; SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; @@session.ROCKSDB_MAX_ROW_LOCKS -1073741824 +1048576 DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_concurrent_prepare_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_two_write_queues_basic.result similarity index 54% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_concurrent_prepare_basic.result rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_two_write_queues_basic.result index 11d4f2363f6..5a19016bf91 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_concurrent_prepare_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_two_write_queues_basic.result @@ -3,12 +3,12 @@ INSERT INTO valid_values VALUES(1); INSERT INTO valid_values VALUES(1024); CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; INSERT INTO invalid_values VALUES('\'aaa\''); -SET @start_global_value = @@global.ROCKSDB_CONCURRENT_PREPARE; +SET @start_global_value = @@global.ROCKSDB_TWO_WRITE_QUEUES; SELECT @start_global_value; @start_global_value 1 -"Trying to set variable @@global.ROCKSDB_CONCURRENT_PREPARE to 444. It should fail because it is readonly." -SET @@global.ROCKSDB_CONCURRENT_PREPARE = 444; -ERROR HY000: Variable 'rocksdb_concurrent_prepare' is a read only variable +"Trying to set variable @@global.ROCKSDB_TWO_WRITE_QUEUES to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_TWO_WRITE_QUEUES = 444; +ERROR HY000: Variable 'rocksdb_two_write_queues' is a read only variable DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_update_cf_options.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_update_cf_options.result new file mode 100644 index 00000000000..126b4cffe8b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_update_cf_options.result @@ -0,0 +1,38 @@ +CREATE TABLE t1 (a INT, PRIMARY KEY (a) COMMENT 'update_cf1') ENGINE=ROCKSDB; +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS='update_cf1={write_buffer_size=8m;target_file_size_base=2m};'; +SELECT @@global.rocksdb_update_cf_options; +@@global.rocksdb_update_cf_options +update_cf1={write_buffer_size=8m;target_file_size_base=2m}; +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS=NULL; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +Variable_name Value +rocksdb_update_cf_options +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS=NULL; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +Variable_name Value +rocksdb_update_cf_options +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS=""; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +Variable_name Value +rocksdb_update_cf_options +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS=NULL; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +Variable_name Value +rocksdb_update_cf_options +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS='update_cf1={write_buffer_size=8m;target_file_size_base=2m};'; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +Variable_name Value +rocksdb_update_cf_options update_cf1={write_buffer_size=8m;target_file_size_base=2m}; +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS='update_cf2={write_buffer_size=8m;target_file_size_base=2m};'; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +Variable_name Value +rocksdb_update_cf_options update_cf2={write_buffer_size=8m;target_file_size_base=2m}; +DROP TABLE t1; +SET 
@@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS='update_cf1={write_buffer_size=8m;target_file_size_base=2m};'; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +Variable_name Value +rocksdb_update_cf_options update_cf1={write_buffer_size=8m;target_file_size_base=2m}; +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS=DEFAULT; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +Variable_name Value +rocksdb_update_cf_options diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_update_cf_options_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_update_cf_options_basic.result index 5ad5394db29..ba24fafd0ec 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_update_cf_options_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_update_cf_options_basic.result @@ -32,10 +32,19 @@ SET @@global.rocksdb_update_cf_options = NULL; SELECT @@global.rocksdb_update_cf_options; @@global.rocksdb_update_cf_options NULL -SET @@global.rocksdb_update_cf_options = 'aaaaa'; +SET @@global.rocksdb_update_cf_options = NULL; SELECT @@global.rocksdb_update_cf_options; @@global.rocksdb_update_cf_options NULL +SET @@global.rocksdb_update_cf_options = ''; +SELECT @@global.rocksdb_update_cf_options; +@@global.rocksdb_update_cf_options + +SET @@global.rocksdb_update_cf_options = 'aaaaa';; +ERROR 42000: Variable 'rocksdb_update_cf_options' can't be set to the value of 'aaaaa' +SELECT @@global.rocksdb_update_cf_options; +@@global.rocksdb_update_cf_options + SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='default' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; CF_NAME OPTION_TYPE VALUE default WRITE_BUFFER_SIZE 67108864 @@ -100,7 +109,12 @@ cf1={target_file_size_base=24m};foo={max_bytes_for_level_multiplier=8}; SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf1' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; CF_NAME OPTION_TYPE VALUE cf1 TARGET_FILE_SIZE_BASE 25165824 -SET @@global.rocksdb_update_cf_options = 'default={foo=bar};'; +SET @@global.rocksdb_update_cf_options = 'default={foo=bar};';; +ERROR 42000: Variable 'rocksdb_update_cf_options' can't be set to the value of 'default={foo=bar};' +SELECT @@global.rocksdb_update_cf_options; +@@global.rocksdb_update_cf_options +cf1={target_file_size_base=24m};foo={max_bytes_for_level_multiplier=8}; +SET @@global.rocksdb_update_cf_options = NULL; SELECT @@global.rocksdb_update_cf_options; @@global.rocksdb_update_cf_options NULL diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result index 7da628b73fd..f432f1f7750 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result @@ -1,7 +1,85 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); SET @start_global_value = @@global.ROCKSDB_WAL_BYTES_PER_SYNC; SELECT @start_global_value; @start_global_value 0 -"Trying to set variable @@global.ROCKSDB_WAL_BYTES_PER_SYNC to 444. It should fail because it is readonly." 
-SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = 444; -ERROR HY000: Variable 'rocksdb_wal_bytes_per_sync' is a read only variable +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_WAL_BYTES_PER_SYNC to 100" +SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = 100; +SELECT @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +@@global.ROCKSDB_WAL_BYTES_PER_SYNC +100 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +@@global.ROCKSDB_WAL_BYTES_PER_SYNC +0 +"Trying to set variable @@global.ROCKSDB_WAL_BYTES_PER_SYNC to 1" +SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = 1; +SELECT @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +@@global.ROCKSDB_WAL_BYTES_PER_SYNC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +@@global.ROCKSDB_WAL_BYTES_PER_SYNC +0 +"Trying to set variable @@global.ROCKSDB_WAL_BYTES_PER_SYNC to 0" +SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = 0; +SELECT @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +@@global.ROCKSDB_WAL_BYTES_PER_SYNC +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +@@global.ROCKSDB_WAL_BYTES_PER_SYNC +0 +"Trying to set variable @@session.ROCKSDB_WAL_BYTES_PER_SYNC to 444. It should fail because it is not session." +SET @@session.ROCKSDB_WAL_BYTES_PER_SYNC = 444; +ERROR HY000: Variable 'rocksdb_wal_bytes_per_sync' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_WAL_BYTES_PER_SYNC to 'aaa'" +SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +@@global.ROCKSDB_WAL_BYTES_PER_SYNC +0 +"Trying to set variable @@global.ROCKSDB_WAL_BYTES_PER_SYNC to 'bbb'" +SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +@@global.ROCKSDB_WAL_BYTES_PER_SYNC +0 +"Trying to set variable @@global.ROCKSDB_WAL_BYTES_PER_SYNC to '-1'" +SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = '-1'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +@@global.ROCKSDB_WAL_BYTES_PER_SYNC +0 +"Trying to set variable @@global.ROCKSDB_WAL_BYTES_PER_SYNC to '101'" +SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = '101'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +@@global.ROCKSDB_WAL_BYTES_PER_SYNC +0 +"Trying to set variable @@global.ROCKSDB_WAL_BYTES_PER_SYNC to '484436'" +SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = '484436'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +@@global.ROCKSDB_WAL_BYTES_PER_SYNC +0 +SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = @start_global_value; +SELECT @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +@@global.ROCKSDB_WAL_BYTES_PER_SYNC +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_to_start_after_corruption_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_to_start_after_corruption_basic.test new file mode 100644 index 00000000000..64fb2458424 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_to_start_after_corruption_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_ALLOW_TO_START_AFTER_CORRUPTION +--let $read_only=1 
+--let $session=0 +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test index d1d6b2b5695..bf78f578b6c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test @@ -1,7 +1,22 @@ --source include/have_rocksdb.inc +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); + --let $sys_var=ROCKSDB_BYTES_PER_SYNC ---let $read_only=1 +--let $read_only=0 --let $session=0 --source include/rocksdb_sys_var.inc +DROP TABLE valid_values; +DROP TABLE invalid_values; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test deleted file mode 100644 index 574375cd1ea..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test +++ /dev/null @@ -1,46 +0,0 @@ ---source include/have_rocksdb.inc - ---disable_warnings -drop table if exists t1; ---enable_warnings - -## -## test cardinality for analyze statements after flushing table -## - -CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb; -SHOW CREATE TABLE t1; -INSERT INTO t1 (b) VALUES (1); -INSERT INTO t1 (b) VALUES (2); -INSERT INTO t1 (b) VALUES (3); ---sorted_result -SELECT * FROM t1; - -set session rocksdb_flush_memtable_on_analyze=off; -ANALYZE TABLE t1; -SHOW INDEXES FROM t1; - -set session rocksdb_flush_memtable_on_analyze=on; -ANALYZE TABLE t1; -SHOW INDEXES FROM t1; -DROP TABLE t1; - -## -## test data length for show table status statements for tables with few rows -## - -CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb; -SHOW CREATE TABLE t1; -INSERT INTO t1 (b) VALUES (1); -INSERT INTO t1 (b) VALUES (2); -INSERT INTO t1 (b) VALUES (3); ---sorted_result -SELECT * FROM t1; - ---replace_column 5 # 6 # 7 # -SHOW TABLE STATUS LIKE 't1'; -ANALYZE TABLE t1; ---replace_column 5 # 6 # 7 # -SHOW TABLE STATUS LIKE 't1'; - -DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_ignore_unknown_options_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_ignore_unknown_options_basic.test new file mode 100644 index 00000000000..f10ff2c6123 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_ignore_unknown_options_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_IGNORE_UNKNOWN_OPTIONS +--let $read_only=1 +--let $session=0 +--source ../include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test index ba3293264ab..36996761507 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test @@ -1,6 +1,8 @@ --source include/have_rocksdb.inc ---let $sys_var=ROCKSDB_MAX_OPEN_FILES ---let $read_only=1 ---let $session=0 ---source include/rocksdb_sys_var.inc +# We can not use rocksdb_sys_var.inc here as this is a global, read only option +# whose value is dependent on the servers open_files_limit. It is more fully +# tested in the rocksdb.max_open_files test. + +--replace_column 2 # +show variables like 'rocksdb_max_open_files'; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_concurrent_prepare_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_two_write_queues_basic.test similarity index 90% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_concurrent_prepare_basic.test rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_two_write_queues_basic.test index 451653fe769..43579faba82 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_concurrent_prepare_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_two_write_queues_basic.test @@ -7,7 +7,7 @@ INSERT INTO valid_values VALUES(1024); CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; INSERT INTO invalid_values VALUES('\'aaa\''); ---let $sys_var=ROCKSDB_CONCURRENT_PREPARE +--let $sys_var=ROCKSDB_TWO_WRITE_QUEUES --let $read_only=1 --let $session=0 --source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options.test new file mode 100644 index 00000000000..03626260cab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options.test @@ -0,0 +1,22 @@ +--source include/have_rocksdb.inc + +CREATE TABLE t1 (a INT, PRIMARY KEY (a) COMMENT 'update_cf1') ENGINE=ROCKSDB; +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS='update_cf1={write_buffer_size=8m;target_file_size_base=2m};'; +SELECT @@global.rocksdb_update_cf_options; +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS=NULL; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS=NULL; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS=""; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS=NULL; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS='update_cf1={write_buffer_size=8m;target_file_size_base=2m};'; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS='update_cf2={write_buffer_size=8m;target_file_size_base=2m};'; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +DROP TABLE t1; +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS='update_cf1={write_buffer_size=8m;target_file_size_base=2m};'; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; +SET @@GLOBAL.ROCKSDB_UPDATE_CF_OPTIONS=DEFAULT; +SHOW GLOBAL VARIABLES LIKE 'rocksdb_update_cf_options'; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options_basic.test index 
0e675dafed3..533b2db8204 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options_basic.test @@ -39,8 +39,17 @@ SELECT @@global.rocksdb_update_cf_options; SET @@global.rocksdb_update_cf_options = NULL; SELECT @@global.rocksdb_update_cf_options; +# Make sure that we do not double free the NULL string +SET @@global.rocksdb_update_cf_options = NULL; +SELECT @@global.rocksdb_update_cf_options; + +# Attempt setting an empty string +SET @@global.rocksdb_update_cf_options = ''; +SELECT @@global.rocksdb_update_cf_options; + # Will fail to parse. Value not updated. -SET @@global.rocksdb_update_cf_options = 'aaaaa'; +--Error ER_WRONG_VALUE_FOR_VAR +--eval SET @@global.rocksdb_update_cf_options = 'aaaaa'; SELECT @@global.rocksdb_update_cf_options; SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='default' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; @@ -87,7 +96,11 @@ SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf1' AND OPTION_TYPE='TARGET_FIL # Will fail to parse. No valid assignments included. Value not updated and # reset to NULL. -SET @@global.rocksdb_update_cf_options = 'default={foo=bar};'; +--Error ER_WRONG_VALUE_FOR_VAR +--eval SET @@global.rocksdb_update_cf_options = 'default={foo=bar};'; +SELECT @@global.rocksdb_update_cf_options; + +SET @@global.rocksdb_update_cf_options = NULL; SELECT @@global.rocksdb_update_cf_options; USE test; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test index afab0f20d40..9c2a1f4f391 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test @@ -1,6 +1,22 @@ --source include/have_rocksdb.inc +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); + --let $sys_var=ROCKSDB_WAL_BYTES_PER_SYNC ---let $read_only=1 +--let $read_only=0 --let $session=0 --source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; + diff --git a/storage/rocksdb/mysql-test/storage_engine/type_enum.rdiff b/storage/rocksdb/mysql-test/storage_engine/type_enum.rdiff deleted file mode 100644 index a402e0fb418..00000000000 --- a/storage/rocksdb/mysql-test/storage_engine/type_enum.rdiff +++ /dev/null @@ -1,20 +0,0 @@ ---- /data/src/bb-10.2-mdev12528/mysql-test/suite/storage_engine/type_enum.result 2017-06-22 00:33:46.423995639 +0300 -+++ /data/src/bb-10.2-mdev12528/mysql-test/suite/storage_engine/type_enum.reject 2017-06-22 02:55:49.599942066 +0300 -@@ -24,8 +24,6 @@ - test2 4 - test5 2 - ALTER TABLE t1 ADD COLUMN e ENUM('a','A') ; --Warnings: --Note 1291 Column 'e' has duplicated value 'a' in ENUM - SHOW COLUMNS IN t1; - Field Type Null Key Default Extra - a enum('') # # # -@@ -37,7 +35,7 @@ - a b c e - NULL - test2 4 NULL -- test3 75 a -+ test3 75 A - test5 2 NULL - SELECT a,b,c,e FROM t1 WHERE b='test2' OR a != ''; - a b c e diff --git 
a/storage/rocksdb/mysql-test/storage_engine/type_set.rdiff b/storage/rocksdb/mysql-test/storage_engine/type_set.rdiff deleted file mode 100644 index c5cbeaedecf..00000000000 --- a/storage/rocksdb/mysql-test/storage_engine/type_set.rdiff +++ /dev/null @@ -1,11 +0,0 @@ ---- /data/src/bb-10.2-mdev12528/mysql-test/suite/storage_engine/type_set.result 2017-06-22 00:33:46.423995639 +0300 -+++ /data/src/bb-10.2-mdev12528/mysql-test/suite/storage_engine/type_set.reject 2017-06-22 03:02:58.695939369 +0300 -@@ -30,8 +30,6 @@ - test2,test3 01,23,34,44 - test2,test4 - ALTER TABLE t1 ADD COLUMN e SET('a','A') ; --Warnings: --Note 1291 Column 'e' has duplicated value 'a' in SET - SHOW COLUMNS IN t1; - Field Type Null Key Default Extra - a set('') # # # diff --git a/storage/rocksdb/patch/port/win/io_win.h b/storage/rocksdb/patch/port/win/io_win.h deleted file mode 100644 index f5ff253bbaa..00000000000 --- a/storage/rocksdb/patch/port/win/io_win.h +++ /dev/null @@ -1,446 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -#pragma once - -#include -#include -#include - -#include "rocksdb/status.h" -#include "rocksdb/env.h" -#include "util/aligned_buffer.h" - -#include - - -namespace rocksdb { -namespace port { - -std::string GetWindowsErrSz(DWORD err); - -inline Status IOErrorFromWindowsError(const std::string& context, DWORD err) { - return ((err == ERROR_HANDLE_DISK_FULL) || (err == ERROR_DISK_FULL)) - ? Status::NoSpace(context, GetWindowsErrSz(err)) - : Status::IOError(context, GetWindowsErrSz(err)); -} - -inline Status IOErrorFromLastWindowsError(const std::string& context) { - return IOErrorFromWindowsError(context, GetLastError()); -} - -inline Status IOError(const std::string& context, int err_number) { - return (err_number == ENOSPC) - ? Status::NoSpace(context, strerror(err_number)) - : Status::IOError(context, strerror(err_number)); -} - -// Note the below two do not set errno because they are used only here in this -// file -// on a Windows handle and, therefore, not necessary. Translating GetLastError() -// to errno -// is a sad business -inline int fsync(HANDLE hFile) { - if (!FlushFileBuffers(hFile)) { - return -1; - } - - return 0; -} - -SSIZE_T pwrite(HANDLE hFile, const char* src, size_t numBytes, uint64_t offset); - -SSIZE_T pread(HANDLE hFile, char* src, size_t numBytes, uint64_t offset); - -Status fallocate(const std::string& filename, HANDLE hFile, uint64_t to_size); - -Status ftruncate(const std::string& filename, HANDLE hFile, uint64_t toSize); - -size_t GetUniqueIdFromFile(HANDLE hFile, char* id, size_t max_size); - -class WinFileData { - protected: - const std::string filename_; - HANDLE hFile_; - // If ture, the I/O issued would be direct I/O which the buffer - // will need to be aligned (not sure there is a guarantee that the buffer - // passed in is aligned). 
- const bool use_direct_io_; - - public: - // We want this class be usable both for inheritance (prive - // or protected) and for containment so __ctor and __dtor public - WinFileData(const std::string& filename, HANDLE hFile, bool direct_io) - : filename_(filename), hFile_(hFile), use_direct_io_(direct_io) {} - - virtual ~WinFileData() { this->CloseFile(); } - - bool CloseFile() { - bool result = true; - - if (hFile_ != NULL && hFile_ != INVALID_HANDLE_VALUE) { - result = ::CloseHandle(hFile_); - assert(result); - hFile_ = NULL; - } - return result; - } - - const std::string& GetName() const { return filename_; } - - HANDLE GetFileHandle() const { return hFile_; } - - bool use_direct_io() const { return use_direct_io_; } - - WinFileData(const WinFileData&) = delete; - WinFileData& operator=(const WinFileData&) = delete; -}; - -class WinSequentialFile : protected WinFileData, public SequentialFile { - - // Override for behavior change when creating a custom env - virtual SSIZE_T PositionedReadInternal(char* src, size_t numBytes, - uint64_t offset) const; - -public: - WinSequentialFile(const std::string& fname, HANDLE f, - const EnvOptions& options); - - ~WinSequentialFile(); - - WinSequentialFile(const WinSequentialFile&) = delete; - WinSequentialFile& operator=(const WinSequentialFile&) = delete; - - virtual Status Read(size_t n, Slice* result, char* scratch) override; - virtual Status PositionedRead(uint64_t offset, size_t n, Slice* result, - char* scratch) override; - - virtual Status Skip(uint64_t n) override; - - virtual Status InvalidateCache(size_t offset, size_t length) override; - - virtual bool use_direct_io() const override { return WinFileData::use_direct_io(); } -}; - -// mmap() based random-access -class WinMmapReadableFile : private WinFileData, public RandomAccessFile { - HANDLE hMap_; - - const void* mapped_region_; - const size_t length_; - - public: - // mapped_region_[0,length-1] contains the mmapped contents of the file. - WinMmapReadableFile(const std::string& fileName, HANDLE hFile, HANDLE hMap, - const void* mapped_region, size_t length); - - ~WinMmapReadableFile(); - - WinMmapReadableFile(const WinMmapReadableFile&) = delete; - WinMmapReadableFile& operator=(const WinMmapReadableFile&) = delete; - - virtual Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const override; - - virtual Status InvalidateCache(size_t offset, size_t length) override; - - virtual size_t GetUniqueId(char* id, size_t max_size) const override; -}; - -// We preallocate and use memcpy to append new -// data to the file. This is safe since we either properly close the -// file before reading from it, or for log files, the reading code -// knows enough to skip zero suffixes. -class WinMmapFile : private WinFileData, public WritableFile { - private: - HANDLE hMap_; - - const size_t page_size_; // We flush the mapping view in page_size - // increments. 
We may decide if this is a memory - // page size or SSD page size - const size_t - allocation_granularity_; // View must start at such a granularity - - size_t reserved_size_; // Preallocated size - - size_t mapping_size_; // The max size of the mapping object - // we want to guess the final file size to minimize the remapping - size_t view_size_; // How much memory to map into a view at a time - - char* mapped_begin_; // Must begin at the file offset that is aligned with - // allocation_granularity_ - char* mapped_end_; - char* dst_; // Where to write next (in range [mapped_begin_,mapped_end_]) - char* last_sync_; // Where have we synced up to - - uint64_t file_offset_; // Offset of mapped_begin_ in file - - // Do we have unsynced writes? - bool pending_sync_; - - // Can only truncate or reserve to a sector size aligned if - // used on files that are opened with Unbuffered I/O - Status TruncateFile(uint64_t toSize); - - Status UnmapCurrentRegion(); - - Status MapNewRegion(); - - virtual Status PreallocateInternal(uint64_t spaceToReserve); - - public: - WinMmapFile(const std::string& fname, HANDLE hFile, size_t page_size, - size_t allocation_granularity, const EnvOptions& options); - - ~WinMmapFile(); - - WinMmapFile(const WinMmapFile&) = delete; - WinMmapFile& operator=(const WinMmapFile&) = delete; - - virtual Status Append(const Slice& data) override; - - // Means Close() will properly take care of truncate - // and it does not need any additional information - virtual Status Truncate(uint64_t size) override; - - virtual Status Close() override; - - virtual Status Flush() override; - - // Flush only data - virtual Status Sync() override; - - /** - * Flush data as well as metadata to stable storage. - */ - virtual Status Fsync() override; - - /** - * Get the size of valid data in the file. This will not match the - * size that is returned from the filesystem because we use mmap - * to extend file by map_size every time. 
- */ - virtual uint64_t GetFileSize() override; - - virtual Status InvalidateCache(size_t offset, size_t length) override; - - virtual Status Allocate(uint64_t offset, uint64_t len) override; - - virtual size_t GetUniqueId(char* id, size_t max_size) const override; -}; - -class WinRandomAccessImpl { - protected: - WinFileData* file_base_; - size_t alignment_; - - // Override for behavior change when creating a custom env - virtual SSIZE_T PositionedReadInternal(char* src, size_t numBytes, - uint64_t offset) const; - - WinRandomAccessImpl(WinFileData* file_base, size_t alignment, - const EnvOptions& options); - - virtual ~WinRandomAccessImpl() {} - - Status ReadImpl(uint64_t offset, size_t n, Slice* result, - char* scratch) const; - - size_t GetAlignment() const { return alignment_; } - - public: - - WinRandomAccessImpl(const WinRandomAccessImpl&) = delete; - WinRandomAccessImpl& operator=(const WinRandomAccessImpl&) = delete; -}; - -// pread() based random-access -class WinRandomAccessFile - : private WinFileData, - protected WinRandomAccessImpl, // Want to be able to override - // PositionedReadInternal - public RandomAccessFile { - public: - WinRandomAccessFile(const std::string& fname, HANDLE hFile, size_t alignment, - const EnvOptions& options); - - ~WinRandomAccessFile(); - - virtual Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const override; - - virtual size_t GetUniqueId(char* id, size_t max_size) const override; - - virtual bool use_direct_io() const override { return WinFileData::use_direct_io(); } - - virtual Status InvalidateCache(size_t offset, size_t length) override; - - virtual size_t GetRequiredBufferAlignment() const override; -}; - -// This is a sequential write class. It has been mimicked (as others) after -// the original Posix class. We add support for unbuffered I/O on windows as -// well -// we utilize the original buffer as an alignment buffer to write directly to -// file with no buffering. -// No buffering requires that the provided buffer is aligned to the physical -// sector size (SSD page size) and -// that all SetFilePointer() operations to occur with such an alignment. -// We thus always write in sector/page size increments to the drive and leave -// the tail for the next write OR for Close() at which point we pad with zeros. -// No padding is required for -// buffered access. -class WinWritableImpl { - protected: - WinFileData* file_data_; - const uint64_t alignment_; - uint64_t next_write_offset_; // Needed because Windows does not support O_APPEND - uint64_t reservedsize_; // how far we have reserved space - - virtual Status PreallocateInternal(uint64_t spaceToReserve); - - WinWritableImpl(WinFileData* file_data, size_t alignment); - - ~WinWritableImpl() {} - - - uint64_t GetAlignement() const { return alignment_; } - - Status AppendImpl(const Slice& data); - - // Requires that the data is aligned as specified by - // GetRequiredBufferAlignment() - Status PositionedAppendImpl(const Slice& data, uint64_t offset); - - Status TruncateImpl(uint64_t size); - - Status CloseImpl(); - - Status SyncImpl(); - - uint64_t GetFileNextWriteOffset() { - // Double accounting now here with WritableFileWriter - // and this size will be wrong when unbuffered access is used - // but tests implement their own writable files and do not use - // WritableFileWrapper - // so we need to squeeze a square peg through - // a round hole here. 
- return next_write_offset_; - } - - Status AllocateImpl(uint64_t offset, uint64_t len); - - public: - WinWritableImpl(const WinWritableImpl&) = delete; - WinWritableImpl& operator=(const WinWritableImpl&) = delete; -}; - -class WinWritableFile : private WinFileData, - protected WinWritableImpl, - public WritableFile { - public: - WinWritableFile(const std::string& fname, HANDLE hFile, size_t alignment, - size_t capacity, const EnvOptions& options); - - ~WinWritableFile(); - - bool IsSyncThreadSafe() const override { - return true; - } - - virtual Status Append(const Slice& data) override; - - // Requires that the data is aligned as specified by - // GetRequiredBufferAlignment() - virtual Status PositionedAppend(const Slice& data, uint64_t offset) override; - - // Need to implement this so the file is truncated correctly - // when buffered and unbuffered mode - virtual Status Truncate(uint64_t size) override; - - virtual Status Close() override; - - // write out the cached data to the OS cache - // This is now taken care of the WritableFileWriter - virtual Status Flush() override; - - virtual Status Sync() override; - - virtual Status Fsync() override; - - // Indicates if the class makes use of direct I/O - // Use PositionedAppend - virtual bool use_direct_io() const override; - - virtual size_t GetRequiredBufferAlignment() const override; - - virtual uint64_t GetFileSize() override; - - virtual Status Allocate(uint64_t offset, uint64_t len) override; - - virtual size_t GetUniqueId(char* id, size_t max_size) const override; -}; - -class WinRandomRWFile : private WinFileData, - protected WinRandomAccessImpl, - protected WinWritableImpl, - public RandomRWFile { - public: - WinRandomRWFile(const std::string& fname, HANDLE hFile, size_t alignment, - const EnvOptions& options); - - ~WinRandomRWFile() {} - - // Indicates if the class makes use of direct I/O - // If false you must pass aligned buffer to Write() - virtual bool use_direct_io() const override; - - // Use the returned alignment value to allocate aligned - // buffer for Write() when use_direct_io() returns true - virtual size_t GetRequiredBufferAlignment() const override; - - // Write bytes in `data` at offset `offset`, Returns Status::OK() on success. - // Pass aligned buffer when use_direct_io() returns true. - virtual Status Write(uint64_t offset, const Slice& data) override; - - // Read up to `n` bytes starting from offset `offset` and store them in - // result, provided `scratch` size should be at least `n`. - // Returns Status::OK() on success. 
- virtual Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const override; - - virtual Status Flush() override; - - virtual Status Sync() override; - - virtual Status Fsync() { return Sync(); } - - virtual Status Close() override; -}; - -class WinDirectory : public Directory { - public: - WinDirectory() {} - - virtual Status Fsync() override; -}; - -class WinFileLock : public FileLock { - public: - explicit WinFileLock(HANDLE hFile) : hFile_(hFile) { - assert(hFile != NULL); - assert(hFile != INVALID_HANDLE_VALUE); - } - - ~WinFileLock(); - - private: - HANDLE hFile_; -}; -} -} diff --git a/storage/rocksdb/properties_collector.cc b/storage/rocksdb/properties_collector.cc index ee47306afa9..c2bfb2b0474 100644 --- a/storage/rocksdb/properties_collector.cc +++ b/storage/rocksdb/properties_collector.cc @@ -56,17 +56,9 @@ Rdb_tbl_prop_coll::Rdb_tbl_prop_coll(Rdb_ddl_manager *const ddl_manager, : m_cf_id(cf_id), m_ddl_manager(ddl_manager), m_last_stats(nullptr), m_rows(0l), m_window_pos(0l), m_deleted_rows(0l), m_max_deleted_rows(0l), m_file_size(0), m_params(params), - m_table_stats_sampling_pct(table_stats_sampling_pct), - m_seed(time(nullptr)), m_card_adj_extra(1.) { + m_cardinality_collector(table_stats_sampling_pct) { DBUG_ASSERT(ddl_manager != nullptr); - // We need to adjust the index cardinality numbers based on the sampling - // rate so that the output of "SHOW INDEX" command will reflect reality - // more closely. It will still be an approximation, just a better one. - if (m_table_stats_sampling_pct > 0) { - m_card_adj_extra = 100. / m_table_stats_sampling_pct; - } - m_deleted_rows_window.resize(m_params.m_window, false); } @@ -149,7 +141,7 @@ Rdb_index_stats *Rdb_tbl_prop_coll::AccessStats(const rocksdb::Slice &key) { m_last_stats->m_name = m_keydef->get_name(); } } - m_last_key.clear(); + m_cardinality_collector.Reset(); } return m_last_stats; @@ -159,7 +151,7 @@ void Rdb_tbl_prop_coll::CollectStatsForRow(const rocksdb::Slice &key, const rocksdb::Slice &value, const rocksdb::EntryType &type, const uint64_t &file_size) { - const auto stats = AccessStats(key); + auto stats = AccessStats(key); stats->m_data_size += key.size() + value.size(); @@ -185,38 +177,15 @@ void Rdb_tbl_prop_coll::CollectStatsForRow(const rocksdb::Slice &key, sql_print_error("RocksDB: Unexpected entry type found: %u. 
" "This should not happen so aborting the system.", type); - abort_with_stack_traces(); + abort(); break; } stats->m_actual_disk_size += file_size - m_file_size; m_file_size = file_size; - if (m_keydef != nullptr && ShouldCollectStats()) { - std::size_t column = 0; - bool new_key = true; - - if (!m_last_key.empty()) { - rocksdb::Slice last(m_last_key.data(), m_last_key.size()); - new_key = (m_keydef->compare_keys(&last, &key, &column) == 0); - } - - if (new_key) { - DBUG_ASSERT(column <= stats->m_distinct_keys_per_prefix.size()); - - for (auto i = column; i < stats->m_distinct_keys_per_prefix.size(); i++) { - stats->m_distinct_keys_per_prefix[i]++; - } - - // assign new last_key for the next call - // however, we only need to change the last key - // if one of the first n-1 columns is different - // If the n-1 prefix is the same, no sense in storing - // the new key - if (column < stats->m_distinct_keys_per_prefix.size()) { - m_last_key.assign(key.data(), key.size()); - } - } + if (m_keydef != nullptr) { + m_cardinality_collector.ProcessKey(key, m_keydef.get(), stats); } } @@ -263,8 +232,10 @@ Rdb_tbl_prop_coll::Finish(rocksdb::UserCollectedProperties *const properties) { rocksdb_num_sst_entry_other += num_sst_entry_other; } - properties->insert({INDEXSTATS_KEY, - Rdb_index_stats::materialize(m_stats, m_card_adj_extra)}); + for (Rdb_index_stats &stat : m_stats) { + m_cardinality_collector.AdjustStats(&stat); + } + properties->insert({INDEXSTATS_KEY, Rdb_index_stats::materialize(m_stats)}); return rocksdb::Status::OK(); } @@ -274,23 +245,6 @@ bool Rdb_tbl_prop_coll::NeedCompact() const { (m_max_deleted_rows > m_params.m_deletes); } -bool Rdb_tbl_prop_coll::ShouldCollectStats() { - // Zero means that we'll use all the keys to update statistics. - if (!m_table_stats_sampling_pct || - RDB_TBL_STATS_SAMPLE_PCT_MAX == m_table_stats_sampling_pct) { - return true; - } - - const int val = rand_r(&m_seed) % (RDB_TBL_STATS_SAMPLE_PCT_MAX - - RDB_TBL_STATS_SAMPLE_PCT_MIN + 1) + - RDB_TBL_STATS_SAMPLE_PCT_MIN; - - DBUG_ASSERT(val >= RDB_TBL_STATS_SAMPLE_PCT_MIN); - DBUG_ASSERT(val <= RDB_TBL_STATS_SAMPLE_PCT_MAX); - - return val <= m_table_stats_sampling_pct; -} - /* Returns the same as above, but in human-readable way for logging */ @@ -367,8 +321,7 @@ void Rdb_tbl_prop_coll::read_stats_from_tbl_props( Serializes an array of Rdb_index_stats into a network string. */ std::string -Rdb_index_stats::materialize(const std::vector &stats, - const float card_adj_extra) { +Rdb_index_stats::materialize(const std::vector &stats) { String ret; rdb_netstr_append_uint16(&ret, INDEX_STATS_VERSION_ENTRY_TYPES); for (const auto &i : stats) { @@ -384,8 +337,7 @@ Rdb_index_stats::materialize(const std::vector &stats, rdb_netstr_append_uint64(&ret, i.m_entry_merges); rdb_netstr_append_uint64(&ret, i.m_entry_others); for (const auto &num_keys : i.m_distinct_keys_per_prefix) { - const float upd_num_keys = num_keys * card_adj_extra; - rdb_netstr_append_uint64(&ret, static_cast(upd_num_keys)); + rdb_netstr_append_uint64(&ret, num_keys); } } @@ -418,7 +370,7 @@ int Rdb_index_stats::unmaterialize(const std::string &s, sql_print_error("Index stats version %d was outside of supported range. 
" "This should not happen so aborting the system.", version); - abort_with_stack_traces(); + abort(); } size_t needed = sizeof(stats.m_gl_index_id.cf_id) + @@ -523,4 +475,75 @@ void Rdb_index_stats::merge(const Rdb_index_stats &s, const bool &increment, } } +Rdb_tbl_card_coll::Rdb_tbl_card_coll(const uint8_t &table_stats_sampling_pct) + : m_table_stats_sampling_pct(table_stats_sampling_pct), + m_seed(time(nullptr)) {} + +bool Rdb_tbl_card_coll::IsSampingDisabled() { + // Zero means that we'll use all the keys to update statistics. + return m_table_stats_sampling_pct == 0 || + RDB_TBL_STATS_SAMPLE_PCT_MAX == m_table_stats_sampling_pct; +} + +bool Rdb_tbl_card_coll::ShouldCollectStats() { + if (IsSampingDisabled()) { + return true; // collect every key + } + + const int val = rand_r(&m_seed) % (RDB_TBL_STATS_SAMPLE_PCT_MAX - + RDB_TBL_STATS_SAMPLE_PCT_MIN + 1) + + RDB_TBL_STATS_SAMPLE_PCT_MIN; + + DBUG_ASSERT(val >= RDB_TBL_STATS_SAMPLE_PCT_MIN); + DBUG_ASSERT(val <= RDB_TBL_STATS_SAMPLE_PCT_MAX); + + return val <= m_table_stats_sampling_pct; +} + +void Rdb_tbl_card_coll::ProcessKey(const rocksdb::Slice &key, + const Rdb_key_def *keydef, + Rdb_index_stats *stats) { + if (ShouldCollectStats()) { + std::size_t column = 0; + bool new_key = true; + + if (!m_last_key.empty()) { + rocksdb::Slice last(m_last_key.data(), m_last_key.size()); + new_key = (keydef->compare_keys(&last, &key, &column) == 0); + } + + if (new_key) { + DBUG_ASSERT(column <= stats->m_distinct_keys_per_prefix.size()); + + for (auto i = column; i < stats->m_distinct_keys_per_prefix.size(); i++) { + stats->m_distinct_keys_per_prefix[i]++; + } + + // assign new last_key for the next call + // however, we only need to change the last key + // if one of the first n-1 columns is different + // If the n-1 prefix is the same, no sense in storing + // the new key + if (column < stats->m_distinct_keys_per_prefix.size()) { + m_last_key.assign(key.data(), key.size()); + } + } + } +} + +void Rdb_tbl_card_coll::Reset() { m_last_key.clear(); } + +// We need to adjust the index cardinality numbers based on the sampling +// rate so that the output of "SHOW INDEX" command will reflect reality +// more closely. It will still be an approximation, just a better one. 
+void Rdb_tbl_card_coll::AdjustStats(Rdb_index_stats *stats) { + if (IsSampingDisabled()) { + // no sampling was done, return the stats as is + return; + } + for (int64_t &num_keys : stats->m_distinct_keys_per_prefix) { + num_keys = num_keys * 100 / m_table_stats_sampling_pct; + } +} + } // namespace myrocks diff --git a/storage/rocksdb/properties_collector.h b/storage/rocksdb/properties_collector.h index 9ae519d95c7..1441d893420 100644 --- a/storage/rocksdb/properties_collector.h +++ b/storage/rocksdb/properties_collector.h @@ -56,8 +56,7 @@ struct Rdb_index_stats { std::vector m_distinct_keys_per_prefix; std::string m_name; // name is not persisted - static std::string materialize(const std::vector &stats, - const float card_adj_extra); + static std::string materialize(const std::vector &stats); static int unmaterialize(const std::string &s, std::vector *const ret); @@ -71,6 +70,40 @@ struct Rdb_index_stats { const int64_t &estimated_data_len = 0); }; +// A helper class to calculate index cardinality +class Rdb_tbl_card_coll { + public: + explicit Rdb_tbl_card_coll(const uint8_t &table_stats_sampling_pct); + + public: + void ProcessKey(const rocksdb::Slice &key, const Rdb_key_def *keydef, + Rdb_index_stats *stats); + /* + * Resets the state of the collector to start calculating statistics for the + * next index. + */ + void Reset(); + + /* + * Cardinality statistics might be calculated using some sampling strategy. + * This method adjusts gathered statistics according to the sampling + * strategy used. Note that the adjusted cardinality value is just an estimate + * and can return a value exceeding the number of rows in a table, so the + * returned value should be capped by the row count before it is used by + * the optimizer or displayed to a client. + */ + void AdjustStats(Rdb_index_stats *stats); + + private: + bool ShouldCollectStats(); + bool IsSampingDisabled(); + + private: + std::string m_last_key; + uint8_t m_table_stats_sampling_pct; + unsigned int m_seed; +}; + class Rdb_tbl_prop_coll : public rocksdb::TablePropertiesCollector { public: Rdb_tbl_prop_coll(Rdb_ddl_manager *const ddl_manager, @@ -130,9 +163,7 @@ private: uint64_t m_rows, m_window_pos, m_deleted_rows, m_max_deleted_rows; uint64_t m_file_size; Rdb_compact_params m_params; - uint8_t m_table_stats_sampling_pct; - unsigned int m_seed; - float m_card_adj_extra; + Rdb_tbl_card_coll m_cardinality_collector; }; class Rdb_tbl_prop_coll_factory diff --git a/storage/rocksdb/rdb_buff.h b/storage/rocksdb/rdb_buff.h index c9647707232..df6264029fa 100644 --- a/storage/rocksdb/rdb_buff.h +++ b/storage/rocksdb/rdb_buff.h @@ -30,7 +30,7 @@ #define be16toh _byteswap_ushort #endif -#if __APPLE__ +#if defined(__APPLE__) #include #define htobe64(x) OSSwapHostToBigInt64(x) #define be64toh(x) OSSwapBigToHostInt64(x) diff --git a/storage/rocksdb/rdb_cf_options.cc b/storage/rocksdb/rdb_cf_options.cc index d5521e62cab..96a683e5eb3 100644 --- a/storage/rocksdb/rdb_cf_options.cc +++ b/storage/rocksdb/rdb_cf_options.cc @@ -325,6 +325,13 @@ Rdb_cf_options::get_cf_comparator(const std::string &cf_name) { } } +std::shared_ptr +Rdb_cf_options::get_cf_merge_operator(const std::string &cf_name) { + return (cf_name == DEFAULT_SYSTEM_CF_NAME) + ? 
std::make_shared() + : nullptr; +} + void Rdb_cf_options::get_cf_options(const std::string &cf_name, rocksdb::ColumnFamilyOptions *const opts) { DBUG_ASSERT(opts != nullptr); @@ -334,6 +341,7 @@ void Rdb_cf_options::get_cf_options(const std::string &cf_name, // Set the comparator according to 'rev:' opts->comparator = get_cf_comparator(cf_name); + opts->merge_operator = get_cf_merge_operator(cf_name); } } // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_options.h b/storage/rocksdb/rdb_cf_options.h index 32f2308284f..19e5da6a79e 100644 --- a/storage/rocksdb/rdb_cf_options.h +++ b/storage/rocksdb/rdb_cf_options.h @@ -64,6 +64,9 @@ public: static const rocksdb::Comparator * get_cf_comparator(const std::string &cf_name); + std::shared_ptr + get_cf_merge_operator(const std::string &cf_name); + void get_cf_options(const std::string &cf_name, rocksdb::ColumnFamilyOptions *const opts) MY_ATTRIBUTE((__nonnull__)); diff --git a/storage/rocksdb/rdb_compact_filter.h b/storage/rocksdb/rdb_compact_filter.h index 9bc7ece6e7a..20ae3c740c1 100644 --- a/storage/rocksdb/rdb_compact_filter.h +++ b/storage/rocksdb/rdb_compact_filter.h @@ -167,7 +167,7 @@ public: sql_print_error("Decoding ttl from PK value failed in compaction filter, " "for index (%u,%u), val: %s", m_prev_index.cf_id, m_prev_index.index_id, buf.c_str()); - abort_with_stack_traces(); + abort(); } /* diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index deba2d50ccc..5664bd901b3 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -826,6 +827,25 @@ int Rdb_key_def::successor(uchar *const packed_tuple, const uint &len) { return changed; } +/* + @return Number of bytes that were changed +*/ +int Rdb_key_def::predecessor(uchar *const packed_tuple, const uint &len) { + DBUG_ASSERT(packed_tuple != nullptr); + + int changed = 0; + uchar *p = packed_tuple + len - 1; + for (; p > packed_tuple; p--) { + changed++; + if (*p != uchar(0x00)) { + *p = *p - 1; + break; + } + *p = 0xFF; + } + return changed; +} + static const std::map UNPACK_HEADER_SIZES = { {RDB_UNPACK_DATA_TAG, RDB_UNPACK_HEADER_SIZE}, {RDB_UNPACK_COVERED_DATA_TAG, RDB_UNPACK_COVERED_HEADER_SIZE}}; @@ -1429,11 +1449,11 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, MY_BITMAP covered_bitmap; my_bitmap_map covered_bits; uint curr_bitmap_pos = 0; - bitmap_init(&covered_bitmap, &covered_bits, MAX_REF_PARTS, false); const bool has_covered_bitmap = has_unpack_info && (unpack_header[0] == RDB_UNPACK_COVERED_DATA_TAG); if (has_covered_bitmap) { + bitmap_init(&covered_bitmap, &covered_bits, MAX_REF_PARTS, false); covered_bits = rdb_netbuf_to_uint16((const uchar *)unpack_header + sizeof(RDB_UNPACK_COVERED_DATA_TAG) + RDB_UNPACK_COVERED_DATA_LEN_SIZE); @@ -1508,6 +1528,18 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, } if ((this->*fpi->m_skip_func)(fpi, field, &reader)) return HA_ERR_ROCKSDB_CORRUPT_DATA; + + // If this is a space padded varchar, we need to skip the indicator + // bytes for trailing bytes. They're useless since we can't restore the + // field anyway. + // + // There is a special case for prefixed varchars where we do not + // generate unpack info, because we know prefixed varchars cannot be + // unpacked. In this case, it is not necessary to skip. 
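One small helper in the rdb_datadic.cc hunk above deserves a note: Rdb_key_def::predecessor() is the mirror image of successor(), producing a key that sorts just before the given packed key by walking it from the last byte backwards, turning trailing 0x00 bytes into 0xFF and decrementing the first non-zero byte it reaches. A hedged standalone restatement of that byte arithmetic (a free function for illustration only, not the class method itself):

#include <cstddef>

// Turn 'key' into the byte string that sorts immediately before it.
// Trailing 0x00 bytes wrap to 0xFF; the first non-zero byte found while
// scanning backwards is decremented. Returns the number of bytes changed.
static int key_predecessor(unsigned char *key, size_t len) {
  if (len == 0)
    return 0;
  int changed = 0;
  for (unsigned char *p = key + len - 1; p > key; p--) {
    changed++;
    if (*p != 0x00) {
      *p -= 1;
      break;
    }
    *p = 0xFF;
  }
  return changed;
}

For instance, {0x01, 0x02, 0x00, 0x00} becomes {0x01, 0x01, 0xFF, 0xFF} with three bytes changed; like the patch's version, the very first byte is never modified.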
+ if (fpi->m_skip_func == &Rdb_key_def::skip_variable_space_pad && + !fpi->m_unpack_info_stores_value) { + unp_reader.read(fpi->m_unpack_info_uses_two_bytes ? 2 : 1); + } } } @@ -3487,6 +3519,20 @@ void Rdb_tbl_def::set_name(const std::string &name) { check_if_is_mysql_system_table(); } +GL_INDEX_ID Rdb_tbl_def::get_autoincr_gl_index_id() { + for (uint i = 0; i < m_key_count; i++) { + auto &k = m_key_descr_arr[i]; + if (k->m_index_type == Rdb_key_def::INDEX_TYPE_PRIMARY || + k->m_index_type == Rdb_key_def::INDEX_TYPE_HIDDEN_PRIMARY) { + return k->get_gl_index_id(); + } + } + + // Every table must have a primary key, even if it's hidden. + abort(); + return GL_INDEX_ID(); +} + /* Static function of type my_hash_get_key that gets invoked by the m_ddl_hash object of type my_core::HASH. @@ -3713,6 +3759,68 @@ bool Rdb_validate_tbls::compare_to_actual_tables(const std::string &datadir, return result; } +/* + Validate that all auto increment values in the data dictionary are on a + supported version. +*/ +bool Rdb_ddl_manager::validate_auto_incr() { + std::unique_ptr it(m_dict->new_iterator()); + + uchar auto_incr_entry[Rdb_key_def::INDEX_NUMBER_SIZE]; + rdb_netbuf_store_index(auto_incr_entry, Rdb_key_def::AUTO_INC); + const rocksdb::Slice auto_incr_entry_slice( + reinterpret_cast(auto_incr_entry), + Rdb_key_def::INDEX_NUMBER_SIZE); + for (it->Seek(auto_incr_entry_slice); it->Valid(); it->Next()) { + const rocksdb::Slice key = it->key(); + const rocksdb::Slice val = it->value(); + GL_INDEX_ID gl_index_id; + + if (key.size() >= Rdb_key_def::INDEX_NUMBER_SIZE && + memcmp(key.data(), auto_incr_entry, Rdb_key_def::INDEX_NUMBER_SIZE)) + break; + + if (key.size() != Rdb_key_def::INDEX_NUMBER_SIZE * 3) { + return false; + } + + if (val.size() <= Rdb_key_def::VERSION_SIZE) { + return false; + } + + // Check if we have orphaned entries for whatever reason by cross + // referencing ddl entries. + auto ptr = reinterpret_cast(key.data()); + ptr += Rdb_key_def::INDEX_NUMBER_SIZE; + rdb_netbuf_read_gl_index(&ptr, &gl_index_id); + if (!m_dict->get_index_info(gl_index_id, nullptr)) { + // NO_LINT_DEBUG + sql_print_warning("RocksDB: AUTOINC mismatch - " + "Index number (%u, %u) found in AUTOINC " + "but does not exist as a DDL entry", + gl_index_id.cf_id, gl_index_id.index_id); + return false; + } + + ptr = reinterpret_cast(val.data()); + const int version = rdb_netbuf_read_uint16(&ptr); + if (version > Rdb_key_def::AUTO_INCREMENT_VERSION) { + // NO_LINT_DEBUG + sql_print_warning("RocksDB: AUTOINC mismatch - " + "Index number (%u, %u) found in AUTOINC " + "is on unsupported version %d", + gl_index_id.cf_id, gl_index_id.index_id, version); + return false; + } + } + + if (!it->status().ok()) { + return false; + } + + return true; +} + /* Validate that all the tables in the RocksDB database dictionary match the .frm files in the datadir @@ -3877,10 +3985,18 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *const dict_arg, If validate_tables is greater than 0 run the validation. Only fail the initialzation if the setting is 1. If the setting is 2 we continue. 
*/ - if (validate_tables > 0 && !validate_schemas()) { - if (validate_tables == 1) { - sql_print_error("RocksDB: Problems validating data dictionary " - "against .frm files, exiting"); + if (validate_tables > 0) { + std::string msg; + if (!validate_schemas()) { + msg = "RocksDB: Problems validating data dictionary " + "against .frm files, exiting"; + } else if (!validate_auto_incr()) { + msg = "RocksDB: Problems validating auto increment values in " + "data dictionary, exiting"; + } + if (validate_tables == 1 && !msg.empty()) { + // NO_LINT_DEBUG + sql_print_error("%s", msg.c_str()); return true; } } @@ -4154,6 +4270,10 @@ bool Rdb_ddl_manager::rename(const std::string &from, const std::string &to, new_rec->m_auto_incr_val = rec->m_auto_incr_val.load(std::memory_order_relaxed); new_rec->m_key_descr_arr = rec->m_key_descr_arr; + + new_rec->m_hidden_pk_val = + rec->m_hidden_pk_val.load(std::memory_order_relaxed); + // so that it's not free'd when deleting the old rec rec->m_key_descr_arr = nullptr; @@ -4613,13 +4733,16 @@ void Rdb_dict_manager::delete_index_info(rocksdb::WriteBatch *batch, const GL_INDEX_ID &gl_index_id) const { delete_with_prefix(batch, Rdb_key_def::INDEX_INFO, gl_index_id); delete_with_prefix(batch, Rdb_key_def::INDEX_STATISTICS, gl_index_id); + delete_with_prefix(batch, Rdb_key_def::AUTO_INC, gl_index_id); } bool Rdb_dict_manager::get_index_info( const GL_INDEX_ID &gl_index_id, struct Rdb_index_info *const index_info) const { - index_info->m_gl_index_id = gl_index_id; + if (index_info) { + index_info->m_gl_index_id = gl_index_id; + } bool found = false; bool error = false; @@ -4630,6 +4753,10 @@ bool Rdb_dict_manager::get_index_info( const rocksdb::Status &status = get_value(key, &value); if (status.ok()) { + if (!index_info) { + return true; + } + const uchar *const val = (const uchar *)value.c_str(); const uchar *ptr = val; index_info->m_index_dict_version = rdb_netbuf_to_uint16(val); @@ -4668,6 +4795,11 @@ bool Rdb_dict_manager::get_index_info( index_info->m_kv_version = rdb_netbuf_to_uint16(ptr); ptr += RDB_SIZEOF_KV_VERSION; index_info->m_ttl_duration = rdb_netbuf_to_uint64(ptr); + if ((index_info->m_kv_version == + Rdb_key_def::PRIMARY_FORMAT_VERSION_TTL) && + index_info->m_ttl_duration > 0) { + index_info->m_index_flags = Rdb_key_def::TTL_FLAG; + } found = true; break; @@ -4709,7 +4841,7 @@ bool Rdb_dict_manager::get_index_info( "and it may be a bug.", index_info->m_index_dict_version, index_info->m_index_type, index_info->m_kv_version, index_info->m_ttl_duration); - abort_with_stack_traces(); + abort(); } return found; @@ -4906,8 +5038,8 @@ void Rdb_dict_manager::add_create_index( rocksdb::WriteBatch *const batch) const { for (const auto &gl_index_id : gl_index_ids) { // NO_LINT_DEBUG - sql_print_information("RocksDB: Begin index creation (%u,%u)", - gl_index_id.cf_id, gl_index_id.index_id); + sql_print_verbose_info("RocksDB: Begin index creation (%u,%u)", + gl_index_id.cf_id, gl_index_id.index_id); start_create_index(batch, gl_index_id); } } @@ -4972,7 +5104,7 @@ void Rdb_dict_manager::resume_drop_indexes() const { "bug.", max_index_id_in_dict, gl_index_id.cf_id, gl_index_id.index_id); - abort_with_stack_traces(); + abort(); } } } @@ -4986,8 +5118,8 @@ void Rdb_dict_manager::rollback_ongoing_index_creation() const { for (const auto &gl_index_id : gl_index_ids) { // NO_LINT_DEBUG - sql_print_information("RocksDB: Removing incomplete create index (%u,%u)", - gl_index_id.cf_id, gl_index_id.index_id); + sql_print_verbose_info("RocksDB: Removing incomplete create 
index (%u,%u)", + gl_index_id.cf_id, gl_index_id.index_id); start_drop_index(batch, gl_index_id); } @@ -5021,7 +5153,7 @@ void Rdb_dict_manager::log_start_drop_index(GL_INDEX_ID gl_index_id, "from index id (%u,%u). MyRocks data dictionary may " "get corrupted.", gl_index_id.cf_id, gl_index_id.index_id); - abort_with_stack_traces(); + abort(); } } } @@ -5079,7 +5211,7 @@ void Rdb_dict_manager::add_stats( // IndexStats::materialize takes complete care of serialization including // storing the version const auto value = - Rdb_index_stats::materialize(std::vector{it}, 1.); + Rdb_index_stats::materialize(std::vector{it}); batch->Put(m_system_cfh, rocksdb::Slice((char *)key_buf, sizeof(key_buf)), value); @@ -5105,6 +5237,53 @@ Rdb_index_stats Rdb_dict_manager::get_stats(GL_INDEX_ID gl_index_id) const { return Rdb_index_stats(); } +rocksdb::Status +Rdb_dict_manager::put_auto_incr_val(rocksdb::WriteBatchBase *batch, + const GL_INDEX_ID &gl_index_id, + ulonglong val, bool overwrite) const { + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; + dump_index_id(key_buf, Rdb_key_def::AUTO_INC, gl_index_id); + const rocksdb::Slice key = + rocksdb::Slice(reinterpret_cast(key_buf), sizeof(key_buf)); + + // Value is constructed by storing the version and the value. + uchar value_buf[RDB_SIZEOF_AUTO_INCREMENT_VERSION + + ROCKSDB_SIZEOF_AUTOINC_VALUE] = {0}; + uchar *ptr = value_buf; + rdb_netbuf_store_uint16(ptr, Rdb_key_def::AUTO_INCREMENT_VERSION); + ptr += RDB_SIZEOF_AUTO_INCREMENT_VERSION; + rdb_netbuf_store_uint64(ptr, val); + ptr += ROCKSDB_SIZEOF_AUTOINC_VALUE; + const rocksdb::Slice value = + rocksdb::Slice(reinterpret_cast(value_buf), ptr - value_buf); + + if (overwrite) { + return batch->Put(m_system_cfh, key, value); + } + return batch->Merge(m_system_cfh, key, value); +} + +bool Rdb_dict_manager::get_auto_incr_val(const GL_INDEX_ID &gl_index_id, + ulonglong *new_val) const { + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; + dump_index_id(key_buf, Rdb_key_def::AUTO_INC, gl_index_id); + + std::string value; + const rocksdb::Status status = get_value( + rocksdb::Slice(reinterpret_cast(key_buf), sizeof(key_buf)), + &value); + + if (status.ok()) { + const uchar *const val = reinterpret_cast(value.data()); + + if (rdb_netbuf_to_uint16(val) <= Rdb_key_def::AUTO_INCREMENT_VERSION) { + *new_val = rdb_netbuf_to_uint64(val + RDB_SIZEOF_AUTO_INCREMENT_VERSION); + return true; + } + } + return false; +} + uint Rdb_seq_generator::get_and_update_next_number( Rdb_dict_manager *const dict) { DBUG_ASSERT(dict != nullptr); diff --git a/storage/rocksdb/rdb_datadic.h b/storage/rocksdb/rdb_datadic.h index fa0c0fd09e5..585ea4344f9 100644 --- a/storage/rocksdb/rdb_datadic.h +++ b/storage/rocksdb/rdb_datadic.h @@ -138,6 +138,7 @@ const size_t RDB_SIZEOF_INDEX_INFO_VERSION = sizeof(uint16); const size_t RDB_SIZEOF_INDEX_TYPE = sizeof(uchar); const size_t RDB_SIZEOF_KV_VERSION = sizeof(uint16); const size_t RDB_SIZEOF_INDEX_FLAGS = sizeof(uint32); +const size_t RDB_SIZEOF_AUTO_INCREMENT_VERSION = sizeof(uint16); // Possible return values for rdb_index_field_unpack_t functions. enum { @@ -237,17 +238,44 @@ public: *size = INDEX_NUMBER_SIZE; } + /* + Get the first key that you need to position at to start iterating. + + Stores into *key a "supremum" or "infimum" key value for the index. + + @return Number of bytes in the key that are usable for bloom filter use. 
+ */ + inline int get_first_key(uchar *const key, uint *const size) const { + if (m_is_reverse_cf) + get_supremum_key(key, size); + else + get_infimum_key(key, size); + + /* Find out how many bytes of infimum are the same as m_index_number */ + uchar unmodified_key[INDEX_NUMBER_SIZE]; + rdb_netbuf_store_index(unmodified_key, m_index_number); + int i; + for (i = 0; i < INDEX_NUMBER_SIZE; i++) { + if (key[i] != unmodified_key[i]) + break; + } + return i; + } + /* Make a key that is right after the given key. */ static int successor(uchar *const packed_tuple, const uint &len); + /* Make a key that is right before the given key. */ + static int predecessor(uchar *const packed_tuple, const uint &len); + /* This can be used to compare prefixes. if X is a prefix of Y, then we consider that X = Y. */ // b describes the lookup key, which can be a prefix of a. + // b might be outside of the index_number range, if successor() is called. int cmp_full_keys(const rocksdb::Slice &a, const rocksdb::Slice &b) const { DBUG_ASSERT(covers_key(a)); - DBUG_ASSERT(covers_key(b)); return memcmp(a.data(), b.data(), std::min(a.size(), b.size())); } @@ -383,6 +411,7 @@ public: INDEX_STATISTICS = 6, MAX_INDEX_ID = 7, DDL_CREATE_INDEX_ONGOING = 8, + AUTO_INC = 9, END_DICT_INDEX_ID = 255 }; @@ -395,6 +424,7 @@ public: DDL_DROP_INDEX_ONGOING_VERSION = 1, MAX_INDEX_ID_VERSION = 1, DDL_CREATE_INDEX_ONGOING_VERSION = 1, + AUTO_INCREMENT_VERSION = 1, // Version for index stats is stored in IndexStats struct }; @@ -972,17 +1002,17 @@ public: Rdb_tbl_def &operator=(const Rdb_tbl_def &) = delete; explicit Rdb_tbl_def(const std::string &name) - : m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) { + : m_key_descr_arr(nullptr), m_hidden_pk_val(0), m_auto_incr_val(0) { set_name(name); } Rdb_tbl_def(const char *const name, const size_t &len) - : m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) { + : m_key_descr_arr(nullptr), m_hidden_pk_val(0), m_auto_incr_val(0) { set_name(std::string(name, len)); } explicit Rdb_tbl_def(const rocksdb::Slice &slice, const size_t &pos = 0) - : m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) { + : m_key_descr_arr(nullptr), m_hidden_pk_val(0), m_auto_incr_val(0) { set_name(std::string(slice.data() + pos, slice.size() - pos)); } @@ -995,7 +1025,7 @@ public: std::shared_ptr *m_key_descr_arr; std::atomic m_hidden_pk_val; - std::atomic m_auto_incr_val; + std::atomic m_auto_incr_val; /* Is this a system table */ bool m_is_mysql_system_table; @@ -1007,6 +1037,7 @@ public: const std::string &base_dbname() const { return m_dbname; } const std::string &base_tablename() const { return m_tablename; } const std::string &base_partition() const { return m_partition; } + GL_INDEX_ID get_autoincr_gl_index_id(); }; /* @@ -1119,6 +1150,8 @@ private: static void free_hash_elem(void *const data); bool validate_schemas(); + + bool validate_auto_incr(); }; /* @@ -1183,8 +1216,9 @@ private: 2. internal cf_id, index id => index information key: Rdb_key_def::INDEX_INFO(0x2) + cf_id + index_id - value: version, index_type, kv_format_version, ttl_duration + value: version, index_type, kv_format_version, index_flags, ttl_duration index_type is 1 byte, version and kv_format_version are 2 bytes. + index_flags is 4 bytes. ttl_duration is 8 bytes. 3. CF id => CF flags @@ -1213,6 +1247,11 @@ private: key: Rdb_key_def::DDL_CREATE_INDEX_ONGOING(0x8) + cf_id + index_id value: version + 9. 
auto_increment values + key: Rdb_key_def::AUTO_INC(0x9) + cf_id + index_id + value: version, {max auto_increment so far} + max auto_increment is 8 bytes + Data dictionary operations are atomic inside RocksDB. For example, when creating a table with two indexes, it is necessary to call Put three times. They have to be atomic. Rdb_dict_manager has a wrapper function @@ -1354,6 +1393,13 @@ public: void add_stats(rocksdb::WriteBatch *const batch, const std::vector &stats) const; Rdb_index_stats get_stats(GL_INDEX_ID gl_index_id) const; + + rocksdb::Status put_auto_incr_val(rocksdb::WriteBatchBase *batch, + const GL_INDEX_ID &gl_index_id, + ulonglong val, + bool overwrite = false) const; + bool get_auto_incr_val(const GL_INDEX_ID &gl_index_id, + ulonglong *new_val) const; }; struct Rdb_index_info { @@ -1365,6 +1411,109 @@ struct Rdb_index_info { uint64 m_ttl_duration = 0; }; +/* + @brief + Merge Operator for the auto_increment value in the system_cf + + @detail + This class implements the rocksdb Merge Operator for auto_increment values + that are stored to the data dictionary every transaction. + + The actual Merge function is triggered on compaction, memtable flushes, or + when get() is called on the same key. + + */ +class Rdb_system_merge_op : public rocksdb::AssociativeMergeOperator { + public: + /* + Updates the new value associated with a key to be the maximum of the + passed in value and the existing value. + + @param[IN] key + @param[IN] existing_value existing value for a key; nullptr if nonexistent + key + @param[IN] value + @param[OUT] new_value new value after Merge + @param[IN] logger + */ + bool Merge(const rocksdb::Slice &key, const rocksdb::Slice *existing_value, + const rocksdb::Slice &value, std::string *new_value, + rocksdb::Logger *logger) const override { + DBUG_ASSERT(new_value != nullptr); + + if (key.size() != Rdb_key_def::INDEX_NUMBER_SIZE * 3 || + GetKeyType(key) != Rdb_key_def::AUTO_INC || + value.size() != + RDB_SIZEOF_AUTO_INCREMENT_VERSION + ROCKSDB_SIZEOF_AUTOINC_VALUE || + GetVersion(value) > Rdb_key_def::AUTO_INCREMENT_VERSION) { + abort(); + } + + uint64_t merged_value = Deserialize(value); + + if (existing_value != nullptr) { + if (existing_value->size() != RDB_SIZEOF_AUTO_INCREMENT_VERSION + + ROCKSDB_SIZEOF_AUTOINC_VALUE || + GetVersion(*existing_value) > Rdb_key_def::AUTO_INCREMENT_VERSION) { + abort(); + } + + merged_value = std::max(merged_value, Deserialize(*existing_value)); + } + Serialize(merged_value, new_value); + return true; + } + + virtual const char *Name() const override { return "Rdb_system_merge_op"; } + + private: + /* + Serializes the integer data to the new_value buffer or the target buffer + the merge operator will update to + */ + void Serialize(const uint64_t data, std::string *new_value) const { + uchar value_buf[RDB_SIZEOF_AUTO_INCREMENT_VERSION + + ROCKSDB_SIZEOF_AUTOINC_VALUE] = {0}; + uchar *ptr = value_buf; + /* fill in the auto increment version */ + rdb_netbuf_store_uint16(ptr, Rdb_key_def::AUTO_INCREMENT_VERSION); + ptr += RDB_SIZEOF_AUTO_INCREMENT_VERSION; + /* fill in the auto increment value */ + rdb_netbuf_store_uint64(ptr, data); + ptr += ROCKSDB_SIZEOF_AUTOINC_VALUE; + new_value->assign(reinterpret_cast(value_buf), ptr - value_buf); + } + + /* + Gets the value of auto_increment type in the data dictionary from the + value slice + + @Note Only to be used on data dictionary keys for the auto_increment type + */ + uint64_t Deserialize(const rocksdb::Slice &s) const { + return rdb_netbuf_to_uint64(reinterpret_cast(s.data()) 
+ + RDB_SIZEOF_AUTO_INCREMENT_VERSION); + } + + /* + Gets the type of the key of the key in the data dictionary. + + @Note Only to be used on data dictionary keys for the auto_increment type + */ + uint16_t GetKeyType(const rocksdb::Slice &s) const { + return rdb_netbuf_to_uint32(reinterpret_cast(s.data())); + } + + /* + Gets the version of the auto_increment value in the data dictionary. + + @Note Only to be used on data dictionary value for the auto_increment type + */ + uint16_t GetVersion(const rocksdb::Slice &s) const { + return rdb_netbuf_to_uint16(reinterpret_cast(s.data())); + } +}; + bool rdb_is_collation_supported(const my_core::CHARSET_INFO *const cs); } // namespace myrocks diff --git a/storage/rocksdb/rdb_i_s.cc b/storage/rocksdb/rdb_i_s.cc index 57c1bcad3bb..591d4923ca2 100644 --- a/storage/rocksdb/rdb_i_s.cc +++ b/storage/rocksdb/rdb_i_s.cc @@ -797,7 +797,7 @@ static int rdb_i_s_global_info_fill_table( "from CF with id = %u. MyRocks data dictionary may " "be corrupted.", cf_handle->GetID()); - abort_with_stack_traces(); + abort(); } snprintf(cf_id_buf, INT_BUF_LEN, "%u", cf_handle->GetID()); @@ -928,7 +928,10 @@ enum { INDEX_NUMBER, INDEX_TYPE, KV_FORMAT_VERSION, - CF + TTL_DURATION, + INDEX_FLAGS, + CF, + AUTO_INCREMENT }; } // namespace RDB_DDL_FIELD @@ -943,7 +946,11 @@ static ST_FIELD_INFO rdb_i_s_ddl_fields_info[] = { ROCKSDB_FIELD_INFO("INDEX_TYPE", sizeof(uint16_t), MYSQL_TYPE_SHORT, 0), ROCKSDB_FIELD_INFO("KV_FORMAT_VERSION", sizeof(uint16_t), MYSQL_TYPE_SHORT, 0), + ROCKSDB_FIELD_INFO("TTL_DURATION", sizeof(uint64), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("INDEX_FLAGS", sizeof(uint64), MYSQL_TYPE_LONGLONG, 0), ROCKSDB_FIELD_INFO("CF", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("AUTO_INCREMENT", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, + MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), ROCKSDB_FIELD_INFO_END}; int Rdb_ddl_scanner::add_table(Rdb_tbl_def *tdef) { @@ -954,6 +961,7 @@ int Rdb_ddl_scanner::add_table(Rdb_tbl_def *tdef) { DBUG_ASSERT(m_table != nullptr); Field **field = m_table->field; DBUG_ASSERT(field != nullptr); + const Rdb_dict_manager *dict_manager = rdb_get_dict_manager(); const std::string &dbname = tdef->base_dbname(); field[RDB_DDL_FIELD::TABLE_SCHEMA]->store(dbname.c_str(), dbname.size(), @@ -984,10 +992,20 @@ int Rdb_ddl_scanner::add_table(Rdb_tbl_def *tdef) { field[RDB_DDL_FIELD::INDEX_TYPE]->store(kd.m_index_type, true); field[RDB_DDL_FIELD::KV_FORMAT_VERSION]->store(kd.m_kv_format_version, true); + field[RDB_DDL_FIELD::TTL_DURATION]->store(kd.m_ttl_duration, true); + field[RDB_DDL_FIELD::INDEX_FLAGS]->store(kd.m_index_flags_bitmap, true); std::string cf_name = kd.get_cf()->GetName(); field[RDB_DDL_FIELD::CF]->store(cf_name.c_str(), cf_name.size(), system_charset_info); + ulonglong auto_incr; + if (dict_manager->get_auto_incr_val(tdef->get_autoincr_gl_index_id(), + &auto_incr)) { + field[RDB_DDL_FIELD::AUTO_INCREMENT]->set_notnull(); + field[RDB_DDL_FIELD::AUTO_INCREMENT]->store(auto_incr, true); + } else { + field[RDB_DDL_FIELD::AUTO_INCREMENT]->set_null(); + } ret = my_core::schema_table_store_record(m_thd, m_table); if (ret) @@ -1495,6 +1513,117 @@ static int rdb_i_s_trx_info_init(void *const p) { DBUG_RETURN(0); } +/* + Support for INFORMATION_SCHEMA.ROCKSDB_DEADLOCK dynamic table + */ +namespace RDB_DEADLOCK_FIELD { +enum { + DEADLOCK_ID = 0, + TRANSACTION_ID, + CF_NAME, + WAITING_KEY, + LOCK_TYPE, + INDEX_NAME, + TABLE_NAME, + ROLLED_BACK +}; +} // namespace RDB_TRX_FIELD + +static ST_FIELD_INFO 
rdb_i_s_deadlock_info_fields_info[] = { + ROCKSDB_FIELD_INFO("DEADLOCK_ID", sizeof(ulonglong), MYSQL_TYPE_LONGLONG, + 0), + ROCKSDB_FIELD_INFO("TRANSACTION_ID", sizeof(ulonglong), MYSQL_TYPE_LONGLONG, + 0), + ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("WAITING_KEY", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("LOCK_TYPE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("INDEX_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("TABLE_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("ROLLED_BACK", sizeof(ulonglong), MYSQL_TYPE_LONGLONG, + 0), + ROCKSDB_FIELD_INFO_END}; + +/* Fill the information_schema.rocksdb_trx virtual table */ +static int rdb_i_s_deadlock_info_fill_table( + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); + DBUG_ASSERT(tables->table->field != nullptr); + + static const std::string str_exclusive("EXCLUSIVE"); + static const std::string str_shared("SHARED"); + + int ret = 0; + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + + if (!rdb) { + DBUG_RETURN(ret); + } + + const std::vector &all_dl_info = rdb_get_deadlock_info(); + + ulonglong id = 0; + for (const auto &info : all_dl_info) { + for (const auto &trx_info : info.path) { + tables->table->field[RDB_DEADLOCK_FIELD::DEADLOCK_ID]->store(id, true); + tables->table->field[RDB_DEADLOCK_FIELD::TRANSACTION_ID]->store( + trx_info.trx_id, true); + tables->table->field[RDB_DEADLOCK_FIELD::CF_NAME]->store( + trx_info.cf_name.c_str(), trx_info.cf_name.length(), + system_charset_info); + tables->table->field[RDB_DEADLOCK_FIELD::WAITING_KEY]->store( + trx_info.waiting_key.c_str(), trx_info.waiting_key.length(), + system_charset_info); + if (trx_info.exclusive_lock) { + tables->table->field[RDB_DEADLOCK_FIELD::LOCK_TYPE]->store( + str_exclusive.c_str(), str_exclusive.length(), system_charset_info); + } else { + tables->table->field[RDB_DEADLOCK_FIELD::LOCK_TYPE]->store( + str_shared.c_str(), str_shared.length(), system_charset_info); + } + tables->table->field[RDB_DEADLOCK_FIELD::INDEX_NAME]->store( + trx_info.index_name.c_str(), trx_info.index_name.length(), + system_charset_info); + tables->table->field[RDB_DEADLOCK_FIELD::TABLE_NAME]->store( + trx_info.table_name.c_str(), trx_info.table_name.length(), + system_charset_info); + tables->table->field[RDB_DEADLOCK_FIELD::ROLLED_BACK]->store( + trx_info.trx_id == info.victim_trx_id, true); + + /* Tell MySQL about this row in the virtual table */ + ret = static_cast( + my_core::schema_table_store_record(thd, tables->table)); + + if (ret != 0) { + break; + } + } + id++; + } + + DBUG_RETURN(ret); +} + +/* Initialize the information_schema.rocksdb_trx_info virtual table */ +static int rdb_i_s_deadlock_info_init(void *const p) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(p != nullptr); + + my_core::ST_SCHEMA_TABLE *schema; + + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_deadlock_info_fields_info; + schema->fill_table = rdb_i_s_deadlock_info_fill_table; + + DBUG_RETURN(0); +} + static int rdb_i_s_deinit(void *p MY_ATTRIBUTE((__unused__))) { DBUG_ENTER_FUNC(); DBUG_RETURN(0); @@ -1678,4 +1807,20 @@ struct st_maria_plugin rdb_i_s_trx_info = { nullptr, /* config options */ MYROCKS_MARIADB_PLUGIN_MATURITY_LEVEL }; + +struct st_maria_plugin rdb_i_s_deadlock_info = { + 
MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_DEADLOCK", + "Facebook", + "RocksDB transaction information", + PLUGIN_LICENSE_GPL, + rdb_i_s_deadlock_info_init, + nullptr, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + MYROCKS_MARIADB_PLUGIN_MATURITY_LEVEL +}; } // namespace myrocks diff --git a/storage/rocksdb/rdb_i_s.h b/storage/rocksdb/rdb_i_s.h index 08d35e17ba9..d6a48bf3fec 100644 --- a/storage/rocksdb/rdb_i_s.h +++ b/storage/rocksdb/rdb_i_s.h @@ -32,4 +32,5 @@ extern struct st_maria_plugin rdb_i_s_ddl; extern struct st_maria_plugin rdb_i_s_index_file_map; extern struct st_maria_plugin rdb_i_s_lock_info; extern struct st_maria_plugin rdb_i_s_trx_info; +extern struct st_maria_plugin rdb_i_s_deadlock_info; } // namespace myrocks diff --git a/storage/rocksdb/rdb_io_watchdog.cc b/storage/rocksdb/rdb_io_watchdog.cc index 039b0d7baf1..f09efefcd2a 100644 --- a/storage/rocksdb/rdb_io_watchdog.cc +++ b/storage/rocksdb/rdb_io_watchdog.cc @@ -45,7 +45,7 @@ void Rdb_io_watchdog::expire_io_callback(union sigval timer_data) { "Shutting the service down.", m_write_timeout); - abort_with_stack_traces(); + abort(); } void Rdb_io_watchdog::io_check_callback(union sigval timer_data) { diff --git a/storage/rocksdb/rdb_perf_context.cc b/storage/rocksdb/rdb_perf_context.cc index a3cdb9698e9..ed2b6557ac5 100644 --- a/storage/rocksdb/rdb_perf_context.cc +++ b/storage/rocksdb/rdb_perf_context.cc @@ -47,8 +47,13 @@ std::string rdb_pc_stat_types[] = { "BLOCK_READ_TIME", "BLOCK_CHECKSUM_TIME", "BLOCK_DECOMPRESS_TIME", + "GET_READ_BYTES", + "MULTIGET_READ_BYTES", + "ITER_READ_BYTES", "INTERNAL_KEY_SKIPPED_COUNT", "INTERNAL_DELETE_SKIPPED_COUNT", + "INTERNAL_RECENT_SKIPPED_COUNT", + "INTERNAL_MERGE_COUNT", "GET_SNAPSHOT_TIME", "GET_FROM_MEMTABLE_TIME", "GET_FROM_MEMTABLE_COUNT", @@ -56,9 +61,12 @@ std::string rdb_pc_stat_types[] = { "GET_FROM_OUTPUT_FILES_TIME", "SEEK_ON_MEMTABLE_TIME", "SEEK_ON_MEMTABLE_COUNT", + "NEXT_ON_MEMTABLE_COUNT", + "PREV_ON_MEMTABLE_COUNT", "SEEK_CHILD_SEEK_TIME", "SEEK_CHILD_SEEK_COUNT", - "SEEK_IN_HEAP_TIME", + "SEEK_MIN_HEAP_TIME", + "SEEK_MAX_HEAP_TIME", "SEEK_INTERNAL_SEEK_TIME", "FIND_NEXT_USER_ENTRY_TIME", "WRITE_WAL_TIME", @@ -74,6 +82,12 @@ std::string rdb_pc_stat_types[] = { "NEW_TABLE_ITERATOR_NANOS", "BLOCK_SEEK_NANOS", "FIND_TABLE_NANOS", + "BLOOM_MEMTABLE_HIT_COUNT", + "BLOOM_MEMTABLE_MISS_COUNT", + "BLOOM_SST_HIT_COUNT", + "BLOOM_SST_MISS_COUNT", + "KEY_LOCK_WAIT_TIME", + "KEY_LOCK_WAIT_COUNT", "IO_THREAD_POOL_ID", "IO_BYTES_WRITTEN", "IO_BYTES_READ", @@ -107,8 +121,13 @@ static void harvest_diffs(Rdb_atomic_perf_counters *const counters) { IO_PERF_RECORD(block_read_time); IO_PERF_RECORD(block_checksum_time); IO_PERF_RECORD(block_decompress_time); + IO_PERF_RECORD(get_read_bytes); + IO_PERF_RECORD(multiget_read_bytes); + IO_PERF_RECORD(iter_read_bytes); IO_PERF_RECORD(internal_key_skipped_count); IO_PERF_RECORD(internal_delete_skipped_count); + IO_PERF_RECORD(internal_recent_skipped_count); + IO_PERF_RECORD(internal_merge_count); IO_PERF_RECORD(get_snapshot_time); IO_PERF_RECORD(get_from_memtable_time); IO_PERF_RECORD(get_from_memtable_count); @@ -116,9 +135,12 @@ static void harvest_diffs(Rdb_atomic_perf_counters *const counters) { IO_PERF_RECORD(get_from_output_files_time); IO_PERF_RECORD(seek_on_memtable_time); IO_PERF_RECORD(seek_on_memtable_count); + IO_PERF_RECORD(next_on_memtable_count); + IO_PERF_RECORD(prev_on_memtable_count); 
IO_PERF_RECORD(seek_child_seek_time); IO_PERF_RECORD(seek_child_seek_count); IO_PERF_RECORD(seek_min_heap_time); + IO_PERF_RECORD(seek_max_heap_time); IO_PERF_RECORD(seek_internal_seek_time); IO_PERF_RECORD(find_next_user_entry_time); IO_PERF_RECORD(write_wal_time); @@ -134,6 +156,13 @@ static void harvest_diffs(Rdb_atomic_perf_counters *const counters) { IO_PERF_RECORD(new_table_iterator_nanos); IO_PERF_RECORD(block_seek_nanos); IO_PERF_RECORD(find_table_nanos); + IO_PERF_RECORD(bloom_memtable_hit_count); + IO_PERF_RECORD(bloom_memtable_miss_count); + IO_PERF_RECORD(bloom_sst_hit_count); + IO_PERF_RECORD(bloom_sst_miss_count); + IO_PERF_RECORD(key_lock_wait_time); + IO_PERF_RECORD(key_lock_wait_count); + IO_STAT_RECORD(thread_pool_id); IO_STAT_RECORD(bytes_written); IO_STAT_RECORD(bytes_read); diff --git a/storage/rocksdb/rdb_perf_context.h b/storage/rocksdb/rdb_perf_context.h index f9b9fd48d3e..2aca3dc3bfd 100644 --- a/storage/rocksdb/rdb_perf_context.h +++ b/storage/rocksdb/rdb_perf_context.h @@ -37,8 +37,13 @@ enum { PC_BLOCK_READ_TIME, PC_BLOCK_CHECKSUM_TIME, PC_BLOCK_DECOMPRESS_TIME, + PC_GET_READ_BYTES, + PC_MULTIGET_READ_BYTES, + PC_ITER_READ_BYTES, PC_KEY_SKIPPED, PC_DELETE_SKIPPED, + PC_RECENT_SKIPPED, + PC_MERGE, PC_GET_SNAPSHOT_TIME, PC_GET_FROM_MEMTABLE_TIME, PC_GET_FROM_MEMTABLE_COUNT, @@ -46,9 +51,12 @@ enum { PC_GET_FROM_OUTPUT_FILES_TIME, PC_SEEK_ON_MEMTABLE_TIME, PC_SEEK_ON_MEMTABLE_COUNT, + PC_NEXT_ON_MEMTABLE_COUNT, + PC_PREV_ON_MEMTABLE_COUNT, PC_SEEK_CHILD_SEEK_TIME, PC_SEEK_CHILD_SEEK_COUNT, PC_SEEK_MIN_HEAP_TIME, + PC_SEEK_MAX_HEAP_TIME, PC_SEEK_INTERNAL_SEEK_TIME, PC_FIND_NEXT_USER_ENTRY_TIME, PC_WRITE_WAL_TIME, @@ -64,6 +72,12 @@ enum { PC_NEW_TABLE_ITERATOR_NANOS, PC_BLOCK_SEEK_NANOS, PC_FIND_TABLE_NANOS, + PC_BLOOM_MEMTABLE_HIT_COUNT, + PC_BLOOM_MEMTABLE_MISS_COUNT, + PC_BLOOM_SST_HIT_COUNT, + PC_BLOOM_SST_MISS_COUNT, + PC_KEY_LOCK_WAIT_TIME, + PC_KEY_LOCK_WAIT_COUNT, PC_IO_THREAD_POOL_ID, PC_IO_BYTES_WRITTEN, PC_IO_BYTES_READ, diff --git a/storage/rocksdb/rdb_psi.cc b/storage/rocksdb/rdb_psi.cc index b6bc89a02f9..b5309df5973 100644 --- a/storage/rocksdb/rdb_psi.cc +++ b/storage/rocksdb/rdb_psi.cc @@ -48,7 +48,7 @@ my_core::PSI_thread_info all_rocksdb_threads[] = { my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key, rdb_signal_bg_psi_mutex_key, rdb_signal_drop_idx_psi_mutex_key, rdb_collation_data_mutex_key, rdb_mem_cmp_space_mutex_key, key_mutex_tx_list, rdb_sysvars_psi_mutex_key, - rdb_cfm_mutex_key; + rdb_cfm_mutex_key, rdb_sst_commit_key; my_core::PSI_mutex_info all_rocksdb_mutexes[] = { {&rdb_psi_open_tbls_mutex_key, "open tables", PSI_FLAG_GLOBAL}, @@ -60,6 +60,7 @@ my_core::PSI_mutex_info all_rocksdb_mutexes[] = { {&key_mutex_tx_list, "tx_list", PSI_FLAG_GLOBAL}, {&rdb_sysvars_psi_mutex_key, "setting sysvar", PSI_FLAG_GLOBAL}, {&rdb_cfm_mutex_key, "column family manager", PSI_FLAG_GLOBAL}, + {&rdb_sst_commit_key, "sst commit", PSI_FLAG_GLOBAL}, }; my_core::PSI_rwlock_key key_rwlock_collation_exception_list, diff --git a/storage/rocksdb/rdb_psi.h b/storage/rocksdb/rdb_psi.h index 0a62f411ade..d4318ee3dba 100644 --- a/storage/rocksdb/rdb_psi.h +++ b/storage/rocksdb/rdb_psi.h @@ -40,7 +40,8 @@ extern my_core::PSI_thread_key rdb_background_psi_thread_key, extern my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key, rdb_signal_bg_psi_mutex_key, rdb_signal_drop_idx_psi_mutex_key, rdb_collation_data_mutex_key, rdb_mem_cmp_space_mutex_key, - key_mutex_tx_list, rdb_sysvars_psi_mutex_key, rdb_cfm_mutex_key; + key_mutex_tx_list, rdb_sysvars_psi_mutex_key, 
rdb_cfm_mutex_key, + rdb_sst_commit_key; extern my_core::PSI_rwlock_key key_rwlock_collation_exception_list, key_rwlock_read_free_rpl_tables, key_rwlock_skip_unique_check_tables; diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc index 70f19c6af11..bc3faaf010e 100644 --- a/storage/rocksdb/rdb_sst_info.cc +++ b/storage/rocksdb/rdb_sst_info.cc @@ -43,6 +43,7 @@ #include "./ha_rocksdb.h" #include "./ha_rocksdb_proto.h" #include "./rdb_cf_options.h" +#include "./rdb_psi.h" namespace myrocks { @@ -265,7 +266,6 @@ rocksdb::Status Rdb_sst_file_ordered::put(const rocksdb::Slice &key, if (!m_first_key.empty()) { rocksdb::Slice first_key_slice(m_first_key); int cmp = m_file.compare(first_key_slice, key); - DBUG_ASSERT(cmp != 0); m_use_stack = (cmp > 0); // Apply the first key to the stack or SST @@ -329,11 +329,11 @@ Rdb_sst_info::Rdb_sst_info(rocksdb::DB *const db, const std::string &tablename, const rocksdb::DBOptions &db_options, const bool &tracing) : m_db(db), m_cf(cf), m_db_options(db_options), m_curr_size(0), - m_sst_count(0), m_background_error(HA_EXIT_SUCCESS), + m_sst_count(0), m_background_error(HA_EXIT_SUCCESS), m_committed(false), #if defined(RDB_SST_INFO_USE_THREAD) m_queue(), m_mutex(), m_cond(), m_thread(nullptr), m_finished(false), #endif - m_sst_file(nullptr), m_tracing(tracing) { + m_sst_file(nullptr), m_tracing(tracing), m_print_client_error(true) { m_prefix = db->GetName() + "/"; std::string normalized_table; @@ -360,6 +360,7 @@ Rdb_sst_info::Rdb_sst_info(rocksdb::DB *const db, const std::string &tablename, // Set the maximum size to 3 times the cf's target size m_max_size = cf_descr.options.target_file_size_base * 3; } + mysql_mutex_init(rdb_sst_commit_key, &m_commit_mutex, MY_MUTEX_INIT_FAST); } Rdb_sst_info::~Rdb_sst_info() { @@ -367,6 +368,7 @@ Rdb_sst_info::~Rdb_sst_info() { #if defined(RDB_SST_INFO_USE_THREAD) DBUG_ASSERT(m_thread == nullptr); #endif + mysql_mutex_destroy(&m_commit_mutex); } int Rdb_sst_info::open_new_sst_file() { @@ -431,6 +433,8 @@ void Rdb_sst_info::close_curr_sst_file() { int Rdb_sst_info::put(const rocksdb::Slice &key, const rocksdb::Slice &value) { int rc; + DBUG_ASSERT(!m_committed); + if (m_curr_size + key.size() + value.size() >= m_max_size) { // The current sst file has reached its maximum, close it out close_curr_sst_file(); @@ -464,7 +468,21 @@ int Rdb_sst_info::put(const rocksdb::Slice &key, const rocksdb::Slice &value) { return HA_EXIT_SUCCESS; } -int Rdb_sst_info::commit() { +int Rdb_sst_info::commit(bool print_client_error) { + int ret = HA_EXIT_SUCCESS; + + // Both the transaction clean up and the ha_rocksdb handler have + // references to this Rdb_sst_info and both can call commit, so + // synchronize on the object here. + RDB_MUTEX_LOCK_CHECK(m_commit_mutex); + + if (m_committed) { + RDB_MUTEX_UNLOCK_CHECK(m_commit_mutex); + return ret; + } + + m_print_client_error = print_client_error; + if (m_curr_size > 0) { // Close out any existing files close_curr_sst_file(); @@ -483,16 +501,24 @@ int Rdb_sst_info::commit() { } #endif + m_committed = true; + RDB_MUTEX_UNLOCK_CHECK(m_commit_mutex); + // Did we get any errors? 
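The Rdb_sst_info changes above let both the transaction cleanup path and the ha_rocksdb handler call commit() on the same object, so the method is serialized on m_commit_mutex and becomes a no-op once m_committed is set. A minimal sketch of that guarded, run-once commit pattern (a hypothetical class using standard-library locking rather than the mysql_mutex_t the patch uses):

#include <mutex>

// Only the first caller performs the flush; any later caller sees
// m_committed already set and returns success immediately.
class CommitOnce {
 public:
  int commit() {
    std::lock_guard<std::mutex> guard(m_commit_mutex);
    if (m_committed)
      return 0;            // already committed by the other call site
    flush_files();         // placeholder for the real finalization work
    m_committed = true;
    return 0;
  }

 private:
  void flush_files() { /* the patch's commit() closes out any open SST file */ }
  std::mutex m_commit_mutex;
  bool m_committed = false;
};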
if (have_background_error()) { - return get_and_reset_background_error(); + ret = get_and_reset_background_error(); } - return HA_EXIT_SUCCESS; + m_print_client_error = true; + return ret; } void Rdb_sst_info::set_error_msg(const std::string &sst_file_name, const rocksdb::Status &s) { + + if (!m_print_client_error) + return; + #if defined(RDB_SST_INFO_USE_THREAD) // Both the foreground and background threads can set the error message // so lock the mutex to protect it. We only want the first error that diff --git a/storage/rocksdb/rdb_sst_info.h b/storage/rocksdb/rdb_sst_info.h index 1dee0fd0518..42f6458e46b 100644 --- a/storage/rocksdb/rdb_sst_info.h +++ b/storage/rocksdb/rdb_sst_info.h @@ -128,6 +128,8 @@ class Rdb_sst_info { std::string m_prefix; static std::atomic m_prefix_counter; static std::string m_suffix; + bool m_committed; + mysql_mutex_t m_commit_mutex; #if defined(RDB_SST_INFO_USE_THREAD) std::queue m_queue; std::mutex m_mutex; @@ -137,6 +139,7 @@ class Rdb_sst_info { #endif Rdb_sst_file_ordered *m_sst_file; const bool m_tracing; + bool m_print_client_error; int open_new_sst_file(); void close_curr_sst_file(); @@ -157,7 +160,8 @@ class Rdb_sst_info { ~Rdb_sst_info(); int put(const rocksdb::Slice &key, const rocksdb::Slice &value); - int commit(); + int commit(bool print_client_error = true); + bool is_committed() const { return m_committed; } bool have_background_error() { return m_background_error != 0; } diff --git a/storage/rocksdb/rdb_utils.cc b/storage/rocksdb/rdb_utils.cc index b347ac8bafe..6cddfed64c9 100644 --- a/storage/rocksdb/rdb_utils.cc +++ b/storage/rocksdb/rdb_utils.cc @@ -352,4 +352,37 @@ const char *get_rocksdb_supported_compression_types() return compression_methods_buf.c_str(); } +bool rdb_check_rocksdb_corruption() { + return !my_access(myrocks::rdb_corruption_marker_file_name().c_str(), F_OK); +} + +void rdb_persist_corruption_marker() { + const std::string &fileName(myrocks::rdb_corruption_marker_file_name()); + /* O_SYNC is not supported on windows */ + int fd = my_open(fileName.c_str(), O_CREAT | IF_WIN(0, O_SYNC), MYF(MY_WME)); + if (fd < 0) { + sql_print_error("RocksDB: Can't create file %s to mark rocksdb as " + "corrupted.", + fileName.c_str()); + } else { + sql_print_information("RocksDB: Creating the file %s to abort mysqld " + "restarts. Remove this file from the data directory " + "after fixing the corruption to recover. ", + fileName.c_str()); + } + +#ifdef _WIN32 + /* A replacement for O_SYNC flag above */ + if (fd >= 0) + my_sync(fd, MYF(0)); +#endif + + int ret = my_close(fd, MYF(MY_WME)); + if (ret) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Error (%d) closing the file %s", ret, + fileName.c_str()); + } +} + } // namespace myrocks diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h index 3feda5d82ad..3125941ee78 100644 --- a/storage/rocksdb/rdb_utils.h +++ b/storage/rocksdb/rdb_utils.h @@ -84,7 +84,7 @@ namespace myrocks { do { \ if (!(expr)) { \ my_safe_printf_stderr("\nShip assert failure: \'%s\'\n", #expr); \ - abort_with_stack_traces(); \ + abort(); \ } \ } while (0) #endif // SHIP_ASSERT @@ -250,12 +250,20 @@ inline void rdb_check_mutex_call_result(const char *function_name, // This will hopefully result in a meaningful stack trace which we can use // to efficiently debug the root cause. 
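The new rdb_check_rocksdb_corruption() and rdb_persist_corruption_marker() helpers above implement a simple contract: when corruption is detected the server writes a marker file before aborting, and subsequent mysqld starts refuse to proceed until an operator removes the file after repairing the data. A rough sketch of the same pattern with plain C++ streams (hypothetical file name; the patch itself uses my_open with O_SYNC, or my_sync on Windows, to make sure the marker reaches disk):

#include <fstream>
#include <string>

static const std::string kMarkerFile = "ROCKSDB_CORRUPTED";  // hypothetical name

// True if a previous run left a corruption marker behind.
bool corruption_marker_exists() {
  return std::ifstream(kMarkerFile).good();
}

// Drop a marker file so the corruption is still visible after a restart.
void persist_corruption_marker() {
  std::ofstream marker(kMarkerFile, std::ios::out | std::ios::trunc);
  marker << "remove this file after repairing the RocksDB data\n";
  marker.flush();  // the real helper additionally syncs the file to disk
}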
- abort_with_stack_traces(); + abort(); } } void rdb_log_status_error(const rocksdb::Status &s, const char *msg = nullptr); +// return true if the marker file exists which indicates that the corruption +// has been detected +bool rdb_check_rocksdb_corruption(); + +// stores a marker file in the data directory so that after restart server +// is still aware that rocksdb data is corrupted +void rdb_persist_corruption_marker(); + /* Helper functions to parse strings. */ diff --git a/storage/rocksdb/rocksdb b/storage/rocksdb/rocksdb index 9a970c81af9..ba295cda29d 160000 --- a/storage/rocksdb/rocksdb +++ b/storage/rocksdb/rocksdb @@ -1 +1 @@ -Subproject commit 9a970c81af9807071bd690f4c808c5045866291a +Subproject commit ba295cda29daee3ffe58549542804efdfd969784 diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc index b1d9681da2a..580b3fe539f 100644 --- a/storage/sphinx/ha_sphinx.cc +++ b/storage/sphinx/ha_sphinx.cc @@ -17,7 +17,7 @@ #pragma implementation // gcc: Class implementation #endif -#if _MSC_VER>=1400 +#if defined(_MSC_VER) && _MSC_VER>=1400 #define _CRT_SECURE_NO_DEPRECATE 1 #define _CRT_NONSTDC_NO_DEPRECATE 1 #endif @@ -65,7 +65,7 @@ #define MSG_WAITALL 0 #endif -#if _MSC_VER>=1400 +#if defined(_MSC_VER) && _MSC_VER>=1400 #pragma warning(push,4) #endif @@ -2539,12 +2539,6 @@ char * ha_sphinx::UnpackString () } -static inline const char * FixNull ( const char * s ) -{ - return s ? s : "(null)"; -} - - bool ha_sphinx::UnpackSchema () { SPH_ENTER_METHOD(); @@ -3450,7 +3444,7 @@ int ha_sphinx::create ( const char * name, TABLE * table_arg, HA_CREATE_INFO * ) strcasecmp ( table_arg->key_info[0].key_part[0].field->field_name.str, table->field[2]->field_name.str ) ) { my_snprintf ( sError, sizeof(sError), "%s: there must be an index on '%s' column", - name, table->field[2]->field_name.str ); + name, table_arg->field[2]->field_name.str ); break; } diff --git a/storage/sphinx/snippets_udf.cc b/storage/sphinx/snippets_udf.cc index 77f76876819..ab2764407d8 100644 --- a/storage/sphinx/snippets_udf.cc +++ b/storage/sphinx/snippets_udf.cc @@ -45,7 +45,7 @@ typedef uchar byte; /// partially copy-pasted stuff that should be moved elsewhere -#if UNALIGNED_RAM_ACCESS +#ifdef UNALIGNED_RAM_ACCESS /// pass-through wrapper template < typename T > inline T sphUnalignedRead ( const T & tRef ) @@ -83,7 +83,7 @@ void sphUnalignedWrite ( void * pPtr, const T & tVal ) *pDst++ = *pSrc++; } -#endif +#endif /* UNALIGNED_RAM_ACCESS */ #define SPHINXSE_MAX_ALLOC (16*1024*1024) diff --git a/storage/spider/spd_conn.cc b/storage/spider/spd_conn.cc index 3fa825a9d88..0ca7555385c 100644 --- a/storage/spider/spd_conn.cc +++ b/storage/spider/spd_conn.cc @@ -86,9 +86,6 @@ extern PSI_thread_key spd_key_thd_bg_crd; extern PSI_thread_key spd_key_thd_bg_mon; #endif #endif - -extern pthread_mutex_t spider_global_trx_mutex; -extern SPIDER_TRX *spider_global_trx; #endif HASH spider_open_connections; @@ -2994,9 +2991,6 @@ void *spider_bg_sts_action( DBUG_RETURN(NULL); } share->bg_sts_thd = thd; -/* - spider.trx = spider_global_trx; -*/ spider.trx = trx; spider.share = share; spider.conns = conns; @@ -3105,13 +3099,11 @@ void *spider_bg_sts_action( { if (!conns[spider.search_link_idx]) { - pthread_mutex_lock(&spider_global_trx_mutex); spider_get_conn(share, spider.search_link_idx, share->conn_keys[spider.search_link_idx], - spider_global_trx, &spider, FALSE, FALSE, SPIDER_CONN_KIND_MYSQL, + trx, &spider, FALSE, FALSE, SPIDER_CONN_KIND_MYSQL, &error_num); conns[spider.search_link_idx]->error_mode = 0; - 
pthread_mutex_unlock(&spider_global_trx_mutex); /* if ( error_num && @@ -3120,7 +3112,7 @@ void *spider_bg_sts_action( ) { lex_start(thd); error_num = spider_ping_table_mon_from_table( - spider_global_trx, + trx, thd, share, spider.search_link_idx, @@ -3142,7 +3134,6 @@ void *spider_bg_sts_action( } if (spider.search_link_idx != -1 && conns[spider.search_link_idx]) { - DBUG_ASSERT(!conns[spider.search_link_idx]->thd); #ifdef WITH_PARTITION_STORAGE_ENGINE if (spider_get_sts(share, spider.search_link_idx, share->bg_sts_try_time, &spider, @@ -3163,7 +3154,7 @@ void *spider_bg_sts_action( ) { lex_start(thd); error_num = spider_ping_table_mon_from_table( - spider_global_trx, + trx, thd, share, spider.search_link_idx, @@ -3369,9 +3360,6 @@ void *spider_bg_crd_action( table.s = share->table_share; table.field = share->table_share->field; table.key_info = share->table_share->key_info; -/* - spider.trx = spider_global_trx; -*/ spider.trx = trx; spider.change_table_ptr(&table, share->table_share); spider.share = share; @@ -3481,13 +3469,11 @@ void *spider_bg_crd_action( { if (!conns[spider.search_link_idx]) { - pthread_mutex_lock(&spider_global_trx_mutex); spider_get_conn(share, spider.search_link_idx, share->conn_keys[spider.search_link_idx], - spider_global_trx, &spider, FALSE, FALSE, SPIDER_CONN_KIND_MYSQL, + trx, &spider, FALSE, FALSE, SPIDER_CONN_KIND_MYSQL, &error_num); conns[spider.search_link_idx]->error_mode = 0; - pthread_mutex_unlock(&spider_global_trx_mutex); /* if ( error_num && @@ -3496,7 +3482,7 @@ void *spider_bg_crd_action( ) { lex_start(thd); error_num = spider_ping_table_mon_from_table( - spider_global_trx, + trx, thd, share, spider.search_link_idx, @@ -3518,7 +3504,6 @@ void *spider_bg_crd_action( } if (spider.search_link_idx != -1 && conns[spider.search_link_idx]) { - DBUG_ASSERT(!conns[spider.search_link_idx]->thd); #ifdef WITH_PARTITION_STORAGE_ENGINE if (spider_get_crd(share, spider.search_link_idx, share->bg_crd_try_time, &spider, &table, @@ -3539,7 +3524,7 @@ void *spider_bg_crd_action( ) { lex_start(thd); error_num = spider_ping_table_mon_from_table( - spider_global_trx, + trx, thd, share, spider.search_link_idx, @@ -3902,7 +3887,7 @@ void *spider_bg_mon_action( { lex_start(thd); error_num = spider_ping_table_mon_from_table( - spider_global_trx, + trx, thd, share, link_idx, diff --git a/storage/spider/spd_copy_tables.cc b/storage/spider/spd_copy_tables.cc index 97ad1c70631..82c0c490147 100644 --- a/storage/spider/spd_copy_tables.cc +++ b/storage/spider/spd_copy_tables.cc @@ -87,13 +87,12 @@ int spider_udf_set_copy_tables_param_default( if (!copy_tables->param_name) \ { \ if ((copy_tables->param_name = spider_get_string_between_quote( \ - start_ptr, TRUE))) \ + start_ptr, TRUE, ¶m_string_parse))) \ copy_tables->SPIDER_PARAM_STR_LEN(param_name) = \ strlen(copy_tables->param_name); \ - else { \ - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, \ - MYF(0), tmp_ptr); \ + else \ + { \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%s", copy_tables->param_name)); \ @@ -113,9 +112,7 @@ int spider_udf_set_copy_tables_param_default( { \ if (hint_num < 0 || hint_num >= max_size) \ { \ - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, \ - MYF(0), tmp_ptr); \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } else if (copy_tables->param_name[hint_num] != -1) \ break; \ @@ 
-128,17 +125,13 @@ int spider_udf_set_copy_tables_param_default( else if (copy_tables->param_name[hint_num] > max_val) \ copy_tables->param_name[hint_num] = max_val; \ } else { \ - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, \ - MYF(0), tmp_ptr); \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "[%d]=%d", hint_num, \ copy_tables->param_name[hint_num])); \ } else { \ - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, \ - MYF(0), tmp_ptr); \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } \ break; \ @@ -157,10 +150,11 @@ int spider_udf_set_copy_tables_param_default( copy_tables->param_name = min_val; \ else if (copy_tables->param_name > max_val) \ copy_tables->param_name = max_val; \ + param_string_parse.set_param_value(tmp_ptr2, \ + tmp_ptr2 + \ + strlen(tmp_ptr2) + 1); \ } else { \ - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, \ - MYF(0), tmp_ptr); \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%d", copy_tables->param_name)); \ @@ -179,10 +173,11 @@ int spider_udf_set_copy_tables_param_default( copy_tables->param_name = atoi(tmp_ptr2); \ if (copy_tables->param_name < min_val) \ copy_tables->param_name = min_val; \ + param_string_parse.set_param_value(tmp_ptr2, \ + tmp_ptr2 + \ + strlen(tmp_ptr2) + 1); \ } else { \ - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, \ - MYF(0), tmp_ptr); \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%d", copy_tables->param_name)); \ @@ -202,10 +197,11 @@ int spider_udf_set_copy_tables_param_default( my_strtoll10(tmp_ptr2, (char**) NULL, &error_num); \ if (copy_tables->param_name < min_val) \ copy_tables->param_name = min_val; \ + param_string_parse.set_param_value(tmp_ptr2, \ + tmp_ptr2 + \ + strlen(tmp_ptr2) + 1); \ } else { \ - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, \ - MYF(0), tmp_ptr); \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%lld", \ @@ -224,6 +220,7 @@ int spider_udf_parse_copy_tables_param( char *sprit_ptr[2]; char *tmp_ptr, *tmp_ptr2, *start_ptr; int title_length; + SPIDER_PARAM_STRING_PARSE param_string_parse; DBUG_ENTER("spider_udf_parse_copy_tables_param"); copy_tables->bulk_insert_interval = -1; copy_tables->bulk_insert_rows = -1; @@ -248,6 +245,7 @@ int spider_udf_parse_copy_tables_param( DBUG_PRINT("info",("spider param_string=%s", param_string)); sprit_ptr[0] = param_string; + param_string_parse.init(param_string, ER_SPIDER_INVALID_UDF_PARAM_NUM); while (sprit_ptr[0]) { if ((sprit_ptr[1] = strchr(sprit_ptr[0], ','))) @@ -274,10 +272,14 @@ int spider_udf_parse_copy_tables_param( title_length++; start_ptr++; } + param_string_parse.set_param_title(tmp_ptr, tmp_ptr + title_length); switch (title_length) { case 0: + error_num = param_string_parse.print_param_error(); + if (error_num) + goto error; continue; case 3: #ifndef WITHOUT_SPIDER_BG_SEARCH @@ -288,55 +290,43 @@ int spider_udf_parse_copy_tables_param( SPIDER_PARAM_STR("dtb", database); SPIDER_PARAM_INT_WITH_MAX("utc", use_table_charset, 0, 1); 
SPIDER_PARAM_INT_WITH_MAX("utr", use_transaction, 0, 1); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; #ifndef WITHOUT_SPIDER_BG_SEARCH case 7: SPIDER_PARAM_INT_WITH_MAX("bg_mode", bg_mode, 0, 1); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; #endif case 8: SPIDER_PARAM_STR("database", database); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 15: SPIDER_PARAM_INT_WITH_MAX("use_transaction", use_transaction, 0, 1); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 16: SPIDER_PARAM_LONGLONG("bulk_insert_rows", bulk_insert_rows, 1); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 17: SPIDER_PARAM_INT_WITH_MAX( "use_table_charset", use_table_charset, 0, 1); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 20: SPIDER_PARAM_INT("bulk_insert_interval", bulk_insert_interval, 0); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; default: - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; } + + /* Verify that the remainder of the parameter value is whitespace */ + if ((error_num = param_string_parse.has_extra_parameter_values())) + goto error; } set_default: diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc index 3c1c0885617..83de99e0d2f 100644 --- a/storage/spider/spd_db_conn.cc +++ b/storage/spider/spd_db_conn.cc @@ -121,7 +121,10 @@ int spider_db_connect( conn->net_write_timeout = spider_param_net_write_timeout(thd, share->net_write_timeouts[link_idx]); connect_retry_interval = spider_param_connect_retry_interval(thd); - connect_retry_count = spider_param_connect_retry_count(thd); + if (conn->disable_connect_retry) + connect_retry_count = 0; + else + connect_retry_count = spider_param_connect_retry_count(thd); } else { conn->connect_timeout = spider_param_connect_timeout(NULL, share->connect_timeouts[link_idx]); diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc index 6ff2e25bc27..21bbaaea2c9 100644 --- a/storage/spider/spd_db_mysql.cc +++ b/storage/spider/spd_db_mysql.cc @@ -11139,6 +11139,7 @@ int spider_mysql_handler::show_table_status( ulonglong auto_increment_value = 0; DBUG_ENTER("spider_mysql_handler::show_table_status"); DBUG_PRINT("info",("spider sts_mode=%d", sts_mode)); + if (sts_mode == 1) { pthread_mutex_lock(&conn->mta_conn_mutex); @@ -11146,6 +11147,7 @@ int spider_mysql_handler::show_table_status( conn->need_mon = &spider->need_mons[link_idx]; 
conn->mta_conn_mutex_lock_already = TRUE; conn->mta_conn_mutex_unlock_later = TRUE; + conn->disable_connect_retry = TRUE; spider_conn_set_timeout_from_share(conn, link_idx, spider->trx->thd, share); if ( @@ -11167,6 +11169,7 @@ int spider_mysql_handler::show_table_status( /* retry */ if ((error_num = spider_db_ping(spider, conn, link_idx))) { + conn->disable_connect_retry = FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos); @@ -11175,6 +11178,7 @@ int spider_mysql_handler::show_table_status( } if ((error_num = spider_db_set_names(spider, conn, link_idx))) { + conn->disable_connect_retry = FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos); @@ -11190,11 +11194,13 @@ int spider_mysql_handler::show_table_status( -1, &spider->need_mons[link_idx]) ) { + conn->disable_connect_retry = FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; DBUG_RETURN(spider_db_errorno(conn)); } } else { + conn->disable_connect_retry = FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos); @@ -11210,6 +11216,7 @@ int spider_mysql_handler::show_table_status( request_key.next = NULL; if (spider_param_dry_access()) { + conn->disable_connect_retry = FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos); @@ -11218,11 +11225,13 @@ int spider_mysql_handler::show_table_status( } if (!(res = conn->db_conn->store_result(NULL, &request_key, &error_num))) { + conn->disable_connect_retry = FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; if (error_num || (error_num = spider_db_errorno(conn))) DBUG_RETURN(error_num); - else { + else + { my_printf_error(ER_SPIDER_REMOTE_TABLE_NOT_FOUND_NUM, ER_SPIDER_REMOTE_TABLE_NOT_FOUND_STR, MYF(0), mysql_share->db_names_str[spider->conn_link_idx[link_idx]].ptr(), @@ -11231,6 +11240,7 @@ int spider_mysql_handler::show_table_status( DBUG_RETURN(ER_SPIDER_REMOTE_TABLE_NOT_FOUND_NUM); } } + conn->disable_connect_retry = FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos); @@ -11278,6 +11288,7 @@ int spider_mysql_handler::show_table_status( conn->need_mon = &spider->need_mons[link_idx]; conn->mta_conn_mutex_lock_already = TRUE; conn->mta_conn_mutex_unlock_later = TRUE; + conn->disable_connect_retry = TRUE; spider_conn_set_timeout_from_share(conn, link_idx, spider->trx->thd, share); if ( @@ -11299,6 +11310,7 @@ int spider_mysql_handler::show_table_status( /* retry */ if ((error_num = spider_db_ping(spider, conn, link_idx))) { + conn->disable_connect_retry = FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos); @@ -11307,6 +11319,7 @@ int spider_mysql_handler::show_table_status( } if ((error_num = spider_db_set_names(spider, conn, link_idx))) { + conn->disable_connect_retry = FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos); @@ -11322,11 +11335,13 @@ int spider_mysql_handler::show_table_status( -1, &spider->need_mons[link_idx]) ) { + conn->disable_connect_retry = 
FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; DBUG_RETURN(spider_db_errorno(conn)); } } else { + conn->disable_connect_retry = FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos); @@ -11342,6 +11357,7 @@ int spider_mysql_handler::show_table_status( request_key.next = NULL; if (spider_param_dry_access()) { + conn->disable_connect_retry = FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos); @@ -11350,6 +11366,7 @@ int spider_mysql_handler::show_table_status( } if (!(res = conn->db_conn->store_result(NULL, &request_key, &error_num))) { + conn->disable_connect_retry = FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; if (error_num || (error_num = spider_db_errorno(conn))) @@ -11357,6 +11374,7 @@ int spider_mysql_handler::show_table_status( else DBUG_RETURN(ER_QUERY_ON_FOREIGN_DATA_SOURCE); } + conn->disable_connect_retry = FALSE; conn->mta_conn_mutex_lock_already = FALSE; conn->mta_conn_mutex_unlock_later = FALSE; SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos); @@ -11413,6 +11431,7 @@ int spider_mysql_handler::show_table_status( DBUG_PRINT("info",("spider auto_increment_value=%llu", share->lgtm_tblhnd_share->auto_increment_value)); } + DBUG_RETURN(0); } diff --git a/storage/spider/spd_direct_sql.cc b/storage/spider/spd_direct_sql.cc index 4d69d9af615..de355c1068d 100644 --- a/storage/spider/spd_direct_sql.cc +++ b/storage/spider/spd_direct_sql.cc @@ -973,13 +973,12 @@ error: if (!direct_sql->param_name) \ { \ if ((direct_sql->param_name = spider_get_string_between_quote( \ - start_ptr, TRUE))) \ + start_ptr, TRUE, ¶m_string_parse))) \ direct_sql->SPIDER_PARAM_STR_LEN(param_name) = \ strlen(direct_sql->param_name); \ - else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + else \ + { \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%s", direct_sql->param_name)); \ @@ -999,9 +998,7 @@ error: { \ if (hint_num < 0 || hint_num >= max_size) \ { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } else if (direct_sql->param_name[hint_num] != -1) \ break; \ @@ -1014,17 +1011,13 @@ error: else if (direct_sql->param_name[hint_num] > max_val) \ direct_sql->param_name[hint_num] = max_val; \ } else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "[%d]=%d", hint_num, \ direct_sql->param_name[hint_num])); \ } else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } \ break; \ @@ -1043,10 +1036,11 @@ error: direct_sql->param_name = min_val; \ else if (direct_sql->param_name > max_val) \ direct_sql->param_name = max_val; \ + param_string_parse.set_param_value(tmp_ptr2, \ + tmp_ptr2 + \ + strlen(tmp_ptr2) + 1); \ } else { \ - error_num = 
ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%d", \ @@ -1066,10 +1060,11 @@ error: direct_sql->param_name = atoi(tmp_ptr2); \ if (direct_sql->param_name < min_val) \ direct_sql->param_name = min_val; \ + param_string_parse.set_param_value(tmp_ptr2, \ + tmp_ptr2 + \ + strlen(tmp_ptr2) + 1); \ } else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%d", direct_sql->param_name)); \ @@ -1089,10 +1084,11 @@ error: my_strtoll10(tmp_ptr2, (char**) NULL, &error_num); \ if (direct_sql->param_name < min_val) \ direct_sql->param_name = min_val; \ + param_string_parse.set_param_value(tmp_ptr2, \ + tmp_ptr2 + \ + strlen(tmp_ptr2) + 1); \ } else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = param_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%lld", \ @@ -1112,6 +1108,7 @@ int spider_udf_parse_direct_sql_param( char *sprit_ptr[2]; char *tmp_ptr, *tmp_ptr2, *start_ptr; int title_length; + SPIDER_PARAM_STRING_PARSE param_string_parse; DBUG_ENTER("spider_udf_parse_direct_sql_param"); direct_sql->tgt_port = -1; direct_sql->tgt_ssl_vsc = -1; @@ -1148,6 +1145,7 @@ int spider_udf_parse_direct_sql_param( DBUG_PRINT("info",("spider param_string=%s", param_string)); sprit_ptr[0] = param_string; + param_string_parse.init(param_string, ER_SPIDER_INVALID_UDF_PARAM_NUM); while (sprit_ptr[0]) { if ((sprit_ptr[1] = strchr(sprit_ptr[0], ','))) @@ -1174,10 +1172,14 @@ int spider_udf_parse_direct_sql_param( title_length++; start_ptr++; } + param_string_parse.set_param_title(tmp_ptr, tmp_ptr + title_length); switch (title_length) { case 0: + error_num = param_string_parse.print_param_error(); + if (error_num) + goto error; continue; case 3: #if defined(HS_HAS_SQLCOM) && defined(HAVE_HANDLERSOCKET) @@ -1203,120 +1205,92 @@ int spider_udf_parse_direct_sql_param( SPIDER_PARAM_INT_WITH_MAX("urt", use_real_table, 0, 1); #endif SPIDER_PARAM_INT("wto", net_write_timeout, 0); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 4: SPIDER_PARAM_INT_WITH_MAX("erwm", error_rw_mode, 0, 1); SPIDER_PARAM_STR("host", tgt_host); SPIDER_PARAM_INT_WITH_MAX("port", tgt_port, 0, 65535); SPIDER_PARAM_STR("user", tgt_username); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 6: SPIDER_PARAM_STR("server", server_name); SPIDER_PARAM_STR("socket", tgt_socket); SPIDER_PARAM_HINT_WITH_MAX("iop", iop, 3, direct_sql->table_count, 0, 2); SPIDER_PARAM_STR("ssl_ca", tgt_ssl_ca); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 7: SPIDER_PARAM_STR("wrapper", tgt_wrapper); SPIDER_PARAM_STR("ssl_key", tgt_ssl_key); - error_num = 
ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 8: SPIDER_PARAM_STR("database", tgt_default_db_name); SPIDER_PARAM_STR("password", tgt_password); SPIDER_PARAM_LONGLONG("priority", priority, 0); SPIDER_PARAM_STR("ssl_cert", tgt_ssl_cert); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 10: SPIDER_PARAM_STR("ssl_cipher", tgt_ssl_cipher); SPIDER_PARAM_STR("ssl_capath", tgt_ssl_capath); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 11: #if defined(HS_HAS_SQLCOM) && defined(HAVE_HANDLERSOCKET) SPIDER_PARAM_INT_WITH_MAX("access_mode", access_mode, 0, 2); #endif - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 12: SPIDER_PARAM_STR("default_file", tgt_default_file); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 13: SPIDER_PARAM_STR("default_group", tgt_default_group); SPIDER_PARAM_INT_WITH_MAX("error_rw_mode", error_rw_mode, 0, 1); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 14: #if MYSQL_VERSION_ID < 50500 #else SPIDER_PARAM_INT_WITH_MAX("use_real_table", use_real_table, 0, 1); #endif - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 15: SPIDER_PARAM_INT_WITH_MAX("table_loop_mode", table_loop_mode, 0, 2); SPIDER_PARAM_INT("connect_timeout", connect_timeout, 0); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 16: SPIDER_PARAM_LONGLONG("bulk_insert_rows", bulk_insert_rows, 1); SPIDER_PARAM_INT("net_read_timeout", net_read_timeout, 0); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 17: SPIDER_PARAM_INT("net_write_timeout", net_write_timeout, 0); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 18: SPIDER_PARAM_INT_WITH_MAX( "connection_channel", connection_channel, 0, 63); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; case 22: SPIDER_PARAM_INT_WITH_MAX("ssl_verify_server_cert", tgt_ssl_vsc, 0, 1); - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = 
param_string_parse.print_param_error(); goto error; default: - error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, - MYF(0), tmp_ptr); + error_num = param_string_parse.print_param_error(); goto error; } + + /* Verify that the remainder of the parameter value is whitespace */ + if ((error_num = param_string_parse.has_extra_parameter_values())) + goto error; } set_default: diff --git a/storage/spider/spd_i_s.cc b/storage/spider/spd_i_s.cc index 3ef04a0dacc..8a7ad752bcd 100644 --- a/storage/spider/spd_i_s.cc +++ b/storage/spider/spd_i_s.cc @@ -164,6 +164,6 @@ struct st_maria_plugin spider_i_s_alloc_mem_maria = NULL, NULL, "1.0", - MariaDB_PLUGIN_MATURITY_GAMMA, + MariaDB_PLUGIN_MATURITY_STABLE }; #endif diff --git a/storage/spider/spd_include.h b/storage/spider/spd_include.h index ca32facfa5d..bede60412e2 100644 --- a/storage/spider/spd_include.h +++ b/storage/spider/spd_include.h @@ -526,6 +526,9 @@ typedef struct st_spider_conn st_spider_conn *bulk_access_next; #endif + bool disable_connect_retry; /* TRUE if it is unnecessary to + retry to connect after a + connection error */ bool connect_error_with_message; char connect_error_msg[MYSQL_ERRMSG_SIZE]; int connect_error; diff --git a/storage/spider/spd_param.cc b/storage/spider/spd_param.cc index 6970f19e85f..9a771fe8357 100644 --- a/storage/spider/spd_param.cc +++ b/storage/spider/spd_param.cc @@ -969,19 +969,23 @@ bool spider_param_use_default_database( } /* - FALSE: sql_log_off = 0 - TRUE: sql_log_off = 1 - */ -static MYSQL_THDVAR_BOOL( - internal_sql_log_off, /* name */ - PLUGIN_VAR_OPCMDARG, /* opt */ - "Sync sql_log_off", /* comment */ - NULL, /* check */ - NULL, /* update */ - TRUE /* def */ +-1 :don't know or does not matter; don't send 'SET SQL_LOG_OFF' statement + 0 :do send 'SET SQL_LOG_OFF 0' statement to data nodes + 1 :do send 'SET SQL_LOG_OFF 1' statement to data nodes +*/ +static MYSQL_THDVAR_INT( + internal_sql_log_off, /* name */ + PLUGIN_VAR_RQCMDARG, /* opt */ + "Manage SQL_LOG_OFF mode statement to the data nodes", /* comment */ + NULL, /* check */ + NULL, /* update */ + -1, /* default */ + -1, /* min */ + 1, /* max */ + 0 /* blk */ ); -bool spider_param_internal_sql_log_off( +int spider_param_internal_sql_log_off( THD *thd ) { DBUG_ENTER("spider_param_internal_sql_log_off"); @@ -2224,15 +2228,15 @@ char *spider_param_remote_time_zone() static int spider_remote_sql_log_off; /* - -1 :don't set - 0 :sql_log_off = 0 - 1 :sql_log_off = 1 + -1 :don't know the value on all data nodes, or does not matter + 0 :sql_log_off = 0 on all data nodes + 1 :sql_log_off = 1 on all data nodes */ static MYSQL_SYSVAR_INT( remote_sql_log_off, spider_remote_sql_log_off, PLUGIN_VAR_RQCMDARG, - "Set sql_log_off mode at connecting for improvement performance of connection if you know", + "Set SQL_LOG_OFF mode on connecting for improved performance of connection, if you know", NULL, NULL, -1, @@ -3482,7 +3486,7 @@ maria_declare_plugin(spider) spider_status_variables, spider_system_variables, SPIDER_DETAIL_VERSION, - MariaDB_PLUGIN_MATURITY_GAMMA + MariaDB_PLUGIN_MATURITY_STABLE }, spider_i_s_alloc_mem_maria maria_declare_plugin_end; diff --git a/storage/spider/spd_param.h b/storage/spider/spd_param.h index d4af48a75ea..9a358c54be5 100644 --- a/storage/spider/spd_param.h +++ b/storage/spider/spd_param.h @@ -113,7 +113,7 @@ bool spider_param_sync_time_zone( bool spider_param_use_default_database( THD *thd ); -bool spider_param_internal_sql_log_off( +int spider_param_internal_sql_log_off( THD 
*thd ); int spider_param_bulk_size( diff --git a/storage/spider/spd_ping_table.cc b/storage/spider/spd_ping_table.cc index 58b44ec202e..680618e3087 100644 --- a/storage/spider/spd_ping_table.cc +++ b/storage/spider/spd_ping_table.cc @@ -54,11 +54,6 @@ extern PSI_mutex_key spd_key_mutex_mon_list_update_status; extern PSI_mutex_key spd_key_mutex_mon_table_cache; #endif -#ifndef WITHOUT_SPIDER_BG_SEARCH -extern pthread_mutex_t spider_global_trx_mutex; -extern SPIDER_TRX *spider_global_trx; -#endif - HASH *spider_udf_table_mon_list_hash; uint spider_udf_table_mon_list_hash_id; const char *spider_udf_table_mon_list_hash_func_name; @@ -134,7 +129,6 @@ SPIDER_TABLE_MON_LIST *spider_get_ping_table_mon_list( ) #endif { - DBUG_ASSERT(trx != spider_global_trx); if ( table_mon_list && table_mon_list->mon_table_cache_version != mon_table_cache_version @@ -659,29 +653,17 @@ SPIDER_CONN *spider_get_ping_table_tgt_conn( ) { SPIDER_CONN *conn; DBUG_ENTER("spider_get_ping_table_tgt_conn"); -#ifndef WITHOUT_SPIDER_BG_SEARCH - if (trx == spider_global_trx) - pthread_mutex_lock(&spider_global_trx_mutex); -#endif if ( !(conn = spider_get_conn( share, 0, share->conn_keys[0], trx, NULL, FALSE, FALSE, SPIDER_CONN_KIND_MYSQL, error_num)) ) { -#ifndef WITHOUT_SPIDER_BG_SEARCH - if (trx == spider_global_trx) - pthread_mutex_unlock(&spider_global_trx_mutex); -#endif my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), share->server_names[0]); *error_num = ER_CONNECT_TO_FOREIGN_DATA_SOURCE; goto error; } #ifndef DBUG_OFF - if (trx == spider_global_trx) - { - DBUG_ASSERT(!conn->thd); - } DBUG_PRINT("info",("spider conn->thd=%p", conn->thd)); if (conn->thd) { @@ -689,10 +671,6 @@ SPIDER_CONN *spider_get_ping_table_tgt_conn( } #endif conn->error_mode = 0; -#ifndef WITHOUT_SPIDER_BG_SEARCH - if (trx == spider_global_trx) - pthread_mutex_unlock(&spider_global_trx_mutex); -#endif DBUG_RETURN(conn); error: diff --git a/storage/spider/spd_sys_table.cc b/storage/spider/spd_sys_table.cc index 9f1a9850083..ed25e4fcf32 100644 --- a/storage/spider/spd_sys_table.cc +++ b/storage/spider/spd_sys_table.cc @@ -69,11 +69,13 @@ inline int spider_write_sys_table_row(TABLE *table, bool do_handle_error = TRUE) Update a Spider system table row. @param table The spider system table. + @param do_handle_error TRUE if an error message should be printed + before returning. @return Error code returned by the update. */ -inline int spider_update_sys_table_row(TABLE *table) +inline int spider_update_sys_table_row(TABLE *table, bool do_handle_error = TRUE) { int error_num; THD *thd = table->in_use; @@ -82,7 +84,7 @@ inline int spider_update_sys_table_row(TABLE *table) error_num = table->file->ha_update_row(table->record[1], table->record[0]); reenable_binlog(thd); - if (error_num) + if (error_num && do_handle_error) { if (error_num == HA_ERR_RECORD_IS_THE_SAME) error_num = 0; @@ -101,7 +103,7 @@ inline int spider_update_sys_table_row(TABLE *table) @param do_handle_error TRUE if an error message should be printed before returning. - @return Error code returned by the update. + @return Error code returned by the delete. 
*/ inline int spider_delete_sys_table_row(TABLE *table, int record_number = 0, @@ -1333,11 +1335,8 @@ int spider_insert_sys_table( ) { int error_num; DBUG_ENTER("spider_insert_sys_table"); - if ((error_num = table->file->ha_write_row(table->record[0]))) - { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_write_sys_table_row(table))) DBUG_RETURN(error_num); - } DBUG_RETURN(0); } @@ -1378,14 +1377,10 @@ int spider_insert_or_update_table_sts( table->file->print_error(error_num, MYF(0)); DBUG_RETURN(error_num); } - if ((error_num = table->file->ha_write_row(table->record[0]))) - { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_write_sys_table_row(table))) DBUG_RETURN(error_num); - } } else { - if ((error_num = table->file->ha_update_row(table->record[1], - table->record[0]))) + if ((error_num = spider_update_sys_table_row(table, FALSE))) { table->file->print_error(error_num, MYF(0)); DBUG_RETURN(error_num); @@ -1419,14 +1414,10 @@ int spider_insert_or_update_table_crd( table->file->print_error(error_num, MYF(0)); DBUG_RETURN(error_num); } - if ((error_num = table->file->ha_write_row(table->record[0]))) - { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_write_sys_table_row(table))) DBUG_RETURN(error_num); - } } else { - if ((error_num = table->file->ha_update_row(table->record[1], - table->record[0]))) + if ((error_num = spider_update_sys_table_row(table, FALSE))) { table->file->print_error(error_num, MYF(0)); DBUG_RETURN(error_num); @@ -1791,11 +1782,8 @@ int spider_delete_table_sts( /* no record is ok */ DBUG_RETURN(0); } else { - if ((error_num = table->file->ha_delete_row(table->record[0]))) - { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_delete_sys_table_row(table))) DBUG_RETURN(error_num); - } } DBUG_RETURN(0); @@ -1824,10 +1812,9 @@ int spider_delete_table_crd( DBUG_RETURN(0); } else { do { - if ((error_num = table->file->ha_delete_row(table->record[0]))) + if ((error_num = spider_delete_sys_table_row(table))) { spider_sys_index_end(table); - table->file->print_error(error_num, MYF(0)); DBUG_RETURN(error_num); } error_num = spider_sys_index_next_same(table, table_key); diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc index 76032ca80f0..283c9e7881d 100644 --- a/storage/spider/spd_table.cc +++ b/storage/spider/spd_table.cc @@ -151,9 +151,6 @@ PSI_mutex_key spd_key_mutex_conn; PSI_mutex_key spd_key_mutex_hs_r_conn; PSI_mutex_key spd_key_mutex_hs_w_conn; #endif -#ifndef WITHOUT_SPIDER_BG_SEARCH -PSI_mutex_key spd_key_mutex_global_trx; -#endif PSI_mutex_key spd_key_mutex_open_conn; PSI_mutex_key spd_key_mutex_allocated_thds; PSI_mutex_key spd_key_mutex_mon_table_cache; @@ -203,9 +200,6 @@ static PSI_mutex_info all_spider_mutexes[]= #if defined(HS_HAS_SQLCOM) && defined(HAVE_HANDLERSOCKET) { &spd_key_mutex_hs_r_conn, "hs_r_conn", PSI_FLAG_GLOBAL}, { &spd_key_mutex_hs_w_conn, "hs_w_conn", PSI_FLAG_GLOBAL}, -#endif -#ifndef WITHOUT_SPIDER_BG_SEARCH - { &spd_key_mutex_global_trx, "global_trx", PSI_FLAG_GLOBAL}, #endif { &spd_key_mutex_open_conn, "open_conn", PSI_FLAG_GLOBAL}, { &spd_key_mutex_allocated_thds, "allocated_thds", PSI_FLAG_GLOBAL}, @@ -386,9 +380,6 @@ pthread_mutex_t spider_allocated_thds_mutex; #ifndef WITHOUT_SPIDER_BG_SEARCH pthread_attr_t spider_pt_attr; - -pthread_mutex_t spider_global_trx_mutex; -SPIDER_TRX *spider_global_trx; #endif extern pthread_mutex_t spider_mem_calc_mutex; @@ -1048,7 +1039,8 @@ void spider_free_tmp_share_alloc( char 
*spider_get_string_between_quote( char *ptr, - bool alloc + bool alloc, + SPIDER_PARAM_STRING_PARSE *param_string_parse ) { char *start_ptr, *end_ptr, *tmp_ptr, *esc_ptr; bool find_flg = FALSE, esc_flg = FALSE; @@ -1139,6 +1131,10 @@ char *spider_get_string_between_quote( strcpy(esc_ptr, esc_ptr + 1); } } + + if (param_string_parse) + param_string_parse->set_param_value(start_ptr, start_ptr + strlen(start_ptr) + 1); + if (alloc) { DBUG_RETURN( @@ -1156,7 +1152,8 @@ int spider_create_string_list( uint **string_length_list, uint *list_length, char *str, - uint length + uint length, + SPIDER_PARAM_STRING_PARSE *param_string_parse ) { int roop_count; char *tmp_ptr, *tmp_ptr2, *tmp_ptr3, *esc_ptr; @@ -1164,6 +1161,7 @@ int spider_create_string_list( DBUG_ENTER("spider_create_string_list"); *list_length = 0; + param_string_parse->init_param_value(); if (!str) { *string_list = NULL; @@ -1274,6 +1272,10 @@ int spider_create_string_list( my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); DBUG_RETURN(HA_ERR_OUT_OF_MEM); } + + param_string_parse->set_param_value(tmp_ptr3, + tmp_ptr3 + strlen(tmp_ptr3) + 1); + DBUG_PRINT("info",("spider string_list[%d]=%s", roop_count, (*string_list)[roop_count])); @@ -1286,13 +1288,15 @@ int spider_create_long_list( char *str, uint length, long min_val, - long max_val + long max_val, + SPIDER_PARAM_STRING_PARSE *param_string_parse ) { int roop_count; char *tmp_ptr; DBUG_ENTER("spider_create_long_list"); *list_length = 0; + param_string_parse->init_param_value(); if (!str) { *long_list = NULL; @@ -1348,6 +1352,9 @@ int spider_create_long_list( (*long_list)[roop_count] = max_val; } + param_string_parse->set_param_value(tmp_ptr, + tmp_ptr + strlen(tmp_ptr) + 1); + #ifndef DBUG_OFF for (roop_count = 0; roop_count < (int) *list_length; roop_count++) { @@ -1365,13 +1372,15 @@ int spider_create_longlong_list( char *str, uint length, longlong min_val, - longlong max_val + longlong max_val, + SPIDER_PARAM_STRING_PARSE *param_string_parse ) { int error_num, roop_count; char *tmp_ptr; DBUG_ENTER("spider_create_longlong_list"); *list_length = 0; + param_string_parse->init_param_value(); if (!str) { *longlong_list = NULL; @@ -1428,6 +1437,9 @@ int spider_create_longlong_list( (*longlong_list)[roop_count] = max_val; } + param_string_parse->set_param_value(tmp_ptr, + tmp_ptr + strlen(tmp_ptr) + 1); + #ifndef DBUG_OFF for (roop_count = 0; roop_count < (int) *list_length; roop_count++) { @@ -1667,6 +1679,38 @@ static int spider_set_ll_value( DBUG_RETURN(error_num); } +/** + Print a parameter string error message. + + @return Error code. 
+*/ + +int st_spider_param_string_parse::print_param_error() +{ + if (start_title_ptr) + { + /* Restore the input delimiter characters */ + restore_delims(); + + /* Print the error message */ + switch (error_num) + { + case ER_SPIDER_INVALID_UDF_PARAM_NUM: + my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, + MYF(0), start_title_ptr); + break; + case ER_SPIDER_INVALID_CONNECT_INFO_NUM: + default: + my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, + MYF(0), start_title_ptr); + } + + return error_num; + } + else + return 0; +} + #define SPIDER_PARAM_STR_LEN(name) name ## _length #define SPIDER_PARAM_STR(title_name, param_name) \ if (!strncasecmp(tmp_ptr, title_name, title_length)) \ @@ -1675,12 +1719,11 @@ static int spider_set_ll_value( if (!share->param_name) \ { \ if ((share->param_name = spider_get_string_between_quote( \ - start_ptr, TRUE))) \ + start_ptr, TRUE, &connect_string_parse))) \ share->SPIDER_PARAM_STR_LEN(param_name) = strlen(share->param_name); \ - else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + else \ + { \ + error_num = connect_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%s", share->param_name)); \ @@ -1704,12 +1747,11 @@ static int spider_set_ll_value( &share->SPIDER_PARAM_STR_LENS(param_name), \ &share->SPIDER_PARAM_STR_LEN(param_name), \ tmp_ptr2, \ - share->SPIDER_PARAM_STR_CHARLEN(param_name)))) \ + share->SPIDER_PARAM_STR_CHARLEN(param_name), \ + &connect_string_parse))) \ goto error; \ } else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = connect_string_parse.print_param_error(); \ goto error; \ } \ } \ @@ -1727,9 +1769,7 @@ static int spider_set_ll_value( { \ if (hint_num < 0 || hint_num >= max_size) \ { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = connect_string_parse.print_param_error(); \ goto error; \ } else if (share->param_name[hint_num].length() > 0) \ break; \ @@ -1740,9 +1780,7 @@ static int spider_set_ll_value( DBUG_PRINT("info",("spider " title_name "[%d]=%s", hint_num, \ share->param_name[hint_num].ptr())); \ } else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = connect_string_parse.print_param_error(); \ goto error; \ } \ break; \ @@ -1759,9 +1797,7 @@ static int spider_set_ll_value( { \ if (hint_num < 0 || hint_num >= max_size) \ { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = connect_string_parse.print_param_error(); \ goto error; \ } else if (share->param_name[hint_num] != -1) \ break; \ @@ -1772,9 +1808,7 @@ static int spider_set_ll_value( DBUG_PRINT("info",("spider " title_name "[%d]=%lld", hint_num, \ share->param_name[hint_num])); \ } else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = connect_string_parse.print_param_error(); \ goto error; \ } \ break; \ @@ -1795,12 +1829,11 @@ static int spider_set_ll_value( &share->SPIDER_PARAM_LONG_LEN(param_name), \ tmp_ptr2, \ strlen(tmp_ptr2), \ - min_val, max_val))) \ + min_val, 
max_val, \ + &connect_string_parse))) \ goto error; \ } else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = connect_string_parse.print_param_error(); \ goto error; \ } \ } \ @@ -1822,12 +1855,11 @@ static int spider_set_ll_value( &share->SPIDER_PARAM_LONGLONG_LEN(param_name), \ tmp_ptr2, \ strlen(tmp_ptr2), \ - min_val, max_val))) \ + min_val, max_val, \ + &connect_string_parse))) \ goto error; \ } else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = connect_string_parse.print_param_error(); \ goto error; \ } \ } \ @@ -1847,10 +1879,11 @@ static int spider_set_ll_value( share->param_name = min_val; \ else if (share->param_name > max_val) \ share->param_name = max_val; \ + connect_string_parse.set_param_value(tmp_ptr2, \ + tmp_ptr2 + \ + strlen(tmp_ptr2) + 1); \ } else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = connect_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%d", share->param_name)); \ @@ -1869,10 +1902,11 @@ static int spider_set_ll_value( share->param_name = atoi(tmp_ptr2); \ if (share->param_name < min_val) \ share->param_name = min_val; \ + connect_string_parse.set_param_value(tmp_ptr2, \ + tmp_ptr2 + \ + strlen(tmp_ptr2) + 1); \ } else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = connect_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%d", share->param_name)); \ @@ -1891,10 +1925,11 @@ static int spider_set_ll_value( share->param_name = my_atof(tmp_ptr2); \ if (share->param_name < min_val) \ share->param_name = min_val; \ + connect_string_parse.set_param_value(tmp_ptr2, \ + tmp_ptr2 + \ + strlen(tmp_ptr2) + 1); \ } else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = connect_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%f", share->param_name)); \ @@ -1913,10 +1948,11 @@ static int spider_set_ll_value( share->param_name = my_strtoll10(tmp_ptr2, (char**) NULL, &error_num); \ if (share->param_name < min_val) \ share->param_name = min_val; \ + connect_string_parse.set_param_value(tmp_ptr2, \ + tmp_ptr2 + \ + strlen(tmp_ptr2) + 1); \ } else { \ - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ - MYF(0), tmp_ptr); \ + error_num = connect_string_parse.print_param_error(); \ goto error; \ } \ DBUG_PRINT("info",("spider " title_name "=%lld", share->param_name)); \ @@ -1938,6 +1974,7 @@ int spider_parse_connect_info( char *tmp_ptr, *tmp_ptr2, *start_ptr; int roop_count; int title_length; + SPIDER_PARAM_STRING_PARSE connect_string_parse; SPIDER_ALTER_TABLE *share_alter; #ifdef WITH_PARTITION_STORAGE_ENGINE partition_element *part_elem; @@ -2126,6 +2163,7 @@ int spider_parse_connect_info( } sprit_ptr[0] = connect_string; + connect_string_parse.init(connect_string, ER_SPIDER_INVALID_CONNECT_INFO_NUM); while (sprit_ptr[0]) { if ((sprit_ptr[1] = strchr(sprit_ptr[0], ','))) @@ -2152,10 +2190,14 @@ int spider_parse_connect_info( 
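The hunk below records each parameter title with set_param_title() and then dispatches on the title length. For orientation, a Spider parameter string is a comma-separated list of unquoted titles followed by quoted values; the short sketch below uses an invented example string (not taken from this patch) to show how such a definition splits into the title/value pairs the loop walks over.

// Illustrative only: a connect string in the syntax the parser expects
// (unquoted titles, quoted values, commas between definitions), split the
// way the parsing loop walks sprit_ptr.  title.size() is what the big
// switch() in the parser dispatches on.
#include <cstdio>
#include <sstream>
#include <string>

int main() {
  std::string connect_string =
      "host \"remote1\", port \"3307\", database \"db1\", table \"t1\"";

  std::stringstream ss(connect_string);
  std::string definition;
  while (std::getline(ss, definition, ',')) {
    std::istringstream one(definition);
    std::string title, quoted_value;
    one >> title >> quoted_value;                            // title then "value"
    std::string value = quoted_value.substr(1, quoted_value.size() - 2);
    std::printf("title=%s (len %zu) value=%s\n",
                title.c_str(), title.size(), value.c_str());
  }
  return 0;
}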
title_length++; start_ptr++; } + connect_string_parse.set_param_title(tmp_ptr, tmp_ptr + title_length); switch (title_length) { case 0: + error_num = connect_string_parse.print_param_error(); + if (error_num) + goto error; continue; case 3: SPIDER_PARAM_LONG_LIST_WITH_MAX("abl", access_balances, 0, @@ -2296,23 +2338,17 @@ int spider_parse_connect_info( #endif SPIDER_PARAM_INT_WITH_MAX("upu", use_pushdown_udf, 0, 1); SPIDER_PARAM_INT_WITH_MAX("utc", use_table_charset, 0, 1); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 4: SPIDER_PARAM_STR_LIST("host", tgt_hosts); SPIDER_PARAM_STR_LIST("user", tgt_usernames); SPIDER_PARAM_LONG_LIST_WITH_MAX("port", tgt_ports, 0, 65535); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 5: SPIDER_PARAM_STR_LIST("table", tgt_table_names); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 6: SPIDER_PARAM_STR_LIST("server", server_names); @@ -2322,17 +2358,13 @@ int spider_parse_connect_info( SPIDER_PARAM_STR_LIST("ssl_ca", tgt_ssl_cas); SPIDER_PARAM_NUMHINT("skc", static_key_cardinality, 3, (int) table_share->keys, spider_set_ll_value); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 7: SPIDER_PARAM_STR_LIST("wrapper", tgt_wrappers); SPIDER_PARAM_STR_LIST("ssl_key", tgt_ssl_keys); SPIDER_PARAM_STR_LIST("pk_name", tgt_pk_names); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 8: SPIDER_PARAM_STR_LIST("database", tgt_dbs); @@ -2352,18 +2384,14 @@ int spider_parse_connect_info( #endif SPIDER_PARAM_STR_LIST("ssl_cert", tgt_ssl_certs); SPIDER_PARAM_INT_WITH_MAX("bka_mode", bka_mode, 0, 2); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 9: SPIDER_PARAM_INT("max_order", max_order, 0); SPIDER_PARAM_INT("bulk_size", bulk_size, 0); SPIDER_PARAM_DOUBLE("scan_rate", scan_rate, 0); SPIDER_PARAM_DOUBLE("read_rate", read_rate, 0); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 10: SPIDER_PARAM_DOUBLE("crd_weight", crd_weight, 1); @@ -2373,9 +2401,7 @@ int spider_parse_connect_info( SPIDER_PARAM_STR_LIST("ssl_capath", tgt_ssl_capaths); SPIDER_PARAM_STR("bka_engine", bka_engine); SPIDER_PARAM_LONGLONG("first_read", first_read, 0); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 11: SPIDER_PARAM_INT_WITH_MAX("query_cache", query_cache, 0, 2); @@ -2389,9 +2415,7 @@ int spider_parse_connect_info( 
SPIDER_PARAM_LONG_LIST_WITH_MAX("use_hs_read", use_hs_reads, 0, 1); #endif SPIDER_PARAM_INT_WITH_MAX("casual_read", casual_read, 0, 63); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 12: SPIDER_PARAM_DOUBLE("sts_interval", sts_interval, 0); @@ -2404,9 +2428,7 @@ int spider_parse_connect_info( SPIDER_PARAM_LONG_LIST_WITH_MAX( "hs_read_port", hs_read_ports, 0, 65535); #endif - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 13: SPIDER_PARAM_STR_LIST("default_group", tgt_default_groups); @@ -2415,9 +2437,7 @@ int spider_parse_connect_info( "hs_write_port", hs_write_ports, 0, 65535); #endif SPIDER_PARAM_STR_LIST("sequence_name", tgt_sequence_names); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 14: SPIDER_PARAM_LONGLONG("internal_limit", internal_limit, 0); @@ -2434,9 +2454,7 @@ int spider_parse_connect_info( SPIDER_PARAM_STR_LIST("static_link_id", static_link_ids); SPIDER_PARAM_INT_WITH_MAX("store_last_crd", store_last_crd, 0, 1); SPIDER_PARAM_INT_WITH_MAX("store_last_sts", store_last_sts, 0, 1); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 15: SPIDER_PARAM_LONGLONG("internal_offset", internal_offset, 0); @@ -2456,9 +2474,7 @@ int spider_parse_connect_info( SPIDER_PARAM_LONG_LIST_WITH_MAX("connect_timeout", connect_timeouts, 0, 2147483647); SPIDER_PARAM_INT_WITH_MAX("error_read_mode", error_read_mode, 0, 1); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 16: SPIDER_PARAM_INT_WITH_MAX( @@ -2490,9 +2506,7 @@ int spider_parse_connect_info( #endif SPIDER_PARAM_INT_WITH_MAX( "query_cache_sync", query_cache_sync, 0, 3); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 17: SPIDER_PARAM_INT_WITH_MAX( @@ -2512,9 +2526,7 @@ int spider_parse_connect_info( SPIDER_PARAM_INT_WITH_MAX( "force_bulk_update", force_bulk_update, 0, 1); #endif - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 18: SPIDER_PARAM_INT_WITH_MAX( @@ -2527,9 +2539,7 @@ int spider_parse_connect_info( #endif SPIDER_PARAM_LONGLONG( "direct_order_limit", direct_order_limit, 0); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 19: SPIDER_PARAM_INT("init_sql_alloc_size", init_sql_alloc_size, 0); @@ -2544,9 +2554,7 @@ int spider_parse_connect_info( "load_crd_at_startup", load_crd_at_startup, 0, 1); SPIDER_PARAM_INT_WITH_MAX( "load_sts_at_startup", load_sts_at_startup, 0, 1); - error_num 
= ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 20: SPIDER_PARAM_LONGLONG_LIST_WITH_MAX( @@ -2555,16 +2563,12 @@ int spider_parse_connect_info( "delete_all_rows_type", delete_all_rows_type, 0, 1); SPIDER_PARAM_INT_WITH_MAX( "skip_parallel_search", skip_parallel_search, 0, 3); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 21: SPIDER_PARAM_LONGLONG( "semi_split_read_limit", semi_split_read_limit, 0); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 22: SPIDER_PARAM_LONG_LIST_WITH_MAX( @@ -2577,46 +2581,38 @@ int spider_parse_connect_info( "skip_default_condition", skip_default_condition, 0, 1); SPIDER_PARAM_LONGLONG( "static_mean_rec_length", static_mean_rec_length, 0); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 23: SPIDER_PARAM_INT_WITH_MAX( "internal_optimize_local", internal_optimize_local, 0, 1); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 25: SPIDER_PARAM_LONGLONG("static_records_for_status", static_records_for_status, 0); SPIDER_PARAM_NUMHINT("static_key_cardinality", static_key_cardinality, 3, (int) table_share->keys, spider_set_ll_value); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 26: SPIDER_PARAM_INT_WITH_MAX( "semi_table_lock_connection", semi_table_lock_conn, 0, 1); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; case 32: SPIDER_PARAM_LONG_LIST_WITH_MAX("monitoring_binlog_pos_at_failing", monitoring_binlog_pos_at_failing, 0, 2); - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; default: - error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; - my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, - MYF(0), tmp_ptr); + error_num = connect_string_parse.print_param_error(); goto error; } + + /* Verify that the remainder of the parameter value is whitespace */ + if ((error_num = connect_string_parse.has_extra_parameter_values())) + goto error; } } @@ -6616,10 +6612,6 @@ int spider_db_done( do_delete_thd = TRUE; } -#ifndef WITHOUT_SPIDER_BG_SEARCH - spider_free_trx(spider_global_trx, TRUE); -#endif - for (roop_count = SPIDER_DBTON_SIZE - 1; roop_count >= 0; roop_count--) { if (spider_dbton[roop_count].deinit) @@ -6802,9 +6794,6 @@ int spider_db_done( pthread_mutex_destroy(&spider_mon_table_cache_mutex); pthread_mutex_destroy(&spider_allocated_thds_mutex); pthread_mutex_destroy(&spider_open_conn_mutex); -#ifndef WITHOUT_SPIDER_BG_SEARCH 
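The surrounding removals drop spider_global_trx and its mutex from spider_db_init()/spider_db_done(): the background statistics, cardinality and monitoring threads now pass their own trx to spider_get_conn(), so no global lock is needed around the connection lookup. A rough before/after sketch with placeholder types (Trx, Conn, get_conn), not the real Spider signatures:

// Before: every background thread funneled through one shared trx,
// serialized by a global mutex.  After: each thread owns its trx, so the
// connection lookup needs no global locking.  Types are placeholders.
#include <mutex>

struct Trx {};
struct Conn {};

Conn *get_conn(Trx *trx) { (void) trx; static Conn c; return &c; }

// Old shape (removed by this patch):
namespace before {
  Trx global_trx;
  std::mutex global_trx_mutex;
  Conn *bg_get_conn() {
    std::lock_guard<std::mutex> lock(global_trx_mutex);  // serializes all threads
    return get_conn(&global_trx);
  }
}

// New shape: the caller's own trx, no global lock.
namespace after {
  Conn *bg_get_conn(Trx *my_trx) { return get_conn(my_trx); }
}

int main() {
  Trx my_trx;
  before::bg_get_conn();
  after::bg_get_conn(&my_trx);
  return 0;
}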
- pthread_mutex_destroy(&spider_global_trx_mutex); -#endif #if defined(HS_HAS_SQLCOM) && defined(HAVE_HANDLERSOCKET) pthread_mutex_destroy(&spider_hs_w_conn_mutex); pthread_mutex_destroy(&spider_hs_r_conn_mutex); @@ -7080,18 +7069,6 @@ int spider_db_init( error_num = HA_ERR_OUT_OF_MEM; goto error_conn_mutex_init; } -#ifndef WITHOUT_SPIDER_BG_SEARCH -#if MYSQL_VERSION_ID < 50500 - if (pthread_mutex_init(&spider_global_trx_mutex, MY_MUTEX_INIT_FAST)) -#else - if (mysql_mutex_init(spd_key_mutex_global_trx, - &spider_global_trx_mutex, MY_MUTEX_INIT_FAST)) -#endif - { - error_num = HA_ERR_OUT_OF_MEM; - goto error_global_trx_mutex_init; - } -#endif #if MYSQL_VERSION_ID < 50500 if (pthread_mutex_init(&spider_open_conn_mutex, MY_MUTEX_INIT_FAST)) #else @@ -7394,16 +7371,9 @@ int spider_db_init( } } -#ifndef WITHOUT_SPIDER_BG_SEARCH - if (!(spider_global_trx = spider_get_trx(NULL, FALSE, &error_num))) - goto error; -#endif - DBUG_RETURN(0); #ifndef WITHOUT_SPIDER_BG_SEARCH -error: - roop_count = SPIDER_DBTON_SIZE; error_init_dbton: for (roop_count--; roop_count >= 0; roop_count--) { @@ -7520,10 +7490,6 @@ error_hs_r_conn_mutex_init: #endif pthread_mutex_destroy(&spider_open_conn_mutex); error_open_conn_mutex_init: -#ifndef WITHOUT_SPIDER_BG_SEARCH - pthread_mutex_destroy(&spider_global_trx_mutex); -error_global_trx_mutex_init: -#endif pthread_mutex_destroy(&spider_conn_mutex); error_conn_mutex_init: pthread_mutex_destroy(&spider_lgtm_tblhnd_share_mutex); @@ -10160,7 +10126,7 @@ void *spider_table_bg_crd_action( { spider_get_conn(share, spider->search_link_idx, share->conn_keys[spider->search_link_idx], - spider_global_trx, spider, FALSE, FALSE, SPIDER_CONN_KIND_MYSQL, + trx, spider, FALSE, FALSE, SPIDER_CONN_KIND_MYSQL, &error_num); if (conns[spider->search_link_idx]) { diff --git a/storage/spider/spd_table.h b/storage/spider/spd_table.h index 7165c4504f8..43958ca6e78 100644 --- a/storage/spider/spd_table.h +++ b/storage/spider/spd_table.h @@ -13,6 +13,210 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* + Structure used to manage Spider parameter string parsing. Types of + parameters include: + - connection strings + - UDF parameters + + A parameter string consists of one or more parameter definitions using + the following syntax: + + A comma is the separator character between multiple parameter definitions. + Parameter titles must not be quoted. Parameter values must be quoted with + single or double quotes. +*/ + +typedef struct st_spider_param_string_parse +{ + char *start_ptr; /* Pointer to the start of the parameter string */ + char *end_ptr; /* Pointer to the end of the parameter string */ + char *start_title_ptr; /* Pointer to the start of the current parameter + title */ + char *end_title_ptr; /* Pointer to the end of the current parameter + title */ + char *start_value_ptr; /* Pointer to the start of the current parameter + value */ + char *end_value_ptr; /* Pointer to the end of the current parameter + value */ + int error_num; /* Error code of the error message to print when + an error is detected */ + uint delim_title_len; /* Length of the paramater title's delimiter */ + uint delim_value_len; /* Length of the paramater value's delimiter */ + char delim_title; /* Current parameter title's delimiter character */ + char delim_value; /* Current parameter value's delimiter character */ + + /** + Initialize the parameter string parse information. 
+ + @param param_string Pointer to the parameter string being parsed. + @param error_code Error code of the error message to print when + an error is detected. + */ + + inline void init(char *param_string, int error_code) + { + start_ptr = param_string; + end_ptr = start_ptr + strlen(start_ptr); + + init_param_title(); + init_param_value(); + + error_num = error_code; + } + + /** + Initialize the current parameter title. + */ + + inline void init_param_title() + { + start_title_ptr = end_title_ptr = NULL; + delim_title_len = 0; + delim_title = '\0'; + } + + /** + Save pointers to the start and end positions of the current parameter + title in the parameter string. Also save the parameter title's + delimiter character. + + @param start_value Pointer to the start position of the current + parameter title. + @param end_value Pointer to the end position of the current + parameter title. + */ + + inline void set_param_title(char *start_title, char *end_title) + { + start_title_ptr = start_title; + end_title_ptr = end_title; + + if (*start_title == '"' || + *start_title == '\'') + { + delim_title = *start_title; + + if (start_title >= start_ptr && *--start_title == '\\') + delim_title_len = 2; + else + delim_title_len = 1; + } + } + + /** + Initialize the current parameter value. + */ + + inline void init_param_value() + { + start_value_ptr = end_value_ptr = NULL; + delim_value_len = 0; + delim_value = '\0'; + } + + /** + Save pointers to the start and end positions of the current parameter + value in the parameter string. Also save the parameter value's + delimiter character. + + @param start_value Pointer to the start position of the current + parameter value. + @param end_value Pointer to the end position of the current + parameter value. + */ + + inline void set_param_value(char *start_value, char *end_value) + { + start_value_ptr = start_value--; + end_value_ptr = end_value; + + if (*start_value == '"' || + *start_value == '\'') + { + delim_value = *start_value; + + if (*--start_value == '\\') + delim_value_len = 2; + else + delim_value_len = 1; + } + } + + /** + Determine whether the current parameter in the parameter string has + extra parameter values. + + @return 0 Current parameter value in the parameter string + does not have extra parameter values. + <> 0 Error code indicating that the current parameter + value in the parameter string has extra + parameter values. + */ + + inline int has_extra_parameter_values() + { + int error_num = 0; + DBUG_ENTER("has_extra_parameter_values"); + + if (end_value_ptr) + { + /* There is a current parameter value */ + char *end_param_ptr = end_value_ptr; + + while (end_param_ptr < end_ptr && + (*end_param_ptr == ' ' || *end_param_ptr == '\r' || + *end_param_ptr == '\n' || *end_param_ptr == '\t')) + end_param_ptr++; + + if (end_param_ptr < end_ptr && *end_param_ptr != '\0') + { + /* Extra values in parameter definition */ + error_num = print_param_error(); + } + } + + DBUG_RETURN(error_num); + } + + /** + Restore the current parameter's input delimiter characters in the + parameter string. They were NULLed during parameter parsing. + */ + + inline void restore_delims() + { + char *end = end_title_ptr - 1; + + switch (delim_title_len) + { + case 2: + *end++ = '\\'; + /* Fall through */ + case 1: + *end = delim_title; + } + + end = end_value_ptr - 1; + switch (delim_value_len) + { + case 2: + *end++ = '\\'; + /* Fall through */ + case 1: + *end = delim_value; + } + } + + /** + Print a parameter string error message. + + @return Error code. 
+ */ + + int print_param_error(); +} SPIDER_PARAM_STRING_PARSE; + uchar *spider_tbl_get_key( SPIDER_SHARE *share, size_t *length, @@ -60,7 +264,8 @@ void spider_free_tmp_share_alloc( char *spider_get_string_between_quote( char *ptr, - bool alloc + bool alloc, + SPIDER_PARAM_STRING_PARSE *param_string_parse = NULL ); int spider_create_string_list( @@ -68,7 +273,8 @@ int spider_create_string_list( uint **string_length_list, uint *list_length, char *str, - uint length + uint length, + SPIDER_PARAM_STRING_PARSE *param_string_parse ); int spider_create_long_list( @@ -77,7 +283,8 @@ int spider_create_long_list( char *str, uint length, long min_val, - long max_val + long max_val, + SPIDER_PARAM_STRING_PARSE *param_string_parse ); int spider_create_longlong_list( @@ -86,7 +293,8 @@ int spider_create_longlong_list( char *str, uint length, longlong min_val, - longlong max_val + longlong max_val, + SPIDER_PARAM_STRING_PARSE *param_string_parse ); int spider_increase_string_list( diff --git a/storage/spider/spd_trx.cc b/storage/spider/spd_trx.cc index 0a56eafeb6e..cf60a0376bb 100644 --- a/storage/spider/spd_trx.cc +++ b/storage/spider/spd_trx.cc @@ -1638,15 +1638,20 @@ int spider_check_and_set_sql_log_off( SPIDER_CONN *conn, int *need_mon ) { - bool internal_sql_log_off; + int internal_sql_log_off; DBUG_ENTER("spider_check_and_set_sql_log_off"); internal_sql_log_off = spider_param_internal_sql_log_off(thd); - if (internal_sql_log_off) + if (internal_sql_log_off != -1) { - spider_conn_queue_sql_log_off(conn, TRUE); - } else { - spider_conn_queue_sql_log_off(conn, FALSE); + if (internal_sql_log_off) + { + spider_conn_queue_sql_log_off(conn, TRUE); + } + else + { + spider_conn_queue_sql_log_off(conn, FALSE); + } } /* if (internal_sql_log_off && conn->sql_log_off != 1) @@ -2811,7 +2816,7 @@ int spider_internal_xa_commit_by_xid( SPIDER_TRX *trx, XID* xid ) { - TABLE *table_xa, *table_xa_member; + TABLE *table_xa, *table_xa_member= 0; int error_num; char xa_key[MAX_KEY_LENGTH]; char xa_member_key[MAX_KEY_LENGTH]; @@ -3046,7 +3051,7 @@ int spider_internal_xa_rollback_by_xid( SPIDER_TRX *trx, XID* xid ) { - TABLE *table_xa, *table_xa_member; + TABLE *table_xa, *table_xa_member= 0; int error_num; char xa_key[MAX_KEY_LENGTH]; char xa_member_key[MAX_KEY_LENGTH]; diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt index 0824ced7079..16f7ab4ce75 100644 --- a/storage/tokudb/CMakeLists.txt +++ b/storage/tokudb/CMakeLists.txt @@ -1,4 +1,4 @@ -SET(TOKUDB_VERSION 5.6.38-83.0) +SET(TOKUDB_VERSION 5.6.39-83.1) # PerconaFT only supports x86-64 and cmake-2.8.9+ IF(CMAKE_VERSION VERSION_LESS "2.8.9") MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB") @@ -35,7 +35,7 @@ SET(TOKUDB_SOURCES tokudb_thread.cc tokudb_dir_cmd.cc) MYSQL_ADD_PLUGIN(tokudb ${TOKUDB_SOURCES} STORAGE_ENGINE MODULE_ONLY - COMPONENT tokudb-engine CONFIG tokudb.cnf) + COMPONENT tokudb-engine CONFIG ${CMAKE_CURRENT_BINARY_DIR}/tokudb.cnf) IF(NOT TARGET tokudb) RETURN() @@ -46,6 +46,33 @@ CHECK_JEMALLOC() IF(NOT LIBJEMALLOC) MESSAGE(WARNING "TokuDB is enabled, but jemalloc is not. 
This configuration is not supported") +ELSEIF(LIBJEMALLOC STREQUAL jemalloc) + FIND_LIBRARY(LIBJEMALLOC_SO jemalloc) + IF(NOT LIBJEMALLOC_SO) + MESSAGE(FATAL_ERROR "jemalloc is present, but cannot be found?") + ENDIF() + GET_FILENAME_COMPONENT(LIBJEMALLOC_PATH ${LIBJEMALLOC_SO} REALPATH CACHE) +ENDIF() + +IF(LIBJEMALLOC_PATH AND RPM MATCHES fedora28) # TODO check for jemalloc version + UNSET(LIBJEMALLOC) + GET_DIRECTORY_PROPERTY(V DIRECTORY ${CMAKE_SOURCE_DIR} DEFINITION CPACK_RPM_tokudb-engine_PACKAGE_REQUIRES) + SET(CPACK_RPM_tokudb-engine_PACKAGE_REQUIRES "${V} jemalloc" PARENT_SCOPE) + SET(systemd_env "Environment=\"LD_PRELOAD=${LIBJEMALLOC_PATH}\"") #" + SET(cnf_malloc_lib "malloc-lib=${LIBJEMALLOC_PATH}") +ELSEIF(LIBJEMALLOC_PATH) + SET(systemd_env "#Environment=\"LD_PRELOAD=${LIBJEMALLOC_PATH}\"") #" + SET(cnf_malloc_lib "#malloc-lib=${LIBJEMALLOC_PATH}") +ELSE() + SET(systemd_env "#Environment=\"LD_PRELOAD=/path/to/libjemalloc.so\"") #" + SET(cnf_malloc_lib "#malloc-lib=/path/to/libjemalloc.so") +ENDIF() +CONFIGURE_FILE(tokudb.cnf.in tokudb.cnf @ONLY) +CONFIGURE_FILE(tokudb.conf.in tokudb.conf @ONLY) +IF(INSTALL_SYSCONFDIR) + INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/tokudb.conf + DESTINATION ${INSTALL_SYSCONFDIR}/systemd/system/mariadb.service.d/ + COMPONENT tokudb-engine) ENDIF() MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-vla" DEBUG) diff --git a/storage/tokudb/PerconaFT/.clang-format b/storage/tokudb/PerconaFT/.clang-format new file mode 100644 index 00000000000..0888185848d --- /dev/null +++ b/storage/tokudb/PerconaFT/.clang-format @@ -0,0 +1,36 @@ +Language: Cpp +BasedOnStyle: Google + +# The following parameters are default for Google style, +# but as they are important for our project they +# are set explicitly here +AlignAfterOpenBracket: Align +BreakBeforeBinaryOperators: None +ColumnLimit: 80 +PointerAlignment: Left +SpaceAfterCStyleCast: false +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +UseTab: Never + +# Non-default parametes +NamespaceIndentation: All +IndentWidth: 4 +TabWidth: 4 +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +BinPackParameters: false +BinPackArguments: false +ExperimentalAutoDetectBinPacking: false +AllowAllParametersOfDeclarationOnNextLine: false +#AlignConsecutiveAssignments: yes +#AlignConsecutiveDeclarations: yes +BreakStringLiterals: false +ReflowComments: true diff --git a/storage/tokudb/PerconaFT/CMakeLists.txt b/storage/tokudb/PerconaFT/CMakeLists.txt index 5c6d938808e..90ad3f2b1af 100644 --- a/storage/tokudb/PerconaFT/CMakeLists.txt +++ b/storage/tokudb/PerconaFT/CMakeLists.txt @@ -9,6 +9,12 @@ project(TokuDB) set(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "") set(CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "") +# See: https://jira.percona.com/browse/TDB-93 +IF(CMAKE_CXX_COMPILER_ID MATCHES "Clang") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-address-of-packed-member") + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-address-of-packed-member") +ENDIF() + # detect when we are being built as a subproject if (DEFINED MYSQL_PROJECT_NAME_DOCSTRING) add_definitions( -DMYSQL_TOKUDB_ENGINE=1) diff --git a/storage/tokudb/PerconaFT/README.md b/storage/tokudb/PerconaFT/README.md index d53caf00190..ffb646b67af 100644 --- a/storage/tokudb/PerconaFT/README.md +++ b/storage/tokudb/PerconaFT/README.md @@ 
-9,20 +9,18 @@ PerconaFT is provided as a shared library with an interface similar to Berkeley DB. To build the full MySQL product, see the instructions for -[Percona/tokudb-engine][tokudb-engine]. To build TokuMX, see the instructions -for [Percona/percona-server-mongodb][mongo]. This document covers PerconaFT only. +[Percona/percona-server][percona-server]. This document covers PerconaFT only. -[tokudb-engine]: https://github.com/Percona/tokudb-engine -[mongo]: https://github.com/Percona/percona-server-mongodb +[percona-server]: https://github.com/Percona/percona-server Building -------- PerconaFT is built using CMake >= 2.8.9. Out-of-source builds are -recommended. You need a C++11 compiler, though only GCC >= 4.7 and -Apple's Clang are tested. You also need zlib development packages -(`yum install zlib-devel` or `apt-get install zlib1g-dev`). +recommended. You need a C++11 compiler, though only some versions +of GCC >= 4.7 and Clang are tested. You also need zlib development +packages (`yum install zlib-devel` or `apt-get install zlib1g-dev`). You will also need the source code for jemalloc, checked out in `third_party/`. @@ -42,16 +40,16 @@ CC=gcc47 CXX=g++47 cmake \ cmake --build . --target install ``` -This will build `libtokudb.so` and `libtokuportability.so` and install it, +This will build `libft.so` and `libtokuportability.so` and install it, some header files, and some examples to `percona-ft/prefix/`. It will also build jemalloc and install it alongside these libraries, you should link to that if you are planning to run benchmarks or in production. ### Platforms -PerconaFT is supported on 64-bit Centos, should work on other 64-bit linux -distributions, and may work on OSX 10.8 and FreeBSD. PerconaFT is not -supported on 32-bit systems. +PerconaFT is supported on 64-bit Centos, Debian, and Ubuntu and should work +on other 64-bit linux distributions, and may work on OSX 10.8 and FreeBSD. +PerconaFT is not supported on 32-bit systems. [Transparent hugepages][transparent-hugepages] is a feature in newer linux kernel versions that causes problems for the memory usage tracking @@ -97,16 +95,9 @@ We have two publicly accessible mailing lists for TokuDB: - tokudb-dev@googlegroups.com is for discussion of the development of TokuDB. -and two for TokuMX: - - - tokumx-user@googlegroups.com is for general and support related - questions about the use of TokuMX. - - tokumx-dev@googlegroups.com is for discussion of the development of - TokuMX. - All source code and test contributions must be provided under a [BSD 2-Clause][bsd-2] license. For any small change set, the license text may be contained within the commit comment and the pull request. For larger contributions, the license must be presented in a COPYING. file in the root of the PerconaFT project. Please see the [BSD 2-Clause license template][bsd-2] for the content of the license text. 
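To make the build steps described in the README hunk above concrete, here is a minimal out-of-source build sketch consistent with the README's own example; the checkout directory name, compiler choice, and install prefix are illustrative assumptions and are not part of this patch. It assumes the jemalloc sources have already been checked out under `third_party/`, as the text requires.

```sh
# Illustrative out-of-source build of PerconaFT; paths and compilers are
# assumptions, not mandated by the patch.
cd percona-ft                 # source checkout with third_party/jemalloc present
mkdir -p build && cd build
CC=gcc CXX=g++ cmake \
    -D CMAKE_BUILD_TYPE=Release \
    -D CMAKE_INSTALL_PREFIX=$(pwd)/../prefix \
    ..
cmake --build . --target install
```

As the README notes, benchmarks and production deployments should link against the jemalloc that this build installs alongside the libraries.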
-[jira]: https://tokutek.atlassian.net/browse/FT/ +[jira]: https://jira.percona.com/projects/TDB [bsd-2]: http://opensource.org/licenses/BSD-2-Clause/ diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.cc b/storage/tokudb/PerconaFT/ft/ft-ops.cc index ef2009db1b1..a98768158dd 100644 --- a/storage/tokudb/PerconaFT/ft/ft-ops.cc +++ b/storage/tokudb/PerconaFT/ft/ft-ops.cc @@ -4881,6 +4881,94 @@ static void toku_pfs_keys_init(const char *toku_instr_group_name) { toku_instr_probe_1 = new toku_instr_probe(*fti_probe_1_key); } +static void toku_pfs_keys_destroy(void) { + delete kibbutz_mutex_key; + delete minicron_p_mutex_key; + delete queue_result_mutex_key; + delete tpool_lock_mutex_key; + delete workset_lock_mutex_key; + delete bjm_jobs_lock_mutex_key; + delete log_internal_lock_mutex_key; + delete cachetable_ev_thread_lock_mutex_key; + delete cachetable_disk_nb_mutex_key; + delete safe_file_size_lock_mutex_key; + delete cachetable_m_mutex_key; + delete checkpoint_safe_mutex_key; + delete ft_ref_lock_mutex_key; + delete ft_open_close_lock_mutex_key; + delete loader_error_mutex_key; + delete bfs_mutex_key; + delete loader_bl_mutex_key; + delete loader_fi_lock_mutex_key; + delete loader_out_mutex_key; + delete result_output_condition_lock_mutex_key; + delete block_table_mutex_key; + delete rollback_log_node_cache_mutex_key; + delete txn_lock_mutex_key; + delete txn_state_lock_mutex_key; + delete txn_child_manager_mutex_key; + delete txn_manager_lock_mutex_key; + delete treenode_mutex_key; + delete locktree_request_info_mutex_key; + delete locktree_request_info_retry_mutex_key; + delete manager_mutex_key; + delete manager_escalation_mutex_key; + delete db_txn_struct_i_txn_mutex_key; + delete manager_escalator_mutex_key; + delete indexer_i_indexer_lock_mutex_key; + delete indexer_i_indexer_estimate_lock_mutex_key; + + delete tokudb_file_data_key; + delete tokudb_file_load_key; + delete tokudb_file_tmp_key; + delete tokudb_file_log_key; + + delete fti_probe_1_key; + + delete extractor_thread_key; + delete fractal_thread_key; + delete io_thread_key; + delete eviction_thread_key; + delete kibbutz_thread_key; + delete minicron_thread_key; + delete tp_internal_thread_key; + + delete result_state_cond_key; + delete bjm_jobs_wait_key; + delete cachetable_p_refcount_wait_key; + delete cachetable_m_flow_control_cond_key; + delete cachetable_m_ev_thread_cond_key; + delete bfs_cond_key; + delete result_output_condition_key; + delete manager_m_escalator_done_key; + delete lock_request_m_wait_cond_key; + delete queue_result_cond_key; + delete ws_worker_wait_key; + delete rwlock_wait_read_key; + delete rwlock_wait_write_key; + delete rwlock_cond_key; + delete tp_thread_wait_key; + delete tp_pool_wait_free_key; + delete frwlock_m_wait_read_key; + delete kibbutz_k_cond_key; + delete minicron_p_condvar_key; + delete locktree_request_info_retry_cv_key; + + delete multi_operation_lock_key; + delete low_priority_multi_operation_lock_key; + delete cachetable_m_list_lock_key; + delete cachetable_m_pending_lock_expensive_key; + delete cachetable_m_pending_lock_cheap_key; + delete cachetable_m_lock_key; + delete result_i_open_dbs_rwlock_key; + delete checkpoint_safe_rwlock_key; + delete cachetable_value_key; + delete safe_file_size_lock_rwlock_key; + + delete cachetable_disk_nb_rwlock_key; + delete toku_instr_probe_1; +} + int toku_ft_layer_init(void) { int r = 0; @@ -4917,8 +5005,7 @@ void toku_ft_layer_destroy(void) { toku_status_destroy(); partitioned_counters_destroy(); toku_scoped_malloc_destroy(); - - delete 
toku_instr_probe_1; + toku_pfs_keys_destroy(); // Portability must be cleaned up last toku_portability_destroy(); diff --git a/storage/tokudb/PerconaFT/ft/serialize/block_allocator.cc b/storage/tokudb/PerconaFT/ft/serialize/block_allocator.cc index 29f6daa293a..18c86539734 100644 --- a/storage/tokudb/PerconaFT/ft/serialize/block_allocator.cc +++ b/storage/tokudb/PerconaFT/ft/serialize/block_allocator.cc @@ -49,7 +49,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. #include "ft/serialize/block_allocator.h" #include "ft/serialize/rbtree_mhs.h" -#if TOKU_DEBUG_PARANOID +#ifdef TOKU_DEBUG_PARANOID #define VALIDATE() Validate() #else #define VALIDATE() diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-clock-test.cc b/storage/tokudb/PerconaFT/ft/tests/ft-clock-test.cc index 26a3dae673c..00ff8cf204b 100644 --- a/storage/tokudb/PerconaFT/ft/tests/ft-clock-test.cc +++ b/storage/tokudb/PerconaFT/ft/tests/ft-clock-test.cc @@ -184,11 +184,11 @@ static void test2(int fd, FT ft_h, FTNODE *dn) { PAIR_ATTR attr; memset(&attr, 0, sizeof(attr)); toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr); - invariant(BP_STATE(*dn, 0) == (is_leaf) ? PT_ON_DISK : PT_COMPRESSED); + invariant(BP_STATE(*dn, 0) == ((is_leaf) ? PT_ON_DISK : PT_COMPRESSED)); invariant(BP_STATE(*dn, 1) == PT_AVAIL); invariant(BP_SHOULD_EVICT(*dn, 1)); toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr); - invariant(BP_STATE(*dn, 1) == (is_leaf) ? PT_ON_DISK : PT_COMPRESSED); + invariant(BP_STATE(*dn, 1) == ((is_leaf) ? PT_ON_DISK : PT_COMPRESSED)); bool req = toku_ftnode_pf_req_callback(*dn, &bfe_subset); invariant(req); diff --git a/storage/tokudb/PerconaFT/ft/tests/log-test4.cc b/storage/tokudb/PerconaFT/ft/tests/log-test4.cc index e0bbedb95bf..019852bb729 100644 --- a/storage/tokudb/PerconaFT/ft/tests/log-test4.cc +++ b/storage/tokudb/PerconaFT/ft/tests/log-test4.cc @@ -54,7 +54,7 @@ test_main (int argc __attribute__((__unused__)), { ml_lock(&logger->input_lock); toku_logger_make_space_in_inbuf(logger, 5); - snprintf(logger->inbuf.buf+logger->inbuf.n_in_buf, 5, "a1234"); + memcpy(logger->inbuf.buf+logger->inbuf.n_in_buf, "a1234", 5); logger->inbuf.n_in_buf+=5; logger->lsn.lsn++; logger->inbuf.max_lsn_in_buf = logger->lsn; diff --git a/storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp b/storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp index fbb5aa08571..41923892a15 100644 --- a/storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp +++ b/storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp @@ -39,7 +39,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. #include "malloc_utils.hpp" -#if !HAVE_BITS_FUNCTEXCEPT_H +#if !defined(HAVE_BITS_FUNCTEXCEPT_H) namespace std { diff --git a/storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp b/storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp index 3e2b591430e..786d4d1c841 100644 --- a/storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp +++ b/storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp @@ -47,7 +47,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. 
#include #include -#if HAVE_BITS_FUNCTEXCEPT_H +#ifdef HAVE_BITS_FUNCTEXCEPT_H # include diff --git a/storage/tokudb/PerconaFT/portability/memory.cc b/storage/tokudb/PerconaFT/portability/memory.cc index 9594158cf38..403abfe1d68 100644 --- a/storage/tokudb/PerconaFT/portability/memory.cc +++ b/storage/tokudb/PerconaFT/portability/memory.cc @@ -182,7 +182,7 @@ toku_memory_footprint(void * p, size_t touched) void * toku_malloc(size_t size) { -#if __APPLE__ +#if defined(__APPLE__) if (size == 0) { return nullptr; } @@ -209,7 +209,7 @@ toku_malloc(size_t size) { } void *toku_malloc_aligned(size_t alignment, size_t size) { -#if __APPLE__ +#if defined(__APPLE__) if (size == 0) { return nullptr; } @@ -245,7 +245,7 @@ toku_calloc(size_t nmemb, size_t size) { void * toku_realloc(void *p, size_t size) { -#if __APPLE__ +#if defined(__APPLE__) if (size == 0) { if (p != nullptr) { toku_free(p); @@ -276,7 +276,7 @@ toku_realloc(void *p, size_t size) { } void *toku_realloc_aligned(size_t alignment, void *p, size_t size) { -#if __APPLE__ +#if defined(__APPLE__) if (size == 0) { if (p != nullptr) { toku_free(p); @@ -345,7 +345,7 @@ toku_free(void *p) { void * toku_xmalloc(size_t size) { -#if __APPLE__ +#if defined(__APPLE__) if (size == 0) { return nullptr; } @@ -375,7 +375,7 @@ void* toku_xmalloc_aligned(size_t alignment, size_t size) // Fail with a resource_assert if the allocation fails (don't return an error code). // Requires: alignment is a power of two. { -#if __APPLE__ +#if defined(__APPLE__) if (size == 0) { return nullptr; } @@ -409,7 +409,7 @@ toku_xcalloc(size_t nmemb, size_t size) { void * toku_xrealloc(void *v, size_t size) { -#if __APPLE__ +#if defined(__APPLE__) if (size == 0) { if (v != nullptr) { toku_free(v); diff --git a/storage/tokudb/PerconaFT/portability/tests/test-max-data.cc b/storage/tokudb/PerconaFT/portability/tests/test-max-data.cc index dbbea974a49..fb5fc37111a 100644 --- a/storage/tokudb/PerconaFT/portability/tests/test-max-data.cc +++ b/storage/tokudb/PerconaFT/portability/tests/test-max-data.cc @@ -64,7 +64,7 @@ int main(int argc, char *const argv[]) { if (verbose) printf("maxdata=%" PRIu64 " 0x%" PRIx64 "\n", maxdata, maxdata); // check the data size -#if defined(__x86_64__) || defined(__aarch64__) +#if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__) assert(maxdata > (1ULL << 32)); #elif __i386__ assert(maxdata < (1ULL << 32)); diff --git a/storage/tokudb/PerconaFT/portability/toku_assert.h b/storage/tokudb/PerconaFT/portability/toku_assert.h index b0a7be3287b..e8bcf5a395f 100644 --- a/storage/tokudb/PerconaFT/portability/toku_assert.h +++ b/storage/tokudb/PerconaFT/portability/toku_assert.h @@ -126,7 +126,7 @@ void db_env_do_backtrace(FILE *outf); #define resource_assert_zero(a) assert_zero(a) // indicates resource must be available, otherwise unrecoverable #define resource_assert_equals(a, b) assert_equals(a, b) // indicates resource must be available, otherwise unrecoverable -#if TOKU_DEBUG_PARANOID +#if defined(TOKU_DEBUG_PARANOID) #define paranoid_invariant(a) assert(a) #define paranoid_invariant_null(a) assert_null(a) #define paranoid_invariant_notnull(a) assert(a) diff --git a/storage/tokudb/PerconaFT/portability/toku_instrumentation.h b/storage/tokudb/PerconaFT/portability/toku_instrumentation.h index 8c9390edc0a..c300f9275b8 100644 --- a/storage/tokudb/PerconaFT/portability/toku_instrumentation.h +++ b/storage/tokudb/PerconaFT/portability/toku_instrumentation.h @@ -52,6 +52,8 @@ class toku_instr_key { UU(const char *name)) {} explicit 
toku_instr_key(UU(pfs_key_t key_id)) {} + + ~toku_instr_key() {} }; typedef toku_instr_probe_empty toku_instr_probe; diff --git a/storage/tokudb/PerconaFT/portability/toku_portability.h b/storage/tokudb/PerconaFT/portability/toku_portability.h index e459cfb8779..9c5bf891eba 100644 --- a/storage/tokudb/PerconaFT/portability/toku_portability.h +++ b/storage/tokudb/PerconaFT/portability/toku_portability.h @@ -71,7 +71,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. #include #include -#if __FreeBSD__ +#if defined(__FreeBSD__) #include #endif @@ -159,7 +159,7 @@ extern "C" { #endif // Deprecated functions. -#if !defined(TOKU_ALLOW_DEPRECATED) +#if !defined(TOKU_ALLOW_DEPRECATED) && !defined(__clang__) int creat(const char *pathname, mode_t mode) __attribute__((__deprecated__)); int fstat(int fd, struct stat *buf) __attribute__((__deprecated__)); int stat(const char *path, struct stat *buf) __attribute__((__deprecated__)); diff --git a/storage/tokudb/PerconaFT/portability/toku_pthread.h b/storage/tokudb/PerconaFT/portability/toku_pthread.h index 44de01244d2..e3bd3bce598 100644 --- a/storage/tokudb/PerconaFT/portability/toku_pthread.h +++ b/storage/tokudb/PerconaFT/portability/toku_pthread.h @@ -168,11 +168,7 @@ typedef struct toku_mutex_aligned { } #else // __linux__, at least #define ZERO_COND_INITIALIZER \ - { \ - { \ - { 0 } \ - } \ - } + {} #endif static inline void toku_mutexattr_init(toku_pthread_mutexattr_t *attr) { diff --git a/storage/tokudb/PerconaFT/portability/toku_race_tools.h b/storage/tokudb/PerconaFT/portability/toku_race_tools.h index 9ed46ec909d..96712ffffdc 100644 --- a/storage/tokudb/PerconaFT/portability/toku_race_tools.h +++ b/storage/tokudb/PerconaFT/portability/toku_race_tools.h @@ -95,8 +95,8 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. # define TOKU_ANNOTATE_IGNORE_WRITES_BEGIN() ((void) 0) # define TOKU_ANNOTATE_IGNORE_WRITES_END() ((void) 0) # define TOKU_VALGRIND_RESET_MUTEX_ORDERING_INFO(mutex) +#undef RUNNING_ON_VALGRIND # define RUNNING_ON_VALGRIND (0U) - #endif // Valgrind 3.10.1 (and previous versions). diff --git a/storage/tokudb/PerconaFT/portability/toku_time.h b/storage/tokudb/PerconaFT/portability/toku_time.h index a1278ef0337..c4c45b8e8c7 100644 --- a/storage/tokudb/PerconaFT/portability/toku_time.h +++ b/storage/tokudb/PerconaFT/portability/toku_time.h @@ -43,6 +43,9 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. 
#include #include #include +#if defined(__powerpc__) +# include +#endif static inline float toku_tdiff (struct timeval *a, struct timeval *b) { return (float)((a->tv_sec - b->tv_sec) + 1e-6 * (a->tv_usec - b->tv_usec)); @@ -106,6 +109,8 @@ static inline tokutime_t toku_time_now(void) { uint64_t result; __asm __volatile__ ("mrs %[rt], cntvct_el0" : [rt] "=r" (result)); return result; +#elif defined(__powerpc__) + return __ppc_get_timebase(); #else #error No timer implementation for this platform #endif diff --git a/storage/tokudb/PerconaFT/src/tests/checkpoint_stress.cc b/storage/tokudb/PerconaFT/src/tests/checkpoint_stress.cc index 135a9843ce4..d3e5ddd5031 100644 --- a/storage/tokudb/PerconaFT/src/tests/checkpoint_stress.cc +++ b/storage/tokudb/PerconaFT/src/tests/checkpoint_stress.cc @@ -351,7 +351,7 @@ test_main (int argc, char * const argv[]) { // arg that suppresses valgrind on this child process break; } - // otherwise, fall through to an error + /* fall through */ // otherwise, fall through to an error case 'h': case '?': usage(argv[0]); diff --git a/storage/tokudb/PerconaFT/src/tests/directory_lock.cc b/storage/tokudb/PerconaFT/src/tests/directory_lock.cc index f040e680903..b28a71704cf 100644 --- a/storage/tokudb/PerconaFT/src/tests/directory_lock.cc +++ b/storage/tokudb/PerconaFT/src/tests/directory_lock.cc @@ -69,7 +69,7 @@ static void verify_shared_ops_fail(DB_ENV* env, DB* db) { uint32_t flags = 0; DBT key,val; DBT in_key,in_val; - uint32_t in_key_data, in_val_data = 0; + uint32_t in_key_data = 0, in_val_data = 0; memset(&in_key, 0, sizeof(in_key)); memset(&in_val, 0, sizeof(in_val)); in_key.size = sizeof(in_key_data); diff --git a/storage/tokudb/PerconaFT/src/tests/loader-cleanup-test.cc b/storage/tokudb/PerconaFT/src/tests/loader-cleanup-test.cc index ea894683c23..a229cb5b565 100644 --- a/storage/tokudb/PerconaFT/src/tests/loader-cleanup-test.cc +++ b/storage/tokudb/PerconaFT/src/tests/loader-cleanup-test.cc @@ -172,12 +172,12 @@ err_type_str (enum test_type t) { case einval_o: return "open"; case enospc_fc: return "fclose"; case abort_via_poll: return "abort_via_poll"; - case commit: assert(0); - case abort_txn: assert(0); - case abort_loader: assert(0); + case commit: abort(); + case abort_txn: abort(); + case abort_loader: abort(); } // I know that Barry prefers the single-return case, but writing the code this way means that the compiler will complain if I forget something in the enum. -Bradley - assert(0); + abort(); return NULL; } @@ -193,12 +193,12 @@ err_msg_type_str (enum test_type t) { case einval_o: return "EINVAL"; case enospc_fc: return "ENOSPC"; case abort_via_poll: return "non-zero"; - case commit: assert(0); - case abort_txn: assert(0); - case abort_loader: assert(0); + case commit: abort(); + case abort_txn: abort(); + case abort_loader: abort(); } // I know that Barry prefers the single-return case, but writing the code this way means that the compiler will complain if I forget something in the enum. 
-Bradley - assert(0); + abort(); return NULL; } @@ -873,7 +873,7 @@ static void run_test(enum test_type t, int trigger) case abort_via_poll: poll_count_trigger = trigger; break; default: - assert(0); + abort(); } diff --git a/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-abort.cc b/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-abort.cc index a8455c0f406..425c12e1a90 100644 --- a/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-abort.cc +++ b/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-abort.cc @@ -81,7 +81,7 @@ put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals memcpy(dest_key->data, &pri_data[dbnum], dest_key->size); break; default: - assert(0); + abort(); } if (dest_val) { @@ -95,9 +95,9 @@ put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals } break; case DB_DBT_REALLOC: - assert(0); + abort(); default: - assert(0); + abort(); } } diff --git a/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-srcdb-fdelete-all.cc b/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-srcdb-fdelete-all.cc index e823a74627d..75479cb69c4 100644 --- a/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-srcdb-fdelete-all.cc +++ b/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-srcdb-fdelete-all.cc @@ -85,7 +85,7 @@ put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals memcpy(dest_key->data, &pri_data[dbnum], dest_key->size); break; default: - assert(0); + abort(); } if (dest_val) { @@ -99,9 +99,9 @@ put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals } break; case DB_DBT_REALLOC: - assert(0); + abort(); default: - assert(0); + abort(); } } diff --git a/storage/tokudb/PerconaFT/src/tests/recover-del-multiple.cc b/storage/tokudb/PerconaFT/src/tests/recover-del-multiple.cc index c2ee80c438f..9f4b1cd9cb8 100644 --- a/storage/tokudb/PerconaFT/src/tests/recover-del-multiple.cc +++ b/storage/tokudb/PerconaFT/src/tests/recover-del-multiple.cc @@ -84,7 +84,7 @@ put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals memcpy(dest_key->data, &pri_data[dbnum], dest_key->size); break; default: - assert(0); + abort(); } if (dest_val) { @@ -98,9 +98,9 @@ put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals } break; case DB_DBT_REALLOC: - assert(0); + abort(); default: - assert(0); + abort(); } } diff --git a/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-abort.cc b/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-abort.cc index d045800960c..da40a61f24b 100644 --- a/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-abort.cc +++ b/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-abort.cc @@ -81,7 +81,7 @@ put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals memcpy(dest_key->data, &pri_data[dbnum], dest_key->size); break; default: - assert(0); + abort(); } if (dest_val) { @@ -95,9 +95,9 @@ put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals } break; case DB_DBT_REALLOC: - assert(0); + abort(); default: - assert(0); + abort(); } } diff --git a/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc b/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc index cc99ab560d8..45f0b465db4 100644 --- a/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc +++ b/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc @@ -158,7 +158,7 @@ do_args(int argc, char * const argv[]) { choices[i] = -1; } - char c; + 
signed char c; while ((c = getopt(argc, argv, "vqhcrO:A:B:C:D:E:F:G:H:I:J:X:")) != -1) { switch (c) { case 'v': @@ -217,7 +217,7 @@ do_args(int argc, char * const argv[]) { // arg that suppresses valgrind on this child process break; } - // otherwise, fall through to an error + /* fall through */ // otherwise, fall through to an error default: usage(); break; diff --git a/storage/tokudb/PerconaFT/src/tests/test-prepare3.cc b/storage/tokudb/PerconaFT/src/tests/test-prepare3.cc index 5cb3796a26b..f57fc963529 100644 --- a/storage/tokudb/PerconaFT/src/tests/test-prepare3.cc +++ b/storage/tokudb/PerconaFT/src/tests/test-prepare3.cc @@ -128,6 +128,7 @@ static void check_prepared_list (enum prepared_state ps[NTXNS], long count, DB_P goto next; case PREPARED: count_prepared++; + /* fall through */ case MAYBE_COMMITTED: case MAYBE_ABORTED: count_maybe_prepared++; diff --git a/storage/tokudb/PerconaFT/util/scoped_malloc.cc b/storage/tokudb/PerconaFT/util/scoped_malloc.cc index 55bbab39332..e13e1c2424a 100644 --- a/storage/tokudb/PerconaFT/util/scoped_malloc.cc +++ b/storage/tokudb/PerconaFT/util/scoped_malloc.cc @@ -94,7 +94,7 @@ namespace toku { } void destroy() { -#if TOKU_SCOPED_MALLOC_DEBUG +#ifdef TOKU_SCOPED_MALLOC_DEBUG printf("%s %p %p\n", __FUNCTION__, this, m_stack); #endif if (m_stack != NULL) { diff --git a/storage/tokudb/ha_tokudb.h b/storage/tokudb/ha_tokudb.h index c80be207005..b3b86e8b528 100644 --- a/storage/tokudb/ha_tokudb.h +++ b/storage/tokudb/ha_tokudb.h @@ -941,7 +941,7 @@ public: public: #endif -#if TOKU_INCLUDE_ALTER_55 +#if defined(TOKU_INCLUDE_ALTER_55) public: // Returns true of the 5.6 inplace alter table interface is used. bool try_hot_alter_table(); @@ -1037,7 +1037,7 @@ private: #if TOKU_INCLUDE_WRITE_FRM_DATA int write_frm_data(const uchar *frm_data, size_t frm_len); #endif -#if TOKU_INCLUDE_UPSERT +#if defined(TOKU_INCLUDE_UPSERT) private: int fast_update(THD *thd, List &update_fields, List &update_values, Item *conds); bool check_fast_update(THD *thd, List &update_fields, List &update_values, Item *conds); diff --git a/storage/tokudb/ha_tokudb_alter_55.cc b/storage/tokudb/ha_tokudb_alter_55.cc index 3e6a38c97de..ac473f3cbd8 100644 --- a/storage/tokudb/ha_tokudb_alter_55.cc +++ b/storage/tokudb/ha_tokudb_alter_55.cc @@ -23,7 +23,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." -#if TOKU_INCLUDE_ALTER_55 +#if defined(TOKU_INCLUDE_ALTER_55) #include "ha_tokudb_alter_common.cc" diff --git a/storage/tokudb/ha_tokudb_alter_56.cc b/storage/tokudb/ha_tokudb_alter_56.cc index c2d07110ecb..fe6ad1b45f5 100644 --- a/storage/tokudb/ha_tokudb_alter_56.cc +++ b/storage/tokudb/ha_tokudb_alter_56.cc @@ -257,13 +257,13 @@ static bool only_flags(ulong bits, ulong mask) { // HA_ALTER_INPLACE_EXCLUSIVE_LOCK: prepare and alter runs with MDL X -// HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE: prepare runs with MDL X, +// HA_ALTER_INPLACE_COPY_LOCK: prepare runs with MDL X, // alter runs with MDL SNW // HA_ALTER_INPLACE_SHARED_LOCK: prepare and alter methods called with MDL SNW, // concurrent reads, no writes -// HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE: prepare runs with MDL X, +// HA_ALTER_INPLACE_COPY_NO_LOCK: prepare runs with MDL X, // alter runs with MDL SW // HA_ALTER_INPLACE_NO_LOCK: prepare and alter methods called with MDL SW, @@ -319,7 +319,9 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter( // we grab an exclusive MDL for the drop index. 
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK; } else { - result = HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE; + /* FIXME: MDEV-16099 Use alter algorithm=nocopy + or algorithm=instant for non-InnoDB engine */ + result = HA_ALTER_INPLACE_COPY_LOCK; // someday, allow multiple hot indexes via alter table add key. // don't forget to change the store_lock function. @@ -336,7 +338,9 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter( tokudb::sysvars::create_index_online(thd)) { // external_lock set WRITE_ALLOW_WRITE which allows writes // concurrent with the index creation - result = HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE; + /* FIXME: MDEV-16099 Use alter algorithm=nocopy + or algorithm=instant for non-InnoDB engine */ + result = HA_ALTER_INPLACE_COPY_NO_LOCK; } } } @@ -509,7 +513,9 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter( ALTER_RECREATE_TABLE | ALTER_COLUMN_DEFAULT)) { ctx->optimize_needed = true; - result = HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE; + /* FIXME: MDEV-16099 Use alter algorithm=nocopy + or algorithm=instant for non-InnoDB engine */ + result = HA_ALTER_INPLACE_COPY_NO_LOCK; } #endif diff --git a/storage/tokudb/ha_tokudb_update.cc b/storage/tokudb/ha_tokudb_update.cc index 2e56d4c6698..105c12a569d 100644 --- a/storage/tokudb/ha_tokudb_update.cc +++ b/storage/tokudb/ha_tokudb_update.cc @@ -23,7 +23,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." -#if TOKU_INCLUDE_UPSERT +#if defined(TOKU_INCLUDE_UPSERT) // Point updates and upserts diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc index cee8575eebb..437c71f69dd 100644 --- a/storage/tokudb/hatoku_hton.cc +++ b/storage/tokudb/hatoku_hton.cc @@ -62,7 +62,7 @@ static bool tokudb_show_status( THD* thd, stat_print_fn* print, enum ha_stat_type); -#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL +#if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) static void tokudb_handle_fatal_signal(handlerton* hton, THD* thd, int sig); #endif static int tokudb_close_connection(handlerton* hton, THD* thd); @@ -393,7 +393,7 @@ static int tokudb_init_func(void *p) { tokudb_hton->panic = tokudb_end; tokudb_hton->flush_logs = tokudb_flush_logs; tokudb_hton->show_status = tokudb_show_status; -#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL +#if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) tokudb_hton->handle_fatal_signal = tokudb_handle_fatal_signal; #endif @@ -978,7 +978,7 @@ static bool tokudb_sync_on_prepare(void) { } static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) { - TOKUDB_DBUG_ENTER(""); + TOKUDB_DBUG_ENTER("%u", all); TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter"); int r = 0; @@ -1006,6 +1006,22 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) { r = txn->xa_prepare(txn, &thd_xid, syncflag); // test hook to induce a crash on a debug build DBUG_EXECUTE_IF("tokudb_crash_prepare_after", DBUG_SUICIDE();); + + // XA log entries can be interleaved in the binlog since XA prepare on the master + // flushes to the binlog. There can be log entries from different clients pushed + // into the binlog before XA commit is executed on the master. Therefore, the slave + // thread must be able to juggle multiple XA transactions. 
Tokudb does this by + // zapping the client transaction context on the slave when executing the XA prepare + // and expecting to process XA commit with commit_by_xid (which supplies the XID so + // that the transaction can be looked up and committed). + if (r == 0 && all && thd->slave_thread) { + TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "zap txn context %u", thd_sql_command(thd)); + if (thd_sql_command(thd) == SQLCOM_XA_PREPARE) { + trx->all = NULL; + trx->sub_sp_level = NULL; + trx->sp_level = NULL; + } + } } else { TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "nothing to prepare %d", all); } @@ -1036,6 +1052,7 @@ static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) { static int tokudb_commit_by_xid(handlerton* hton, XID* xid) { TOKUDB_DBUG_ENTER(""); TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter"); + TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "xid %p", xid); int r = 0; DB_TXN* txn = NULL; TOKU_XA_XID* toku_xid = (TOKU_XA_XID*)xid; @@ -1055,6 +1072,7 @@ cleanup: static int tokudb_rollback_by_xid(handlerton* hton, XID* xid) { TOKUDB_DBUG_ENTER(""); TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter"); + TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "xid %p", xid); int r = 0; DB_TXN* txn = NULL; TOKU_XA_XID* toku_xid = (TOKU_XA_XID*)xid; @@ -1517,7 +1535,7 @@ static bool tokudb_show_status( return false; } -#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL +#if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) static void tokudb_handle_fatal_signal( TOKUDB_UNUSED(handlerton* hton), TOKUDB_UNUSD(THD* thd), @@ -1599,7 +1617,7 @@ struct st_mysql_storage_engine tokudb_storage_engine = { MYSQL_HANDLERTON_INTERFACE_VERSION }; -#if TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING +#if defined(TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING) struct tokudb_search_txn_extra { bool match_found; uint64_t match_txn_id; @@ -1768,7 +1786,7 @@ static void tokudb_lock_timeout_callback( mysql_thread_id, (int)qs->length, qs->str); -#if TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING +#if defined(TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING) uint64_t blocking_thread_id = 0; if (tokudb_txn_id_to_client_id( thd, diff --git a/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result b/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result index 74148bd4e74..992f380591f 100644 --- a/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result +++ b/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result @@ -6,6 +6,7 @@ CREATE DATABASE new_db; CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb; ALTER TABLE test.t1 RENAME new_db.t1; The content of "test" directory: +db.opt The content of "new_db" directory: db.opt t1.frm diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_sync_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_sync_tokudb.result index 7186da6f69a..12dfab60b66 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_sync_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_sync_tokudb.result @@ -58,6 +58,7 @@ t1 CREATE TABLE `t1` ( ) ENGINE=TokuDB DEFAULT CHARSET=latin1 STATS_PERSISTENT=0 PARTITION BY RANGE (`a`) (PARTITION `p0` VALUES LESS THAN MAXVALUE ENGINE = TokuDB) +db.opt t1.frm t1.par SET DEBUG_SYNC='before_open_in_get_all_tables SIGNAL parked WAIT_FOR open'; @@ -83,6 +84,7 @@ connection default; TABLE_SCHEMA TABLE_NAME PARTITION_NAME PARTITION_ORDINAL_POSITION PARTITION_DESCRIPTION TABLE_ROWS test t1 p0 1 10 1 test t1 p10 2 
MAXVALUE 3 +db.opt t1.frm t1.par SHOW CREATE TABLE t1; @@ -100,4 +102,5 @@ a 21 33 drop table t1; +db.opt SET DEBUG_SYNC = 'RESET'; diff --git a/storage/tokudb/tokudb.cnf b/storage/tokudb/tokudb.cnf.in similarity index 62% rename from storage/tokudb/tokudb.cnf rename to storage/tokudb/tokudb.cnf.in index 4def635ddf0..de9b5b711ee 100644 --- a/storage/tokudb/tokudb.cnf +++ b/storage/tokudb/tokudb.cnf.in @@ -4,3 +4,6 @@ plugin-load-add=ha_tokudb.so +[mysqld_safe] +# it might be necessary to uncomment the following line if jemalloc >= 5.0.0 +@cnf_malloc_lib@ diff --git a/storage/tokudb/tokudb.conf.in b/storage/tokudb/tokudb.conf.in new file mode 100644 index 00000000000..d22f6686d91 --- /dev/null +++ b/storage/tokudb/tokudb.conf.in @@ -0,0 +1,3 @@ +[Service] +# it might be necessary to uncomment the following line if jemalloc >= 5.0.0 +@systemd_env@ diff --git a/storage/tokudb/tokudb_sysvars.cc b/storage/tokudb/tokudb_sysvars.cc index bbc39dc550a..58fe5a47133 100644 --- a/storage/tokudb/tokudb_sysvars.cc +++ b/storage/tokudb/tokudb_sysvars.cc @@ -74,7 +74,7 @@ my_bool dir_per_db = FALSE; char* version = (char*) TOKUDB_VERSION_STR; // file system reserve as a percentage of total disk space -#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL +#if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) char* gdb_path = NULL; my_bool gdb_on_fatal = FALSE; #endif @@ -411,7 +411,7 @@ static MYSQL_SYSVAR_BOOL(dir_per_db, dir_per_db, 0, "TokuDB store ft files in db directories", NULL, tokudb_dir_per_db_update, FALSE); -#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL +#if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) static MYSQL_SYSVAR_STR( gdb_path, gdb_path, @@ -894,7 +894,7 @@ static MYSQL_THDVAR_ULONGLONG( ~0ULL, 1); -#if TOKU_INCLUDE_UPSERT +#if defined(TOKU_INCLUDE_UPSERT) static MYSQL_THDVAR_BOOL( disable_slow_update, PLUGIN_VAR_THDLOCAL, @@ -951,7 +951,7 @@ st_mysql_sys_var* system_variables[] = { MYSQL_SYSVAR(version), MYSQL_SYSVAR(write_status_frequency), MYSQL_SYSVAR(dir_per_db), -#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL +#if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) MYSQL_SYSVAR(gdb_path), MYSQL_SYSVAR(gdb_on_fatal), #endif @@ -997,7 +997,7 @@ st_mysql_sys_var* system_variables[] = { MYSQL_SYSVAR(rpl_unique_checks), MYSQL_SYSVAR(rpl_unique_checks_delay), -#if TOKU_INCLUDE_UPSERT +#if defined(TOKU_INCLUDE_UPSERT) MYSQL_SYSVAR(disable_slow_update), MYSQL_SYSVAR(disable_slow_upsert), #endif @@ -1055,7 +1055,7 @@ my_bool disable_prefetching(THD* thd) { my_bool disable_slow_alter(THD* thd) { return (THDVAR(thd, disable_slow_alter) != 0); } -#if TOKU_INCLUDE_UPSERT +#if defined(TOKU_INCLUDE_UPSERT) my_bool disable_slow_update(THD* thd) { return (THDVAR(thd, disable_slow_update) != 0); } diff --git a/storage/tokudb/tokudb_sysvars.h b/storage/tokudb/tokudb_sysvars.h index 7701f211729..52489cd82ff 100644 --- a/storage/tokudb/tokudb_sysvars.h +++ b/storage/tokudb/tokudb_sysvars.h @@ -104,7 +104,7 @@ extern uint write_status_frequency; extern my_bool dir_per_db; extern char* version; -#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL +#if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) extern char* gdb_path; extern my_bool gdb_on_fatal; #endif diff --git a/strings/decimal.c b/strings/decimal.c index c64fe189565..325615147c8 100644 --- a/strings/decimal.c +++ b/strings/decimal.c @@ -2248,7 +2248,7 @@ static int do_div_mod(const decimal_t *from1, const decimal_t *from2, */ norm_factor=DIG_BASE/(*start2+1); norm2=(dec1)(norm_factor*start2[0]); - if (likely(len2>0)) + if 
(unlikely(len2>0)) norm2+=(dec1)(norm_factor*start2[1]/DIG_BASE); if (*start1 < *start2) @@ -2270,7 +2270,7 @@ static int do_div_mod(const decimal_t *from1, const decimal_t *from2, guess=(norm_factor*x+norm_factor*y/DIG_BASE)/norm2; if (unlikely(guess >= DIG_BASE)) guess=DIG_BASE-1; - if (likely(len2>0)) + if (unlikely(len2>0)) { /* hmm, this is a suspicious trick - I removed normalization here */ if (start2[1]*guess > (x-guess*start2[0])*DIG_BASE+y) diff --git a/tests/mysql_client_fw.c b/tests/mysql_client_fw.c index 73dc443f782..2a529c12c63 100644 --- a/tests/mysql_client_fw.c +++ b/tests/mysql_client_fw.c @@ -768,7 +768,7 @@ static void do_verify_prepare_field(MYSQL_RES *result, { MYSQL_FIELD *field; CHARSET_INFO *cs; - ulonglong expected_field_length; + ulonglong expected_field_length= length; if (!(field= mysql_fetch_field_direct(result, no))) { @@ -777,7 +777,7 @@ static void do_verify_prepare_field(MYSQL_RES *result, } cs= get_charset(field->charsetnr, 0); DIE_UNLESS(cs); - if ((expected_field_length= length * cs->mbmaxlen) > UINT_MAX32) + if ((expected_field_length*= cs->mbmaxlen) > UINT_MAX32) expected_field_length= UINT_MAX32; if (!opt_silent) { diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index e90453411cb..0abeab8de1c 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -467,7 +467,7 @@ static void test_prepare_simple() strmov(query, "SHOW SLAVE STATUS"); stmt= mysql_simple_prepare(mysql, query); check_stmt(stmt); - DIE_UNLESS(mysql_stmt_field_count(stmt) == 50); + DIE_UNLESS(mysql_stmt_field_count(stmt) == 53); mysql_stmt_close(stmt); /* show master status */ @@ -1056,6 +1056,7 @@ static void test_wl4435_2() rc= mysql_query(mysql, "DROP PROCEDURE p1"); myquery(rc); } + mct_close_log(); } @@ -20236,8 +20237,383 @@ static void test_proxy_header() test_proxy_header_ignore(); } + +static void test_bulk_autoinc() +{ + int rc; + MYSQL_STMT *stmt; + MYSQL_BIND bind[1]; + MYSQL_ROW row; + char indicator[]= {0, STMT_INDICATOR_NULL, 0/*STMT_INDICATOR_IGNORE*/}; + my_bool error[1]; + int i, id[]= {2, 3, 777}, count= sizeof(id)/sizeof(id[0]); + MYSQL_RES *result; + + rc= mysql_query(mysql, "DROP TABLE IF EXISTS ai_field_value"); + myquery(rc); + rc= mysql_query(mysql, "CREATE TABLE ai_field_value (id int not null primary key auto_increment)"); + myquery(rc); + stmt= mysql_stmt_init(mysql); + rc= mysql_stmt_prepare(stmt, "INSERT INTO ai_field_value(id) values(?)", -1); + check_execute(stmt, rc); + + memset(bind, 0, sizeof(bind)); + bind[0].buffer_type = MYSQL_TYPE_LONG; + bind[0].buffer = (void *)id; + bind[0].buffer_length = 0; + bind[0].is_null = NULL; + bind[0].length = NULL; + bind[0].error = error; + bind[0].u.indicator= indicator; + + mysql_stmt_attr_set(stmt, STMT_ATTR_ARRAY_SIZE, (void*)&count); + rc= mysql_stmt_bind_param(stmt, bind); + check_execute(stmt, rc); + + rc= mysql_stmt_execute(stmt); + check_execute(stmt, rc); + + mysql_stmt_close(stmt); + + rc= mysql_query(mysql, "SELECT id FROM ai_field_value"); + myquery(rc); + + result= mysql_store_result(mysql); + mytest(result); + + i= 0; + while ((row= mysql_fetch_row(result))) + { + DIE_IF(atoi(row[0]) != id[i++]); + } + rc= mysql_query(mysql, "DROP TABLE ai_field_value"); + myquery(rc); +} + #endif + +static void print_metadata(MYSQL_RES *rs_metadata, int num_fields) +{ + int i; + MYSQL_FIELD *fields= mysql_fetch_fields(rs_metadata); + + for (i = 0; i < num_fields; ++i) + { + mct_log(" - %d: name: '%s'/'%s'; table: '%s'/'%s'; " + "db: '%s'; catalog: '%s'; length: %d; max_length: %d; " 
+ "type: %d; decimals: %d\n", + (int) i, + (const char *) fields[i].name, + (const char *) fields[i].org_name, + (const char *) fields[i].table, + (const char *) fields[i].org_table, + (const char *) fields[i].db, + (const char *) fields[i].catalog, + (int) fields[i].length, + (int) fields[i].max_length, + (int) fields[i].type, + (int) fields[i].decimals); + + } +} + +static void test_explain_meta() +{ + MYSQL_STMT *stmt; + int num_fields; + char query[MAX_TEST_QUERY_LENGTH]; + MYSQL_RES *rs_metadata; + int rc; + + myheader("test_explain_meta"); + mct_start_logging("test_explain_meta"); + + strmov(query, "SELECT 1"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("SELECT number of fields: %d\n", (int) num_fields); + if (num_fields != 1) + { + mct_close_log(); + DIE("num_fields != 1"); + } + mysql_stmt_close(stmt); + + strmov(query, "EXPLAIN SELECT 1"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("EXPALIN number of fields: %d\n", (int) num_fields); + if (num_fields != 10) + { + mct_close_log(); + DIE("num_fields != 10"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + strmov(query, "EXPLAIN format=json SELECT 1"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("EXPALIN JSON number of fields: %d\n", (int) num_fields); + if (num_fields != 1) + { + mct_close_log(); + DIE("num_fields != 1"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + + strmov(query, "ANALYZE SELECT 1"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("ANALYZE number of fields: %d\n", (int) num_fields); + if (num_fields != 13) + { + mct_close_log(); + DIE("num_fields != 13"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + strmov(query, "ANALYZE format=json SELECT 1"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("ANALYZE JSON number of fields: %d\n", (int) num_fields); + if (num_fields != 1) + { + mct_close_log(); + DIE("num_fields != 1"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + rc= mysql_query(mysql, "CREATE TABLE t1 (a int)"); + myquery(rc); + + strmov(query, "EXPLAIN INSERT INTO t1 values (1)"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("EXPALIN INSERT number of fields: %d\n", (int) num_fields); + if (num_fields != 10) + { + mct_close_log(); + DIE("num_fields != 10"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + strmov(query, "EXPLAIN format=json INSERT INTO t1 values(1)"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("EXPALIN JSON INSERT number of fields: %d\n", (int) num_fields); + if (num_fields != 1) + { + mct_close_log(); + DIE("num_fields != 1"); + } + print_metadata(rs_metadata, num_fields); + 
mysql_stmt_close(stmt); + + + strmov(query, "ANALYZE INSERT INTO t1 values(1)"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("ANALYZE INSERT number of fields: %d\n", (int) num_fields); + if (num_fields != 13) + { + mct_close_log(); + DIE("num_fields != 13"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + strmov(query, "ANALYZE format=json INSERT INTO t1 values(1)"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("ANALYZE JSON INSERT number of fields: %d\n", (int) num_fields); + if (num_fields != 1) + { + mct_close_log(); + DIE("num_fields != 1"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + + strmov(query, "EXPLAIN UPDATE t1 set a=2"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("EXPALIN UPDATE number of fields: %d\n", (int) num_fields); + if (num_fields != 10) + { + mct_close_log(); + DIE("num_fields != 10"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + strmov(query, "EXPLAIN format=json UPDATE t1 set a=2"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("EXPALIN JSON UPDATE number of fields: %d\n", (int) num_fields); + if (num_fields != 1) + { + mct_close_log(); + DIE("num_fields != 1"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + + strmov(query, "ANALYZE UPDATE t1 set a=2"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("ANALYZE UPDATE number of fields: %d\n", (int) num_fields); + if (num_fields != 13) + { + mct_close_log(); + DIE("num_fields != 13"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + strmov(query, "ANALYZE format=json UPDATE t1 set a=2"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("ANALYZE JSON UPDATE number of fields: %d\n", (int) num_fields); + if (num_fields != 1) + { + mct_close_log(); + DIE("num_fields != 1"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + + strmov(query, "EXPLAIN DELETE FROM t1"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("EXPALIN DELETE number of fields: %d\n", (int) num_fields); + if (num_fields != 10) + { + mct_close_log(); + DIE("num_fields != 10"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + strmov(query, "EXPLAIN format=json DELETE FROM t1"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("EXPALIN JSON DELETE number of fields: %d\n", (int) num_fields); + if (num_fields != 1) + { + mct_close_log(); + DIE("num_fields != 1"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + + strmov(query, "ANALYZE DELETE FROM t1"); 
+ stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("ANALYZE DELETE number of fields: %d\n", (int) num_fields); + if (num_fields != 13) + { + mct_close_log(); + DIE("num_fields != 13"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + strmov(query, "ANALYZE format=json DELETE FROM t1"); + stmt= mysql_simple_prepare(mysql, query); + check_stmt(stmt); + + rs_metadata= mysql_stmt_result_metadata(stmt); + + num_fields= mysql_stmt_field_count(stmt); + mct_log("ANALYZE JSON DELETE number of fields: %d\n", (int) num_fields); + if (num_fields != 1) + { + mct_close_log(); + DIE("num_fields != 1"); + } + print_metadata(rs_metadata, num_fields); + mysql_stmt_close(stmt); + + rc= mysql_query(mysql, "DROP TABLE t1"); + myquery(rc); + mct_close_log(); +} + static struct my_tests_st my_tests[]= { { "disable_query_logs", disable_query_logs }, { "test_view_sp_list_fields", test_view_sp_list_fields }, @@ -20246,7 +20622,7 @@ static struct my_tests_st my_tests[]= { #ifdef EMBEDDED_LIBRARY { "test_embedded_start_stop", test_embedded_start_stop }, #endif -#if NOT_YET_WORKING +#ifdef NOT_YET_WORKING { "test_drop_temp", test_drop_temp }, #endif { "test_fetch_seek", test_fetch_seek }, @@ -20523,7 +20899,9 @@ static struct my_tests_st my_tests[]= { { "test_mdev14454", test_mdev14454 }, #ifndef EMBEDDED_LIBRARY { "test_proxy_header", test_proxy_header}, + { "test_bulk_autoinc", test_bulk_autoinc}, #endif + { "test_explain_meta", test_explain_meta }, { 0, 0 } }; diff --git a/unittest/sql/mf_iocache-t.cc b/unittest/sql/mf_iocache-t.cc index f27e49d7ea3..dbb16e51c70 100644 --- a/unittest/sql/mf_iocache-t.cc +++ b/unittest/sql/mf_iocache-t.cc @@ -188,10 +188,76 @@ void mdev9044() close_cached_file(&info); } +/* 2 Reads (with my_b_fill) in cache makes second read to fail */ +void mdev10259() +{ + int res; + uchar buf[200]; + memset(buf, FILL, sizeof(buf)); + + diag("MDEV-10259- mysqld crash with certain statement length and order with" + " Galera and encrypt-tmp-files=1"); + + init_io_cache_encryption(); + + res= open_cached_file(&info, 0, 0, CACHE_SIZE, 0); + ok(res == 0, "open_cached_file" INFO_TAIL); + + res= my_b_write(&info, buf, sizeof(buf)); + ok(res == 0 && info.pos_in_file == 0, "200 write" INFO_TAIL); + + res= my_b_flush_io_cache(&info, 1); + ok(res == 0, "flush" INFO_TAIL); + + my_off_t saved_pos= my_b_tell(&info); + res= reinit_io_cache(&info, READ_CACHE, 0, 0, 0); + ok(res == 0, "reinit READ_CACHE" INFO_TAIL); + + size_t s= my_b_fill(&info); + ok(s == 200, "fill" INFO_TAIL); + + s= my_b_fill(&info); + ok(s == 0, "fill" INFO_TAIL); + + s= my_b_fill(&info); + ok(s == 0, "fill" INFO_TAIL); + + res= reinit_io_cache(&info, WRITE_CACHE, saved_pos, 0, 0); + ok(res == 0, "reinit WRITE_CACHE" INFO_TAIL); + + res= reinit_io_cache(&info, READ_CACHE, 0, 0, 0); + ok(res == 0, "reinit READ_CACHE" INFO_TAIL); + + ok(200 == my_b_bytes_in_cache(&info),"my_b_bytes_in_cache == 200"); + + s= my_b_fill(&info); + ok(s == 0, "fill" INFO_TAIL); + + s= my_b_fill(&info); + ok(s == 0, "fill" INFO_TAIL); + + s= my_b_fill(&info); + ok(s == 0, "fill" INFO_TAIL); + + res= reinit_io_cache(&info, WRITE_CACHE, saved_pos, 0, 0); + ok(res == 0, "reinit WRITE_CACHE" INFO_TAIL); + + res= reinit_io_cache(&info, READ_CACHE, 0, 0, 0); + ok(res == 0, "reinit READ_CACHE" INFO_TAIL); + + ok(200 == my_b_bytes_in_cache(&info),"my_b_bytes_in_cache == 200"); + + res= my_b_read(&info, buf, sizeof(buf)) || 
data_bad(buf, sizeof(buf)); + ok(res == 0 && info.pos_in_file == 0, "large read" INFO_TAIL); + + close_cached_file(&info); + +} + int main(int argc __attribute__((unused)),char *argv[]) { MY_INIT(argv[0]); - plan(29); + plan(46); /* temp files with and without encryption */ encrypt_tmp_files= 1; @@ -203,6 +269,10 @@ int main(int argc __attribute__((unused)),char *argv[]) /* regression tests */ mdev9044(); + encrypt_tmp_files= 1; + mdev10259(); + encrypt_tmp_files= 0; + my_end(0); return exit_status(); } diff --git a/win/upgrade_wizard/CMakeLists.txt b/win/upgrade_wizard/CMakeLists.txt index 2186a79f732..81c9c0d572c 100644 --- a/win/upgrade_wizard/CMakeLists.txt +++ b/win/upgrade_wizard/CMakeLists.txt @@ -11,6 +11,9 @@ IF(CMAKE_USING_VC_FREE_TOOLS) ENDIF() # We need MFC +# /permissive- flag does not play well with MFC, disable it. +STRING(REPLACE "/permissive-" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + FIND_PACKAGE(MFC) IF(NOT MFC_FOUND) IF(BUILD_RELEASE)