diff --git a/CMakeLists.txt b/CMakeLists.txt index 0251e06fb9f..560e7b87d33 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -341,7 +341,13 @@ SET(WITH_SAFEMALLOC "AUTO" CACHE STRING "Use safemalloc memory debugger. Will re IF(WITH_SAFEMALLOC MATCHES "ON") ADD_DEFINITIONS( -DSAFEMALLOC) -ELSEIF(WITH_SAFEMALLOC MATCHES "AUTO" AND NOT WIN32 AND NOT WITH_VALGRIND) +ELSEIF(WITH_SAFEMALLOC MATCHES "AUTO" + AND NOT WIN32 + AND NOT WITH_VALGRIND + AND NOT WITH_ASAN + AND NOT WITH_UBSAN + AND NOT WITH_TSAN + AND NOT WITH_MSAN) SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC") SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC") ENDIF() diff --git a/client/mysql.cc b/client/mysql.cc index e04276d02d6..194feb2e144 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -3212,6 +3212,26 @@ static int reconnect(void) return 0; } +#ifndef EMBEDDED_LIBRARY +static void status_info_cb(void *data, enum enum_mariadb_status_info type, ...) +{ + va_list ap; + va_start(ap, type); + if (type == SESSION_TRACK_TYPE && va_arg(ap, int) == SESSION_TRACK_SCHEMA) + { + MARIADB_CONST_STRING *val= va_arg(ap, MARIADB_CONST_STRING *); + my_free(current_db); + if (val->length) + current_db= my_strndup(PSI_NOT_INSTRUMENTED, val->str, val->length, MYF(MY_FAE)); + else + current_db= NULL; + } + va_end(ap); +} +#else +#define mysql_optionsv(A,B,C,D) do { } while(0) +#endif + static void get_current_db() { MYSQL_RES *res; @@ -5025,6 +5045,8 @@ sql_real_connect(char *host,char *database,char *user,char *password, mysql_close(&mysql); } mysql_init(&mysql); + if (!one_database) + mysql_optionsv(&mysql, MARIADB_OPT_STATUS_CALLBACK, status_info_cb, NULL); if (opt_init_command) mysql_options(&mysql, MYSQL_INIT_COMMAND, opt_init_command); if (opt_connect_timeout) diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index 193cceab2fb..7df590fc499 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -1033,9 +1033,16 @@ Exit_status process_event(PRINT_EVENT_INFO 
*print_event_info, Log_event *ev, binlog, even if they have a server_id. Also, we have to read the format_description event so that we can parse subsequent events. + Don't skip Unknown events either since we don't know their `server_id`s. */ - if (ev_type != ROTATE_EVENT && is_server_id_excluded(ev->server_id)) - goto end; + switch (ev_type) { + case ROTATE_EVENT: + case UNKNOWN_EVENT: + break; + default: + if (is_server_id_excluded(ev->server_id)) + goto end; + } } if ((stop_datetime_given && ev->when >= stop_datetime) || (pos >= stop_position_mot)) @@ -3400,7 +3407,8 @@ int main(int argc, char** argv) } /* - Emit a warning if we finished processing input before reaching the stop + Emit warning(s) (in Gtid_event_filter::verify_completed_state() for GTID(s)) + if we finished processing input before reaching the stop boundaries indicated by --stop-datetime or --stop-position. */ if (stop_datetime_given && stop_datetime > last_processed_ev.datetime) @@ -3410,6 +3418,8 @@ int main(int argc, char** argv) stop_position > last_processed_ev.position) warning("Did not reach stop position %llu before end of input", stop_position); + if (position_gtid_filter) + position_gtid_filter->verify_final_state(); /* If enable flashback, need to print the events from the end to the diff --git a/client/mysqldump.cc b/client/mysqldump.cc index 4327c4328b9..9b85111fc22 100644 --- a/client/mysqldump.cc +++ b/client/mysqldump.cc @@ -184,7 +184,7 @@ static DYNAMIC_STRING extended_row; static DYNAMIC_STRING dynamic_where; static MYSQL_RES *get_table_name_result= NULL; static MEM_ROOT glob_root; -static MYSQL_RES *routine_res, *routine_list_res; +static MYSQL_RES *routine_res, *routine_list_res, *slave_status_res= NULL; #include @@ -1996,6 +1996,8 @@ static void free_resources() mysql_free_result(routine_res); if (routine_list_res) mysql_free_result(routine_list_res); + if (slave_status_res) + mysql_free_result(slave_status_res); if (mysql) { mysql_close(mysql); @@ -6432,17 +6434,19 @@ static 
int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos, static int do_stop_slave_sql(MYSQL *mysql_con) { - MYSQL_RES *slave; MYSQL_ROW row; + DBUG_ASSERT( + !slave_status_res // do_stop_slave_sql() should only be called once + ); - if (mysql_query_with_error_report(mysql_con, &slave, + if (mysql_query_with_error_report(mysql_con, &slave_status_res, multi_source ? "SHOW ALL SLAVES STATUS" : "SHOW SLAVE STATUS")) return(1); /* Loop over all slaves */ - while ((row= mysql_fetch_row(slave))) + while ((row= mysql_fetch_row(slave_status_res))) { if (row[11 + multi_source]) { @@ -6457,13 +6461,11 @@ static int do_stop_slave_sql(MYSQL *mysql_con) if (mysql_query_with_error_report(mysql_con, 0, query)) { - mysql_free_result(slave); return 1; } } } } - mysql_free_result(slave); return(0); } @@ -6587,32 +6589,35 @@ static int do_show_slave_status(MYSQL *mysql_con, int have_mariadb_gtid, static int do_start_slave_sql(MYSQL *mysql_con) { - MYSQL_RES *slave; MYSQL_ROW row; int error= 0; DBUG_ENTER("do_start_slave_sql"); - /* We need to check if the slave sql is stopped in the first place */ - if (mysql_query_with_error_report(mysql_con, &slave, - multi_source ? - "SHOW ALL SLAVES STATUS" : - "SHOW SLAVE STATUS")) - DBUG_RETURN(1); + /* + do_start_slave_sql() should normally be called + sometime after do_stop_slave_sql() succeeds + */ + if (!slave_status_res) + DBUG_RETURN(error); + mysql_data_seek(slave_status_res, 0); - while ((row= mysql_fetch_row(slave))) + while ((row= mysql_fetch_row(slave_status_res))) { DBUG_PRINT("info", ("Connection: '%s' status: '%s'", multi_source ? row[0] : "", row[11 + multi_source])); if (row[11 + multi_source]) { - /* if SLAVE SQL is not running, we don't start it */ - if (strcmp(row[11 + multi_source], "Yes")) + /* + If SLAVE_SQL was not running but is now, + we start it anyway to warn the unexpected state change. 
+ */ + if (strcmp(row[11 + multi_source], "No")) { char query[160]; if (multi_source) - sprintf(query, "START SLAVE '%.80s'", row[0]); + sprintf(query, "START SLAVE '%.80s' SQL_THREAD", row[0]); else - strmov(query, "START SLAVE"); + strmov(query, "START SLAVE SQL_THREAD"); if (mysql_query_with_error_report(mysql_con, 0, query)) { @@ -6623,7 +6628,6 @@ static int do_start_slave_sql(MYSQL *mysql_con) } } } - mysql_free_result(slave); DBUG_RETURN(error); } diff --git a/cmake/aws_sdk.cmake b/cmake/aws_sdk.cmake index a0d46e1a892..1fed40cdbac 100644 --- a/cmake/aws_sdk.cmake +++ b/cmake/aws_sdk.cmake @@ -4,19 +4,27 @@ MACRO (SKIP_AWS_SDK MSG) RETURN() ENDMACRO() -FUNCTION (CHECK_AWS_SDK RETVAL REASON) +FUNCTION (CHECK_AWS_SDK COMPONENTS RETVAL REASON) # AWS_SDK_EXTERNAL_PROJECT must be ON IF(NOT AWS_SDK_EXTERNAL_PROJECT) - SKIP_AWS_SDK("AWS_SDK_EXTERNAL_PROJECT is not ON") + FOREACH(comp ${COMPONENTS}) + FIND_PACKAGE(aws-cpp-sdk-${comp} CONFIG QUIET) + IF (NOT aws-cpp-sdk-${comp}_FOUND) + SKIP_AWS_SDK("AWS_SDK_EXTERNAL_PROJECT is not ON and aws-cpp-sdk-${comp} not found") + ENDIF() + ENDFOREACH() + SET(${RETVAL} ON PARENT_SCOPE) ENDIF() IF(NOT NOT_FOR_DISTRIBUTION) SKIP_AWS_SDK("AWS SDK has Apache 2.0 License which is not compatible with GPLv2. 
Set -DNOT_FOR_DISTRIBUTION=ON if you need it") ENDIF() + IF(CMAKE_VERSION VERSION_LESS "3.15") + SKIP_AWS_SDK("CMake too old") + ENDIF() # Check compiler support IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU") - EXECUTE_PROCESS(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION) - IF (GCC_VERSION VERSION_LESS 4.8) - SKIP_AWS_SDK("GCC VERSION too old (${GCC_VERSION}, required is 4.8 or later") + IF (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) + SKIP_AWS_SDK("GCC VERSION too old (${CMAKE_CXX_COMPILER_VERSION}, required is 4.9 or later)") ELSEIF (CMAKE_CXX_COMPILER_ID MATCHES "Clang") IF ((CMAKE_CXX_COMPILER_VERSION AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.3) OR @@ -36,35 +44,27 @@ FUNCTION (CHECK_AWS_SDK RETVAL REASON) SKIP_AWS_SDK("OS unsupported by AWS SDK") ENDIF() - # Build from source, using ExternalProject_Add - # AWS C++ SDK requires cmake 2.8.12 - IF(CMAKE_VERSION VERSION_LESS "2.8.12") - SKIP_AWS_SDK("CMake is too old") - ENDIF() - IF(UNIX) - # Check librairies required for building SDK - FIND_PACKAGE(CURL) - SET_PACKAGE_PROPERTIES(CURL PROPERTIES TYPE REQUIRED) - IF(NOT CURL_FOUND) - SKIP_AWS_SDK("AWS C++ SDK requires libcurl development package") + IF("${WITH_ZLIB}" STREQUAL "bundled") + # Breaks FIND_PACKAGE(ZLIB) + SKIP_AWS_SDK("Incompatible with WITH_ZLIB=bundled") ENDIF() - FIND_PATH(UUID_INCLUDE_DIR uuid/uuid.h) - IF(NOT UUID_INCLUDE_DIR) - SKIP_AWS_SDK("AWS C++ SDK requires uuid development package") - ENDIF() - IF(NOT APPLE) - FIND_LIBRARY(UUID_LIBRARIES uuid) - SET_PACKAGE_PROPERTIES(UUID_LIBRARIES PROPERTIES TYPE REQUIRED) - IF(NOT UUID_LIBRARIES) - SKIP_AWS_SDK("AWS C++ SDK requires uuid development package") + # Check libraries required for building SDK + FOREACH(pkg CURL ZLIB OpenSSL) + FIND_PACKAGE(${pkg}) + IF(NOT ${pkg}_FOUND) + SKIP_AWS_SDK("AWS C++ SDK requires ${pkg}") ENDIF() - FIND_PACKAGE(OpenSSL) - SET_PACKAGE_PROPERTIES(OpenSSL PROPERTIES TYPE REQUIRED) - IF(NOT OPENSSL_FOUND) - SKIP_AWS_SDK("AWS C++ SDK requires openssl
development package") + SET_PACKAGE_PROPERTIES(${pkg} PROPERTIES TYPE REQUIRED) + ENDFOREACH() + # Also check for required libraries explicitly - they might be + missing, even if check above succeeds, e.g. when using own copy + of zlib + FOREACH(lib OpenSSL::Crypto ZLIB::ZLIB CURL::libcurl) + IF(NOT TARGET ${lib}) + SKIP_AWS_SDK("AWS C++ SDK requires ${lib}") ENDIF() - ENDIF() + ENDFOREACH() ENDIF() SET(${RETVAL} ON PARENT_SCOPE) ENDFUNCTION() @@ -85,14 +85,4 @@ FUNCTION(USE_AWS_SDK_LIBS) SET_PROPERTY(GLOBAL PROPERTY AWS_SDK_LIBS ${comp} APPEND) TARGET_LINK_LIBRARIES(${target} aws-cpp-sdk-${comp}) ENDFOREACH() - TARGET_LINK_LIBRARIES(${target} aws-cpp-sdk-core) - TARGET_INCLUDE_DIRECTORIES(${target} PRIVATE ${PROJECT_BINARY_DIR}/extra/aws_sdk/aws_sdk_cpp/include) - # Link OS libraries that AWS SDK depends on - IF(WIN32) - TARGET_LINK_LIBRARIES(${target} bcrypt winhttp wininet userenv version) - ELSE() - FIND_PACKAGE(CURL REQUIRED) - FIND_PACKAGE(OpenSSL REQUIRED) - TARGET_LINK_LIBRARIES(${target} ${OPENSSL_LIBRARIES} ${CURL_LIBRARIES} ${UUID_LIBRARIES}) - ENDIF() ENDFUNCTION() diff --git a/cmake/build_configurations/mysql_release.cmake b/cmake/build_configurations/mysql_release.cmake index e43a9ea3e9f..6b7b4d698b1 100644 --- a/cmake/build_configurations/mysql_release.cmake +++ b/cmake/build_configurations/mysql_release.cmake @@ -120,7 +120,10 @@ ELSEIF(DEB) SET(WITH_ZLIB system CACHE STRING "") SET(WITH_LIBWRAP ON) SET(HAVE_EMBEDDED_PRIVILEGE_CONTROL ON) - SET(PLUGIN_AUTH_SOCKET YES CACHE STRING "") + # No hurd implementation + IF(NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "i686-AT386") + SET(PLUGIN_AUTH_SOCKET YES CACHE STRING "") + ENDIF() SET(WITH_EMBEDDED_SERVER ON CACHE BOOL "") SET(WITH_PCRE system CACHE STRING "") SET(CLIENT_PLUGIN_ZSTD OFF) diff --git a/cmake/install_macros.cmake b/cmake/install_macros.cmake index 4a4e3dae9b6..cad94a64193 100644 --- a/cmake/install_macros.cmake +++ b/cmake/install_macros.cmake @@ -255,6 +255,7 @@ FUNCTION(INSTALL_RUNTIME_DEPS)
GET_PROPERTY(installed_targets GLOBAL PROPERTY INSTALLED_TARGETS) # Exclude all dependencies that are shared libraries from the # same build. + FILE(TO_CMAKE_PATH "$ENV{PATH}" _path_list) FOREACH(tgt ${installed_targets}) SET(exclude_libs) GET_TARGET_PROPERTY(link_libraries ${tgt} LINK_LIBRARIES) @@ -286,8 +287,9 @@ FUNCTION(INSTALL_RUNTIME_DEPS) ".*system32/.*\\.dll" # Windows stuff POST_INCLUDE_REGEXES DIRECTORIES - ${VCPKG_INSTALLED_DIR}/${VCPKG_TARGET_TRIPLET}/bin - $<$:${VCPKG_INSTALLED_DIR}/${VCPKG_TARGET_TRIPLET}/debug/bin> + $<$:${VCPKG_INSTALLED_DIR}/${VCPKG_TARGET_TRIPLET}/bin + $<$,$>:${VCPKG_INSTALLED_DIR}/${VCPKG_TARGET_TRIPLET}/debug/bin> + ${_path_list} ) ENDFOREACH() ENDFUNCTION() diff --git a/cmake/os/WindowsCache.cmake b/cmake/os/WindowsCache.cmake index 7034dfad5ca..bc9bacf902a 100644 --- a/cmake/os/WindowsCache.cmake +++ b/cmake/os/WindowsCache.cmake @@ -361,4 +361,5 @@ SET(HAVE_LINUX_UNISTD_H CACHE INTERNAL "") SET(OFF64_T CACHE INTERNAL "") SET(Z_HAVE_UNISTD_H CACHE INTERNAL "") SET(HAVE_OFF64_T CACHE FALSE INTERNAL "") +SET(HAVE_AUXV_GETAUXVAL CACHE INTERNAL "") ENDIF(MSVC) diff --git a/cmake/plugin.cmake b/cmake/plugin.cmake index 008b2562ffb..1a5f2ccd65a 100644 --- a/cmake/plugin.cmake +++ b/cmake/plugin.cmake @@ -161,6 +161,9 @@ MACRO(MYSQL_ADD_PLUGIN) PROPERTIES COMPILE_DEFINITIONS "EMBEDDED_LIBRARY${version_string}") ENDIF() ADD_DEPENDENCIES(${target}_embedded GenError ${ARG_DEPENDS}) + IF(ARG_LINK_LIBRARIES) + TARGET_LINK_LIBRARIES (${target}_embedded ${ARG_LINK_LIBRARIES}) + ENDIF() ENDIF() ENDIF() diff --git a/cmake/wsrep.cmake b/cmake/wsrep.cmake index a01a1d68f64..95df72400c7 100644 --- a/cmake/wsrep.cmake +++ b/cmake/wsrep.cmake @@ -49,8 +49,6 @@ Then restart the build. 
SET(WSREP_VERSION "${WSREP_API_VERSION}.${WSREP_PATCH_VERSION}" CACHE INTERNAL "WSREP version") - SET(WSREP_PROC_INFO ${WITH_WSREP}) - SET(WSREP_PATCH_VERSION "wsrep_${WSREP_VERSION}") if (NOT WITH_WSREP_ALL) SET(WSREP_LIB_WITH_UNIT_TESTS OFF CACHE BOOL diff --git a/config.h.cmake b/config.h.cmake index 97ed71065c5..cf3c1afe4ed 100644 --- a/config.h.cmake +++ b/config.h.cmake @@ -542,7 +542,6 @@ this is the case with Microsoft Windows VirtualFree(MEM_DECOMMIT) */ #ifndef EMBEDDED_LIBRARY #cmakedefine WSREP_INTERFACE_VERSION "@WSREP_INTERFACE_VERSION@" #cmakedefine WITH_WSREP 1 -#cmakedefine WSREP_PROC_INFO 1 #endif #if !defined(__STDC_FORMAT_MACROS) diff --git a/debian/additions/debian-start.inc.sh b/debian/additions/debian-start.inc.sh index 94fc81296cf..e52fbe012c2 100755 --- a/debian/additions/debian-start.inc.sh +++ b/debian/additions/debian-start.inc.sh @@ -84,7 +84,13 @@ function check_root_accounts() { logger -p daemon.info -i -t"$0" "Checking for insecure root accounts." - ret=$(echo "SELECT count(*) FROM mysql.user WHERE user='root' and password='' and password_expired='N' and plugin in ('', 'mysql_native_password', 'mysql_old_password');" | $MARIADB --skip-column-names) + ret=$(echo " + SELECT count(*) FROM mysql.global_priv + WHERE user='root' AND + JSON_VALUE(priv, '$.plugin') in ('mysql_native_password', 'mysql_old_password', 'parsec') AND + JSON_VALUE(priv, '$.authentication_string') = '' AND + JSON_VALUE(priv, '$.password_last_changed') != 0 + " | $MARIADB --skip-column-names) if [ "$ret" -ne "0" ] then logger -p daemon.warn -i -t"$0" "WARNING: mysql.user contains $ret root accounts without password!" 
diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh index e23101d1eee..0e11a98f830 100755 --- a/debian/autobake-deb.sh +++ b/debian/autobake-deb.sh @@ -44,11 +44,10 @@ add_lsb_base_depends() sed -e 's#lsof #lsb-base (>= 3.0-10),\n lsof #' -i debian/control } -replace_uring_with_aio() +remove_uring() { - sed 's/liburing-dev/libaio-dev/g' -i debian/control - sed -e '/-DIGNORE_AIO_CHECK=ON/d' \ - -e '/-DWITH_URING=ON/d' -i debian/rules + sed -e '/liburing-dev/d' -i debian/control + sed -e '/-DWITH_URING=ON/d' -i debian/rules } disable_libfmt() @@ -96,7 +95,7 @@ in # Debian "buster") disable_libfmt - replace_uring_with_aio + remove_uring ;& "bullseye") add_lsb_base_depends @@ -107,7 +106,7 @@ in # so no removal is necessary. if [[ ! "$architecture" =~ amd64|arm64|armel|armhf|i386|mips64el|mipsel|ppc64el|s390x ]] then - replace_uring_with_aio + remove_uring fi ;& "trixie"|"forky"|"sid") @@ -116,8 +115,8 @@ in ;; # Ubuntu "focal") - replace_uring_with_aio disable_libfmt + remove_uring ;& "jammy"|"kinetic") add_lsb_base_depends diff --git a/debian/control b/debian/control index 0f451a2a380..7300d3296e9 100644 --- a/debian/control +++ b/debian/control @@ -9,17 +9,17 @@ Build-Depends: bison, default-jdk, dh-exec, dh-package-notes, - flex [amd64], + flex [amd64 arm64], gdb , libaio-dev [linux-any], - libboost-atomic-dev [amd64], - libboost-chrono-dev [amd64], - libboost-date-time-dev [amd64], + libboost-atomic-dev [amd64 arm64], + libboost-chrono-dev [amd64 arm64], + libboost-date-time-dev [amd64 arm64], libboost-dev, - libboost-filesystem-dev [amd64], - libboost-regex-dev [amd64], - libboost-system-dev [amd64], - libboost-thread-dev [amd64], + libboost-filesystem-dev [amd64 arm64], + libboost-regex-dev [amd64 arm64], + libboost-system-dev [amd64 arm64], + libboost-thread-dev [amd64 arm64], libbz2-dev, libcrack2-dev (>= 2.9.0), libcurl4-openssl-dev | libcurl4-dev, diff --git a/debian/mariadb-test-data.lintian-overrides b/debian/mariadb-test-data.lintian-overrides 
index 1cce591412f..92f5c9bec24 100644 --- a/debian/mariadb-test-data.lintian-overrides +++ b/debian/mariadb-test-data.lintian-overrides @@ -1,6 +1,3 @@ -# These should be moved, see https://jira.mariadb.org/browse/MDEV-21654 -arch-dependent-file-in-usr-share [usr/share/mariadb/mariadb-test/suite/plugins/pam/pam_mariadb_mtr.so] -arch-independent-package-contains-binary-or-object [usr/share/mariadb/mariadb-test/suite/plugins/pam/pam_mariadb_mtr.so] # Mainly for support for *BSD family. Not right way to do but this is test package and not for production incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mariadb/mariadb-test/std_data/checkDBI_DBD-MariaDB.pl] incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mariadb/mariadb-test/suite/engines/rr_trx/run_stress_tx_rr.pl] diff --git a/debian/rules b/debian/rules index ac33739e752..e358134f299 100644 --- a/debian/rules +++ b/debian/rules @@ -87,9 +87,6 @@ endif # quality standards in Debian. Also building it requires an extra 4 GB of disk # space which makes native Debian builds fail as the total disk space needed # for MariaDB becomes over 10 GB. Only build CS via autobake-deb.sh. - # - # Note: Don't use '-DWITH_URING=ON' as some Buildbot builders are missing it - # and would fail permanently. 
PATH=$${MYSQL_BUILD_PATH:-"/usr/lib/ccache:/usr/local/bin:/usr/bin:/bin"} \ dh_auto_configure --builddirectory=$(BUILDDIR) -- \ -DCMAKE_BUILD_TYPE=RelWithDebInfo \ @@ -103,6 +100,8 @@ endif -DPLUGIN_AWS_KEY_MANAGEMENT=NO \ -DPLUGIN_COLUMNSTORE=NO \ -DIGNORE_AIO_CHECK=ON \ + -DWITH_URING=ON \ + -DWITH_LIBAIO=ON \ -DDEB=$(DEB_VENDOR) # This is needed, otherwise 'make test' will run before binaries have been built diff --git a/extra/aws_sdk/CMakeLists.txt b/extra/aws_sdk/CMakeLists.txt index 7c4b8d8195c..bacd6b5f784 100644 --- a/extra/aws_sdk/CMakeLists.txt +++ b/extra/aws_sdk/CMakeLists.txt @@ -8,19 +8,46 @@ IF(SDK_LIBS_COUNT EQUAL 0) RETURN() ENDIF() -CHECK_AWS_SDK(RETVAL REASON) +CHECK_AWS_SDK("${SDK_LIBS}" RETVAL REASON) IF(NOT RETVAL) MESSAGE(FATAL_ERROR "AWS C++ will not be built (${REASON}), but dependency on following components is found ${SDK_LIBS}. Use CHECK_AWS_SDK() function before trying to build with SDK components") ENDIF() +IF(NOT AWS_SDK_EXTERNAL_PROJECT) + # System aws sdk libraries found + RETURN() +ENDIF() + +SET(all_libs + aws-crt-cpp + aws-c-event-stream + aws-c-common + aws-c-http + aws-c-mqtt + aws-c-cal + aws-c-auth + aws-c-io + aws-checksums + aws-c-s3 + aws-c-sdkutils + aws-c-compression + aws-cpp-sdk-core +) +IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") + LIST(APPEND all_libs s2n) +ENDIF() + +FOREACH(lib ${SDK_LIBS}) + LIST(APPEND all_libs aws-cpp-sdk-${lib}) +ENDFOREACH() +LIST(REMOVE_DUPLICATES all_libs) SET(byproducts) - -FOREACH(lib ${SDK_LIBS} core) - SET(lib aws-cpp-sdk-${lib}) - ADD_LIBRARY(${lib} STATIC IMPORTED GLOBAL) +MAKE_DIRECTORY(${PROJECT_BINARY_DIR}/extra/aws_sdk/aws_sdk_cpp/include) +FOREACH(lib ${all_libs}) + ADD_LIBRARY(${lib} UNKNOWN IMPORTED GLOBAL) ADD_DEPENDENCIES(${lib} aws_sdk_cpp) SET (loc "${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}") @@ -28,47 +55,85 @@ FOREACH(lib ${SDK_LIBS} core) SET(byproducts ${byproducts} BUILD_BYPRODUCTS ${loc}) ENDIF() 
SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LOCATION ${loc}) + TARGET_INCLUDE_DIRECTORIES(${lib} INTERFACE ${PROJECT_BINARY_DIR}/extra/aws_sdk/aws_sdk_cpp/include) ENDFOREACH() -# To be compatible with older cmake, we use older version of the SDK -IF(CMAKE_VERSION LESS "3.0") - SET(GIT_TAG "1.0.8") +# Define link dependencies between AWS C libs +TARGET_LINK_LIBRARIES(aws-cpp-sdk-core INTERFACE aws-crt-cpp aws-c-event-stream aws-c-common) +TARGET_LINK_LIBRARIES(aws-crt-cpp INTERFACE aws-c-http aws-c-mqtt aws-c-cal aws-c-auth aws-c-common aws-c-io + aws-checksums aws-c-event-stream aws-c-s3 aws-c-sdkutils) +TARGET_LINK_LIBRARIES(aws-c-event-stream INTERFACE aws-c-io aws-c-common aws-checksums) +TARGET_LINK_LIBRARIES(aws-c-http INTERFACE aws-c-io aws-c-compression aws-c-cal aws-c-common) +TARGET_LINK_LIBRARIES(aws-c-mqtt INTERFACE aws-c-http aws-c-io aws-c-common aws-c-cal aws-c-common) +TARGET_LINK_LIBRARIES(aws-c-auth INTERFACE aws-c-sdkutils aws-c-cal aws-c-http aws-c-io aws-c-common) +TARGET_LINK_LIBRARIES(aws-c-io INTERFACE aws-c-common aws-c-cal) +TARGET_LINK_LIBRARIES(aws-checksums INTERFACE aws-c-common) +TARGET_LINK_LIBRARIES(aws-c-compression INTERFACE aws-c-common) +TARGET_LINK_LIBRARIES(aws-c-s3 INTERFACE aws-c-auth aws-checksums aws-c-http aws-c-io aws-c-cal aws-c-common aws-c-sdkutils aws-c-common) +TARGET_LINK_LIBRARIES(aws-c-sdkutils INTERFACE aws-c-common) + +IF(WIN32) + TARGET_LINK_LIBRARIES(aws-c-io INTERFACE bcrypt crypt32 secur32 ncrypt ws2_32) + TARGET_LINK_LIBRARIES(aws-c-common INTERFACE shlwapi) + TARGET_LINK_LIBRARIES(aws-cpp-sdk-core INTERFACE winhttp wininet version userenv) ELSE() - SET(GIT_TAG "1.8.29") + TARGET_LINK_LIBRARIES(aws-c-cal INTERFACE OpenSSL::Crypto) + TARGET_LINK_LIBRARIES(aws-cpp-sdk-core INTERFACE ZLIB::ZLIB CURL::libcurl) + + # Dependencies below are from CMakeLists.txt for aws-c-common + SET(THREADS_PREFER_PTHREAD_FLAG ON) + FIND_PACKAGE(Threads REQUIRED) + TARGET_LINK_LIBRARIES(aws-c-common INTERFACE 
${CMAKE_DL_LIBS} Threads::Threads) + IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") + TARGET_LINK_LIBRARIES(aws-c-common INTERFACE s2n m rt) + ELSEIF(APPLE) + TARGET_LINK_LIBRARIES(aws-c-common INTERFACE "-framework CoreFoundation") + TARGET_LINK_LIBRARIES(aws-c-io INTERFACE "-framework Security" "-framework Network") + ENDIF() ENDIF() -IF(MSVC_CRT_TYPE MATCHES "/MD") - SET(FORCE_SHARED_CRT ON) -ELSE() - SET(FORCE_SHARED_CRT OFF) -ENDIF() +FOREACH(lib ${SDK_LIBS}) + TARGET_LINK_LIBRARIES(aws-cpp-sdk-${lib} INTERFACE aws-cpp-sdk-core) +ENDFOREACH() + +SET(GIT_TAG "1.11.582") LIST(REMOVE_DUPLICATES SDK_LIBS) STRING( REPLACE ";" "!" SDK_LIBS_STR "${SDK_LIBS}") -#MESSAGE("SDK_LIBS_STR=${SDK_LIBS_STR}") + +IF(MSVC) + SET(BUILD_AND_INSTALL_COMMANDS + BUILD_COMMAND ${CMAKE_COMMAND} --build --config RelWithDebInfo --verbose --parallel + INSTALL_COMMAND ${CMAKE_COMMAND} --install --config RelWithDebInfo) + SET(BUILD_TYPE -DCMAKE_BUILD_TYPE=RelWithDebInfo) +ELSE() + SET(BUILD_AND_INSTALL_COMMANDS) + SET(BUILD_TYPE) +ENDIF() ExternalProject_Add( aws_sdk_cpp GIT_REPOSITORY "https://github.com/awslabs/aws-sdk-cpp.git" GIT_TAG ${GIT_TAG} + GIT_SHALLOW TRUE UPDATE_COMMAND "" SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/aws-sdk-cpp" LIST_SEPARATOR ! 
${byproducts} CMAKE_ARGS + ${BUILD_TYPE} + -DCMAKE_BUILD_TYPE=RelWithDebInfo + -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DBUILD_ONLY=${SDK_LIBS_STR} -DBUILD_SHARED_LIBS=OFF - -DFORCE_SHARED_CRT=${FORCE_SHARED_CRT} + -DFORCE_SHARED_CRT=ON -DENABLE_TESTING=OFF - "-DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG} ${PIC_FLAG}" - "-DCMAKE_CXX_FLAGS_RELWITHDEBINFO=${CMAKE_CXX_FLAGS_RELWITHDEBINFO} ${PIC_FLAG}" - "-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE} ${PIC_FLAG}" - "-DCMAKE_CXX_FLAGS_MINSIZEREL=${CMAKE_CXX_FLAGS_MINSIZEREL} ${PIC_FLAG}" - "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}" - "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}" + -DAWS_WARNINGS_ARE_ERRORS=OFF ${EXTRA_SDK_CMAKE_FLAGS} -DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp -DCMAKE_INSTALL_LIBDIR=lib - TEST_COMMAND "" + TEST_COMMAND "" + ${BUILD_AND_INSTALL_COMMANDS} ) + SET_TARGET_PROPERTIES(aws_sdk_cpp PROPERTIES EXCLUDE_FROM_ALL TRUE) diff --git a/extra/mariabackup/CMakeLists.txt b/extra/mariabackup/CMakeLists.txt index fbc415ce0d4..a71030887c4 100644 --- a/extra/mariabackup/CMakeLists.txt +++ b/extra/mariabackup/CMakeLists.txt @@ -107,6 +107,8 @@ MYSQL_ADD_EXECUTABLE(mbstream TARGET_LINK_LIBRARIES(mbstream mysys ) + +TARGET_INCLUDE_DIRECTORIES(mbstream PRIVATE ${PROJECT_SOURCE_DIR}/tpool) ADD_DEPENDENCIES(mbstream GenError) IF(MSVC) diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc index 79b9223f603..7ff2a6f71cb 100644 --- a/extra/mariabackup/backup_copy.cc +++ b/extra/mariabackup/backup_copy.cc @@ -1349,6 +1349,8 @@ backup_files(ds_ctxt *ds_data, const char *from) } } } + if (!backup_mroonga_files_from_datadir(ds_data, from)) + goto out; msg("Finished backing up non-InnoDB tables and files"); out: datadir_iter_free(it); @@ -1515,7 +1517,9 @@ ibx_copy_incremental_over_full() "aws-kms-key")) || !(ret = backup_files_from_datadir(ds_data, xtrabackup_incremental_dir, - "aria_log"))) + "aria_log")) || + !(ret = backup_mroonga_files_from_datadir(ds_data, + 
xtrabackup_incremental_dir))) goto cleanup; /* copy supplementary files */ @@ -2078,6 +2082,47 @@ bool backup_files_from_datadir(ds_ctxt_t *ds_data, return ret; } +bool backup_mroonga_files_from_datadir(ds_ctxt_t *ds_data, + const char *dir_path) +{ + os_file_dir_t dir= os_file_opendir(dir_path); + if (dir == IF_WIN(INVALID_HANDLE_VALUE, nullptr)) return false; + + os_file_stat_t info; + bool ret= true; + while (os_file_readdir_next_file(dir_path, dir, &info) == 0) + { + + if (info.type != OS_FILE_TYPE_FILE) + continue; + + const char *pname = strrchr(info.name, '/'); +#ifdef _WIN32 + if (const char *last = strrchr(info.name, '\\')) + { + if (!pname || last > pname) + pname = last; + } +#endif + if (!pname) + pname = info.name; + + if (!strstr(pname, ".mrn")) + continue; + + if (xtrabackup_prepare && xtrabackup_incremental_dir && + file_exists(info.name)) + unlink(info.name); + + std::string full_path(dir_path); + full_path.append(1, '/').append(info.name); + if (!(ret = ds_data->copy_file(full_path.c_str() , info.name, 1))) + break; + } + os_file_closedir(dir); + return ret; +} + static int rocksdb_remove_checkpoint_directory() { diff --git a/extra/mariabackup/backup_copy.h b/extra/mariabackup/backup_copy.h index 869bfff19a3..2bf663ce4b8 100644 --- a/extra/mariabackup/backup_copy.h +++ b/extra/mariabackup/backup_copy.h @@ -60,6 +60,8 @@ const char *trim_dotslash(const char *path); bool backup_files_from_datadir(ds_ctxt_t *ds_data, const char *dir_path, const char *prefix); +bool backup_mroonga_files_from_datadir(ds_ctxt_t *ds_data, + const char *dir_path); bool is_system_table(const char *dbname, const char *tablename); std::unique_ptr> diff --git a/extra/mariabackup/backup_mysql.cc b/extra/mariabackup/backup_mysql.cc index 052752b8f5e..fdf4719c1f5 100644 --- a/extra/mariabackup/backup_mysql.cc +++ b/extra/mariabackup/backup_mysql.cc @@ -563,12 +563,36 @@ select_incremental_lsn_from_history(lsn_t *incremental_lsn) { MYSQL_RES *mysql_result; char query[1000]; - 
char buf[100]; + char buf[NAME_LEN*2+3]; + + size_t opt_incremental_history_name_len= 0; + size_t opt_incremental_history_uuid_len= 0; + + if (opt_incremental_history_name) + opt_incremental_history_name_len= + strlen(opt_incremental_history_name); + + if (opt_incremental_history_uuid) + opt_incremental_history_uuid_len= + strlen(opt_incremental_history_uuid); + + if (opt_incremental_history_name_len*2 > sizeof(buf)) + die("Incremental history table name '%s' is too long.", + opt_incremental_history_name); + + if (opt_incremental_history_uuid_len*2 > sizeof(buf)) + die("Incremental history uuid '%s' is too long.", + opt_incremental_history_uuid); + + if (opt_incremental_history_name && opt_incremental_history_name[0] + && opt_incremental_history_uuid && opt_incremental_history_uuid[0]) + die("It is allowed to use either --incremental-history-name " + "or --incremental-history-uuid, but not both."); if (opt_incremental_history_name) { mysql_real_escape_string(mysql_connection, buf, opt_incremental_history_name, - (unsigned long)strlen(opt_incremental_history_name)); + (unsigned long) opt_incremental_history_name_len); snprintf(query, sizeof(query), "SELECT innodb_to_lsn " "FROM " XB_HISTORY_TABLE " " @@ -577,11 +601,10 @@ select_incremental_lsn_from_history(lsn_t *incremental_lsn) "ORDER BY innodb_to_lsn DESC LIMIT 1", buf); } - - if (opt_incremental_history_uuid) { + else if (opt_incremental_history_uuid) { mysql_real_escape_string(mysql_connection, buf, opt_incremental_history_uuid, - (unsigned long)strlen(opt_incremental_history_uuid)); + (unsigned long) opt_incremental_history_uuid_len); snprintf(query, sizeof(query), "SELECT innodb_to_lsn " "FROM " XB_HISTORY_TABLE " " @@ -591,6 +614,8 @@ select_incremental_lsn_from_history(lsn_t *incremental_lsn) buf); } + /* xb_mysql_query(..,.., true) will die on error, so + mysql_result can't be nullptr */ mysql_result = xb_mysql_query(mysql_connection, query, true); ut_ad(mysql_num_fields(mysql_result) == 1); @@ -1691,7 
+1716,7 @@ write_xtrabackup_info(ds_ctxt *datasink, char buf_end_time[100]; tm tm; std::ostringstream oss; - const char *xb_stream_name[] = {"file", "tar", "xbstream"}; + const char *xb_stream_name[] = {"file", "mbstream"}; uuid = read_mysql_one_value(connection, "SELECT UUID()"); server_version = read_mysql_one_value(connection, "SELECT VERSION()"); @@ -1774,6 +1799,10 @@ write_xtrabackup_info(ds_ctxt *datasink, goto cleanup; } + xb_mysql_query(connection, + "ALTER TABLE IF EXISTS " XB_HISTORY_TABLE + " MODIFY format ENUM('file', 'tar', 'mbstream') DEFAULT NULL", false); + xb_mysql_query(connection, "CREATE TABLE IF NOT EXISTS " XB_HISTORY_TABLE "(" "uuid VARCHAR(40) NOT NULL PRIMARY KEY," @@ -1791,7 +1820,7 @@ write_xtrabackup_info(ds_ctxt *datasink, "innodb_to_lsn BIGINT UNSIGNED DEFAULT NULL," "partial ENUM('Y', 'N') DEFAULT NULL," "incremental ENUM('Y', 'N') DEFAULT NULL," - "format ENUM('file', 'tar', 'xbstream') DEFAULT NULL," + "format ENUM('file', 'tar', 'mbstream') DEFAULT NULL," "compressed ENUM('Y', 'N') DEFAULT NULL" ") CHARACTER SET utf8 ENGINE=innodb", false); @@ -1942,7 +1971,7 @@ void capture_tool_command(int argc, char **argv) { /* capture tool name tool args */ - tool_name = strrchr(argv[0], '/'); + tool_name = strrchr(argv[0], IF_WIN('\\', '/')); tool_name = tool_name ? 
tool_name + 1 : argv[0]; make_argv(tool_args, sizeof(tool_args), argc, argv); diff --git a/extra/mariabackup/fil_cur.cc b/extra/mariabackup/fil_cur.cc index 7eaec010c68..36fd9af1cb2 100644 --- a/extra/mariabackup/fil_cur.cc +++ b/extra/mariabackup/fil_cur.cc @@ -186,7 +186,6 @@ xb_fil_cur_open( } #else err = fstat(cursor->file.m_file, &cursor->statinfo); - MSAN_STAT_WORKAROUND(&cursor->statinfo); #endif if (max_file_size < (ulonglong)cursor->statinfo.st_size) { cursor->statinfo.st_size = (ulonglong)max_file_size; diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 2f74bafeb21..d04695b5a53 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -377,6 +377,10 @@ static my_bool opt_check_privileges; extern const char *innodb_checksum_algorithm_names[]; extern TYPELIB innodb_checksum_algorithm_typelib; extern TYPELIB innodb_flush_method_typelib; +#ifdef __linux__ +extern const char *innodb_linux_aio_names[]; +extern TYPELIB innodb_linux_aio_typelib; +#endif extern TYPELIB innodb_doublewrite_typelib; /** Ignored option */ static ulong innodb_flush_method; @@ -1147,29 +1151,63 @@ static void backup_file_op(uint32_t space_id, int type, } } -static bool check_if_fts_table(const char *file_name) { - const char *table_name_start = strrchr(file_name, '/'); +/** Check whether the spacename belongs to internal FTS table +@param space_name space name to be checked +@return true if it is fts table or false otherwise */ +static bool check_if_fts_table(const char *space_name) { + /* There are two types of FTS internal table + 1) FTS common tables (FTS__ + 2) FTS INDEX auxiliary table (FTS___ */ + const char *table_name_start = strrchr(space_name, '/'); if (table_name_start) ++table_name_start; else - table_name_start = file_name; - - if (!starts_with(table_name_start,"FTS_")) - return false; + table_name_start = space_name; const char *table_name_end = strrchr(table_name_start, '.'); if (!table_name_end) - 
table_name_end = table_name_start + strlen(table_name_start); - ptrdiff_t table_name_len = table_name_end - table_name_end; + table_name_end = + table_name_start + strlen(table_name_start) - 1; + if (!starts_with(table_name_start,"FTS_")) + return false; - for (const char **suffix = fts_common_tables; *suffix; ++suffix) - if (!strncmp(table_name_start, *suffix, table_name_len)) + /* Skip FTS_ */ + const char *table_name_suffix = strchr(table_name_start, '_'); + if (!table_name_suffix || + table_name_suffix == table_name_end) { + return false; + } + table_name_suffix++; + + /* Skip _ */ + table_name_suffix = strchr(table_name_suffix, '_'); + if (!table_name_suffix || + table_name_end == table_name_suffix) { + return false; + } + table_name_suffix++; + + ptrdiff_t table_name_len = table_name_end - table_name_suffix; + + /* Compare only common tables */ + for (const char **suffix = fts_common_tables; *suffix; ++suffix) { + if (!strncmp(table_name_suffix, *suffix, table_name_len)) return true; + } + + /* Skip index_id on fts table name */ + table_name_suffix = strchr(table_name_suffix, '_'); + if (!table_name_suffix || + table_name_suffix == table_name_end) { + return false; + } + table_name_suffix++; + + table_name_len = table_name_end - table_name_suffix; for (size_t i = 0; fts_index_selector[i].suffix; ++i) - if (!strncmp(table_name_start, fts_index_selector[i].suffix, - table_name_len)) + if (!strncmp(table_name_suffix, fts_index_selector[i].suffix, + table_name_len)) return true; - return false; } @@ -1194,7 +1232,20 @@ static void backup_file_op_fail(uint32_t space_id, int type, msg("DDL tracking : create %" PRIu32 " \"%.*s\"", space_id, int(len), name); fail = !check_if_skip_table(spacename.c_str()); - error= "create"; + if (!opt_no_lock && fail && + check_if_fts_table(spacename.c_str())) { + /* Ignore the FTS internal table because InnoDB does + create intermediate table and their associative FTS + internal table when table is being rebuilt during + prepare 
phase. Also, backup_set_alter_copy_lock() + downgrades the MDL_BACKUP_DDL before prepare phase + of alter. This leads to the FTS internal table being + created in the late phase of backup. + mariabackup --prepare should be able to handle + this case. */ + fail = false; + } + error= "create"; break; case FILE_MODIFY: break; @@ -1335,6 +1386,9 @@ enum options_xtrabackup OPT_INNODB_READ_IO_THREADS, OPT_INNODB_WRITE_IO_THREADS, OPT_INNODB_USE_NATIVE_AIO, +#ifdef __linux__ + OPT_INNODB_LINUX_AIO, +#endif OPT_INNODB_PAGE_SIZE, OPT_INNODB_BUFFER_POOL_FILENAME, OPT_INNODB_LOCK_WAIT_TIMEOUT, @@ -1947,6 +2001,14 @@ struct my_option xb_server_options[] = (G_PTR*) &srv_use_native_aio, (G_PTR*) &srv_use_native_aio, 0, GET_BOOL, NO_ARG, TRUE, 0, 0, 0, 0, 0}, +#ifdef __linux__ + {"innodb_linux_aio", OPT_INNODB_LINUX_AIO, + "Which linux AIO implementation to use, auto (io_uring, failing to aio) or explicit", + (G_PTR*) &srv_linux_aio_method, + (G_PTR*) &srv_linux_aio_method, + &innodb_linux_aio_typelib, GET_ENUM, REQUIRED_ARG, + SRV_LINUX_AIO_AUTO, 0, 0, 0, 0, 0}, +#endif {"innodb_page_size", OPT_INNODB_PAGE_SIZE, "The universal page size of the database.", (G_PTR*) &innobase_page_size, (G_PTR*) &innobase_page_size, 0, @@ -2517,26 +2579,7 @@ static bool innodb_init_param() ut_ad(DATA_MYSQL_BINARY_CHARSET_COLL == my_charset_bin.number); -#ifdef _WIN32 - srv_use_native_aio = TRUE; - -#elif defined(LINUX_NATIVE_AIO) - - if (srv_use_native_aio) { - msg("InnoDB: Using Linux native AIO"); - } -#elif defined(HAVE_URING) - - if (srv_use_native_aio) { - msg("InnoDB: Using liburing"); - } -#else - /* Currently native AIO is supported only on windows and linux - and that also when the support is compiled in. In all other - cases, we ignore the setting of innodb_use_native_aio. 
*/ - srv_use_native_aio = FALSE; - -#endif + srv_use_native_aio= tpool::supports_native_aio(); /* Assign the default value to srv_undo_dir if it's not specified, as my_getopt does not support default values for string options. We also @@ -2571,9 +2614,6 @@ static bool innodb_init_param() } } -#ifdef _WIN32 - srv_use_native_aio = TRUE; -#endif return false; error: @@ -2581,6 +2621,7 @@ error: return true; } +alignas(8) static byte log_hdr_buf[log_t::START_OFFSET + SIZE_OF_FILE_CHECKPOINT]; /** Initialize an InnoDB log file header in log_hdr_buf[] */ @@ -4182,7 +4223,6 @@ next_file: return(-1); } - MSAN_STAT_WORKAROUND(&statinfo); info->size = statinfo.st_size; if (S_ISDIR(statinfo.st_mode)) { @@ -4544,7 +4584,7 @@ xb_register_filter_entry( databases_hash->cell_get(my_crc32c(0, name, p - name)) ->search(&xb_filter_entry_t::name_hash, [dbname](xb_filter_entry_t* f) - { return f && !strcmp(f->name, dbname); }); + { return !f || !strcmp(f->name, dbname); }); if (!*prev) { (*prev = xb_new_filter_entry(dbname)) ->has_tables = TRUE; @@ -4678,7 +4718,7 @@ xb_load_list_file( FILE* fp; /* read and store the filenames */ - fp = fopen(filename, "r"); + fp = fopen(filename, "rt"); if (!fp) { die("Can't open %s", filename); @@ -5090,7 +5130,7 @@ class BackupStages { bool stage_start(Backup_datasinks &backup_datasinks, CorruptedPages &corrupted_pages) { - msg("BACKUP STAGE START"); + msg("Starting BACKUP STAGE START"); if (!opt_no_lock) { if (opt_safe_slave_backup) { if (!wait_for_safe_slave(mysql_connection)) { @@ -5104,6 +5144,7 @@ class BackupStages { msg("Error on BACKUP STAGE START query execution"); return(false); } + msg("Acquired locks for BACKUP STAGE START"); } InnodbDataCopier innodb_data_copier(backup_datasinks, @@ -5134,14 +5175,18 @@ class BackupStages { DBUG_MARIABACKUP_EVENT_LOCK("after_aria_background", {}); + msg("Finished BACKUP STAGE START"); return true; } bool stage_flush() { - msg("BACKUP STAGE FLUSH"); - if (!opt_no_lock && 
!lock_for_backup_stage_flush(m_bs_con)) { - msg("Error on BACKUP STAGE FLUSH query execution"); - return false; + msg("Starting BACKUP STAGE FLUSH"); + if (!opt_no_lock) { + if (!lock_for_backup_stage_flush(m_bs_con)) { + msg("Error on BACKUP STAGE FLUSH query execution"); + return false; + } + msg("Acquired locks for BACKUP STAGE FLUSH"); } auto tables_in_use = get_tables_in_use(mysql_connection); // Copy non-stats-log non-in-use tables of non-InnoDB-Aria-RocksDB engines @@ -5189,17 +5234,20 @@ class BackupStages { xb_mysql_query(mysql_connection, "SET debug_sync='now WAIT_FOR copy_started'", false, true); ); - + msg("Finished BACKUP STAGE FLUSH"); return true; } bool stage_block_ddl(Backup_datasinks &backup_datasinks, CorruptedPages &corrupted_pages) { + msg("Started BACKUP STAGE BLOCK_DDL"); if (!opt_no_lock) { if (!lock_for_backup_stage_block_ddl(m_bs_con)) { - msg("BACKUP STAGE BLOCK_DDL"); + msg("Error on BACKUP STAGE BLOCK_DDL " + "query execution"); return false; } + msg("Acquired locks for BACKUP STAGE BLOCK_DDL"); if (have_galera_enabled) { xb_mysql_query(mysql_connection, "SET SESSION wsrep_sync_wait=0", false); @@ -5261,14 +5309,18 @@ class BackupStages { DBUG_MARIABACKUP_EVENT_LOCK("after_stage_block_ddl", {}); + msg("Finished BACKUP STAGE BLOCK_DDL"); return true; } bool stage_block_commit(Backup_datasinks &backup_datasinks) { - msg("BACKUP STAGE BLOCK_COMMIT"); - if (!opt_no_lock && !lock_for_backup_stage_commit(m_bs_con)) { - msg("Error on BACKUP STAGE BLOCK_COMMIT query execution"); - return false; + msg("Starting BACKUP STAGE BLOCK_COMMIT"); + if (!opt_no_lock) { + if (!lock_for_backup_stage_commit(m_bs_con)) { + msg("Error on BACKUP STAGE BLOCK_COMMIT query execution"); + return false; + } + msg("Acquired locks for BACKUP STAGE BLOCK_COMMIT"); } // Copy log tables tail @@ -5368,11 +5420,13 @@ class BackupStages { "FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS", false); } - return backup_datasinks.backup_low(); + bool res= backup_datasinks.backup_low(); + 
msg("Finishing BACKUP STAGE BLOCK_COMMIT"); + return res; } bool stage_end(Backup_datasinks &backup_datasinks) { - msg("BACKUP STAGE END"); + msg("Starting BACKUP STAGE END"); /* release all locks */ if (!opt_no_lock) { unlock_all(m_bs_con); @@ -5495,7 +5549,6 @@ fail: xb_fil_io_init(); if (os_aio_init()) { - msg("Error: cannot initialize AIO subsystem"); goto fail; } @@ -5543,10 +5596,6 @@ fail: backup_datasinks.init(); - if (!select_history()) { - goto fail; - } - /* open the log file */ memset(&stat_info, 0, sizeof(MY_STAT)); dst_log_file = ds_open(backup_datasinks.m_redo, LOG_FILE_NAME, &stat_info); @@ -5561,6 +5610,11 @@ fail: if (innodb_log_checkpoint_now != false) { mysql_read_query_result(mysql_connection); } + + if (!select_history()) { + goto fail; + } + /* label it */ recv_sys.file_checkpoint = log_sys.next_checkpoint_lsn; log_hdr_init(); diff --git a/extra/wolfssl/CMakeLists.txt b/extra/wolfssl/CMakeLists.txt index 954de89101e..359e4148a36 100644 --- a/extra/wolfssl/CMakeLists.txt +++ b/extra/wolfssl/CMakeLists.txt @@ -138,6 +138,8 @@ if(MSVC) remove_definitions(-DHAVE_CONFIG_H) target_compile_definitions(wolfssl PRIVATE WOLFSSL_HAVE_MIN WOLFSSL_HAVE_MAX) + # Workaround https://github.com/wolfSSL/wolfssl/issues/9004 + target_compile_definitions(wolfssl PRIVATE WOLFSSL_NO_SOCK SOCKET_INVALID=-1) endif() set_property(TARGET wolfssl PROPERTY POSITION_INDEPENDENT_CODE ON) diff --git a/extra/wolfssl/wolfssl b/extra/wolfssl/wolfssl index 239b85c8043..b077c81eb63 160000 --- a/extra/wolfssl/wolfssl +++ b/extra/wolfssl/wolfssl @@ -1 +1 @@ -Subproject commit 239b85c80438bf60d9a5b9e0ebe9ff097a760d0d +Subproject commit b077c81eb635392e694ccedbab8b644297ec0285 diff --git a/include/my_service_manager.h b/include/my_service_manager.h index 5b797718f83..f3a7b952090 100644 --- a/include/my_service_manager.h +++ b/include/my_service_manager.h @@ -44,7 +44,7 @@ #define SD_LISTEN_FDS_START (0) #define sd_notify(X, Y) #define sd_notifyf(E, F, ...) 
-#ifdef _WIN32 +#if defined (_WIN32) && !defined(EMBEDDED_LIBRARY) #define service_manager_extend_timeout(I, F, ...) \ mysqld_win_extend_service_timeout(I) #else diff --git a/include/my_valgrind.h b/include/my_valgrind.h index dfe2c3db7b3..94713f2a300 100644 --- a/include/my_valgrind.h +++ b/include/my_valgrind.h @@ -37,11 +37,6 @@ # define MEM_GET_VBITS(a,b,len) __msan_copy_shadow(b,a,len) # define MEM_SET_VBITS(a,b,len) __msan_copy_shadow(a,b,len) # define REDZONE_SIZE 8 -# ifdef __linux__ -# define MSAN_STAT_WORKAROUND(st) MEM_MAKE_DEFINED(st, sizeof(*st)) -# else -# define MSAN_STAT_WORKAROUND(st) ((void) 0) -# endif #elif defined(HAVE_VALGRIND_MEMCHECK_H) && defined(HAVE_valgrind) # include # define HAVE_MEM_CHECK @@ -54,7 +49,6 @@ # define MEM_GET_VBITS(a,b,len) VALGRIND_GET_VBITS(a,b,len) # define MEM_SET_VBITS(a,b,len) VALGRIND_SET_VBITS(a,b,len) # define REDZONE_SIZE 8 -# define MSAN_STAT_WORKAROUND(st) ((void) 0) #elif defined(__SANITIZE_ADDRESS__) && (!defined(_MSC_VER) || defined (__clang__)) # include /* How to do manual poisoning: @@ -68,7 +62,6 @@ https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning */ # define MEM_CHECK_DEFINED(a,len) ((void) 0) # define MEM_GET_VBITS(a,b,len) ((void) 0) # define MEM_SET_VBITS(a,b,len) ((void) 0) -# define MSAN_STAT_WORKAROUND(st) ((void) 0) # define REDZONE_SIZE 8 #else # define MEM_UNDEFINED(a,len) ((void) 0) @@ -80,7 +73,6 @@ https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning */ # define MEM_GET_VBITS(a,b,len) ((void) 0) # define MEM_SET_VBITS(a,b,len) ((void) 0) # define REDZONE_SIZE 0 -# define MSAN_STAT_WORKAROUND(st) ((void) 0) #endif /* __has_feature(memory_sanitizer) */ #ifdef TRASH_FREED_MEMORY diff --git a/include/ssl_compat.h b/include/ssl_compat.h index 3327eee0d8e..6bec4f48285 100644 --- a/include/ssl_compat.h +++ b/include/ssl_compat.h @@ -55,6 +55,10 @@ #ifdef HAVE_WOLFSSL #undef ERR_remove_state #define ERR_remove_state(x) do {} while(0) +#undef 
SSL_get_cipher +#define SSL_get_cipher(ssl) (SSL_version(ssl) == TLS1_3_VERSION ? wolfSSL_get_cipher(ssl) : wolfSSL_get_cipher_name(ssl)) +#undef SSL_get_cipher_list +#define SSL_get_cipher_list(ctx,i) wolfSSL_get_cipher_list(i) #elif defined (HAVE_ERR_remove_thread_state) #define ERR_remove_state(X) ERR_remove_thread_state(NULL) #endif /* HAVE_ERR_remove_thread_state */ diff --git a/libmariadb b/libmariadb index b5a2c9f3c27..b790c6c149c 160000 --- a/libmariadb +++ b/libmariadb @@ -1 +1 @@ -Subproject commit b5a2c9f3c275861447ca21ee1f01560135ec6c2f +Subproject commit b790c6c149c9119fb73c416e993af1c7ef256b34 diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt index 95b013a0c90..d5b968f1036 100644 --- a/libmysqld/CMakeLists.txt +++ b/libmysqld/CMakeLists.txt @@ -21,7 +21,6 @@ INCLUDE_DIRECTORIES( ${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/libmysqld ${CMAKE_SOURCE_DIR}/sql -${CMAKE_SOURCE_DIR}/tpool ${CMAKE_BINARY_DIR}/sql ${PCRE_INCLUDE_DIRS} ${LIBFMT_INCLUDE_DIR} diff --git a/mysql-test/main/alter_table.result b/mysql-test/main/alter_table.result index 20c0876974f..b9625313a62 100644 --- a/mysql-test/main/alter_table.result +++ b/mysql-test/main/alter_table.result @@ -3099,6 +3099,42 @@ alter table t1 drop constraint t1_fk_t2_id, drop t2_id, drop t2_id; ERROR 42000: Can't DROP COLUMN `t2_id`; check that it exists drop table t1, t2; # +# MDEV-29001 DROP DEFAULT makes SHOW CREATE non-idempotent +# +SET @save_sql_mode=@@sql_mode; +SET sql_mode=strict_all_tables; +create table t1 ( +a int, +b int default 0, +c int not null, +d int not null default 1); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT 0, + `c` int(11) NOT NULL, + `d` int(11) NOT NULL DEFAULT 1 +) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci +alter table t1 +alter a drop default, +alter b drop default, +alter c drop default, +alter d drop default; +SHOW create table t1; +Table Create Table +t1 
CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + `c` int(11) NOT NULL, + `d` int(11) NOT NULL +) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci +insert t1 values (default, default, default, default); +ERROR HY000: Field 'c' doesn't have a default value +insert t1 values (default, default, 0, 0); +drop table t1; +set sql_mode= @save_sql_mode; +# # End of 10.6 tests # # diff --git a/mysql-test/main/alter_table.test b/mysql-test/main/alter_table.test index fbd2c465e0d..220b68e7249 100644 --- a/mysql-test/main/alter_table.test +++ b/mysql-test/main/alter_table.test @@ -2403,6 +2403,30 @@ create table t1(id int primary key, t2_id int, constraint t1_fk_t2_id foreign ke alter table t1 drop constraint t1_fk_t2_id, drop t2_id, drop t2_id; drop table t1, t2; +--echo # +--echo # MDEV-29001 DROP DEFAULT makes SHOW CREATE non-idempotent +--echo # +SET @save_sql_mode=@@sql_mode; +SET sql_mode=strict_all_tables; +create table t1 ( + a int, + b int default 0, + c int not null, + d int not null default 1); +show create table t1; +alter table t1 + alter a drop default, + alter b drop default, + alter c drop default, + alter d drop default; +SHOW create table t1; +--error ER_NO_DEFAULT_FOR_FIELD +insert t1 values (default, default, default, default); +insert t1 values (default, default, 0, 0); + +drop table t1; +set sql_mode= @save_sql_mode; + --echo # --echo # End of 10.6 tests --echo # diff --git a/mysql-test/main/alter_table_errors.result b/mysql-test/main/alter_table_errors.result index ffbc43beeb6..07792aa0dca 100644 --- a/mysql-test/main/alter_table_errors.result +++ b/mysql-test/main/alter_table_errors.result @@ -1,3 +1,6 @@ +# +# MDEV-16110 ALTER with ALGORITHM=INPLACE breaks temporary table with virtual columns +# create table t (a int, v int as (a)) engine=innodb; alter table t change column a b tinyint, algorithm=inplace; ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Cannot change column type. 
Try ALGORITHM=COPY @@ -28,8 +31,13 @@ t2 CREATE TEMPORARY TABLE `t2` ( `v` int(11) GENERATED ALWAYS AS (`a`) VIRTUAL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci drop temporary table t1, t2; +unlock tables; +# +# MDEV-18083 ASAN heap-use-after-free in Field::set_warning_truncated_wrong_value upon inserting into temporary table +# create temporary table t1 (a int); alter table t1 add column f text; insert into t1 values ('x','foo'); ERROR 22007: Incorrect integer value: 'x' for column `test`.`t1`.`a` at row 1 drop temporary table t1; +# End of 10.2 tests diff --git a/mysql-test/main/alter_table_errors.test b/mysql-test/main/alter_table_errors.test index 8726410ea0a..4a64ade3594 100644 --- a/mysql-test/main/alter_table_errors.test +++ b/mysql-test/main/alter_table_errors.test @@ -1,8 +1,8 @@ --source include/have_innodb.inc -# -# MDEV-16110 ALTER with ALGORITHM=INPLACE breaks temporary table with virtual columns -# +--echo # +--echo # MDEV-16110 ALTER with ALGORITHM=INPLACE breaks temporary table with virtual columns +--echo # create table t (a int, v int as (a)) engine=innodb; --error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON alter table t change column a b tinyint, algorithm=inplace; @@ -20,12 +20,15 @@ lock table t2 write; alter table t2 change column a b int, algorithm=inplace; show create table t2; drop temporary table t1, t2; +unlock tables; -# -# MDEV-18083 ASAN heap-use-after-free in Field::set_warning_truncated_wrong_value upon inserting into temporary table -# +--echo # +--echo # MDEV-18083 ASAN heap-use-after-free in Field::set_warning_truncated_wrong_value upon inserting into temporary table +--echo # create temporary table t1 (a int); alter table t1 add column f text; --error ER_TRUNCATED_WRONG_VALUE_FOR_FIELD insert into t1 values ('x','foo'); drop temporary table t1; + +--echo # End of 10.2 tests diff --git a/mysql-test/main/alter_table_lock.result b/mysql-test/main/alter_table_lock.result index 5a787055478..170a7c47e6b 100644 
--- a/mysql-test/main/alter_table_lock.result +++ b/mysql-test/main/alter_table_lock.result @@ -60,3 +60,22 @@ drop table t1; # # End of 10.11 tests # +# +# MDEV-35611 Assertion failure in Diagnostics_area::sql_errno upon interrupted ALTER +# +CREATE TABLE t (a INT) ENGINE=MyISAM; +INSERT INTO t VALUES (1); +LOCK TABLE t READ; +connection con1; +SET max_statement_time=0.001; +ALTER TABLE t FORCE; +ERROR 70100: Query execution was interrupted (max_statement_time exceeded) +ALTER TABLE IF EXISTS t FORCE; +ERROR 70100: Query execution was interrupted (max_statement_time exceeded) +disconnect con1; +connection default; +UNLOCK TABLES; +DROP TABLE t; +# +# End of 11.4 tests +# diff --git a/mysql-test/main/alter_table_lock.test b/mysql-test/main/alter_table_lock.test index a1dc9ac831f..a01bd721e04 100644 --- a/mysql-test/main/alter_table_lock.test +++ b/mysql-test/main/alter_table_lock.test @@ -78,3 +78,25 @@ drop table t1; --echo # --echo # End of 10.11 tests --echo # + +--echo # +--echo # MDEV-35611 Assertion failure in Diagnostics_area::sql_errno upon interrupted ALTER +--echo # +CREATE TABLE t (a INT) ENGINE=MyISAM; +INSERT INTO t VALUES (1); +LOCK TABLE t READ; +--connection con1 +SET max_statement_time=0.001; +--error ER_STATEMENT_TIMEOUT +ALTER TABLE t FORCE; +--error ER_STATEMENT_TIMEOUT +ALTER TABLE IF EXISTS t FORCE; +# Cleanup +--disconnect con1 +--connection default +UNLOCK TABLES; +DROP TABLE t; + +--echo # +--echo # End of 11.4 tests +--echo # diff --git a/mysql-test/main/analyze_stmt_slow_query_log.result b/mysql-test/main/analyze_stmt_slow_query_log.result index a0c4b45dee0..14391a20523 100644 --- a/mysql-test/main/analyze_stmt_slow_query_log.result +++ b/mysql-test/main/analyze_stmt_slow_query_log.result @@ -10,6 +10,7 @@ a 1 2 drop table t1; +FLUSH SLOW LOGS; # explain: id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra # explain: 1 INSERT t1 ALL NULL NULL NULL NULL NULL NULL 100.00 100.00 NULL # explain: id 
select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra diff --git a/mysql-test/main/analyze_stmt_slow_query_log.test b/mysql-test/main/analyze_stmt_slow_query_log.test index 355292e3f6a..0bc9f7aba69 100644 --- a/mysql-test/main/analyze_stmt_slow_query_log.test +++ b/mysql-test/main/analyze_stmt_slow_query_log.test @@ -26,6 +26,7 @@ select * from t1 where a<3; --enable_cursor_protocol drop table t1; let SLOW_LOG_FILE= `select @@slow_query_log_file`; +FLUSH SLOW LOGS; # select @@slow_query_log_file; diff --git a/mysql-test/main/case.result b/mysql-test/main/case.result index 8ce6eecb882..b28f738a98a 100644 --- a/mysql-test/main/case.result +++ b/mysql-test/main/case.result @@ -1,4 +1,3 @@ -drop table if exists t1, t2; select CASE "b" when "a" then 1 when "b" then 2 END as exp; exp 2 @@ -165,7 +164,7 @@ t1 CREATE TABLE `t1` ( `COALESCE(1,1.0)` decimal(2,1) NOT NULL, `COALESCE(1,'1')` varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci NOT NULL, `COALESCE(1.1,'1')` varchar(4) CHARACTER SET latin1 COLLATE latin1_swedish_ci NOT NULL, - `COALESCE('a' COLLATE latin1_bin,'b')` varchar(1) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL + `COALESCE('a' COLLATE latin1_bin,'b')` varchar(1) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; CREATE TABLE t1 SELECT IFNULL('a' COLLATE latin1_swedish_ci, 'b' COLLATE latin1_bin); @@ -572,6 +571,22 @@ id select_type table type possible_keys key key_len ref rows filtered Extra Warnings: Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where case `test`.`t1`.`a` when `test`.`t1`.`b` then 1 end = 1 and case when `test`.`t1`.`a` then `test`.`t1`.`b` else 1 end = 3 DROP TABLE t1; -# # End of 10.3 test # +# MDEV-25415 CASE function handles NULL inconsistently +# +select case 'X' when null then 1 when 'X' then 2 else 3 end; +case 'X' when null then 1 when 'X' then 2 else 3 end +2 
+select case 'X' when 1/1 then 1 when 'X' then 2 else 3 end; +case 'X' when 1/1 then 1 when 'X' then 2 else 3 end +2 +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'X' +select case 'X' when 1/0 then 1 when 'X' then 2 else 3 end; +case 'X' when 1/0 then 1 when 'X' then 2 else 3 end +2 +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'X' +Warning 1365 Division by 0 +# End of 10.11 test diff --git a/mysql-test/main/case.test b/mysql-test/main/case.test index bb0fc18c0a3..ba027aacde0 100644 --- a/mysql-test/main/case.test +++ b/mysql-test/main/case.test @@ -2,11 +2,6 @@ # Testing of CASE # - ---disable_warnings -drop table if exists t1, t2; ---enable_warnings - select CASE "b" when "a" then 1 when "b" then 2 END as exp; select CASE "c" when "a" then 1 when "b" then 2 END as exp; select CASE "c" when "a" then 1 when "b" then 2 ELSE 3 END as exp; @@ -412,7 +407,13 @@ SELECT * FROM t1 WHERE DROP TABLE t1; +--echo # End of 10.3 test --echo # ---echo # End of 10.3 test +--echo # MDEV-25415 CASE function handles NULL inconsistently --echo # +select case 'X' when null then 1 when 'X' then 2 else 3 end; +select case 'X' when 1/1 then 1 when 'X' then 2 else 3 end; +select case 'X' when 1/0 then 1 when 'X' then 2 else 3 end; + +--echo # End of 10.11 test diff --git a/mysql-test/main/check_constraint.result b/mysql-test/main/check_constraint.result index c173cfbfa22..830fff3243f 100644 --- a/mysql-test/main/check_constraint.result +++ b/mysql-test/main/check_constraint.result @@ -317,9 +317,63 @@ insert t1 (a) values (1); insert t1 (a) values (-1); ERROR 23000: CONSTRAINT `t1.b` failed for `test`.`t1` drop table t1; -# # End of 10.4 tests # +# MDEV-36662 CHECK constraint does not repeat in case of error +# +create table t1 (d date check (d > 2020-01-01)); +insert into t1 values ('2023-12-05'); +ERROR 22007: Truncated incorrect datetime value: '2018' +INSERT into t1 values ('2024-12-05'); +ERROR 22007: Truncated incorrect datetime value: '2018' +create or 
replace table t1 (d time check (d > "a")); +insert into t1 values ('22:30'); +ERROR 22007: Truncated incorrect time value: 'a' +insert into t1 values ('23:30'); +ERROR 22007: Truncated incorrect time value: 'a' +create or replace table t1 (d datetime check (d > "a")); +insert into t1 values ('2023-12-05'); +ERROR 22007: Truncated incorrect datetime value: 'a' +insert into t1 values ('2024-12-05'); +ERROR 22007: Truncated incorrect datetime value: 'a' +create or replace table t1 (d timestamp check (d > "a")); +insert into t1 values ('2023-12-05'); +ERROR 22007: Truncated incorrect datetime value: 'a' +insert into t1 values ('2024-12-05'); +ERROR 22007: Truncated incorrect datetime value: 'a' +create or replace table t1 (d year check (d > "a")); +insert into t1 values ('2023'); +ERROR 22007: Truncated incorrect DECIMAL value: 'a' +insert into t1 values ('2024'); +ERROR 22007: Truncated incorrect DECIMAL value: 'a' +create or replace table t1 (d int check (d > "a")); +insert into t1 values (0); +ERROR 22007: Truncated incorrect DECIMAL value: 'a' +insert into t1 values (1); +ERROR 22007: Truncated incorrect DECIMAL value: 'a' +create or replace table t1 (d real check (d > "a")); +insert into t1 values (0.1); +ERROR 22007: Truncated incorrect DOUBLE value: 'a' +insert into t1 values (1.1); +ERROR 22007: Truncated incorrect DOUBLE value: 'a' +create or replace table t1 (d decimal check (d > "a")); +insert into t1 values (0); +ERROR 22007: Truncated incorrect DECIMAL value: 'a' +insert into t1 values (1); +ERROR 22007: Truncated incorrect DECIMAL value: 'a' +create or replace table t1 (d bool check (d != "a")); +insert into t1 values (0); +ERROR 22007: Truncated incorrect DECIMAL value: 'a' +insert into t1 values (1); +ERROR 22007: Truncated incorrect DECIMAL value: 'a' +drop table t1; +create or replace table t1 (d varchar(30) check (d != 1)); +insert into t1 values ("a"); +ERROR 22007: Truncated incorrect DECIMAL value: 'a' +insert into t1 values ("b"); +ERROR 22007: 
Truncated incorrect DECIMAL value: 'b' +drop table t1; +# End of 10.11 tests # # MDEV-32439 INSERT IGNORE VALUES (one row) errors on constraint # @@ -376,6 +430,4 @@ SELECT * FROM t1; v1 v2 1 2 DROP TABLE t1; -# # End of 11.4 tests -# diff --git a/mysql-test/main/check_constraint.test b/mysql-test/main/check_constraint.test index 42d36dbb5f5..81fe6d162dc 100644 --- a/mysql-test/main/check_constraint.test +++ b/mysql-test/main/check_constraint.test @@ -244,9 +244,65 @@ insert t1 (a) values (1); insert t1 (a) values (-1); drop table t1; ---echo # --echo # End of 10.4 tests + --echo # +--echo # MDEV-36662 CHECK constraint does not repeat in case of error +--echo # +create table t1 (d date check (d > 2020-01-01)); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values ('2023-12-05'); +--error ER_TRUNCATED_WRONG_VALUE +INSERT into t1 values ('2024-12-05'); +create or replace table t1 (d time check (d > "a")); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values ('22:30'); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values ('23:30'); +create or replace table t1 (d datetime check (d > "a")); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values ('2023-12-05'); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values ('2024-12-05'); +create or replace table t1 (d timestamp check (d > "a")); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values ('2023-12-05'); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values ('2024-12-05'); +create or replace table t1 (d year check (d > "a")); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values ('2023'); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values ('2024'); +create or replace table t1 (d int check (d > "a")); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values (0); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values (1); +create or replace table t1 (d real check (d > "a")); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values (0.1); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 
values (1.1); +create or replace table t1 (d decimal check (d > "a")); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values (0); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values (1); +create or replace table t1 (d bool check (d != "a")); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values (0); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values (1); +drop table t1; +create or replace table t1 (d varchar(30) check (d != 1)); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values ("a"); +--error ER_TRUNCATED_WRONG_VALUE +insert into t1 values ("b"); +drop table t1; + +--echo # End of 10.11 tests --echo # --echo # MDEV-32439 INSERT IGNORE VALUES (one row) errors on constraint @@ -276,6 +332,4 @@ SHOW WARNINGS; SELECT * FROM t1; DROP TABLE t1; ---echo # --echo # End of 11.4 tests ---echo # diff --git a/mysql-test/main/column_compression.result b/mysql-test/main/column_compression.result index 026bcd7d54e..b0e533caf4b 100644 --- a/mysql-test/main/column_compression.result +++ b/mysql-test/main/column_compression.result @@ -3018,4 +3018,60 @@ SELECT GROUP_CONCAT( b, a ORDER BY 2 ) AS f FROM t1; f nc,mmmmmmmmmmd DROP TABLE t1; +# +# MDEV-24726 Assertion `0' failed in Field_varstring_compressed::key_cmp +# +# VARCHAR +create table t1 (a varchar(8) compressed) character set utf8mb4; +create algorithm=temptable view v1 as select * from t1; +insert into t1 values ('foo'),('bar'),('foo'); +select * from v1 where a in (select a from t1); +a +foo +foo +bar +drop view v1; +drop table t1; +create table t1 (f1 varchar(8)) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t1 values (''); +create table t2 (f2 varchar(8) compressed) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t2 values ('a'),('b'); +select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1; +f1 + +drop table t1, t2; +# BLOB +create table t1 (a text compressed) character set utf8mb4; +create algorithm=temptable view v1 as select * from t1; +insert into t1 
values ('foo'),('bar'),('foo'); +select * from v1 where a in (select a from t1); +a +foo +foo +bar +drop view v1; +drop table t1; +create table t1 (f1 text) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t1 values (''); +create table t2 (f2 text compressed) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t2 values ('a'),('b'); +select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1; +f1 + +drop table t1, t2; +# +# MDEV-16808 Assertion on compressed blob as key field +# +set join_cache_level= 3; +create table t1 (col_blob text) engine=innodb; +create table t2 (col_blob text compressed) engine=innodb; +select * from t1 join t2 using ( col_blob ); +col_blob +drop tables t1, t2; +create table t (a text compressed,b text) engine=innodb; +create table t4 like t; +set session join_cache_level=3; +select * from (select * from t) as t natural join (select * from t) as t1; +a b +drop tables t, t4; # End of 10.5 tests diff --git a/mysql-test/main/column_compression.test b/mysql-test/main/column_compression.test index f2fb302acb2..617df6731b8 100644 --- a/mysql-test/main/column_compression.test +++ b/mysql-test/main/column_compression.test @@ -524,4 +524,57 @@ INSERT INTO t1 VALUES ('c','n'),('d','mmmmmmmmmm'); SELECT GROUP_CONCAT( b, a ORDER BY 2 ) AS f FROM t1; DROP TABLE t1; +--echo # +--echo # MDEV-24726 Assertion `0' failed in Field_varstring_compressed::key_cmp +--echo # + +--echo # VARCHAR +create table t1 (a varchar(8) compressed) character set utf8mb4; +create algorithm=temptable view v1 as select * from t1; +insert into t1 values ('foo'),('bar'),('foo'); +select * from v1 where a in (select a from t1); +# cleanup +drop view v1; +drop table t1; + +create table t1 (f1 varchar(8)) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t1 values (''); +create table t2 (f2 varchar(8) compressed) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t2 values ('a'),('b'); +select t1.* from t1 left join (select distinct f2 from 
t2) sq on sq.f2 = t1.f1; +# cleanup +drop table t1, t2; + +--echo # BLOB +create table t1 (a text compressed) character set utf8mb4; +create algorithm=temptable view v1 as select * from t1; +insert into t1 values ('foo'),('bar'),('foo'); +select * from v1 where a in (select a from t1); +# cleanup +drop view v1; +drop table t1; + +create table t1 (f1 text) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t1 values (''); +create table t2 (f2 text compressed) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t2 values ('a'),('b'); +select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1; +# cleanup +drop table t1, t2; + +--echo # +--echo # MDEV-16808 Assertion on compressed blob as key field +--echo # +set join_cache_level= 3; +create table t1 (col_blob text) engine=innodb; +create table t2 (col_blob text compressed) engine=innodb; +select * from t1 join t2 using ( col_blob ); +drop tables t1, t2; + +create table t (a text compressed,b text) engine=innodb; +create table t4 like t; +set session join_cache_level=3; +select * from (select * from t) as t natural join (select * from t) as t1; +drop tables t, t4; + --echo # End of 10.5 tests diff --git a/mysql-test/main/column_compression_parts.result b/mysql-test/main/column_compression_parts.result index b062064c22e..8c6dcb79728 100644 --- a/mysql-test/main/column_compression_parts.result +++ b/mysql-test/main/column_compression_parts.result @@ -127,7 +127,6 @@ ALTER TABLE t1 MODIFY COLUMN a VARCHAR(1000); ALTER TABLE t1 ALTER COLUMN a DROP DEFAULT; INSERT INTO t1 VALUES (REPEAT('b',100),11); INSERT INTO t1 VALUES (default,10); -ERROR HY000: Field 'a' doesn't have a default value ALTER TABLE t1 MODIFY COLUMN a VARCHAR(1000) COMPRESSED; SHOW CREATE TABLE t1; Table Create Table @@ -139,6 +138,7 @@ t1 CREATE TABLE `t1` ( (PARTITION `p0` VALUES LESS THAN (100,'sss') ENGINE = MyISAM) SELECT * from t1 ORDER BY id; a id +NULL 10 
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 11 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 23 zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 24 diff --git a/mysql-test/main/column_compression_parts.test b/mysql-test/main/column_compression_parts.test index 7d784c064e2..c75817c0e2e 100644 --- a/mysql-test/main/column_compression_parts.test +++ b/mysql-test/main/column_compression_parts.test @@ -72,7 +72,6 @@ ALTER TABLE t1 ALTER COLUMN a DROP DEFAULT; ALTER TABLE t1 MODIFY COLUMN a VARCHAR(1000); ALTER TABLE t1 ALTER COLUMN a DROP DEFAULT; INSERT INTO t1 VALUES (REPEAT('b',100),11); ---error ER_NO_DEFAULT_FOR_FIELD INSERT INTO t1 VALUES (default,10); ALTER TABLE t1 MODIFY COLUMN a VARCHAR(1000) COMPRESSED; diff --git a/mysql-test/main/ctype_binary.result b/mysql-test/main/ctype_binary.result index 44696d63a63..ddb68288386 100644 --- a/mysql-test/main/ctype_binary.result +++ b/mysql-test/main/ctype_binary.result @@ -2621,7 +2621,7 @@ GROUP_CONCAT(CASE WHEN a THEN a ELSE '' END) 1234567 SELECT COALESCE(a,'') FROM t1 GROUP BY 1; Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def COALESCE(a,'') 253 9 7 Y 128 39 63 +def COALESCE(a,'') 253 9 7 N 129 39 63 COALESCE(a,'') 1234567 # All columns must be VARCHAR(9) with the same length: @@ -2640,7 +2640,7 @@ t2 CREATE TABLE `t2` ( `IFNULL(a,'')` varbinary(9) NOT NULL, `IF(a,a,'')` varbinary(9) DEFAULT NULL, `CASE WHEN a THEN a ELSE '' END` varbinary(9) DEFAULT NULL, - `COALESCE(a,'')` varbinary(9) DEFAULT NULL + `COALESCE(a,'')` varbinary(9) NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t2; CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1; diff --git a/mysql-test/main/ctype_cp1251.result b/mysql-test/main/ctype_cp1251.result index 
88acf8bf3e1..9de7d0dd922 100644 --- a/mysql-test/main/ctype_cp1251.result +++ b/mysql-test/main/ctype_cp1251.result @@ -3033,7 +3033,7 @@ GROUP_CONCAT(CASE WHEN a THEN a ELSE '' END) 1234567 SELECT COALESCE(a,'') FROM t1 GROUP BY 1; Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def COALESCE(a,'') 253 9 7 Y 0 39 51 +def COALESCE(a,'') 253 9 7 N 1 39 51 COALESCE(a,'') 1234567 # All columns must be VARCHAR(9) with the same length: @@ -3052,7 +3052,7 @@ t2 CREATE TABLE `t2` ( `IFNULL(a,'')` varchar(9) CHARACTER SET cp1251 COLLATE cp1251_general_ci NOT NULL, `IF(a,a,'')` varchar(9) CHARACTER SET cp1251 COLLATE cp1251_general_ci DEFAULT NULL, `CASE WHEN a THEN a ELSE '' END` varchar(9) CHARACTER SET cp1251 COLLATE cp1251_general_ci DEFAULT NULL, - `COALESCE(a,'')` varchar(9) CHARACTER SET cp1251 COLLATE cp1251_general_ci DEFAULT NULL + `COALESCE(a,'')` varchar(9) CHARACTER SET cp1251 COLLATE cp1251_general_ci NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t2; CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1; diff --git a/mysql-test/main/ctype_latin1.result b/mysql-test/main/ctype_latin1.result index 1bdb83ee086..5c584ebec3c 100644 --- a/mysql-test/main/ctype_latin1.result +++ b/mysql-test/main/ctype_latin1.result @@ -3342,7 +3342,7 @@ GROUP_CONCAT(CASE WHEN a THEN a ELSE '' END) 1234567 SELECT COALESCE(a,'') FROM t1 GROUP BY 1; Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def COALESCE(a,'') 253 9 7 Y 0 39 8 +def COALESCE(a,'') 253 9 7 N 1 39 8 COALESCE(a,'') 1234567 # All columns must be VARCHAR(9) with the same length: @@ -3361,7 +3361,7 @@ t2 CREATE TABLE `t2` ( `IFNULL(a,'')` varchar(9) CHARACTER SET latin1 COLLATE latin1_swedish_ci NOT NULL, `IF(a,a,'')` varchar(9) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL, `CASE WHEN a THEN a ELSE '' END` varchar(9) CHARACTER SET latin1 
COLLATE latin1_swedish_ci DEFAULT NULL, - `COALESCE(a,'')` varchar(9) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL + `COALESCE(a,'')` varchar(9) CHARACTER SET latin1 COLLATE latin1_swedish_ci NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t2; CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1; diff --git a/mysql-test/main/ctype_ucs.result b/mysql-test/main/ctype_ucs.result index 9728d3ed08d..84ee60e19bf 100644 --- a/mysql-test/main/ctype_ucs.result +++ b/mysql-test/main/ctype_ucs.result @@ -4224,7 +4224,7 @@ GROUP_CONCAT(CASE WHEN a THEN a ELSE '' END) 1234567 SELECT COALESCE(a,'') FROM t1 GROUP BY 1; Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def COALESCE(a,'') 253 9 7 Y 0 39 8 +def COALESCE(a,'') 253 9 7 N 1 39 8 COALESCE(a,'') 1234567 # All columns must be VARCHAR(9) with the same length: @@ -4243,7 +4243,7 @@ t2 CREATE TABLE `t2` ( `IFNULL(a,'')` varchar(9) CHARACTER SET ucs2 COLLATE ucs2_general_ci NOT NULL, `IF(a,a,'')` varchar(9) CHARACTER SET ucs2 COLLATE ucs2_general_ci DEFAULT NULL, `CASE WHEN a THEN a ELSE '' END` varchar(9) CHARACTER SET ucs2 COLLATE ucs2_general_ci DEFAULT NULL, - `COALESCE(a,'')` varchar(9) CHARACTER SET ucs2 COLLATE ucs2_general_ci DEFAULT NULL + `COALESCE(a,'')` varchar(9) CHARACTER SET ucs2 COLLATE ucs2_general_ci NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t2; CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1; diff --git a/mysql-test/main/ctype_utf8.result b/mysql-test/main/ctype_utf8.result index 3d75a013486..1182028c925 100644 --- a/mysql-test/main/ctype_utf8.result +++ b/mysql-test/main/ctype_utf8.result @@ -4970,7 +4970,7 @@ GROUP_CONCAT(CASE WHEN a THEN a ELSE '' END) 1234567 SELECT COALESCE(a,'') FROM t1 GROUP BY 1; Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def COALESCE(a,'') 253 27 7 
Y 0 39 33 +def COALESCE(a,'') 253 27 7 N 1 39 33 COALESCE(a,'') 1234567 # All columns must be VARCHAR(9) with the same length: @@ -4989,7 +4989,7 @@ t2 CREATE TABLE `t2` ( `IFNULL(a,'')` varchar(9) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL, `IF(a,a,'')` varchar(9) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL, `CASE WHEN a THEN a ELSE '' END` varchar(9) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL, - `COALESCE(a,'')` varchar(9) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL + `COALESCE(a,'')` varchar(9) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t2; CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1; diff --git a/mysql-test/main/debug_sync.result b/mysql-test/main/debug_sync.result index 4c1711a6d6b..6bc2a5cbe24 100644 --- a/mysql-test/main/debug_sync.result +++ b/mysql-test/main/debug_sync.result @@ -320,3 +320,30 @@ SHOW VARIABLES LIKE 'DEBUG_SYNC'; Variable_name Value debug_sync ON - current signals: 's2,s7,s1,s5' SET DEBUG_SYNC= 'RESET'; +# +# MDEV-30364 Assertion MDL_EXCLUSIVE on DISCARD TABLESPACE in LOCK TABLE mode +# +create table t (c int) engine=innodb; +connect con1,localhost,root; +set debug_sync='get_schema_column WAIT_FOR go'; +select column_name from information_schema.columns +where table_schema='test' and table_name='t'; +connection default; +lock table t write; +alter table t discard tablespace; +connect con2,localhost,root; +disconnect con2; +connection default; +ERROR 70100: Query execution was interrupted +set debug_sync='now SIGNAL go'; +connection con1; +column_name +c +disconnect con1; +connection default; +unlock tables; +drop table t; +set debug_sync= 'reset'; +# +# End of 10.6 tests +# diff --git a/mysql-test/main/debug_sync.test b/mysql-test/main/debug_sync.test index 0c5bee3e220..1c8638832c1 100644 --- a/mysql-test/main/debug_sync.test +++ b/mysql-test/main/debug_sync.test @@ 
-18,6 +18,7 @@ # We need the Debug Sync Facility. # --source include/have_debug_sync.inc +--source include/have_innodb.inc # # We are checking privileges, which the embedded server cannot do. @@ -448,3 +449,42 @@ SHOW VARIABLES LIKE 'DEBUG_SYNC'; # SET DEBUG_SYNC= 'RESET'; +--echo # +--echo # MDEV-30364 Assertion MDL_EXCLUSIVE on DISCARD TABLESPACE in LOCK TABLE mode +--echo # +create table t (c int) engine=innodb; +--connect con1,localhost,root +set debug_sync='get_schema_column WAIT_FOR go'; +send select column_name from information_schema.columns +where table_schema='test' and table_name='t'; + +--connection default +let $wait_condition=select 1 from information_schema.processlist where state like 'debug sync point%'; +--source include/wait_condition.inc +let $connid=`select connection_id()`; +lock table t write; +send alter table t discard tablespace; + +--connect con2,localhost,root +--disable_query_log +--eval kill query $connid +--enable_query_log +--disconnect con2 + +--connection default +--error ER_QUERY_INTERRUPTED +reap; +set debug_sync='now SIGNAL go'; + +--connection con1 +reap; +--disconnect con1 + +--connection default +unlock tables; +drop table t; +set debug_sync= 'reset'; + +--echo # +--echo # End of 10.6 tests +--echo # diff --git a/mysql-test/main/default.result b/mysql-test/main/default.result index 7234001fcf0..885b97d9151 100644 --- a/mysql-test/main/default.result +++ b/mysql-test/main/default.result @@ -3433,10 +3433,8 @@ DEFAULT(a) CASE a WHEN 0 THEN 1 ELSE 2 END NULL 2 DROP TABLE t; DROP VIEW v; -# # End of 10.2 test # -# # MDEV-22703 DEFAULT() on a BLOB column can overwrite the default # record, which can cause crashes when accessing already released # memory. 
@@ -3451,10 +3449,8 @@ length(DEFAULT(h)) 25 INSERT INTO t1 () VALUES (); drop table t1; -# # End of 10.3 test # -# # MDEV-26423: MariaDB server crash in Create_tmp_table::finalize # CREATE TABLE t1 (pk text DEFAULT length(uuid())); @@ -3484,7 +3480,15 @@ column_name column_default has_default is_nullable a NULL 1 YES drop view v1; drop table t1; -# # End of 10.4 test # +# MDEV-37320 ASAN errors in Field::is_null / Item_param::assign_default +# +create table t1 (f01 timestamp, f03 timestamp); +insert into t1 () values (); +create trigger tr before insert on t1 for each row set @a=1; +prepare stmt from "update t1 set f03 = ?"; +execute stmt using default; +drop table t1; +# End of 10.6 test ALTER DATABASE test CHARACTER SET utf8mb4 COLLATE utf8mb4_uca1400_ai_ci; diff --git a/mysql-test/main/default.test b/mysql-test/main/default.test index af770500145..d806c09509e 100644 --- a/mysql-test/main/default.test +++ b/mysql-test/main/default.test @@ -2140,9 +2140,8 @@ CREATE ALGORITHM=TEMPTABLE VIEW v AS SELECT * FROM t; SELECT DISTINCT DEFAULT(a), CASE a WHEN 0 THEN 1 ELSE 2 END FROM v GROUP BY a WITH ROLLUP; DROP TABLE t; DROP VIEW v; ---echo # + --echo # End of 10.2 test ---echo # --echo # --echo # MDEV-22703 DEFAULT() on a BLOB column can overwrite the default @@ -2160,9 +2159,7 @@ SELECT length(DEFAULT(h)) FROM t1; INSERT INTO t1 () VALUES (); drop table t1; ---echo # --echo # End of 10.3 test ---echo # --echo # --echo # MDEV-26423: MariaDB server crash in Create_tmp_table::finalize @@ -2186,9 +2183,18 @@ select column_name, column_default, column_default is not null as 'has_default', drop view v1; drop table t1; ---echo # --echo # End of 10.4 test ---echo # +--echo # +--echo # MDEV-37320 ASAN errors in Field::is_null / Item_param::assign_default +--echo # +create table t1 (f01 timestamp, f03 timestamp); +insert into t1 () values (); +create trigger tr before insert on t1 for each row set @a=1; +prepare stmt from "update t1 set f03 = ?"; +execute stmt using default; 
+drop table t1; + +--echo # End of 10.6 test --source include/test_db_charset_restore.inc diff --git a/mysql-test/main/derived.test b/mysql-test/main/derived.test index b1aaf561875..1b4687b094a 100644 --- a/mysql-test/main/derived.test +++ b/mysql-test/main/derived.test @@ -1204,6 +1204,7 @@ drop table t1,t2,t3; --echo # Tests from the bug report +--disable_view_protocol CREATE TABLE t (pk INT PRIMARY KEY); INSERT INTO t VALUES (1), (2), (3); @@ -1258,6 +1259,7 @@ SHOW CREATE VIEW v1; DROP VIEW v_t, v1; DROP TABLE t; +--enable_view_protocol --echo # Tests on views created using SELECT statements that contain derived columns diff --git a/mysql-test/main/derived_cond_pushdown.result b/mysql-test/main/derived_cond_pushdown.result index 61139f76356..1f0d1514cb9 100644 --- a/mysql-test/main/derived_cond_pushdown.result +++ b/mysql-test/main/derived_cond_pushdown.result @@ -22727,8 +22727,8 @@ where dt.a=t1.a and t3.a < 3 from t1 limit 5; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 1000 +2 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 2 Using where 2 DEPENDENT SUBQUERY ref key0 key0 5 test.t1.a 1 -2 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 LATERAL DERIVED t2 ref a a 5 test.t1.a 10 select a, diff --git a/mysql-test/main/derived_split_innodb.result b/mysql-test/main/derived_split_innodb.result index 7bb79b3c7b8..809b15859bb 100644 --- a/mysql-test/main/derived_split_innodb.result +++ b/mysql-test/main/derived_split_innodb.result @@ -969,4 +969,311 @@ cnt 6 DROP TABLE t1; # End of 10.4 tests +# +# MDEV-30711: Crash in add_keyuses_for_splitting() when joining with a derived table +# +create table t1 (a int); +insert into t1 values (1),(2); +create table t2 (a int, index(a)); +insert into t2 values (1),(3); +create view v1 as +select +nullif(tbl2.COL1,123) as COL10 +from +t1 left join +(select 1 as COL1, a from t2) tbl2 on t1.a=tbl2.a; +create table t10 (grp_id 
int, a int, index(grp_id)); +insert into t10 select A.seq, B.seq from seq_1_to_100 A, seq_1_to_100 B; +analyze table t10; +Table Op Msg_type Msg_text +test.t10 analyze status Engine-independent statistics collected +test.t10 analyze status Table is already up to date +explain +select * from +v1, +(select grp_id, count(*) from t10 group by grp_id) T +where +T.grp_id=v1.COL10; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 +1 PRIMARY t2 ref a a 5 test.t1.a 1 Using where; Using index +1 PRIMARY ref key0 key0 5 func 10 Using where +2 DERIVED t10 index grp_id grp_id 5 NULL 10000 Using index; Using temporary; Using filesort +drop table t1,t2, t10; +drop view v1; +# End of 10.11 tests +# +# MDEV-37057 Wrong result with LATERAL DERIVED +# +CREATE TABLE t1 ( +a int NOT NULL, +b int default null, +amount decimal DEFAULT NULL, +KEY t1_IDX (a,b) USING BTREE +) ENGINE=INNODB; +CREATE TABLE t2 ( +a int NOT NULL, +b int default null, +name varchar(50) DEFAULT NULL, +KEY t2_IDX (a,b) USING BTREE +) ENGINE=INNODB; +INSERT INTO t1 VALUES +(1, NULL, 10.0000), (2, 2, 20.0000), (3, 3, 30.0000), (4, 4, 40.0000), +(5, 5, NULL), (6, 6, NULL), (7, 7, 70.0000), (8, 8, 80.0000); +INSERT INTO t2 VALUES +(1, NULL, 'A'), (2,2, 'B'), (3,3, 'C'), (4,4, 'D'), (5,5, NULL), (6,6, NULL), +(7,7, 'E'), (8,8, 'F'), (9,9, 'G'), (10,10,'H'), (11,11, NULL), (12,12, NULL); +# Must use Split-Materialized: +explain $query; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 range t2_IDX t2_IDX 4 NULL 1 Using index condition +1 PRIMARY ref key0 key0 10 test.t2.a,test.t2.b 1 Using where +2 LATERAL DERIVED t1 ref t1_IDX t1_IDX 9 test.t2.a,test.t2.b 1 Using index condition +$query; +a b name total_amt +1 NULL A 10 +# Compare with correct result: +set statement optimizer_switch='split_materialized=off' for $query; +a b name total_amt +1 NULL A 10 +DROP TABLE t1,t2; +# +# MDEV-37230 Incorrect handling of NULL join conditions 
when using +# split-materialized +# +create table t1 +( +a int not null, +b int, +c int, +d int, +amount decimal, +key t1_ix1 (a,b) +) engine=innodb; +insert into t1 values (0, NULL, 0, NULL, 10.0000), (1, 1, 1, 1, 10.0000), +(2, 2, 2, 2, 20.0000), (3, 3, 3, 3, 30.0000), (4, 4, 4, 4, 40.0000), +(5, 5, 5, 5, NULL), (6, 6, 6, 6, NULL), (7, 7, 7, 7, 70.0000), +(8, 8, 8, 8, 80.0000); +create table t2 +( +a int NOT NULL, +b int, +name varchar(50), +key t2_ix1 (a,b) +) engine=innodb; +insert into t2 values (0, NULL, 'a'), (1, NULL, 'A'), (2, 2, 'B'), (3,3, 'C'), +(4,4, 'D'), (5,5, NULL), (6,6, NULL), (7,7, 'E'), (8,8, 'F'), (9,9, 'G'), +(10,10,'H'), (11,11, NULL), (12,12, NULL); +create table t3 +( +a int not null, +b int, +description varchar(50), +key t3_ix1 (a,b) +); +insert into t3 values (1, 1, 'bar'),(2,2,'buz'),(0,NULL, 'gold'); +insert into t3 select seq, seq, 'junk' from seq_3_to_13; +analyze table t1, t2, t3; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +test.t3 analyze status Engine-independent statistics collected +test.t3 analyze status OK +set optimizer_switch='default'; +set statement optimizer_switch='split_materialized=on' for explain format=json select * from t1 +join t2 on t1.a = t2.a and t1.b <=> t2.b +join +( +select a, b, description from t3 group by a, b +) dt on dt.a = t1.a and dt.b <=> t1.b and dt.b <=> t2.b +where dt.a < 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "cost": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["t1_ix1"], + "key": "t1_ix1", + "key_length": "4", + "used_key_parts": ["a"], + "loops": 1, + "rows": 1, + "cost": "REPLACED", + "filtered": 100, + "index_condition": "t1.a < 1" + } + }, + { + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["t2_ix1"], + "key": 
"t2_ix1", + "key_length": "9", + "used_key_parts": ["a", "b"], + "ref": ["test.t1.a", "test.t1.b"], + "loops": 1, + "rows": 1, + "cost": "REPLACED", + "filtered": 100, + "index_condition": "t1.b <=> t2.b" + } + }, + { + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "9", + "used_key_parts": ["a", "b"], + "ref": ["test.t1.a", "test.t1.b"], + "loops": 1, + "rows": 1, + "cost": "REPLACED", + "filtered": 100, + "attached_condition": "dt.b <=> t1.b and dt.b <=> t2.b", + "materialized": { + "lateral": 1, + "query_block": { + "select_id": 3, + "cost": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t3", + "access_type": "ref", + "possible_keys": ["t3_ix1"], + "key": "t3_ix1", + "key_length": "9", + "used_key_parts": ["a", "b"], + "ref": ["test.t1.a", "test.t1.b"], + "loops": 1, + "rows": 1, + "cost": "REPLACED", + "filtered": 100, + "index_condition": "t3.a < 1 and t3.b <=> t1.b and t3.b <=> t2.b and t3.a = t2.a" + } + } + ] + } + } + } + } + ] + } +} +set statement optimizer_switch='split_materialized=on' for select * from t1 +join t2 on t1.a = t2.a and t1.b <=> t2.b +join +( +select a, b, description from t3 group by a, b +) dt on dt.a = t1.a and dt.b <=> t1.b and dt.b <=> t2.b +where dt.a < 1; +a b c d amount a b name a b description +0 NULL 0 NULL 10 0 NULL a 0 NULL gold +set statement optimizer_switch='split_materialized=off' for explain format=json select * from t1 +join t2 on t1.a = t2.a and t1.b <=> t2.b +join +( +select a, b, description from t3 group by a, b +) dt on dt.a = t1.a and dt.b <=> t1.b and dt.b <=> t2.b +where dt.a < 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "cost": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["t1_ix1"], + "key": "t1_ix1", + "key_length": "4", + "used_key_parts": ["a"], + "loops": 1, + "rows": 1, + "cost": "REPLACED", + "filtered": 100, + "index_condition": "t1.a < 1" 
+ } + }, + { + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["t2_ix1"], + "key": "t2_ix1", + "key_length": "9", + "used_key_parts": ["a", "b"], + "ref": ["test.t1.a", "test.t1.b"], + "loops": 1, + "rows": 1, + "cost": "REPLACED", + "filtered": 100, + "index_condition": "t1.b <=> t2.b" + } + }, + { + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "9", + "used_key_parts": ["a", "b"], + "ref": ["test.t1.a", "test.t1.b"], + "loops": 1, + "rows": 1, + "cost": "REPLACED", + "filtered": 100, + "attached_condition": "dt.b <=> t1.b and dt.b <=> t2.b", + "materialized": { + "query_block": { + "select_id": 3, + "cost": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t3", + "access_type": "range", + "possible_keys": ["t3_ix1"], + "key": "t3_ix1", + "key_length": "4", + "used_key_parts": ["a"], + "loops": 1, + "rows": 1, + "cost": "REPLACED", + "filtered": 100, + "index_condition": "t3.a < 1" + } + } + ] + } + } + } + } + ] + } +} +set statement optimizer_switch='split_materialized=off' for select * from t1 +join t2 on t1.a = t2.a and t1.b <=> t2.b +join +( +select a, b, description from t3 group by a, b +) dt on dt.a = t1.a and dt.b <=> t1.b and dt.b <=> t2.b +where dt.a < 1; +a b c d amount a b name a b description +0 NULL 0 NULL 10 0 NULL a 0 NULL gold +drop table t1, t2, t3; +# End of 11.4 tests SET GLOBAL innodb_stats_persistent=@save_innodb_stats_persistent; diff --git a/mysql-test/main/derived_split_innodb.test b/mysql-test/main/derived_split_innodb.test index b4d37290d14..fce8a40b4a4 100644 --- a/mysql-test/main/derived_split_innodb.test +++ b/mysql-test/main/derived_split_innodb.test @@ -561,4 +561,141 @@ DROP TABLE t1; --echo # End of 10.4 tests +--echo # +--echo # MDEV-30711: Crash in add_keyuses_for_splitting() when joining with a derived table +--echo # +create table t1 (a int); +insert into t1 values (1),(2); +create table t2 (a int, index(a)); 
+insert into t2 values (1),(3); + +create view v1 as +select + nullif(tbl2.COL1,123) as COL10 +from + t1 left join + (select 1 as COL1, a from t2) tbl2 on t1.a=tbl2.a; + +create table t10 (grp_id int, a int, index(grp_id)); +insert into t10 select A.seq, B.seq from seq_1_to_100 A, seq_1_to_100 B; +analyze table t10; + +explain +select * from + v1, + (select grp_id, count(*) from t10 group by grp_id) T +where + T.grp_id=v1.COL10; + +drop table t1,t2, t10; +drop view v1; + +--echo # End of 10.11 tests + +--echo # +--echo # MDEV-37057 Wrong result with LATERAL DERIVED +--echo # + +CREATE TABLE t1 ( + a int NOT NULL, + b int default null, + amount decimal DEFAULT NULL, + KEY t1_IDX (a,b) USING BTREE +) ENGINE=INNODB; + +CREATE TABLE t2 ( + a int NOT NULL, + b int default null, + name varchar(50) DEFAULT NULL, + KEY t2_IDX (a,b) USING BTREE +) ENGINE=INNODB; + +INSERT INTO t1 VALUES +(1, NULL, 10.0000), (2, 2, 20.0000), (3, 3, 30.0000), (4, 4, 40.0000), +(5, 5, NULL), (6, 6, NULL), (7, 7, 70.0000), (8, 8, 80.0000); + +INSERT INTO t2 VALUES +(1, NULL, 'A'), (2,2, 'B'), (3,3, 'C'), (4,4, 'D'), (5,5, NULL), (6,6, NULL), +(7,7, 'E'), (8,8, 'F'), (9,9, 'G'), (10,10,'H'), (11,11, NULL), (12,12, NULL); + +let $query= +SELECT t2.a,t2.b,t2.name,t.total_amt FROM t2 +LEFT JOIN +( + SELECT a, b, sum(amount) total_amt FROM t1 GROUP BY a, b +) AS t ON t2.a=t.a and t2.b<=>t.b +WHERE t2.a < 2; + +--echo # Must use Split-Materialized: +evalp explain $query; +evalp $query; +--echo # Compare with correct result: +evalp set statement optimizer_switch='split_materialized=off' for $query; + +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-37230 Incorrect handling of NULL join conditions when using +--echo # split-materialized +--echo # + +create table t1 +( + a int not null, + b int, + c int, + d int, + amount decimal, + key t1_ix1 (a,b) +) engine=innodb; + +insert into t1 values (0, NULL, 0, NULL, 10.0000), (1, 1, 1, 1, 10.0000), +(2, 2, 2, 2, 20.0000), (3, 3, 3, 3, 30.0000), (4, 4, 4, 4, 
40.0000), +(5, 5, 5, 5, NULL), (6, 6, 6, 6, NULL), (7, 7, 7, 7, 70.0000), +(8, 8, 8, 8, 80.0000); + +create table t2 +( + a int NOT NULL, + b int, + name varchar(50), + key t2_ix1 (a,b) +) engine=innodb; + +insert into t2 values (0, NULL, 'a'), (1, NULL, 'A'), (2, 2, 'B'), (3,3, 'C'), +(4,4, 'D'), (5,5, NULL), (6,6, NULL), (7,7, 'E'), (8,8, 'F'), (9,9, 'G'), +(10,10,'H'), (11,11, NULL), (12,12, NULL); + +create table t3 +( + a int not null, + b int, + description varchar(50), + key t3_ix1 (a,b) +); +insert into t3 values (1, 1, 'bar'),(2,2,'buz'),(0,NULL, 'gold'); +insert into t3 select seq, seq, 'junk' from seq_3_to_13; + +let $q= +select * from t1 +join t2 on t1.a = t2.a and t1.b <=> t2.b +join +( + select a, b, description from t3 group by a, b +) dt on dt.a = t1.a and dt.b <=> t1.b and dt.b <=> t2.b +where dt.a < 1; + +analyze table t1, t2, t3; +set optimizer_switch='default'; +--source include/analyze-format.inc +eval set statement optimizer_switch='split_materialized=on' for explain format=json $q; +eval set statement optimizer_switch='split_materialized=on' for $q; +--source include/analyze-format.inc +eval set statement optimizer_switch='split_materialized=off' for explain format=json $q; +eval set statement optimizer_switch='split_materialized=off' for $q; + +drop table t1, t2, t3; + +--echo # End of 11.4 tests + SET GLOBAL innodb_stats_persistent=@save_innodb_stats_persistent; diff --git a/mysql-test/main/desc_index_min_max.result b/mysql-test/main/desc_index_min_max.result index 5698ddbe3f6..3b6e59cc7fd 100644 --- a/mysql-test/main/desc_index_min_max.result +++ b/mysql-test/main/desc_index_min_max.result @@ -113,13 +113,13 @@ min(a) 2 explain select max(200 - a) from t1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index NULL a 5 NULL 100 Using index +1 SIMPLE t1 index NULL a 5 NULL # Using index select max(200 - a) from t1; max(200 - a) 198 explain select min(200 - a) from t1; id select_type table type possible_keys 
key key_len ref rows Extra -1 SIMPLE t1 index NULL a 5 NULL 100 Using index +1 SIMPLE t1 index NULL a 5 NULL # Using index select min(200 - a) from t1; min(200 - a) 0 @@ -174,6 +174,4 @@ SELECT MAX(a) FROM t1 WHERE a <= 0.6789; MAX(a) 0.6789 drop table t1; -# # end of test 11.4 -# diff --git a/mysql-test/main/desc_index_min_max.test b/mysql-test/main/desc_index_min_max.test index fc786fc45d5..99128e1584e 100644 --- a/mysql-test/main/desc_index_min_max.test +++ b/mysql-test/main/desc_index_min_max.test @@ -102,13 +102,13 @@ eval $query; # double reversion let $query= select max(200 - a) from t1; -replace_column 9 100; +replace_column 9 #; eval explain $query; eval $query; let $query= select min(200 - a) from t1; -replace_column 9 100; +replace_column 9 #; eval explain $query; eval $query; @@ -162,6 +162,5 @@ eval $query; # Cleanup drop table t1; ---echo # + --echo # end of test 11.4 ---echo # diff --git a/mysql-test/main/func_hybrid_type.result b/mysql-test/main/func_hybrid_type.result index dd9774cd24b..cbeec3cecea 100644 --- a/mysql-test/main/func_hybrid_type.result +++ b/mysql-test/main/func_hybrid_type.result @@ -3430,7 +3430,7 @@ CASE WHEN TRUE THEN COALESCE(NULL) ELSE 4 END NULL SELECT COALESCE(COALESCE(NULL), 1.1) AS c0, IF(0, COALESCE(NULL), 1.1) AS c1; Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def c0 246 4 3 Y 32896 1 63 +def c0 246 4 3 N 32897 1 63 def c1 246 4 3 Y 32896 1 63 c0 c1 1.1 1.1 @@ -3795,8 +3795,8 @@ FROM t1; SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `f0` decimal(1,0) DEFAULT NULL, - `f1` decimal(1,0) DEFAULT NULL + `f0` decimal(1,0) NOT NULL, + `f1` decimal(1,0) NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1, t2; SET sql_mode=DEFAULT; @@ -4366,3 +4366,146 @@ LEAST( CAST( 0 AS CHAR ), OLD_PASSWORD( 1 ) ) # # End of 10.6 tests # +# +# MDEV-36581: COALESCE() returns nullable column while IFNULL() does not +# 
+CREATE OR REPLACE VIEW test_coalesce_vs_ifnull AS +SELECT +COALESCE(operation_date, '1970-01-01 00:00:00') AS coalesced_date, +IFNULL(operation_date, '1970-01-01 00:00:00') AS ifnull_date +FROM ( +SELECT NULL AS operation_date +) AS t; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE +FROM INFORMATION_SCHEMA.COLUMNS +WHERE TABLE_NAME = 'test_coalesce_vs_ifnull'; +COLUMN_NAME IS_NULLABLE COLUMN_TYPE +coalesced_date NO varchar(19) +ifnull_date NO varchar(19) +DROP VIEW test_coalesce_vs_ifnull; +CREATE VIEW v2 as SELECT COALESCE(c, NULL) AS c_col, IFNULL(c, 10) AS i_col FROM (SELECT NULL AS c) AS t; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v2'; +COLUMN_NAME IS_NULLABLE COLUMN_TYPE +c_col YES binary(0) +i_col NO varchar(2) +DROP VIEW v2; +CREATE VIEW v3 as SELECT COALESCE(c, 10, NULL) AS c_col, IFNULL(c, 10) AS i_col FROM (SELECT NULL AS c) AS t; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v3'; +COLUMN_NAME IS_NULLABLE COLUMN_TYPE +c_col NO varchar(2) +i_col NO varchar(2) +DROP VIEW v3; +CREATE VIEW v4 AS SELECT COALESCE(c, NULL, NULL) as c_col FROM (SELECT NULL AS c) AS t; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v4'; +COLUMN_NAME IS_NULLABLE COLUMN_TYPE +c_col YES binary(0) +DROP VIEW v4; +CREATE VIEW v5 AS SELECT COALESCE(c, COALESCE(NULL, 10), NULL) as c_col FROM (SELECT NULL AS c) AS t; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v5'; +COLUMN_NAME IS_NULLABLE COLUMN_TYPE +c_col NO varchar(2) +DROP VIEW v5; +CREATE TABLE t (c1 INT, c2 DOUBLE, c3 VARCHAR(5), c4 DATE); +INSERT INTO t values (1, 2.3, 'four', '2025-05-06'); +SELECT COALESCE(c1, 10) AS coalesced_c1, IFNULL(c1, 10) AS ifnull_c1 FROM t; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def coalesced_c1 3 11 1 N 32897 0 63 +def 
ifnull_c1 3 11 1 N 32897 0 63 +coalesced_c1 ifnull_c1 +1 1 +SELECT COALESCE(c1, NULL) AS coalesced_c1, IFNULL(c1, NULL) AS ifnull_c1 FROM t; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def coalesced_c1 3 11 1 Y 32896 0 63 +def ifnull_c1 3 11 1 Y 32896 0 63 +coalesced_c1 ifnull_c1 +1 1 +SELECT COALESCE(c2, NULL) AS coalesced_c2, IFNULL(c2, NULL) as ifnull_c2 FROM t; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def coalesced_c2 5 22 3 Y 32896 31 63 +def ifnull_c2 5 22 3 Y 32896 31 63 +coalesced_c2 ifnull_c2 +2.3 2.3 +SELECT COALESCE(c3, 'two') as coalesced_c1, COALESCE(c4, '2025-07-08') AS coalesced_date FROM t; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def coalesced_c1 253 5 4 N 1 39 8 +def coalesced_date 253 10 10 N 1 39 8 +coalesced_c1 coalesced_date +four 2025-05-06 +INSERT INTO t values (2, 3.4, NULL, NULL); +SELECT COALESCE(c3, 'two') AS coalesced_c3, IFNULL(c3, 'three') AS ifnull_c3 FROM t WHERE c1 = 2; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def coalesced_c3 253 5 3 N 1 39 8 +def ifnull_c3 253 5 5 N 1 39 8 +coalesced_c3 ifnull_c3 +two three +SELECT COALESCE(c3, 'four', NULL) AS coalesced_c3, COALESCE(COALESCE(c3, NULL), NULL) AS coalesced_c3_null FROM t WHERE c1 = 2; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def coalesced_c3 253 5 4 N 1 39 8 +def coalesced_c3_null 253 5 0 Y 0 39 8 +coalesced_c3 coalesced_c3_null +four NULL +SELECT COALESCE(c4, COALESCE('2025-05-06', NULL)) AS coalesced_date FROM t WHERE c1 = 2; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def coalesced_date 253 10 10 N 1 39 8 +coalesced_date +2025-05-06 +DROP TABLE t; +SET 
sql_mode=''; +CREATE TABLE t1 (a UUID, b VARCHAR(32) NOT NULL); +INSERT INTO t1 VALUES (NULL, '1'); +CREATE TABLE t2 AS SELECT COALESCE(a, b), IFNULL(a, b) FROM t1; +Warnings: +Warning 1292 Incorrect uuid value: '1' +Warning 1292 Incorrect uuid value: '1' +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `COALESCE(a, b)` uuid DEFAULT NULL, + `IFNULL(a, b)` uuid DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't2'; +COLUMN_NAME IS_NULLABLE COLUMN_TYPE +COALESCE(a, b) YES uuid +IFNULL(a, b) YES uuid +DROP TABLE t1, t2; +CREATE TABLE t (c1 INET6, c2 INET4); +INSERT INTO t VALUES ('::', '0.0.0.0'), (NULL, NULL); +CREATE TABLE t1 AS +SELECT +COALESCE(c1, '::1') AS inet6_c1_c, IFNULL(c1, '::1') AS inet6_c1_i, +COALESCE(c2, '0.0.0.0') AS inet4_c2_c, IFNULL(c2, '0.0.0.0') AS inet4_c2_i +FROM t; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; +COLUMN_NAME IS_NULLABLE COLUMN_TYPE +inet6_c1_c NO inet6 +inet6_c1_i NO inet6 +inet4_c2_c NO inet4 +inet4_c2_i NO inet4 +CREATE TABLE t2 AS +SELECT +COALESCE(c1, 'ipv6') AS inet6_c1_c, IFNULL(c1, 'ipv6') AS inet6_c1_i, +COALESCE(c2, 'ipv4') AS inet4_c2_c, IFNULL(c2, 'ipv4') AS inet4_c2_i +FROM t; +Warnings: +Warning 1292 Incorrect inet6 value: 'ipv6' +Warning 1292 Incorrect inet6 value: 'ipv6' +Warning 1292 Incorrect inet4 value: 'ipv4' +Warning 1292 Incorrect inet4 value: 'ipv4' +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't2'; +COLUMN_NAME IS_NULLABLE COLUMN_TYPE +inet6_c1_c YES inet6 +inet6_c1_i YES inet6 +inet4_c2_c YES inet4 +inet4_c2_i YES inet4 +CREATE TABLE t3 AS SELECT COALESCE(c1, '::1') AS inet4_c1_c, IFNULL(c1, '::1') as inet6_c1_i FROM t WHERE c1 IS NULL; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't3'; 
+COLUMN_NAME IS_NULLABLE COLUMN_TYPE +inet4_c1_c NO inet6 +inet6_c1_i NO inet6 +CREATE TABLE t4 AS SELECT COALESCE(c1, 'foo') AS inet4_c1_c, IFNULL(c1, 'bar') as inet6_c1_i FROM t WHERE c1 IS NOT NULL; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't3'; +COLUMN_NAME IS_NULLABLE COLUMN_TYPE +inet4_c1_c NO inet6 +inet6_c1_i NO inet6 +DROP TABLE t, t1, t2, t3, t4; +# End of 10.11 tests diff --git a/mysql-test/main/func_hybrid_type.test b/mysql-test/main/func_hybrid_type.test index e1e347b115f..e334597b7fd 100644 --- a/mysql-test/main/func_hybrid_type.test +++ b/mysql-test/main/func_hybrid_type.test @@ -1164,3 +1164,110 @@ SELECT LEAST( CAST( 0 AS CHAR ), OLD_PASSWORD( 1 ) ); --echo # --echo # End of 10.6 tests --echo # + +--echo # +--echo # MDEV-36581: COALESCE() returns nullable column while IFNULL() does not +--echo # + +CREATE OR REPLACE VIEW test_coalesce_vs_ifnull AS +SELECT + COALESCE(operation_date, '1970-01-01 00:00:00') AS coalesced_date, + IFNULL(operation_date, '1970-01-01 00:00:00') AS ifnull_date +FROM ( + SELECT NULL AS operation_date +) AS t; + +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE +FROM INFORMATION_SCHEMA.COLUMNS +WHERE TABLE_NAME = 'test_coalesce_vs_ifnull'; + +DROP VIEW test_coalesce_vs_ifnull; + +# Tests on views + +CREATE VIEW v2 as SELECT COALESCE(c, NULL) AS c_col, IFNULL(c, 10) AS i_col FROM (SELECT NULL AS c) AS t; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v2'; + +DROP VIEW v2; + +CREATE VIEW v3 as SELECT COALESCE(c, 10, NULL) AS c_col, IFNULL(c, 10) AS i_col FROM (SELECT NULL AS c) AS t; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v3'; + +DROP VIEW v3; + +CREATE VIEW v4 AS SELECT COALESCE(c, NULL, NULL) as c_col FROM (SELECT NULL AS c) AS t; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v4'; + +DROP VIEW v4; + +CREATE VIEW v5 
AS SELECT COALESCE(c, COALESCE(NULL, 10), NULL) as c_col FROM (SELECT NULL AS c) AS t; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v5'; + +DROP VIEW v5; + +# Tests on tables + +CREATE TABLE t (c1 INT, c2 DOUBLE, c3 VARCHAR(5), c4 DATE); +INSERT INTO t values (1, 2.3, 'four', '2025-05-06'); + +--enable_metadata +--disable_ps_protocol +--disable_view_protocol +SELECT COALESCE(c1, 10) AS coalesced_c1, IFNULL(c1, 10) AS ifnull_c1 FROM t; +SELECT COALESCE(c1, NULL) AS coalesced_c1, IFNULL(c1, NULL) AS ifnull_c1 FROM t; +SELECT COALESCE(c2, NULL) AS coalesced_c2, IFNULL(c2, NULL) as ifnull_c2 FROM t; +SELECT COALESCE(c3, 'two') as coalesced_c1, COALESCE(c4, '2025-07-08') AS coalesced_date FROM t; +--enable_view_protocol +--enable_ps_protocol +--disable_metadata + +INSERT INTO t values (2, 3.4, NULL, NULL); +--enable_metadata +--disable_ps_protocol +--disable_view_protocol +SELECT COALESCE(c3, 'two') AS coalesced_c3, IFNULL(c3, 'three') AS ifnull_c3 FROM t WHERE c1 = 2; +SELECT COALESCE(c3, 'four', NULL) AS coalesced_c3, COALESCE(COALESCE(c3, NULL), NULL) AS coalesced_c3_null FROM t WHERE c1 = 2; +SELECT COALESCE(c4, COALESCE('2025-05-06', NULL)) AS coalesced_date FROM t WHERE c1 = 2; +--enable_view_protocol +--enable_ps_protocol +--disable_metadata + +DROP TABLE t; + +# Case when one type cannot always be converted to another safely +SET sql_mode=''; +CREATE TABLE t1 (a UUID, b VARCHAR(32) NOT NULL); +INSERT INTO t1 VALUES (NULL, '1'); +CREATE TABLE t2 AS SELECT COALESCE(a, b), IFNULL(a, b) FROM t1; + +SHOW CREATE TABLE t2; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't2'; + +DROP TABLE t1, t2; + +CREATE TABLE t (c1 INET6, c2 INET4); +INSERT INTO t VALUES ('::', '0.0.0.0'), (NULL, NULL); + +CREATE TABLE t1 AS +SELECT + COALESCE(c1, '::1') AS inet6_c1_c, IFNULL(c1, '::1') AS inet6_c1_i, + COALESCE(c2, '0.0.0.0') AS inet4_c2_c, IFNULL(c2, '0.0.0.0') AS inet4_c2_i 
+FROM t; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; + +CREATE TABLE t2 AS +SELECT + COALESCE(c1, 'ipv6') AS inet6_c1_c, IFNULL(c1, 'ipv6') AS inet6_c1_i, + COALESCE(c2, 'ipv4') AS inet4_c2_c, IFNULL(c2, 'ipv4') AS inet4_c2_i +FROM t; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't2'; + +CREATE TABLE t3 AS SELECT COALESCE(c1, '::1') AS inet4_c1_c, IFNULL(c1, '::1') as inet6_c1_i FROM t WHERE c1 IS NULL; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't3'; + +CREATE TABLE t4 AS SELECT COALESCE(c1, 'foo') AS inet4_c1_c, IFNULL(c1, 'bar') as inet6_c1_i FROM t WHERE c1 IS NOT NULL; +SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't3'; + +DROP TABLE t, t1, t2, t3, t4; + +--echo # End of 10.11 tests diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result index 2a3d406263f..1a596a48319 100644 --- a/mysql-test/main/func_json.result +++ b/mysql-test/main/func_json.result @@ -978,10 +978,8 @@ FROM (SELECT * FROM json_test) AS json_test_values; json_object("a", json_compact(a), "b", json_compact(b)) {"a": [1,2,3], "b": {"a":"foo"}} DROP TABLE json_test; -# # End of 10.2 tests # -# # MDEV-12854 Synchronize CREATE..SELECT data type and result set metadata data type for INT functions # SELECT @@ -1513,10 +1511,8 @@ JSON_VALID(' {"number": 01E-4}') select JSON_VALID(' {"number": 0E-4.0}'); JSON_VALID(' {"number": 0E-4.0}') 0 -# # End of 10.4 tests # -# # MDEV-16620 JSON_ARRAYAGG # CREATE TABLE t1 (a INT); @@ -1748,10 +1744,8 @@ NULL Warnings: Warning 4036 Character disallowed in JSON in argument 1 to function 'json_extract' at position 2 SET @@collation_connection= @save_collation_connection; -# # End of 10.5 tests # -# # MDEV-26054 Server crashes in Item_func_json_arrayagg::get_str_from_field # CREATE TABLE t (a VARCHAR(8)); @@ -1787,6 +1781,15 @@ 
FROM JSON_TABLE (@data, '$[*]' COLUMNS (data text PATH '$.Data')) AS t; data # +# MDEV-21530: json_extract STILL crashes in Item_func_json_extract::read_json +# +select null<=>json_extract('1',json_object(null,'{ }',null,null),'{}'); +null<=>json_extract('1',json_object(null,'{ }',null,null),'{}') +1 +Warnings: +Warning 4042 Syntax error in JSON path in argument 2 to function 'json_extract' at position 1 +# End of 10.6 tests +# # MDEV-35614 JSON_UNQUOTE doesn't work with emojis # SELECT HEX(JSON_UNQUOTE('"\\ud83d\\ude0a"')) as hex_smiley; @@ -1824,9 +1827,6 @@ show warnings; Level Code Message Warning 4035 Broken JSON string in argument 1 to function 'json_unquote' at position 13 # -# End of 10.6 tests -# -# # MDEV-31147 json_normalize does not work correctly with MSAN build # CREATE TABLE t1 (val JSON); @@ -1836,10 +1836,8 @@ SELECT * FROM t1; val normalized_json 15 1.5E1 DROP TABLE t1; -# # End of 10.8 tests # -# # MDEV-27677: Implement JSON_OVERLAPS() # # Testing scalar json datatypes @@ -2691,10 +2689,8 @@ SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); JSON_VALUE(@json,'$.A[last-1][last-1].key1') NULL SET @@collation_connection= @save_collation_connection; -# # End of 10.9 Test # -# # MDEV-32007: JSON_VALUE and JSON_EXTRACT doesn't handle dash (-) # as first character in key # @@ -4883,7 +4879,6 @@ SELECT JSON_SCHEMA_VALID(@a,'{}'); JSON_SCHEMA_VALID(@a,'{}') NULL # End of 11.1 test -# Beginning of 11.2 # # MDEV-30145: JSON_TABLE: allow to retrieve the key when iterating on JSON objects # @@ -5074,9 +5069,6 @@ FROM JSON_TABLE( JSON_KEY_VALUE('{"key1":{"a":1, "b": [1,2,3, {"some_key":"some_val", "c":3}]}, "key2":"val2"}', '$.key1.b[0]'), '$[*]' COLUMNS (k VARCHAR(20) PATH '$.key', v VARCHAR(20) PATH '$.value', id FOR ORDINALITY)) AS jt; k v id -# End of 11.2 test -# -# Beginning of 11.2 tests # # MDEV-26182: Implement json_array_intersect() # @@ -5273,6 +5265,4 @@ SET @obj1='{ "a": 1,"b": 2,"c": 3}'; SELECT JSON_OBJECT_FILTER_KEYS (@obj1,@arr1); 
JSON_OBJECT_FILTER_KEYS (@obj1,@arr1) NULL -# # End of 11.2 Test -# diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test index db91d1d36c1..7090189ad4c 100644 --- a/mysql-test/main/func_json.test +++ b/mysql-test/main/func_json.test @@ -619,9 +619,7 @@ SELECT json_object("a", json_compact(a), "b", json_compact(b)) FROM (SELECT * FROM json_test) AS json_test_values; DROP TABLE json_test; ---echo # --echo # End of 10.2 tests ---echo # --echo # --echo # MDEV-12854 Synchronize CREATE..SELECT data type and result set metadata data type for INT functions @@ -983,9 +981,7 @@ select JSON_VALID(' {"number": 00E-4}'); select JSON_VALID(' {"number": 01E-4}'); select JSON_VALID(' {"number": 0E-4.0}'); ---echo # --echo # End of 10.4 tests ---echo # -- echo # -- echo # MDEV-16620 JSON_ARRAYAGG @@ -1207,9 +1203,7 @@ SELECT JSON_EXTRACT('{"a": 1,"b": 2}','$.a'); SET @@collation_connection= @save_collation_connection; ---echo # --echo # End of 10.5 tests ---echo # --echo # --echo # MDEV-26054 Server crashes in Item_func_json_arrayagg::get_str_from_field @@ -1245,6 +1239,14 @@ SELECT FROM JSON_TABLE (@data, '$[*]' COLUMNS (data text PATH '$.Data')) AS t; +--echo # +--echo # MDEV-21530: json_extract STILL crashes in Item_func_json_extract::read_json +--echo # + +select null<=>json_extract('1',json_object(null,'{ }',null,null),'{}'); + +--echo # End of 10.6 tests + --echo # --echo # MDEV-35614 JSON_UNQUOTE doesn't work with emojis --echo # @@ -1265,10 +1267,6 @@ select json_unquote(json_extract(@v,'$.color')) as unquoted, collation(json_unqu SELECT JSON_UNQUOTE('"\\uc080\\ude0a"') as invalid_utf8mb4; show warnings; ---echo # ---echo # End of 10.6 tests ---echo # - --echo # --echo # MDEV-31147 json_normalize does not work correctly with MSAN build --echo # @@ -1278,9 +1276,7 @@ INSERT INTO t1 (val) VALUES ('15'); SELECT * FROM t1; DROP TABLE t1; ---echo # --echo # End of 10.8 tests ---echo # --echo # --echo # MDEV-27677: Implement JSON_OVERLAPS() @@ -1954,9 
+1950,7 @@ SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); SET @@collation_connection= @save_collation_connection; ---echo # --echo # End of 10.9 Test ---echo # --echo # --echo # MDEV-32007: JSON_VALUE and JSON_EXTRACT doesn't handle dash (-) @@ -3837,8 +3831,6 @@ SELECT JSON_SCHEMA_VALID(@a,'{}'); --echo # End of 11.1 test ---echo # Beginning of 11.2 - --echo # --echo # MDEV-30145: JSON_TABLE: allow to retrieve the key when iterating on JSON objects --echo # @@ -3988,16 +3980,10 @@ FROM JSON_TABLE( JSON_KEY_VALUE('{"key1":{"a":1, "b": [1,2,3, {"some_key":"some_val", "c":3}]}, "key2":"val2"}', '$.key1.b[0]'), '$[*]' COLUMNS (k VARCHAR(20) PATH '$.key', v VARCHAR(20) PATH '$.value', id FOR ORDINALITY)) AS jt; ---echo # End of 11.2 test - ---echo # ---echo # Beginning of 11.2 tests --echo # --echo # MDEV-26182: Implement json_array_intersect() --echo # - - --echo # JSON_ARRAY_INTERSECT() --echo # Scalar as elements @@ -4176,6 +4162,4 @@ SET CHARACTER SET utf8; SET @obj1='{ "a": 1,"b": 2,"c": 3}'; SELECT JSON_OBJECT_FILTER_KEYS (@obj1,@arr1); ---echo # --echo # End of 11.2 Test ---echo # diff --git a/mysql-test/main/gis.result b/mysql-test/main/gis.result index 2a32dcc61a9..092ce5e3e7c 100644 --- a/mysql-test/main/gis.result +++ b/mysql-test/main/gis.result @@ -5005,8 +5005,8 @@ LEAST(POINT(1,1),0x60); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `COALESCE(0x60,POINT(1,1))` longblob DEFAULT NULL, - `COALESCE(POINT(1,1),0x60)` longblob DEFAULT NULL, + `COALESCE(0x60,POINT(1,1))` longblob NOT NULL, + `COALESCE(POINT(1,1),0x60)` longblob NOT NULL, `LEAST(0x60,POINT(1,1))` longblob DEFAULT NULL, `LEAST(POINT(1,1),0x60)` longblob DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci diff --git a/mysql-test/main/having_cond_pushdown.result b/mysql-test/main/having_cond_pushdown.result index f94fb237bb7..9011b14a86e 100644 --- a/mysql-test/main/having_cond_pushdown.result +++ b/mysql-test/main/having_cond_pushdown.result 
@@ -6304,3 +6304,165 @@ SELECT LOAD_FILE('') AS f, a FROM t1 GROUP BY f, a HAVING f = a; f a DROP TABLE t1; End of 10.5 tests +# +# MDEV-19269 Pushdown into IN subquery is not made on the second +# execution of stmt +# +create table t1 (a int, b int); +create table t2 (x int, y int); +insert into t1 values (1,1),(2,2); +insert into t2 values (1,1),(2,2),(2,3); +prepare stmt from " +EXPLAIN FORMAT=JSON +SELECT * FROM t1 +WHERE a = b + AND (a,b) IN (SELECT t2.x, COUNT(t2.y) FROM t2 WHERE @a=1 GROUP BY t2.x);"; +set @a=2; +execute stmt; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "message": "Impossible WHERE noticed after reading const tables" + }, + "subqueries": [ + { + "materialization": { + "query_block": { + "select_id": 2, + "table": { + "message": "Impossible WHERE" + } + } + } + } + ] + } +} +set @a=1; +# we expect to see having_condition in both the below statements +execute stmt; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "cost": 0.021147833, + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ALL", + "loops": 1, + "rows": 2, + "cost": 0.01034841, + "filtered": 100, + "attached_condition": "t1.b = t1.a and t1.a is not null and t1.a is not null" + } + }, + { + "table": { + "table_name": "", + "access_type": "eq_ref", + "possible_keys": ["distinct_key"], + "key": "distinct_key", + "key_length": "12", + "used_key_parts": ["x", "COUNT(t2.y)"], + "ref": ["test.t1.a", "test.t1.a"], + "loops": 2, + "rows": 1, + "cost": 0.010799423, + "filtered": 100, + "attached_condition": "t1.a = ``.`COUNT(t2.y)`", + "materialized": { + "unique": 1, + "materialization": { + "query_block": { + "select_id": 2, + "cost": 0.012403489, + "having_condition": "`COUNT(t2.y)` = t2.x", + "temporary_table": { + "nested_loop": [ + { + "table": { + "table_name": "t2", + "access_type": "ALL", + "loops": 1, + "rows": 3, + "cost": 0.010504815, + "filtered": 100 + } + } + ] + } + } + } + } + } + } + ] + } +} +execute stmt; +EXPLAIN +{ + 
"query_block": { + "select_id": 1, + "cost": 0.021147833, + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ALL", + "loops": 1, + "rows": 2, + "cost": 0.01034841, + "filtered": 100, + "attached_condition": "t1.b = t1.a and t1.a is not null and t1.a is not null" + } + }, + { + "table": { + "table_name": "", + "access_type": "eq_ref", + "possible_keys": ["distinct_key"], + "key": "distinct_key", + "key_length": "12", + "used_key_parts": ["x", "COUNT(t2.y)"], + "ref": ["test.t1.a", "test.t1.a"], + "loops": 2, + "rows": 1, + "cost": 0.010799423, + "filtered": 100, + "attached_condition": "t1.a = ``.`COUNT(t2.y)`", + "materialized": { + "unique": 1, + "materialization": { + "query_block": { + "select_id": 2, + "cost": 0.012403489, + "having_condition": "`COUNT(t2.y)` = t2.x", + "temporary_table": { + "nested_loop": [ + { + "table": { + "table_name": "t2", + "access_type": "ALL", + "loops": 1, + "rows": 3, + "cost": 0.010504815, + "filtered": 100 + } + } + ] + } + } + } + } + } + } + ] + } +} +drop table t1, t2; +End of 10.11 tests diff --git a/mysql-test/main/having_cond_pushdown.test b/mysql-test/main/having_cond_pushdown.test index 58a05a159cb..acd1e508181 100644 --- a/mysql-test/main/having_cond_pushdown.test +++ b/mysql-test/main/having_cond_pushdown.test @@ -1751,3 +1751,31 @@ SELECT LOAD_FILE('') AS f, a FROM t1 GROUP BY f, a HAVING f = a; DROP TABLE t1; --echo End of 10.5 tests + +--echo # +--echo # MDEV-19269 Pushdown into IN subquery is not made on the second +--echo # execution of stmt +--echo # + +create table t1 (a int, b int); +create table t2 (x int, y int); + +insert into t1 values (1,1),(2,2); +insert into t2 values (1,1),(2,2),(2,3); + +prepare stmt from " +EXPLAIN FORMAT=JSON +SELECT * FROM t1 +WHERE a = b + AND (a,b) IN (SELECT t2.x, COUNT(t2.y) FROM t2 WHERE @a=1 GROUP BY t2.x);"; + +set @a=2; +execute stmt; +set @a=1; +--echo # we expect to see having_condition in both the below statements +execute stmt; +execute stmt; + 
+drop table t1, t2; + +--echo End of 10.11 tests diff --git a/mysql-test/main/index_merge_innodb_notembedded.result b/mysql-test/main/index_merge_innodb_notembedded.result new file mode 100644 index 00000000000..001e82167f6 --- /dev/null +++ b/mysql-test/main/index_merge_innodb_notembedded.result @@ -0,0 +1,102 @@ + +# MDEV-36410 wrong result with index_merge on indexes having descending primary key +# +set optimizer_trace='enabled=on'; +SET @save_sort_buffer_size=@@sort_buffer_size; +SET SESSION sort_buffer_size = 1024*16; +CREATE TABLE t1 ( +id bigint(20) NOT NULL, +title varchar(255) NOT NULL, +status tinyint(4) DEFAULT 0, +country_code varchar(5) DEFAULT NULL, +PRIMARY KEY (id), +KEY idx_status (status), +KEY idx_country_code_status_id (country_code,status,id DESC) +) ENGINE=InnoDB; +INSERT INTO t1(id,title,status,country_code) +SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500; +# This must not use index_merge: +EXPLAIN +SELECT * FROM t1 WHERE country_code ='C1' and `status` =1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref idx_status,idx_country_code_status_id idx_status 2 const 50 Using where +set @trace= (select JSON_EXTRACT(trace, '$**.range_scan_alternatives[*]') +from INFORMATION_SCHEMA.OPTIMIZER_TRACE); +select json_detailed(json_extract(@trace, '$[*].index')) as INDEXES; +INDEXES +[ + "idx_status", + "idx_country_code_status_id" +] +select json_detailed(json_extract(@trace, '$[*].rowid_ordered')) as ROR; +ROR +[ + true, + false +] +DROP table t1; +# Now, try with indexes using ASC ordering and PK using DESC +CREATE TABLE t1 ( +id bigint(20) NOT NULL, +title varchar(255) NOT NULL, +status tinyint(4) DEFAULT 0, +country_code varchar(5) DEFAULT NULL, +PRIMARY KEY (id DESC), +KEY idx_status (status), +KEY idx_country_code_status_id (country_code,status,id) +) ENGINE=InnoDB; +INSERT INTO t1(id,title,status,country_code) +SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM 
seq_1_to_500; +# Must not use index_merge: +EXPLAIN +SELECT * FROM t1 WHERE country_code ='C1' and status = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref idx_status,idx_country_code_status_id idx_status 2 const 50 Using where +set @trace= (select JSON_EXTRACT(trace, '$**.range_scan_alternatives[*]') +from INFORMATION_SCHEMA.OPTIMIZER_TRACE); +select json_detailed(json_extract(@trace, '$[*].index')) as INDEXES; +INDEXES +[ + "idx_status", + "idx_country_code_status_id" +] +select json_detailed(json_extract(@trace, '$[*].rowid_ordered')) as ROR; +ROR +[ + true, + false +] +DROP TABLE t1; +# Now, try with indexes using DESC ordering and PK using DESC +CREATE TABLE t1 ( +id bigint(20) NOT NULL, +title varchar(255) NOT NULL, +status tinyint(4) DEFAULT 0, +country_code varchar(5) DEFAULT NULL, +PRIMARY KEY (id DESC), +KEY idx_status (status), +KEY idx_country_code_status_id (country_code,status,id DESC) +) ENGINE=InnoDB; +INSERT INTO t1(id,title,status,country_code) +SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500; +# Must not use index_merge: +EXPLAIN +SELECT * FROM t1 WHERE country_code ='C1' and status = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref idx_status,idx_country_code_status_id idx_status 2 const 50 Using where +set @trace= (select JSON_EXTRACT(trace, '$**.range_scan_alternatives[*]') +from INFORMATION_SCHEMA.OPTIMIZER_TRACE); +select json_detailed(json_extract(@trace, '$[*].index')) as INDEXES; +INDEXES +[ + "idx_status", + "idx_country_code_status_id" +] +select json_detailed(json_extract(@trace, '$[*].rowid_ordered')) as ROR; +ROR +[ + true, + false +] +DROP TABLE t1; +SET sort_buffer_size= @save_sort_buffer_size; diff --git a/mysql-test/main/index_merge_innodb_notembedded.test b/mysql-test/main/index_merge_innodb_notembedded.test new file mode 100644 index 00000000000..eb38108562f --- /dev/null +++ 
b/mysql-test/main/index_merge_innodb_notembedded.test @@ -0,0 +1,90 @@ +--source include/have_innodb.inc +--source include/have_sequence.inc +--source include/not_embedded.inc + +--echo +--echo # MDEV-36410 wrong result with index_merge on indexes having descending primary key +--echo # + +set optimizer_trace='enabled=on'; +SET @save_sort_buffer_size=@@sort_buffer_size; +SET SESSION sort_buffer_size = 1024*16; + +CREATE TABLE t1 ( + id bigint(20) NOT NULL, + title varchar(255) NOT NULL, + status tinyint(4) DEFAULT 0, + country_code varchar(5) DEFAULT NULL, + PRIMARY KEY (id), + KEY idx_status (status), + KEY idx_country_code_status_id (country_code,status,id DESC) +) ENGINE=InnoDB; + +INSERT INTO t1(id,title,status,country_code) +SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500; + +--echo # This must not use index_merge: +EXPLAIN +SELECT * FROM t1 WHERE country_code ='C1' and `status` =1; +set @trace= (select JSON_EXTRACT(trace, '$**.range_scan_alternatives[*]') + from INFORMATION_SCHEMA.OPTIMIZER_TRACE); +select json_detailed(json_extract(@trace, '$[*].index')) as INDEXES; +select json_detailed(json_extract(@trace, '$[*].rowid_ordered')) as ROR; + +#select JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives[*].index')) AS JS from INFORMATION_SCHEMA.OPTIMIZER_TRACE; + +DROP table t1; + +--echo # Now, try with indexes using ASC ordering and PK using DESC +CREATE TABLE t1 ( + id bigint(20) NOT NULL, + title varchar(255) NOT NULL, + status tinyint(4) DEFAULT 0, + country_code varchar(5) DEFAULT NULL, + PRIMARY KEY (id DESC), + KEY idx_status (status), + KEY idx_country_code_status_id (country_code,status,id) +) ENGINE=InnoDB; + +INSERT INTO t1(id,title,status,country_code) +SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500; + +--echo # Must not use index_merge: +EXPLAIN +SELECT * FROM t1 WHERE country_code ='C1' and status = 1; + +set @trace= (select JSON_EXTRACT(trace, '$**.range_scan_alternatives[*]') 
+ from INFORMATION_SCHEMA.OPTIMIZER_TRACE); +select json_detailed(json_extract(@trace, '$[*].index')) as INDEXES; +select json_detailed(json_extract(@trace, '$[*].rowid_ordered')) as ROR; + +DROP TABLE t1; + +--echo # Now, try with indexes using DESC ordering and PK using DESC +CREATE TABLE t1 ( + id bigint(20) NOT NULL, + title varchar(255) NOT NULL, + status tinyint(4) DEFAULT 0, + country_code varchar(5) DEFAULT NULL, + PRIMARY KEY (id DESC), + KEY idx_status (status), + KEY idx_country_code_status_id (country_code,status,id DESC) +) ENGINE=InnoDB; + +INSERT INTO t1(id,title,status,country_code) +SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500; + +--echo # Must not use index_merge: +EXPLAIN +SELECT * FROM t1 WHERE country_code ='C1' and status = 1; + +set @trace= (select JSON_EXTRACT(trace, '$**.range_scan_alternatives[*]') + from INFORMATION_SCHEMA.OPTIMIZER_TRACE); +select json_detailed(json_extract(@trace, '$[*].index')) as INDEXES; +select json_detailed(json_extract(@trace, '$[*].rowid_ordered')) as ROR; + +DROP TABLE t1; + +SET sort_buffer_size= @save_sort_buffer_size; + + diff --git a/mysql-test/main/json_debug_nonembedded_noasan.result b/mysql-test/main/json_debug_nonembedded.result similarity index 75% rename from mysql-test/main/json_debug_nonembedded_noasan.result rename to mysql-test/main/json_debug_nonembedded.result index 0e7458edd9f..57d823dbc97 100644 --- a/mysql-test/main/json_debug_nonembedded_noasan.result +++ b/mysql-test/main/json_debug_nonembedded.result @@ -3,6 +3,8 @@ # SET @saved_dbug = @@debug_dbug; SET debug_dbug='+d,json_check_min_stack_requirement'; +SELECT * from JSON_TABLE('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt; +ERROR HY000: Thread stack overrun: 'used bytes' used of a 'available' byte stack, and 'X' bytes needed. Consider increasing the thread_stack system variable. 
SET @json1= '{"key1":"val1"}'; SET @json2= '{"key1":"val1"}'; SELECT JSON_OVERLAPS(@json1, @json2); diff --git a/mysql-test/main/json_debug_nonembedded_noasan.test b/mysql-test/main/json_debug_nonembedded.test similarity index 76% rename from mysql-test/main/json_debug_nonembedded_noasan.test rename to mysql-test/main/json_debug_nonembedded.test index 68fb37bf006..73b50f5dde8 100644 --- a/mysql-test/main/json_debug_nonembedded_noasan.test +++ b/mysql-test/main/json_debug_nonembedded.test @@ -1,6 +1,5 @@ -- source include/not_embedded.inc --source include/have_debug.inc ---source include/not_asan.inc --echo # --echo # MDEV-28762: recursive call of some json functions without stack control @@ -9,6 +8,10 @@ SET @saved_dbug = @@debug_dbug; SET debug_dbug='+d,json_check_min_stack_requirement'; +--replace_regex /overrun: [0-9]* bytes used of a [0-9]* byte stack, and [0-9]* bytes needed/overrun: 'used bytes' used of a 'available' byte stack, and 'X' bytes needed/ +--error ER_STACK_OVERRUN_NEED_MORE +SELECT * from JSON_TABLE('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt; + SET @json1= '{"key1":"val1"}'; SET @json2= '{"key1":"val1"}'; diff --git a/mysql-test/main/lock_sync.result b/mysql-test/main/lock_sync.result index 69958811d68..8398b54b465 100644 --- a/mysql-test/main/lock_sync.result +++ b/mysql-test/main/lock_sync.result @@ -840,28 +840,3 @@ SET DEBUG_SYNC="RESET"; disconnect con1; disconnect con2; DROP TABLES t1, t2; -# -# MDEV-28567 Assertion `0' in open_tables upon function-related operation -# -CREATE TABLE t1 (a INT); -CREATE TABLE t2 (b INT); -CREATE TRIGGER tr1 BEFORE INSERT ON t1 FOR EACH ROW UPDATE t2 SET b = 0; -CREATE TRIGGER tr2 BEFORE INSERT ON t2 FOR EACH ROW UPDATE t1 SET a = 6; -CREATE VIEW v1 AS SELECT * FROM t1; -SET AUTOCOMMIT=OFF; -SELECT * FROM t1; -a -DROP TRIGGER tr1; -INSERT INTO t2 SELECT * FROM t2; -SELECT f() FROM t2; -ERROR 42000: FUNCTION test.f does not exist -set debug_sync= 
'after_open_table_mdl_shared signal s1'; -ALTER VIEW v1 AS SELECT f() FROM t1; -CREATE FUNCTION f() RETURNS INT RETURN 1; -set debug_sync= 'now wait_for s1'; -SELECT * FROM ( SELECT * FROM v1 ) sq; -COMMIT; -DROP VIEW v1; -DROP FUNCTION IF EXISTS f; -DROP TABLE t1, t2; -set debug_sync= 'reset'; diff --git a/mysql-test/main/lock_sync.test b/mysql-test/main/lock_sync.test index 844d00d3a33..ed958f1b084 100644 --- a/mysql-test/main/lock_sync.test +++ b/mysql-test/main/lock_sync.test @@ -1079,123 +1079,3 @@ DROP TABLES t1, t2; # Check that all connections opened by test cases in this file are really # gone so execution of other tests won't be affected by their presence. --source include/wait_until_count_sessions.inc - ---echo # ---echo # MDEV-28567 Assertion `0' in open_tables upon function-related operation ---echo # -# To get MDL trace run this case like this: -# mtr --mysqld=--debug-dbug=d,mdl,query:i:o,/tmp/mdl.log ... -# Cleanup trace like this: -# sed -i -re '/(mysql|performance_schema|sys|mtr)\// d; /MDL_BACKUP_|MDL_INTENTION_/ d; /\/(t2|tr1|tr2)/ d' /tmp/mdl.log - -CREATE TABLE t1 (a INT); -CREATE TABLE t2 (b INT); -CREATE TRIGGER tr1 BEFORE INSERT ON t1 FOR EACH ROW UPDATE t2 SET b = 0; -CREATE TRIGGER tr2 BEFORE INSERT ON t2 FOR EACH ROW UPDATE t1 SET a = 6; -CREATE VIEW v1 AS SELECT * FROM t1; - -SET AUTOCOMMIT=OFF; -SELECT * FROM t1; -# T@6 -# Seized: test/t1 (MDL_SHARED_READ) - ---connect (con1,localhost,root,,test) ---send - DROP TRIGGER tr1; -# T@7 -# Seized: test/t1 (MDL_SHARED_NO_WRITE) -# Waiting: test/t1 (MDL_EXCLUSIVE) -# Waiting: test/t1 (MDL_SHARED_WRITE) -# Deadlock: test/t1 (MDL_SHARED_WRITE) - ---connection default ---error 0, ER_LOCK_DEADLOCK -INSERT INTO t2 SELECT * FROM t2; -# T@6 -# Released: test/t1 (MDL_SHARED_READ) -# T@7 -# Acquired: test/t1 (MDL_EXCLUSIVE) (good) ---error ER_SP_DOES_NOT_EXIST -SELECT f() FROM t2; -# T@6 -# Seized: test/f (MDL_SHARED) -# T@7 -# Released: test/t1 (MDL_EXCLUSIVE) -# Good1: continue T@6 below -# Bad1: 
continue T@8 below - -# Now we hold test/f, the below code creates concurrent -# waiting of 3 threads for test/f which leads to deadlock (Bad) - -# To achive Good comment out 'now wait_for s1' below and run multiple times. - ---connect (con2,localhost,root,,test) -set debug_sync= 'after_open_table_mdl_shared signal s1'; ---send - ALTER VIEW v1 AS SELECT f() FROM t1; -# T@8 -# Good2: Waiting: test/v1 (MDL_EXCLUSIVE) -# Good2-3: continue T@7 below -# Good5: Acquired: test/v1 (MDL_EXCLUSIVE) -# Good5: Seized: test/v1 (MDL_EXCLUSIVE) -# Good5-6: continue T@7 below -# Good7: Seized: test/t1 (MDL_SHARED_READ) -# Good7: Waiting: test/f (MDL_SHARED) -# Good7-8: continue T@7 below -# Good9: Acquired: test/f (MDL_SHARED) -# Good9: Released: test/f (MDL_SHARED) -# Good9: Released: test/t1 (MDL_SHARED_READ) -# Good9: Released: test/v1 (MDL_EXCLUSIVE) -# Good9: command finished without error -# Bad1: Seized: test/v1 (MDL_EXCLUSIVE) -# Bad1: Seized: test/v1 (MDL_EXCLUSIVE) -# Bad1: Seized: test/t1 (MDL_SHARED_READ) -# Bad1-2: continue T@6 below -# Bad4: Waiting: test/f (MDL_SHARED) -# Bad4: Deadlock: test/f (MDL_SHARED) -# Bad4: command finished with error - ---connection con1 ---reap ---send -CREATE FUNCTION f() RETURNS INT RETURN 1; -# T@7 -# Good3: Waiting: test/f (MDL_EXCLUSIVE) -# Good3-4: continue T@6 below -# Good6: Acquired: test/f (MDL_EXCLUSIVE) -# Good6-7: continue T@8 above -# Good8: Released: test/f (MDL_EXCLUSIVE) -# Good8-9: continue T@8 above -# Bad3: Waiting: test/f (MDL_EXCLUSIVE) -# Bad3-4: continue T@8 above - ---connection default -set debug_sync= 'now wait_for s1'; ---disable_result_log -SELECT * FROM ( SELECT * FROM v1 ) sq; ---enable_result_log -# T@6 -# Good1: Seized: test/v1 (MDL_SHARED_READ) -# Good1-2: continue T@8 above -# Good4: Seized: test/t1 (MDL_SHARED_READ) -# Bad2: Waiting: test/v1 (MDL_SHARED_READ) -# Bad2-3: continue T@7 above - -# Cleanup -COMMIT; -# Good4: Released: test/t1 (MDL_SHARED_READ) -# Good4: Released: test/v1 (MDL_SHARED_READ) -# 
Good4: Released: test/f (MDL_SHARED) -# Good4-5: continue T@8 above - ---connection con2 ---error 0, ER_SP_DOES_NOT_EXIST ---reap ---disconnect con1 ---disconnect con2 ---connection default ---source include/wait_until_count_sessions.inc -DROP VIEW v1; -DROP FUNCTION IF EXISTS f; -DROP TABLE t1, t2; -set debug_sync= 'reset'; diff --git a/mysql-test/main/log_state.result b/mysql-test/main/log_state.result index 3172be9818f..bed4c7369bd 100644 --- a/mysql-test/main/log_state.result +++ b/mysql-test/main/log_state.result @@ -313,6 +313,137 @@ disconnect con2; disconnect con1; connection default; drop procedure p1; +# +# MDEV-35353 Rows_examined is always 0 in the slow query log +# for union all queries +# +SET GLOBAL log_output = "TABLE"; +SET GLOBAL slow_query_log = ON; +SET GLOBAL long_query_time = 0.0; +TRUNCATE TABLE mysql.slow_log; +create table t1(a int, b int); +insert into t1 select seq, seq from seq_1_to_20; +connect con2,localhost,root,,; +select sum(a) from t1 +union all +select sum(b) from t1; +sum(a) +210 +210 +SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%union all%'; +rows_examined sql_text +40 select sum(a) from t1 +union all +select sum(b) from t1 +TRUNCATE TABLE mysql.slow_log; +select sum(a) from t1 +union +select sum(b) from t1; +sum(a) +210 +SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%union%'; +rows_examined sql_text +41 select sum(a) from t1 +union +select sum(b) from t1 +TRUNCATE TABLE mysql.slow_log; +select sum(a) from t1 +except all +select sum(b) from t1; +sum(a) +SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%except all%'; +rows_examined sql_text +40 select sum(a) from t1 +except all +select sum(b) from t1 +TRUNCATE TABLE mysql.slow_log; +select sum(a) from t1 +except +select sum(b) from t1; +sum(a) +SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%except%'; +rows_examined sql_text +40 select sum(a) from t1 +except +select sum(b) from t1 
+TRUNCATE TABLE mysql.slow_log; +select sum(a) from t1 +intersect all +select sum(b) from t1; +sum(a) +210 +SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%intersect all%'; +rows_examined sql_text +41 select sum(a) from t1 +intersect all +select sum(b) from t1 +TRUNCATE TABLE mysql.slow_log; +select sum(a) from t1 +intersect +select sum(b) from t1; +sum(a) +210 +SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%intersect%'; +rows_examined sql_text +41 select sum(a) from t1 +intersect +select sum(b) from t1 +disconnect con2; +connection default; +DROP TABLE t1; +TRUNCATE TABLE mysql.slow_log; +# +# MDEV-37195 Rows_examined is always 0 in the slow query log +# for queries with a subquery and degenerate select +# +SET GLOBAL log_output = "TABLE"; +SET GLOBAL slow_query_log = ON; +SET GLOBAL long_query_time = 0.0; +TRUNCATE TABLE mysql.slow_log; +CREATE TABLE t1 (id INT); +INSERT INTO t1(id) SELECT seq FROM seq_1_to_10; +connect con2,localhost,root,,; +SELECT 100 in (SELECT id FROM t1) AS res; +res +0 +SELECT rows_examined,sql_text FROM mysql.slow_log WHERE sql_text LIKE '%SELECT 100%'; +rows_examined sql_text +10 SELECT 100 in (SELECT id FROM t1) AS res +TRUNCATE TABLE mysql.slow_log; +SELECT 100 in ( +SELECT id FROM t1 +UNION +SELECT id FROM t1 +) AS res; +res +0 +SELECT rows_examined,sql_text FROM mysql.slow_log WHERE sql_text LIKE '%SELECT 100%'; +rows_examined sql_text +20 SELECT 100 in ( +SELECT id FROM t1 +UNION +SELECT id FROM t1 +) AS res +TRUNCATE TABLE mysql.slow_log; +SELECT 100 in ( +SELECT id FROM t1 +UNION ALL +SELECT id FROM t1 +) AS res; +res +0 +SELECT rows_examined,sql_text FROM mysql.slow_log WHERE sql_text LIKE '%SELECT 100%'; +rows_examined sql_text +20 SELECT 100 in ( +SELECT id FROM t1 +UNION ALL +SELECT id FROM t1 +) AS res +disconnect con2; +connection default; +DROP TABLE t1; +TRUNCATE TABLE mysql.slow_log; +End of 10.11 tests SET GLOBAL long_query_time = @save_long_query_time; SET GLOBAL 
log_output = @old_log_output; SET global general_log = @old_general_log; diff --git a/mysql-test/main/log_state.test b/mysql-test/main/log_state.test index 9686ace3f4b..68610db4738 100644 --- a/mysql-test/main/log_state.test +++ b/mysql-test/main/log_state.test @@ -3,6 +3,7 @@ --source include/not_embedded.inc --source include/have_csv.inc +--source include/have_sequence.inc call mtr.add_suppression("options .* --log_slow_queries is not set"); @@ -370,6 +371,130 @@ disconnect con1; connection default; drop procedure p1; +########################################################################### + +--echo # +--echo # MDEV-35353 Rows_examined is always 0 in the slow query log +--echo # for union all queries +--echo # + +SET GLOBAL log_output = "TABLE"; +SET GLOBAL slow_query_log = ON; +SET GLOBAL long_query_time = 0.0; + +# clear slow_log of any residual slow queries +TRUNCATE TABLE mysql.slow_log; +create table t1(a int, b int); +insert into t1 select seq, seq from seq_1_to_20; + +connect (con2,localhost,root,,); +--disable_ps_protocol +select sum(a) from t1 +union all +select sum(b) from t1; + +SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%union all%'; + +TRUNCATE TABLE mysql.slow_log; + +select sum(a) from t1 +union +select sum(b) from t1; + +SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%union%'; + +TRUNCATE TABLE mysql.slow_log; + +select sum(a) from t1 +except all +select sum(b) from t1; + +SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%except all%'; + +TRUNCATE TABLE mysql.slow_log; + +select sum(a) from t1 +except +select sum(b) from t1; + +SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%except%'; + +TRUNCATE TABLE mysql.slow_log; + +select sum(a) from t1 +intersect all +select sum(b) from t1; + +SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%intersect all%'; + +TRUNCATE TABLE mysql.slow_log; + +select sum(a) from t1 +intersect +select 
sum(b) from t1; + +SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%intersect%'; + +disconnect con2; +connection default; +DROP TABLE t1; + +TRUNCATE TABLE mysql.slow_log; + +########################################################################### + +--echo # +--echo # MDEV-37195 Rows_examined is always 0 in the slow query log +--echo # for queries with a subquery and degenerate select +--echo # + +SET GLOBAL log_output = "TABLE"; +SET GLOBAL slow_query_log = ON; +SET GLOBAL long_query_time = 0.0; + +# clear slow_log of any residual slow queries +TRUNCATE TABLE mysql.slow_log; + +CREATE TABLE t1 (id INT); + +INSERT INTO t1(id) SELECT seq FROM seq_1_to_10; + +connect (con2,localhost,root,,); +--disable_ps_protocol + +SELECT 100 in (SELECT id FROM t1) AS res; + +SELECT rows_examined,sql_text FROM mysql.slow_log WHERE sql_text LIKE '%SELECT 100%'; + +TRUNCATE TABLE mysql.slow_log; + +SELECT 100 in ( + SELECT id FROM t1 + UNION + SELECT id FROM t1 +) AS res; + +SELECT rows_examined,sql_text FROM mysql.slow_log WHERE sql_text LIKE '%SELECT 100%'; + +TRUNCATE TABLE mysql.slow_log; + +SELECT 100 in ( + SELECT id FROM t1 + UNION ALL + SELECT id FROM t1 +) AS res; + +SELECT rows_examined,sql_text FROM mysql.slow_log WHERE sql_text LIKE '%SELECT 100%'; + +disconnect con2; +connection default; +DROP TABLE t1; + +TRUNCATE TABLE mysql.slow_log; + +--echo End of 10.11 tests +########################################################################### + # Reset global system variables to initial values if forgotten somewhere above. 
SET GLOBAL long_query_time = @save_long_query_time; SET GLOBAL log_output = @old_log_output; diff --git a/mysql-test/main/log_tables.result b/mysql-test/main/log_tables.result index 005b8913f10..b24ae3c1657 100644 --- a/mysql-test/main/log_tables.result +++ b/mysql-test/main/log_tables.result @@ -1022,6 +1022,37 @@ select 'evil-doing', sleep(1.1) select 'after evil-doing', sleep(0.2) set global log_output=default; drop user u@localhost; +# End of 10.5 tests +# +# MDEV-34928 CREATE TABLE does not check valid engine for log tables +# +set global general_log='on'; +show create table mysql.general_log; +Table Create Table +general_log CREATE TABLE `general_log` ( + `event_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6), + `user_host` mediumtext NOT NULL, + `thread_id` bigint(21) unsigned NOT NULL, + `server_id` int(10) unsigned NOT NULL, + `command_type` varchar(64) NOT NULL, + `argument` mediumtext NOT NULL +) ENGINE=CSV DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci COMMENT='General log' +create or replace table mysql.general_log (a int) engine=innodb; +ERROR HY000: Storage engine InnoDB cannot be used for log tables +create or replace table mysql.slow_log (a int) engine=innodb; +ERROR HY000: Storage engine InnoDB cannot be used for log tables +create temporary table t (c int) engine=innodb; +insert into t values (1); +set global log_output='table'; +set session autocommit=0; +update t set c=0; +truncate t; +select a; +ERROR 42S22: Unknown column 'a' in 'SELECT' +drop temporary table t; +set @@global.log_output= @old_log_output; +set @@global.general_log= @old_general_log; +# End of 10.6 tests SET @@global.log_output= @old_log_output; SET @@global.slow_query_log= @old_slow_query_log; SET @@global.general_log= @old_general_log; diff --git a/mysql-test/main/log_tables.test b/mysql-test/main/log_tables.test index cb07d006dd7..7ca30fe06b9 100644 --- a/mysql-test/main/log_tables.test +++ b/mysql-test/main/log_tables.test @@ -2,6 
+2,7 @@ -- source include/not_embedded.inc --source include/have_csv.inc +--source include/have_innodb.inc SET SQL_MODE=""; SET @old_log_output= @@global.log_output; @@ -1060,6 +1061,31 @@ set global log_output=default; drop user u@localhost; --enable_cursor_protocol +--echo # End of 10.5 tests + +--echo # +--echo # MDEV-34928 CREATE TABLE does not check valid engine for log tables +--echo # +set global general_log='on'; +show create table mysql.general_log; +--error ER_UNSUPORTED_LOG_ENGINE +create or replace table mysql.general_log (a int) engine=innodb; +--error ER_UNSUPORTED_LOG_ENGINE +create or replace table mysql.slow_log (a int) engine=innodb; +create temporary table t (c int) engine=innodb; +insert into t values (1); +set global log_output='table'; +set session autocommit=0; +update t set c=0; +truncate t; +--error ER_BAD_FIELD_ERROR +select a; +drop temporary table t; +set @@global.log_output= @old_log_output; +set @@global.general_log= @old_general_log; + +--echo # End of 10.6 tests + SET @@global.log_output= @old_log_output; SET @@global.slow_query_log= @old_slow_query_log; SET @@global.general_log= @old_general_log; diff --git a/mysql-test/main/long_unique.result b/mysql-test/main/long_unique.result index e273c9e2238..272500a4a8e 100644 --- a/mysql-test/main/long_unique.result +++ b/mysql-test/main/long_unique.result @@ -1242,6 +1242,7 @@ t1 CREATE TABLE `t1` ( insert into t1 value(concat(repeat('s',3000),'1')); insert into t1 value(concat(repeat('s',3000),'2')); ERROR 23000: Duplicate entry 'sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss...' 
for key 'a' +update t1 set a= concat(repeat('s',3000),'2'); insert into t1 value(concat(repeat('a',3000),'2')); drop table t1; create table t1(a varchar(4000), b longblob , c varchar(5000), d longblob, diff --git a/mysql-test/main/long_unique.test b/mysql-test/main/long_unique.test index 14e83c50a58..f49f14c1984 100644 --- a/mysql-test/main/long_unique.test +++ b/mysql-test/main/long_unique.test @@ -405,6 +405,7 @@ show create table t1; insert into t1 value(concat(repeat('s',3000),'1')); --error ER_DUP_ENTRY insert into t1 value(concat(repeat('s',3000),'2')); +update t1 set a= concat(repeat('s',3000),'2'); insert into t1 value(concat(repeat('a',3000),'2')); drop table t1; diff --git a/mysql-test/main/long_unique_bugs.result b/mysql-test/main/long_unique_bugs.result index 9059168576f..6094ff09808 100644 --- a/mysql-test/main/long_unique_bugs.result +++ b/mysql-test/main/long_unique_bugs.result @@ -356,6 +356,7 @@ ERROR 42000: Specified key was too long; max key length is 2300 bytes # create table t1(a int, unique(a) using hash); #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES) +insert into t1 values(200),(150),(149),(148),(147),(146),(145),(144),(143),(142),(141),(140),(139),(138),(137),(136),(135),(134),(133),(132),(131),(130),(129),(128),(127),(126),(125),(124),(123),(122),(121),(120),(119),(118),(117),(116),(115),(114),(113),(112),(111),(110),(109),(108),(107),(106),(105),(104),(103),(102),(101),(100),(99),(98),(97),(96),(95),(94),(93),(92),(91),(90),(89),(88),(87),(86),(85),(84),(83),(82),(81),(80),(79),(78),(77),(76),(75),(74),(73),(72),(71),(70),(69),(68),(67),(66),(65),(64),(63),(62),(61),(60),(59),(58),(57),(56),(55),(54),(53),(52),(51),(50),(49),(48),(47),(46),(45),(44),(43),(42),(41),(40),(39),(38),(37),(36),(35),(34),(33),(32),(31),(30),(29),(28),(27),(26),(25),(24),(23),(22),(21),(20),(19),(18),(17),(16),(15),(14),(13),(12),(11),(10),(9),(8),(7),(6),(5),(4),(3),(2),(1); drop table t1; # # MDEV-21804 Assertion `marked_for_read()' failed upon 
INSERT into table with long unique blob under binlog_row_image=NOBLOB @@ -809,3 +810,36 @@ hex(c1) hex(c2) c3 hex(c4) NULL NULL NULL NULL drop table t1; # End of 10.5 tests +# +# MDEV-36852 Table definition gets corrupt after adding unique hash key +# +create table t1 (a text, b int, foreign key(a) references x(x)) engine=myisam; +Warnings: +Note 1071 Specified key was too long; max key length is 1000 bytes +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` text DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `a` (`a`(250)) +) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci +alter table t1 add unique(a), add key(a); +Warnings: +Note 1071 Specified key was too long; max key length is 1000 bytes +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` text DEFAULT NULL, + `b` int(11) DEFAULT NULL, + UNIQUE KEY `a` (`a`) USING HASH, + KEY `a_2` (`a`(250)) +) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci +drop table t1; +# +# MDEV-37203 UBSAN: applying zero offset to null pointer in strings/ctype-uca.inl | my_uca_strnncollsp_onelevel_utf8mb4 | handler::check_duplicate_long_entries_update +# +create table t (a int,b text unique key); +insert into t (a) values (1); +update t set a=2; +drop table t; +# End of 10.6 tests diff --git a/mysql-test/main/long_unique_bugs.test b/mysql-test/main/long_unique_bugs.test index 6808afb01d7..676d239b20e 100644 --- a/mysql-test/main/long_unique_bugs.test +++ b/mysql-test/main/long_unique_bugs.test @@ -332,17 +332,8 @@ CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria; --echo # MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes --echo # create table t1(a int, unique(a) using hash); ---let $count=150 ---let insert_stmt= insert into t1 values(200) -while ($count) -{ - --let $insert_stmt=$insert_stmt,($count) - --dec $count -} ---disable_query_log --echo #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES) 
---eval $insert_stmt ---enable_query_log +insert into t1 values(200),(150),(149),(148),(147),(146),(145),(144),(143),(142),(141),(140),(139),(138),(137),(136),(135),(134),(133),(132),(131),(130),(129),(128),(127),(126),(125),(124),(123),(122),(121),(120),(119),(118),(117),(116),(115),(114),(113),(112),(111),(110),(109),(108),(107),(106),(105),(104),(103),(102),(101),(100),(99),(98),(97),(96),(95),(94),(93),(92),(91),(90),(89),(88),(87),(86),(85),(84),(83),(82),(81),(80),(79),(78),(77),(76),(75),(74),(73),(72),(71),(70),(69),(68),(67),(66),(65),(64),(63),(62),(61),(60),(59),(58),(57),(56),(55),(54),(53),(52),(51),(50),(49),(48),(47),(46),(45),(44),(43),(42),(41),(40),(39),(38),(37),(36),(35),(34),(33),(32),(31),(30),(29),(28),(27),(26),(25),(24),(23),(22),(21),(20),(19),(18),(17),(16),(15),(14),(13),(12),(11),(10),(9),(8),(7),(6),(5),(4),(3),(2),(1); drop table t1; --echo # @@ -756,3 +747,23 @@ select hex(c1), hex(c2), c3, hex(c4) from t1; drop table t1; --echo # End of 10.5 tests + +--echo # +--echo # MDEV-36852 Table definition gets corrupt after adding unique hash key +--echo # + +create table t1 (a text, b int, foreign key(a) references x(x)) engine=myisam; +show create table t1; +alter table t1 add unique(a), add key(a); +show create table t1; +drop table t1; + +--echo # +--echo # MDEV-37203 UBSAN: applying zero offset to null pointer in strings/ctype-uca.inl | my_uca_strnncollsp_onelevel_utf8mb4 | handler::check_duplicate_long_entries_update +--echo # +create table t (a int,b text unique key); +insert into t (a) values (1); +update t set a=2; +drop table t; + +--echo # End of 10.6 tests diff --git a/mysql-test/main/long_unique_innodb.result b/mysql-test/main/long_unique_innodb.result index 032066d1bf4..02e8e289047 100644 --- a/mysql-test/main/long_unique_innodb.result +++ b/mysql-test/main/long_unique_innodb.result @@ -137,3 +137,39 @@ disconnect con2; # MDEV-20131 Assertion `!pk->has_virtual()' failed create table t1 (a text, primary key(a(1871))) 
engine=innodb; ERROR 42000: Specified key was too long; max key length is 1536 bytes +# End of 10.4 tests +# +# MDEV-37268 ER_NOT_KEYFILE or assertion failure upon REPLACE into table with unique hash under READ-COMMITTED +# +create table t1 (id int not null primary key, f varchar(100), unique(f) using hash) engine=innodb; +insert t1 values (1,'x'); +set transaction isolation level read committed; +replace t1 values (2,'x'); +select * from t1; +id f +2 x +drop table t1; +create table t1 (id int, f longtext, primary key (id), unique(f)) engine=innodb partition by key (id) partitions 9; +insert t1 (id) values (1),(2); +set transaction isolation level read committed; +update ignore t1 set f = 'x'; +ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported +select * from t1; +id f +1 NULL +2 NULL +drop table t1; +# +# MDEV-37310 Non-debug failing assertion node->pcur->rel_pos == BTR_PCUR_ON upon violating long unique under READ-COMMITTED +# +create table t1 (id int, f blob, unique(id,f)) engine=innodb partition by key(id) partitions 2; +insert t1 values (1,'foo'),(2,'foo'); +set transaction isolation level read committed; +update ignore t1 set id = 2; +ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported +select * from t1; +id f +1 foo +2 foo +drop table t1; +# End of 10.6 tests diff --git a/mysql-test/main/long_unique_innodb.test b/mysql-test/main/long_unique_innodb.test index 9afde7f2b8a..8b68f0be595 100644 --- a/mysql-test/main/long_unique_innodb.test +++ b/mysql-test/main/long_unique_innodb.test @@ -1,4 +1,5 @@ --source include/have_innodb.inc +--source include/have_partition.inc # # MDEV-371 Unique indexes for blobs @@ -144,3 +145,36 @@ disconnect con2; --error ER_TOO_LONG_KEY create table t1 (a text, primary key(a(1871))) engine=innodb; + +--echo # End of 10.4 tests + +--echo # +--echo # MDEV-37268 
ER_NOT_KEYFILE or assertion failure upon REPLACE into table with unique hash under READ-COMMITTED +--echo # +create table t1 (id int not null primary key, f varchar(100), unique(f) using hash) engine=innodb; +insert t1 values (1,'x'); +set transaction isolation level read committed; +replace t1 values (2,'x'); +select * from t1; +drop table t1; + +create table t1 (id int, f longtext, primary key (id), unique(f)) engine=innodb partition by key (id) partitions 9; +insert t1 (id) values (1),(2); +set transaction isolation level read committed; +--error ER_NOT_SUPPORTED_YET +update ignore t1 set f = 'x'; +select * from t1; +drop table t1; + +--echo # +--echo # MDEV-37310 Non-debug failing assertion node->pcur->rel_pos == BTR_PCUR_ON upon violating long unique under READ-COMMITTED +--echo # +create table t1 (id int, f blob, unique(id,f)) engine=innodb partition by key(id) partitions 2; +insert t1 values (1,'foo'),(2,'foo'); +set transaction isolation level read committed; +--error ER_NOT_SUPPORTED_YET +update ignore t1 set id = 2; +select * from t1; +drop table t1; + +--echo # End of 10.6 tests diff --git a/mysql-test/main/long_unique_innodb_debug.result b/mysql-test/main/long_unique_innodb_debug.result new file mode 100644 index 00000000000..497f2af39bf --- /dev/null +++ b/mysql-test/main/long_unique_innodb_debug.result @@ -0,0 +1,255 @@ +# +# MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records +# +## INSERT +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +# Keep a Read View open to prevent purge +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +# Create delete marked secondary index Record ('a', 10) +insert t1 values(10, 'a'); +delete from t1; +# Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +insert t1 
values(15, 'a'); +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +# Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10) +set transaction isolation level read committed; +insert t1 values(5, 'a'); +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 23000: Duplicate entry 'a' for key 'col2' +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +5 a +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## INSERT, row-level locking proof +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +# Keep a Read View open to prevent purge +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +# Create delete marked secondary index Record ('a', 10) +insert t1 values(10, 'a'); +delete from t1; +# Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum +set transaction isolation level read committed; +set debug_sync="ha_write_row_end SIGNAL checked_duplicate WAIT_FOR do_insert"; +insert t1 values(15, 'a'); +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +# Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10) +set session innodb_lock_wait_timeout= 1; +set transaction isolation level read committed; +insert t1 values(5, 'a'); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +set debug_sync="now SIGNAL do_insert"; +connection con1; +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +15 a +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); 
+insert into t1 values(15, 'c'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update t1 set col2='a' where col1=5; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 23000: Duplicate entry 'a' for key 'col2' +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +5 b +15 a +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## INSERT IGNORE +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +insert t1 values(10, 'a'); +delete from t1; +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +insert ignore t1 values(15, 'a'), (16, 'b'); +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +insert t1 values(5, 'a'); +set debug_sync="now SIGNAL do_insert"; +connection con1; +Warnings: +Warning 1062 Duplicate entry 'a' for key 'col2' +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +5 a +16 b +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE IGNORE +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +insert into t1 values( 9, 'd'); +set transaction isolation level read committed; +set 
debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update ignore t1 set col2=chr(92+col1) where col1<=9; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +5 b +9 d +15 a +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE modifying PK +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update t1 set col2='a', col1=4 where col1=5; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 23000: Duplicate entry 'a' for key 'col2' +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +5 b +15 a +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE IGNORE modifying PK +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); 
+insert into t1 values( 9, 'd'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update ignore t1 set col2=chr(92+col1), col1=col1-1 where col1<=9; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +5 b +9 d +15 a +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +# End of 10.6 tests diff --git a/mysql-test/main/long_unique_innodb_debug.test b/mysql-test/main/long_unique_innodb_debug.test new file mode 100644 index 00000000000..d1a0673b54a --- /dev/null +++ b/mysql-test/main/long_unique_innodb_debug.test @@ -0,0 +1,242 @@ +--source include/have_innodb.inc +--source include/have_debug_sync.inc + +--echo # +--echo # MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records +--echo # + +--disable_view_protocol +--echo ## INSERT +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +--echo # Keep a Read View open to prevent purge +start transaction; +select * from t1; +--connect con1,localhost,root +--echo # Create delete marked secondary index Record ('a', 10) +insert t1 values(10, 'a'); +delete from t1; +--echo # Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send insert t1 values(15, 'a') +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +--echo # Insert secondary index key ('a', 5) in the GAP 
between Infimum and ('a', 10) +set transaction isolation level read committed; +insert t1 values(5, 'a'); +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_DUP_ENTRY +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## INSERT, row-level locking proof +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +--echo # Keep a Read View open to prevent purge +start transaction; +select * from t1; + +--connect con1,localhost,root +--echo # Create delete marked secondary index Record ('a', 10) +insert t1 values(10, 'a'); +delete from t1; +--echo # Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum +set transaction isolation level read committed; +set debug_sync="ha_write_row_end SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send insert t1 values(15, 'a') + +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +--echo # Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10) +set session innodb_lock_wait_timeout= 1; +set transaction isolation level read committed; +--error ER_LOCK_WAIT_TIMEOUT +insert t1 values(5, 'a'); +set debug_sync="now SIGNAL do_insert"; + +--connection con1 +--reap + +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +--connect con1,localhost,root +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update t1 set 
col2='a' where col1=5 +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_DUP_ENTRY +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## INSERT IGNORE +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, 'a'); +delete from t1; +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send insert ignore t1 values(15, 'a'), (16, 'b') +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +insert t1 values(5, 'a'); +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE IGNORE +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +--connect con1,localhost,root +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +insert into t1 values( 9, 'd'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update ignore t1 set col2=chr(92+col1) where col1<=9 +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL 
do_insert"; +--connection con1 +--error ER_NOT_SUPPORTED_YET +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE modifying PK +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +--connect con1,localhost,root +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update t1 set col2='a', col1=4 where col1=5 +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_DUP_ENTRY +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE IGNORE modifying PK +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +--connect con1,localhost,root +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +insert into t1 values( 9, 'd'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update ignore t1 set col2=chr(92+col1), col1=col1-1 where col1<=9 +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_NOT_SUPPORTED_YET 
+--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; +--enable_view_protocol + +--echo # End of 10.6 tests diff --git a/mysql-test/main/lowercase_fs_off.result b/mysql-test/main/lowercase_fs_off.result index 40e608fa620..e70c29593c9 100644 --- a/mysql-test/main/lowercase_fs_off.result +++ b/mysql-test/main/lowercase_fs_off.result @@ -274,6 +274,23 @@ CREATE TABLE t1 (a mariadb_schema.date); DROP TABLE t1; # End of 10.3 tests # +# MDEV-36979 Same alias name with different case on same table is not working in functions +# +create table t1 (a int); +insert t1 values (1); +create table T1 (b int); +insert T1 values (10); +create function t1test(val int) returns int +begin +return (select a from t1 where exists (select b from T1 where b > val)); +end// +select t1test(1); +t1test(1) +1 +drop function t1test; +drop table t1, T1; +# End of 10.11 tests +# # MDEV-32973 SHOW TABLES LIKE shows temporary tables with non-matching names # create temporary table t1 (a int); diff --git a/mysql-test/main/lowercase_fs_off.test b/mysql-test/main/lowercase_fs_off.test index 4bff916db83..54de923ddaf 100644 --- a/mysql-test/main/lowercase_fs_off.test +++ b/mysql-test/main/lowercase_fs_off.test @@ -149,6 +149,25 @@ DROP TABLE t1; --echo # End of 10.3 tests +--echo # +--echo # MDEV-36979 Same alias name with different case on same table is not working in functions +--echo # +create table t1 (a int); +insert t1 values (1); +create table T1 (b int); +insert T1 values (10); +delimiter //; +create function t1test(val int) returns int +begin + return (select a from t1 where exists (select b from T1 where b > val)); +end// +delimiter ;// +select t1test(1); +drop function t1test; +drop table t1, T1; + +--echo # End of 10.11 tests + --echo # --echo # MDEV-32973 SHOW TABLES LIKE shows temporary tables with non-matching names --echo # diff --git a/mysql-test/main/mdl_sync.result 
b/mysql-test/main/mdl_sync.result index c614548359e..287d52c08a0 100644 --- a/mysql-test/main/mdl_sync.result +++ b/mysql-test/main/mdl_sync.result @@ -3015,3 +3015,34 @@ connection default; SET debug_sync='RESET'; DROP TABLE t1; disconnect con1; +# +# MDEV-28567 Assertion `0' in open_tables upon function-related operation +# +CREATE VIEW v1 AS SELECT 1; +CREATE FUNCTION f1() RETURNS INT RETURN (SELECT * FROM v1); +connect con1, localhost, root; +SET debug_sync='open_and_process_table SIGNAL ready1 WAIT_FOR go1'; +ALTER VIEW v1 AS SELECT f1(); +connect con2, localhost, root; +SET debug_sync='now WAIT_FOR ready1'; +SET debug_sync='mdl_after_find_deadlock SIGNAL ready2'; +SELECT f1(); +connect con3, localhost, root; +SET debug_sync='now WAIT_FOR ready2'; +SET debug_sync='mdl_after_find_deadlock SIGNAL ready3'; +DROP FUNCTION f1; +connection default; +SET debug_sync='now WAIT_FOR ready3'; +SET debug_sync='now SIGNAL go1'; +connection con3; +disconnect con3; +connection con2; +f1() +1 +disconnect con2; +connection con1; +ERROR 42000: FUNCTION test.f1 does not exist +disconnect con1; +connection default; +DROP VIEW v1; +SET debug_sync='reset'; diff --git a/mysql-test/main/mdl_sync.test b/mysql-test/main/mdl_sync.test index b0fcf911e06..1daa0f42c5f 100644 --- a/mysql-test/main/mdl_sync.test +++ b/mysql-test/main/mdl_sync.test @@ -4010,6 +4010,58 @@ DROP TABLE t1; disconnect con1; + +--echo # +--echo # MDEV-28567 Assertion `0' in open_tables upon function-related operation +--echo # +# +# This test covers deadlock recovery code in open_tables() after +# open_and_process_routine(). ALTER VIEW v1 connection must fall as a deadlock +# victim to hit this code. +# +# default connection cannot be used since it may have deadlock weight spoiled +# by "MDL deadlock overweight" feature from previous deadlocks. In this case +# ALTER VIEW v1 won't fall as a deadlock victim and the test won't cover +# intended code. 
+# +CREATE VIEW v1 AS SELECT 1; +CREATE FUNCTION f1() RETURNS INT RETURN (SELECT * FROM v1); + +connect con1, localhost, root; +SET debug_sync='open_and_process_table SIGNAL ready1 WAIT_FOR go1'; +send ALTER VIEW v1 AS SELECT f1(); # X v1, S f1 + +connect con2, localhost, root; +SET debug_sync='now WAIT_FOR ready1'; +SET debug_sync='mdl_after_find_deadlock SIGNAL ready2'; +send SELECT f1(); # S f1, SR v1 + +connect con3, localhost, root; +SET debug_sync='now WAIT_FOR ready2'; +SET debug_sync='mdl_after_find_deadlock SIGNAL ready3'; +send DROP FUNCTION f1; # X f1 + +connection default; +SET debug_sync='now WAIT_FOR ready3'; +SET debug_sync='now SIGNAL go1'; + +connection con3; +reap; +disconnect con3; + +connection con2; +reap; +disconnect con2; + +connection con1; +--error ER_SP_DOES_NOT_EXIST +reap; +disconnect con1; + +connection default; +DROP VIEW v1; +SET debug_sync='reset'; + # Check that all connections opened by test cases in this file are really # gone so execution of other tests won't be affected by their presence. --source include/wait_until_count_sessions.inc diff --git a/mysql-test/main/mysql-interactive.result b/mysql-test/main/mysql-interactive.result index cdb0931418a..0eb3a53cac7 100644 --- a/mysql-test/main/mysql-interactive.result +++ b/mysql-test/main/mysql-interactive.result @@ -24,3 +24,41 @@ MariaDB [(none)]> select 1; MariaDB [(none)]> exit Bye +# End of 10.4 tests +# +# MDEV-36701 command line client doesn't check session_track information +# +create database db1; +use db1; +drop database db1; +create database db1; +execute immediate "use db1"; +execute immediate "drop database db1"; +exit +Welcome to the MariaDB monitor. Commands end with ; or \g. +Your MariaDB connection id is X +Server version: Y +Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. 
+ +MariaDB [(none)]> create database db1; +Query OK, 1 row affected + +MariaDB [(none)]> use db1; +Database changed +MariaDB [db1]> drop database db1; +Query OK, 0 rows affected + +MariaDB [(none)]> create database db1; +Query OK, 1 row affected + +MariaDB [(none)]> execute immediate "use db1"; +Query OK, 0 rows affected + +MariaDB [db1]> execute immediate "drop database db1"; +Query OK, 0 rows affected + +MariaDB [(none)]> exit +Bye +# End of 10.11 tests diff --git a/mysql-test/main/mysql-interactive.test b/mysql-test/main/mysql-interactive.test index e387f937226..1c8fc6010f7 100644 --- a/mysql-test/main/mysql-interactive.test +++ b/mysql-test/main/mysql-interactive.test @@ -22,3 +22,27 @@ if ($sys_errno == 127) skip no socat; } remove_file $MYSQL_TMP_DIR/mysql_in; + +--echo # End of 10.4 tests + +--echo # +--echo # MDEV-36701 command line client doesn't check session_track information +--echo # +# test old behavior (make sure session tracking didn't break it) +# and new one, that didn't work before +write_file $MYSQL_TMP_DIR/mysql_in; +create database db1; +use db1; +drop database db1; +create database db1; +execute immediate "use db1"; +execute immediate "drop database db1"; +exit +EOF +let TERM=dumb; +replace_regex /id is \d+/id is X/ /Server version: .*/Server version: Y/ / \(\d+\.\d+ sec\)//; +error 0,127; +exec socat -t10 EXEC:"$MYSQL",pty STDIO < $MYSQL_TMP_DIR/mysql_in; +remove_file $MYSQL_TMP_DIR/mysql_in; + +--echo # End of 10.11 tests diff --git a/mysql-test/main/mysqladmin.test b/mysql-test/main/mysqladmin.test index af9b3eb5bda..c8956d2ecd2 100644 --- a/mysql-test/main/mysqladmin.test +++ b/mysql-test/main/mysqladmin.test @@ -1,5 +1,7 @@ # Embedded server doesn't support external clients --source include/not_embedded.inc +# MDEV-37169 - msan unknown failure +--source include/not_msan.inc # # Test "mysqladmin ping" # diff --git a/mysql-test/main/null.result b/mysql-test/main/null.result index cdbcbb12fab..15f6dfd2a36 100644 --- 
a/mysql-test/main/null.result +++ b/mysql-test/main/null.result @@ -263,11 +263,11 @@ t1 CREATE TABLE `t1` ( `c01` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c02` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c03` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci NOT NULL, - `c04` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, + `c04` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci NOT NULL, `c05` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c06` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, - `c07` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, - `c08` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, + `c07` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci NOT NULL, + `c08` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci NOT NULL, `c09` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c10` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c11` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result index ec55f3b0700..f84246c638e 100644 --- a/mysql-test/main/opt_trace.result +++ b/mysql-test/main/opt_trace.result @@ -12203,6 +12203,23 @@ exp1 exp2 ] 1 DROP TABLE t1; # +# MDEV-30334 Optimizer trace produces invalid JSON with WHERE subquery +# Simple code rearrangement to stop it displaying an unsigned int in a String. 
+# +SET optimizer_trace= 'enabled=on'; +CREATE TABLE t1 (id INT PRIMARY KEY); +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (a INT); +INSERT INTO t2 VALUES (3),(4); +SELECT * FROM t1 WHERE id < ( SELECT SUM(a) FROM t2 ); +id +1 +2 +SELECT JSON_VALID(trace) FROM information_schema.optimizer_trace; +JSON_VALID(trace) +1 +DROP TABLE t1, t2; +# # End of 10.4 tests # set optimizer_trace='enabled=on'; @@ -12921,9 +12938,9 @@ SUBQ a 985 1 985 2 # The trace must be empty: -select json_detailed(json_extract(trace, '$**.range-checked-for-each-record')) +select json_detailed(json_extract(trace, '$**.range-checked-for-each-record')) as TRACE from information_schema.optimizer_trace; -json_detailed(json_extract(trace, '$**.range-checked-for-each-record')) +TRACE NULL # The trace must be empty: select json_detailed(json_extract(trace, '$**.join_execution')) diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test index d364eae50ed..451f6d09749 100644 --- a/mysql-test/main/opt_trace.test +++ b/mysql-test/main/opt_trace.test @@ -855,6 +855,23 @@ SELECT b, a FROM t1 WHERE b <> 'p' OR a = 4 GROUP BY b, a HAVING a <= 7; SELECT DROP TABLE t1; --enable_view_protocol +--echo # +--echo # MDEV-30334 Optimizer trace produces invalid JSON with WHERE subquery +--echo # Simple code rearrangement to stop it displaying an unsigned int in a String. 
+--echo # + +SET optimizer_trace= 'enabled=on'; + +CREATE TABLE t1 (id INT PRIMARY KEY); +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (a INT); +INSERT INTO t2 VALUES (3),(4); + +SELECT * FROM t1 WHERE id < ( SELECT SUM(a) FROM t2 ); +SELECT JSON_VALID(trace) FROM information_schema.optimizer_trace; + +DROP TABLE t1, t2; + --echo # --echo # End of 10.4 tests --echo # @@ -1214,7 +1231,7 @@ select from t3; --echo # The trace must be empty: -select json_detailed(json_extract(trace, '$**.range-checked-for-each-record')) +select json_detailed(json_extract(trace, '$**.range-checked-for-each-record')) as TRACE from information_schema.optimizer_trace; --echo # The trace must be empty: select json_detailed(json_extract(trace, '$**.join_execution')) diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result index b47a590628a..3793ecd0152 100644 --- a/mysql-test/main/ps.result +++ b/mysql-test/main/ps.result @@ -5965,9 +5965,145 @@ t1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VI # Clean up DEALLOCATE PREPARE stmt; DROP VIEW t1; -# # End of 10.4 tests # +# MDEV-35330: Assertion `marked_for_read()` failed in VSec9::VSec9 | Item_func_from_unixtime::get_date +# +CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a)); +INSERT INTO t VALUES (1,'2025-07-18 18:37:10'); +SELECT * FROM t; +a b +1 2025-07-18 18:37:10 +EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT; +SELECT * FROM t; +a b +1 1970-01-01 09:00:01 +DROP TABLE t; +CREATE TABLE t (a INT, b INT DEFAULT (a+5)); +INSERT INTO t values (1,2), (2,DEFAULT); +EXECUTE IMMEDIATE 'INSERT INTO t VALUES (3,4), (4,?)' USING DEFAULT; +SELECT * FROM t; +a b +1 2 +2 7 +3 4 +4 9 +EXECUTE IMMEDIATE 'UPDATE t SET b=?' 
USING DEFAULT; +SELECT * FROM t; +a b +1 6 +2 7 +3 8 +4 9 +DROP TABLE t; +CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a)); +INSERT INTO t VALUES (1,'2025-07-18 18:37:10'); +SELECT * FROM t; +a b +1 2025-07-18 18:37:10 +PREPARE s FROM 'UPDATE t SET b=?'; +EXECUTE s USING DEFAULT; +SELECT * FROM t; +a b +1 1970-01-01 09:00:01 +DROP TABLE t; +CREATE TABLE t (a INT, b TIMESTAMP DEFAULT FROM_UNIXTIME(a), c INT DEFAULT (a+5)); +INSERT INTO t VALUES (1,'2025-07-18 18:37:10',3); +SELECT * FROM t; +a b c +1 2025-07-18 18:37:10 3 +EXECUTE IMMEDIATE 'UPDATE t SET b=?, c=?' USING DEFAULT, DEFAULT; +SELECT * FROM t; +a b c +1 1970-01-01 09:00:01 6 +DROP TABLE t; +# End of 10.6 tests +# +# MDEV-34322: ASAN heap-buffer-overflow in Field::is_null / Item_param::assign_default or bogus ER_BAD_NULL_ERROR +# +CREATE TABLE t1 (a INT NOT NULL DEFAULT '0', b INT); +INSERT INTO t1 VALUES (1,11); +CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL; +EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT; +DROP TABLE t1; +CREATE TABLE t1 (a INT NOT NULL DEFAULT (30 + 100), b INT); +INSERT INTO t1 VALUES (1,11); +CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL; +EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT; +DROP TABLE t1; +CREATE TABLE t1 (a INT NOT NULL DEFAULT (b + 100), b INT); +INSERT INTO t1 VALUES (1,11); +CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL; +EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT; +DROP TABLE t1; +CREATE TABLE t1 (a INT NOT NULL DEFAULT (FLOOR(RAND()*100)), b INT); +INSERT INTO t1 VALUES (1,11); +CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL; +EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" 
USING DEFAULT; +DROP TABLE t1; +# +# MDEV-32694: ASAN errors in Binary_string::alloced_length / reset_stmt_params +# +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7); +PREPARE stmt FROM 'BEGIN NOT ATOMIC SELECT * FROM t1 LIMIT ?; SELECT * FROM t1 LIMIT ?; END'; +# Expected output is row (1) produced by the first query +# the rows (1), (2), (3) produced by the second one +EXECUTE stmt USING 1, 3; +a +1 +a +1 +2 +3 +ALTER TABLE t1 ADD COLUMN f INT; +# Because metadata of the table t1 has been changed, +# the second execution of the same prepared statement should result in +# re-compilation of the first statement enclosed in the BEGIN / END block +# and since different actual values are provided to positional parameters +# on the second execution, the exepected output is the row (1), (2), (3) +# produced by the first query and the rows (1), (2), (3), (4), (5) +# produced by the second one +EXECUTE stmt USING 3, 5; +a f +1 NULL +2 NULL +3 NULL +a f +1 NULL +2 NULL +3 NULL +4 NULL +5 NULL +DEALLOCATE PREPARE stmt; +DROP TABLE t1; +# Check that the order of parameters preserved after re-compilation of a +# failed statement inside anonymous BEGIN / END block. +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +PREPARE stmt FROM 'BEGIN NOT ATOMIC SELECT a, ?, ? FROM t1; END'; +# Expected output is the row (1, 10, 20) +EXECUTE stmt USING 10, 20; +a ? ? +1 10 20 +ALTER TABLE t1 ADD COLUMN b INT; +# Expected output is the row (1, 300, 400) +EXECUTE stmt USING 300, 400; +a ? ? +1 300 400 +ALTER TABLE t1 DROP COLUMN b; +# Expected output is the row (1, 500, 700) +EXECUTE stmt USING 500, 700; +a ? ? +1 500 700 +ALTER TABLE t1 ADD COLUMN b INT; +# Expected output is the row (1, 700, 900) +EXECUTE stmt USING 700, 900; +a ? ? 
+1 700 900 +DEALLOCATE PREPARE stmt; +DROP TABLE t1; +# End of 11.4 tests # # MDEV-25008 Delete query gets stuck on mariadb , same query works on MySQL 8.0.21 # @@ -5986,7 +6122,5 @@ execute stmt using @var2; delete from t1 where a=1 or b=2; ERROR HY000: You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column drop table t1, t2; -# # End of 11.7 tests -# ALTER DATABASE test CHARACTER SET utf8mb4 COLLATE utf8mb4_uca1400_ai_ci; diff --git a/mysql-test/main/ps.test b/mysql-test/main/ps.test index e095c5e278b..90eb15b38b1 100644 --- a/mysql-test/main/ps.test +++ b/mysql-test/main/ps.test @@ -5447,9 +5447,137 @@ EXECUTE stmt; DEALLOCATE PREPARE stmt; DROP VIEW t1; ---echo # --echo # End of 10.4 tests + --echo # +--echo # MDEV-35330: Assertion `marked_for_read()` failed in VSec9::VSec9 | Item_func_from_unixtime::get_date +--echo # + +CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a)); +INSERT INTO t VALUES (1,'2025-07-18 18:37:10'); + +SELECT * FROM t; + +EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT; + +SELECT * FROM t; + +DROP TABLE t; + +CREATE TABLE t (a INT, b INT DEFAULT (a+5)); +INSERT INTO t values (1,2), (2,DEFAULT); +EXECUTE IMMEDIATE 'INSERT INTO t VALUES (3,4), (4,?)' USING DEFAULT; + +SELECT * FROM t; + +EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT; + +SELECT * FROM t; + +DROP TABLE t; + +CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a)); +INSERT INTO t VALUES (1,'2025-07-18 18:37:10'); + +SELECT * FROM t; + +PREPARE s FROM 'UPDATE t SET b=?'; +EXECUTE s USING DEFAULT; + +SELECT * FROM t; + +DROP TABLE t; + +CREATE TABLE t (a INT, b TIMESTAMP DEFAULT FROM_UNIXTIME(a), c INT DEFAULT (a+5)); +INSERT INTO t VALUES (1,'2025-07-18 18:37:10',3); + +SELECT * FROM t; + +EXECUTE IMMEDIATE 'UPDATE t SET b=?, c=?' 
USING DEFAULT, DEFAULT; + +SELECT * FROM t; + +DROP TABLE t; +--echo # End of 10.6 tests +--echo # +--echo # MDEV-34322: ASAN heap-buffer-overflow in Field::is_null / Item_param::assign_default or bogus ER_BAD_NULL_ERROR +--echo # +CREATE TABLE t1 (a INT NOT NULL DEFAULT '0', b INT); +INSERT INTO t1 VALUES (1,11); +CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL; +EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT; +# Cleanup +DROP TABLE t1; + +CREATE TABLE t1 (a INT NOT NULL DEFAULT (30 + 100), b INT); +INSERT INTO t1 VALUES (1,11); +CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL; +EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT; +# Cleanup +DROP TABLE t1; + +CREATE TABLE t1 (a INT NOT NULL DEFAULT (b + 100), b INT); +INSERT INTO t1 VALUES (1,11); +CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL; +EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT; +# Cleanup +DROP TABLE t1; + +CREATE TABLE t1 (a INT NOT NULL DEFAULT (FLOOR(RAND()*100)), b INT); +INSERT INTO t1 VALUES (1,11); +CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL; +EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" 
USING DEFAULT; +# Cleanup +DROP TABLE t1; + +--echo # +--echo # MDEV-32694: ASAN errors in Binary_string::alloced_length / reset_stmt_params +--echo # + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7); + +PREPARE stmt FROM 'BEGIN NOT ATOMIC SELECT * FROM t1 LIMIT ?; SELECT * FROM t1 LIMIT ?; END'; +--echo # Expected output is row (1) produced by the first query +--echo # the rows (1), (2), (3) produced by the second one +EXECUTE stmt USING 1, 3; +ALTER TABLE t1 ADD COLUMN f INT; + +--echo # Because metadata of the table t1 has been changed, +--echo # the second execution of the same prepared statement should result in +--echo # re-compilation of the first statement enclosed in the BEGIN / END block +--echo # and since different actual values are provided to positional parameters +--echo # on the second execution, the exepected output is the row (1), (2), (3) +--echo # produced by the first query and the rows (1), (2), (3), (4), (5) +--echo # produced by the second one +EXECUTE stmt USING 3, 5; + +# Cleanup +DEALLOCATE PREPARE stmt; +DROP TABLE t1; + +--echo # Check that the order of parameters preserved after re-compilation of a +--echo # failed statement inside anonymous BEGIN / END block. +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +PREPARE stmt FROM 'BEGIN NOT ATOMIC SELECT a, ?, ? 
FROM t1; END'; +--echo # Expected output is the row (1, 10, 20) +EXECUTE stmt USING 10, 20; +ALTER TABLE t1 ADD COLUMN b INT; +--echo # Expected output is the row (1, 300, 400) +EXECUTE stmt USING 300, 400; +ALTER TABLE t1 DROP COLUMN b; +--echo # Expected output is the row (1, 500, 700) +EXECUTE stmt USING 500, 700; +ALTER TABLE t1 ADD COLUMN b INT; +--echo # Expected output is the row (1, 700, 900) +EXECUTE stmt USING 700, 900; + +# Cleanup +DEALLOCATE PREPARE stmt; +DROP TABLE t1; + +--echo # End of 11.4 tests --echo # --echo # MDEV-25008 Delete query gets stuck on mariadb , same query works on MySQL 8.0.21 @@ -5473,8 +5601,6 @@ execute stmt using @var2; delete from t1 where a=1 or b=2; drop table t1, t2; ---echo # --echo # End of 11.7 tests ---echo # --source include/test_db_charset_restore.inc diff --git a/mysql-test/main/repair.result b/mysql-test/main/repair.result index bbccf67f54e..57bf62fe55d 100644 --- a/mysql-test/main/repair.result +++ b/mysql-test/main/repair.result @@ -265,6 +265,26 @@ test.t1 repair status OK set myisam_repair_threads = default; drop table t1; # End of 10.2 tests +USE test; +CREATE TEMPORARY TABLE t(c INT NOT NULL) ENGINE=CSV; +INSERT INTO t VALUES(1); +REPAIR TABLE t; +Table Op Msg_type Msg_text +test.t repair status OK +DELETE FROM t; +# +# MDEV-23207 Assertion `tl->table == __null' failed in THD::open_temporary_table +# +create table t1 (pk int primary key) engine=innodb partition by hash(pk) partitions 10; +create table t2 (c int) engine=innodb; +create temporary table t3 (c int); +repair table t1, t2, t3; +Table Op Msg_type Msg_text +test.t1 repair status OK +test.t2 repair status OK +test.t3 repair status OK +drop tables t1, t2; +# End of 10.11 tests # # MDEV-33737 The way of ignoring alter-algorithm is inconsistent with # other options and with itself diff --git a/mysql-test/main/repair.test b/mysql-test/main/repair.test index e038b7bd419..e486ce54b61 100644 --- a/mysql-test/main/repair.test +++ b/mysql-test/main/repair.test 
@@ -5,6 +5,7 @@ --source include/have_sequence.inc --source include/default_charset.inc --source include/have_innodb.inc +--source include/have_partition.inc call mtr.add_suppression("character set is multi-byte"); call mtr.add_suppression("exists only for compatibility"); @@ -281,6 +282,23 @@ drop table t1; --echo # End of 10.2 tests +USE test; +CREATE TEMPORARY TABLE t(c INT NOT NULL) ENGINE=CSV; +INSERT INTO t VALUES(1); +REPAIR TABLE t; +DELETE FROM t; + +--echo # +--echo # MDEV-23207 Assertion `tl->table == __null' failed in THD::open_temporary_table +--echo # +create table t1 (pk int primary key) engine=innodb partition by hash(pk) partitions 10; +create table t2 (c int) engine=innodb; +create temporary table t3 (c int); +repair table t1, t2, t3; +drop tables t1, t2; + +--echo # End of 10.11 tests + --echo # --echo # MDEV-33737 The way of ignoring alter-algorithm is inconsistent with --echo # other options and with itself diff --git a/mysql-test/main/show.result b/mysql-test/main/show.result index e453ee90c7e..f03fb6f6e4b 100644 --- a/mysql-test/main/show.result +++ b/mysql-test/main/show.result @@ -65,3 +65,37 @@ drop table t1; # # End of 10.3 tests # +# +# MDEV-31721: Cursor protocol increases the counter of "Empty_queries" for select +# +FLUSH STATUS; +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +SELECT COUNT(*) FROM t1; +COUNT(*) +1 +SELECT * FROM t1; +a +1 +SELECT * FROM t1 LIMIT 0; +a +SHOW STATUS LIKE "Empty_queries"; +Variable_name Value +Empty_queries 1 +DROP TABLE t1; +#------------------------------ +CREATE TABLE t1 (a INT); +CREATE TABLE t2 (b INT); +INSERT INTO t1 VALUES (1); +INSERT INTO t2 VALUES (2); +SELECT * FROM t1 UNION SELECT * FROM t2; +a +1 +2 +SELECT * FROM t1 UNION SELECT * FROM t2 LIMIT 0; +a +SHOW STATUS LIKE "Empty_queries"; +Variable_name Value +Empty_queries 2 +DROP TABLE t1, t2; +# End of 10.11 tests diff --git a/mysql-test/main/show.test b/mysql-test/main/show.test index 37c30000e59..27b7ab39b0b 100644 --- 
a/mysql-test/main/show.test +++ b/mysql-test/main/show.test @@ -56,3 +56,40 @@ drop table t1; --echo # --echo # End of 10.3 tests --echo # + +--echo # +--echo # MDEV-31721: Cursor protocol increases the counter of "Empty_queries" for select +--echo # + +FLUSH STATUS; + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); + +--disable_ps2_protocol +SELECT COUNT(*) FROM t1; +SELECT * FROM t1; +SELECT * FROM t1 LIMIT 0; +--enable_ps2_protocol + +SHOW STATUS LIKE "Empty_queries"; + +DROP TABLE t1; + +--echo #------------------------------ + +CREATE TABLE t1 (a INT); +CREATE TABLE t2 (b INT); +INSERT INTO t1 VALUES (1); +INSERT INTO t2 VALUES (2); + +--disable_ps2_protocol +SELECT * FROM t1 UNION SELECT * FROM t2; +SELECT * FROM t1 UNION SELECT * FROM t2 LIMIT 0; +--enable_ps2_protocol + +SHOW STATUS LIKE "Empty_queries"; + +DROP TABLE t1, t2; + +--echo # End of 10.11 tests diff --git a/mysql-test/main/sp-bugs2.result b/mysql-test/main/sp-bugs2.result index 3e48f0fb5d2..0a8df79a903 100644 --- a/mysql-test/main/sp-bugs2.result +++ b/mysql-test/main/sp-bugs2.result @@ -21,3 +21,42 @@ Warnings: Note 1050 Table 't2' already exists DROP PROCEDURE sp; DROP TABLE t1, t2; +# +# MDEV-36979 Same alias name with different case on same table is not working in functions +# +create table t1 ( id int primary key auto_increment, name varchar(10)); +insert into t1 (name) values ('wrbyviwb'); +insert into t1 (name) values ('wrbyrwb1'); +insert into t1 (name) values ('wrbrwb3'); +select cnt.name from t1 cnt join ( select CMT.id from t1 CMT where CMT.id=1) t2 on t2.id=cnt.id; +name +wrbyviwb +create function t1test(val int) returns varchar(400) charset utf8 +begin +declare output varchar(400) default ''; +set output = (select cnt.name from t1 cnt join ( select CMT.id from t1 CMT where CMT.id=val) t2 on t2.id=cnt.id); +return output; +end// +select t1test(1); +t1test(1) +wrbyviwb +drop function t1test; +drop table t1; +# +# MDEV-36814 MariaDB 10.11.9 Signal 11 crash on second Stored 
Procedure call +# +set names utf8; +create table t1 (a varchar(1000)); +create procedure p1(in p_a varchar(1000)) insert into t1 values (p_a);// +create procedure p2(in s varchar(10)) +begin +if s = '1' then set @startDate = now(); end if; +if s = '2' then set @startDate = '2025-05-23'; end if; +call p1(concat(s, @startDate, ' and ')); +end;// +call p2('1'); +call p2('2'); +drop table t1; +drop procedure p1; +drop procedure p2; +# End of 10.11 tests diff --git a/mysql-test/main/sp-bugs2.test b/mysql-test/main/sp-bugs2.test index 2579e3485c6..01c6106c0ca 100644 --- a/mysql-test/main/sp-bugs2.test +++ b/mysql-test/main/sp-bugs2.test @@ -27,3 +27,44 @@ CALL sp(); DROP PROCEDURE sp; DROP TABLE t1, t2; +--echo # +--echo # MDEV-36979 Same alias name with different case on same table is not working in functions +--echo # +create table t1 ( id int primary key auto_increment, name varchar(10)); +insert into t1 (name) values ('wrbyviwb'); +insert into t1 (name) values ('wrbyrwb1'); +insert into t1 (name) values ('wrbrwb3'); +select cnt.name from t1 cnt join ( select CMT.id from t1 CMT where CMT.id=1) t2 on t2.id=cnt.id; +delimiter //; +create function t1test(val int) returns varchar(400) charset utf8 +begin + declare output varchar(400) default ''; + set output = (select cnt.name from t1 cnt join ( select CMT.id from t1 CMT where CMT.id=val) t2 on t2.id=cnt.id); + return output; +end// +delimiter ;// +select t1test(1); +drop function t1test; +drop table t1; + +--echo # +--echo # MDEV-36814 MariaDB 10.11.9 Signal 11 crash on second Stored Procedure call +--echo # +set names utf8; +create table t1 (a varchar(1000)); +--delimiter // +create procedure p1(in p_a varchar(1000)) insert into t1 values (p_a);// +create procedure p2(in s varchar(10)) +begin + if s = '1' then set @startDate = now(); end if; + if s = '2' then set @startDate = '2025-05-23'; end if; + call p1(concat(s, @startDate, ' and ')); +end;// +--delimiter ; +call p2('1'); +call p2('2'); +drop table t1; +drop 
procedure p1; +drop procedure p2; + +--echo # End of 10.11 tests diff --git a/mysql-test/main/ssl.result b/mysql-test/main/ssl.result index 653024f1808..4204b346cc9 100644 --- a/mysql-test/main/ssl.result +++ b/mysql-test/main/ssl.result @@ -2195,11 +2195,20 @@ drop table t1; SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; have_ssl 1 +# +# MDEV-7697 Client reports ERROR 2006 (MySQL server has gone away) or ERROR 2013 (Lost connection to server during query) while executing AES* functions under SSL +# select aes_decrypt('MySQL','adf'); aes_decrypt('MySQL','adf') NULL select 'still connected?'; still connected? still connected? +# +# MDEV-35581 On servers linked against WolfSSL SSL_Cipher and SSL_cipher_list are always the same +# +select variable_value like '%:%' from information_schema.session_status where variable_name='ssl_cipher_list'; +variable_value like '%:%' +1 connection default; disconnect ssl_con; diff --git a/mysql-test/main/ssl.test b/mysql-test/main/ssl.test index 642af380be9..53d93ff1593 100644 --- a/mysql-test/main/ssl.test +++ b/mysql-test/main/ssl.test @@ -29,12 +29,17 @@ SHOW STATUS LIKE 'Ssl_server_not_after'; # Check ssl turned on SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; -# -# MDEV-7697 Client reports ERROR 2006 (MySQL server has gone away) or ERROR 2013 (Lost connection to server during query) while executing AES* functions under SSL -# +--echo # +--echo # MDEV-7697 Client reports ERROR 2006 (MySQL server has gone away) or ERROR 2013 (Lost connection to server during query) while executing AES* functions under SSL +--echo # select aes_decrypt('MySQL','adf'); select 'still connected?'; +--echo # +--echo # MDEV-35581 On servers linked against WolfSSL SSL_Cipher and SSL_cipher_list are always the same +--echo # +select variable_value like '%:%' from information_schema.session_status where 
variable_name='ssl_cipher_list'; + connection default; disconnect ssl_con; diff --git a/mysql-test/main/ssl_cipher_tlsv13.result b/mysql-test/main/ssl_cipher_tlsv13.result new file mode 100644 index 00000000000..d3437a7af18 --- /dev/null +++ b/mysql-test/main/ssl_cipher_tlsv13.result @@ -0,0 +1,11 @@ +# +# MDEV-35580 Server using WolfSSL shows different name than OpenSSL for some ciphers +# +connect c,localhost,root,,,,,SSL; +show status like 'ssl_cipher'; +Variable_name Value +Ssl_cipher TLS_AES_256_GCM_SHA384 +show status like 'ssl_version'; +Variable_name Value +Ssl_version TLSv1.3 +# End of 10.11 tests diff --git a/mysql-test/main/ssl_cipher_tlsv13.test b/mysql-test/main/ssl_cipher_tlsv13.test new file mode 100644 index 00000000000..14a2b5fbf49 --- /dev/null +++ b/mysql-test/main/ssl_cipher_tlsv13.test @@ -0,0 +1,11 @@ +--source include/have_ssl_communication.inc +--source include/have_tlsv13.inc +--echo # +--echo # MDEV-35580 Server using WolfSSL shows different name than OpenSSL for some ciphers +--echo # +connect c,localhost,root,,,,,SSL; + +show status like 'ssl_cipher'; +show status like 'ssl_version'; + +--echo # End of 10.11 tests diff --git a/mysql-test/main/statistics_json.result b/mysql-test/main/statistics_json.result index ec02f6c7e5d..8ee570711aa 100644 --- a/mysql-test/main/statistics_json.result +++ b/mysql-test/main/statistics_json.result @@ -7802,4 +7802,955 @@ EXPLAIN } } drop table t1; +# +# MDEV-36765 JSON Histogram cannot handle >1 byte characters +# +CREATE TABLE t1 (f varchar(50)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci; +INSERT INTO t1 VALUES (UNHEX('E983A8E996800AE983A8E99680')); +SET STATEMENT histogram_type=JSON_HB FOR ANALYZE TABLE t1 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +SELECT min_value, max_value, hist_type +FROM mysql.column_stats WHERE db_name = 'test' AND table_name = 't1'; +min_value max_value hist_type +部門 +部門 部門 
+部門 JSON_HB +DROP TABLE t1; +create table t1 ( +col1 varchar(10) charset utf8 +); +set names utf8; +select hex('б'), collation('б'); +hex('б') collation('б') +D0B1 utf8mb3_uca1400_ai_ci +insert into t1 values +('а'),('б'),('в'),('г'),('д'),('е'),('ж'),('з'),('и'),('й'); +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +select hex(col1) from t1; +hex(col1) +D0B0 +D0B1 +D0B2 +D0B3 +D0B4 +D0B5 +D0B6 +D0B7 +D0B8 +D0B9 +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +json_detailed(json_extract(histogram, '$**.histogram_hb')) +[ + [ + { + "start": "а", + "size": 0.1, + "ndv": 1 + }, + { + "start": "б", + "size": 0.1, + "ndv": 1 + }, + { + "start": "в", + "size": 0.1, + "ndv": 1 + }, + { + "start": "г", + "size": 0.1, + "ndv": 1 + }, + { + "start": "д", + "size": 0.1, + "ndv": 1 + }, + { + "start": "е", + "size": 0.1, + "ndv": 1 + }, + { + "start": "ж", + "size": 0.1, + "ndv": 1 + }, + { + "start": "з", + "size": 0.1, + "ndv": 1 + }, + { + "start": "и", + "size": 0.1, + "ndv": 1 + }, + { + "start": "й", + "end": "й", + "size": 0.1, + "ndv": 1 + } + ] +] +explain extended select * from t1 where col1 < 'а'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where `test`.`t1`.`col1` < 'а' +explain extended select * from t1 where col1 < 'в'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 20.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where `test`.`t1`.`col1` < 'в' +explain extended select * from t1 where col1 < 'д'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL 
NULL NULL NULL NULL 10 40.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where `test`.`t1`.`col1` < 'д' +explain extended select * from t1 where col1 < 'ж'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 60.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where `test`.`t1`.`col1` < 'ж' +explain extended select * from t1 where col1 < 'й'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 90.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where `test`.`t1`.`col1` < 'й' +delete from t1; +insert into t1 values +('"а'),('"б'),('"в'),('"г'),('"д'),('"е'),('"ж'),('"з'),('"и'),('"й'); +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +json_detailed(json_extract(histogram, '$**.histogram_hb')) +[ + [ + { + "start": "\"а", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"б", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"в", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"г", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"д", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"е", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"ж", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"з", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"и", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"й", + "end": "\"й", + "size": 0.1, + "ndv": 1 + } + ] +] +select hex(col1) from t1; +hex(col1) +22D0B9 +22D0B8 +22D0B7 +22D0B6 +22D0B5 +22D0B4 +22D0B3 +22D0B2 +22D0B1 +22D0B0 +explain extended select * from t1 where col1 < '"а'; +id select_type table type possible_keys key key_len ref rows 
filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where `test`.`t1`.`col1` < '"а' +explain extended select * from t1 where col1 < '"в'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 20.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where `test`.`t1`.`col1` < '"в' +explain extended select * from t1 where col1 < '"д'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 40.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where `test`.`t1`.`col1` < '"д' +explain extended select * from t1 where col1 < '"ж'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 60.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where `test`.`t1`.`col1` < '"ж' +explain extended select * from t1 where col1 < '"й'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 90.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where `test`.`t1`.`col1` < '"й' +drop table t1; +select JSON_UNQUOTE(CONVERT('"ФФ"' using cp1251)); +JSON_UNQUOTE(CONVERT('"ФФ"' using cp1251)) +ФФ +# +# MDEV-36977 Histogram code lacks coverage for non-latin characters +# +create table t1 ( +col1 varchar(10) charset utf8 collate utf8mb3_general_ci +); +set names utf8; +select hex('Ꙃ'), collation('Ꙃ'); +hex('Ꙃ') collation('Ꙃ') +EA9982 utf8mb3_uca1400_ai_ci +insert into t1 values +('Ꙩ'),('Ꙛ'),('ꙮ'),('Ꙃ'),('Ꚛ'),('ꘐ'),('ꘜ'),('ꕫ'),('ꖿ'), ('ꙛ'); +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +select col1, 
hex(col1) from t1 order by col1; +col1 hex(col1) +ꕫ EA95AB +ꖿ EA96BF +ꘐ EA9890 +ꘜ EA989C +Ꙃ EA9982 +Ꙛ EA999A +ꙛ EA999B +Ꙩ EA99A8 +ꙮ EA99AE +Ꚛ EA9A9A +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +json_detailed(json_extract(histogram, '$**.histogram_hb')) +[ + [ + { + "start": "ꕫ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "ꖿ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "ꘐ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "ꘜ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "Ꙃ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "Ꙛ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "ꙛ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "Ꙩ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "ꙮ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "Ꚛ", + "end": "Ꚛ", + "size": 0.1, + "ndv": 1 + } + ] +] +analyze select * from t1 where col1 < 'ꕫ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 10.00 0.00 Using where +analyze select * from t1 where col1 < 'ꖿ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 10.00 10.00 Using where +analyze select * from t1 where col1 < 'Ꙃ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 40.00 40.00 Using where +analyze select * from t1 where col1 < 'ꙛ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 60.00 60.00 Using where +analyze select * from t1 where col1 < 'Ꚛ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 90.00 90.00 Using where +delete from t1; +insert into t1 values 
+('"Ꙩ'),('"Ꙛ'),('"ꙮ'),('"Ꙃ'),('"Ꚛ'),('"ꘐ'),('"ꘜ'),('"ꕫ'),('"ꖿ'), ('"ꙛ'); +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +json_detailed(json_extract(histogram, '$**.histogram_hb')) +[ + [ + { + "start": "\"ꕫ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"ꖿ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"ꘐ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"ꘜ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"Ꙃ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"Ꙛ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"ꙛ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"Ꙩ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"ꙮ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"Ꚛ", + "end": "\"Ꚛ", + "size": 0.1, + "ndv": 1 + } + ] +] +select col1, hex(col1) from t1 order by col1; +col1 hex(col1) +"ꕫ 22EA95AB +"ꖿ 22EA96BF +"ꘐ 22EA9890 +"ꘜ 22EA989C +"Ꙃ 22EA9982 +"Ꙛ 22EA999A +"ꙛ 22EA999B +"Ꙩ 22EA99A8 +"ꙮ 22EA99AE +"Ꚛ 22EA9A9A +analyze select * from t1 where col1 < '"ꕫ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 10.00 0.00 Using where +analyze select * from t1 where col1 < '"ꖿ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 10.00 10.00 Using where +analyze select * from t1 where col1 < '"Ꙃ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 40.00 40.00 Using where +analyze select * from t1 where col1 < '"ꙛ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 
60.00 60.00 Using where +analyze select * from t1 where col1 < '"Ꚛ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 90.00 90.00 Using where +drop table t1; +create table t1 ( +col1 varchar(10) charset utf32 collate utf32_uca1400_ai_ci +); +show variables like "histogram_size"; +Variable_name Value +histogram_size 254 +SET NAMES utf8mb4; +select hex('🌀'), collation('🌀'); +hex('?') collation('?') +F09F8C80 utf8mb4_uca1400_ai_ci +insert into t1 values +('𝄞'),('🌀'),('😎'),('😀'),('🂡'),('🌚'), ('🀄'),('𝄢'), ('😺'), ('🧸'); +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +select col1, hex(col1) from t1 order by col1; +col1 hex(col1) +𝄞 0001D11E +𝄢 0001D122 +🀄 0001F004 +🂡 0001F0A1 +🌀 0001F300 +🌚 0001F31A +🧸 0001F9F8 +😀 0001F600 +😎 0001F60E +😺 0001F63A +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +json_detailed(json_extract(histogram, '$**.histogram_hb')) +[ + [ + { + "start": "𝄞", + "size": 0.1, + "ndv": 1 + }, + { + "start": "𝄢", + "size": 0.1, + "ndv": 1 + }, + { + "start": "🀄", + "size": 0.1, + "ndv": 1 + }, + { + "start": "🂡", + "size": 0.1, + "ndv": 1 + }, + { + "start": "🌀", + "size": 0.1, + "ndv": 1 + }, + { + "start": "🌚", + "size": 0.1, + "ndv": 1 + }, + { + "start": "🧸", + "size": 0.1, + "ndv": 1 + }, + { + "start": "😀", + "size": 0.1, + "ndv": 1 + }, + { + "start": "😎", + "size": 0.1, + "ndv": 1 + }, + { + "start": "😺", + "end": "😺", + "size": 0.1, + "ndv": 1 + } + ] +] +analyze select * from t1 where col1 < '𝄞'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 10.00 0.00 Using where +analyze select * from t1 where col1 < '𝄢'; +id select_type table type possible_keys key key_len ref 
rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 10.00 10.00 Using where +analyze select * from t1 where col1 < '🂡'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 30.00 30.00 Using where +analyze select * from t1 where col1 < '🌚'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 50.00 50.00 Using where +analyze select * from t1 where col1 < '😺'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 90.00 90.00 Using where +delete from t1; +insert into t1 values +('"𝄞'),('"🌀'),('"😎'),('"😀'),('"🂡'),('"🌚'), ('"🀄'),('"𝄢'), ('"😺'), ('"🧸'); +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +json_detailed(json_extract(histogram, '$**.histogram_hb')) +[ + [ + { + "start": "\"𝄞", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"𝄢", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"🀄", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"🂡", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"🌀", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"🌚", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"🧸", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"😀", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"😎", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"😺", + "end": "\"😺", + "size": 0.1, + "ndv": 1 + } + ] +] +select col1, hex(col1) from t1 order by col1; +col1 hex(col1) +"𝄞 000000220001D11E +"𝄢 000000220001D122 +"🀄 000000220001F004 +"🂡 000000220001F0A1 +"🌀 000000220001F300 +"🌚 000000220001F31A +"🧸 000000220001F9F8 +"😀 000000220001F600 +"😎 
000000220001F60E +"😺 000000220001F63A +analyze select * from t1 where col1 < '"𝄞'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 10.00 0.00 Using where +analyze select * from t1 where col1 < '"𝄢'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 10.00 10.00 Using where +analyze select * from t1 where col1 < '"🂡'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 30.00 30.00 Using where +analyze select * from t1 where col1 < '"🌚'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 50.00 50.00 Using where +analyze select * from t1 where col1 < '"😺'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 90.00 90.00 Using where +drop table t1; +create table t1 ( +col1 varchar(10) charset utf32 collate utf32_general_ci +); +show variables like "histogram_size"; +Variable_name Value +histogram_size 254 +SET NAMES utf8mb4; +insert into t1 values +('𝄞'),('🌀'),('б'),('😀'),('🂡'),('🀄'), ('ꕫ'),('Ꙃ'), ('😺'), ('d'); +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +select col1, hex(col1) from t1 order by col1; +col1 hex(col1) +d 00000064 +б 00000431 +ꕫ 0000A56B +Ꙃ 0000A642 +😺 0001F63A +🀄 0001F004 +🂡 0001F0A1 +😀 0001F600 +🌀 0001F300 +𝄞 0001D11E +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +json_detailed(json_extract(histogram, '$**.histogram_hb')) +[ + [ + { + "start": "d", + "size": 0.1, + "ndv": 1 + }, + { + "start": "б", + "size": 
0.1, + "ndv": 1 + }, + { + "start": "ꕫ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "Ꙃ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "𝄞", + "end": "𝄞", + "size": 0.6, + "ndv": 1 + } + ] +] +analyze select * from t1 where col1 < 'd'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 10.00 0.00 Using where +analyze select * from t1 where col1 < 'ꕫ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 20.00 20.00 Using where +analyze select * from t1 where col1 < '𝄞'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 40.00 40.00 Using where +analyze select * from t1 where col1 < '🂡'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 40.00 40.00 Using where +analyze select * from t1 where col1 < '😺'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 40.00 40.00 Using where +delete from t1; +insert into t1 values +('"𝄞'),('"🌀'),('"б'),('"😀'),('"🂡'),('"🌚'), ('"ꕫ'),('"Ꙃ'), ('"😺'), ('"d'); +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +json_detailed(json_extract(histogram, '$**.histogram_hb')) +[ + [ + { + "start": "\"d", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"б", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"ꕫ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"Ꙃ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"😺", + "end": "\"😺", + "size": 0.6, + "ndv": 1 + } + ] 
+] +select col1, hex(col1) from t1 order by col1; +col1 hex(col1) +"d 0000002200000064 +"б 0000002200000431 +"ꕫ 000000220000A56B +"Ꙃ 000000220000A642 +"🌀 000000220001F300 +"😀 000000220001F600 +"🂡 000000220001F0A1 +"🌚 000000220001F31A +"😺 000000220001F63A +"𝄞 000000220001D11E +analyze select * from t1 where col1 < '"d'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 10.00 0.00 Using where +analyze select * from t1 where col1 < '"ꕫ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 20.00 20.00 Using where +analyze select * from t1 where col1 < '"𝄞'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 40.00 40.00 Using where +analyze select * from t1 where col1 < '"🂡'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 40.00 40.00 Using where +analyze select * from t1 where col1 < '"😺'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 40.00 40.00 Using where +drop table t1; +create table t1 ( +col1 varchar(10) charset utf32 COLLATE utf32_uca1400_ai_ci +); +show variables like "histogram_size"; +Variable_name Value +histogram_size 254 +SET NAMES utf8mb4; +insert into t1 values +('𝄞'),('🌀'),('б'),('😀'),('🂡'),('🀄'), ('ꕫ'),('Ꙃ'), ('😺'), ('d'); +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +select col1, hex(col1) from t1 order by col1; +col1 hex(col1) +𝄞 0001D11E +🀄 0001F004 +🂡 0001F0A1 +🌀 0001F300 +😀 0001F600 +😺 0001F63A +d 00000064 +б 00000431 +Ꙃ 0000A642 +ꕫ 0000A56B +select json_detailed(json_extract(histogram, 
'$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +json_detailed(json_extract(histogram, '$**.histogram_hb')) +[ + [ + { + "start": "𝄞", + "size": 0.1, + "ndv": 1 + }, + { + "start": "🀄", + "size": 0.1, + "ndv": 1 + }, + { + "start": "🂡", + "size": 0.1, + "ndv": 1 + }, + { + "start": "🌀", + "size": 0.1, + "ndv": 1 + }, + { + "start": "😀", + "size": 0.1, + "ndv": 1 + }, + { + "start": "😺", + "size": 0.1, + "ndv": 1 + }, + { + "start": "d", + "size": 0.1, + "ndv": 1 + }, + { + "start": "б", + "size": 0.1, + "ndv": 1 + }, + { + "start": "Ꙃ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "ꕫ", + "end": "ꕫ", + "size": 0.1, + "ndv": 1 + } + ] +] +analyze select * from t1 where col1 < 'd'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 60.00 60.00 Using where +analyze select * from t1 where col1 < 'ꕫ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 90.00 90.00 Using where +analyze select * from t1 where col1 < '𝄞'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 10.00 0.00 Using where +analyze select * from t1 where col1 < '🂡'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 20.00 20.00 Using where +analyze select * from t1 where col1 < '😺'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 50.00 50.00 Using where +delete from t1; +insert into t1 values +('"𝄞'),('"🌀'),('"б'),('"😀'),('"🂡'),('"🌚'), ('"ꕫ'),('"Ꙃ'), ('"😺'), ('"d'); +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze 
status OK +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +json_detailed(json_extract(histogram, '$**.histogram_hb')) +[ + [ + { + "start": "\"𝄞", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"🂡", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"🌀", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"🌚", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"😀", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"😺", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"d", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"б", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"Ꙃ", + "size": 0.1, + "ndv": 1 + }, + { + "start": "\"ꕫ", + "end": "\"ꕫ", + "size": 0.1, + "ndv": 1 + } + ] +] +select col1, hex(col1) from t1 order by col1; +col1 hex(col1) +"𝄞 000000220001D11E +"🂡 000000220001F0A1 +"🌀 000000220001F300 +"🌚 000000220001F31A +"😀 000000220001F600 +"😺 000000220001F63A +"d 0000002200000064 +"б 0000002200000431 +"Ꙃ 000000220000A642 +"ꕫ 000000220000A56B +analyze select * from t1 where col1 < '"d'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 60.00 60.00 Using where +analyze select * from t1 where col1 < '"ꕫ'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 90.00 90.00 Using where +analyze select * from t1 where col1 < '"𝄞'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 10.00 0.00 Using where +analyze select * from t1 where col1 < '"🂡'; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 10.00 10.00 Using where +analyze select * from t1 where col1 < '"😺'; +id select_type table type possible_keys key key_len ref rows r_rows 
filtered r_filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 50.00 50.00 Using where +drop table t1; +# End of 10.11 tests ALTER DATABASE test CHARACTER SET utf8mb4 COLLATE utf8mb4_uca1400_ai_ci; diff --git a/mysql-test/main/statistics_json.test b/mysql-test/main/statistics_json.test index edc4cb1f5a4..cdd27f4393d 100644 --- a/mysql-test/main/statistics_json.test +++ b/mysql-test/main/statistics_json.test @@ -484,4 +484,209 @@ explain format=json select * from t1 where a > 'y'; drop table t1; +--echo # +--echo # MDEV-36765 JSON Histogram cannot handle >1 byte characters +--echo # + +CREATE TABLE t1 (f varchar(50)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci; +INSERT INTO t1 VALUES (UNHEX('E983A8E996800AE983A8E99680')); +SET STATEMENT histogram_type=JSON_HB FOR ANALYZE TABLE t1 PERSISTENT FOR ALL; +SELECT min_value, max_value, hist_type +FROM mysql.column_stats WHERE db_name = 'test' AND table_name = 't1'; + +DROP TABLE t1; + +create table t1 ( + col1 varchar(10) charset utf8 +); +set names utf8; +select hex('б'), collation('б'); +insert into t1 values +('а'),('б'),('в'),('г'),('д'),('е'),('ж'),('з'),('и'),('й'); + +analyze table t1 persistent for all; +select hex(col1) from t1; +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; + +explain extended select * from t1 where col1 < 'а'; +explain extended select * from t1 where col1 < 'в'; +explain extended select * from t1 where col1 < 'д'; +explain extended select * from t1 where col1 < 'ж'; +explain extended select * from t1 where col1 < 'й'; + +delete from t1; +insert into t1 values +('"а'),('"б'),('"в'),('"г'),('"д'),('"е'),('"ж'),('"з'),('"и'),('"й'); + +analyze table t1 persistent for all; +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +select hex(col1) from t1; +explain extended select * from t1 where col1 < '"а'; 
+explain extended select * from t1 where col1 < '"в'; +explain extended select * from t1 where col1 < '"д'; +explain extended select * from t1 where col1 < '"ж'; +explain extended select * from t1 where col1 < '"й'; + +drop table t1; + +# JSON_UNQUOTE was touched by this patch also +select JSON_UNQUOTE(CONVERT('"ФФ"' using cp1251)); + + +--echo # +--echo # MDEV-36977 Histogram code lacks coverage for non-latin characters +--echo # + +create table t1 ( + col1 varchar(10) charset utf8 collate utf8mb3_general_ci +); +set names utf8; +select hex('Ꙃ'), collation('Ꙃ'); +insert into t1 values +('Ꙩ'),('Ꙛ'),('ꙮ'),('Ꙃ'),('Ꚛ'),('ꘐ'),('ꘜ'),('ꕫ'),('ꖿ'), ('ꙛ'); + +analyze table t1 persistent for all; +select col1, hex(col1) from t1 order by col1; +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; + +analyze select * from t1 where col1 < 'ꕫ'; +analyze select * from t1 where col1 < 'ꖿ'; +analyze select * from t1 where col1 < 'Ꙃ'; +analyze select * from t1 where col1 < 'ꙛ'; +analyze select * from t1 where col1 < 'Ꚛ'; + +delete from t1; +insert into t1 values +('"Ꙩ'),('"Ꙛ'),('"ꙮ'),('"Ꙃ'),('"Ꚛ'),('"ꘐ'),('"ꘜ'),('"ꕫ'),('"ꖿ'), ('"ꙛ'); + +analyze table t1 persistent for all; +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +select col1, hex(col1) from t1 order by col1; +analyze select * from t1 where col1 < '"ꕫ'; +analyze select * from t1 where col1 < '"ꖿ'; +analyze select * from t1 where col1 < '"Ꙃ'; +analyze select * from t1 where col1 < '"ꙛ'; +analyze select * from t1 where col1 < '"Ꚛ'; + +drop table t1; + +create table t1 ( + col1 varchar(10) charset utf32 collate utf32_uca1400_ai_ci +); +show variables like "histogram_size"; +SET NAMES utf8mb4; +--disable_service_connection +select hex('🌀'), collation('🌀'); +--enable_service_connection +insert into t1 values +('𝄞'),('🌀'),('😎'),('😀'),('🂡'),('🌚'), ('🀄'),('𝄢'), 
('😺'), ('🧸'); + + +analyze table t1 persistent for all; +select col1, hex(col1) from t1 order by col1; +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; + +analyze select * from t1 where col1 < '𝄞'; +analyze select * from t1 where col1 < '𝄢'; +analyze select * from t1 where col1 < '🂡'; +analyze select * from t1 where col1 < '🌚'; +analyze select * from t1 where col1 < '😺'; + +delete from t1; +insert into t1 values +('"𝄞'),('"🌀'),('"😎'),('"😀'),('"🂡'),('"🌚'), ('"🀄'),('"𝄢'), ('"😺'), ('"🧸'); + +analyze table t1 persistent for all; +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +select col1, hex(col1) from t1 order by col1; +analyze select * from t1 where col1 < '"𝄞'; +analyze select * from t1 where col1 < '"𝄢'; +analyze select * from t1 where col1 < '"🂡'; +analyze select * from t1 where col1 < '"🌚'; +analyze select * from t1 where col1 < '"😺'; + +drop table t1; + +# Different behavior for collation utf32_uca1400_ai_ci and utf32_general_ci + +create table t1 ( + col1 varchar(10) charset utf32 collate utf32_general_ci +); +show variables like "histogram_size"; +SET NAMES utf8mb4; +insert into t1 values +('𝄞'),('🌀'),('б'),('😀'),('🂡'),('🀄'), ('ꕫ'),('Ꙃ'), ('😺'), ('d'); + + +analyze table t1 persistent for all; +select col1, hex(col1) from t1 order by col1; +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; + +analyze select * from t1 where col1 < 'd'; +analyze select * from t1 where col1 < 'ꕫ'; +analyze select * from t1 where col1 < '𝄞'; +analyze select * from t1 where col1 < '🂡'; +analyze select * from t1 where col1 < '😺'; + +delete from t1; +insert into t1 values +('"𝄞'),('"🌀'),('"б'),('"😀'),('"🂡'),('"🌚'), ('"ꕫ'),('"Ꙃ'), ('"😺'), ('"d'); + +analyze table t1 persistent for all; +select 
json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +select col1, hex(col1) from t1 order by col1; +analyze select * from t1 where col1 < '"d'; +analyze select * from t1 where col1 < '"ꕫ'; +analyze select * from t1 where col1 < '"𝄞'; +analyze select * from t1 where col1 < '"🂡'; +analyze select * from t1 where col1 < '"😺'; + +drop table t1; + +create table t1 ( + col1 varchar(10) charset utf32 COLLATE utf32_uca1400_ai_ci +); +show variables like "histogram_size"; +SET NAMES utf8mb4; +insert into t1 values +('𝄞'),('🌀'),('б'),('😀'),('🂡'),('🀄'), ('ꕫ'),('Ꙃ'), ('😺'), ('d'); + + +analyze table t1 persistent for all; +select col1, hex(col1) from t1 order by col1; +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; + +analyze select * from t1 where col1 < 'd'; +analyze select * from t1 where col1 < 'ꕫ'; +analyze select * from t1 where col1 < '𝄞'; +analyze select * from t1 where col1 < '🂡'; +analyze select * from t1 where col1 < '😺'; + +delete from t1; +insert into t1 values +('"𝄞'),('"🌀'),('"б'),('"😀'),('"🂡'),('"🌚'), ('"ꕫ'),('"Ꙃ'), ('"😺'), ('"d'); + +analyze table t1 persistent for all; +select json_detailed(json_extract(histogram, '$**.histogram_hb')) +from mysql.column_stats where db_name=database() and table_name='t1'; +select col1, hex(col1) from t1 order by col1; +analyze select * from t1 where col1 < '"d'; +analyze select * from t1 where col1 < '"ꕫ'; +analyze select * from t1 where col1 < '"𝄞'; +analyze select * from t1 where col1 < '"🂡'; +analyze select * from t1 where col1 < '"😺'; + +drop table t1; + +--echo # End of 10.11 tests + --source include/test_db_charset_restore.inc diff --git a/mysql-test/main/statistics_upgrade_not_done.test b/mysql-test/main/statistics_upgrade_not_done.test index d38a387bd27..4bdae3b5145 100644 --- a/mysql-test/main/statistics_upgrade_not_done.test +++ 
b/mysql-test/main/statistics_upgrade_not_done.test @@ -1,4 +1,6 @@ --source include/not_embedded.inc +# MDEV-37169 - msan unknown failure +--source include/not_msan.inc --source include/mysql_upgrade_preparation.inc --source include/have_innodb.inc diff --git a/mysql-test/main/subselect.result b/mysql-test/main/subselect.result index 17d4a4fb80d..f79d0f90cb5 100644 --- a/mysql-test/main/subselect.result +++ b/mysql-test/main/subselect.result @@ -7601,10 +7601,8 @@ Warning 1292 Truncated incorrect DECIMAL value: 'e' Warning 1292 Truncated incorrect DECIMAL value: 'e' drop view v1; drop table t1, t2, t3; -# # End of 10.6 tests # -# # MDEV-32656: ASAN errors in base_list_iterator::next / # setup_table_map upon 2nd execution of PS # (10.10 part) @@ -7643,6 +7641,4 @@ Warning 1292 Truncated incorrect DECIMAL value: 'e' Warning 1292 Truncated incorrect DECIMAL value: 'e' DROP VIEW v1; DROP TABLE t1, t2, t3; -# # End of 10.10 tests -# diff --git a/mysql-test/main/subselect.test b/mysql-test/main/subselect.test index 53db5dd70df..9f4ebf9bcbc 100644 --- a/mysql-test/main/subselect.test +++ b/mysql-test/main/subselect.test @@ -6466,9 +6466,7 @@ insert into t2 select (('e','e') IN (SELECT v1.id, v1.id FROM v1 JOIN t3)); drop view v1; drop table t1, t2, t3; ---echo # --echo # End of 10.6 tests ---echo # --echo # --echo # MDEV-32656: ASAN errors in base_list_iterator::next / @@ -6502,6 +6500,4 @@ EXECUTE stmt; DROP VIEW v1; DROP TABLE t1, t2, t3; ---echo # --echo # End of 10.10 tests ---echo # diff --git a/mysql-test/main/subselect4.result b/mysql-test/main/subselect4.result index 30474ab31ad..1733758420a 100644 --- a/mysql-test/main/subselect4.result +++ b/mysql-test/main/subselect4.result @@ -3367,3 +3367,11 @@ Variable_name Value Handler_read_rnd_next 0 drop table t1,t2,t3; # End of 10.4 tests +# +# MDEV-29300 Assertion `*ref && (*ref)->fixed()' failed in Item_field::fix_outer_field on SELECT +# +CREATE TABLE t(c INT); +SELECT (SELECT 0 GROUP BY c HAVING (SELECT c)) FROM t 
GROUP BY c; +(SELECT 0 GROUP BY c HAVING (SELECT c)) +DROP TABLE t; +# End of 10.11 tests diff --git a/mysql-test/main/subselect4.test b/mysql-test/main/subselect4.test index 85790fa5110..f8c08cc017b 100644 --- a/mysql-test/main/subselect4.test +++ b/mysql-test/main/subselect4.test @@ -2664,3 +2664,12 @@ drop table t1,t2,t3; --echo # End of 10.4 tests +--echo # +--echo # MDEV-29300 Assertion `*ref && (*ref)->fixed()' failed in Item_field::fix_outer_field on SELECT +--echo # + +CREATE TABLE t(c INT); +SELECT (SELECT 0 GROUP BY c HAVING (SELECT c)) FROM t GROUP BY c; +DROP TABLE t; + +--echo # End of 10.11 tests diff --git a/mysql-test/main/subselect_no_exists_to_in.result b/mysql-test/main/subselect_no_exists_to_in.result index c09cd240798..606193c748a 100644 --- a/mysql-test/main/subselect_no_exists_to_in.result +++ b/mysql-test/main/subselect_no_exists_to_in.result @@ -7603,10 +7603,8 @@ Warning 1292 Truncated incorrect DECIMAL value: 'e' Warning 1292 Truncated incorrect DECIMAL value: 'e' drop view v1; drop table t1, t2, t3; -# # End of 10.6 tests # -# # MDEV-32656: ASAN errors in base_list_iterator::next / # setup_table_map upon 2nd execution of PS # (10.10 part) @@ -7645,9 +7643,7 @@ Warning 1292 Truncated incorrect DECIMAL value: 'e' Warning 1292 Truncated incorrect DECIMAL value: 'e' DROP VIEW v1; DROP TABLE t1, t2, t3; -# # End of 10.10 tests -# set optimizer_switch=default; select @@optimizer_switch like '%exists_to_in=off%'; @@optimizer_switch like '%exists_to_in=off%' diff --git a/mysql-test/main/subselect_no_mat.result b/mysql-test/main/subselect_no_mat.result index d2d6103d5ea..642e4682a55 100644 --- a/mysql-test/main/subselect_no_mat.result +++ b/mysql-test/main/subselect_no_mat.result @@ -7598,10 +7598,8 @@ Warning 1292 Truncated incorrect DECIMAL value: 'e' Warning 1292 Truncated incorrect DECIMAL value: 'e' drop view v1; drop table t1, t2, t3; -# # End of 10.6 tests # -# # MDEV-32656: ASAN errors in base_list_iterator::next / # setup_table_map upon 
2nd execution of PS # (10.10 part) @@ -7640,9 +7638,7 @@ Warning 1292 Truncated incorrect DECIMAL value: 'e' Warning 1292 Truncated incorrect DECIMAL value: 'e' DROP VIEW v1; DROP TABLE t1, t2, t3; -# # End of 10.10 tests -# set optimizer_switch=default; select @@optimizer_switch like '%materialization=on%'; @@optimizer_switch like '%materialization=on%' diff --git a/mysql-test/main/subselect_no_opts.result b/mysql-test/main/subselect_no_opts.result index 4a0a8e6b1d1..147aa49b4d6 100644 --- a/mysql-test/main/subselect_no_opts.result +++ b/mysql-test/main/subselect_no_opts.result @@ -7596,10 +7596,8 @@ Warning 1292 Truncated incorrect DECIMAL value: 'e' Warning 1292 Truncated incorrect DECIMAL value: 'e' drop view v1; drop table t1, t2, t3; -# # End of 10.6 tests # -# # MDEV-32656: ASAN errors in base_list_iterator::next / # setup_table_map upon 2nd execution of PS # (10.10 part) @@ -7638,7 +7636,5 @@ Warning 1292 Truncated incorrect DECIMAL value: 'e' Warning 1292 Truncated incorrect DECIMAL value: 'e' DROP VIEW v1; DROP TABLE t1, t2, t3; -# # End of 10.10 tests -# set @optimizer_switch_for_subselect_test=null; diff --git a/mysql-test/main/subselect_no_scache.result b/mysql-test/main/subselect_no_scache.result index 41687dd2f99..0334601bb3a 100644 --- a/mysql-test/main/subselect_no_scache.result +++ b/mysql-test/main/subselect_no_scache.result @@ -7607,10 +7607,8 @@ Warning 1292 Truncated incorrect DECIMAL value: 'e' Warning 1292 Truncated incorrect DECIMAL value: 'e' drop view v1; drop table t1, t2, t3; -# # End of 10.6 tests # -# # MDEV-32656: ASAN errors in base_list_iterator::next / # setup_table_map upon 2nd execution of PS # (10.10 part) @@ -7649,9 +7647,7 @@ Warning 1292 Truncated incorrect DECIMAL value: 'e' Warning 1292 Truncated incorrect DECIMAL value: 'e' DROP VIEW v1; DROP TABLE t1, t2, t3; -# # End of 10.10 tests -# set optimizer_switch=default; select @@optimizer_switch like '%subquery_cache=on%'; @@optimizer_switch like '%subquery_cache=on%' diff 
--git a/mysql-test/main/subselect_no_semijoin.result b/mysql-test/main/subselect_no_semijoin.result index 9117d88d743..0e11fa36045 100644 --- a/mysql-test/main/subselect_no_semijoin.result +++ b/mysql-test/main/subselect_no_semijoin.result @@ -7596,10 +7596,8 @@ Warning 1292 Truncated incorrect DECIMAL value: 'e' Warning 1292 Truncated incorrect DECIMAL value: 'e' drop view v1; drop table t1, t2, t3; -# # End of 10.6 tests # -# # MDEV-32656: ASAN errors in base_list_iterator::next / # setup_table_map upon 2nd execution of PS # (10.10 part) @@ -7638,10 +7636,8 @@ Warning 1292 Truncated incorrect DECIMAL value: 'e' Warning 1292 Truncated incorrect DECIMAL value: 'e' DROP VIEW v1; DROP TABLE t1, t2, t3; -# # End of 10.10 tests # -# # MDEV-19714: JOIN::pseudo_bits_cond is not visible in EXPLAIN FORMAT=JSON # CREATE TABLE t1 ( a INT ); diff --git a/mysql-test/main/table_elim.result b/mysql-test/main/table_elim.result index b095dc44cb1..5c78d296b6b 100644 --- a/mysql-test/main/table_elim.result +++ b/mysql-test/main/table_elim.result @@ -362,6 +362,7 @@ id select_type table type possible_keys key key_len ref rows Extra explain select t1.a from t1 left join t3 on t3.pk1=t1.a and t3.pk2 IS NULL; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 4 +1 SIMPLE t3 ref pk1 pk1 10 test.t1.a,const 1 Using where; Using index drop table t1,t2,t3; # # Multi-equality tests @@ -1055,3 +1056,59 @@ DROP VIEW v; # # End of 10.10 tests # +# +# MDEV-36215: Table elimination wrongly done +# +CREATE TABLE t1(null_col INT, notnull_col INT NOT NULL); +CREATE TABLE t2(unique_col INT, UNIQUE(unique_col)); +INSERT INTO t1 VALUES (1,100), (NULL, 101); +INSERT INTO t2 VALUES (NULL), (NULL); +# Test with 'unique_col IS NULL' +# Here, table t2 should not be eliminated: +explain +select t1.null_col from t1 left join t2 on (t2.unique_col is null); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 
2 +1 SIMPLE t2 ref unique_col unique_col 5 const 2 Using where; Using index +select t1.null_col from t1 left join t2 on (t2.unique_col is null); +null_col +1 +1 +NULL +NULL +# Check without table elimination: +set statement optimizer_switch='table_elimination=off' for +select t1.null_col from t1 left join t2 on (t2.unique_col is null); +null_col +1 +1 +NULL +NULL +# Test with 'unique_col <=> nullable_col' +# t2 must not be eliminated: +explain +select t1.null_col from t1 left join t2 on t1.null_col<=>t2.unique_col; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 +1 SIMPLE t2 ref unique_col unique_col 5 test.t1.null_col 1 Using where; Using index +select t1.null_col from t1 left join t2 on t1.null_col<=>t2.unique_col; +null_col +1 +NULL +NULL +# Check without table elimination: +set statement optimizer_switch='table_elimination=off' for +select t1.null_col from t1 left join t2 on t1.null_col<=>t2.unique_col; +null_col +1 +NULL +NULL +# Table t2 will be eliminated: +explain +select t1.null_col from t1 left join t2 on (t2.unique_col<=>t1.notnull_col); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 +DROP TABLE t1, t2; +# +# End of 10.11 tests +# diff --git a/mysql-test/main/table_elim.test b/mysql-test/main/table_elim.test index de0cd413088..4158d2ca5ac 100644 --- a/mysql-test/main/table_elim.test +++ b/mysql-test/main/table_elim.test @@ -803,3 +803,44 @@ DROP VIEW v; --echo # --echo # End of 10.10 tests --echo # + +--echo # +--echo # MDEV-36215: Table elimination wrongly done +--echo # + +CREATE TABLE t1(null_col INT, notnull_col INT NOT NULL); +CREATE TABLE t2(unique_col INT, UNIQUE(unique_col)); +INSERT INTO t1 VALUES (1,100), (NULL, 101); +INSERT INTO t2 VALUES (NULL), (NULL); + + +--echo # Test with 'unique_col IS NULL' +--echo # Here, table t2 should not be eliminated: +explain +select t1.null_col from t1 left join t2 on (t2.unique_col is null); 
+select t1.null_col from t1 left join t2 on (t2.unique_col is null); + +--echo # Check without table elimination: +set statement optimizer_switch='table_elimination=off' for +select t1.null_col from t1 left join t2 on (t2.unique_col is null); + +--echo # Test with 'unique_col <=> nullable_col' +--echo # t2 must not be eliminated: +explain +select t1.null_col from t1 left join t2 on t1.null_col<=>t2.unique_col; +select t1.null_col from t1 left join t2 on t1.null_col<=>t2.unique_col; + +--echo # Check without table elimination: +set statement optimizer_switch='table_elimination=off' for +select t1.null_col from t1 left join t2 on t1.null_col<=>t2.unique_col; + +--echo # Table t2 will be eliminated: +explain +select t1.null_col from t1 left join t2 on (t2.unique_col<=>t1.notnull_col); + + +DROP TABLE t1, t2; + +--echo # +--echo # End of 10.11 tests +--echo # diff --git a/mysql-test/main/type_bit.result b/mysql-test/main/type_bit.result index ab657ad05b8..206418c6d7c 100644 --- a/mysql-test/main/type_bit.result +++ b/mysql-test/main/type_bit.result @@ -825,12 +825,12 @@ c SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `c` decimal(1,0) DEFAULT NULL + `c` decimal(1,0) NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t2; SELECT COALESCE(val, 1) FROM t1; Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def COALESCE(val, 1) 246 2 1 Y 32896 0 63 +def COALESCE(val, 1) 246 2 1 N 32897 0 63 COALESCE(val, 1) 0 DROP TABLE t1; diff --git a/mysql-test/main/update_innodb.result b/mysql-test/main/update_innodb.result index e6ef6b26a50..0a0e74fda56 100644 --- a/mysql-test/main/update_innodb.result +++ b/mysql-test/main/update_innodb.result @@ -251,3 +251,23 @@ update orders, customer, nation set orders.o_comment = "+++" where o_orderDATE o_custkey = c_custkey and c_nationkey = n_nationkey and n_name='PERU'; DROP DATABASE dbt3_s001; set 
default_storage_engine=@save_default_storage_engine; +use test; +# +# MDEV-37281 incorrect isolation level in update with unique using hash or without overlap +# +create table t1 (id int, e varchar(100), a int, unique (e) using hash) engine=innodb; +insert t1 values(10, '2000-01-01', 0); +insert t1 values(20, '2000-01-02', 1); +insert t1 values(30, '2000-01-03', 2); +set session innodb_snapshot_isolation=0; +set transaction isolation level read committed; +start transaction; +update t1 set a=10 where a=0; +connect con1,localhost,root; +set session innodb_snapshot_isolation=0; +set transaction isolation level read committed; +update t1 set a=20 where a=1; +connection default; +drop table t1; +disconnect con1; +# End of 11.4.tests diff --git a/mysql-test/main/update_innodb.test b/mysql-test/main/update_innodb.test index ad728fb8e28..4ae6b6b69da 100644 --- a/mysql-test/main/update_innodb.test +++ b/mysql-test/main/update_innodb.test @@ -193,7 +193,6 @@ let $c1= c_nationkey = n_nationkey and n_name='PERU'; - explain update orders, customer, nation set orders.o_comment = "+++" where o_orderDATE between '1992-01-01' and '1992-06-30' and o_custkey = c_custkey and c_nationkey = n_nationkey and n_name='PERU'; @@ -208,4 +207,25 @@ update orders, customer, nation set orders.o_comment = "+++" where o_orderDATE DROP DATABASE dbt3_s001; set default_storage_engine=@save_default_storage_engine; +use test; +--echo # +--echo # MDEV-37281 incorrect isolation level in update with unique using hash or without overlap +--echo # +create table t1 (id int, e varchar(100), a int, unique (e) using hash) engine=innodb; +insert t1 values(10, '2000-01-01', 0); +insert t1 values(20, '2000-01-02', 1); +insert t1 values(30, '2000-01-03', 2); +set session innodb_snapshot_isolation=0; +set transaction isolation level read committed; +start transaction; +update t1 set a=10 where a=0; +--connect con1,localhost,root +set session innodb_snapshot_isolation=0; +set transaction isolation level read 
committed; +update t1 set a=20 where a=1; +--connection default +drop table t1; +--disconnect con1 + +--echo # End of 11.4.tests diff --git a/mysql-test/main/xa.result b/mysql-test/main/xa.result index a692e5ec311..fb87114837c 100644 --- a/mysql-test/main/xa.result +++ b/mysql-test/main/xa.result @@ -1,5 +1,4 @@ call mtr.add_suppression("Deadlock found when trying to get lock; try restarting transaction"); -drop table if exists t1, t2; create table t1 (a int) engine=innodb; xa start 'test1'; insert t1 values (10); @@ -135,11 +134,13 @@ connection con2; update t1 set c = 'aa' where a = 1; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction select count(*) from t1; +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the ROLLBACK ONLY state +xa end 'a','c'; +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the ROLLBACK ONLY state +xa rollback 'a','c'; +select count(*) from t1; count(*) 2 -xa end 'a','c'; -ERROR XA102: XA_RBDEADLOCK: Transaction branch was rolled back: deadlock was detected -xa rollback 'a','c'; disconnect con2; connect con3,localhost,root,,; connection con3; @@ -207,7 +208,7 @@ connection default; UPDATE t1 SET a=5 WHERE a=1; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction XA END 'xid1'; -ERROR XA102: XA_RBDEADLOCK: Transaction branch was rolled back: deadlock was detected +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the ROLLBACK ONLY state XA ROLLBACK 'xid1'; XA START 'xid1'; XA END 'xid1'; @@ -593,6 +594,95 @@ formatID gtrid_length bqual_length data xa rollback '4'; ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back set @@global.read_only=@sav_read_only; -# # End of 10.5 tests # +# MDEV-24981 LOAD INDEX may cause rollback of prepared XA transaction +# +create table t1 (f1 integer primary key) engine=innodb; +show create table t1; +Table Create Table +t1 CREATE 
TABLE `t1` ( + `f1` int(11) NOT NULL, + PRIMARY KEY (`f1`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci +xa start 'a'; +insert into t1 values (1); +xa end 'a'; +xa prepare 'a'; +load index into cache t1 key(primary); +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the PREPARED state +xa commit 'a'; +select count(*) from t1; +count(*) +1 +drop table t1; +# +# MDEV-37315 Assertion `!xid_state.xid_cache_element' failed in trans_xa_rollback +# +create table t1 (a int) engine=innodb; +create table t2 (b int) engine=innodb; +create table t3 (c int) engine=innodb; +insert t1 values (1); +insert t2 values (2); +connect con1,localhost,root; +xa start 'r:foo'; +insert t3 values (3); +xa end 'r:foo'; +xa prepare 'r:foo'; +disconnect con1; +connection default; +xa recover; +formatID gtrid_length bqual_length data +1 5 0 r:foo +xa start 'r:bar'; +select * from t1 for update; +a +1 +connect con2,localhost,root; +xa start 'r:baz'; +update t2 set b=12; +update t1 set a=13; +connection default; +xa rollback 'r:foo'; +ERROR XAE09: XAER_OUTSIDE: Some work is done outside global transaction +select * from t2 for update; +Got one of the listed errors +xa rollback 'r:foo'; +ERROR XAE09: XAER_OUTSIDE: Some work is done outside global transaction +connection con2; +disconnect con2; +connection default; +xa rollback 'r:bar'; +xa rollback 'r:foo'; +connect con1,localhost,root; +xa start 'c:foo'; +insert t3 values (103); +xa end 'c:foo'; +xa prepare 'c:foo'; +disconnect con1; +connection default; +xa recover; +formatID gtrid_length bqual_length data +1 5 0 c:foo +xa start 'c:bar'; +select * from t1 for update; +a +1 +connect con2,localhost,root; +xa start 'c:baz'; +update t2 set b=102; +update t1 set a=103; +connection default; +xa commit 'c:foo'; +ERROR XAE09: XAER_OUTSIDE: Some work is done outside global transaction +select * from t2 for update; +Got one of the listed errors +xa commit 'c:foo'; +ERROR XAE09: XAER_OUTSIDE: 
Some work is done outside global transaction +connection con2; +disconnect con2; +connection default; +xa rollback 'c:bar'; +xa commit 'c:foo'; +drop table t1,t2,t3; +# End of 10.11 tests diff --git a/mysql-test/main/xa.test b/mysql-test/main/xa.test index a4c60ef316d..104509b6cc6 100644 --- a/mysql-test/main/xa.test +++ b/mysql-test/main/xa.test @@ -1,7 +1,7 @@ # # WL#1756 # --- source include/have_innodb.inc +--source include/have_innodb.inc --source include/not_embedded.inc # Save the initial number of concurrent sessions @@ -12,10 +12,6 @@ call mtr.add_suppression("Deadlock found when trying to get lock; try restarting call mtr.add_suppression("InnoDB: Transaction was aborted due to "); --enable_query_log - ---disable_warnings -drop table if exists t1, t2; ---enable_warnings create table t1 (a int) engine=innodb; xa start 'test1'; insert t1 values (10); @@ -165,10 +161,12 @@ update t1 set c = 'bb' where a = 2; --sleep 1 --error ER_LOCK_DEADLOCK update t1 set c = 'aa' where a = 1; +--error ER_XAER_RMFAIL select count(*) from t1; ---error ER_XA_RBDEADLOCK +--error ER_XAER_RMFAIL xa end 'a','c'; xa rollback 'a','c'; +select count(*) from t1; --disconnect con2 connect (con3,localhost,root,,); @@ -274,7 +272,7 @@ WHERE ID=$conn_id AND STATE='Searching rows for update'; --error ER_LOCK_DEADLOCK UPDATE t1 SET a=5 WHERE a=1; ---error ER_XA_RBDEADLOCK +--error ER_XAER_RMFAIL XA END 'xid1'; XA ROLLBACK 'xid1'; @@ -750,7 +748,90 @@ xa rollback '4'; set @@global.read_only=@sav_read_only; +--echo # End of 10.5 tests --echo # ---echo # End of 10.5 tests +--echo # MDEV-24981 LOAD INDEX may cause rollback of prepared XA transaction --echo # +create table t1 (f1 integer primary key) engine=innodb; +show create table t1; +xa start 'a'; +insert into t1 values (1); +xa end 'a'; +xa prepare 'a'; +--error ER_XAER_RMFAIL +load index into cache t1 key(primary); +xa commit 'a'; +select count(*) from t1; +drop table t1; + +--echo # +--echo # MDEV-37315 Assertion 
`!xid_state.xid_cache_element' failed in trans_xa_rollback +--echo # +create table t1 (a int) engine=innodb; +create table t2 (b int) engine=innodb; +create table t3 (c int) engine=innodb; +insert t1 values (1); +insert t2 values (2); + +connect con1,localhost,root; +xa start 'r:foo'; +insert t3 values (3); +xa end 'r:foo'; +xa prepare 'r:foo'; +disconnect con1; +connection default; +xa recover; + +xa start 'r:bar'; +select * from t1 for update; +connect con2,localhost,root; +xa start 'r:baz'; +update t2 set b=12; +send update t1 set a=13; +connection default; +--error ER_XAER_OUTSIDE +xa rollback 'r:foo'; +--error ER_LOCK_DEADLOCK,ER_XAER_RMFAIL +select * from t2 for update; +--error ER_XAER_OUTSIDE +xa rollback 'r:foo'; +connection con2; +reap; +disconnect con2; +connection default; +xa rollback 'r:bar'; +xa rollback 'r:foo'; + +connect con1,localhost,root; +xa start 'c:foo'; +insert t3 values (103); +xa end 'c:foo'; +xa prepare 'c:foo'; +disconnect con1; +connection default; +xa recover; + +xa start 'c:bar'; +select * from t1 for update; +connect con2,localhost,root; +xa start 'c:baz'; +update t2 set b=102; +send update t1 set a=103; +connection default; +--error ER_XAER_OUTSIDE +xa commit 'c:foo'; +--error ER_LOCK_DEADLOCK,ER_XAER_RMFAIL +select * from t2 for update; +--error ER_XAER_OUTSIDE +xa commit 'c:foo'; +connection con2; +reap; +disconnect con2; +connection default; +xa rollback 'c:bar'; +xa commit 'c:foo'; + +drop table t1,t2,t3; + +--echo # End of 10.11 tests diff --git a/mysql-test/mariadb-test-run.pl b/mysql-test/mariadb-test-run.pl index 4e9aa8b60f2..4f98775d87b 100755 --- a/mysql-test/mariadb-test-run.pl +++ b/mysql-test/mariadb-test-run.pl @@ -4559,7 +4559,7 @@ sub extract_warning_lines ($$) { qr|InnoDB: io_setup\(\) attempt|, qr|InnoDB: io_setup\(\) failed with EAGAIN|, qr|io_uring_queue_init\(\) failed with|, - qr|InnoDB: liburing disabled|, + qr|InnoDB: io_uring failed: falling back to libaio|, qr/InnoDB: Failed to set O_DIRECT on file/, 
qr|setrlimit could not change the size of core files to 'infinity';|, qr|failed to retrieve the MAC address|, diff --git a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_warn_stop_gtid.result b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_warn_stop_gtid.result new file mode 100644 index 00000000000..574715fbbc8 --- /dev/null +++ b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_warn_stop_gtid.result @@ -0,0 +1,217 @@ +# +# Clear the existing binary log state. +# +RESET MASTER; +SET @@SESSION.gtid_domain_id= 0; +SET @@SESSION.server_id= 1; +SET @@SESSION.gtid_seq_no= 1; +create table t1 (a int); +insert into t1 values (1); +SET @@SESSION.gtid_domain_id= 1; +insert into t1 values (2); +flush binary logs; +insert into t1 values (3); +# Tag binlog_f2_mid +insert into t1 values (4); +insert into t1 values (5); +SET @@SESSION.gtid_domain_id= 0; +insert into t1 values (6); +insert into t1 values (7); +flush binary logs; +drop table t1; +# Ensuring binary log order is correct +# +# +# Test using --read-from-remote-server +# +connection default; +# +# --stop-position tests +# +# Case 1.a) With one binlog file, a --stop-position before the end of +# the file should not result in a warning +# MYSQL_BINLOG --read-from-remote-server --stop-position=binlog_f1_pre_rotate binlog_f1_full --result-file=tmp/warn_position_test_file.out 2>&1 +# +# Case 1.b) With one binlog file, a --stop-position at the exact end of +# the file should not result in a warning +# MYSQL_BINLOG --read-from-remote-server --stop-position=binlog_f1_end binlog_f1_full --result-file=tmp/warn_position_test_file.out 2>&1 +# +# Case 1.c) With one binlog file, a --stop-position past the end of the +# file should(!) 
result in a warning +# MYSQL_BINLOG --read-from-remote-server --short-form --stop-position=binlog_f1_over_eof binlog_f1_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 1-1-2 before end of input +# +# Case 2.a) With two binlog files, a --stop-position targeting b2 which +# exists in the size of b1 should: +# 1) not provide any warnings +# 2) not prevent b2 from outputting its desired events before the +# stop position +# MYSQL_BINLOG --read-from-remote-server --stop-position=binlog_f2_mid binlog_f1_full binlog_f2_full --result-file=tmp/warn_position_test_file.out 2>&1 +include/assert_grep.inc [Ensure all intended GTIDs are present] +include/assert_grep.inc [Ensure the next GTID binlogged is _not_ present] +# +# Case 2.b) With two binlog files, a --stop-position targeting the end +# of binlog 2 should: +# 1) not provide any warnings +# 2) not prevent b2 from outputting its entire binary log +# MYSQL_BINLOG --read-from-remote-server --stop-position=binlog_f2_end binlog_f1_full binlog_f2_full --result-file=tmp/warn_position_test_file.out 2>&1 +include/assert_grep.inc [Ensure a GTID exists for each transaction] +include/assert_grep.inc [Ensure the last GTID binlogged is present] +# +# Case 2.c) With two binlog files, a --stop-position targeting beyond +# the eof of binlog 2 should: +# 1) provide a warning that the stop position was not reached +# 2) not prevent b2 from outputting its entire binary log +# MYSQL_BINLOG --read-from-remote-server --stop-position=binlog_f2_over_eof binlog_f1_full binlog_f2_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 0-1-5 before end of input +include/assert_grep.inc [Ensure a GTID exists for each transaction] +# +# +# Test using local binlog files +# +connection default; +# +# --stop-position tests +# +# Case 1.a) With one binlog file, a --stop-position before the end of +# the file should not result in a warning +# MYSQL_BINLOG 
--stop-position=binlog_f1_pre_rotate binlog_f1_full --result-file=tmp/warn_position_test_file.out 2>&1 +# +# Case 1.b) With one binlog file, a --stop-position at the exact end of +# the file should not result in a warning +# MYSQL_BINLOG --stop-position=binlog_f1_end binlog_f1_full --result-file=tmp/warn_position_test_file.out 2>&1 +# +# Case 1.c) With one binlog file, a --stop-position past the end of the +# file should(!) result in a warning +# MYSQL_BINLOG --short-form --stop-position=binlog_f1_over_eof binlog_f1_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 1-1-2 before end of input +# +# Case 2.a) With two binlog files, a --stop-position targeting b2 which +# exists in the size of b1 should: +# 1) not provide any warnings +# 2) not prevent b2 from outputting its desired events before the +# stop position +# MYSQL_BINLOG --stop-position=binlog_f2_mid binlog_f1_full binlog_f2_full --result-file=tmp/warn_position_test_file.out 2>&1 +include/assert_grep.inc [Ensure all intended GTIDs are present] +include/assert_grep.inc [Ensure the next GTID binlogged is _not_ present] +# +# Case 2.b) With two binlog files, a --stop-position targeting the end +# of binlog 2 should: +# 1) not provide any warnings +# 2) not prevent b2 from outputting its entire binary log +# MYSQL_BINLOG --stop-position=binlog_f2_end binlog_f1_full binlog_f2_full --result-file=tmp/warn_position_test_file.out 2>&1 +include/assert_grep.inc [Ensure a GTID exists for each transaction] +include/assert_grep.inc [Ensure the last GTID binlogged is present] +# +# Case 2.c) With two binlog files, a --stop-position targeting beyond +# the eof of binlog 2 should: +# 1) provide a warning that the stop position was not reached +# 2) not prevent b2 from outputting its entire binary log +# MYSQL_BINLOG --stop-position=binlog_f2_over_eof binlog_f1_full binlog_f2_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 0-1-5 before 
end of input +include/assert_grep.inc [Ensure a GTID exists for each transaction] +# Poison GTID lists with an extraneous GTID domain - expect additional `9-1-1` warnings +# +# +# Test extraneous stop GTID using --read-from-remote-server +# +connection default; +# +# --stop-position tests +# +# Case 1.a) With one binlog file, a --stop-position before the end of +# the file should not result in a warning +# MYSQL_BINLOG --read-from-remote-server --stop-position=binlog_f1_pre_rotate binlog_f1_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 9-1-1 before end of input +# +# Case 1.b) With one binlog file, a --stop-position at the exact end of +# the file should not result in a warning +# MYSQL_BINLOG --read-from-remote-server --stop-position=binlog_f1_end binlog_f1_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 9-1-1 before end of input +# +# Case 1.c) With one binlog file, a --stop-position past the end of the +# file should(!) 
result in a warning +# MYSQL_BINLOG --read-from-remote-server --short-form --stop-position=binlog_f1_over_eof binlog_f1_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 1-1-2 before end of input +WARNING: Did not reach stop position 9-1-1 before end of input +# +# Case 2.a) With two binlog files, a --stop-position targeting b2 which +# exists in the size of b1 should: +# 1) not provide any warnings +# 2) not prevent b2 from outputting its desired events before the +# stop position +# MYSQL_BINLOG --read-from-remote-server --stop-position=binlog_f2_mid binlog_f1_full binlog_f2_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 9-1-1 before end of input +include/assert_grep.inc [Ensure all intended GTIDs are present] +include/assert_grep.inc [Ensure the next GTID binlogged is _not_ present] +# +# Case 2.b) With two binlog files, a --stop-position targeting the end +# of binlog 2 should: +# 1) not provide any warnings +# 2) not prevent b2 from outputting its entire binary log +# MYSQL_BINLOG --read-from-remote-server --stop-position=binlog_f2_end binlog_f1_full binlog_f2_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 9-1-1 before end of input +include/assert_grep.inc [Ensure a GTID exists for each transaction] +include/assert_grep.inc [Ensure the last GTID binlogged is present] +# +# Case 2.c) With two binlog files, a --stop-position targeting beyond +# the eof of binlog 2 should: +# 1) provide a warning that the stop position was not reached +# 2) not prevent b2 from outputting its entire binary log +# MYSQL_BINLOG --read-from-remote-server --stop-position=binlog_f2_over_eof binlog_f1_full binlog_f2_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 0-1-5 before end of input +WARNING: Did not reach stop position 9-1-1 before end of input +include/assert_grep.inc [Ensure a GTID exists for each 
transaction] +# +# +# Test extraneous stop GTID using local binlog files +# +connection default; +# +# --stop-position tests +# +# Case 1.a) With one binlog file, a --stop-position before the end of +# the file should not result in a warning +# MYSQL_BINLOG --stop-position=binlog_f1_pre_rotate binlog_f1_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 9-1-1 before end of input +# +# Case 1.b) With one binlog file, a --stop-position at the exact end of +# the file should not result in a warning +# MYSQL_BINLOG --stop-position=binlog_f1_end binlog_f1_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 9-1-1 before end of input +# +# Case 1.c) With one binlog file, a --stop-position past the end of the +# file should(!) result in a warning +# MYSQL_BINLOG --short-form --stop-position=binlog_f1_over_eof binlog_f1_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 1-1-2 before end of input +WARNING: Did not reach stop position 9-1-1 before end of input +# +# Case 2.a) With two binlog files, a --stop-position targeting b2 which +# exists in the size of b1 should: +# 1) not provide any warnings +# 2) not prevent b2 from outputting its desired events before the +# stop position +# MYSQL_BINLOG --stop-position=binlog_f2_mid binlog_f1_full binlog_f2_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 9-1-1 before end of input +include/assert_grep.inc [Ensure all intended GTIDs are present] +include/assert_grep.inc [Ensure the next GTID binlogged is _not_ present] +# +# Case 2.b) With two binlog files, a --stop-position targeting the end +# of binlog 2 should: +# 1) not provide any warnings +# 2) not prevent b2 from outputting its entire binary log +# MYSQL_BINLOG --stop-position=binlog_f2_end binlog_f1_full binlog_f2_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop 
position 9-1-1 before end of input +include/assert_grep.inc [Ensure a GTID exists for each transaction] +include/assert_grep.inc [Ensure the last GTID binlogged is present] +# +# Case 2.c) With two binlog files, a --stop-position targeting beyond +# the eof of binlog 2 should: +# 1) provide a warning that the stop position was not reached +# 2) not prevent b2 from outputting its entire binary log +# MYSQL_BINLOG --stop-position=binlog_f2_over_eof binlog_f1_full binlog_f2_full --result-file=tmp/warn_position_test_file.out 2>&1 +WARNING: Did not reach stop position 0-1-5 before end of input +WARNING: Did not reach stop position 9-1-1 before end of input +include/assert_grep.inc [Ensure a GTID exists for each transaction] +# +# End of binlog_mysqlbinlog_warn_stop_gtid.test diff --git a/mysql-test/suite/binlog/r/binlog_unsafe.result b/mysql-test/suite/binlog/r/binlog_unsafe.result index fe5af9333e8..6eb7f3196d2 100644 --- a/mysql-test/suite/binlog/r/binlog_unsafe.result +++ b/mysql-test/suite/binlog/r/binlog_unsafe.result @@ -2540,8 +2540,8 @@ Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Gtid # # BEGIN GTID #-#-# master-bin.000001 # Annotate_rows # # insert into t2(a) values(new.a) master-bin.000001 # Table_map # # table_id: # (test.t1) -master-bin.000001 # Table_map # # table_id: # (test.t3) master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Table_map # # table_id: # (test.t3) master-bin.000001 # Write_rows_v1 # # table_id: # master-bin.000001 # Write_rows_v1 # # table_id: # master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F diff --git a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_warn_stop_gtid.test b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_warn_stop_gtid.test new file mode 100644 index 00000000000..de8aac6eee4 --- /dev/null +++ b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_warn_stop_gtid.test @@ -0,0 +1,103 @@ +# +# Test ensures that --stop-position work correctly for mysqlbinlog. 
This high +# level test sets up the binary log (and tags certain locations for comparison), +# and the helper file binlog_mysqlbinlog_warn_stop_position.inc performs the +# actual tests. +# +# References: +# MDEV-34614 mysqlbinlog warn on EOF before GTID in --stop-position +# +--source include/have_log_bin.inc + +--let $binlog_out_relpath= tmp/warn_position_test_file.out +--let $binlog_out= $MYSQLTEST_VARDIR/$binlog_out_relpath + +--echo # +--echo # Clear the existing binary log state. +--echo # +RESET MASTER; + +SET @@SESSION.gtid_domain_id= 0; +SET @@SESSION.server_id= 1; +--let $binlog_f1= query_get_value(SHOW MASTER STATUS, File, 1) +SET @@SESSION.gtid_seq_no= 1; +create table t1 (a int); +insert into t1 values (1); +SET @@SESSION.gtid_domain_id= 1; +insert into t1 values (2); +--let $binlog_f1_pre_rotate= `SELECT @@GLOBAL.gtid_binlog_pos` +flush binary logs; +--let $binlog_f1_end= `SELECT @@GLOBAL.gtid_binlog_pos` + +--let $binlog_f2= query_get_value(SHOW MASTER STATUS, File, 1) +insert into t1 values (3); +--echo # Tag binlog_f2_mid +--let $binlog_f2_mid= `SELECT @@GLOBAL.gtid_binlog_pos` +--let $binlog_f1_over_eof= $binlog_f2_mid +insert into t1 values (4); +--let $binlog_f2_gtid_after_midpoint= `SELECT @@GLOBAL.gtid_binlog_pos` +insert into t1 values (5); +SET @@SESSION.gtid_domain_id= 0; +insert into t1 values (6); +insert into t1 values (7); +--let $binlog_f2_last_gtid= `SELECT REGEXP_SUBSTR(@@GLOBAL.gtid_binlog_pos, '0-1-\\\\\\\\d++')` +flush binary logs; +--let $binlog_f2_end= `SELECT @@GLOBAL.gtid_binlog_pos` +drop table t1; +--let $binlog_f2_over_eof= `SELECT @@GLOBAL.gtid_binlog_pos` + +--echo # Ensuring binary log order is correct +--let $binlog_f1_show= query_get_value(SHOW BINARY LOGS, Log_name, 1) +if (`SELECT strcmp('$binlog_f1','$binlog_f1_show') != 0`) +{ + --echo # Real binlog_f1: $binlog_f1 + --echo # First binlog in SHOW BINLOG FILES: $binlog_f1_show + --die Wrong order of binary log files in SHOW BINARY LOGS +} +--let $binlog_f2_show= 
query_get_value(SHOW BINARY LOGS, Log_name, 2) +if (`SELECT strcmp('$binlog_f2','$binlog_f2_show') != 0`) +{ + --echo # Real binlog_f2: $binlog_f2 + --echo # First binlog in SHOW BINLOG FILES: $binlog_f2_show + --die Wrong order of binary log files in SHOW BINARY LOGS +} + +--let $domain_id= [01] + +--echo # +--echo # +--echo # Test using --read-from-remote-server +--echo # +--let $read_from_remote_server= 1 +--source binlog_mysqlbinlog_warn_stop_position.inc + +--echo # +--echo # +--echo # Test using local binlog files +--echo # +--let $read_from_remote_server= 0 +--source binlog_mysqlbinlog_warn_stop_position.inc + +--echo # Poison GTID lists with an extraneous GTID domain - expect additional `9-1-1` warnings +--let $binlog_f1_pre_rotate= 9-1-1 +--let $binlog_f2_mid= $binlog_f1_pre_rotate,$binlog_f2_mid +--let $binlog_f1_end= $binlog_f1_pre_rotate,$binlog_f1_end +--let $binlog_f2_end= $binlog_f1_pre_rotate,$binlog_f2_end +--let $binlog_f1_over_eof= $binlog_f1_pre_rotate,$binlog_f1_over_eof +--let $binlog_f2_over_eof= $binlog_f1_pre_rotate,$binlog_f2_over_eof +--echo # +--echo # +--echo # Test extraneous stop GTID using --read-from-remote-server +--echo # +--let $read_from_remote_server= 1 +--source binlog_mysqlbinlog_warn_stop_position.inc + +--echo # +--echo # +--echo # Test extraneous stop GTID using local binlog files +--echo # +--let $read_from_remote_server= 0 +--source binlog_mysqlbinlog_warn_stop_position.inc + +--echo # +--echo # End of binlog_mysqlbinlog_warn_stop_gtid.test diff --git a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_warn_stop_position.inc b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_warn_stop_position.inc index 2c3c565d692..f89c7bf83c8 100644 --- a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_warn_stop_position.inc +++ b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_warn_stop_position.inc @@ -50,9 +50,9 @@ if (!$read_from_remote_server) --echo # --echo # Case 1.c) With one binlog file, a --stop-position past the end of the --echo # file 
should(!) result in a warning ---let $binlog_f1_over_eof= `SELECT $binlog_f1_end + 1` --echo # MYSQL_BINLOG $PARAM_READ_FROM_REMOTE_OUT --short-form --stop-position=binlog_f1_over_eof binlog_f1_full --result-file=$binlog_out_relpath 2>&1 --replace_result $binlog_f1_over_eof +--sorted_result --exec $MYSQL_BINLOG $PARAM_READ_FROM_REMOTE --short-form --stop-position=$binlog_f1_over_eof $binlog_f1_full --result-file=$binlog_out 2>&1 --echo # @@ -65,7 +65,6 @@ if (!$read_from_remote_server) --exec $MYSQL_BINLOG $PARAM_READ_FROM_REMOTE --stop-position=$binlog_f2_mid $binlog_f1_full $binlog_f2_full --result-file=$binlog_out 2>&1 --let $server_id= `SELECT @@GLOBAL.server_id` ---let $domain_id= `SELECT @@GLOBAL.gtid_domain_id` --let $assert_file= $binlog_out --let $assert_text= Ensure all intended GTIDs are present --let $assert_select= GTID $domain_id-$server_id- @@ -86,7 +85,6 @@ if (!$read_from_remote_server) --exec $MYSQL_BINLOG $PARAM_READ_FROM_REMOTE --stop-position=$binlog_f2_end $binlog_f1_full $binlog_f2_full --result-file=$binlog_out 2>&1 --let $server_id= `SELECT @@GLOBAL.server_id` ---let $domain_id= `SELECT @@GLOBAL.gtid_domain_id` --let $assert_text= Ensure a GTID exists for each transaction --let $assert_select= GTID $domain_id-$server_id- --let $assert_count= 8 @@ -102,13 +100,12 @@ if (!$read_from_remote_server) --echo # the eof of binlog 2 should: --echo # 1) provide a warning that the stop position was not reached --echo # 2) not prevent b2 from outputting its entire binary log ---let $binlog_f2_over_eof= `SELECT $binlog_f2_end + 1` --echo # MYSQL_BINLOG $PARAM_READ_FROM_REMOTE_OUT --stop-position=binlog_f2_over_eof binlog_f1_full binlog_f2_full --result-file=$binlog_out_relpath 2>&1 --replace_result $binlog_f2_over_eof +--sorted_result --exec $MYSQL_BINLOG $PARAM_READ_FROM_REMOTE --stop-position=$binlog_f2_over_eof $binlog_f1_full $binlog_f2_full --result-file=$binlog_out 2>&1 --let $server_id= `SELECT @@GLOBAL.server_id` ---let $domain_id= `SELECT 
@@GLOBAL.gtid_domain_id` --let $assert_text= Ensure a GTID exists for each transaction --let $assert_select= GTID $domain_id-$server_id- --let $assert_count= 8 diff --git a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_warn_stop_position.test b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_warn_stop_position.test index 472e208229c..889257931d8 100644 --- a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_warn_stop_position.test +++ b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_warn_stop_position.test @@ -64,6 +64,10 @@ if ($binlog_f2_mid > $binlog_f1_end) --die Mid point chosen to end in binlog 2 does not exist in earlier binlog } +--let $domain_id= `SELECT @@GLOBAL.gtid_domain_id` +--let $binlog_f1_over_eof= `SELECT $binlog_f1_end + 1` +--let $binlog_f2_over_eof= `SELECT $binlog_f2_end + 1` + --echo # --echo # --echo # Test using --read-from-remote-server diff --git a/mysql-test/suite/engines/funcs/r/ta_set_drop_default.result b/mysql-test/suite/engines/funcs/r/ta_set_drop_default.result index 0cd5032b323..d618a8fed0b 100644 --- a/mysql-test/suite/engines/funcs/r/ta_set_drop_default.result +++ b/mysql-test/suite/engines/funcs/r/ta_set_drop_default.result @@ -24,8 +24,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -34,7 +32,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` tinyint(4) NOT NULL, - `c2` tinyint(4), + `c2` tinyint(4) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -65,8 +63,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER 
BY c1; c1 c2 1 10 @@ -75,7 +71,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` smallint(6) NOT NULL, - `c2` smallint(6), + `c2` smallint(6) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -106,8 +102,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -116,7 +110,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` mediumint(9) NOT NULL, - `c2` mediumint(9), + `c2` mediumint(9) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -147,8 +141,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -157,7 +149,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` int(11) NOT NULL, - `c2` int(11), + `c2` int(11) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -188,8 +180,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -198,7 +188,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` int(11) NOT NULL, - `c2` int(11), + `c2` int(11) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -229,8 +219,6 @@ t1 CREATE TABLE `t1` ( ) 
ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -239,7 +227,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` bigint(20) NOT NULL, - `c2` bigint(20), + `c2` bigint(20) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -270,8 +258,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -280,7 +266,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` double NOT NULL, - `c2` double, + `c2` double DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -311,8 +297,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -321,7 +305,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` double NOT NULL, - `c2` double, + `c2` double DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -352,8 +336,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -362,7 +344,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` float 
NOT NULL, - `c2` float, + `c2` float DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -393,8 +375,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -403,7 +383,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` decimal(10,0) NOT NULL, - `c2` decimal(10,0), + `c2` decimal(10,0) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -434,8 +414,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -444,7 +422,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` decimal(10,0) NOT NULL, - `c2` decimal(10,0), + `c2` decimal(10,0) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -475,8 +453,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -485,7 +461,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` tinyint(4) NOT NULL, - `c2` tinyint(4), + `c2` tinyint(4) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -516,8 +492,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER COLUMN 
c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -526,7 +500,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` smallint(6) NOT NULL, - `c2` smallint(6), + `c2` smallint(6) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -557,8 +531,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -567,7 +539,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` mediumint(9) NOT NULL, - `c2` mediumint(9), + `c2` mediumint(9) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -598,8 +570,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -608,7 +578,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` int(11) NOT NULL, - `c2` int(11), + `c2` int(11) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -639,8 +609,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -649,7 +617,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` int(11) NOT NULL, - `c2` int(11), + `c2` int(11) 
DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -680,8 +648,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -690,7 +656,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` bigint(20) NOT NULL, - `c2` bigint(20), + `c2` bigint(20) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -721,8 +687,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -731,7 +695,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` double NOT NULL, - `c2` double, + `c2` double DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -762,8 +726,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -772,7 +734,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` double NOT NULL, - `c2` double, + `c2` double DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -803,8 +765,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: 
-Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -813,7 +773,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` float NOT NULL, - `c2` float, + `c2` float DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -844,8 +804,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -854,7 +812,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` decimal(10,0) NOT NULL, - `c2` decimal(10,0), + `c2` decimal(10,0) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; @@ -885,8 +843,6 @@ t1 CREATE TABLE `t1` ( ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; INSERT IGNORE INTO t1(c1) VALUES(2); -Warnings: -Warning 1364 Field 'c2' doesn't have a default value SELECT * FROM t1 ORDER BY c1; c1 c2 1 10 @@ -895,7 +851,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` decimal(10,0) NOT NULL, - `c2` decimal(10,0), + `c2` decimal(10,0) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t1; diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index 23bf40c409a..438b1e63cfd 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -14,3 +14,5 @@ galera_wan : MDEV-35940 Unallowed state transition: donor -> synced in galera_wa galera_vote_rejoin_ddl : MDEV-35940 Unallowed state transition: donor -> synced in galera_wan MW-329 : MDEV-35951 Complete freeze during MW-329 test galera_vote_rejoin_dml : MDEV-35964 
Assertion `ist_seqno >= cc_seqno' failed in galera_vote_rejoin_dml +galera_var_notify_cmd : MDEV-37257 WSREP: Notification command failed: 1 (Operation not permitted) +galera_var_notify_ssl_ipv6 : MDEV-37257 WSREP: Notification command failed: 1 (Operation not permitted) diff --git a/mysql-test/suite/galera/r/enforce_storage_engine2.result b/mysql-test/suite/galera/r/enforce_storage_engine2.result index 07ce1ca3dd6..46037aff9b4 100644 --- a/mysql-test/suite/galera/r/enforce_storage_engine2.result +++ b/mysql-test/suite/galera/r/enforce_storage_engine2.result @@ -7,23 +7,15 @@ connection node_1; connection node_1; CREATE TABLE t1(i INT) ENGINE=INNODB; CREATE TABLE t2(i INT) ENGINE=MYISAM; -Warnings: -Note 1266 Using storage engine InnoDB for table 't2' -Note 1266 Using storage engine InnoDB for table 't2' +ERROR HY000: The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement connection node_2; SHOW TABLES; Tables_in_test t1 -t2 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `i` int(11) DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci -SHOW CREATE TABLE t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `i` int(11) DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci -DROP TABLE t1, t2; +DROP TABLE t1; # End of tests diff --git a/mysql-test/suite/galera/r/galera_aria.result b/mysql-test/suite/galera/r/galera_aria.result new file mode 100644 index 00000000000..435a0525a0f --- /dev/null +++ b/mysql-test/suite/galera/r/galera_aria.result @@ -0,0 +1,25 @@ +connection node_2; +connection node_1; +set session sql_mode=''; +SET @@enforce_storage_engine=INNODB; +CREATE TABLE t1 (c INT ) ENGINE=ARIA; +ERROR HY000: The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement +SHOW WARNINGS; +Level Code Message +Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot 
execute this statement +Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set +Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement +Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set +CREATE TABLE t1 (c INT ); +DROP TABLE t1; +CREATE TABLE t1 (c INT ) ENGINE=INNODB; +DROP TABLE t1; +SET @@enforce_storage_engine=ARIA; +CREATE TABLE t1 (c INT ) ENGINE=INNODB; +ERROR HY000: The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement +SHOW WARNINGS; +Level Code Message +Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement +Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set +Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement +Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set diff --git a/mysql-test/suite/galera/r/mysql-wsrep#198.result b/mysql-test/suite/galera/r/mysql-wsrep#198.result index bf58018ec76..2be2d6c0186 100644 --- a/mysql-test/suite/galera/r/mysql-wsrep#198.result +++ b/mysql-test/suite/galera/r/mysql-wsrep#198.result @@ -7,14 +7,18 @@ SELECT 1 FROM DUAL; 1 1 LOCK TABLE t2 WRITE; +connect node_2_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_2; +connection node_2_ctrl; +SET SESSION wsrep_sync_wait=0; connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2; connection node_2a; OPTIMIZE TABLE t1,t2;; +connection node_2_ctrl; +SET SESSION wsrep_sync_wait = 0; connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2; connection node_2b; REPAIR TABLE t1,t2;; -connection node_2; -SET SESSION wsrep_sync_wait = 0; +connection node_2_ctrl; connection node_1; INSERT INTO t2 VALUES (1); connection node_2; @@ -34,3 +38,4 @@ DROP TABLE t2; connection node_1; disconnect node_2a; disconnect node_2b; +disconnect node_2_ctrl; diff --git 
a/mysql-test/suite/galera/t/enforce_storage_engine2.test b/mysql-test/suite/galera/t/enforce_storage_engine2.test index 7a822bced59..dd52ea9e239 100644 --- a/mysql-test/suite/galera/t/enforce_storage_engine2.test +++ b/mysql-test/suite/galera/t/enforce_storage_engine2.test @@ -1,5 +1,6 @@ --source include/galera_cluster.inc --source include/have_innodb.inc +--source include/have_aria.inc --echo # --echo # MDEV-9312: storage engine not enforced during galera cluster @@ -7,14 +8,21 @@ --echo # --connection node_1 CREATE TABLE t1(i INT) ENGINE=INNODB; +# +# This is not anymore supported because enforce_storage_engine +# is local setting and final used storage engine +# on other members of cluster depend on their configuration. +# Currently, there is no way to query remote node +# configuration. +# +--error ER_OPTION_PREVENTS_STATEMENT CREATE TABLE t2(i INT) ENGINE=MYISAM; --connection node_2 SHOW TABLES; SHOW CREATE TABLE t1; -SHOW CREATE TABLE t2; # Cleanup -DROP TABLE t1, t2; +DROP TABLE t1; --echo # End of tests diff --git a/mysql-test/suite/galera/t/galera_aria.test b/mysql-test/suite/galera/t/galera_aria.test new file mode 100644 index 00000000000..24dd2e5048b --- /dev/null +++ b/mysql-test/suite/galera/t/galera_aria.test @@ -0,0 +1,19 @@ +--source include/galera_cluster.inc +--source include/have_aria.inc +--source include/log_bin.inc + +set session sql_mode=''; +SET @@enforce_storage_engine=INNODB; +--error ER_OPTION_PREVENTS_STATEMENT +CREATE TABLE t1 (c INT ) ENGINE=ARIA; +SHOW WARNINGS; + +CREATE TABLE t1 (c INT ); +DROP TABLE t1; +CREATE TABLE t1 (c INT ) ENGINE=INNODB; +DROP TABLE t1; + +SET @@enforce_storage_engine=ARIA; +--error ER_OPTION_PREVENTS_STATEMENT +CREATE TABLE t1 (c INT ) ENGINE=INNODB; +SHOW WARNINGS; diff --git a/mysql-test/suite/galera/t/mysql-wsrep#198.test b/mysql-test/suite/galera/t/mysql-wsrep#198.test index 98dea684f0d..78facd64356 100644 --- a/mysql-test/suite/galera/t/mysql-wsrep#198.test +++ 
b/mysql-test/suite/galera/t/mysql-wsrep#198.test @@ -10,21 +10,33 @@ SELECT 1 FROM DUAL; LOCK TABLE t2 WRITE; +--connect node_2_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_2 +--connection node_2_ctrl +SET SESSION wsrep_sync_wait=0; + --connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2 --connection node_2a --send OPTIMIZE TABLE t1,t2; +--connection node_2_ctrl +SET SESSION wsrep_sync_wait = 0; +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'acquiring total order isolation%'; +--let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST +--source include/wait_condition_with_debug_and_kill.inc + --connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2 --connection node_2b --send REPAIR TABLE t1,t2; ---connection node_2 -SET SESSION wsrep_sync_wait = 0; ---let $wait_condition = SELECT COUNT(*) BETWEEN 1 AND 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'Waiting to execute in isolation%'; +--connection node_2_ctrl +--let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'acquiring total order isolation%'; --let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST --source include/wait_condition_with_debug_and_kill.inc --connection node_1 +# We have LOCK TABLE in node_2 so this could fail on lock wait +# or next statement is fast enought and succeed +--error 0,ER_LOCK_WAIT_TIMEOUT INSERT INTO t2 VALUES (1); --connection node_2 @@ -43,3 +55,4 @@ DROP TABLE t2; --disconnect node_2a --disconnect node_2b +--disconnect node_2_ctrl diff --git a/mysql-test/suite/galera_3nodes/r/inconsistency_shutdown.result b/mysql-test/suite/galera_3nodes/r/inconsistency_shutdown.result index cd4087e01ca..bbcad5ee4db 100644 --- a/mysql-test/suite/galera_3nodes/r/inconsistency_shutdown.result 
+++ b/mysql-test/suite/galera_3nodes/r/inconsistency_shutdown.result @@ -32,8 +32,8 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 6; UPDATE t1 SET f2 = 1 WHERE f1 = 7; UPDATE t1 SET f2 = 1 WHERE f1 = 8; connection node_2; -SET wsrep_on=OFF; -SET wsrep_on=ON; +# make sure all events landed to slave queue +set wsrep_sync_wait=0; UNLOCK TABLES; SET SESSION wsrep_on = ON; SET SESSION wsrep_sync_wait = 15; @@ -56,7 +56,8 @@ f1 f2 7 1 8 1 connection node_2; -SET GLOBAL wsrep_on=OFF; +# Gracefully restart the node +set wsrep_on=OFF; # restart DROP TABLE t1; connection node_1; @@ -73,11 +74,15 @@ INSERT INTO t1 VALUES (8, 0); COMMIT; CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 INT); connection node_2; +# Allow 1K slave queue without flow control SET GLOBAL wsrep_provider_options='gcs.fc_limit=1K'; +# Introduce inconsistency SET wsrep_on=OFF; DROP TABLE t2; SET wsrep_on=ON; +# set up sync point to ensure DROP TABLE replication order below SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync'; +# Build up slave queue: LOCK TABLES t1 READ; connection node_1; UPDATE t1 SET f2 = 1 WHERE f1 = 1; @@ -86,18 +91,19 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 3; UPDATE t1 SET f2 = 1 WHERE f1 = 4; UPDATE t1 SET f2 = 2 WHERE f1 = 4; /* dependent applier */; +# interleave a failing statement connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2; connection node_2a; DROP TABLE t2;; +# make sure DROP TABLE from above has replicated connection node_2; -SET wsrep_on=OFF; +set wsrep_sync_wait=0; "Wait for DROP TABLE to replicate" SET SESSION wsrep_on = 0; -SET SESSION wsrep_on = 0; +SET SESSION wsrep_on = 1; SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync'; SET GLOBAL wsrep_provider_options = 'dbug='; "DROP TABLE replicated" -SET wsrep_on=ON; connection node_1; UPDATE t1 SET f2 = 3 WHERE f1 = 4; /* dependent applier */ @@ -106,8 +112,7 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 6; UPDATE t1 SET f2 = 1 WHERE f1 = 7; UPDATE t1 SET f2 = 1 WHERE f1 = 8; connection node_2; -SET wsrep_on=OFF; 
-SET wsrep_on=ON; +# make sure all events landed to slave queue UNLOCK TABLES; connection node_2a; ERROR 42S02: Unknown table 'test.t2' @@ -128,11 +133,11 @@ f1 f2 7 1 8 1 connection node_2; -SET SESSION wsrep_on = ON; +set wsrep_on=OFF; SET SESSION wsrep_sync_wait = 15; -SET SESSION wsrep_on = ON; +# Wait for the node to shutdown replication SET SESSION wsrep_sync_wait = 15; -SET GLOBAL wsrep_on=OFF; +# Gracefully restart the node # restart DROP TABLE t1; CALL mtr.add_suppression("Can't find record in 't1'"); diff --git a/mysql-test/suite/galera_3nodes/t/inconsistency_shutdown.test b/mysql-test/suite/galera_3nodes/t/inconsistency_shutdown.test index 347433a6f14..dcd8a7b15ca 100644 --- a/mysql-test/suite/galera_3nodes/t/inconsistency_shutdown.test +++ b/mysql-test/suite/galera_3nodes/t/inconsistency_shutdown.test @@ -33,6 +33,7 @@ SET wsrep_on=OFF; DELETE FROM t1 WHERE f1 = 2; DELETE FROM t1 WHERE f1 = 4; SET wsrep_on=ON; +--source include/galera_wait_ready.inc # Build up slave queue: # - first 8 events will be picked by slave threads @@ -51,11 +52,11 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 7; UPDATE t1 SET f2 = 1 WHERE f1 = 8; --connection node_2 -# make sure all events landed to slave queue -SET wsrep_on=OFF; +--echo # make sure all events landed to slave queue +set wsrep_sync_wait=0; --let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_recv_queue'; --source include/wait_condition.inc -SET wsrep_on=ON; + UNLOCK TABLES; --source include/wsrep_wait_disconnect.inc # Wait for the node to shutdown replication @@ -70,8 +71,8 @@ SHOW STATUS LIKE 'wsrep_cluster_size'; SELECT * FROM t1; --connection node_2 -#Gracefully restart the node -SET GLOBAL wsrep_on=OFF; +--echo # Gracefully restart the node +set wsrep_on=OFF; --source include/shutdown_mysqld.inc --source include/start_mysqld.inc --source include/galera_wait_ready.inc @@ -98,20 +99,21 @@ COMMIT; CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 INT); 
--connection node_2 -# Allow 1K slave queue without flow control +--echo # Allow 1K slave queue without flow control SET GLOBAL wsrep_provider_options='gcs.fc_limit=1K'; -# Introduce inconsistency -SET wsrep_on=OFF; --let $wait_condition = SELECT COUNT(*)=1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't2'; --source include/wait_condition.inc +--echo # Introduce inconsistency +SET wsrep_on=OFF; DROP TABLE t2; SET wsrep_on=ON; +--source include/galera_wait_ready.inc -# set up sync point to ensure DROP TABLE replication order below +--echo # set up sync point to ensure DROP TABLE replication order below --let galera_sync_point = after_replicate_sync --source include/galera_set_sync_point.inc -# Build up slave queue: +--echo # Build up slave queue: # - first 8 events will be picked by slave threads # - one more event will be waiting in slave queue LOCK TABLES t1 READ; @@ -123,20 +125,19 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 3; UPDATE t1 SET f2 = 1 WHERE f1 = 4; UPDATE t1 SET f2 = 2 WHERE f1 = 4; /* dependent applier */; -# interleave a failing statement +--echo # interleave a failing statement --connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2 --connection node_2a --send DROP TABLE t2; -# make sure DROP TABLE from above has replicated +--echo # make sure DROP TABLE from above has replicated --connection node_2 -SET wsrep_on=OFF; +set wsrep_sync_wait=0; --echo "Wait for DROP TABLE to replicate" --source include/galera_wait_sync_point.inc --source include/galera_signal_sync_point.inc --source include/galera_clear_sync_point.inc --echo "DROP TABLE replicated" -SET wsrep_on=ON; --connection node_1 UPDATE t1 SET f2 = 3 WHERE f1 = 4; /* dependent applier */ @@ -146,11 +147,10 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 7; UPDATE t1 SET f2 = 1 WHERE f1 = 8; --connection node_2 -# make sure all events landed to slave queue -SET wsrep_on=OFF; +--echo # make sure all events landed to slave queue --let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM 
INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_recv_queue'; --source include/wait_condition.inc -SET wsrep_on=ON; + UNLOCK TABLES; --connection node_2a @@ -165,12 +165,13 @@ SHOW STATUS LIKE 'wsrep_cluster_size'; SELECT * FROM t1; --connection node_2 +set wsrep_on=OFF; --source include/wsrep_wait_disconnect.inc -# Wait for the node to shutdown replication +--echo # Wait for the node to shutdown replication --let $members=0 --source include/wsrep_wait_membership.inc -# Gracefully restart the node -SET GLOBAL wsrep_on=OFF; +--echo # Gracefully restart the node + --source include/shutdown_mysqld.inc --source include/start_mysqld.inc --source include/galera_wait_ready.inc diff --git a/mysql-test/suite/innodb/r/doublewrite_debug.result b/mysql-test/suite/innodb/r/doublewrite_debug.result index a743217f34e..e1d2b0137e1 100644 --- a/mysql-test/suite/innodb/r/doublewrite_debug.result +++ b/mysql-test/suite/innodb/r/doublewrite_debug.result @@ -26,13 +26,13 @@ SET GLOBAL innodb_fast_shutdown = 0; # restart: --debug_dbug=+d,ib_log_checkpoint_avoid_hard --innodb_flush_sync=0 begin; insert into t1 values (6, repeat('%', 400)); +SET GLOBAL innodb_max_dirty_pages_pct_lwm=0, innodb_max_dirty_pages_pct=0; # Make the first page dirty for system tablespace set global innodb_saved_page_number_debug = 0; set global innodb_fil_make_page_dirty_debug = 0; # Make the second page dirty for system tablespace set global innodb_saved_page_number_debug = 1; set global innodb_fil_make_page_dirty_debug = 0; -set global innodb_buf_flush_list_now = 1; # Kill the server # Make the 1st page (page_no=0) and 2nd page (page_no=1) # of the system tablespace all zeroes. 
diff --git a/mysql-test/suite/innodb/r/innodb-ac-non-locking-select.result b/mysql-test/suite/innodb/r/innodb-ac-non-locking-select.result index 5d12c1076e7..bfc746a2879 100644 --- a/mysql-test/suite/innodb/r/innodb-ac-non-locking-select.result +++ b/mysql-test/suite/innodb/r/innodb-ac-non-locking-select.result @@ -3,12 +3,19 @@ INSERT INTO t1 VALUES(0, "0"); INSERT INTO t1 VALUES(1, "1"); INSERT INTO t1 VALUES(2, "2"); INSERT INTO t1 VALUES(3, "3"); +set innodb_snapshot_isolation=0; connect con1,localhost,root,,; +set innodb_snapshot_isolation=0; connect con2,localhost,root,,; +set innodb_snapshot_isolation=0; connect con3,localhost,root,,; +set innodb_snapshot_isolation=0; connect con4,localhost,root,,; +set innodb_snapshot_isolation=0; connect con5,localhost,root,,; +set innodb_snapshot_isolation=0; connect con6,localhost,root,,; +set innodb_snapshot_isolation=0; connection default; SET AUTOCOMMIT=0; BEGIN; diff --git a/mysql-test/suite/innodb/r/innodb-alter.result b/mysql-test/suite/innodb/r/innodb-alter.result index 8ea757e0244..3e51b1cc7de 100644 --- a/mysql-test/suite/innodb/r/innodb-alter.result +++ b/mysql-test/suite/innodb/r/innodb-alter.result @@ -53,7 +53,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` int(11) NOT NULL, - `c2` int(11), + `c2` int(11) DEFAULT NULL, `ct` text DEFAULT NULL, PRIMARY KEY (`c1`), KEY `c2` (`c2`) diff --git a/mysql-test/suite/innodb/r/innodb-wl5980-alter.result b/mysql-test/suite/innodb/r/innodb-wl5980-alter.result index 70ba67cff07..ba9b246d573 100644 --- a/mysql-test/suite/innodb/r/innodb-wl5980-alter.result +++ b/mysql-test/suite/innodb/r/innodb-wl5980-alter.result @@ -58,7 +58,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` int(11) NOT NULL, - `c2` int(11), + `c2` int(11) DEFAULT NULL, `ct` text DEFAULT NULL, PRIMARY KEY (`c1`), KEY `c2` (`c2`) diff --git a/mysql-test/suite/innodb/r/innodb_buffer_pool_shrink.result 
b/mysql-test/suite/innodb/r/innodb_buffer_pool_shrink.result new file mode 100644 index 00000000000..187dcfbd587 --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb_buffer_pool_shrink.result @@ -0,0 +1,11 @@ +call mtr.add_suppression("innodb_buffer_pool_size change aborted"); +CREATE TABLE t (c INT) ENGINE=InnoDB PARTITION BY HASH(c) PARTITIONS 512; +BEGIN; +SELECT * FROM t LOCK IN SHARE MODE; +c +SET @save_size = @@GLOBAL.innodb_buffer_pool_size; +SET GLOBAL innodb_buffer_pool_size=6291456; +COMMIT; +SET GLOBAL innodb_buffer_pool_size=6291456; +SET GLOBAL innodb_buffer_pool_size = @save_size; +DROP TABLE t; diff --git a/mysql-test/suite/innodb/r/innodb_buffer_pool_shrink_temporary.result b/mysql-test/suite/innodb/r/innodb_buffer_pool_shrink_temporary.result new file mode 100644 index 00000000000..bfaf8df7a2e --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb_buffer_pool_shrink_temporary.result @@ -0,0 +1,17 @@ +call mtr.add_suppression("innodb_buffer_pool_size change aborted"); +SET @b=REPEAT('0',1048576); +CREATE TEMPORARY TABLE t (c MEDIUMTEXT) ENGINE=InnoDB; +INSERT INTO t VALUES +(@b),(@b),(@b),(@b),(@b),(@b),(@b),(@b),(@b),(@b),(@b); +SET STATEMENT max_statement_time=0.000001 FOR +SET GLOBAL innodb_buffer_pool_size=6291456; +SET STATEMENT max_statement_time=0.000001 FOR +SET GLOBAL innodb_buffer_pool_size=6291456; +SET STATEMENT max_statement_time=0.000001 FOR +SET GLOBAL innodb_buffer_pool_size=6291456; +SET GLOBAL innodb_buffer_pool_size=6291456; +SET GLOBAL innodb_buffer_pool_size=16777216; +CHECKSUM TABLE t; +Table Checksum +test.t 4050893687 +DROP TEMPORARY TABLE t; diff --git a/mysql-test/suite/innodb/r/lock_isolation.result b/mysql-test/suite/innodb/r/lock_isolation.result index f1e1b5aac52..0ffa5714b18 100644 --- a/mysql-test/suite/innodb/r/lock_isolation.result +++ b/mysql-test/suite/innodb/r/lock_isolation.result @@ -167,7 +167,6 @@ SELECT * FROM t FORCE INDEX (b) FOR UPDATE; a b 1 NULL COMMIT; -disconnect con_weird; connection consistent; SELECT 
* FROM t FORCE INDEX (b) FOR UPDATE; a b @@ -231,9 +230,67 @@ UPDATE t SET b=4 WHERE a=1; connection consistent; SELECT * FROM t WHERE a=1 FOR UPDATE; ERROR HY000: Record has changed since last read in table 't' -disconnect consistent; disconnect disable_purging; connection default; SET DEBUG_SYNC="RESET"; DROP TABLE t; +CREATE TABLE t1(a INT) ENGINE=InnoDB STATS_PERSISTENT=0; +CREATE TABLE t2(a INT) ENGINE=InnoDB STATS_PERSISTENT=0; +BEGIN; +INSERT INTO t1 SET a=1; +connection con_weird; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +BEGIN; +INSERT INTO t2 SET a=1; +connection consistent; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +BEGIN; +INSERT INTO t2 SET a=2; +connection default; +COMMIT; +connection con_weird; +SELECT * FROM t1; +a +1 +COMMIT; +connection consistent; +SELECT * FROM t1; +ERROR HY000: Record has changed since last read in table 't1' +COMMIT; +connection default; +BEGIN; +INSERT INTO t1 SET a=2; +connection con_weird; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +INSERT INTO t2 SET a=3; +connection consistent; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +INSERT INTO t2 SET a=2; +connection default; +COMMIT; +connection con_weird; +SELECT * FROM t1; +a +1 +2 +COMMIT; +disconnect con_weird; +connection consistent; +SELECT * FROM t1; +ERROR HY000: Record has changed since last read in table 't1' +COMMIT; +disconnect consistent; +connection default; +DROP TABLE t1,t2; +# +# MDEV-37215 SELECT...FOR UPDATE crash under SERIALIZABLE +# +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB; +SELECT * FROM t1 FOR UPDATE; +a +DROP TABLE t1; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; # End of 10.6 tests diff --git a/mysql-test/suite/innodb/r/scrub_debug.result b/mysql-test/suite/innodb/r/scrub_debug.result index 7b0a9fd501c..7da2e0c51b0 100644 --- a/mysql-test/suite/innodb/r/scrub_debug.result 
+++ b/mysql-test/suite/innodb/r/scrub_debug.result @@ -15,5 +15,21 @@ FLUSH TABLE t1 FOR EXPORT; NOT FOUND /repairman/ in t1.ibd UNLOCK TABLES; DROP TABLE t1; +# +# MDEV-37183 innodb_immediate_scrub_data_uncompressed=ON may break +# crash recovery +# +SET GLOBAL innodb_limit_optimistic_insert_debug=0; +CREATE TABLE t(a VARCHAR(1) PRIMARY KEY,INDEX(a DESC)) ENGINE=InnoDB; +INSERT INTO t VALUES('2'),('1'),(''),('6'),('4'),('3'); +SET GLOBAL innodb_limit_optimistic_insert_debug=3; +INSERT INTO t VALUES('8'); +CHECK TABLE t; +Table Op Msg_type Msg_text +test.t check status OK +SELECT COUNT(*) FROM t; +COUNT(*) +7 +DROP TABLE t; SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=@save_debug; SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=@save_scrub; diff --git a/mysql-test/suite/innodb/r/trx_deadlock.result b/mysql-test/suite/innodb/r/trx_deadlock.result new file mode 100644 index 00000000000..28b3c987888 --- /dev/null +++ b/mysql-test/suite/innodb/r/trx_deadlock.result @@ -0,0 +1,82 @@ +# +# MDEV-36959 Deadlock does not rollback transaction fully +# +CREATE TABLE t1(col1 INT PRIMARY KEY, col2 INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1, 1), (2, 2); +SELECT * FROM t1; +col1 col2 +1 1 +2 2 +connect con1,localhost,root,,; +START TRANSACTION; +# Trx-1: Lock 1st record +UPDATE t1 SET col2=10 where col1=1; +connection default; +START TRANSACTION; +# Trx-2: Lock 2nd record +UPDATE t1 SET col2=100 where col1=2; +connection con1; +# Trx-1: Try locking 1st record : Wait +UPDATE t1 SET col2=10 where col1=2; +connection default; +# Wait for Trx-1 to get into lock wait stage +# Trx-2: Try locking 2nd record : Deadlock +UPDATE t1 SET col2=100 where col1=1; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +connection con1; +ROLLBACK; +connection default; +SELECT @@in_transaction; +@@in_transaction +0 +UPDATE t1 SET col2=10 where col1=1; +UPDATE t1 SET col2=100 where col1=2; +SELECT @@in_transaction; +@@in_transaction +0 +ROLLBACK; +SELECT * FROM 
t1; +col1 col2 +1 10 +2 100 +DROP TABLE t1; +# +# MDEV-37141 DML committed within XA transaction block after deadlock error and implicit rollback +# +CREATE TABLE t1(col1 INT PRIMARY KEY, col2 INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1, 1), (2, 2); +SELECT * FROM t1; +col1 col2 +1 1 +2 2 +connection con1; +XA BEGIN 'x1'; +# XA Trx-1: Lock 1st record +UPDATE t1 SET col2=10 where col1=1; +connection default; +XA BEGIN 'x2'; +# XA Trx-2: Lock 2nd record +UPDATE t1 SET col2=100 where col1=2; +connection con1; +# XA Trx-1: Try locking 1st record : Wait +UPDATE t1 SET col2=10 where col1=2; +connection default; +# Wait for XA Trx-1 to get into lock wait stage +# XA Trx-2: Try locking 2nd record : Deadlock +UPDATE t1 SET col2=100 where col1=1; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +INSERT INTO t1 VALUES (3, 3), (4, 4); +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the ROLLBACK ONLY state +XA END 'x2'; +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the ROLLBACK ONLY state +XA ROLLBACK 'x2'; +connection con1; +XA END 'x1'; +XA ROLLBACK 'x1'; +connection default; +SELECT * FROM t1; +col1 col2 +1 1 +2 2 +DROP TABLE t1; +disconnect con1; diff --git a/mysql-test/suite/innodb/t/doublewrite.test b/mysql-test/suite/innodb/t/doublewrite.test index a9c81901c47..da059bf80ea 100644 --- a/mysql-test/suite/innodb/t/doublewrite.test +++ b/mysql-test/suite/innodb/t/doublewrite.test @@ -60,7 +60,10 @@ connection default; flush table t1 for export; ---let CLEANUP_IF_CHECKPOINT=drop table t1, unexpected_checkpoint; +# If we are skipping the test at this point due to an unexpected +# checkpoint then page cleaner could be active after reading the +# initial checkpoint information +--let CLEANUP_IF_CHECKPOINT=XA COMMIT 'x'; drop table t1; --source ../include/no_checkpoint_end.inc --copy_file $MYSQLD_DATADIR/ibdata1 $MYSQLD_DATADIR/ibdata1.bak diff --git 
a/mysql-test/suite/innodb/t/doublewrite_debug.test b/mysql-test/suite/innodb/t/doublewrite_debug.test index b207823e3d1..e31cf34dbc1 100644 --- a/mysql-test/suite/innodb/t/doublewrite_debug.test +++ b/mysql-test/suite/innodb/t/doublewrite_debug.test @@ -51,6 +51,8 @@ let $restart_parameters=--debug_dbug=+d,ib_log_checkpoint_avoid_hard --innodb_fl begin; insert into t1 values (6, repeat('%', 400)); +SET GLOBAL innodb_max_dirty_pages_pct_lwm=0, innodb_max_dirty_pages_pct=0; + --echo # Make the first page dirty for system tablespace set global innodb_saved_page_number_debug = 0; set global innodb_fil_make_page_dirty_debug = 0; @@ -59,7 +61,11 @@ set global innodb_fil_make_page_dirty_debug = 0; set global innodb_saved_page_number_debug = 1; set global innodb_fil_make_page_dirty_debug = 0; -set global innodb_buf_flush_list_now = 1; +let $wait_condition = +SELECT variable_value = 0 +FROM information_schema.global_status +WHERE variable_name = 'INNODB_BUFFER_POOL_PAGES_DIRTY'; +--source include/wait_condition.inc --let CLEANUP_IF_CHECKPOINT=drop table t1, unexpected_checkpoint; --source ../include/no_checkpoint_end.inc diff --git a/mysql-test/suite/innodb/t/innodb-ac-non-locking-select.test b/mysql-test/suite/innodb/t/innodb-ac-non-locking-select.test index 3376367b0ba..ae664416bd5 100644 --- a/mysql-test/suite/innodb/t/innodb-ac-non-locking-select.test +++ b/mysql-test/suite/innodb/t/innodb-ac-non-locking-select.test @@ -9,12 +9,19 @@ INSERT INTO t1 VALUES(1, "1"); INSERT INTO t1 VALUES(2, "2"); INSERT INTO t1 VALUES(3, "3"); +set innodb_snapshot_isolation=0; --connect (con1,localhost,root,,) +set innodb_snapshot_isolation=0; --connect (con2,localhost,root,,) +set innodb_snapshot_isolation=0; --connect (con3,localhost,root,,) +set innodb_snapshot_isolation=0; --connect (con4,localhost,root,,) +set innodb_snapshot_isolation=0; --connect (con5,localhost,root,,) +set innodb_snapshot_isolation=0; --connect (con6,localhost,root,,) +set innodb_snapshot_isolation=0; connection 
default; # Disable query log to avoid non-deterministic output conflicts diff --git a/mysql-test/suite/innodb/t/innodb_buffer_pool_shrink.test b/mysql-test/suite/innodb/t/innodb_buffer_pool_shrink.test new file mode 100644 index 00000000000..886e31955c6 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_buffer_pool_shrink.test @@ -0,0 +1,14 @@ +--source include/have_innodb.inc +--source include/have_partition.inc +call mtr.add_suppression("innodb_buffer_pool_size change aborted"); +CREATE TABLE t (c INT) ENGINE=InnoDB PARTITION BY HASH(c) PARTITIONS 512; +BEGIN; +SELECT * FROM t LOCK IN SHARE MODE; +SET @save_size = @@GLOBAL.innodb_buffer_pool_size; +--error 0,ER_WRONG_USAGE +SET GLOBAL innodb_buffer_pool_size=6291456; +COMMIT; +--error 0,ER_WRONG_USAGE +SET GLOBAL innodb_buffer_pool_size=6291456; +SET GLOBAL innodb_buffer_pool_size = @save_size; +DROP TABLE t; diff --git a/mysql-test/suite/innodb/t/innodb_buffer_pool_shrink_temporary.opt b/mysql-test/suite/innodb/t/innodb_buffer_pool_shrink_temporary.opt new file mode 100644 index 00000000000..d8ba7cf7b0f --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_buffer_pool_shrink_temporary.opt @@ -0,0 +1 @@ +--innodb-buffer-pool-size=16m diff --git a/mysql-test/suite/innodb/t/innodb_buffer_pool_shrink_temporary.test b/mysql-test/suite/innodb/t/innodb_buffer_pool_shrink_temporary.test new file mode 100644 index 00000000000..cf2ea4ad175 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_buffer_pool_shrink_temporary.test @@ -0,0 +1,20 @@ +--source include/have_innodb.inc +call mtr.add_suppression("innodb_buffer_pool_size change aborted"); +SET @b=REPEAT('0',1048576); +CREATE TEMPORARY TABLE t (c MEDIUMTEXT) ENGINE=InnoDB; +INSERT INTO t VALUES +(@b),(@b),(@b),(@b),(@b),(@b),(@b),(@b),(@b),(@b),(@b); +--error 0,ER_WRONG_USAGE +SET STATEMENT max_statement_time=0.000001 FOR +SET GLOBAL innodb_buffer_pool_size=6291456; +--error 0,ER_WRONG_USAGE +SET STATEMENT max_statement_time=0.000001 FOR +SET GLOBAL 
innodb_buffer_pool_size=6291456; +--error 0,ER_WRONG_USAGE +SET STATEMENT max_statement_time=0.000001 FOR +SET GLOBAL innodb_buffer_pool_size=6291456; +--error 0,ER_WRONG_USAGE +SET GLOBAL innodb_buffer_pool_size=6291456; +SET GLOBAL innodb_buffer_pool_size=16777216; +CHECKSUM TABLE t; +DROP TEMPORARY TABLE t; diff --git a/mysql-test/suite/innodb/t/lock_isolation.test b/mysql-test/suite/innodb/t/lock_isolation.test index 8b3904ec354..d1f2d2febce 100644 --- a/mysql-test/suite/innodb/t/lock_isolation.test +++ b/mysql-test/suite/innodb/t/lock_isolation.test @@ -175,7 +175,6 @@ ROLLBACK; --reap SELECT * FROM t FORCE INDEX (b) FOR UPDATE; COMMIT; ---disconnect con_weird --connection consistent SELECT * FROM t FORCE INDEX (b) FOR UPDATE; @@ -247,12 +246,65 @@ UPDATE t SET b=4 WHERE a=1; --connection consistent --error ER_CHECKREAD SELECT * FROM t WHERE a=1 FOR UPDATE; ---disconnect consistent --disconnect disable_purging --connection default SET DEBUG_SYNC="RESET"; DROP TABLE t; +CREATE TABLE t1(a INT) ENGINE=InnoDB STATS_PERSISTENT=0; +CREATE TABLE t2(a INT) ENGINE=InnoDB STATS_PERSISTENT=0; +BEGIN; INSERT INTO t1 SET a=1; +--connection con_weird +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +BEGIN; INSERT INTO t2 SET a=1; +--connection consistent +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +BEGIN; INSERT INTO t2 SET a=2; +--connection default +COMMIT; +--connection con_weird +SELECT * FROM t1; +COMMIT; +--connection consistent +--disable_ps2_protocol +--error ER_CHECKREAD +SELECT * FROM t1; +--enable_ps2_protocol +COMMIT; +--connection default +BEGIN; INSERT INTO t1 SET a=2; +--connection con_weird +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +START TRANSACTION WITH CONSISTENT SNAPSHOT; INSERT INTO t2 SET a=3; +--connection consistent +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +START TRANSACTION WITH CONSISTENT SNAPSHOT; INSERT INTO t2 SET a=2; +--connection default +COMMIT; +--connection con_weird +SELECT * FROM t1; +COMMIT; +--disconnect con_weird 
+--connection consistent +--disable_ps2_protocol +--error ER_CHECKREAD +SELECT * FROM t1; +--enable_ps2_protocol +COMMIT; +--disconnect consistent +--connection default +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-37215 SELECT...FOR UPDATE crash under SERIALIZABLE +--echo # + +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB; +SELECT * FROM t1 FOR UPDATE; +DROP TABLE t1; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; + --source include/wait_until_count_sessions.inc --echo # End of 10.6 tests diff --git a/mysql-test/suite/innodb/t/scrub_debug.test b/mysql-test/suite/innodb/t/scrub_debug.test index 8cebfca6106..b1603e961fd 100644 --- a/mysql-test/suite/innodb/t/scrub_debug.test +++ b/mysql-test/suite/innodb/t/scrub_debug.test @@ -24,5 +24,20 @@ FLUSH TABLE t1 FOR EXPORT; -- source include/search_pattern_in_file.inc UNLOCK TABLES; DROP TABLE t1; + +--echo # +--echo # MDEV-37183 innodb_immediate_scrub_data_uncompressed=ON may break +--echo # crash recovery +--echo # +SET GLOBAL innodb_limit_optimistic_insert_debug=0; +# Note: MariaDB 10.6 fails to reproduce the crash; it maps DESC to ASC. 
+CREATE TABLE t(a VARCHAR(1) PRIMARY KEY,INDEX(a DESC)) ENGINE=InnoDB; +INSERT INTO t VALUES('2'),('1'),(''),('6'),('4'),('3'); +SET GLOBAL innodb_limit_optimistic_insert_debug=3; +INSERT INTO t VALUES('8'); +CHECK TABLE t; +SELECT COUNT(*) FROM t; +DROP TABLE t; + SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=@save_debug; SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=@save_scrub; diff --git a/mysql-test/suite/innodb/t/trx_deadlock.opt b/mysql-test/suite/innodb/t/trx_deadlock.opt new file mode 100644 index 00000000000..00ea161cd6e --- /dev/null +++ b/mysql-test/suite/innodb/t/trx_deadlock.opt @@ -0,0 +1 @@ +--innodb_lock_wait_timeout=60 diff --git a/mysql-test/suite/innodb/t/trx_deadlock.test b/mysql-test/suite/innodb/t/trx_deadlock.test new file mode 100644 index 00000000000..c0998fd6d10 --- /dev/null +++ b/mysql-test/suite/innodb/t/trx_deadlock.test @@ -0,0 +1,100 @@ +--echo # +--echo # MDEV-36959 Deadlock does not rollback transaction fully +--echo # + +--source include/have_log_bin.inc +--source include/have_innodb.inc +--source include/count_sessions.inc + +CREATE TABLE t1(col1 INT PRIMARY KEY, col2 INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1, 1), (2, 2); +SELECT * FROM t1; + +--connect (con1,localhost,root,,) +START TRANSACTION; +--echo # Trx-1: Lock 1st record +UPDATE t1 SET col2=10 where col1=1; + +--connection default +START TRANSACTION; +--echo # Trx-2: Lock 2nd record +UPDATE t1 SET col2=100 where col1=2; + +--connection con1 +--echo # Trx-1: Try locking 1st record : Wait +--send UPDATE t1 SET col2=10 where col1=2 + +--connection default +--echo # Wait for Trx-1 to get into lock wait stage +let $wait_condition= + SELECT COUNT(*) >= 2 FROM INFORMATION_SCHEMA.INNODB_LOCKS + WHERE lock_table like "%t1%"; +--source include/wait_condition.inc + +--echo # Trx-2: Try locking 2nd record : Deadlock +--error ER_LOCK_DEADLOCK +UPDATE t1 SET col2=100 where col1=1; + +--connection con1 +--reap +ROLLBACK; + +--connection default +SELECT @@in_transaction; 
+UPDATE t1 SET col2=10 where col1=1; +UPDATE t1 SET col2=100 where col1=2; +SELECT @@in_transaction; +ROLLBACK; + +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # MDEV-37141 DML committed within XA transaction block after deadlock error and implicit rollback +--echo # + +CREATE TABLE t1(col1 INT PRIMARY KEY, col2 INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1, 1), (2, 2); +SELECT * FROM t1; + +--connection con1 +XA BEGIN 'x1'; +--echo # XA Trx-1: Lock 1st record +UPDATE t1 SET col2=10 where col1=1; + +--connection default +XA BEGIN 'x2'; +--echo # XA Trx-2: Lock 2nd record +UPDATE t1 SET col2=100 where col1=2; + +--connection con1 +--echo # XA Trx-1: Try locking 1st record : Wait +--send UPDATE t1 SET col2=10 where col1=2 + +--connection default +--echo # Wait for XA Trx-1 to get into lock wait stage +let $wait_condition= + SELECT COUNT(*) >= 2 FROM INFORMATION_SCHEMA.INNODB_LOCKS + WHERE lock_table like "%t1%"; +--source include/wait_condition.inc + +--echo # XA Trx-2: Try locking 2nd record : Deadlock +--error ER_LOCK_DEADLOCK +UPDATE t1 SET col2=100 where col1=1; +--error ER_XAER_RMFAIL +INSERT INTO t1 VALUES (3, 3), (4, 4); +--error ER_XAER_RMFAIL +XA END 'x2'; +XA ROLLBACK 'x2'; + +--connection con1 +--reap +XA END 'x1'; +XA ROLLBACK 'x1'; + +--connection default +SELECT * FROM t1; +DROP TABLE t1; + +--disconnect con1 +--source include/wait_until_count_sessions.inc diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result index 93fce5a42b1..ea4c6325ef2 100644 --- a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result +++ b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result @@ -994,3 +994,15 @@ FTS_DOC_ID f1 f2 4294967298 txt bbb 100000000000 aaa bbb DROP TABLE t1; +# +# MDEV-30363 Failing assertion: trx->error_state == DB_SUCCESS +# in que_run_threads +# +CREATE TABLE server_stopword (value VARCHAR(1))engine=innodb; +SET GLOBAL 
innodb_ft_server_stopword_table='test/server_stopword'; +CREATE TABLE t (t VARCHAR(1) COLLATE utf8_unicode_ci, +FULLTEXT (t))engine=innodb; +TRUNCATE TABLE t; +DROP TABLE t; +DROP TABLE server_stopword; +SET GLOBAL innodb_ft_server_stopword_table= default; diff --git a/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test b/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test index c0836372e9e..3608fa3ff6a 100644 --- a/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test +++ b/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test @@ -958,3 +958,16 @@ CREATE FULLTEXT INDEX i ON t1 (f2); SELECT * FROM t1 WHERE match(f2) against("bbb"); # Cleanup DROP TABLE t1; + +--echo # +--echo # MDEV-30363 Failing assertion: trx->error_state == DB_SUCCESS +--echo # in que_run_threads +--echo # +CREATE TABLE server_stopword (value VARCHAR(1))engine=innodb; +SET GLOBAL innodb_ft_server_stopword_table='test/server_stopword'; +CREATE TABLE t (t VARCHAR(1) COLLATE utf8_unicode_ci, + FULLTEXT (t))engine=innodb; +TRUNCATE TABLE t; +DROP TABLE t; +DROP TABLE server_stopword; +SET GLOBAL innodb_ft_server_stopword_table= default; diff --git a/mysql-test/suite/mariabackup/backup_ssl.result b/mysql-test/suite/mariabackup/backup_ssl.result index 3598c27b452..ac323ce0db4 100644 --- a/mysql-test/suite/mariabackup/backup_ssl.result +++ b/mysql-test/suite/mariabackup/backup_ssl.result @@ -15,3 +15,8 @@ DROP USER backup_user; # MDEV-32473 --disable-ssl doesn't disable it # # tcp skip-ssl +# +# MDEV-37143 Mariadb-backup fails on Windows with SSL certificate is self-signed error +# +# do not fail with passwordless with default protocol +# do not fail with passwordless with 127.0.0.1 TCP diff --git a/mysql-test/suite/mariabackup/backup_ssl.test b/mysql-test/suite/mariabackup/backup_ssl.test index 1ae0b287a88..bb8203986f5 100644 --- a/mysql-test/suite/mariabackup/backup_ssl.test +++ b/mysql-test/suite/mariabackup/backup_ssl.test @@ -19,8 +19,14 @@ echo # MDEV-31855 validate ssl certificates using client 
password in the interna echo #; # fails to connect, passwordless root echo # tcp ssl ssl-verify-server-cert; +let $host=; +if ($MARIADB_UPGRADE_EXE) { + # On Windows, we need host different from "127.0.0.1","::1" or "localhost" + # to trigger self-signed error + let $host=--host=127.0.0.2; +} error 1; -exec $XTRABACKUP --no-defaults --protocol=tcp --user=root --port=$MASTER_MYPORT --backup --target-dir=$targetdir; +exec $XTRABACKUP --no-defaults $host --protocol=tcp --user=root --port=$MASTER_MYPORT --backup --target-dir=$targetdir; --echo # --echo # MDEV-32473 --disable-ssl doesn't disable it @@ -29,3 +35,17 @@ exec $XTRABACKUP --no-defaults --protocol=tcp --user=root --port=$MASTER_MYPORT echo # tcp skip-ssl; exec $XTRABACKUP --no-defaults --protocol=tcp --user=root --skip-ssl --port=$MASTER_MYPORT --backup --target-dir=$targetdir; rmdir $targetdir; + +--echo # +--echo # MDEV-37143 Mariadb-backup fails on Windows with SSL certificate is self-signed error +--echo # +--echo # do not fail with passwordless with default protocol +let $port_or_socket=--socket=$MASTER_MYSOCK; +if ($MARIADB_UPGRADE_EXE) { # windows + let $port_or_socket=--port=$MASTER_MYPORT; +} +exec $XTRABACKUP --no-defaults --user=root --backup $port_or_socket --target-dir=$targetdir; +rmdir $targetdir; +--echo # do not fail with passwordless with 127.0.0.1 TCP +exec $XTRABACKUP --no-defaults --host=127.0.0.1 --protocol=tcp --port=$MASTER_MYPORT --user=root --backup --target-dir=$targetdir; +rmdir $targetdir; diff --git a/mysql-test/suite/mariabackup/mroonga.opt b/mysql-test/suite/mariabackup/mroonga.opt new file mode 100644 index 00000000000..d5a1e5190a7 --- /dev/null +++ b/mysql-test/suite/mariabackup/mroonga.opt @@ -0,0 +1 @@ +--loose-plugin-load-add=$HA_MROONGA_SO --loose-plugin-mroonga=ON diff --git a/mysql-test/suite/mariabackup/mroonga.result b/mysql-test/suite/mariabackup/mroonga.result new file mode 100644 index 00000000000..e60f7da47f4 --- /dev/null +++ 
b/mysql-test/suite/mariabackup/mroonga.result @@ -0,0 +1,15 @@ +# +# MDEV-34425 mroonga files are not copied by mariabackup +# +CREATE TABLE t(c TEXT, FULLTEXT(c)) ENGINE=Mroonga; +INSERT INTO t VALUES('Once upon a time'),('There was a wicked witch'),('Who ate everybody up'); +# mariadb-backup backup +# shutdown server +# remove datadir +# xtrabackup move back +# restart +SELECT * FROM t WHERE MATCH(c) AGAINST('wicked'); +c +There was a wicked witch +DROP TABLE t; +# End 10.11 tests diff --git a/mysql-test/suite/mariabackup/mroonga.test b/mysql-test/suite/mariabackup/mroonga.test new file mode 100644 index 00000000000..21897f4ed7a --- /dev/null +++ b/mysql-test/suite/mariabackup/mroonga.test @@ -0,0 +1,23 @@ +if (`SELECT COUNT(*)=0 FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_NAME = 'mroonga' AND PLUGIN_STATUS='ACTIVE'`) +{ + --skip needs mroonga plugin +} + +--echo # +--echo # MDEV-34425 mroonga files are not copied by mariabackup +--echo # + +CREATE TABLE t(c TEXT, FULLTEXT(c)) ENGINE=Mroonga; +INSERT INTO t VALUES('Once upon a time'),('There was a wicked witch'),('Who ate everybody up'); +echo # mariadb-backup backup; +let $targetdir=$MYSQL_TMP_DIR/backup; +--disable_result_log +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir; +exec $XTRABACKUP --prepare --target-dir=$targetdir; +-- source include/restart_and_restore.inc +--enable_result_log +SELECT * FROM t WHERE MATCH(c) AGAINST('wicked'); +DROP TABLE t; +rmdir $targetdir; + +--echo # End 10.11 tests diff --git a/mysql-test/suite/mariabackup/partial.result b/mysql-test/suite/mariabackup/partial.result index cefda922868..27d515dfb8a 100644 --- a/mysql-test/suite/mariabackup/partial.result +++ b/mysql-test/suite/mariabackup/partial.result @@ -25,3 +25,15 @@ i DROP TABLE t1; DROP TABLE t2; DROP TABLE t21; +# +# MDEV-36287 maribackup ignores tables-file option +# +CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB; +CREATE TABLE t2(f1 INT NOT NULL)ENGINE=InnoDB; +INSERT INTO 
t1 values(1); +# Only backup table t1 by creating tables_file +# Backup with --tables-file option +# table t2 is skipped. Shows only t1 +t1.frm +t1.ibd +DROP TABLE t2, t1; diff --git a/mysql-test/suite/mariabackup/partial.test b/mysql-test/suite/mariabackup/partial.test index af6da274102..e9f4d90640b 100644 --- a/mysql-test/suite/mariabackup/partial.test +++ b/mysql-test/suite/mariabackup/partial.test @@ -78,3 +78,25 @@ DROP TABLE t1; DROP TABLE t2; DROP TABLE t21; rmdir $targetdir; + +--echo # +--echo # MDEV-36287 maribackup ignores tables-file option +--echo # +CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB; +CREATE TABLE t2(f1 INT NOT NULL)ENGINE=InnoDB; +INSERT INTO t1 values(1); +let targetdir=$MYSQLTEST_VARDIR/tmp/backup; +let tables_list=$MYSQLTEST_VARDIR/tmp/tables_list.out; +--echo # Only backup table t1 by creating tables_file +--exec echo "test.t1" > $tables_list + +--echo # Backup with --tables-file option +--disable_result_log +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --tables-file=$tables_list --target-dir=$targetdir; +--enable_result_log + +--echo # table t2 is skipped. 
Shows only t1 +list_files $targetdir/test; +DROP TABLE t2, t1; +rmdir $targetdir; +remove_file $tables_list; diff --git a/mysql-test/suite/mariabackup/xb_history.result b/mysql-test/suite/mariabackup/xb_history.result index 3750c47c2b1..51223bb5b32 100644 --- a/mysql-test/suite/mariabackup/xb_history.result +++ b/mysql-test/suite/mariabackup/xb_history.result @@ -17,7 +17,7 @@ mariadb_backup_history CREATE TABLE `mariadb_backup_history` ( `innodb_to_lsn` bigint(20) unsigned DEFAULT NULL, `partial` enum('Y','N') DEFAULT NULL, `incremental` enum('Y','N') DEFAULT NULL, - `format` enum('file','tar','xbstream') DEFAULT NULL, + `format` enum('file','tar','mbstream') DEFAULT NULL, `compressed` enum('Y','N') DEFAULT NULL, PRIMARY KEY (`uuid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_uca1400_ai_ci diff --git a/mysql-test/suite/mariabackup/xbstream.result b/mysql-test/suite/mariabackup/xbstream.result index 93f68eca9ef..61fb6ca70ac 100644 --- a/mysql-test/suite/mariabackup/xbstream.result +++ b/mysql-test/suite/mariabackup/xbstream.result @@ -1,8 +1,28 @@ CREATE TABLE t(i INT) ENGINE INNODB; INSERT INTO t VALUES(1); -# xtrabackup backup to stream +# xtrabackup full backup to stream +INSERT INTO t VALUES(2), (3), (4); +# xtrabackup incremental backup to stream +# checking recording of history +SELECT tool_name, name, partial, incremental, format, compressed +FROM mysql.mariadb_backup_history +ORDER BY innodb_from_lsn; +tool_name mariadb-backup +name fullback +partial Y +incremental N +format mbstream +compressed N +tool_name mariadb-backup +name incr_1 +partial N +incremental Y +format mbstream +compressed N # xbstream extract # xtrabackup prepare +# xbstream extract for incremental backup +# xtrabackup incremental prepare # shutdown server # remove datadir # xtrabackup move back @@ -10,4 +30,8 @@ INSERT INTO t VALUES(1); SELECT * FROM t; i 1 +2 +3 +4 DROP TABLE t; +DROP TABLE mysql.mariadb_backup_history; diff --git a/mysql-test/suite/mariabackup/xbstream.test 
b/mysql-test/suite/mariabackup/xbstream.test index 8429a3b587d..16258274b18 100644 --- a/mysql-test/suite/mariabackup/xbstream.test +++ b/mysql-test/suite/mariabackup/xbstream.test @@ -4,11 +4,30 @@ CREATE TABLE t(i INT) ENGINE INNODB; INSERT INTO t VALUES(1); let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; +let $incr_dir=$MYSQLTEST_VARDIR/tmp/backup_incr; mkdir $targetdir; -let $streamfile=$MYSQLTEST_VARDIR/tmp/backup.xb; +mkdir $incr_dir; + +let $streamfile=$MYSQLTEST_VARDIR/tmp/backup.xb; +let $stream_incr_file=$MYSQLTEST_VARDIR/tmp/backup_incr.xb; + +echo # xtrabackup full backup to stream; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --databases-exclude=foobar --history=fullback --stream=xbstream > $streamfile 2>$targetdir/backup_stream.log; + +INSERT INTO t VALUES(2), (3), (4); + +echo # xtrabackup incremental backup to stream; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --incremental-history-name=fullback --history=incr_1 --stream=xbstream > $stream_incr_file 2>$targetdir/backup_incr.log; + +echo # checking recording of history; +--replace_result mariadb-backup.exe mariadb-backup +--vertical_results +SELECT tool_name, name, partial, incremental, format, compressed +FROM mysql.mariadb_backup_history +ORDER BY innodb_from_lsn; + +--horizontal_results -echo # xtrabackup backup to stream; -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --databases-exclude=foobar --stream=xbstream > $streamfile 2>$targetdir/backup_stream.log; echo # xbstream extract; --disable_result_log exec $XBSTREAM -x -C $targetdir < $streamfile; @@ -16,9 +35,17 @@ exec $XBSTREAM -x -C $targetdir < $streamfile; echo # xtrabackup prepare; exec $XTRABACKUP --prepare --target-dir=$targetdir; +echo # xbstream extract for incremental backup; +exec $XBSTREAM -x -C $incr_dir < $stream_incr_file; + +echo # xtrabackup incremental prepare; +exec $XTRABACKUP --prepare --target-dir=$targetdir 
--incremental-dir=$incr_dir; + -- source include/restart_and_restore.inc --enable_result_log SELECT * FROM t; DROP TABLE t; +DROP TABLE mysql.mariadb_backup_history; rmdir $targetdir; - +remove_file $streamfile; +remove_file $stream_incr_file; diff --git a/mysql-test/suite/multi_source/mariadb-dump_slave.result b/mysql-test/suite/multi_source/mariadb-dump_slave.result new file mode 100644 index 00000000000..41fa0536bba --- /dev/null +++ b/mysql-test/suite/multi_source/mariadb-dump_slave.result @@ -0,0 +1,56 @@ +connect active_master, 127.0.0.1, root, , , $SERVER_MYPORT_2; +connect inactive_master, 127.0.0.1, root, , , $SERVER_MYPORT_3; +connect slave, 127.0.0.1, root, , , $SERVER_MYPORT_1; +CHANGE MASTER TO +master_host='127.0.0.1', master_port=MYPORT_2, master_user='root'; +START SLAVE SQL_THREAD; +CHANGE MASTER 'inactive' TO +master_host='127.0.0.1', master_port=MYPORT_3; +include/wait_for_slave_sql_to_start.inc +# Control State +Connection_name = '' +Connection_name = 'inactive' +Slave_IO_Running = 'No' +Slave_IO_Running = 'No' +Slave_SQL_Running = 'Yes' +Slave_SQL_Running = 'No' +# Basic +$MYSQL_DUMP --compact --dump-slave --include-master-host-port test +/*M!999999\- enable the sandbox mode */ +CHANGE MASTER '' TO MASTER_HOST='127.0.0.1', MASTER_PORT=MYPORT_2, MASTER_LOG_FILE='', MASTER_LOG_POS=4; +CHANGE MASTER 'inactive' TO MASTER_HOST='127.0.0.1', MASTER_PORT=MYPORT_3, MASTER_LOG_FILE='', MASTER_LOG_POS=0; + +-- SET GLOBAL gtid_slave_pos=''; +# MDEV-7611 mysqldump --dump-slave always starts stopped slave +$MYSQL_DUMP --compact --dump-slave test +/*M!999999\- enable the sandbox mode */ +CHANGE MASTER '' TO MASTER_LOG_FILE='', MASTER_LOG_POS=4; +CHANGE MASTER 'inactive' TO MASTER_LOG_FILE='', MASTER_LOG_POS=0; + +-- SET GLOBAL gtid_slave_pos=''; +include/wait_for_slave_sql_to_start.inc +Connection_name = '' +Connection_name = 'inactive' +Slave_IO_Running = 'No' +Slave_IO_Running = 'No' +Slave_SQL_Running = 'Yes' +Slave_SQL_Running = 'No' +# MDEV-5624 
mysqldump --dump-slave option does not restart the replication if the dump has failed +$MYSQL_DUMP --compact --dump-slave no_such_db +/*M!999999\- enable the sandbox mode */ +CHANGE MASTER '' TO MASTER_LOG_FILE='', MASTER_LOG_POS=4; +CHANGE MASTER 'inactive' TO MASTER_LOG_FILE='', MASTER_LOG_POS=0; + +include/wait_for_slave_sql_to_start.inc +Connection_name = '' +Connection_name = 'inactive' +Slave_IO_Running = 'No' +Slave_IO_Running = 'No' +Slave_SQL_Running = 'Yes' +Slave_SQL_Running = 'No' +# Cleanup +STOP SLAVE SQL_THREAD; +disconnect active_master; +disconnect inactive_master; +include/wait_for_slave_sql_to_stop.inc +disconnect slave; diff --git a/mysql-test/suite/multi_source/mariadb-dump_slave.test b/mysql-test/suite/multi_source/mariadb-dump_slave.test new file mode 100644 index 00000000000..35cdb6887ec --- /dev/null +++ b/mysql-test/suite/multi_source/mariadb-dump_slave.test @@ -0,0 +1,55 @@ +# `mariadb-dump --dump-slave` multi-source interactions +# (see `main.rpl_mysqldump_slave` for general testing with single-source) + +--source include/have_log_bin.inc +--let $status_items= Connection_name, Slave_IO_Running, Slave_SQL_Running +--let $all_slaves_status= 1 + +# $MYSQL_DUMP dumps the $SERVER_MYPORT_1 server +--connect ( active_master, 127.0.0.1, root, , , $SERVER_MYPORT_2) +--connect (inactive_master, 127.0.0.1, root, , , $SERVER_MYPORT_3) +--connect ( slave, 127.0.0.1, root, , , $SERVER_MYPORT_1) + +--replace_result $SERVER_MYPORT_2 MYPORT_2 +eval CHANGE MASTER TO + master_host='127.0.0.1', master_port=$SERVER_MYPORT_2, master_user='root'; +START SLAVE SQL_THREAD; +--replace_result $SERVER_MYPORT_3 MYPORT_3 +eval CHANGE MASTER 'inactive' TO + master_host='127.0.0.1', master_port=$SERVER_MYPORT_3; +# wait for the active default '' connection only +--source include/wait_for_slave_sql_to_start.inc + + +--echo # Control State +--source include/show_slave_status.inc + +--echo # Basic +--echo \$MYSQL_DUMP --compact --dump-slave --include-master-host-port test 
+--replace_result $SERVER_MYPORT_2 MYPORT_2 $SERVER_MYPORT_3 MYPORT_3 +--exec $MYSQL_DUMP --compact --dump-slave --include-master-host-port test + + +# The 'inactive' connection should remain stopped +# while the active '' connection should restart. + +--echo # MDEV-7611 mysqldump --dump-slave always starts stopped slave +--echo \$MYSQL_DUMP --compact --dump-slave test +--exec $MYSQL_DUMP --compact --dump-slave test +--source include/wait_for_slave_sql_to_start.inc +--source include/show_slave_status.inc + +--echo # MDEV-5624 mysqldump --dump-slave option does not restart the replication if the dump has failed +--echo \$MYSQL_DUMP --compact --dump-slave no_such_db +--error 2 +--exec $MYSQL_DUMP --compact --dump-slave no_such_db +--source include/wait_for_slave_sql_to_start.inc +--source include/show_slave_status.inc + + +--echo # Cleanup +STOP SLAVE SQL_THREAD; +--disconnect active_master +--disconnect inactive_master +--source include/wait_for_slave_sql_to_stop.inc +--disconnect slave diff --git a/mysql-test/suite/parts/r/alter_table.result b/mysql-test/suite/parts/r/alter_table.result index 558b8f4845c..9c3b06a33cc 100644 --- a/mysql-test/suite/parts/r/alter_table.result +++ b/mysql-test/suite/parts/r/alter_table.result @@ -380,6 +380,17 @@ disconnect con1; connection default; drop user u@localhost; drop database db; +# +# MDEV-37328 Assertion failure in make_empty_rec upon CONVERT PARTITION +# +create table t (f1 int, f2 int, f3 int, f4 int, f5 int, f6 int, f7 int) engine=myisam +partition by list (f3) ( +partition p0 values in (null,0), +partition p1 values in (1,2,3), +partition p2 default +); +alter table t convert partition p0 to table tp; +drop table if exists tp, t; # End of 10.11 tests # # MDEV-22164 without validation for exchange partition/convert in diff --git a/mysql-test/suite/parts/t/alter_table.test b/mysql-test/suite/parts/t/alter_table.test index 8a4de46a03c..a1ee5fffac6 100644 --- a/mysql-test/suite/parts/t/alter_table.test +++ 
b/mysql-test/suite/parts/t/alter_table.test @@ -343,6 +343,18 @@ alter table t1 convert table tp to partition p2 values less than (1000); drop user u@localhost; drop database db; +--echo # +--echo # MDEV-37328 Assertion failure in make_empty_rec upon CONVERT PARTITION +--echo # +create table t (f1 int, f2 int, f3 int, f4 int, f5 int, f6 int, f7 int) engine=myisam + partition by list (f3) ( + partition p0 values in (null,0), + partition p1 values in (1,2,3), + partition p2 default +); +alter table t convert partition p0 to table tp; +drop table if exists tp, t; + --echo # End of 10.11 tests --echo # diff --git a/mysql-test/suite/perfschema/r/server_init.result b/mysql-test/suite/perfschema/r/server_init.result index 82f53ad5b74..7839e713104 100644 --- a/mysql-test/suite/perfschema/r/server_init.result +++ b/mysql-test/suite/perfschema/r/server_init.result @@ -24,15 +24,11 @@ where name like "wait/synch/mutex/mysys/THR_LOCK_charset"; count(name) 1 select count(name) from mutex_instances -<<<<<<< HEAD -where name like "wait/synch/mutex/sql/LOCK_thread_count"; -======= where name like "wait/synch/mutex/sql/LOCK_open"; count(name) 1 select count(name) from mutex_instances -where name like "wait/synch/mutex/sql/LOCK_thd_list"; ->>>>>>> merge-perfschema-5.7 +where name like "wait/synch/mutex/sql/LOCK_thread_count"; count(name) 1 select count(name) from mutex_instances @@ -52,11 +48,7 @@ where name like "wait/synch/mutex/sql/LOCK_crypt"; count(name) 1 select count(name) from mutex_instances -<<<<<<< HEAD where name like "wait/synch/mutex/sql/LOCK_active_mi"; -======= -where name like "wait/synch/mutex/sql/LOCK_slave_list"; ->>>>>>> 258e16fa2de... 
WIP 5.7 P_S count(name) 1 select count(name) from mutex_instances @@ -96,13 +88,10 @@ where name like "wait/synch/mutex/sql/LOCK_audit_mask"; count(name) 1 select count(name) from mutex_instances -<<<<<<< HEAD -======= where name like "wait/synch/mutex/sql/LOCK_transaction_cache"; count(name) 1 select count(name) from mutex_instances ->>>>>>> merge-perfschema-5.7 where name like "wait/synch/mutex/sql/LOCK_plugin"; count(name) 1 diff --git a/mysql-test/suite/period/r/innodb_debug.result b/mysql-test/suite/period/r/innodb_debug.result new file mode 100644 index 00000000000..d5559b6637b --- /dev/null +++ b/mysql-test/suite/period/r/innodb_debug.result @@ -0,0 +1,228 @@ +# +# MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records +# +## INSERT +create table t1 ( +id int, s date, e date, +period for p(s,e), +unique(id, p without overlaps) +) engine=innodb; +start transaction; +select * from t1; +id s e +connect con1,localhost,root; +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +insert t1 values(10, date'2010-09-09', date'2010-11-10'); +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +insert t1 values(10, date'2010-10-10', date'2010-11-12'); +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 23000: Duplicate entry '10-2010-11-10-2010-09-09' for key 'id' +connection default; +select * from t1; +id s e +commit; +select * from t1; +id s e +10 2010-10-10 2010-11-12 +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE +create table t1 ( +id int, s date, e date, +period for p(s,e), +unique(id, p without overlaps) +) engine=innodb; +start transaction; +select * from t1; +id s e +connect con1,localhost,root; 
+insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update t1 set e=e + interval 2 month where s=date'2010-09-09'; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 23000: Duplicate entry '10-2010-11-10-2010-09-09' for key 'id' +connection default; +select * from t1; +id s e +commit; +select * from t1; +id s e +10 2010-09-09 2010-09-10 +10 2010-10-10 2010-12-12 +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## INSERT IGNORE +create table t1 ( +id int, s date, e date, +period for p(s,e), +unique(id, p without overlaps) +) engine=innodb; +start transaction; +select * from t1; +id s e +connect con1,localhost,root; +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +insert ignore t1 values(10, date'2010-09-09', date'2010-11-10'),(11, date'2010-09-09', date'2010-11-10'); +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +insert t1 values(10, date'2010-10-10', date'2010-11-12'); +set debug_sync="now SIGNAL do_insert"; +connection con1; +Warnings: +Warning 1062 Duplicate entry '10-2010-11-10-2010-09-09' for key 'id' +connection default; +select * from t1; +id s e +commit; +select * from 
t1; +id s e +10 2010-10-10 2010-11-12 +11 2010-09-09 2010-11-10 +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE IGNORE +create table t1 ( +id int, s date, e date, +period for p(s,e), +unique(id, p without overlaps) +) engine=innodb; +start transaction; +select * from t1; +id s e +connect con1,localhost,root; +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update ignore t1 set e=e + interval 2 month where s=date'2010-09-09'; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint WITHOUT OVERLAPS is not currently supported +connection default; +select * from t1; +id s e +commit; +select * from t1; +id s e +10 2010-09-09 2010-09-10 +10 2010-10-10 2010-12-12 +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE modifying PK +create table t1 ( +id int, s date, e date, +period for p(s,e), +primary key (id, p without overlaps) +) engine=innodb; +start transaction; +select * from t1; +id s e +connect con1,localhost,root; +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL 
checked_duplicate WAIT_FOR do_insert"; +update t1 set e=e + interval 2 month where s=date'2010-09-09'; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 23000: Duplicate entry '10-2010-11-10-2010-09-09' for key 'PRIMARY' +connection default; +select * from t1; +id s e +commit; +select * from t1; +id s e +10 2010-09-09 2010-09-10 +10 2010-10-10 2010-12-12 +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE IGNORE modifying PK +create table t1 ( +id int, s date, e date, +period for p(s,e), +primary key (id, p without overlaps) +) engine=innodb; +start transaction; +select * from t1; +id s e +connect con1,localhost,root; +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update ignore t1 set e=e + interval 2 month where s=date'2010-09-09'; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint WITHOUT OVERLAPS is not currently supported +connection default; +select * from t1; +id s e +commit; +select * from t1; +id s e +10 2010-09-09 2010-09-10 +10 2010-10-10 2010-12-12 +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +# End of 10.6 
tests diff --git a/mysql-test/suite/period/r/long_unique.result b/mysql-test/suite/period/r/long_unique.result index 5c5f4297fb9..fa7817fb562 100644 --- a/mysql-test/suite/period/r/long_unique.result +++ b/mysql-test/suite/period/r/long_unique.result @@ -15,3 +15,20 @@ INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01'); DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01'; ERROR 23000: Duplicate entry 'foo' for key 'b' DROP TABLE t1; +# End of 10.5 tests +# +# MDEV-37312 ASAN errors or assertion failure upon attempt to UPDATE FOR PORTION violating long unique under READ COMMITTED +# +create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb; +insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01'); +set transaction isolation level read committed; +update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1; +ERROR 23000: Duplicate entry 'foo' for key 'f' +drop table t1; +create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb partition by hash (a); +insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01'); +set transaction isolation level read committed; +update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1; +ERROR 23000: Duplicate entry 'foo' for key 'f' +drop table t1; +# End of 10.6 tests diff --git a/mysql-test/suite/period/r/perfschema.result b/mysql-test/suite/period/r/perfschema.result new file mode 100644 index 00000000000..7dff0f28c39 --- /dev/null +++ b/mysql-test/suite/period/r/perfschema.result @@ -0,0 +1,26 @@ +# +# MDEV-36720 Possible memory leak on updating table with index without overlaps +# +create table t1 (id int not null primary key auto_increment, rel_id int not null, start_date datetime not null, end_date datetime not null, period for p (start_date, end_date), unique key (rel_id, p without overlaps)); +insert into t1 (rel_id, start_date, 
end_date) values (1, '2024-01-01', '2025-01-01'); +select object_type,object_schema,object_name,owner_thread_id,owner_event_id,internal_lock,external_lock from performance_schema.table_handles where object_name='t1'; +object_type object_schema object_name owner_thread_id owner_event_id internal_lock external_lock +TABLE test t1 0 0 WRITE CONCURRENT INSERT NULL +update t1 set end_date = '2025-01-16' where id = 1; +select object_type,object_schema,object_name,owner_thread_id,owner_event_id,internal_lock,external_lock from performance_schema.table_handles where object_name='t1'; +object_type object_schema object_name owner_thread_id owner_event_id internal_lock external_lock +TABLE test t1 0 0 WRITE NULL +update t1 set end_date = '2025-01-16' where id = 1; +select object_type,object_schema,object_name,owner_thread_id,owner_event_id,internal_lock,external_lock from performance_schema.table_handles where object_name='t1'; +object_type object_schema object_name owner_thread_id owner_event_id internal_lock external_lock +TABLE test t1 0 0 WRITE NULL +update t1 set end_date = '2025-01-16' where id = 1; +select object_type,object_schema,object_name,owner_thread_id,owner_event_id,internal_lock,external_lock from performance_schema.table_handles where object_name='t1'; +object_type object_schema object_name owner_thread_id owner_event_id internal_lock external_lock +TABLE test t1 0 0 WRITE NULL +update t1 set end_date = '2025-01-16' where id = 1; +select object_type,object_schema,object_name,owner_thread_id,owner_event_id,internal_lock,external_lock from performance_schema.table_handles where object_name='t1'; +object_type object_schema object_name owner_thread_id owner_event_id internal_lock external_lock +TABLE test t1 0 0 WRITE NULL +drop table t1; +# End of 10.11 tests diff --git a/mysql-test/suite/period/t/innodb_debug.test b/mysql-test/suite/period/t/innodb_debug.test new file mode 100644 index 00000000000..dc49f2d55f5 --- /dev/null +++ 
b/mysql-test/suite/period/t/innodb_debug.test @@ -0,0 +1,221 @@ +--source include/have_innodb.inc +--source include/have_debug_sync.inc + +--echo # +--echo # MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records +--echo # + +--echo ## INSERT +create table t1 ( + id int, s date, e date, + period for p(s,e), + unique(id, p without overlaps) +) engine=innodb; + +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send insert t1 values(10, date'2010-09-09', date'2010-11-10') +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +insert t1 values(10, date'2010-10-10', date'2010-11-12'); +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_DUP_ENTRY +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE +create table t1 ( + id int, s date, e date, + period for p(s,e), + unique(id, p without overlaps) +) engine=innodb; + +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update t1 set e=e + interval 2 month where s=date'2010-09-09' +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set innodb_snapshot_isolation=0; +set transaction 
isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_DUP_ENTRY +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## INSERT IGNORE +create table t1 ( + id int, s date, e date, + period for p(s,e), + unique(id, p without overlaps) +) engine=innodb; + +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send insert ignore t1 values(10, date'2010-09-09', date'2010-11-10'),(11, date'2010-09-09', date'2010-11-10') +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +insert t1 values(10, date'2010-10-10', date'2010-11-12'); +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE IGNORE +create table t1 ( + id int, s date, e date, + period for p(s,e), + unique(id, p without overlaps) +) engine=innodb; + +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update ignore t1 set e=e + interval 2 month where 
s=date'2010-09-09' +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_NOT_SUPPORTED_YET +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE modifying PK +create table t1 ( + id int, s date, e date, + period for p(s,e), + primary key (id, p without overlaps) +) engine=innodb; + +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update t1 set e=e + interval 2 month where s=date'2010-09-09' +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_DUP_ENTRY +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE IGNORE modifying PK +create table t1 ( + id int, s date, e date, + period for p(s,e), + primary key (id, p without overlaps) +) engine=innodb; + +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert 
t1 values(10, date'2010-12-10', date'2010-12-12'); +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update ignore t1 set e=e + interval 2 month where s=date'2010-09-09' +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set innodb_snapshot_isolation=0; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_NOT_SUPPORTED_YET +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + + +--echo # End of 10.6 tests + diff --git a/mysql-test/suite/period/t/long_unique.test b/mysql-test/suite/period/t/long_unique.test index c2dcd3f6c3f..bca2f15ebae 100644 --- a/mysql-test/suite/period/t/long_unique.test +++ b/mysql-test/suite/period/t/long_unique.test @@ -1,3 +1,4 @@ +--source include/have_innodb.inc --source include/have_partition.inc --echo # @@ -21,3 +22,23 @@ INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01'); DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01'; DROP TABLE t1; +--echo # End of 10.5 tests + +--echo # +--echo # MDEV-37312 ASAN errors or assertion failure upon attempt to UPDATE FOR PORTION violating long unique under READ COMMITTED +--echo # +create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb; +insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01'); +set transaction isolation level read committed; +--error ER_DUP_ENTRY +update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1; +drop table t1; + +create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb partition by hash (a); +insert into t1 values 
(1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01'); +set transaction isolation level read committed; +--error ER_DUP_ENTRY +update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1; +drop table t1; + +--echo # End of 10.6 tests diff --git a/mysql-test/suite/period/t/perfschema.test b/mysql-test/suite/period/t/perfschema.test new file mode 100644 index 00000000000..783099f8683 --- /dev/null +++ b/mysql-test/suite/period/t/perfschema.test @@ -0,0 +1,19 @@ +--source include/not_embedded.inc +--source include/have_perfschema.inc +--echo # +--echo # MDEV-36720 Possible memory leak on updating table with index without overlaps +--echo # +create table t1 (id int not null primary key auto_increment, rel_id int not null, start_date datetime not null, end_date datetime not null, period for p (start_date, end_date), unique key (rel_id, p without overlaps)); +insert into t1 (rel_id, start_date, end_date) values (1, '2024-01-01', '2025-01-01'); +select object_type,object_schema,object_name,owner_thread_id,owner_event_id,internal_lock,external_lock from performance_schema.table_handles where object_name='t1'; +update t1 set end_date = '2025-01-16' where id = 1; +select object_type,object_schema,object_name,owner_thread_id,owner_event_id,internal_lock,external_lock from performance_schema.table_handles where object_name='t1'; +update t1 set end_date = '2025-01-16' where id = 1; +select object_type,object_schema,object_name,owner_thread_id,owner_event_id,internal_lock,external_lock from performance_schema.table_handles where object_name='t1'; +update t1 set end_date = '2025-01-16' where id = 1; +select object_type,object_schema,object_name,owner_thread_id,owner_event_id,internal_lock,external_lock from performance_schema.table_handles where object_name='t1'; +update t1 set end_date = '2025-01-16' where id = 1; +select object_type,object_schema,object_name,owner_thread_id,owner_event_id,internal_lock,external_lock from 
performance_schema.table_handles where object_name='t1'; +drop table t1; + +--echo # End of 10.11 tests diff --git a/mysql-test/suite/plugins/r/simple_password_check.result b/mysql-test/suite/plugins/r/simple_password_check.result index 0826b7f6637..8046761d708 100644 --- a/mysql-test/suite/plugins/r/simple_password_check.result +++ b/mysql-test/suite/plugins/r/simple_password_check.result @@ -261,6 +261,22 @@ install soname "simple_password_check"; MARIADB-ADMIN: unable to change password; error: 'The MariaDB server is running with the --strict-password-validation option so it cannot execute this statement' # All done uninstall plugin simple_password_check; -# # End of 10.4 tests # +# MDEV-30190 Password check plugin prevents changing grants for CURRENT_USER +# +select priv into @old_priv from mysql.global_priv where user='root' and host='localhost'; +install soname "simple_password_check"; +grant all on db1.* to current_user; +select current_user; +current_user +root@localhost +show grants; +Grants for root@localhost +GRANT ALL PRIVILEGES ON *.* TO `root`@`localhost` WITH GRANT OPTION +GRANT ALL PRIVILEGES ON `db1`.* TO `root`@`localhost` +GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION +revoke all on db1.* from current_user; +uninstall plugin simple_password_check; +update mysql.global_priv set priv=@old_priv where user='root' and host='localhost'; +# End of 10.11 tests diff --git a/mysql-test/suite/plugins/t/simple_password_check.test b/mysql-test/suite/plugins/t/simple_password_check.test index 442585e89d4..5b55512591e 100644 --- a/mysql-test/suite/plugins/t/simple_password_check.test +++ b/mysql-test/suite/plugins/t/simple_password_check.test @@ -165,6 +165,19 @@ install soname "simple_password_check"; --echo # All done uninstall plugin simple_password_check; ---echo # --echo # End of 10.4 tests + --echo # +--echo # MDEV-30190 Password check plugin prevents changing grants for CURRENT_USER +--echo # +select priv into @old_priv from 
mysql.global_priv where user='root' and host='localhost'; +install soname "simple_password_check"; +grant all on db1.* to current_user; +select current_user; +show grants; +revoke all on db1.* from current_user; +uninstall plugin simple_password_check; +#cleanup +update mysql.global_priv set priv=@old_priv where user='root' and host='localhost'; + +--echo # End of 10.11 tests diff --git a/mysql-test/suite/rpl/r/rbr_lsu_off.result b/mysql-test/suite/rpl/r/rbr_lsu_off.result new file mode 100644 index 00000000000..65eedbad1d3 --- /dev/null +++ b/mysql-test/suite/rpl/r/rbr_lsu_off.result @@ -0,0 +1,34 @@ +include/master-slave.inc +[connection master] +# +# MDEV-33957 UPDATE fails on replica replicating blob virtual column in +# NOBLOB mode when replica logging is off +# +connection slave; +select @@log_slave_updates; +@@log_slave_updates +0 +connection master; +set binlog_row_image= 'NOBLOB'; +create table t ( +c int primary key, d int, +i blob generated always as (c), key k(i)) engine=innodb; +Warnings: +Note 1071 Specified key was too long; max key length is 3072 bytes +insert into t (c) values (1); +update t set d= 0; +connection slave; +connection master; +drop table t; +create table t ( +c int primary key, d int, +i blob generated always as (c), key k(i)) engine=innodb +partition by key (c) partitions 2; +Warnings: +Note 1071 Specified key was too long; max key length is 3072 bytes +insert into t (c) values (1); +update t set d= 0; +connection slave; +connection master; +drop table t; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_alter_convert_partition.result b/mysql-test/suite/rpl/r/rpl_alter_convert_partition.result new file mode 100644 index 00000000000..a6b42d11e66 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_alter_convert_partition.result @@ -0,0 +1,80 @@ +include/master-slave.inc +[connection master] +# +# Ensure initial CREATE TABLE with partitioned data is replicated +# correctly +connection master; +create table t (a int, b int, key(a)) 
engine=innodb partition by range (b) (partition p1 values less than (10), partition pn values less than (maxvalue)); +insert into t values (1,5),(2,100); +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +include/diff_tables.inc [master:test.t,slave:test.t] +# +# Ensure ALTER TABLE .. CONVERT PARTITION .. TO TABLE replicates +# correctly +connection master; +alter table t convert partition p1 to table offspring; +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +include/diff_tables.inc [master:test.t,slave:test.t] +include/diff_tables.inc [master:test.offspring,slave:test.offspring] +# +# Ensure data can be inserted into existing table after +# ALTER TABLE .. CONVERT PARTITION .. TO TABLE +connection master; +insert into t values (3, 6); +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +include/diff_tables.inc [master:test.t,slave:test.t] +# +# Ensure data can be inserted into offspring table after +# ALTER TABLE .. CONVERT PARTITION .. TO TABLE +connection master; +insert into offspring values (4, 101); +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +include/diff_tables.inc [master:test.offspring,slave:test.offspring] +# +# Ensure data can be updated in existing table after +# ALTER TABLE .. CONVERT PARTITION .. TO TABLE +connection master; +update t set b=b+1 where a=3; +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +include/diff_tables.inc [master:test.t,slave:test.t] +# +# Ensure data can be updated in offspring table after +# ALTER TABLE .. CONVERT PARTITION .. TO TABLE +connection master; +update offspring set b=b+1 where a=4; +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +include/diff_tables.inc [master:test.offspring,slave:test.offspring] +# +# Ensure data can be deleted in existing table after +# ALTER TABLE .. CONVERT PARTITION .. 
TO TABLE +connection master; +delete from t; +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +include/diff_tables.inc [master:test.t,slave:test.t] +# +# Ensure data can be deleted in offspring table after +# ALTER TABLE .. CONVERT PARTITION .. TO TABLE +connection master; +delete from offspring; +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +include/diff_tables.inc [master:test.offspring,slave:test.offspring] +connection master; +drop table t, offspring; +include/rpl_end.inc +# End of rpl_alter_convert_partition diff --git a/mysql-test/suite/rpl/r/rpl_conditional_comments.result b/mysql-test/suite/rpl/r/rpl_conditional_comments.result index 036824d60aa..8bfccb9c6a8 100644 --- a/mysql-test/suite/rpl/r/rpl_conditional_comments.result +++ b/mysql-test/suite/rpl/r/rpl_conditional_comments.result @@ -88,5 +88,19 @@ c1 3 20 connection master; +insert t1 values /*! (100);insert t1 values */ (200) // +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'insert t1 values */ (200)' at line 1 +select * from t1; +c1 +62 +3 +20 +connection slave; +select * from t1; +c1 +62 +3 +20 +connection master; DROP TABLE t1; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_mysqldump_gtid_slave_pos.result b/mysql-test/suite/rpl/r/rpl_mysqldump_gtid_slave_pos.result index 6fafa256b77..6375643a313 100644 --- a/mysql-test/suite/rpl/r/rpl_mysqldump_gtid_slave_pos.result +++ b/mysql-test/suite/rpl/r/rpl_mysqldump_gtid_slave_pos.result @@ -51,7 +51,6 @@ after initial slave got in sync include/stop_slave.inc # 3. A include/stop_slave.inc -include/stop_slave.inc # 4. 
set statement sql_log_bin=0 for delete from mysql.gtid_slave_pos; insert into mysql.gtid_slave_pos values (99 + 2, 1, 1, 1); diff --git a/mysql-test/suite/rpl/r/rpl_parallel_seq.result b/mysql-test/suite/rpl/r/rpl_parallel_seq.result index 02287d54e33..faeb93fcf68 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel_seq.result +++ b/mysql-test/suite/rpl/r/rpl_parallel_seq.result @@ -82,6 +82,30 @@ SELECT @@global.gtid_binlog_state, @@global.gtid_slave_pos as "all through 101 h @@global.gtid_binlog_state all through 101 have been committed 0-1-101 0-1-101 connection slave; +include/stop_slave.inc +set @saved_mode= @@global.slave_parallel_mode; +set @@global.slave_parallel_mode = conservative; +include/start_slave.inc +connection master; +INSERT INTO ti SET a=2; +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +lock table ti write; +SET GLOBAL debug_dbug= "+d,halt_past_mark_start_commit"; +connection master; +INSERT INTO ti SET a=35570; +ALTER SEQUENCE s2 restart with 1; +include/save_master_gtid.inc +connection slave; +unlock tables; +SET debug_sync = "now SIGNAL past_mark_continue"; +include/sync_with_master_gtid.inc +include/stop_slave.inc +SET @@global.slave_parallel_mode = @saved_mode; +SET @@global.debug_dbug = @@GLOBAL.debug_dbug; +include/start_slave.inc +connection slave; flush tables with read lock; connection master; CREATE OR REPLACE SEQUENCE s3 ENGINE=innodb; diff --git a/mysql-test/suite/rpl/r/rpl_rbr_monitor.result b/mysql-test/suite/rpl/r/rpl_rbr_monitor.result deleted file mode 100644 index e8ac6277233..00000000000 --- a/mysql-test/suite/rpl/r/rpl_rbr_monitor.result +++ /dev/null @@ -1,43 +0,0 @@ -include/master-slave.inc -[connection master] -connection master; -create table t1(a int primary key) engine=innodb; -connection slave; -connection slave1; -begin; -insert into t1(a) values(1); -connection master; -select * from t1; -a -insert into t1(a) values(1); -#monitoring write rows -connection slave; -#monitoring update 
rows -connection slave1; -rollback; -begin; -select a from t1 for update; -a -1 -connection master; -update t1 set a = a + 1 ; -connection slave; -#monitoring delete rows -connection slave1; -rollback; -begin; -select * from t1 for update; -a -2 -connection master; -delete from t1; -connection slave; -select * from t1; -a -2 -connection slave1; -rollback; -connection master; -drop table t1; -connection slave; -include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_seconds_behind_master_spike.result b/mysql-test/suite/rpl/r/rpl_seconds_behind_master_spike.result index c208ee2efad..389ebfc901b 100644 --- a/mysql-test/suite/rpl/r/rpl_seconds_behind_master_spike.result +++ b/mysql-test/suite/rpl/r/rpl_seconds_behind_master_spike.result @@ -4,17 +4,18 @@ connection slave; include/stop_slave.inc CHANGE MASTER TO MASTER_USE_GTID=NO; include/start_slave.inc -include/stop_slave.inc -SET @save_dbug= @@GLOBAL.debug_dbug; -SET @@global.debug_dbug="+d,pause_sql_thread_on_relay_fde_after_trans"; -SET @@global.debug_dbug="+d,negate_clock_diff_with_master"; -include/start_slave.inc # Future events must be logged at least 2 seconds after # the slave starts connection master; # Write events to ensure slave will be consistent with master create table t1 (a int); insert into t1 values (1); +connection slave; +# Enable breakpoints +SET @save_dbug= @@GLOBAL.debug_dbug; +SET @@GLOBAL.debug_dbug= +'+d,pause_sql_thread_on_fde,negate_clock_diff_with_master'; +connection master; # Flush logs on master forces slave to generate a Format description # event in its relay log flush logs; @@ -26,8 +27,8 @@ count(*)=1 1 # The relay log FDE has been processed - here we check to ensure it was # not considered in Seconds_Behind_Master calculation -connection slave1; -# Safely resume slave SQL thread +Seconds_Behind_Master = '0' +# Safely resume slave SQL (worker) thread # Prove SQL thread is in state "debug sync point: now" SET @@global.debug_dbug="-d,pause_sql_thread_on_fde"; SET DEBUG_SYNC='now 
SIGNAL sql_thread_continue'; diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result b/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result index c1880d19deb..40a1af830e7 100644 --- a/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result @@ -9,6 +9,8 @@ set @save_semi_sync_master_enabled= @@global.rpl_semi_sync_master_enabled; set @save_semi_sync_wp= @@global.rpl_semi_sync_master_wait_point; set @save_bgc_count= @@global.binlog_commit_wait_count; set @save_bgc_usec= @@global.binlog_commit_wait_usec; +set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point; +set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave; set @save_debug_dbug= @@global.debug_dbug; set @@global.binlog_commit_wait_count=3; set @@global.binlog_commit_wait_usec=10000000; @@ -56,8 +58,6 @@ connection slave; # the binlogging to semi-sync, and starting the wait for ACK; and during # this pause, semi-sync is manually switched off and on. connection master; -set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point; -set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave; set @@global.rpl_semi_sync_master_wait_point= AFTER_SYNC; set @@global.rpl_semi_sync_master_wait_no_slave= 1; set @@global.debug_dbug="+d,semisync_log_skip_trx_wait"; @@ -112,11 +112,70 @@ commit; # Cleanup connection master; drop table tn; +set @@global.debug_dbug=@save_debug_dbug; +# +# MDEV-36934 +# The server could indefinitely hang due to a memory leak which tried to +# pthread signal on a destroyed condition variable. In effect, no +# connections could commit transactions because there would be a thread +# stuck on a never-returning call to pthread_cond_signal() while +# holding Repl_semi_sync_master::LOCK_log. 
+connection master; +set @@global.rpl_semi_sync_master_wait_point= AFTER_COMMIT; +set @@global.rpl_semi_sync_master_wait_no_slave= 0; +# Ensure servers are in proper state +connection master; +connection slave; +# Test case initial set-up +connection master; +create table t_36934 (a int) engine=innodb; +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +# Pause the user transaction before inserting into Active_tranx +connect user_con,localhost,root,,; +SET debug_sync= 'semisync_at_write_tranx_in_binlog SIGNAL at_write_tranx_in_binlog WAIT_FOR resume_write_tranx_in_binlog'; +insert into t_36934 values (1); +connection server_1; +set debug_sync="now wait_for at_write_tranx_in_binlog"; +# Disconnect the slave (note that the binlog dump thread won't yet be +# notified of a binlog update from the last transaction, so the slave +# should neither receiver nor ACK the transaction). +connection slave; +include/stop_slave.inc +# Waiting for master to realize the slave has disconnected.. +connection server_1; +# ..done +# Resuming transaction (it will exit commit_trx early without waiting) +set debug_sync="now signal resume_write_tranx_in_binlog"; +connection user_con; +disconnect user_con; +# Force delete the user thread (FLUSH THREADS ensures the thread won't +# stay in the thread cache) +connection master; +FLUSH THREADS; +# BUG: Re-connect slave. MDEV-36934 reports that the master would hang +# when the slave would re-connect and try to ACK the last transaction +# who's thread has been deleted +connection slave; +include/start_slave.inc +# Try to commit another transaction (prior to MDEV-36934 fixes, this +# would hang indefinitely) +connection master; +set debug_sync="RESET"; +insert into t_36934 values (2); +connection server_1; +# Waiting 30s for last query to complete.. 
+connection master; +# ..done +# Cleanup +connection master; set @@global.rpl_semi_sync_master_wait_point= @old_master_wait_point; set @@global.rpl_semi_sync_master_wait_no_slave= @old_master_wait_no_slave; set @@global.debug_dbug=@save_debug_dbug; set @@global.rpl_semi_sync_master_enabled= @save_semi_sync_master_enabled; set @@global.rpl_semi_sync_master_wait_point= @save_semi_sync_wp; +drop table t_36934; connection slave; include/stop_slave.inc set @@global.rpl_semi_sync_slave_enabled= @save_semi_sync_slave_enabled; diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_master_disable_with_slave.result b/mysql-test/suite/rpl/r/rpl_semi_sync_master_disable_with_slave.result index 3b0686c351e..b3edb47a1bb 100644 --- a/mysql-test/suite/rpl/r/rpl_semi_sync_master_disable_with_slave.result +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_master_disable_with_slave.result @@ -11,11 +11,8 @@ connection slave; connection master; SELECT ID INTO @binlog_dump_tid FROM information_schema.PROCESSLIST WHERE COMMAND = 'Binlog Dump'; -# Control State -SELECT STATE FROM information_schema.PROCESSLIST WHERE ID = @binlog_dump_tid; -STATE -Master has sent all binlog to slave; waiting for more updates -SHOW STATUS LIKE 'Rpl_semi_sync_master_clients'; +# Wait for the Control State +SHOW STATUS LIKE 'Rpl\_semi\_sync\_master\_clients'; Variable_name Value Rpl_semi_sync_master_clients 1 # Disable Semi-Sync while the dump thread is still connected to its slave @@ -23,14 +20,14 @@ SET @@GLOBAL.rpl_semi_sync_master_enabled = 0; SELECT STATE FROM information_schema.PROCESSLIST WHERE ID = @binlog_dump_tid; STATE Master has sent all binlog to slave; waiting for more updates -SHOW STATUS LIKE 'Rpl_semi_sync_master_clients'; +SHOW STATUS LIKE 'Rpl\_semi\_sync\_master\_clients'; Variable_name Value Rpl_semi_sync_master_clients 1 # Disconnect the slave and wait until the master's dump thread is gone connection slave; STOP SLAVE; connection master; -SHOW STATUS LIKE 'Rpl_semi_sync_master_clients'; +SHOW 
STATUS LIKE 'Rpl\_semi\_sync\_master\_clients'; Variable_name Value Rpl_semi_sync_master_clients 0 # Cleanup diff --git a/mysql-test/suite/rpl/t/rbr_lsu_off-slave.opt b/mysql-test/suite/rpl/t/rbr_lsu_off-slave.opt new file mode 100644 index 00000000000..2f2a9c436fc --- /dev/null +++ b/mysql-test/suite/rpl/t/rbr_lsu_off-slave.opt @@ -0,0 +1,2 @@ +--log-slave-updates=0 + diff --git a/mysql-test/suite/rpl/t/rbr_lsu_off.test b/mysql-test/suite/rpl/t/rbr_lsu_off.test new file mode 100644 index 00000000000..2fb1c0452bd --- /dev/null +++ b/mysql-test/suite/rpl/t/rbr_lsu_off.test @@ -0,0 +1,35 @@ +--source include/have_innodb.inc +--source include/have_partition.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +--echo # +--echo # MDEV-33957 UPDATE fails on replica replicating blob virtual column in +--echo # NOBLOB mode when replica logging is off +--echo # + +--connection slave +select @@log_slave_updates; +--connection master +set binlog_row_image= 'NOBLOB'; +create table t ( + c int primary key, d int, + i blob generated always as (c), key k(i)) engine=innodb; +insert into t (c) values (1); +update t set d= 0; +--sync_slave_with_master + +--connection master +drop table t; +create table t ( + c int primary key, d int, + i blob generated always as (c), key k(i)) engine=innodb + partition by key (c) partitions 2; +insert into t (c) values (1); +update t set d= 0; +--sync_slave_with_master + +--connection master +drop table t; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_alter_convert_partition.test b/mysql-test/suite/rpl/t/rpl_alter_convert_partition.test new file mode 100644 index 00000000000..ef5aa3ed9cb --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_alter_convert_partition.test @@ -0,0 +1,117 @@ +# +# This test ensures that ALTER TABLE ... CONVERT PARTITION ... TO TABLE +# works with replication. I.e., the partitioning is done correctly, and +# after partitioning, both tables can be updated correctly. 
+# +# References: +# MDEV-36906: RBR crashes upon DML after CONVERT PARTITION +# + +--source include/have_innodb.inc +--source include/have_partition.inc +--source include/master-slave.inc + + +--echo # +--echo # Ensure initial CREATE TABLE with partitioned data is replicated +--echo # correctly +--connection master +create table t (a int, b int, key(a)) engine=innodb partition by range (b) (partition p1 values less than (10), partition pn values less than (maxvalue)); +insert into t values (1,5),(2,100); +--source include/save_master_gtid.inc +--connection slave +--source include/sync_with_master_gtid.inc +--let $diff_tables=master:test.t,slave:test.t + +--source include/diff_tables.inc + +--echo # +--echo # Ensure ALTER TABLE .. CONVERT PARTITION .. TO TABLE replicates +--echo # correctly +--connection master +alter table t convert partition p1 to table offspring; +--source include/save_master_gtid.inc +--connection slave +--source include/sync_with_master_gtid.inc +--let $diff_tables=master:test.t,slave:test.t +--source include/diff_tables.inc +--let $diff_tables=master:test.offspring,slave:test.offspring +--source include/diff_tables.inc + + +--echo # +--echo # Ensure data can be inserted into existing table after +--echo # ALTER TABLE .. CONVERT PARTITION .. TO TABLE +--connection master +insert into t values (3, 6); +--source include/save_master_gtid.inc +--connection slave +--source include/sync_with_master_gtid.inc +--let $diff_tables=master:test.t,slave:test.t +--source include/diff_tables.inc + + +--echo # +--echo # Ensure data can be inserted into offspring table after +--echo # ALTER TABLE .. CONVERT PARTITION .. 
TO TABLE +--connection master +insert into offspring values (4, 101); +--source include/save_master_gtid.inc +--connection slave +--source include/sync_with_master_gtid.inc +--let $diff_tables=master:test.offspring,slave:test.offspring +--source include/diff_tables.inc + + +--echo # +--echo # Ensure data can be updated in existing table after +--echo # ALTER TABLE .. CONVERT PARTITION .. TO TABLE +--connection master +update t set b=b+1 where a=3; +--source include/save_master_gtid.inc +--connection slave +--source include/sync_with_master_gtid.inc +--let $diff_tables=master:test.t,slave:test.t +--source include/diff_tables.inc + + +--echo # +--echo # Ensure data can be updated in offspring table after +--echo # ALTER TABLE .. CONVERT PARTITION .. TO TABLE +--connection master +update offspring set b=b+1 where a=4; +--source include/save_master_gtid.inc +--connection slave +--source include/sync_with_master_gtid.inc +--let $diff_tables=master:test.offspring,slave:test.offspring +--source include/diff_tables.inc + + +--echo # +--echo # Ensure data can be deleted in existing table after +--echo # ALTER TABLE .. CONVERT PARTITION .. TO TABLE +--connection master +delete from t; +--source include/save_master_gtid.inc +--connection slave +--source include/sync_with_master_gtid.inc +--let $diff_tables=master:test.t,slave:test.t +--source include/diff_tables.inc + + +--echo # +--echo # Ensure data can be deleted in offspring table after +--echo # ALTER TABLE .. CONVERT PARTITION .. 
TO TABLE +--connection master +delete from offspring; +--source include/save_master_gtid.inc +--connection slave +--source include/sync_with_master_gtid.inc +--let $diff_tables=master:test.offspring,slave:test.offspring +--source include/diff_tables.inc + + +--connection master +drop table t, offspring; +--source include/rpl_end.inc +--echo # End of rpl_alter_convert_partition diff --git a/mysql-test/suite/rpl/t/rpl_conditional_comments.test b/mysql-test/suite/rpl/t/rpl_conditional_comments.test index 6e4ec8745f4..343ea0d3d13 100644 --- a/mysql-test/suite/rpl/t/rpl_conditional_comments.test +++ b/mysql-test/suite/rpl/t/rpl_conditional_comments.test @@ -80,5 +80,17 @@ sync_slave_with_master; select * from t1; connection master; +# +# Bug#37117875 Binlog record error when delimiter is set to other symbols +# +delimiter //; +--error ER_PARSE_ERROR +insert t1 values /*! (100);insert t1 values */ (200) // +delimiter ;// +select * from t1; +sync_slave_with_master; +select * from t1; +connection master; + DROP TABLE t1; --source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_mysqldump_gtid_slave_pos.test b/mysql-test/suite/rpl/t/rpl_mysqldump_gtid_slave_pos.test index 94b9645a3b3..6145a490ddd 100644 --- a/mysql-test/suite/rpl/t/rpl_mysqldump_gtid_slave_pos.test +++ b/mysql-test/suite/rpl/t/rpl_mysqldump_gtid_slave_pos.test @@ -73,7 +73,6 @@ select @@global.gtid_slave_pos as "after initial slave got in sync"; --echo # 3. 
A # Two dumps prepared to be restored in the following loop --exec $MYSQL_DUMP_SLAVE --no-autocommit=0 --dump-slave --gtid mysql gtid_slave_pos > $MYSQLTEST_VARDIR/tmp/dump_2.sql ---source include/stop_slave.inc --exec $MYSQL_DUMP_SLAVE --no-autocommit=0 --master-data --gtid mysql gtid_slave_pos > $MYSQLTEST_VARDIR/tmp/dump_1.sql diff --git a/mysql-test/suite/rpl/t/rpl_parallel_seq.test b/mysql-test/suite/rpl/t/rpl_parallel_seq.test index 9522a976b89..17870eb3b86 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_seq.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_seq.test @@ -128,6 +128,53 @@ SET DEBUG_SYNC = 'now SIGNAL continue_worker'; SELECT @@global.gtid_binlog_state, @@global.gtid_slave_pos as "all through 101 have been committed"; +# +# MDEV-35570 parallel slave ALTER-SEQUENCE attempted to binlog out-of-order. +# Let two transactions I_1 -> AS_2 where AS_2 depends on a commit parent I_1. +# Under the bug condition AS_2 may complete its work including binlogging +# while I_1 is slowly executing Xid_log_event. +# The test simulates the slowness, AS_2 must defer its completion.
+# +--connection slave +--source include/stop_slave.inc +set @saved_mode= @@global.slave_parallel_mode; +set @@global.slave_parallel_mode = conservative; +--source include/start_slave.inc + +--connection master +INSERT INTO ti SET a=2; +--source include/save_master_gtid.inc + +--connection slave +--source include/sync_with_master_gtid.inc +# allow to proceed to sync with the 1st following WFPT2SC wait condition +lock table ti write; +# allow to proceed into commit to sync with the 2nd following WFPC wait condition +--let $saved_dbug= @@GLOBAL.debug_dbug +SET GLOBAL debug_dbug= "+d,halt_past_mark_start_commit"; + +--connection master +INSERT INTO ti SET a=35570; +ALTER SEQUENCE s2 restart with 1; +--source include/save_master_gtid.inc + +--connection slave +--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to start commit" +--source include/wait_condition.inc +# the 1st wait release +unlock tables; + +--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit" +--source include/wait_condition.inc +# the 2nd wait release +SET debug_sync = "now SIGNAL past_mark_continue"; + +--source include/sync_with_master_gtid.inc +--source include/stop_slave.inc +SET @@global.slave_parallel_mode = @saved_mode; +--eval SET @@global.debug_dbug = $saved_dbug +--source include/start_slave.inc + # MDEV-31792 Assertion in MDL_context::acquire_lock upon parallel replication of CREATE SEQUENCE --let $iter = 3 diff --git a/mysql-test/suite/rpl/t/rpl_rbr_monitor.test b/mysql-test/suite/rpl/t/rpl_rbr_monitor.test deleted file mode 100644 index 2bc1f9cd482..00000000000 --- a/mysql-test/suite/rpl/t/rpl_rbr_monitor.test +++ /dev/null @@ -1,76 +0,0 @@ -# -# Mdev-7409 On RBR, extend the PROCESSLIST info to include at least the name of -# the recently used table -# This testcase create Write_rows_log_event , Update_rows_log_event and -#
Delete_rows_log_event which is blocked on slave and we will check whether -# whether processinfo includes table name or not. ---source include/have_innodb.inc ---source include/have_binlog_format_row.inc ---source include/master-slave.inc ---enable_connect_log - ---connection master -create table t1(a int primary key) engine=innodb; - ---sync_slave_with_master ---connection slave1 -begin; -insert into t1(a) values(1); ---connection master -select * from t1; - -insert into t1(a) values(1); ---save_master_pos - ---echo #monitoring write rows ---connection slave - - -let $wait_condition= SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST - WHERE DB = 'test' AND STATE LIKE "Write_rows_log_event::write_row(%) on table %"; ---source include/wait_condition.inc - - ---echo #monitoring update rows ---connection slave1 -rollback; ---sync_with_master -begin; -select a from t1 for update; - ---connection master -update t1 set a = a + 1 ; ---save_master_pos - ---connection slave -let $wait_condition= SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST - WHERE DB = 'test' AND STATE LIKE "Update_rows_log_event::find_row(%) on table %"; ---source include/wait_condition.inc - ---echo #monitoring delete rows ---connection slave1 -rollback; ---sync_with_master -begin; -select * from t1 for update; - ---connection master -delete from t1; ---save_master_pos - ---connection slave -select * from t1; -let $wait_condition= SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST - WHERE DB = 'test' AND STATE LIKE "Delete_rows_log_event::find_row(%) on table %"; ---source include/wait_condition.inc - -#CleanUp ---connection slave1 -rollback; ---sync_with_master - ---connection master -drop table t1; ---sync_slave_with_master - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_seconds_behind_master_spike.combinations b/mysql-test/suite/rpl/t/rpl_seconds_behind_master_spike.combinations new file mode 100644 index 00000000000..1909ee4d407 --- /dev/null +++ 
b/mysql-test/suite/rpl/t/rpl_seconds_behind_master_spike.combinations @@ -0,0 +1,5 @@ +[serial] +--slave-parallel-threads=0 + +[parallel] +--slave-parallel-threads=1 diff --git a/mysql-test/suite/rpl/t/rpl_seconds_behind_master_spike.test b/mysql-test/suite/rpl/t/rpl_seconds_behind_master_spike.test index a80dedb7383..a46233e9677 100644 --- a/mysql-test/suite/rpl/t/rpl_seconds_behind_master_spike.test +++ b/mysql-test/suite/rpl/t/rpl_seconds_behind_master_spike.test @@ -18,6 +18,8 @@ # # References: # MDEV-16091: Seconds_Behind_Master spikes to millions of seconds +# MDEV-25999: Unrealistic "Seconds behind master" value +# MDEV-36840: Seconds_Behind_Master Spike at Log Rotation on Parallel Replica # --source include/have_debug.inc --source include/have_debug_sync.inc @@ -31,12 +33,6 @@ CHANGE MASTER TO MASTER_USE_GTID=NO; --source include/start_slave.inc ---source include/stop_slave.inc -SET @save_dbug= @@GLOBAL.debug_dbug; -SET @@global.debug_dbug="+d,pause_sql_thread_on_relay_fde_after_trans"; -SET @@global.debug_dbug="+d,negate_clock_diff_with_master"; ---source include/start_slave.inc - --let $sleep_time=2 --echo # Future events must be logged at least $sleep_time seconds after --echo # the slave starts @@ -46,33 +42,38 @@ SET @@global.debug_dbug="+d,negate_clock_diff_with_master"; --echo # Write events to ensure slave will be consistent with master create table t1 (a int); insert into t1 values (1); ---let $t_master_events_logged= `SELECT UNIX_TIMESTAMP()` +--sync_slave_with_master +let $wait_condition= SELECT STATE LIKE '%waiting for more updates' + FROM information_schema.PROCESSLIST WHERE COMMAND = 'Slave_SQL'; +--source include/wait_condition.inc +--echo # Enable breakpoints +SET @save_dbug= @@GLOBAL.debug_dbug; +SET @@GLOBAL.debug_dbug= + '+d,pause_sql_thread_on_fde,negate_clock_diff_with_master'; + +--connection master --echo # Flush logs on master forces slave to generate a Format description --echo # event in its relay log flush logs; --connection 
slave --echo # On the next FDE, the slave should have the master CREATE/INSERT events +--sleep $sleep_time SET DEBUG_SYNC='now WAIT_FOR paused_on_fde'; select count(*)=1 from t1; --echo # The relay log FDE has been processed - here we check to ensure it was --echo # not considered in Seconds_Behind_Master calculation ---connection slave1 -let $sbm= query_get_value(SHOW SLAVE STATUS, Seconds_Behind_Master, 1); ---let $t_now= `SELECT UNIX_TIMESTAMP()` +# `Seconds_Behind_Master` must remain 0. +# * MDEV-16091: `Seconds_Behind_Master` should not point at the FDE's timestamp, +# which is the time of `start_slave.inc`. +# * MDEV-25999 & MDEV-36840: For a new event stream, `Seconds_Behind_Master` +# should not point at anything at all, because FDEs are not user content. +--let $status_items= Seconds_Behind_Master +--source include/show_slave_status.inc -# Ensure Seconds_Behind_Master does not point beyond when we have proven the -# events we have proven to have executed. The extra second is needed as a -# buffer because the recorded times are not exact with when the events were -# recorded on the master.
-if(`select $sbm > $t_now - $t_master_events_logged + 1`) -{ - die "A relay log event was incorrectly used to set Seconds_Behind_Master"; -} - ---echo # Safely resume slave SQL thread +--echo # Safely resume slave SQL (worker) thread --let $dbug_wait_state="debug sync point: now" --echo # Prove SQL thread is in state $dbug_wait_state diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test b/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test index 5a158150452..3cd9d054852 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test @@ -36,6 +36,8 @@ set @save_semi_sync_master_enabled= @@global.rpl_semi_sync_master_enabled; set @save_semi_sync_wp= @@global.rpl_semi_sync_master_wait_point; set @save_bgc_count= @@global.binlog_commit_wait_count; set @save_bgc_usec= @@global.binlog_commit_wait_usec; +set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point; +set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave; set @save_debug_dbug= @@global.debug_dbug; set @@global.binlog_commit_wait_count=3; set @@global.binlog_commit_wait_usec=10000000; @@ -111,8 +113,6 @@ drop table t1, t2, t3; --connection master -set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point; -set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave; set @@global.rpl_semi_sync_master_wait_point= AFTER_SYNC; set @@global.rpl_semi_sync_master_wait_no_slave= 1; --eval set @@global.debug_dbug="+d,semisync_log_skip_trx_wait" @@ -207,11 +207,111 @@ commit; --echo # Cleanup --connection master drop table tn; +set @@global.debug_dbug=@save_debug_dbug; + + +--echo # +--echo # MDEV-36934 +--echo # The server could indefinitely hang due to a memory leak which tried to +--echo # pthread signal on a destroyed condition variable. 
In effect, no +--echo # connections could commit transactions because there would be a thread +--echo # stuck on a never-returning call to pthread_cond_signal() while +--echo # holding Repl_semi_sync_master::LOCK_log. + +--connection master +set @@global.rpl_semi_sync_master_wait_point= AFTER_COMMIT; +set @@global.rpl_semi_sync_master_wait_no_slave= 0; + +--echo # Ensure servers are in proper state +--connection master +let $status_var= rpl_semi_sync_master_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; +--connection slave +let $status_var= rpl_semi_sync_slave_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; + +--echo # Test case initial set-up +--connection master +create table t_36934 (a int) engine=innodb; +--source include/save_master_gtid.inc +--connection slave +--source include/sync_with_master_gtid.inc + +--echo # Pause the user transaction before inserting into Active_tranx +--connect(user_con,localhost,root,,) +SET debug_sync= 'semisync_at_write_tranx_in_binlog SIGNAL at_write_tranx_in_binlog WAIT_FOR resume_write_tranx_in_binlog'; +--send insert into t_36934 values (1) + +--connection server_1 +set debug_sync="now wait_for at_write_tranx_in_binlog"; + +--echo # Disconnect the slave (note that the binlog dump thread won't yet be +--echo # notified of a binlog update from the last transaction, so the slave +--echo # should neither receive nor ACK the transaction). +--connection slave +--source include/stop_slave.inc + +--echo # Waiting for master to realize the slave has disconnected..
+--connection server_1 +let $status_var= rpl_semi_sync_master_clients; +let $status_var_value= 0; +source include/wait_for_status_var.inc; +--echo # ..done + +--echo # Resuming transaction (it will exit commit_trx early without waiting) +set debug_sync="now signal resume_write_tranx_in_binlog"; + +--connection user_con +--reap +--let $user_con_tid= `SELECT connection_id()` +--disconnect user_con +--source include/wait_until_disconnected.inc + +--echo # Force delete the user thread (FLUSH THREADS ensures the thread won't +--echo # stay in the thread cache) +--connection master +FLUSH THREADS; + +--echo # BUG: Re-connect slave. MDEV-36934 reports that the master would hang +--echo # when the slave would re-connect and try to ACK the last transaction +--echo # whose thread has been deleted +--connection slave +--source include/start_slave.inc + +--echo # Try to commit another transaction (prior to MDEV-36934 fixes, this +--echo # would hang indefinitely) +--connection master +set debug_sync="RESET"; +--send insert into t_36934 values (2) + +--connection server_1 +--echo # Waiting 30s for last query to complete..
+--let $wait_timeout= 30 +--let $wait_condition= SELECT count(*)=0 FROM information_schema.processlist WHERE info LIKE 'insert into t_36934%'; +--source include/wait_condition.inc + +# Variable `success` is set by wait_condition.inc +if (!$success) +{ + --echo # ..error + --die Query is hung +} + +--connection master +--reap +--echo # ..done + + +--echo # Cleanup +--connection master set @@global.rpl_semi_sync_master_wait_point= @old_master_wait_point; set @@global.rpl_semi_sync_master_wait_no_slave= @old_master_wait_no_slave; set @@global.debug_dbug=@save_debug_dbug; set @@global.rpl_semi_sync_master_enabled= @save_semi_sync_master_enabled; set @@global.rpl_semi_sync_master_wait_point= @save_semi_sync_wp; +drop table t_36934; --connection slave --source include/stop_slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_master_disable_with_slave.test b/mysql-test/suite/rpl/t/rpl_semi_sync_master_disable_with_slave.test index 28d8e7fcf7b..14130f6eeab 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_master_disable_with_slave.test +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_master_disable_with_slave.test @@ -33,9 +33,12 @@ SELECT ID INTO @binlog_dump_tid FROM information_schema.PROCESSLIST WHERE COMMAND = 'Binlog Dump'; --enable_cursor_protocol ---echo # Control State -SELECT STATE FROM information_schema.PROCESSLIST WHERE ID = @binlog_dump_tid; -SHOW STATUS LIKE 'Rpl_semi_sync_master_clients'; +--echo # Wait for the Control State +let $wait_condition= + SELECT STATE LIKE 'Master has sent all binlog to slave%' + FROM information_schema.PROCESSLIST WHERE ID = @binlog_dump_tid; +--source include/wait_condition.inc +SHOW STATUS LIKE 'Rpl\_semi\_sync\_master\_clients'; --echo # Disable Semi-Sync while the dump thread is still connected to its slave SET @@GLOBAL.rpl_semi_sync_master_enabled = 0; @@ -43,7 +46,7 @@ SET @@GLOBAL.rpl_semi_sync_master_enabled = 0; --source include/wait_for_status_var.inc SELECT STATE FROM information_schema.PROCESSLIST WHERE ID = 
@binlog_dump_tid; -SHOW STATUS LIKE 'Rpl_semi_sync_master_clients'; +SHOW STATUS LIKE 'Rpl\_semi\_sync\_master\_clients'; --echo # Disconnect the slave and wait until the master's dump thread is gone --connection slave @@ -54,9 +57,10 @@ STOP SLAVE; # MDEV-36359: The disconnection would crash the master and leave the wait with # error 2013 'Lost connection to server during query' ---let $wait_condition= SELECT COUNT(*)=0 FROM information_schema.PROCESSLIST WHERE ID = @binlog_dump_tid +let $wait_condition= SELECT COUNT(*)=0 + FROM information_schema.PROCESSLIST WHERE ID = @binlog_dump_tid; --source include/wait_condition.inc -SHOW STATUS LIKE 'Rpl_semi_sync_master_clients'; +SHOW STATUS LIKE 'Rpl\_semi\_sync\_master\_clients'; --echo # Cleanup --eval SET @@GLOBAL.rpl_semi_sync_master_enabled= $orig_master_enabled diff --git a/mysql-test/suite/sql_sequence/default.result b/mysql-test/suite/sql_sequence/default.result index 75c6c0adb55..aec5d528938 100644 --- a/mysql-test/suite/sql_sequence/default.result +++ b/mysql-test/suite/sql_sequence/default.result @@ -292,6 +292,25 @@ a b 10 j DROP TABLE t1; DROP SEQUENCE s1; -# # End of 10.3 tests +# in UPDATE +create sequence s1 cache 0; +create table t1 (id int unsigned default nextval(s1)); +insert t1 values (); +update t1 set id=default; +prepare stmt from "update t1 set id=?"; +execute stmt using default; +deallocate prepare stmt; +drop table t1; +drop sequence s1; # +# MDEV-37302 Assertion failure in Table_triggers_list::add_tables_and_routines_for_triggers upon attempt to insert DEFAULT into non-insertable view +# +create table t1 (f int); +create algorithm=temptable view v1 as select * from t1; +create trigger tr before update on t1 for each row set @a=1; +insert v1 values (default); +ERROR HY000: The target table v1 of the INSERT is not insertable-into +drop view v1; +drop table t1; +# End of 10.6 tests diff --git a/mysql-test/suite/sql_sequence/default.test b/mysql-test/suite/sql_sequence/default.test index 
f965089d992..5cbfe237cd3 100644 --- a/mysql-test/suite/sql_sequence/default.test +++ b/mysql-test/suite/sql_sequence/default.test @@ -216,6 +216,28 @@ SELECT a, b FROM t1; DROP TABLE t1; DROP SEQUENCE s1; ---echo # --echo # End of 10.3 tests + +--echo # in UPDATE +create sequence s1 cache 0; +create table t1 (id int unsigned default nextval(s1)); +insert t1 values (); +update t1 set id=default; +prepare stmt from "update t1 set id=?"; +execute stmt using default; +deallocate prepare stmt; +drop table t1; +drop sequence s1; + --echo # +--echo # MDEV-37302 Assertion failure in Table_triggers_list::add_tables_and_routines_for_triggers upon attempt to insert DEFAULT into non-insertable view +--echo # +create table t1 (f int); +create algorithm=temptable view v1 as select * from t1; +create trigger tr before update on t1 for each row set @a=1; +--error ER_NON_INSERTABLE_TABLE +insert v1 values (default); +drop view v1; +drop table t1; + +--echo # End of 10.6 tests diff --git a/mysql-test/suite/sql_sequence/grant.result b/mysql-test/suite/sql_sequence/grant.result index fc3421efcb6..0ea9f9d0667 100644 --- a/mysql-test/suite/sql_sequence/grant.result +++ b/mysql-test/suite/sql_sequence/grant.result @@ -97,7 +97,92 @@ ERROR 42000: SELECT, INSERT command denied to user 'u'@'localhost' for table `my disconnect con1; connection default; drop user u; +create user u_alter; +create table t1 (id int); +grant alter on t1 to u_alter; +connect con_alter,localhost,u_alter,,mysqltest_1; +alter table t1 modify id int default nextval(s1); +ERROR 42000: SELECT, INSERT command denied to user 'u_alter'@'localhost' for table `mysqltest_1`.`s1` +connection default; +grant insert, select on s1 to u_alter; +connection con_alter; +alter table t1 modify id int default nextval(s1); +disconnect con_alter; +connection default; +drop user u_alter; drop database mysqltest_1; # -# End of 10.11 tests +# MDEV-36870 Spurious unrelated permission error when selecting from table with default that uses 
nextval(sequence) # +create database db1; +use db1; +create sequence s1 cache 0; +create table t1 (id int unsigned default (10+nextval(s1))); +insert t1 values (); +create table t2 (id int unsigned default nextval(s1), b int default(default(id))); +insert t2 values (); +create function f1(x int) returns int sql security invoker +begin +select id+x into x from t1; +return x; +insert t1 values (); +end| +create user u1@localhost; +grant select on db1.* to u1@localhost; +grant execute on db1.* to u1@localhost; +grant all privileges on test.* to u1@localhost; +use test; +create table t3 (id int unsigned default (20+nextval(db1.s1)), b int); +insert t3 values (); +create sequence s2 cache 0; +create table t4 (id int unsigned default (10+nextval(s2)), b int); +insert t4 values (); +connect u1,localhost,u1,,db1; +select * from t1; +id +11 +connection default; +flush tables; +connection u1; +select * from t1; +id +11 +select default(id) from t1; +ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1` +select * from t2; +id b +2 3 +select f1(100); +f1(100) +111 +select column_name, data_type, column_default from information_schema.columns where table_schema='db1' and table_name='t1'; +column_name data_type column_default +id int (10 + nextval(`db1`.`s1`)) +use test; +insert t3 values (); +ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1` +insert t4 values (); +insert t3 (b) select 5; +ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1` +insert t4 (b) select 5; +update t3 set id=default; +ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1` +update t4 set id=default; +prepare stmt from "update t3 set id=?"; +execute stmt using default; +ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1` +prepare stmt from "update t4 set id=?"; +execute stmt using default; +deallocate prepare stmt; +insert t4 (b) values ((select * from db1.t1)); 
+insert t4 (b) values ((select default(id) from db1.t1)); +ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1` +connection default; +disconnect u1; +select nextval(db1.s1) as 'must be 5'; +must be 5 +5 +drop user u1@localhost; +drop database db1; +drop table t3, t4, s2; +# End of 10.6 tests diff --git a/mysql-test/suite/sql_sequence/grant.test b/mysql-test/suite/sql_sequence/grant.test index c205bd34223..dfde1275246 100644 --- a/mysql-test/suite/sql_sequence/grant.test +++ b/mysql-test/suite/sql_sequence/grant.test @@ -106,12 +106,121 @@ create table t1 (a int not null default(nextval(s1)), --connection default drop user u; -# -# Cleanup -# - +# ALTER for table with DEFAULT NEXTVAL(seq) column needs INSERT/SELECT on seq +# just like CREATE does in the example above +create user u_alter; +create table t1 (id int); +grant alter on t1 to u_alter; +--connect(con_alter,localhost,u_alter,,mysqltest_1) +--error ER_TABLEACCESS_DENIED_ERROR +alter table t1 modify id int default nextval(s1); +--connection default +grant insert, select on s1 to u_alter; +--connection con_alter +alter table t1 modify id int default nextval(s1); +--disconnect con_alter +--connection default +drop user u_alter; drop database mysqltest_1; --echo # ---echo # End of 10.11 tests +--echo # MDEV-36870 Spurious unrelated permission error when selecting from table with default that uses nextval(sequence) --echo # + +# various tests for permission checking on sequences +create database db1; +use db1; +create sequence s1 cache 0; +create table t1 (id int unsigned default (10+nextval(s1))); +insert t1 values (); + +create table t2 (id int unsigned default nextval(s1), b int default(default(id))); +insert t2 values (); + +# INSERT affects prelocking, but is never actually executed +delimiter |; +create function f1(x int) returns int sql security invoker +begin + select id+x into x from t1; + return x; + insert t1 values (); +end| +delimiter ;| + +create user u1@localhost; +grant 
select on db1.* to u1@localhost; +grant execute on db1.* to u1@localhost; +grant all privileges on test.* to u1@localhost; + +use test; +create table t3 (id int unsigned default (20+nextval(db1.s1)), b int); +insert t3 values (); + +create sequence s2 cache 0; +create table t4 (id int unsigned default (10+nextval(s2)), b int); +insert t4 values (); + +connect u1,localhost,u1,,db1; + +# table already in the cache. must be re-fixed +# SELECT * - no error +select * from t1; + +# not in cache +connection default; +flush tables; +connection u1; +# SELECT * - no error +select * from t1; + +# SELECT DEFAULT() - error +--error ER_TABLEACCESS_DENIED_ERROR +select default(id) from t1; + +# default(default(nextval)) +select * from t2; + +# SELECT but table has TL_WRITE because of prelocking +select f1(100); + +# opening the table for I_S +select column_name, data_type, column_default from information_schema.columns where table_schema='db1' and table_name='t1'; + +use test; +# insert +--error ER_TABLEACCESS_DENIED_ERROR +insert t3 values (); +insert t4 values (); +#insert select +--error ER_TABLEACCESS_DENIED_ERROR +insert t3 (b) select 5; +insert t4 (b) select 5; +#update +--error ER_TABLEACCESS_DENIED_ERROR +update t3 set id=default; +update t4 set id=default; + +# PS UPDATE with ? 
= DEFAULT +prepare stmt from "update t3 set id=?"; +--error ER_TABLEACCESS_DENIED_ERROR +execute stmt using default; +prepare stmt from "update t4 set id=?"; +execute stmt using default; +deallocate prepare stmt; + +# SELECT * in a subquery, like INSERT t3 VALUES ((SELECT * FROM t1)); +# with sequences both on t3 and t1 +insert t4 (b) values ((select * from db1.t1)); +--error ER_TABLEACCESS_DENIED_ERROR +insert t4 (b) values ((select default(id) from db1.t1)); + +connection default; +disconnect u1; +--disable_ps2_protocol +select nextval(db1.s1) as 'must be 5'; +--enable_ps2_protocol +drop user u1@localhost; +drop database db1; +drop table t3, t4, s2; + +--echo # End of 10.6 tests diff --git a/mysql-test/suite/sys_vars/r/innodb_linux_aio_basic.result b/mysql-test/suite/sys_vars/r/innodb_linux_aio_basic.result new file mode 100644 index 00000000000..5f72c246d71 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_linux_aio_basic.result @@ -0,0 +1,21 @@ +select @@global.innodb_linux_aio; +@@global.innodb_linux_aio +auto +select @@session.innodb_linux_aio; +ERROR HY000: Variable 'innodb_linux_aio' is a GLOBAL variable +show global variables like 'innodb_linux_aio'; +Variable_name Value +innodb_linux_aio auto +show session variables like 'innodb_linux_aio'; +Variable_name Value +innodb_linux_aio auto +select * from information_schema.global_variables where variable_name='innodb_linux_aio'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LINUX_AIO auto +select * from information_schema.session_variables where variable_name='innodb_linux_aio'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LINUX_AIO auto +set global innodb_linux_aio='auto'; +ERROR HY000: Variable 'innodb_linux_aio' is a read only variable +set session innodb_linux_aio='aio'; +ERROR HY000: Variable 'innodb_linux_aio' is a read only variable diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result index 1043684ba2c..2ff85735db9 100644 --- 
a/mysql-test/suite/sys_vars/r/sysvars_innodb.result +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result @@ -5,6 +5,7 @@ variable_name not in ( 'innodb_evict_tables_on_commit_debug', # one may want to override this 'innodb_use_native_aio', # default value depends on OS 'innodb_log_file_buffering', # only available on Linux and Windows +'innodb_linux_aio', # existence depends on OS 'innodb_buffer_pool_load_pages_abort') # debug build only, and is only for testing order by variable_name; VARIABLE_NAME INNODB_ADAPTIVE_FLUSHING diff --git a/mysql-test/suite/sys_vars/t/innodb_linux_aio_basic.test b/mysql-test/suite/sys_vars/t/innodb_linux_aio_basic.test new file mode 100644 index 00000000000..e5dcdb081cb --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_linux_aio_basic.test @@ -0,0 +1,23 @@ +--source include/have_innodb.inc +--source include/linux.inc +# enum readonly + +# +# show values; +# +select @@global.innodb_linux_aio; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.innodb_linux_aio; +show global variables like 'innodb_linux_aio'; +show session variables like 'innodb_linux_aio'; +select * from information_schema.global_variables where variable_name='innodb_linux_aio'; +select * from information_schema.session_variables where variable_name='innodb_linux_aio'; + +# +# show that it's read-only +# +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +set global innodb_linux_aio='auto'; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +set session innodb_linux_aio='aio'; + diff --git a/mysql-test/suite/sys_vars/t/innodb_read_io_threads_basic.test b/mysql-test/suite/sys_vars/t/innodb_read_io_threads_basic.test index 0fd7a5eafcb..ed14dddc21c 100644 --- a/mysql-test/suite/sys_vars/t/innodb_read_io_threads_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_read_io_threads_basic.test @@ -34,5 +34,7 @@ set global innodb_read_io_threads=64; select @@innodb_read_io_threads; --error ER_GLOBAL_VARIABLE set session innodb_read_io_threads=1; +--disable_warnings set global 
innodb_read_io_threads=@n; +--enable_warnings diff --git a/mysql-test/suite/sys_vars/t/sysvars_innodb.test b/mysql-test/suite/sys_vars/t/sysvars_innodb.test index 4c5ad1f676c..250eb8b5c8f 100644 --- a/mysql-test/suite/sys_vars/t/sysvars_innodb.test +++ b/mysql-test/suite/sys_vars/t/sysvars_innodb.test @@ -16,5 +16,6 @@ select VARIABLE_NAME, SESSION_VALUE, DEFAULT_VALUE, VARIABLE_SCOPE, VARIABLE_TYP 'innodb_evict_tables_on_commit_debug', # one may want to override this 'innodb_use_native_aio', # default value depends on OS 'innodb_log_file_buffering', # only available on Linux and Windows + 'innodb_linux_aio', # existence depends on OS 'innodb_buffer_pool_load_pages_abort') # debug build only, and is only for testing order by variable_name; diff --git a/mysql-test/suite/vcol/r/query_cache.result b/mysql-test/suite/vcol/r/query_cache.result new file mode 100644 index 00000000000..4528715a564 --- /dev/null +++ b/mysql-test/suite/vcol/r/query_cache.result @@ -0,0 +1,20 @@ +# +# MDEV-29186 Query cache makes virtual column function RAND() non-random +# +set global query_cache_type=1; +set query_cache_type=1; +set rand_seed1=1234567890, rand_seed2=20; +create table t1 (a int, b float as (rand())); +insert into t1 (a) values (1); +select * from t1; +a b +1 0.449343 +select * from t1; +a b +1 0.797372 +set global query_cache_type=default; +select * from t1; +a b +1 0.638829 +drop table t1; +# End of 10.11 tests diff --git a/mysql-test/suite/vcol/t/query_cache.test b/mysql-test/suite/vcol/t/query_cache.test new file mode 100644 index 00000000000..46bcba7d03f --- /dev/null +++ b/mysql-test/suite/vcol/t/query_cache.test @@ -0,0 +1,18 @@ +--echo # +--echo # MDEV-29186 Query cache makes virtual column function RAND() non-random +--echo # + +set global query_cache_type=1; +set query_cache_type=1; +set rand_seed1=1234567890, rand_seed2=20; +create table t1 (a int, b float as (rand())); +insert into t1 (a) values (1); +--disable_ps2_protocol +select * from t1; +select * from t1; 
+set global query_cache_type=default; +select * from t1; +--enable_ps2_protocol +drop table t1; + +--echo # End of 10.11 tests diff --git a/mysql-test/suite/versioning/r/delete_history,32bit.rdiff b/mysql-test/suite/versioning/r/delete_history,32bit.rdiff index ded21820c1c..a72f1290184 100644 --- a/mysql-test/suite/versioning/r/delete_history,32bit.rdiff +++ b/mysql-test/suite/versioning/r/delete_history,32bit.rdiff @@ -9,3 +9,232 @@ create or replace procedure p() delete history from t1 before system_time '2039-01-01 23:00'; call p; select * from t1; +@@ -275,65 +275,65 @@ + a row_start row_end + 1 2000-01-01 00:00:00.000000 2000-01-01 00:00:01.000000 + 2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +-3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++3 2000-01-01 00:00:02.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + select *, row_start, row_end from t for system_time as of @ts1; + a row_start row_end +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + 2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 + execute immediate "select *, row_start, row_end from t for system_time as of ?" 
using @ts1; + a row_start row_end +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + 2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 + prepare stmt from 'select *, row_start, row_end from t for system_time as of ?'; + execute stmt using @ts1; + a row_start row_end +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + 2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 + Execute stmt using @ts1; + a row_start row_end +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + 2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 + select *, row_start, row_end from t for system_time as of @ts2; + a row_start row_end +-3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++3 2000-01-01 00:00:02.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute immediate "select *, row_start, row_end from t for system_time as of ?" 
using @ts2; + a row_start row_end +-3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++3 2000-01-01 00:00:02.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute stmt using @ts2; + a row_start row_end +-3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++3 2000-01-01 00:00:02.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + Execute stmt using @ts2; + a row_start row_end +-3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++3 2000-01-01 00:00:02.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + drop prepare stmt; + execute immediate "delete history from t before system_time @ts1"; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end + 1 2000-01-01 00:00:00.000000 2000-01-01 00:00:01.000000 + 2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +-3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++3 2000-01-01 00:00:02.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute immediate "delete history from t before system_time @ts2"; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end + 2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +-3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++3 2000-01-01 00:00:02.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute immediate "delete history from t before system_time ?" 
using @ts3; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end +-3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++3 2000-01-01 00:00:02.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute immediate "delete history from t before system_time ?" using @ts3; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end +-3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++3 2000-01-01 00:00:02.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + update t set a= a + 1 where a < 100; + set @ts4= '2000-01-01 00:00:04'; + set timestamp= unix_timestamp(@ts4); +@@ -342,14 +342,14 @@ + a row_start row_end + 3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 + 4 2000-01-01 00:00:03.000000 2000-01-01 00:00:04.000000 +-5 2000-01-01 00:00:04.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++5 2000-01-01 00:00:04.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute immediate "delete history from t before system_time ?" 
using '2000-01-01 00:00:04'; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end + 4 2000-01-01 00:00:03.000000 2000-01-01 00:00:04.000000 +-5 2000-01-01 00:00:04.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++5 2000-01-01 00:00:04.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + set @ts5= '2000-01-01 00:00:05'; + set timestamp= unix_timestamp(@ts5); + update t set a= a + 1 where a < 100; +@@ -365,8 +365,8 @@ + 5 2000-01-01 00:00:04.000000 2000-01-01 00:00:05.000000 + 6 2000-01-01 00:00:05.000000 2000-01-01 00:00:06.000000 + 7 2000-01-01 00:00:06.000000 2000-01-01 00:00:07.000000 +-8 2000-01-01 00:00:07.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++8 2000-01-01 00:00:07.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + prepare stmt from 'delete history from t before system_time ?'; + execute stmt using @ts4; + select *, row_start, row_end from t for system_time all order by a; +@@ -375,36 +375,36 @@ + 5 2000-01-01 00:00:04.000000 2000-01-01 00:00:05.000000 + 6 2000-01-01 00:00:05.000000 2000-01-01 00:00:06.000000 + 7 2000-01-01 00:00:06.000000 2000-01-01 00:00:07.000000 +-8 2000-01-01 00:00:07.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++8 2000-01-01 00:00:07.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute stmt using @ts5; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end + 5 2000-01-01 00:00:04.000000 2000-01-01 00:00:05.000000 + 6 2000-01-01 00:00:05.000000 2000-01-01 00:00:06.000000 + 7 2000-01-01 00:00:06.000000 2000-01-01 00:00:07.000000 +-8 2000-01-01 00:00:07.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++8 2000-01-01 00:00:07.000000 
2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute stmt using @ts6; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end + 6 2000-01-01 00:00:05.000000 2000-01-01 00:00:06.000000 + 7 2000-01-01 00:00:06.000000 2000-01-01 00:00:07.000000 +-8 2000-01-01 00:00:07.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++8 2000-01-01 00:00:07.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute stmt using '2000-01-01 00:00:06'; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end + 6 2000-01-01 00:00:05.000000 2000-01-01 00:00:06.000000 + 7 2000-01-01 00:00:06.000000 2000-01-01 00:00:07.000000 +-8 2000-01-01 00:00:07.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++8 2000-01-01 00:00:07.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute stmt using '2000-01-01 00:00:06.000001'; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end + 7 2000-01-01 00:00:06.000000 2000-01-01 00:00:07.000000 +-8 2000-01-01 00:00:07.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++8 2000-01-01 00:00:07.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + set @ts8= '2000-01-01 00:00:08'; + set timestamp= unix_timestamp(@ts8); + delete from t; +@@ -438,8 +438,8 @@ + 1 2000-01-01 00:00:00.000000 2000-01-01 00:00:01.000000 + 2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 + 3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 +-4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++4 2000-01-01 00:00:03.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 
03:14:07.999999 + execute stmt using @ts1, 0; + Warnings: + Warning 1292 Truncated incorrect DOUBLE value: '2000-01-01 00:00:01' +@@ -452,40 +452,40 @@ + 1 2000-01-01 00:00:00.000000 2000-01-01 00:00:01.000000 + 2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 + 3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 +-4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++4 2000-01-01 00:00:03.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute stmt using @ts2, 0; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end + 2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 + 3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 +-4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++4 2000-01-01 00:00:03.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute stmt using @@timestamp, NULL; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end + 2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 + 3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 +-4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++4 2000-01-01 00:00:03.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute stmt using NULL, NULL; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end + 2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 + 3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 +-4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++4 2000-01-01 00:00:03.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + 
execute stmt using @ts2, 1; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end + 3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 +-4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++4 2000-01-01 00:00:03.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + execute stmt using @ts2, @ts2; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end +-4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +-100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 ++4 2000-01-01 00:00:03.000000 2038-01-19 03:14:07.999999 ++100 2000-01-01 00:00:00.000000 2038-01-19 03:14:07.999999 + delete from t; + select *, row_start, row_end from t for system_time all order by a; + a row_start row_end diff --git a/mysql-test/suite/versioning/r/delete_history.result b/mysql-test/suite/versioning/r/delete_history.result index 5681902a5f7..61ad71a86d7 100644 --- a/mysql-test/suite/versioning/r/delete_history.result +++ b/mysql-test/suite/versioning/r/delete_history.result @@ -251,3 +251,257 @@ t CREATE TABLE `t` ( PARTITIONS 2 drop table t; # End of 10.9 tests +# +# MDEV-34046 Parameterized PS converts error to warning, causes replication problems +# +create table t (a int) with system versioning; +set timestamp= unix_timestamp('2000-01-01 00:00:00'); +insert into t values (1), (100); +delete history from t before system_time @@timestamp; +ERROR HY000: Illegal parameter data type double for operation 'FOR SYSTEM_TIME' +execute immediate "delete history from t before system_time @@timestamp"; +ERROR HY000: Illegal parameter data type double for operation 'FOR SYSTEM_TIME' +execute immediate "delete history from t before system_time ?" 
using @@timestamp; +ERROR HY000: Illegal parameter data type double for operation 'FOR SYSTEM_TIME' +set @ts1= '2000-01-01 00:00:01'; +set timestamp= unix_timestamp(@ts1); +update t set a= a + 1 where a < 100; +set @ts2= '2000-01-01 00:00:02'; +set timestamp= unix_timestamp(@ts2); +update t set a= a + 1 where a < 100; +set @ts3= '2000-01-01 00:00:03'; +set timestamp= unix_timestamp(@ts3); +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +1 2000-01-01 00:00:00.000000 2000-01-01 00:00:01.000000 +2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +select *, row_start, row_end from t for system_time as of @ts1; +a row_start row_end +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +execute immediate "select *, row_start, row_end from t for system_time as of ?" using @ts1; +a row_start row_end +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +prepare stmt from 'select *, row_start, row_end from t for system_time as of ?'; +execute stmt using @ts1; +a row_start row_end +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +Execute stmt using @ts1; +a row_start row_end +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +select *, row_start, row_end from t for system_time as of @ts2; +a row_start row_end +3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute immediate "select *, row_start, row_end from t for system_time as of ?" 
using @ts2; +a row_start row_end +3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute stmt using @ts2; +a row_start row_end +3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +Execute stmt using @ts2; +a row_start row_end +3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +drop prepare stmt; +execute immediate "delete history from t before system_time @ts1"; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +1 2000-01-01 00:00:00.000000 2000-01-01 00:00:01.000000 +2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute immediate "delete history from t before system_time @ts2"; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute immediate "delete history from t before system_time ?" using @ts3; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute immediate "delete history from t before system_time ?" 
using @ts3; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +3 2000-01-01 00:00:02.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +update t set a= a + 1 where a < 100; +set @ts4= '2000-01-01 00:00:04'; +set timestamp= unix_timestamp(@ts4); +update t set a= a + 1 where a < 100; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 +4 2000-01-01 00:00:03.000000 2000-01-01 00:00:04.000000 +5 2000-01-01 00:00:04.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute immediate "delete history from t before system_time ?" using '2000-01-01 00:00:04'; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +4 2000-01-01 00:00:03.000000 2000-01-01 00:00:04.000000 +5 2000-01-01 00:00:04.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +set @ts5= '2000-01-01 00:00:05'; +set timestamp= unix_timestamp(@ts5); +update t set a= a + 1 where a < 100; +set @ts6= '2000-01-01 00:00:06'; +set timestamp= unix_timestamp(@ts6); +update t set a= a + 1 where a < 100; +set @ts7= '2000-01-01 00:00:07'; +set timestamp= unix_timestamp(@ts7); +update t set a= a + 1 where a < 100; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +4 2000-01-01 00:00:03.000000 2000-01-01 00:00:04.000000 +5 2000-01-01 00:00:04.000000 2000-01-01 00:00:05.000000 +6 2000-01-01 00:00:05.000000 2000-01-01 00:00:06.000000 +7 2000-01-01 00:00:06.000000 2000-01-01 00:00:07.000000 +8 2000-01-01 00:00:07.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +prepare stmt from 'delete history from t before system_time ?'; +execute stmt using @ts4; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +4 
2000-01-01 00:00:03.000000 2000-01-01 00:00:04.000000 +5 2000-01-01 00:00:04.000000 2000-01-01 00:00:05.000000 +6 2000-01-01 00:00:05.000000 2000-01-01 00:00:06.000000 +7 2000-01-01 00:00:06.000000 2000-01-01 00:00:07.000000 +8 2000-01-01 00:00:07.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute stmt using @ts5; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +5 2000-01-01 00:00:04.000000 2000-01-01 00:00:05.000000 +6 2000-01-01 00:00:05.000000 2000-01-01 00:00:06.000000 +7 2000-01-01 00:00:06.000000 2000-01-01 00:00:07.000000 +8 2000-01-01 00:00:07.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute stmt using @ts6; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +6 2000-01-01 00:00:05.000000 2000-01-01 00:00:06.000000 +7 2000-01-01 00:00:06.000000 2000-01-01 00:00:07.000000 +8 2000-01-01 00:00:07.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute stmt using '2000-01-01 00:00:06'; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +6 2000-01-01 00:00:05.000000 2000-01-01 00:00:06.000000 +7 2000-01-01 00:00:06.000000 2000-01-01 00:00:07.000000 +8 2000-01-01 00:00:07.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute stmt using '2000-01-01 00:00:06.000001'; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +7 2000-01-01 00:00:06.000000 2000-01-01 00:00:07.000000 +8 2000-01-01 00:00:07.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +set @ts8= '2000-01-01 00:00:08'; +set timestamp= unix_timestamp(@ts8); +delete from t; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +7 2000-01-01 00:00:06.000000 2000-01-01 
00:00:07.000000 +8 2000-01-01 00:00:07.000000 2000-01-01 00:00:08.000000 +100 2000-01-01 00:00:00.000000 2000-01-01 00:00:08.000000 +execute immediate "delete history from t before system_time from_unixtime(?)" using @@timestamp; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +8 2000-01-01 00:00:07.000000 2000-01-01 00:00:08.000000 +100 2000-01-01 00:00:00.000000 2000-01-01 00:00:08.000000 +execute stmt using '2020-01-01'; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +drop prepare stmt; +set timestamp= unix_timestamp('2000-01-01 00:00:00'); +insert into t values (1), (100); +set @ts1= '2000-01-01 00:00:01'; +set timestamp= unix_timestamp(@ts1); +update t set a= a + 1 where a < 100; +set timestamp= @@timestamp + 1; +set @ts2= @@timestamp; +update t set a= a + 1 where a < 100; +set timestamp= @@timestamp + 1; +update t set a= a + 1 where a < 100; +prepare stmt from 'delete history from t before system_time from_unixtime(? 
+ ?)'; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +1 2000-01-01 00:00:00.000000 2000-01-01 00:00:01.000000 +2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 +4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute stmt using @ts1, 0; +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: '2000-01-01 00:00:01' +Warning 1292 Truncated incorrect DOUBLE value: '2000-01-01 00:00:01' +Warning 1292 Truncated incorrect DOUBLE value: '2000-01-01 00:00:01' +Warning 1292 Truncated incorrect DOUBLE value: '2000-01-01 00:00:01' +Warning 1292 Truncated incorrect DOUBLE value: '2000-01-01 00:00:01' +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +1 2000-01-01 00:00:00.000000 2000-01-01 00:00:01.000000 +2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 +4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute stmt using @ts2, 0; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 +4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute stmt using @@timestamp, NULL; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +2 2000-01-01 00:00:01.000000 2000-01-01 00:00:02.000000 +3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 +4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute stmt using NULL, NULL; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +2 2000-01-01 
00:00:01.000000 2000-01-01 00:00:02.000000 +3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 +4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute stmt using @ts2, 1; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +3 2000-01-01 00:00:02.000000 2000-01-01 00:00:03.000000 +4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +execute stmt using @ts2, @ts2; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +4 2000-01-01 00:00:03.000000 2106-02-07 06:28:15.999999 +100 2000-01-01 00:00:00.000000 2106-02-07 06:28:15.999999 +delete from t; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +4 2000-01-01 00:00:03.000000 2000-01-01 00:00:03.000000 +100 2000-01-01 00:00:00.000000 2000-01-01 00:00:03.000000 +execute stmt using @ts2, @ts2; +select *, row_start, row_end from t for system_time all order by a; +a row_start row_end +drop prepare stmt; +drop table t; +set timestamp= default; +# +# MDEV-37164 Assertion `vers_conditions.delete_history' failed upon PREPARE +# +create table t (a int) with system versioning; +insert into t values (1),(2); +prepare stmt from 'select * from t for system_time as of timestamp ?'; +drop table t; +# End of 10.11 tests diff --git a/mysql-test/suite/versioning/t/delete_history.test b/mysql-test/suite/versioning/t/delete_history.test index d6b2732824a..cdf4c0432e0 100644 --- a/mysql-test/suite/versioning/t/delete_history.test +++ b/mysql-test/suite/versioning/t/delete_history.test @@ -257,4 +257,121 @@ drop table t; --echo # End of 10.9 tests +--echo # +--echo # MDEV-34046 Parameterized PS converts error to warning, causes replication problems +--echo # +create table t (a int) with system versioning; +set timestamp= unix_timestamp('2000-01-01 00:00:00'); +insert into t values (1), (100); 
+ +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +delete history from t before system_time @@timestamp; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +execute immediate "delete history from t before system_time @@timestamp"; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +execute immediate "delete history from t before system_time ?" using @@timestamp; + +set @ts1= '2000-01-01 00:00:01'; set timestamp= unix_timestamp(@ts1); +update t set a= a + 1 where a < 100; +set @ts2= '2000-01-01 00:00:02'; set timestamp= unix_timestamp(@ts2); +update t set a= a + 1 where a < 100; +set @ts3= '2000-01-01 00:00:03'; set timestamp= unix_timestamp(@ts3); +select *, row_start, row_end from t for system_time all order by a; +select *, row_start, row_end from t for system_time as of @ts1; +execute immediate "select *, row_start, row_end from t for system_time as of ?" using @ts1; +prepare stmt from 'select *, row_start, row_end from t for system_time as of ?'; +execute stmt using @ts1; +Execute stmt using @ts1; +select *, row_start, row_end from t for system_time as of @ts2; +execute immediate "select *, row_start, row_end from t for system_time as of ?" using @ts2; +execute stmt using @ts2; +Execute stmt using @ts2; +drop prepare stmt; + +execute immediate "delete history from t before system_time @ts1"; +select *, row_start, row_end from t for system_time all order by a; +execute immediate "delete history from t before system_time @ts2"; +select *, row_start, row_end from t for system_time all order by a; +execute immediate "delete history from t before system_time ?" using @ts3; +select *, row_start, row_end from t for system_time all order by a; +execute immediate "delete history from t before system_time ?" 
using @ts3; +select *, row_start, row_end from t for system_time all order by a; +update t set a= a + 1 where a < 100; +set @ts4= '2000-01-01 00:00:04'; set timestamp= unix_timestamp(@ts4); +update t set a= a + 1 where a < 100; +select *, row_start, row_end from t for system_time all order by a; +execute immediate "delete history from t before system_time ?" using '2000-01-01 00:00:04'; +select *, row_start, row_end from t for system_time all order by a; +set @ts5= '2000-01-01 00:00:05'; set timestamp= unix_timestamp(@ts5); +update t set a= a + 1 where a < 100; +set @ts6= '2000-01-01 00:00:06'; set timestamp= unix_timestamp(@ts6); +update t set a= a + 1 where a < 100; +set @ts7= '2000-01-01 00:00:07'; set timestamp= unix_timestamp(@ts7); +update t set a= a + 1 where a < 100; +select *, row_start, row_end from t for system_time all order by a; +prepare stmt from 'delete history from t before system_time ?'; +execute stmt using @ts4; +select *, row_start, row_end from t for system_time all order by a; +execute stmt using @ts5; +select *, row_start, row_end from t for system_time all order by a; +execute stmt using @ts6; +select *, row_start, row_end from t for system_time all order by a; +execute stmt using '2000-01-01 00:00:06'; +select *, row_start, row_end from t for system_time all order by a; +execute stmt using '2000-01-01 00:00:06.000001'; +select *, row_start, row_end from t for system_time all order by a; +set @ts8= '2000-01-01 00:00:08'; set timestamp= unix_timestamp(@ts8); +delete from t; +select *, row_start, row_end from t for system_time all order by a; +execute immediate "delete history from t before system_time from_unixtime(?)" using @@timestamp; +select *, row_start, row_end from t for system_time all order by a; +execute stmt using '2020-01-01'; +select *, row_start, row_end from t for system_time all order by a; +drop prepare stmt; + +# Check expression +set timestamp= unix_timestamp('2000-01-01 00:00:00'); +insert into t values (1), (100); +set 
@ts1= '2000-01-01 00:00:01'; set timestamp= unix_timestamp(@ts1); +update t set a= a + 1 where a < 100; +set timestamp= @@timestamp + 1; +set @ts2= @@timestamp; +update t set a= a + 1 where a < 100; +set timestamp= @@timestamp + 1; +update t set a= a + 1 where a < 100; + +prepare stmt from 'delete history from t before system_time from_unixtime(? + ?)'; +select *, row_start, row_end from t for system_time all order by a; +execute stmt using @ts1, 0; +select *, row_start, row_end from t for system_time all order by a; +execute stmt using @ts2, 0; +select *, row_start, row_end from t for system_time all order by a; +execute stmt using @@timestamp, NULL; +select *, row_start, row_end from t for system_time all order by a; +execute stmt using NULL, NULL; +select *, row_start, row_end from t for system_time all order by a; +execute stmt using @ts2, 1; +select *, row_start, row_end from t for system_time all order by a; +execute stmt using @ts2, @ts2; +select *, row_start, row_end from t for system_time all order by a; +delete from t; +select *, row_start, row_end from t for system_time all order by a; +execute stmt using @ts2, @ts2; +select *, row_start, row_end from t for system_time all order by a; + +drop prepare stmt; + +drop table t; +set timestamp= default; + +--echo # +--echo # MDEV-37164 Assertion `vers_conditions.delete_history' failed upon PREPARE +--echo # +create table t (a int) with system versioning; +insert into t values (1),(2); +prepare stmt from 'select * from t for system_time as of timestamp ?'; +drop table t; + +--echo # End of 10.11 tests + --source suite/versioning/common_finish.inc diff --git a/mysql-test/suite/wsrep/r/MDEV-20625.result b/mysql-test/suite/wsrep/r/MDEV-20625.result index 3e2b621c8f9..d5e9df07374 100644 --- a/mysql-test/suite/wsrep/r/MDEV-20625.result +++ b/mysql-test/suite/wsrep/r/MDEV-20625.result @@ -1,4 +1,5 @@ SET GLOBAL wsrep_on=ON; +ERROR HY000: Galera replication not supported SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size'; 
Variable_name Value wsrep_cluster_size 0 diff --git a/mysql-test/suite/wsrep/r/variables_debug.result b/mysql-test/suite/wsrep/r/variables_debug.result index 2eedc248f96..9390ddf64a2 100644 --- a/mysql-test/suite/wsrep/r/variables_debug.result +++ b/mysql-test/suite/wsrep/r/variables_debug.result @@ -138,6 +138,7 @@ WSREP_SST_METHOD WSREP_SST_RECEIVE_ADDRESS WSREP_START_POSITION WSREP_STATUS_FILE +WSREP_STRICT_DDL WSREP_SYNC_WAIT WSREP_TRX_FRAGMENT_SIZE WSREP_TRX_FRAGMENT_UNIT diff --git a/mysql-test/suite/wsrep/r/wsrep_off.result b/mysql-test/suite/wsrep/r/wsrep_off.result new file mode 100644 index 00000000000..95cd804e35c --- /dev/null +++ b/mysql-test/suite/wsrep/r/wsrep_off.result @@ -0,0 +1,6 @@ +SET GLOBAL wsrep_on=ON; +ERROR HY000: Galera replication not supported +REPAIR TABLE performance_schema.setup_objects; +Table Op Msg_type Msg_text +performance_schema.setup_objects repair note The storage engine for the table doesn't support repair +SET GLOBAL wsrep_on=OFF; diff --git a/mysql-test/suite/wsrep/t/MDEV-20625.test b/mysql-test/suite/wsrep/t/MDEV-20625.test index 2a537fe432e..7dcb622fde0 100644 --- a/mysql-test/suite/wsrep/t/MDEV-20625.test +++ b/mysql-test/suite/wsrep/t/MDEV-20625.test @@ -5,6 +5,7 @@ --source include/have_wsrep_provider.inc --source include/have_binlog_format_row.inc +--error ER_GALERA_REPLICATION_NOT_SUPPORTED SET GLOBAL wsrep_on=ON; SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size'; SET GLOBAL wsrep_on=OFF; diff --git a/mysql-test/suite/wsrep/t/variables.test b/mysql-test/suite/wsrep/t/variables.test index 762d783a09c..c82d0ae02c2 100644 --- a/mysql-test/suite/wsrep/t/variables.test +++ b/mysql-test/suite/wsrep/t/variables.test @@ -3,7 +3,7 @@ --source include/have_innodb.inc --source include/galera_no_debug_sync.inc ---let $galera_version=26.4.21 +--let $galera_version=26.4.23 source include/check_galera_version.inc; source include/galera_variables_ok.inc; diff --git a/mysql-test/suite/wsrep/t/variables_debug.test 
b/mysql-test/suite/wsrep/t/variables_debug.test index e55dbd4fa1f..e50cee28a15 100644 --- a/mysql-test/suite/wsrep/t/variables_debug.test +++ b/mysql-test/suite/wsrep/t/variables_debug.test @@ -5,7 +5,7 @@ --source include/have_debug_sync.inc --source include/galera_have_debug_sync.inc ---let $galera_version=26.4.21 +--let $galera_version=26.4.23 source include/check_galera_version.inc; source include/galera_variables_ok_debug.inc; diff --git a/mysql-test/suite/wsrep/t/wsrep_off.cnf b/mysql-test/suite/wsrep/t/wsrep_off.cnf new file mode 100644 index 00000000000..77eae0c4acd --- /dev/null +++ b/mysql-test/suite/wsrep/t/wsrep_off.cnf @@ -0,0 +1,17 @@ +# Use default setting for mysqld processes +!include include/default_mysqld.cnf + +[mysqld] +wsrep-on=OFF +wsrep-provider=@ENV.WSREP_PROVIDER +log-bin +binlog-format=row +loose-wsrep_cluster_address=gcomm:// +loose-wsrep_node_address='127.0.0.1:@mysqld.1.#galera_port' +loose-wsrep-incoming-address=127.0.0.1:@mysqld.1.port + +[mysqld.1] +wsrep-on=OFF +#galera_port=@OPT.port +#ist_port=@OPT.port +#sst_port=@OPT.port diff --git a/mysql-test/suite/wsrep/t/wsrep_off.test b/mysql-test/suite/wsrep/t/wsrep_off.test new file mode 100644 index 00000000000..27e64c92e93 --- /dev/null +++ b/mysql-test/suite/wsrep/t/wsrep_off.test @@ -0,0 +1,8 @@ +--source include/have_innodb.inc +--source include/have_wsrep_provider.inc +--source include/have_binlog_format_row.inc + +--error ER_GALERA_REPLICATION_NOT_SUPPORTED +SET GLOBAL wsrep_on=ON; +REPAIR TABLE performance_schema.setup_objects; +SET GLOBAL wsrep_on=OFF; diff --git a/mysys/CMakeLists.txt b/mysys/CMakeLists.txt index 8f96e000e1c..803ee6b77e8 100644 --- a/mysys/CMakeLists.txt +++ b/mysys/CMakeLists.txt @@ -90,10 +90,16 @@ ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64|i386|i686") ENDIF() ENDIF() ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64") - IF(CMAKE_C_COMPILER_ID STREQUAL "GNU") - include(CheckCXXSourceCompiles) + IF(CMAKE_C_COMPILER_ID MATCHES "Clang|GNU") + 
include(CheckCSourceCompiles) - CHECK_CXX_SOURCE_COMPILES(" + CHECK_C_COMPILER_FLAG(-march=armv8-a+crc+crypto HAVE_ARMV8_CRC_CRYPTO_MARCH) + IF(HAVE_ARMV8_CRC_CRYPTO_MARCH) + SET(SAVE_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") + SET(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -march=armv8-a+crc+crypto") + ENDIF() + + CHECK_C_SOURCE_COMPILES(" #define CRC32CX(crc, value) __asm__(\"crc32cx %w[c], %w[c], %x[v]\":[c]\"+r\"(crc):[v]\"r\"(value)) asm(\".arch_extension crc\"); unsigned int foo(unsigned int ret) { @@ -110,7 +116,7 @@ ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64") #endif }" HAVE_ARMV8_CRC) - CHECK_CXX_SOURCE_COMPILES(" + CHECK_C_SOURCE_COMPILES(" asm(\".arch_extension crypto\"); unsigned int foo(unsigned int ret) { __asm__(\"pmull v2.1q, v2.1d, v1.1d\"); @@ -126,10 +132,8 @@ ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64") #endif }" HAVE_ARMV8_CRYPTO) - CHECK_C_COMPILER_FLAG(-march=armv8-a+crc+crypto HAVE_ARMV8_CRC_CRYPTO_MARCH) - IF(HAVE_ARMV8_CRC_CRYPTO_MARCH) - CHECK_INCLUDE_FILE(arm_acle.h HAVE_ARM_ACLE_H -march=armv8-a+crc+crypto) + CHECK_INCLUDE_FILE(arm_acle.h HAVE_ARM_ACLE_H) IF(HAVE_ARM_ACLE_H) ADD_DEFINITIONS(-DHAVE_ARMV8_CRC_CRYPTO_INTRINSICS) ENDIF() @@ -142,6 +146,8 @@ ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64") SET(MYSYS_SOURCES ${MYSYS_SOURCES} crc32/crc32_arm64.c) SET_SOURCE_FILES_PROPERTIES(crc32/crc32_arm64.c PROPERTIES COMPILE_FLAGS "-march=armv8-a+crc+crypto") + + SET(CMAKE_REQUIRED_FLAGS "${SAVE_CMAKE_REQUIRED_FLAGS}") ENDIF() ENDIF() ENDIF() diff --git a/mysys/charset.c b/mysys/charset.c index 6f12820bdbf..cbcce8c3d0b 100644 --- a/mysys/charset.c +++ b/mysys/charset.c @@ -552,7 +552,7 @@ my_read_charset_file(MY_CHARSET_LOADER *loader, if (my_parse_charset_xml(loader, (char *) buf, len)) { - my_printf_error(EE_UNKNOWN_CHARSET, "Error while parsing '%s': %s\n", + my_printf_error(EE_UNKNOWN_CHARSET, "Error while parsing '%s': %s", MYF(0), filename, loader->error); goto error; } diff --git 
a/mysys/crc32/crc32_arm64.c b/mysys/crc32/crc32_arm64.c index 7c25d383173..6be2750ae08 100644 --- a/mysys/crc32/crc32_arm64.c +++ b/mysys/crc32/crc32_arm64.c @@ -59,7 +59,7 @@ my_crc32_t crc32c_aarch64_available(void) # else /* __APPLE__ */ # include -# ifdef __FreeBSD__ +# if defined(__FreeBSD__) || defined(__OpenBSD__) static unsigned long getauxval(unsigned int key) { unsigned long val; diff --git a/mysys/crc32/crc32c.cc b/mysys/crc32/crc32c.cc index 0d65d0027d4..30788130d59 100644 --- a/mysys/crc32/crc32c.cc +++ b/mysys/crc32/crc32c.cc @@ -455,10 +455,12 @@ static int arch_ppc_probe(void) { return arch_ppc_crc32; } -# elif defined __FreeBSD__ -# include +# elif defined(__FreeBSD__) || defined(__OpenBSD__) # include -# include +# ifdef __FreeBSD__ +# include +# include +# endif static int arch_ppc_probe(void) { unsigned long cpufeatures; arch_ppc_crc32 = 0; @@ -470,12 +472,12 @@ static int arch_ppc_probe(void) { return arch_ppc_crc32; } -# elif defined(_AIX) || defined(__OpenBSD__) +# elif defined(_AIX) static int arch_ppc_probe(void) { arch_ppc_crc32 = 0; # if defined(__powerpc64__) - // AIX 7.1+/OpenBSD has vector crypto features on all POWER 8+ + // AIX 7.1+ has vector crypto features on all POWER 8+ arch_ppc_crc32 = 1; # endif /* __powerpc64__ */ diff --git a/mysys/crc32/crc32c_x86.cc b/mysys/crc32/crc32c_x86.cc index fb5dc19f7a5..a66093e54cc 100644 --- a/mysys/crc32/crc32c_x86.cc +++ b/mysys/crc32/crc32c_x86.cc @@ -25,6 +25,9 @@ #else # include # ifdef __APPLE__ /* AVX512 states are not enabled in XCR0 */ +# elif __GNUC__ >= 15 +# define TARGET "pclmul,avx10.1,vpclmulqdq" +# define USE_VPCLMULQDQ __attribute__((target(TARGET))) # elif __GNUC__ >= 14 || (defined __clang_major__ && __clang_major__ >= 18) # define TARGET "pclmul,evex512,avx512f,avx512dq,avx512bw,avx512vl,vpclmulqdq" # define USE_VPCLMULQDQ __attribute__((target(TARGET))) diff --git a/mysys/mf_qsort.c b/mysys/mf_qsort.c index 4dee20750c0..fbd75451d9f 100644 --- a/mysys/mf_qsort.c +++ 
b/mysys/mf_qsort.c @@ -38,7 +38,7 @@ do { \ if (swap_ptrs) \ { \ reg1 char **a = (char**) (A), **b = (char**) (B); \ - char *tmp = *a; *a++ = *b; *b++ = tmp; \ + char *tmp = *a; *a = *b; *b = tmp; \ } \ else \ { \ @@ -190,16 +190,16 @@ qsort_t my_qsort(void *base_ptr, size_t count, size_t size, qsort_cmp cmp) This ensures that the stack is keept small. */ - if ((int) (high_ptr - low) <= 0) + if ((longlong) (high_ptr - low) <= 0) { - if ((int) (high - low_ptr) <= 0) + if ((longlong) (high - low_ptr) <= 0) { POP(low, high); /* Nothing more to sort */ } else low = low_ptr; /* Ignore small left part. */ } - else if ((int) (high - low_ptr) <= 0) + else if ((longlong) (high - low_ptr) <= 0) high = high_ptr; /* Ignore small right part. */ else if ((high_ptr - low) > (high - low_ptr)) { diff --git a/mysys/my_lib.c b/mysys/my_lib.c index f905e757869..7365b9fa1ba 100644 --- a/mysys/my_lib.c +++ b/mysys/my_lib.c @@ -334,13 +334,6 @@ int my_fstat(File Filedes, MY_STAT *stat_area, DBUG_PRINT("my",("fd: %d MyFlags: %lu", Filedes, MyFlags)); #ifdef _WIN32 DBUG_RETURN(my_win_fstat(Filedes, stat_area)); -#elif defined HAVE_valgrind - { - int s= fstat(Filedes, stat_area); - if (!s) - MSAN_STAT_WORKAROUND(stat_area); - DBUG_RETURN(s); - } #else DBUG_RETURN(fstat(Filedes, (struct stat *) stat_area)); #endif @@ -361,7 +354,6 @@ MY_STAT *my_stat(const char *path, MY_STAT *stat_area, myf my_flags) #ifndef _WIN32 if (!stat((char *) path, (struct stat *) stat_area)) { - MSAN_STAT_WORKAROUND(stat_area); DBUG_RETURN(stat_area); } #else diff --git a/mysys/my_symlink.c b/mysys/my_symlink.c index 8db915770a8..94081c542db 100644 --- a/mysys/my_symlink.c +++ b/mysys/my_symlink.c @@ -115,7 +115,6 @@ int my_is_symlink(const char *filename __attribute__((unused))) struct stat stat_buff; if (lstat(filename, &stat_buff)) return 0; - MSAN_STAT_WORKAROUND(&stat_buff); return !!S_ISLNK(stat_buff.st_mode); #elif defined (_WIN32) DWORD dwAttr = GetFileAttributes(filename); diff --git 
a/plugin/auth_pam/testing/CMakeLists.txt b/plugin/auth_pam/testing/CMakeLists.txt index 151823b9419..9217e23ce02 100644 --- a/plugin/auth_pam/testing/CMakeLists.txt +++ b/plugin/auth_pam/testing/CMakeLists.txt @@ -10,6 +10,8 @@ IF(CMAKE_C_COMPILER_ID MATCHES "Clang") PROPERTY COMPILE_FLAGS "-Wno-incompatible-pointer-types-discards-qualifiers") ENDIF() -SET(dest DESTINATION "${INSTALL_MYSQLTESTDIR}/suite/plugins/pam" COMPONENT Test) -INSTALL(TARGETS pam_mariadb_mtr ${dest}) -INSTALL(FILES mariadb_mtr.conf RENAME mariadb_mtr ${dest}) +IF (NOT DEB) # avoid arch-dependent-file-in-usr-share error + SET(dest DESTINATION "${INSTALL_MYSQLTESTDIR}/suite/plugins/pam" COMPONENT Test) + INSTALL(TARGETS pam_mariadb_mtr ${dest}) + INSTALL(FILES mariadb_mtr.conf RENAME mariadb_mtr ${dest}) +ENDIF() diff --git a/plugin/aws_key_management/CMakeLists.txt b/plugin/aws_key_management/CMakeLists.txt index 3c6ca018273..80e1fef0d3e 100644 --- a/plugin/aws_key_management/CMakeLists.txt +++ b/plugin/aws_key_management/CMakeLists.txt @@ -1,7 +1,10 @@ INCLUDE(aws_sdk) -CHECK_AWS_SDK(HAVE_AWS_SDK REASON) +CHECK_AWS_SDK("kms;core" HAVE_AWS_SDK REASON) IF(NOT HAVE_AWS_SDK) MESSAGE_ONCE(AWS_KEY_MANAGEMENT_NO_AWS_SDK "Can't build aws_key_management - AWS SDK not available (${REASON})") + IF(PLUGIN_AWS_KEY_MANAGEMENT MATCHES "^(STATIC|DYNAMIC)$") + MESSAGE(FATAL_ERROR "Can't build required plugin aws_key_management: ${REASON}") + ENDIF() ADD_FEATURE_INFO(AWS_KEY_MANAGEMENT "OFF" "AWS Encryption Key Management Plugin") RETURN() ENDIF() @@ -11,7 +14,7 @@ MYSQL_ADD_PLUGIN(aws_key_management COMPONENT aws-key-management) IF(TARGET aws_key_management) - USE_AWS_SDK_LIBS(aws_key_management kms) + USE_AWS_SDK_LIBS(aws_key_management kms core) + ADD_FEATURE_INFO(AWS_KEY_MANAGEMENT "ON" "AWS Encryption Key Management Plugin") ENDIF() -ADD_FEATURE_INFO(AWS_KEY_MANAGEMENT "ON" "AWS Encryption Key Management Plugin") diff --git a/plugin/aws_key_management/aws_key_management_plugin.cc 
b/plugin/aws_key_management/aws_key_management_plugin.cc index 3a6a803862f..5bb1dcd0c24 100644 --- a/plugin/aws_key_management/aws_key_management_plugin.cc +++ b/plugin/aws_key_management/aws_key_management_plugin.cc @@ -551,7 +551,7 @@ static int generate_and_save_datakey(uint keyid, uint version) return(-1); } unsigned int len= (unsigned int)byteBuffer.GetLength(); - if (write(fd, byteBuffer.GetUnderlyingData(), len) != len) + if ((unsigned int)write(fd, byteBuffer.GetUnderlyingData(), len) != len) { my_printf_error(ER_UNKNOWN_ERROR, "AWS KMS plugin: can't write to %s", ME_ERROR_LOG_ONLY, filename); close(fd); diff --git a/plugin/disks/information_schema_disks.cc b/plugin/disks/information_schema_disks.cc index 01df32590eb..d42d30395a8 100644 --- a/plugin/disks/information_schema_disks.cc +++ b/plugin/disks/information_schema_disks.cc @@ -32,6 +32,9 @@ #include #endif #endif +#ifndef PATH_MAX +#define PATH_MAX 4096 +#endif #include #include #include /* check_global_access() */ diff --git a/plugin/type_inet/mysql-test/type_inet/type_inet4.result b/plugin/type_inet/mysql-test/type_inet/type_inet4.result index 6fc949e36fa..a18f5685895 100644 --- a/plugin/type_inet/mysql-test/type_inet/type_inet4.result +++ b/plugin/type_inet/mysql-test/type_inet/type_inet4.result @@ -493,7 +493,7 @@ FROM t1; SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `COALESCE(a, '0.0.0.0')` inet4 DEFAULT NULL, + `COALESCE(a, '0.0.0.0')` inet4 NOT NULL, `LEAST(a,'0.0.0.0')` inet4 DEFAULT NULL, `GREATEST(a,'0.0.0.0')` inet4 DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci @@ -527,7 +527,7 @@ FROM t1; SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `COALESCE(a, 0x00000000)` inet4 DEFAULT NULL, + `COALESCE(a, 0x00000000)` inet4 NOT NULL, `LEAST(a,0x00000000)` inet4 DEFAULT NULL, `GREATEST(a,0x00000000)` inet4 DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci @@ -1940,7 +1940,7 @@ SHOW CREATE 
TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( `COALESCE(a,a)` inet4 NOT NULL, - `COALESCE(a,b)` inet4 DEFAULT NULL + `COALESCE(a,b)` inet4 NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t2; CREATE TABLE t2 AS SELECT a AS ca,a AS cb FROM t1 UNION SELECT a,b FROM t1; diff --git a/plugin/type_inet/mysql-test/type_inet/type_inet6.result b/plugin/type_inet/mysql-test/type_inet/type_inet6.result index 3c769222f79..a739a8af0e7 100644 --- a/plugin/type_inet/mysql-test/type_inet/type_inet6.result +++ b/plugin/type_inet/mysql-test/type_inet/type_inet6.result @@ -487,7 +487,7 @@ FROM t1; SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `COALESCE(a, '::')` inet6 DEFAULT NULL, + `COALESCE(a, '::')` inet6 NOT NULL, `LEAST(a,'::')` inet6 DEFAULT NULL, `GREATEST(a,'::')` inet6 DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci @@ -521,7 +521,7 @@ FROM t1; SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `COALESCE(a, 0x00000000000000000000000000000000)` inet6 DEFAULT NULL, + `COALESCE(a, 0x00000000000000000000000000000000)` inet6 NOT NULL, `LEAST(a,0x00000000000000000000000000000000)` inet6 DEFAULT NULL, `GREATEST(a,0x00000000000000000000000000000000)` inet6 DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci @@ -2125,7 +2125,7 @@ SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( `COALESCE(a,a)` inet6 NOT NULL, - `COALESCE(a,b)` inet6 DEFAULT NULL + `COALESCE(a,b)` inet6 NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci DROP TABLE t2; CREATE TABLE t2 AS SELECT a AS ca,a AS cb FROM t1 UNION SELECT a,b FROM t1; diff --git a/plugin/type_test/mysql-test/type_test/type_test_double.result b/plugin/type_test/mysql-test/type_test/type_test_double.result index 7ccec9f0d21..91520eaad9a 100644 --- a/plugin/type_test/mysql-test/type_test/type_test_double.result +++ 
b/plugin/type_test/mysql-test/type_test/type_test_double.result @@ -318,9 +318,9 @@ FROM t1; SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `c1` test_double DEFAULT NULL, - `c2` test_double DEFAULT NULL, - `c3` test_double DEFAULT NULL + `c1` test_double NOT NULL, + `c2` test_double NOT NULL, + `c3` test_double NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci SELECT * FROM t2 ORDER BY c1; c1 c2 c3 diff --git a/plugin/type_test/mysql-test/type_test/type_test_int8.result b/plugin/type_test/mysql-test/type_test/type_test_int8.result index ba894a07af6..f55211446ec 100644 --- a/plugin/type_test/mysql-test/type_test/type_test_int8.result +++ b/plugin/type_test/mysql-test/type_test/type_test_int8.result @@ -297,9 +297,9 @@ FROM t1; SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `c1` test_int8(20) DEFAULT NULL, - `c2` decimal(20,1) DEFAULT NULL, - `c3` double DEFAULT NULL + `c1` test_int8(20) NOT NULL, + `c2` decimal(20,1) NOT NULL, + `c3` double NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci SELECT * FROM t2 ORDER BY c1; c1 c2 c3 diff --git a/plugin/type_uuid/mysql-test/type_uuid/type_uuid.result b/plugin/type_uuid/mysql-test/type_uuid/type_uuid.result index 17cfc8196c0..da2cd2e12e8 100644 --- a/plugin/type_uuid/mysql-test/type_uuid/type_uuid.result +++ b/plugin/type_uuid/mysql-test/type_uuid/type_uuid.result @@ -1579,7 +1579,7 @@ FROM t1; SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `COALESCE(a, '00000000-0000-0000-0000-000000000000')` uuid DEFAULT NULL, + `COALESCE(a, '00000000-0000-0000-0000-000000000000')` uuid NOT NULL, `LEAST(a,'00000000-0000-0000-0000-000000000000')` uuid DEFAULT NULL, `GREATEST(a,'00000000-0000-0000-0000-000000000000')` uuid DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci @@ -1615,7 +1615,7 @@ FROM t1; SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `COALESCE(a, 
0x00000000000000000000000000000000)` uuid DEFAULT NULL, + `COALESCE(a, 0x00000000000000000000000000000000)` uuid NOT NULL, `LEAST(a,0x00000000000000000000000000000000)` uuid DEFAULT NULL, `GREATEST(a,0x00000000000000000000000000000000)` uuid DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh index 09aa87f6bc3..2f4cb82a49a 100644 --- a/scripts/mysql_install_db.sh +++ b/scripts/mysql_install_db.sh @@ -673,7 +673,7 @@ then echo echo "Two all-privilege accounts were created." echo "One is root@localhost, it has no password, but you need to" - echo "be system 'root' user to connect. Use, for example, sudo mysql" + echo "be system 'root' user to connect. Use, for example, sudo mariadb" echo "The second is $auth_root_socket_user@localhost, it has no password either, but" echo "you need to be the system '$auth_root_socket_user' user to connect." echo "After connecting you can set the password, if you would need to be" diff --git a/sql-common/client.c b/sql-common/client.c index 2d5c6742541..599429b45c6 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -1993,6 +1993,29 @@ error: return res; } +/** + Checks if self-signed certificate error should be ignored. 
+*/ +static my_bool is_local_connection(const char *hostname, enum enum_vio_type viotype) +{ + const char *local_host_names[]= { +#ifdef _WIN32 + "localhost", +#endif + "127.0.0.1", "::1"}; + size_t i; + + if (viotype != VIO_TYPE_TCPIP || !hostname) + return TRUE; + + for (i= 0; i < array_elements(local_host_names); i++) + { + if (strcmp(hostname, local_host_names[i]) == 0) + return TRUE; + } + return FALSE; +} + #define MAX_CONNECTION_ATTR_STORAGE_LENGTH 65536 /** @@ -2121,6 +2144,7 @@ static int send_client_reply_packet(MCPVIO_EXT *mpvio, enum enum_ssl_init_error ssl_init_error; const char *cert_error; unsigned long ssl_error; + my_bool is_local; /* Send mysql->client_flag, max_packet_size - unencrypted otherwise @@ -2168,10 +2192,11 @@ static int send_client_reply_packet(MCPVIO_EXT *mpvio, } DBUG_PRINT("info", ("IO layer change done!")); + is_local= is_local_connection(mysql->host, vio_type); /* Verify server cert */ if ((!mysql->options.extension || !mysql->options.extension->tls_allow_invalid_server_cert) && - ssl_verify_server_cert(mysql, &cert_error, vio_type == VIO_TYPE_SOCKET)) + ssl_verify_server_cert(mysql, &cert_error, is_local)) { set_mysql_extended_error(mysql, CR_SSL_CONNECTION_ERROR, unknown_sqlstate, ER(CR_SSL_CONNECTION_ERROR), cert_error); @@ -2180,14 +2205,13 @@ static int send_client_reply_packet(MCPVIO_EXT *mpvio, if (mysql->tls_self_signed_error) { /* - If the transport is secure (see opt_require_secure_transport) we - allow a self-signed cert as we know it came from the server. + If connection is local, we allow self-signed cert. If no password or plugin uses insecure protocol - refuse the cert. Otherwise one last cert check after auth. 
*/ - if (vio_type == VIO_TYPE_SOCKET) + if (is_local) mysql->tls_self_signed_error= 0; else if (!mysql->passwd || !mysql->passwd[0] || !mpvio->plugin->hash_password_bin) diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 8738c7b70d1..187c47f7d6e 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -61,7 +61,6 @@ ${PCRE_INCLUDE_DIRS} ${ZLIB_INCLUDE_DIRS} ${SSL_INCLUDE_DIRS} ${CMAKE_BINARY_DIR}/sql -${CMAKE_SOURCE_DIR}/tpool ) ADD_CUSTOM_COMMAND( diff --git a/sql/datadict.cc b/sql/datadict.cc index c8e45154629..8c1c4ddc266 100644 --- a/sql/datadict.cc +++ b/sql/datadict.cc @@ -141,8 +141,6 @@ Table_type dd_frm_type(THD *thd, char *path, LEX_CSTRING *engine_name, if (mysql_file_fstat(file, &state, MYF(MY_WME))) goto err; - MSAN_STAT_WORKAROUND(&state); - if (mysql_file_seek(file, 0, SEEK_SET, MYF(MY_WME))) goto err; diff --git a/sql/discover.cc b/sql/discover.cc index 201169357a2..4295bd69562 100644 --- a/sql/discover.cc +++ b/sql/discover.cc @@ -72,7 +72,7 @@ int readfrm(const char *name, const uchar **frmdata, size_t *len) error= 2; if (mysql_file_fstat(file, &state, MYF(0))) goto err; - MSAN_STAT_WORKAROUND(&state); + read_len= (size_t)MY_MIN(FRM_MAX_SIZE, state.st_size); // safety // Read whole frm file diff --git a/sql/field.cc b/sql/field.cc index e17fc7d2a20..a776f5dffb6 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -8531,6 +8531,59 @@ Field *Field_varstring::make_new_field(MEM_ROOT *root, TABLE *new_table, } +Field *Field_varstring_compressed::make_new_field(MEM_ROOT *root, TABLE *new_table, + bool keep_type) +{ + Field_varstring *res; + if (new_table->s->is_optimizer_tmp_table()) + { + /* + Compressed field cannot be part of a key. For optimizer temporary + table we create uncompressed substitute. 
+ */ + res= new (root) Field_varstring(ptr, field_length, length_bytes, null_ptr, + null_bit, Field::NONE, &field_name, + new_table->s, charset()); + if (res) + { + res->init_for_make_new_field(new_table, orig_table); + /* See Column_definition::create_length_to_internal_length_string() */ + res->field_length--; + } + } + else + res= (Field_varstring*) Field::make_new_field(root, new_table, keep_type); + if (res) + res->length_bytes= length_bytes; + return res; +} + +Field *Field_blob_compressed::make_new_field(MEM_ROOT *root, TABLE *new_table, + bool keep_type) +{ + Field_blob *res; + if (new_table->s->is_optimizer_tmp_table()) + { + /* + Compressed field cannot be part of a key. For optimizer temporary + table we create uncompressed substitute. + */ + res= new (root) Field_blob(ptr, null_ptr, null_bit, Field::NONE, &field_name, + new_table->s, packlength, charset()); + if (res) + { + res->init_for_make_new_field(new_table, orig_table); + /* See Column_definition::create_length_to_internal_length_string() */ + res->field_length--; + } + } + else + res= (Field_blob *) Field::make_new_field(root, new_table, keep_type); + return res; +} + + + Field *Field_varstring::new_key_field(MEM_ROOT *root, TABLE *new_table, uchar *new_ptr, uint32 length, uchar *new_null_ptr, uint new_null_bit) diff --git a/sql/field.h b/sql/field.h index bf99eb2820a..41c4d290ea5 100644 --- a/sql/field.h +++ b/sql/field.h @@ -655,6 +655,7 @@ public: bool fix_session_expr(THD *thd); bool cleanup_session_expr(); bool fix_and_check_expr(THD *thd, TABLE *table); + bool check_access(THD *thd); inline bool is_equal(const Virtual_column_info* vcol) const; /* Same as is_equal() but for comparing with different table */ bool is_equivalent(THD *thd, TABLE_SHARE *share, TABLE_SHARE *vcol_share, @@ -1588,7 +1589,14 @@ public: { ptr=ADD_TO_PTR(ptr,ptr_diff, uchar*); if (null_ptr) + { null_ptr=ADD_TO_PTR(null_ptr,ptr_diff,uchar*); + if (table) + { + DBUG_ASSERT(null_ptr < ptr); + DBUG_ASSERT(ptr - null_ptr <= 
(int)table->s->rec_buff_length); + } + } } /* @@ -4381,6 +4389,7 @@ private: { DBUG_ASSERT(0); return 0; } using Field_varstring::key_cmp; Binlog_type_info binlog_type_info() const override; + Field *make_new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type) override; }; @@ -4822,6 +4831,7 @@ private: override { DBUG_ASSERT(0); return 0; } Binlog_type_info binlog_type_info() const override; + Field *make_new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type) override; }; diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 44e1c8ae0cc..2f56cee02fc 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -429,7 +429,6 @@ void ha_partition::init_handler_variables() m_top_entry= NO_CURRENT_PART_ID; m_rec_length= 0; m_last_part= 0; - m_rec0= 0; m_err_rec= NULL; m_curr_key_info[0]= NULL; m_curr_key_info[1]= NULL; @@ -2225,7 +2224,7 @@ int ha_partition::copy_partitions(ulonglong * const copied, goto init_error; while (TRUE) { - if ((result= file->ha_rnd_next(m_rec0))) + if ((result= file->ha_rnd_next(table->record[0]))) { if (result != HA_ERR_END_OF_FILE) goto error; @@ -2251,7 +2250,7 @@ int ha_partition::copy_partitions(ulonglong * const copied, /* Copy record to new handler */ (*copied)++; DBUG_ASSERT(!m_new_file[new_part]->row_logging); - result= m_new_file[new_part]->ha_write_row(m_rec0); + result= m_new_file[new_part]->ha_write_row(table->record[0]); if (result) goto error; } @@ -3837,7 +3836,6 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) DBUG_RETURN(HA_ERR_INITIALIZATION); } m_start_key.length= 0; - m_rec0= table->record[0]; m_rec_length= table_share->reclength; if (!m_part_ids_sorted_by_num_of_records) { @@ -4750,15 +4748,15 @@ int ha_partition::update_row(const uchar *old_data, const uchar *new_data) */ { Abort_on_warning_instant_set old_abort_on_warning(thd, 0); - error= get_part_for_buf(old_data, m_rec0, m_part_info, &old_part_id); + error= get_part_for_buf(old_data, table->record[0], m_part_info, 
&old_part_id); } DBUG_ASSERT(!error); DBUG_ASSERT(old_part_id == m_last_part); DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), old_part_id)); #endif - if (unlikely((error= get_part_for_buf(new_data, m_rec0, m_part_info, - &new_part_id)))) + if (unlikely((error= get_part_for_buf(new_data, table->record[0], + m_part_info, &new_part_id)))) goto exit; if (unlikely(!bitmap_is_set(&(m_part_info->lock_partitions), new_part_id))) { @@ -5586,7 +5584,7 @@ int ha_partition::rnd_pos_by_record(uchar *record) { DBUG_ENTER("ha_partition::rnd_pos_by_record"); - if (unlikely(get_part_for_buf(record, m_rec0, m_part_info, &m_last_part))) + if (unlikely(get_part_for_buf(record, table->record[0], m_part_info, &m_last_part))) DBUG_RETURN(1); int err= m_file[m_last_part]->rnd_pos_by_record(record); @@ -6370,7 +6368,7 @@ int ha_partition::read_range_first(const key_range *start_key, m_start_key.key= NULL; m_index_scan_type= partition_read_range; - error= common_index_read(m_rec0, MY_TEST(start_key)); + error= common_index_read(table->record[0], MY_TEST(start_key)); DBUG_RETURN(error); } @@ -10427,7 +10425,7 @@ void ha_partition::print_error(int error, myf errflag) str.append('('); str.append_ulonglong(m_last_part); str.append(STRING_WITH_LEN(" != ")); - if (get_part_for_buf(m_err_rec, m_rec0, m_part_info, &part_id)) + if (get_part_for_buf(m_err_rec, table->record[0], m_part_info, &part_id)) str.append('?'); else str.append_ulonglong(part_id); @@ -11405,7 +11403,7 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair) while (true) { - if ((result= m_file[read_part_id]->ha_rnd_next(m_rec0))) + if ((result= m_file[read_part_id]->ha_rnd_next(table->record[0]))) { if (result != HA_ERR_END_OF_FILE) break; @@ -11451,7 +11449,7 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair) Insert row into correct partition. Notice that there are no commit for every N row, so the repair will be one large transaction! 
*/ - if ((result= m_file[correct_part_id]->ha_write_row(m_rec0))) + if ((result= m_file[correct_part_id]->ha_write_row(table->record[0]))) { /* We have failed to insert a row, it might have been a duplicate! @@ -11495,7 +11493,7 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair) } /* Delete row from wrong partition. */ - if ((result= m_file[read_part_id]->ha_delete_row(m_rec0))) + if ((result= m_file[read_part_id]->ha_delete_row(table->record[0]))) { if (m_file[correct_part_id]->has_transactions_and_rollback()) break; diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 6c3d2328cd3..f71b8eb11b0 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -322,7 +322,6 @@ private: and if clustered pk, [0]= current index, [1]= pk, [2]= NULL */ KEY *m_curr_key_info[3]; // Current index - uchar *m_rec0; // table->record[0] const uchar *m_err_rec; // record which gave error QUEUE m_queue; // Prio queue used by sorted read @@ -568,6 +567,17 @@ public: { m_file[part_id]->update_create_info(create_info); } + + void column_bitmaps_signal() override + { + for (uint i= bitmap_get_first_set(&m_opened_partitions); + i < m_tot_parts; + i= bitmap_get_next_set(&m_opened_partitions, i)) + { + m_file[i]->column_bitmaps_signal(); + } + } + private: int copy_partitions(ulonglong * const copied, ulonglong * const deleted); void cleanup_new_partition(uint part_count); diff --git a/sql/handler.cc b/sql/handler.cc index 7aa8235d2de..52799873d00 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -505,6 +505,8 @@ int ha_init_errors(void) SETMSG(HA_ERR_FTS_TOO_MANY_WORDS_IN_PHRASE, "Too many words in a FTS phrase or proximity search"); SETMSG(HA_ERR_FK_DEPTH_EXCEEDED, "Foreign key cascade delete/update exceeds"); SETMSG(HA_ERR_TABLESPACE_MISSING, ER_DEFAULT(ER_TABLESPACE_MISSING)); + SETMSG(HA_ERR_INCOMPATIBLE_DEFINITION, + "Mismatch between table definitions in sql and storage layer"); /* Register the error messages for use with my_error(). 
*/ return my_error_register(get_handler_errmsgs, HA_ERR_FIRST, HA_ERR_LAST); @@ -3446,7 +3448,7 @@ int handler::create_lookup_handler() if (!(tmp= clone(table->s->normalized_path.str, table->in_use->mem_root))) return 1; lookup_handler= tmp; - return lookup_handler->ha_external_lock(table->in_use, F_RDLCK); + return lookup_handler->ha_external_lock(table->in_use, F_WRLCK); } LEX_CSTRING *handler::engine_name() @@ -6424,7 +6426,8 @@ static int ha_create_table_from_share(THD *thd, TABLE_SHARE *share, Table_path_buffer name_buff; Lex_cstring name= table.file->get_canonical_filename(share->path, &name_buff); - int error= table.file->ha_create(name.str, &table, create_info); + int error= table.check_sequence_privileges(thd) ? 1 : + table.file->ha_create(name.str, &table, create_info); if (error) { @@ -7802,16 +7805,17 @@ int handler::ha_reset() { lookup_handler->ha_external_unlock(table->in_use); lookup_handler->close(); + PSI_CALL_close_table(table_share, lookup_handler->m_psi); delete lookup_handler; lookup_handler= this; } DBUG_RETURN(reset()); } -#ifdef WITH_WSREP static int wsrep_after_row(THD *thd) { DBUG_ENTER("wsrep_after_row"); +#ifdef WITH_WSREP if (thd->internal_transaction()) DBUG_RETURN(0); @@ -7835,9 +7839,32 @@ static int wsrep_after_row(THD *thd) { DBUG_RETURN(ER_LOCK_DEADLOCK); } +#endif /* WITH_WSREP */ DBUG_RETURN(0); } -#endif /* WITH_WSREP */ + + +static bool long_unique_fields_differ(KEY *keyinfo, const uchar *other) +{ + uint key_parts= fields_in_hash_keyinfo(keyinfo); + KEY_PART_INFO *keypart= keyinfo->key_part - key_parts; + my_ptrdiff_t off= other - keypart->field->table->record[0]; + DBUG_ASSERT(off); + do + { + Field *field= keypart->field; + if (field->is_null() || field->is_null(off)) + return true; + else if (f_is_blob(keypart->key_type) && keypart->length) + { + if (field->cmp_prefix(field->ptr, field->ptr + off, keypart->length)) + return true; + } + else if (field->cmp_offset(off)) + return true; + } while (keypart++ < 
keyinfo->key_part); + return false; +} /** @@ -7846,92 +7873,70 @@ static int wsrep_after_row(THD *thd) int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no) { - int result, error= 0; + int result; + /* Skip just written row in the case of HA_CHECK_UNIQUE_AFTER_WRITE */ + bool skip_self= ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE; KEY *key_info= table->key_info + key_no; - Field *hash_field= key_info->key_part->field; uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL]; - String *blob_storage; DBUG_ENTER("handler::check_duplicate_long_entry_key"); DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY && key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL) || key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL); - if (hash_field->is_real_null()) + if (key_info->key_part->field->is_real_null()) DBUG_RETURN(0); + if (skip_self) + position(table->record[0]); + key_copy(ptr, new_rec, key_info, key_info->key_length, false); result= lookup_handler->ha_index_init(key_no, 0); if (result) DBUG_RETURN(result); - blob_storage= (String*)alloca(sizeof(String)*table->s->virtual_not_stored_blob_fields); + auto blob_storage= (String*)alloca(sizeof(String)*table->s->virtual_not_stored_blob_fields); table->remember_blob_values(blob_storage); store_record(table, file->lookup_buffer); result= lookup_handler->ha_index_read_map(table->record[0], ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT); - if (!result) + if (result) + goto end; + + // restore pointers after swap_values in TABLE::update_virtual_fields() + for (Field **vf= table->vfield; *vf; vf++) { - bool is_same; - Field * t_field; - Item_func_hash * temp= (Item_func_hash *)hash_field->vcol_info->expr; - Item ** arguments= temp->arguments(); - uint arg_count= temp->argument_count(); - // restore pointers after swap_values in TABLE::update_virtual_fields() - for (Field **vf= table->vfield; *vf; vf++) + if (!(*vf)->stored_in_db() && (*vf)->flags & BLOB_FLAG && + bitmap_is_set(table->read_set, (*vf)->field_index)) + 
((Field_blob*)*vf)->swap_value_and_read_value(); + } + do + { + if (!long_unique_fields_differ(key_info, lookup_buffer)) { - if (!(*vf)->stored_in_db() && (*vf)->flags & BLOB_FLAG && - bitmap_is_set(table->read_set, (*vf)->field_index)) - ((Field_blob*)*vf)->swap_value_and_read_value(); - } - do - { - my_ptrdiff_t diff= table->file->lookup_buffer - new_rec; - is_same= true; - for (uint j=0; is_same && j < arg_count; j++) + lookup_handler->position(table->record[0]); + if (skip_self && !memcmp(ref, lookup_handler->ref, ref_length)) { - DBUG_ASSERT(arguments[j]->type() == Item::FIELD_ITEM || - // this one for left(fld_name,length) - arguments[j]->type() == Item::FUNC_ITEM); - if (arguments[j]->type() == Item::FIELD_ITEM) - { - t_field= static_cast(arguments[j])->field; - if (t_field->cmp_offset(diff)) - is_same= false; - } - else - { - Item_func_left *fnc= static_cast(arguments[j]); - DBUG_ASSERT(Lex_ident_routine(fnc->func_name_cstring()). - streq("left"_LEX_CSTRING)); - DBUG_ASSERT(fnc->arguments()[0]->type() == Item::FIELD_ITEM); - t_field= static_cast(fnc->arguments()[0])->field; - uint length= (uint)fnc->arguments()[1]->val_int(); - if (t_field->cmp_prefix(t_field->ptr, t_field->ptr + diff, length)) - is_same= false; - } + skip_self= false; // cannot happen twice, so let's save a memcpy + continue; } + result= HA_ERR_FOUND_DUPP_KEY; + table->file->lookup_errkey= key_no; + memcpy(table->file->dup_ref, lookup_handler->ref, ref_length); + goto end; } - while (!is_same && - !(result= lookup_handler->ha_index_next_same(table->record[0], - ptr, key_info->key_length))); - if (is_same) - error= HA_ERR_FOUND_DUPP_KEY; - goto exit; - } - if (result != HA_ERR_KEY_NOT_FOUND) - error= result; -exit: - if (error == HA_ERR_FOUND_DUPP_KEY) - { - table->file->lookup_errkey= key_no; - lookup_handler->position(table->record[0]); - memcpy(table->file->dup_ref, lookup_handler->ref, ref_length); } + while (!(result= lookup_handler->ha_index_next_same(table->record[0], ptr, + 
key_info->key_length))); + +end: + if (result == HA_ERR_END_OF_FILE || result == HA_ERR_KEY_NOT_FOUND) + result= 0; + restore_record(table, file->lookup_buffer); table->restore_blob_values(blob_storage); lookup_handler->ha_index_end(); - DBUG_RETURN(error); + DBUG_RETURN(result); } void handler::alloc_lookup_buffer() @@ -7943,77 +7948,48 @@ void handler::alloc_lookup_buffer() + table_share->reclength); } -/** @brief - check whether inserted records breaks the - unique constraint on long columns. - @returns 0 if no duplicate else returns error - */ -int handler::check_duplicate_long_entries(const uchar *new_rec) + +int handler::ha_check_inserver_constraints(const uchar *old_data, + const uchar* new_data) { - lookup_errkey= (uint)-1; - for (uint i= 0; i < table->s->keys; i++) + int error= 0; + if (is_root_handler()) { - int result; - if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH && - (result= check_duplicate_long_entry_key(new_rec, i))) - return result; + uint saved_status= table->status; + if (!(error= ha_check_overlaps(old_data, new_data))) + error= ha_check_long_uniques(old_data, new_data); + table->status= saved_status; } - return 0; + return error; } /** @brief - check whether updated records breaks the - unique constraint on long columns. - In the case of update we just need to check the specic key - reason for that is consider case - create table t1(a blob , b blob , x blob , y blob ,unique(a,b) - ,unique(x,y)) - and update statement like this - update t1 set a=23+a; in this case if we try to scan for - whole keys in table then index scan on x_y will return 0 - because data is same so in the case of update we take - key as a parameter in normal insert key should be -1 + check whether inserted records breaks the unique constraint on long columns. 
@returns 0 if no duplicate else returns error */ -int handler::check_duplicate_long_entries_update(const uchar *new_rec) +int handler::ha_check_long_uniques(const uchar *old_rec, const uchar *new_rec) { - Field *field; - uint key_parts; - KEY *keyinfo; - KEY_PART_INFO *keypart; - /* - Here we are comparing whether new record and old record are same - with respect to fields in hash_str - */ - uint reclength= (uint) (table->record[1] - table->record[0]); - + if (!table->s->long_unique_table) + return 0; + DBUG_ASSERT(inited == NONE || lookup_handler != this); + DBUG_ASSERT(new_rec == table->record[0]); + DBUG_ASSERT(!old_rec || old_rec == table->record[1]); + lookup_errkey= (uint)-1; for (uint i= 0; i < table->s->keys; i++) { - keyinfo= table->key_info + i; + KEY *keyinfo= table->key_info + i; if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH) { - key_parts= fields_in_hash_keyinfo(keyinfo); - keypart= keyinfo->key_part - key_parts; - for (uint j= 0; j < key_parts; j++, keypart++) + if (!old_rec || long_unique_fields_differ(keyinfo, old_rec)) { - int error; - field= keypart->field; - /* - Compare fields if they are different then check for duplicates - cmp_binary_offset cannot differentiate between null and empty string - So also check for that too - */ - if((field->is_null(0) != field->is_null(reclength)) || - field->cmp_offset(reclength)) + if (int res= check_duplicate_long_entry_key(new_rec, i)) { - if((error= check_duplicate_long_entry_key(new_rec, i))) - return error; - /* - break because check_duplicate_long_entries_key will - take care of remaining fields - */ - break; + if (!old_rec && table->next_number_field && + !(ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE)) + if (int err= update_auto_increment()) + return err; + return res; } } } @@ -8025,14 +8001,14 @@ int handler::check_duplicate_long_entries_update(const uchar *new_rec) int handler::ha_check_overlaps(const uchar *old_data, const uchar* new_data) { DBUG_ASSERT(new_data); - if (this != table->file) - 
return 0; + DBUG_ASSERT(this == table->file); if (!table_share->period.unique_keys) return 0; if (table->versioned() && !table->vers_end_field()->is_max()) return 0; - const bool is_update= old_data != NULL; + const bool after_write= ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE; + const bool is_update= !after_write && old_data; uchar *record_buffer= lookup_buffer + table_share->max_unique_length + table_share->null_fields; @@ -8087,17 +8063,22 @@ int handler::ha_check_overlaps(const uchar *old_data, const uchar* new_data) key_part_map((1 << (key_parts - 1)) - 1), HA_READ_AFTER_KEY); - if (!error && is_update) + if (!error) { - /* In case of update it could happen that the nearest neighbour is - a record we are updating. It means, that there are no overlaps - from this side. - */ - DBUG_ASSERT(lookup_handler != this); - DBUG_ASSERT(ref_length == lookup_handler->ref_length); + if (is_update) + { + /* In case of update it could happen that the nearest neighbour is + a record we are updating. It means, that there are no overlaps + from this side. 
+ */ + DBUG_ASSERT(lookup_handler != this); + DBUG_ASSERT(ref_length == lookup_handler->ref_length); - lookup_handler->position(record_buffer); - if (memcmp(ref, lookup_handler->ref, ref_length) == 0) + lookup_handler->position(record_buffer); + if (memcmp(ref, lookup_handler->ref, ref_length) == 0) + error= lookup_handler->ha_index_next(record_buffer); + } + else if (after_write) error= lookup_handler->ha_index_next(record_buffer); } @@ -8223,11 +8204,8 @@ int handler::prepare_for_modify(bool can_set_fields, bool can_lookup) int handler::ha_write_row(const uchar *buf) { int error; - DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || m_lock_type == F_WRLCK); - DBUG_ASSERT(buf == table->record[0]); DBUG_ENTER("handler::ha_write_row"); DEBUG_SYNC_C("ha_write_row_start"); -#ifdef WITH_WSREP DBUG_EXECUTE_IF("wsrep_ha_write_row", { const char act[]= @@ -8236,25 +8214,12 @@ int handler::ha_write_row(const uchar *buf) "WAIT_FOR wsrep_ha_write_row_continue"; DBUG_ASSERT(!debug_sync_set_action(ha_thd(), STRING_WITH_LEN(act))); }); -#endif /* WITH_WSREP */ - if ((error= ha_check_overlaps(NULL, buf))) - { - DEBUG_SYNC_C("ha_write_row_end"); - DBUG_RETURN(error); - } + DBUG_ASSERT(table_share->tmp_table || m_lock_type == F_WRLCK); + DBUG_ASSERT(buf == table->record[0]); - if (table->s->long_unique_table && is_root_handler()) - { - DBUG_ASSERT(inited == NONE || lookup_handler != this); - if ((error= check_duplicate_long_entries(buf))) - { - if (table->next_number_field) - if (int err= update_auto_increment()) - error= err; - DEBUG_SYNC_C("ha_write_row_end"); - DBUG_RETURN(error); - } - } + if (!(ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) && + (error= ha_check_inserver_constraints(NULL, buf))) + goto err; MYSQL_INSERT_ROW_START(table_share->db.str, table_share->table_name.str); mark_trx_read_write(); @@ -8266,20 +8231,43 @@ int handler::ha_write_row(const uchar *buf) dbug_format_row(table, buf, false).c_ptr_safe(), error)); MYSQL_INSERT_ROW_DONE(error); - if (!error && 
!((error= table->hlindexes_on_insert()))) - { - rows_stats.inserted++; - Log_func *log_func= Write_rows_log_event::binlog_row_logging_function; - error= binlog_log_row(0, buf, log_func); + if (error) + goto err; -#ifdef WITH_WSREP - THD *thd= ha_thd(); - if (WSREP_NNULL(thd) && table_share->tmp_table == NO_TMP_TABLE && - ht->flags & HTON_WSREP_REPLICATION && !error) - error= wsrep_after_row(thd); -#endif /* WITH_WSREP */ + if ((ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) && + (error= ha_check_inserver_constraints(NULL, buf))) + { + if (lookup_handler != this) // INSERT IGNORE or REPLACE or ODKU + { + int olderror= error; + if ((error= lookup_handler->rnd_init(0))) + goto err; + position(buf); + if ((error= lookup_handler->rnd_pos(lookup_buffer, ref))) + goto err; + + increment_statistics(&SSV::ha_delete_count); + TABLE_IO_WAIT(tracker, PSI_TABLE_DELETE_ROW, MAX_KEY, error, + { error= lookup_handler->delete_row(buf);}) + lookup_handler->rnd_end(); + if (!error) + error= olderror; + } + goto err; } + if ((error= table->hlindexes_on_insert())) + goto err; + + rows_stats.inserted++; + error= binlog_log_row(0, buf, + Write_rows_log_event::binlog_row_logging_function); + + if (WSREP_NNULL(ha_thd()) && table_share->tmp_table == NO_TMP_TABLE && + ht->flags & HTON_WSREP_REPLICATION && !error) + error= wsrep_after_row(ha_thd()); + +err: DEBUG_SYNC_C("ha_write_row_end"); DBUG_RETURN(error); } @@ -8288,23 +8276,16 @@ int handler::ha_write_row(const uchar *buf) int handler::ha_update_row(const uchar *old_data, const uchar *new_data) { int error; - DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || - m_lock_type == F_WRLCK); + DBUG_ASSERT(table_share->tmp_table || m_lock_type == F_WRLCK); /* Some storage engines require that the new record is in record[0] (and the old record is in record[1]). 
- */ + */ DBUG_ASSERT(new_data == table->record[0]); DBUG_ASSERT(old_data == table->record[1]); - uint saved_status= table->status; - error= ha_check_overlaps(old_data, new_data); - - if (!error && table->s->long_unique_table && is_root_handler()) - error= check_duplicate_long_entries_update(new_data); - table->status= saved_status; - - if (error) + if (!(ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) && + (error= ha_check_inserver_constraints(old_data, new_data))) return error; MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str); @@ -8319,33 +8300,51 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data) error)); MYSQL_UPDATE_ROW_DONE(error); - if (likely(!error) && !(error= table->hlindexes_on_update())) + if (error) + return error; + + if ((ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) && + (error= ha_check_inserver_constraints(old_data, new_data))) { - rows_stats.updated++; - Log_func *log_func= Update_rows_log_event::binlog_row_logging_function; - error= binlog_log_row(old_data, new_data, log_func); + int e= 0; + if (ha_thd()->lex->ignore) + { + my_printf_error(ER_NOT_SUPPORTED_YET, "UPDATE IGNORE in READ " + "COMMITTED isolation mode of a table with a UNIQUE constraint " + "%s is not currently supported", MYF(0), + table->s->long_unique_table ? "USING HASH" : "WITHOUT OVERLAPS"); + return HA_ERR_UNSUPPORTED; + } + return e ? 
e : error; + } + + if ((error= table->hlindexes_on_update())) + return error; + + rows_stats.updated++; + error= binlog_log_row(old_data, new_data, + Update_rows_log_event::binlog_row_logging_function); #ifdef WITH_WSREP - THD *thd= ha_thd(); - if (WSREP_NNULL(thd)) + THD *thd= ha_thd(); + if (WSREP_NNULL(thd)) + { + /* for streaming replication, the following wsrep_after_row() + may replicate a fragment, so we have to declare potential PA + unsafe before that */ + if (table->s->primary_key == MAX_KEY && wsrep_thd_is_local(thd)) { - /* for streaming replication, the following wsrep_after_row() - may replicate a fragment, so we have to declare potential PA - unsafe before that */ - if (table->s->primary_key == MAX_KEY && wsrep_thd_is_local(thd)) - { - WSREP_DEBUG("marking trx as PA unsafe pk %d", table->s->primary_key); - if (thd->wsrep_cs().mark_transaction_pa_unsafe()) - WSREP_DEBUG("session does not have active transaction," - " can not mark as PA unsafe"); - } - - if (!error && table_share->tmp_table == NO_TMP_TABLE && - ht->flags & HTON_WSREP_REPLICATION) - error= wsrep_after_row(thd); + WSREP_DEBUG("marking trx as PA unsafe pk %d", table->s->primary_key); + if (thd->wsrep_cs().mark_transaction_pa_unsafe()) + WSREP_DEBUG("session does not have active transaction," + " can not mark as PA unsafe"); } -#endif /* WITH_WSREP */ + + if (!error && table_share->tmp_table == NO_TMP_TABLE && + ht->flags & HTON_WSREP_REPLICATION) + error= wsrep_after_row(thd); } +#endif /* WITH_WSREP */ return error; } @@ -8380,8 +8379,7 @@ int handler::update_first_row(const uchar *new_data) int handler::ha_delete_row(const uchar *buf) { int error; - DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || - m_lock_type == F_WRLCK); + DBUG_ASSERT(table_share->tmp_table || m_lock_type == F_WRLCK); /* Normally table->record[0] is used, but sometimes table->record[1] is used. 
(notably, for REPLACE and in sql_acl.cc) @@ -8400,8 +8398,8 @@ int handler::ha_delete_row(const uchar *buf) if (likely(!error) && !(error= table->hlindexes_on_delete(buf))) { rows_stats.deleted++; - Log_func *log_func= Delete_rows_log_event::binlog_row_logging_function; - error= binlog_log_row(buf, 0, log_func); + error= binlog_log_row(buf, 0, + Delete_rows_log_event::binlog_row_logging_function); #ifdef WITH_WSREP THD *thd= ha_thd(); diff --git a/sql/handler.h b/sql/handler.h index 24b99bae7c1..7c1d7eeea5c 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -334,8 +334,7 @@ enum chf_create_flags { #define HA_REUSES_FILE_NAMES (1ULL << 49) /* - Set of all binlog flags. Currently only contain the capabilities - flags. + Set of all binlog flags. Currently only contain the capabilities flags. */ #define HA_BINLOG_FLAGS (HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE) @@ -384,12 +383,13 @@ enum chf_create_flags { /* Implements SELECT ... FOR UPDATE SKIP LOCKED */ #define HA_CAN_SKIP_LOCKED (1ULL << 61) +#define HA_CHECK_UNIQUE_AFTER_WRITE (1ULL << 62) + /* This engine is not compatible with Online ALTER TABLE */ -#define HA_NO_ONLINE_ALTER (1ULL << 62) +#define HA_NO_ONLINE_ALTER (1ULL << 63) #define HA_LAST_TABLE_FLAG HA_NO_ONLINE_ALTER - /* bits in index_flags(index_number) for what you can do with index */ #define HA_READ_NEXT 1 /* TODO really use this flag */ #define HA_READ_PREV 2 /* supports ::index_prev */ @@ -2387,6 +2387,7 @@ struct HA_CREATE_INFO: public Table_scope_and_contents_source_st, const Lex_table_charset_collation_attrs_st &default_cscl, const Lex_table_charset_collation_attrs_st &convert_cscl, const Charset_collation_context &ctx); + bool check_if_valid_log_table(); }; @@ -3588,7 +3589,7 @@ public: */ Table_flags ha_table_flags() const { - DBUG_ASSERT(cached_table_flags < (HA_LAST_TABLE_FLAG << 1)); + DBUG_ASSERT((cached_table_flags >> 1) < HA_LAST_TABLE_FLAG); return cached_table_flags; } /** @@ -5195,11 +5196,11 @@ private: int 
create_lookup_handler(); void alloc_lookup_buffer(); - int check_duplicate_long_entries(const uchar *new_rec); - int check_duplicate_long_entries_update(const uchar *new_rec); int check_duplicate_long_entry_key(const uchar *new_rec, uint key_no); /** PRIMARY KEY/UNIQUE WITHOUT OVERLAPS check */ int ha_check_overlaps(const uchar *old_data, const uchar* new_data); + int ha_check_long_uniques(const uchar *old_rec, const uchar *new_rec); + int ha_check_inserver_constraints(const uchar *old_data, const uchar* new_data); protected: /* diff --git a/sql/item.cc b/sql/item.cc index d1220bf3cde..dd2fc9596e4 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -5150,6 +5150,7 @@ Item_param::set_param_type_and_swap_value(Item_param *src) void Item_param::set_default(bool set_type_handler_null) { m_is_settable_routine_parameter= false; + current_thd->lex->default_used= true; state= DEFAULT_VALUE; /* When Item_param is set to DEFAULT_VALUE: @@ -5307,14 +5308,46 @@ static Field *make_default_field(THD *thd, Field *field_arg) if (!newptr) return nullptr; + /* Don't check privileges, if it's parse_vcol_defs() */ + if (def_field->table->pos_in_table_list && + def_field->default_value->check_access(thd)) + return nullptr; + if (should_mark_column(thd->column_usage)) def_field->default_value->expr->update_used_tables(); def_field->move_field(newptr + 1, def_field->maybe_null() ? newptr : 0, 1); } else - def_field->move_field_offset((my_ptrdiff_t) - (def_field->table->s->default_values - - def_field->table->record[0])); + { + if (field_arg->table->s->field != nullptr) + { + /* + Use fields array from TABLE_SHARE for referencing to null byte and + field's value on construction of a Field object for default value of + table column + */ + Field *target= field_arg->table->s->field[field_arg->field_index]; + + /* + Set up table field's pointers ptr, null_ptr to point to corresponding + s->default_values parts. 
+ */ + def_field->move_field(target->ptr, target->null_ptr, target->null_bit); + } + else + { + /* + We get to here in case the field references a temporary table. + Triggers in not associated with a temporary table. Check these + invariants by DBUG_ASSERTs. + */ + DBUG_ASSERT(field_arg->table->s->tmp_table != NO_TMP_TABLE); + DBUG_ASSERT(field_arg->table->triggers == nullptr); + def_field->move_field_offset((my_ptrdiff_t) + (def_field->table->s->default_values - + def_field->table->record[0])); + } + } return def_field; } @@ -5836,6 +5869,8 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) DBUG_ASSERT((*select_ref)->fixed()); return &select->ref_pointer_array[counter]; } + if (group_by_ref && (*group_by_ref)->type() == Item::REF_ITEM) + return ((Item_ref*)(*group_by_ref))->ref; if (group_by_ref) return group_by_ref; DBUG_ASSERT(FALSE); @@ -10555,7 +10590,11 @@ bool Item_cache_bool::cache_value() if (!example) return false; value_cached= true; + THD *thd= current_thd; + const bool err= thd->is_error(); value= example->val_bool_result(); + if (!err && thd->is_error()) + value_cached= false; null_value_inside= null_value= example->null_value; unsigned_flag= false; return true; @@ -10567,7 +10606,11 @@ bool Item_cache_int::cache_value() if (!example) return FALSE; value_cached= TRUE; + THD *thd= current_thd; + const bool err= thd->is_error(); value= example->val_int_result(); + if (!err && thd->is_error()) + value_cached= false; null_value_inside= null_value= example->null_value; unsigned_flag= example->unsigned_flag; return TRUE; @@ -10644,7 +10687,11 @@ bool Item_cache_temporal::cache_value() if (!example) return false; value_cached= true; - value= example->val_datetime_packed_result(current_thd); + THD *thd= current_thd; + const bool err= thd->is_error(); + value= example->val_datetime_packed_result(thd); + if (!err && thd->is_error()) + value_cached= false; null_value_inside= null_value= example->null_value; return true; } @@ 
-10655,7 +10702,11 @@ bool Item_cache_time::cache_value() if (!example) return false; value_cached= true; - value= example->val_time_packed_result(current_thd); + THD *thd= current_thd; + const bool err= thd->is_error(); + value= example->val_time_packed_result(thd); + if (!err && thd->is_error()) + value_cached= false; null_value_inside= null_value= example->null_value; return true; } @@ -10783,8 +10834,12 @@ bool Item_cache_timestamp::cache_value() if (!example) return false; value_cached= true; + THD *thd= current_thd; + const bool err= thd->is_error(); null_value_inside= null_value= - example->val_native_with_conversion_result(current_thd, &m_native, type_handler()); + example->val_native_with_conversion_result(thd, &m_native, type_handler()); + if (!err && thd->is_error()) + value_cached= false; return true; } @@ -10794,7 +10849,11 @@ bool Item_cache_real::cache_value() if (!example) return FALSE; value_cached= TRUE; + THD *thd= current_thd; + const bool err= thd->is_error(); value= example->val_result(); + if (!err && thd->is_error()) + value_cached= false; null_value_inside= null_value= example->null_value; return TRUE; } @@ -10861,7 +10920,11 @@ bool Item_cache_decimal::cache_value() if (!example) return FALSE; value_cached= TRUE; + THD *thd= current_thd; + const bool err= thd->is_error(); my_decimal *val= example->val_decimal_result(&decimal_value); + if (!err && thd->is_error()) + value_cached= false; if (!(null_value_inside= null_value= example->null_value) && val != &decimal_value) my_decimal2decimal(val, &decimal_value); @@ -10917,8 +10980,12 @@ bool Item_cache_str::cache_value() return FALSE; } value_cached= TRUE; + THD *thd= current_thd; + const bool err= thd->is_error(); value_buff.set(buffer, sizeof(buffer), example->collation.collation); value= example->str_result(&value_buff); + if (!err && thd->is_error()) + value_cached= false; if ((null_value= null_value_inside= example->null_value)) value= 0; else if (value != &value_buff) diff --git 
a/sql/item.h b/sql/item.h index 7bc8c4d7f15..c69e74a4deb 100644 --- a/sql/item.h +++ b/sql/item.h @@ -2454,6 +2454,7 @@ public: If there is some, sets a bit for this key in the proper key map. */ virtual bool check_index_dependence(void *arg) { return 0; } + virtual bool check_sequence_privileges(void *arg) { return 0; } /*============== End of Item processor list ======================*/ /* @@ -6583,6 +6584,19 @@ public: { return get_item_copy(thd, this); } Item *field_transformer_for_having_pushdown(THD *, uchar *) override { return this; } + /* + Do the same thing as Item_field: if we were referring to a local view, + now we refer to somewhere outside of our SELECT. + */ + bool set_fields_as_dependent_processor(void *arg) override + { + if (!(used_tables() & OUTER_REF_TABLE_BIT)) + { + depended_from= (st_select_lex *) arg; + item_equal= NULL; + } + return 0; + } void print(String *str, enum_query_type query_type) override; }; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index c244780835f..31458007e3d 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -638,7 +638,7 @@ bool Arg_comparator::set_cmp_func_string(THD *thd) else if ((*b)->type() == Item::FUNC_ITEM && ((Item_func *) (*b))->functype() == Item_func::JSON_EXTRACT_FUNC) { - func= is_owner_equal_func() ? &Arg_comparator::compare_e_json_str: + func= is_owner_equal_func() ? &Arg_comparator::compare_e_str_json: &Arg_comparator::compare_str_json; return 0; } @@ -3166,7 +3166,8 @@ Item *Item_func_case_simple::find_item() { /* Compare every WHEN argument with it and return the first match */ uint idx; - if (!Predicant_to_list_comparator::cmp(this, &idx, NULL)) + bool found_unknown_values; + if (!Predicant_to_list_comparator::cmp(this, &idx, &found_unknown_values)) return args[idx + when_count()]; Item **pos= Item_func_case_simple::else_expr_addr(); return pos ? 
pos[0] : 0; diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 8f69b529b42..8a2c8ea1331 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -1205,6 +1205,7 @@ public: bool native_op(THD *thd, Native *to) override; bool fix_length_and_dec(THD *thd) override { + update_nullability_post_fix_fields(); if (aggregate_for_result(func_name_cstring(), args, arg_count, true)) return TRUE; fix_attributes(args, arg_count); @@ -1289,17 +1290,7 @@ public: bool native_op(THD *thd, Native *to) override; bool fix_length_and_dec(THD *thd) override { - /* - Set nullability from args[1] by default. - Note, some type handlers may reset maybe_null - in Item_hybrid_func_fix_attributes() if args[1] - is NOT NULL but cannot always be converted to - the data type of "this" safely. - E.g. Type_handler_inet6 does: - IFNULL(inet6_not_null_expr, 'foo') -> INET6 NULL - IFNULL(inet6_not_null_expr, '::1') -> INET6 NOT NULL - */ - copy_flags(args[1], item_base_t::MAYBE_NULL); + update_nullability_post_fix_fields(); if (Item_func_case_abbreviation2::fix_length_and_dec2(args)) return TRUE; return FALSE; @@ -2336,15 +2327,8 @@ public: @param [OUT] idx - In case if a value that is equal to the predicant was found, the index of the matching value is returned here. Otherwise, *idx is not changed. - @param [IN/OUT] found_unknown_values - how to handle UNKNOWN results. - If found_unknown_values is NULL (e.g. Item_func_case), - cmp() returns immediately when the first UNKNOWN - result is found. - If found_unknown_values is non-NULL (Item_func_in), - cmp() does not return when an UNKNOWN result is found, - sets *found_unknown_values to true, and continues - to compare the remaining pairs to find FALSE - (i.e. the value that is equal to the predicant). 
+ @param [OUT] found_unknown_values - set to true if the result of at least + one comparison was UNKNOWN @retval false - Found a value that is equal to the predicant @retval true - Didn't find an equal value @@ -2361,11 +2345,7 @@ public: return false; // Found a matching value } if (rc == UNKNOWN) - { - if (!found_unknown_values) - return true; *found_unknown_values= true; - } } return true; // Not found } diff --git a/sql/item_func.cc b/sql/item_func.cc index 67bcc5e42e3..7ed6499cdcc 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -276,6 +276,38 @@ bool Item_func::check_argument_types_scalar(uint start, uint end) const return false; } + +/** + @brief + Update function's nullability based on nullness of its arguments + + @details + Functions like `IFNULL` and `COALESCE` decide nullability of their + result after checking all the arguments. If any of the argument + is NOT NULL, function's result is also set to NOT NULL. + Note: Nullability determined here may be reset by type handlers in + `Item_hybrid_func_fix_attributes()`, if the first non-null argument + cannot be safely converted to target data type. + E.g. 
Type_handler_inet6 does: + IFNULL(inet6_not_null_expr, 'foo') -> INET6 NULL + IFNULL(inet6_not_null_expr, '::1') -> INET6 NOT NULL +*/ +void Item_func::update_nullability_post_fix_fields() +{ + if (!maybe_null()) + return; + + for (uint i= 0; i < arg_count; i++) + { + if (!args[i]->maybe_null()) + { + base_flags &= ~item_base_t::MAYBE_NULL; + break; + } + } +} + + /* Resolve references to table column for a function and its argument @@ -2823,6 +2855,7 @@ bool Item_func_rand::fix_fields(THD *thd,Item **ref) if (Item_real_func::fix_fields(thd, ref)) return TRUE; used_tables_cache|= RAND_TABLE_BIT; + thd->lex->safe_to_cache_query= 0; if (arg_count) { // Only use argument once in query /* @@ -7069,15 +7102,14 @@ longlong Item_func_cursor_rowcount::val_int() /***************************************************************************** SEQUENCE functions *****************************************************************************/ -bool Item_func_nextval::check_access_and_fix_fields(THD *thd, Item **ref, - privilege_t want_access) +bool Item_func_nextval::check_access(THD *thd, privilege_t want_access) { table_list->sequence= false; bool error= check_single_table_access(thd, want_access, table_list, false); table_list->sequence= true; if (error && table_list->belong_to_view) table_list->replace_view_error_with_generic(thd); - return error || Item_longlong_func::fix_fields(thd, ref); + return error; } longlong Item_func_nextval::val_int() @@ -7092,7 +7124,8 @@ longlong Item_func_nextval::val_int() String key_buff(buff,sizeof(buff), &my_charset_bin); DBUG_ENTER("Item_func_nextval::val_int"); update_table(); - DBUG_ASSERT(table && table->s->sequence); + DBUG_ASSERT(table); + DBUG_ASSERT(table->s->sequence); thd= table->in_use; if (thd->count_cuted_fields == CHECK_FIELD_EXPRESSION) diff --git a/sql/item_func.h b/sql/item_func.h index 520cc82203b..93130079b1a 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -31,8 +31,8 @@ extern "C" /* Bug in BSDI include file */ 
#include -extern int st_append_json(String *s, - CHARSET_INFO *json_cs, const uchar *js, uint js_len); +extern bool st_append_json(String *s, + CHARSET_INFO *json_cs, const uchar *js, uint js_len); class Item_func :public Item_func_or_sum { void sync_with_sum_func_and_with_field(List &list); @@ -81,6 +81,8 @@ protected: return print_sql_mode_qualified_name(to, query_type, func_name_cstring()); } + void update_nullability_post_fix_fields(); + bool aggregate_args2_for_comparison_with_conversion(THD *thd, Type_handler_hybrid_field_type *th); public: @@ -4359,7 +4361,7 @@ protected: TABLE_LIST *table_list; TABLE *table; bool print_table_list_identifier(THD *thd, String *to) const; - bool check_access_and_fix_fields(THD *, Item **ref, privilege_t); + bool check_access(THD *, privilege_t); public: Item_func_nextval(THD *thd, TABLE_LIST *table_list_arg): Item_longlong_func(thd), table_list(table_list_arg) {} @@ -4370,7 +4372,13 @@ public: return name; } bool fix_fields(THD *thd, Item **ref) override - { return check_access_and_fix_fields(thd, ref, INSERT_ACL | SELECT_ACL); } + { + /* Don't check privileges, if it's parse_vcol_defs() */ + return (table_list->table && check_sequence_privileges(thd)) || + Item_longlong_func::fix_fields(thd, ref); + } + bool check_sequence_privileges(void *thd) override + { return check_access((THD*)thd, INSERT_ACL | SELECT_ACL); } bool fix_length_and_dec(THD *thd) override { if (table_list->table) @@ -4413,8 +4421,8 @@ class Item_func_lastval :public Item_func_nextval public: Item_func_lastval(THD *thd, TABLE_LIST *table_list_arg): Item_func_nextval(thd, table_list_arg) {} - bool fix_fields(THD *thd, Item **ref) override - { return check_access_and_fix_fields(thd, ref, SELECT_ACL); } + bool check_sequence_privileges(void *thd) override + { return check_access((THD*)thd, SELECT_ACL); } longlong val_int() override; LEX_CSTRING func_name_cstring() const override { @@ -4439,8 +4447,8 @@ public: : Item_func_nextval(thd, table_list_arg), 
nextval(nextval_arg), round(round_arg), is_used(is_used_arg) {} - bool fix_fields(THD *thd, Item **ref) override - { return check_access_and_fix_fields(thd, ref, INSERT_ACL); } + bool check_sequence_privileges(void *thd) override + { return check_access((THD*)thd, INSERT_ACL); } longlong val_int() override; LEX_CSTRING func_name_cstring() const override { diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc index a17c2aba4ab..4ad89a7619e 100644 --- a/sql/item_jsonfunc.cc +++ b/sql/item_jsonfunc.cc @@ -26,7 +26,7 @@ static bool check_overlaps(json_engine_t *, json_engine_t *, bool); static int json_find_overlap_with_object(json_engine_t *, json_engine_t *, bool); #ifndef DBUG_OFF -static int dbug_json_check_min_stack_requirement() +int dbug_json_check_min_stack_requirement() { my_error(ER_STACK_OVERRUN_NEED_MORE, MYF(ME_FATAL), my_thread_stack_size, my_thread_stack_size, STACK_MIN_SIZE); @@ -103,20 +103,34 @@ append_simple(String *s, const uchar *a, size_t a_len) Appends JSON string to the String object taking charsets in consideration. 
*/ -int st_append_json(String *s, +bool st_append_json(String *s, CHARSET_INFO *json_cs, const uchar *js, uint js_len) { int str_len= js_len * s->charset()->mbmaxlen; - if (!s->reserve(str_len, 1024) && - (str_len= json_unescape(json_cs, js, js + js_len, + if (s->reserve(str_len, 1024)) + { + my_error(ER_OUTOFMEMORY, MYF(0), str_len); + return false; + } + + if ((str_len= json_unescape(json_cs, js, js + js_len, s->charset(), (uchar *) s->end(), (uchar *) s->end() + str_len)) > 0) { s->length(s->length() + str_len); - return 0; + return false; + } + if (current_thd) + { + if (str_len == JSON_ERROR_OUT_OF_SPACE) + my_error(ER_OUTOFMEMORY, MYF(0), str_len); + else if (str_len == JSON_ERROR_ILLEGAL_SYMBOL) + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, + ER_JSON_BAD_CHR, ER_THD(current_thd, ER_JSON_BAD_CHR), + 0, "st_append_json", 0); } - return str_len; + return true; } @@ -796,7 +810,12 @@ bool Json_engine_scan::check_and_get_value_scalar(String *res, int *error) js_len= value_len; } - return st_append_json(res, json_cs, js, js_len); + if (st_append_json(res, json_cs, js, js_len)) + { + *error= 1; + return true; + } + return false; } @@ -901,7 +920,7 @@ error: String *Item_func_json_unquote::val_str(String *str) { json_engine_t je; - int c_len; + int c_len= JSON_ERROR_OUT_OF_SPACE; String *js; if (!(js= read_json(&je))) @@ -910,21 +929,40 @@ String *Item_func_json_unquote::val_str(String *str) if (unlikely(je.s.error) || je.value_type != JSON_VALUE_STRING) return js; + int buf_len= je.value_len; + if (js->charset()->cset != my_charset_utf8mb4_bin.cset) + { + /* + json_unquote() will be transcoding between charsets. We don't know + how much buffer space we'll need. Assume that each byte in the source + will require mbmaxlen bytes in the output. 
+ */ + buf_len *= my_charset_utf8mb4_bin.mbmaxlen; + } + str->length(0); str->set_charset(&my_charset_utf8mb4_bin); - if (str->realloc_with_extra_if_needed(je.value_len) || + if (str->realloc_with_extra_if_needed(buf_len) || (c_len= json_unescape(js->charset(), je.value, je.value + je.value_len, &my_charset_utf8mb4_bin, - (uchar *) str->ptr(), (uchar *) (str->ptr() + je.value_len))) < 0) + (uchar *) str->ptr(), (uchar *) (str->ptr() + buf_len))) < 0) goto error; str->length(c_len); return str; error: - report_json_error(js, &je, 0); + if (current_thd) + { + if (c_len == JSON_ERROR_OUT_OF_SPACE) + my_error(ER_OUTOFMEMORY, MYF(0), buf_len); + else if (c_len == JSON_ERROR_ILLEGAL_SYMBOL) + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, + ER_JSON_BAD_CHR, ER_THD(current_thd, ER_JSON_BAD_CHR), + 0, "unquote", 0); + } return js; } @@ -3923,7 +3961,21 @@ int Item_func_json_search::compare_json_value_wild(json_engine_t *je, (uchar *) (esc_value.ptr() + esc_value.alloced_length())); if (esc_len <= 0) + { + if (current_thd) + { + if (esc_len == JSON_ERROR_OUT_OF_SPACE) + my_error(ER_OUTOFMEMORY, MYF(0), je->value_len); + else if (esc_len == JSON_ERROR_ILLEGAL_SYMBOL) + { + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, + ER_JSON_BAD_CHR, ER_THD(current_thd, ER_JSON_BAD_CHR), + 0, "comparison", + (int)(je->s.c_str - je->value)); + } + } return 0; + } return collation.collation->wildcmp( esc_value.ptr(), esc_value.ptr() + esc_len, @@ -4189,9 +4241,16 @@ int Arg_comparator::compare_json_str_basic(Item *j, Item *s) (uchar *) (value2.ptr() + je.value_len))) < 0) { if (current_thd) - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_JSON_BAD_CHR, ER_THD(current_thd, ER_JSON_BAD_CHR), - 0, "comparison", (int)((const char *) je.s.c_str - js->ptr())); + { + if (c_len == JSON_ERROR_OUT_OF_SPACE) + my_error(ER_OUTOFMEMORY, MYF(0), je.value_len); + else if (c_len == JSON_ERROR_ILLEGAL_SYMBOL) + { + push_warning_printf(current_thd, 
Sql_condition::WARN_LEVEL_WARN, + ER_JSON_BAD_CHR, ER_THD(current_thd, ER_JSON_BAD_CHR), + 0, "comparison", (int)((const char *) je.s.c_str - js->ptr())); + } + } goto error; } @@ -4248,10 +4307,17 @@ int Arg_comparator::compare_e_json_str_basic(Item *j, Item *s) (uchar *) (value1.ptr() + value_len))) < 0) { if (current_thd) - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_JSON_BAD_CHR, ER_THD(current_thd, ER_JSON_BAD_CHR), - 0, "equality comparison", 0); - return 1; + { + if (c_len == JSON_ERROR_OUT_OF_SPACE) + my_error(ER_OUTOFMEMORY, MYF(0), value_len); + else if (c_len == JSON_ERROR_ILLEGAL_SYMBOL) + { + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, + ER_JSON_BAD_CHR, ER_THD(current_thd, ER_JSON_BAD_CHR), + 0, "equality comparison", 0); + } + } + return 1; } value1.length(c_len); res1= &value1; diff --git a/sql/item_subselect.h b/sql/item_subselect.h index a0c2a9237af..67e9e2bc97c 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -644,6 +644,7 @@ public: value= 0; null_value= 0; was_null= 0; + is_jtbm_const_tab= 0; } bool select_transformer(JOIN *join) override; bool create_in_to_exists_cond(JOIN *join_arg); diff --git a/sql/json_schema.cc b/sql/json_schema.cc index c5dfdec409a..dd5862acb17 100644 --- a/sql/json_schema.cc +++ b/sql/json_schema.cc @@ -21,6 +21,11 @@ #include "json_schema.h" #include "json_schema_helper.h" #include "pcre2.h" + +#ifndef DBUG_OFF +int dbug_json_check_min_stack_requirement(); +#endif + static HASH all_keywords_hash; static Json_schema_keyword *create_json_schema_keyword(THD *thd) @@ -2779,15 +2784,9 @@ bool create_object_and_handle_keyword(THD *thd, json_engine_t *je, List temporary_list; DBUG_EXECUTE_IF("json_check_min_stack_requirement", - { - long arbitrary_var; - long stack_used_up= - (available_stack_size(thd->thread_stack, - &arbitrary_var)); - ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE); - }); + dbug_json_check_min_stack_requirement(); return 
true;); if (check_stack_overrun(thd, STACK_MIN_SIZE , NULL)) - return 1; + return true; while (json_scan_next(je)== 0 && je->stack_p >= level) { diff --git a/sql/json_table.cc b/sql/json_table.cc index 905ad1ac303..96bb6e6c6b0 100644 --- a/sql/json_table.cc +++ b/sql/json_table.cc @@ -27,6 +27,10 @@ #include "create_tmp_table.h" #include "sql_parse.h" +#ifndef DBUG_OFF +int dbug_json_check_min_stack_requirement(); +#endif + #define HA_ERR_JSON_TABLE (HA_ERR_LAST+1) class table_function_handlerton @@ -104,13 +108,9 @@ int get_disallowed_table_deps_for_list(MEM_ROOT *mem_root, List_iterator li(*join_list); DBUG_EXECUTE_IF("json_check_min_stack_requirement", - { - long arbitrary_var; - long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var)); - ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE); - }); + return -dbug_json_check_min_stack_requirement();); if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL)) - return 1; + return -1; while ((table= li++)) { @@ -1343,21 +1343,20 @@ void Table_function_json_table::fix_after_pullout(TABLE_LIST *sql_table, /* @brief Recursively make all tables in the join_list also depend on deps. + + @return - boolean - true if error (out of memory). 
*/ -static void add_extra_deps(List *join_list, table_map deps) +static bool add_extra_deps(List *join_list, table_map deps) { TABLE_LIST *table; List_iterator li(*join_list); DBUG_EXECUTE_IF("json_check_min_stack_requirement", - { - long arbitrary_var; - long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var)); - ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE); - }); + dbug_json_check_min_stack_requirement(); return true;); if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL)) - return; + return true; + while ((table= li++)) { table->dep_tables |= deps; @@ -1365,9 +1364,11 @@ static void add_extra_deps(List *join_list, table_map deps) if ((nested_join= table->nested_join)) { // set the deps inside, too - add_extra_deps(&nested_join->join_list, deps); + if (add_extra_deps(&nested_join->join_list, deps)) + return true; } } + return false; } @@ -1435,25 +1436,29 @@ static void add_extra_deps(List *join_list, table_map deps) @param join_list List of tables to process. Initial invocation should supply the JOIN's top-level table list. @param nest_tables Bitmap of all tables in the join list. + @param error Pointer to value which is set to true on stack overrun + error. - @return Bitmap of all outside references that tables in join_list have + @return Bitmap of all outside references that tables in join_list have, + or 0 on out of stack overrun error (in addition to *error= true). 
*/ table_map add_table_function_dependencies(List *join_list, - table_map nest_tables) + table_map nest_tables, + bool *error) { TABLE_LIST *table; table_map res= 0; List_iterator li(*join_list); DBUG_EXECUTE_IF("json_check_min_stack_requirement", - { - long arbitrary_var; - long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var)); - ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE); - }); + if (dbug_json_check_min_stack_requirement()) + { *error= true; return 0; }); if ((res=check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))) - return res; + { + *error= true; + return 0; + } // Recursively compute extra dependencies while ((table= li++)) @@ -1462,7 +1467,9 @@ table_map add_table_function_dependencies(List *join_list, if ((nested_join= table->nested_join)) { res |= add_table_function_dependencies(&nested_join->join_list, - nested_join->used_tables); + nested_join->used_tables, error); + if (*error) + return 0; } else if (table->table_function) { @@ -1473,7 +1480,13 @@ table_map add_table_function_dependencies(List *join_list, res= res & ~nest_tables & ~PSEUDO_TABLE_BITS; // Then, make all "peers" have them: if (res) - add_extra_deps(join_list, res); + { + if (add_extra_deps(join_list, res)) + { + *error= true; + return 0; + } + } return res; } diff --git a/sql/json_table.h b/sql/json_table.h index 84f0a099d6e..f588b6e08b0 100644 --- a/sql/json_table.h +++ b/sql/json_table.h @@ -284,7 +284,7 @@ bool push_table_function_arg_context(LEX *lex, MEM_ROOT *alloc); TABLE *create_table_for_function(THD *thd, TABLE_LIST *sql_table); table_map add_table_function_dependencies(List *join_list, - table_map nest_tables); + table_map nest_tables, bool *error); #endif /* JSON_TABLE_INCLUDED */ diff --git a/sql/log.cc b/sql/log.cc index 7989437140b..b948cc84349 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -695,6 +695,33 @@ bool LOGGER::is_log_table_enabled(uint log_table_type) } } + +int check_if_log_table(const TABLE_LIST 
*table) +{ + if (MYSQL_SCHEMA_NAME.streq(table->db)) + { + if (GENERAL_LOG_NAME.streq(table->table_name)) + return QUERY_LOG_GENERAL;; + + if (SLOW_LOG_NAME.streq(table->table_name)) + return QUERY_LOG_SLOW; + } + return 0; +} + + +bool HA_CREATE_INFO::check_if_valid_log_table() +{ + if (!(db_type->flags & HTON_SUPPORT_LOG_TABLES) || + (db_type == maria_hton && transactional != HA_CHOICE_NO)) + { + my_error(ER_UNSUPORTED_LOG_ENGINE, MYF(0), hton_name(db_type)->str); + return true; + } + return false; +} + + /** Check if a given table is opened log table @@ -710,25 +737,9 @@ int check_if_log_table(const TABLE_LIST *table, bool check_if_opened, const char *error_msg) { - int result= 0; - if (table->db.streq(MYSQL_SCHEMA_NAME)) - { - if (table->table_name.streq(GENERAL_LOG_NAME)) - { - result= QUERY_LOG_GENERAL; - goto end; - } - - if (table->table_name.streq(SLOW_LOG_NAME)) - { - result= QUERY_LOG_SLOW; - goto end; - } - } - return 0; - -end: - if (!check_if_opened || logger.is_log_table_enabled(result)) + int result= check_if_log_table(table); + if (result && + (!check_if_opened || logger.is_log_table_enabled(result))) { if (error_msg) my_error(ER_BAD_LOG_STATEMENT, MYF(0), error_msg); diff --git a/sql/log.h b/sql/log.h index a053b3a4703..ca194434e30 100644 --- a/sql/log.h +++ b/sql/log.h @@ -1224,6 +1224,7 @@ public: }; +int check_if_log_table(const TABLE_LIST *table); int check_if_log_table(const TABLE_LIST *table, bool check_if_opened, const char *errmsg); diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc index c0a58ecaf9d..77fea29fba5 100644 --- a/sql/log_event_server.cc +++ b/sql/log_event_server.cc @@ -5215,6 +5215,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) DBUG_ASSERT(table->in_use); error= do_exec_row(rgi); + THD_STAGE_INFO(thd, stage_executing); if (unlikely(error)) DBUG_PRINT("info", ("error: %s", HA_ERR(error))); @@ -6149,7 +6150,7 @@ static inline void store_compressed_length(String &str_buf, ulonglong length) { // Store 
Type and packed length - uchar buf[4]; + uchar buf[9]; uchar *buf_ptr = net_store_length(buf, length); str_buf.append(reinterpret_cast(buf), buf_ptr-buf); @@ -7156,36 +7157,10 @@ int Rows_log_event::update_sequence() int Write_rows_log_event::do_exec_row(rpl_group_info *rgi) { - DBUG_ASSERT(m_table != NULL); - const char *tmp= thd->get_proc_info(); - char *message, msg[128]; - const LEX_CSTRING &table_name= m_table->s->table_name; - const char quote_char= - get_quote_char_for_identifier(thd, table_name.str, table_name.length); - my_snprintf(msg, sizeof msg, - "Write_rows_log_event::write_row() on table %c%.*s%c", - quote_char, int(table_name.length), table_name.str, quote_char); - message= msg; int error; - -#ifdef WSREP_PROC_INFO - my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Write_rows_log_event::write_row(%lld) on table %c%.*s%c", - (long long) wsrep_thd_trx_seqno(thd), quote_char, - int(table_name.length), table_name.str, quote_char); - message= thd->wsrep_info; -#endif /* WSREP_PROC_INFO */ - - thd_proc_info(thd, message); + thd_proc_info(thd, "Write_rows_log_event::write_row()"); error= write_row(rgi, slave_exec_mode == SLAVE_EXEC_MODE_IDEMPOTENT); - thd_proc_info(thd, tmp); - - if (unlikely(error) && unlikely(!thd->is_error())) - { - DBUG_ASSERT(0); - my_error(ER_UNKNOWN_ERROR, MYF(0)); - } - + DBUG_ASSERT(!error || thd->is_error()); return error; } @@ -7935,46 +7910,15 @@ Delete_rows_log_event::do_after_row_operations(int error) int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi) { int error; - const char *tmp= thd->get_proc_info(); - char *message, msg[128]; - const LEX_CSTRING &table_name= m_table->s->table_name; - const char quote_char= - get_quote_char_for_identifier(thd, table_name.str, table_name.length); - my_snprintf(msg, sizeof msg, - "Delete_rows_log_event::find_row() on table %c%.*s%c", - quote_char, int(table_name.length), table_name.str, quote_char); - message= msg; - const bool invoke_triggers= (m_table->triggers && 
do_invoke_trigger()); - DBUG_ASSERT(m_table != NULL); + const bool invoke_triggers= m_table->triggers && do_invoke_trigger(); -#ifdef WSREP_PROC_INFO - my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Delete_rows_log_event::find_row(%lld) on table %c%.*s%c", - (long long) wsrep_thd_trx_seqno(thd), quote_char, - int(table_name.length), table_name.str, - quote_char); - message= thd->wsrep_info; -#endif /* WSREP_PROC_INFO */ - - thd_proc_info(thd, message); + thd_proc_info(thd, "Delete_rows_log_event::find_row()"); if (likely(!(error= find_row(rgi)))) { /* Delete the record found, located in record[0] */ - my_snprintf(msg, sizeof msg, - "Delete_rows_log_event::ha_delete_row() on table %c%.*s%c", - quote_char, int(table_name.length), table_name.str, - quote_char); - message= msg; -#ifdef WSREP_PROC_INFO - snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Delete_rows_log_event::ha_delete_row(%lld) on table %c%.*s%c", - (long long) wsrep_thd_trx_seqno(thd), quote_char, - int(table_name.length), table_name.str, quote_char); - message= thd->wsrep_info; -#endif - thd_proc_info(thd, message); + thd_proc_info(thd, "Delete_rows_log_event::ha_delete_row()"); bool trg_skip_row= false; if (invoke_triggers && @@ -8002,7 +7946,6 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi) error= HA_ERR_GENERIC; // in case if error is not set yet m_table->file->ha_index_or_rnd_end(); } - thd_proc_info(thd, tmp); return error; } @@ -8095,29 +8038,9 @@ int Update_rows_log_event::do_exec_row(rpl_group_info *rgi) { const bool invoke_triggers= (m_table->triggers && do_invoke_trigger()); - const char *tmp= thd->get_proc_info(); - DBUG_ASSERT(m_table != NULL); - char *message, msg[128]; - const LEX_CSTRING &table_name= m_table->s->table_name; - const char quote_char= - get_quote_char_for_identifier(thd, table_name.str, table_name.length); bool trg_skip_row= false; - my_snprintf(msg, sizeof msg, - "Update_rows_log_event::find_row() on table %c%.*s%c", - quote_char, 
int(table_name.length), table_name.str, quote_char); - message= msg; - -#ifdef WSREP_PROC_INFO - my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Update_rows_log_event::find_row(%lld) on table %c%.*s%c", - (long long) wsrep_thd_trx_seqno(thd), quote_char, - int(table_name.length), table_name.str, - quote_char); - message= thd->wsrep_info; -#endif /* WSREP_PROC_INFO */ - - thd_proc_info(thd, message); + thd_proc_info(thd, "Update_rows_log_event::find_row()"); int error= find_row(rgi); if (unlikely(error)) { @@ -8127,7 +8050,6 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) */ if ((m_curr_row= m_curr_row_end)) unpack_current_row(rgi, &m_cols_ai); - thd_proc_info(thd, tmp); return error; } @@ -8150,20 +8072,8 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) store_record(m_table,record[1]); m_curr_row= m_curr_row_end; - my_snprintf(msg, sizeof msg, - "Update_rows_log_event::unpack_current_row() on table %c%.*s%c", - quote_char, int(table_name.length), table_name.str, quote_char); - message= msg; -#ifdef WSREP_PROC_INFO - my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Update_rows_log_event::unpack_current_row(%lld) on table %c%.*s%c", - (long long) wsrep_thd_trx_seqno(thd), quote_char, - int(table_name.length), table_name.str, quote_char); - message= thd->wsrep_info; -#endif /* WSREP_PROC_INFO */ - /* this also updates m_curr_row_end */ - thd_proc_info(thd, message); + thd_proc_info(thd, "Update_rows_log_event::unpack_current_row()"); if (unlikely((error= unpack_current_row(rgi, &m_cols_ai)))) goto err; if (m_table->s->long_unique_table) @@ -8183,19 +8093,7 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) DBUG_DUMP("new values", m_table->record[0], m_table->s->reclength); #endif - my_snprintf(msg, sizeof msg, - "Update_rows_log_event::ha_update_row() on table %c%.*s%c", - quote_char, int(table_name.length), table_name.str, quote_char); - message= msg; -#ifdef WSREP_PROC_INFO - my_snprintf(thd->wsrep_info, 
sizeof(thd->wsrep_info) - 1, - "Update_rows_log_event::ha_update_row(%lld) on table %c%.*s%c", - (long long) wsrep_thd_trx_seqno(thd), quote_char, - int(table_name.length), table_name.str, quote_char); - message= thd->wsrep_info; -#endif /* WSREP_PROC_INFO */ - - thd_proc_info(thd, message); + thd_proc_info(thd, "Update_rows_log_event::ha_update_row()"); if (invoke_triggers && unlikely(process_triggers(TRG_EVENT_UPDATE, TRG_ACTION_BEFORE, true, &trg_skip_row))) @@ -8238,9 +8136,7 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) nullptr))) error= HA_ERR_GENERIC; // in case if error is not set yet - err: - thd_proc_info(thd, tmp); m_table->file->ha_index_or_rnd_end(); return error; } diff --git a/sql/mdl.cc b/sql/mdl.cc index 5b077177d25..cabb8d80406 100644 --- a/sql/mdl.cc +++ b/sql/mdl.cc @@ -2412,6 +2412,8 @@ MDL_context::acquire_lock(MDL_request *mdl_request, double lock_wait_timeout) (ulonglong)(abort_blocking_timeout * 1000000000ULL)); abort_blocking_enabled= true; } + DEBUG_SYNC(get_thd(), "mdl_after_find_deadlock"); + set_timespec_nsec(abs_timeout, (ulonglong)(lock_wait_timeout * 1000000000ULL)); wait_status= MDL_wait::EMPTY; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 0c9131aa749..31371bf91df 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2422,8 +2422,8 @@ static void activate_tcp_port(uint port, else { ip_sock.address_family= a->ai_family; - sql_print_information("Server socket created on IP: '%s'.", - (const char *) ip_addr); + sql_print_information("Server socket created on IP: '%s', port: '%u'.", + (const char *) ip_addr, port); if (mysql_socket_getfd(ip_sock) == INVALID_SOCKET) { @@ -2884,7 +2884,7 @@ void unlink_thd(THD *thd) } -#if defined(_WIN32) +#if defined(_WIN32) && !defined(EMBEDDED_LIBRARY) /* If server is started as service, the service routine will set the callback function. 
diff --git a/sql/opt_histogram_json.cc b/sql/opt_histogram_json.cc index 020e07527d4..6a0b31e6826 100644 --- a/sql/opt_histogram_json.cc +++ b/sql/opt_histogram_json.cc @@ -52,10 +52,17 @@ static bool json_unescape_to_string(const char *val, int val_len, String* out) out->length(res); return false; // Ok } + if (res == JSON_ERROR_ILLEGAL_SYMBOL) + return true; // Invalid character // We get here if the unescaped string didn't fit into memory. - if (out->alloc(out->alloced_length()*2)) - return true; + if (res == JSON_ERROR_OUT_OF_SPACE) + { + if (out->alloc(out->alloced_length()*2)) + return true; + } + else + return true; // unknown error } } @@ -492,7 +499,7 @@ bool read_bucket_endpoint(json_engine_t *je, Field *field, String *out, const char* je_value= (const char*)je->value; if (je->value_type == JSON_VALUE_STRING && je->value_escaped) { - StringBuffer<128> unescape_buf; + StringBuffer<128> unescape_buf(field->charset() ? field->charset() : &my_charset_bin); if (json_unescape_to_string(je_value, je->value_len, &unescape_buf)) { *err= "Un-escape error"; @@ -599,10 +606,14 @@ int Histogram_json_hb::parse_bucket(json_engine_t *je, Field *field, bool have_start= false; bool have_size= false; bool have_ndv= false; + CHARSET_INFO *cs; + + if (!(cs= field->charset())) + cs= &my_charset_bin; double size_d; longlong ndv_ll= 0; - StringBuffer<128> value_buf; + StringBuffer<128> value_buf(cs); int rc; while (!(rc= json_scan_next(je)) && je->state != JST_OBJ_END) diff --git a/sql/opt_histogram_json.h b/sql/opt_histogram_json.h index 248467928fd..a3e21a08aa7 100644 --- a/sql/opt_histogram_json.h +++ b/sql/opt_histogram_json.h @@ -16,6 +16,20 @@ #include "sql_statistics.h" +struct Histogram_bucket +{ + // The left endpoint in KeyTupleFormat. The endpoint is inclusive, this + // value is in this bucket. + std::string start_value; + + // Cumulative fraction: The fraction of table rows that fall into this + // and preceding buckets. 
+ double cum_fract; + + // Number of distinct values in the bucket. + longlong ndv; +}; + /* An equi-height histogram which stores real values for bucket bounds. @@ -71,21 +85,7 @@ class Histogram_json_hb final : public Histogram_base /* Collection-time only: collected histogram in the JSON form. */ std::string json_text; - struct Bucket - { - // The left endpoint in KeyTupleFormat. The endpoint is inclusive, this - // value is in this bucket. - std::string start_value; - - // Cumulative fraction: The fraction of table rows that fall into this - // and preceding buckets. - double cum_fract; - - // Number of distinct values in the bucket. - longlong ndv; - }; - - std::vector buckets; + std::vector buckets; std::string last_bucket_end_endp; @@ -129,6 +129,16 @@ public: double range_selectivity(Field *field, key_range *min_endp, key_range *max_endp, double avg_sel) override; + const std::vector& get_json_histogram() const + { + return buckets; + } + + const std::string& get_last_bucket_end_endp() const + { + return last_bucket_end_endp; + } + void set_json_text(ulonglong sz, const char *json_text_arg, size_t json_text_len) { diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 48d325c7a15..baf1e989448 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -7336,21 +7336,24 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info, order R by (E(#records_matched) * key_record_length). 
S= first(R); -- set of scans that will be used for ROR-intersection - R= R-first(S); + R= R - S; min_cost= cost(S); min_scan= make_scan(S); while (R is not empty) { - firstR= R - first(R); - if (!selectivity(S + firstR < selectivity(S))) + firstR= first(R); + if (!selectivity(S + firstR) < selectivity(S)) + { + R= R - firstR; continue; - + } S= S + first(R); if (cost(S) < min_cost) { min_cost= cost(S); min_scan= make_scan(S); } + R= R - firstR; -- Remove the processed scan from R } return min_scan; } @@ -12460,22 +12463,44 @@ static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts) return FALSE; } + if (key_part >= key_part_end) + return TRUE; + + pk_number= param->table->s->primary_key; + if (!param->table->file->pk_is_clustering_key(pk_number)) + return FALSE; + + if (keynr == pk_number) + return TRUE; /* Scan on clustered PK is always ROR */ + + + KEY_PART_INFO *pk_part= param->table->key_info[pk_number].key_part; + KEY_PART_INFO *pk_part_end= pk_part + + param->table->key_info[pk_number].user_defined_key_parts; + /* + Check for columns indexed with DESC. + If a column is present in both Secondary Key and Primary Key and either of + indexes include it with DESC, then the scan is not a ROR scan. + */ + for (; key_part != key_part_end; ++key_part) + { + pk_part= param->table->key_info[pk_number].key_part; + for (; pk_part != pk_part_end; ++pk_part) + { + if (key_part->fieldnr == pk_part->fieldnr && + (MY_TEST(key_part->key_part_flag & HA_REVERSE_SORT) || + MY_TEST(pk_part->key_part_flag & HA_REVERSE_SORT))) + return FALSE; + } + } + /* If there are equalities for all key parts, it is a ROR scan. If there are equalities all keyparts and even some of key parts from "Extended Key" index suffix, it is a ROR-scan, too. 
*/ - if (key_part >= key_part_end) - return TRUE; - key_part= table_key->key_part + nparts; - pk_number= param->table->s->primary_key; - if (!param->table->file->pk_is_clustering_key(pk_number)) - return FALSE; - - KEY_PART_INFO *pk_part= param->table->key_info[pk_number].key_part; - KEY_PART_INFO *pk_part_end= pk_part + - param->table->key_info[pk_number].user_defined_key_parts; + pk_part= param->table->key_info[pk_number].key_part; for (;(key_part!=key_part_end) && (pk_part != pk_part_end); ++key_part, ++pk_part) { diff --git a/sql/opt_split.cc b/sql/opt_split.cc index c08d29e69a0..d407b828bee 100644 --- a/sql/opt_split.cc +++ b/sql/opt_split.cc @@ -291,7 +291,6 @@ public: double unsplit_oper_cost; /* Cardinality of T when nothing is pushed */ double unsplit_card; - double last_refills; SplM_plan_info *find_plan(TABLE *table, uint key, uint parts); }; @@ -603,13 +602,24 @@ void TABLE::add_splitting_info_for_key_field(KEY_FIELD *key_field) THD *thd= in_use; Item *left_item= spl_field->producing_item->build_clone(thd); Item *right_item= key_field->val->build_clone(thd); - Item_func_eq *eq_item= 0; + Item_bool_func *eq_item= 0; if (left_item && right_item) { right_item->walk(&Item::set_fields_as_dependent_processor, false, join->select_lex); right_item->update_used_tables(); - eq_item= new (thd->mem_root) Item_func_eq(thd, left_item, right_item); + /* + We've just pushed right_item down into the child select. It may only + have references to outside. 
+ */ + DBUG_ASSERT(!(right_item->used_tables() & ~PSEUDO_TABLE_BITS)); + + // Item_func::EQUAL_FUNC is null-safe, others can use Item_func_eq() + if (key_field->cond->type() == Item::FUNC_ITEM && + ((Item_func*)key_field->cond)->functype() == Item_func::EQUAL_FUNC) + eq_item= new (thd->mem_root) Item_func_equal(thd, left_item, right_item); + else + eq_item= new (thd->mem_root) Item_func_eq(thd, left_item, right_item); } if (!eq_item) return; @@ -623,14 +633,7 @@ void TABLE::add_splitting_info_for_key_field(KEY_FIELD *key_field) added_key_field->level= 0; added_key_field->optimize= KEY_OPTIMIZE_EQ; added_key_field->eq_func= true; - - Item *real= key_field->val->real_item(); - if ((real->type() == Item::FIELD_ITEM) && - ((Item_field*)real)->field->maybe_null()) - added_key_field->null_rejecting= true; - else - added_key_field->null_rejecting= false; - + added_key_field->null_rejecting= key_field->null_rejecting; added_key_field->cond_guard= NULL; added_key_field->sj_pred_no= UINT_MAX; return; @@ -1054,6 +1057,25 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(uint idx, if (best_table) { *spl_pd_boundary= this->table->map; + /* + Compute "refills" - how many times we'll need to refill the split- + materialized temp. table. Split-materialized table has references to + preceding table(s). Suppose the join prefix is (t1, t2, t3) and + split-materialized refers to table t2: + + t1 t2 t3 + ^ | + +------------ + + + If we do not use join buffer for table t3, then we'll need to refill + the split-materialized table partial_join_cardinality({t1, t2}) times. + (this assumes that fanout of table t3 is greater than 1, which is + typically true). + If table t3 uses join buffer, then every time we get a record combination + of {t1.row,t2.row,t3.row} the t2.row may be different and so we will need + to refill every time, that is, + partial_join_cardinality(t1,t3,t3) times. 
+ */ if (!best_param_tables) refills= 1; else @@ -1173,7 +1195,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(uint idx, The best plan that employs splitting is cheaper than the plan without splitting */ - startup_cost= spl_opt_info->last_refills * spl_plan->cost; + startup_cost= refills * spl_plan->cost; records= (ha_rows) (spl_opt_info->unsplit_card * spl_plan->split_sel); if (unlikely(thd->trace_started()) && ! already_printed) { diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc index 4cc1e264cd0..1f688fc2a1a 100644 --- a/sql/opt_table_elimination.cc +++ b/sql/opt_table_elimination.cc @@ -1592,6 +1592,13 @@ void check_equality(Dep_analysis_context *ctx, Dep_module_expr **eq_mod, if (field->can_optimize_outer_join_table_elimination(cond, right) != Data_type_compatibility::OK) return; + /* + UNIQUE indexes over nullable columns may have duplicate NULL values. + This means, a condition like "field IS NULL" or "field <=> right_expr" + may match multiple rows. Dis-qualify such conditions. 
+ */ + if (field->real_maybe_null() && right->maybe_null()) + return; Dep_value_field *field_val; if ((field_val= ctx->get_field_value(field))) add_module_expr(ctx, eq_mod, and_level, field_val, right, NULL); diff --git a/sql/parse_file.cc b/sql/parse_file.cc index f4aae1300e2..8495f56c1fe 100644 --- a/sql/parse_file.cc +++ b/sql/parse_file.cc @@ -465,8 +465,6 @@ sql_parse_prepare(const LEX_CSTRING *file_name, MEM_ROOT *mem_root, DBUG_RETURN(0); } - MSAN_STAT_WORKAROUND(&stat_info); - if (stat_info.st_size > INT_MAX-1) { my_error(ER_FPARSER_TOO_BIG_FILE, MYF(0), file_name->str); diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc index 94e8f188f77..9790349015a 100644 --- a/sql/rpl_gtid.cc +++ b/sql/rpl_gtid.cc @@ -3432,6 +3432,7 @@ struct gtid_report_ctx my_bool contains_err; }; +/** Iteration block for Binlog_gtid_state_validator::report() */ static my_bool report_audit_findings(void *entry, void *report_ctx_arg) { struct Binlog_gtid_state_validator::audit_elem *audit_el= @@ -3570,7 +3571,7 @@ my_bool Window_gtid_event_filter::exclude(rpl_gtid *gtid) bounds of this window. 
*/ - if (!m_has_start && is_gtid_at_or_before(&m_stop, gtid)) + if (!m_has_start && m_has_stop && is_gtid_at_or_before(&m_stop, gtid)) { /* Start GTID was not provided, so we want to include everything from here @@ -3578,6 +3579,12 @@ my_bool Window_gtid_event_filter::exclude(rpl_gtid *gtid) */ m_is_active= TRUE; should_exclude= FALSE; + if (gtid->seq_no == m_stop.seq_no) + { + m_has_passed= TRUE; + DBUG_PRINT("gtid-event-filter", + ("Window: End %d-%d-%llu", PARAM_GTID(m_stop))); + } } else if ((m_has_start && is_gtid_at_or_after(&m_start, gtid)) && (!m_has_stop || is_gtid_at_or_before(&m_stop, gtid))) @@ -3585,8 +3592,7 @@ my_bool Window_gtid_event_filter::exclude(rpl_gtid *gtid) m_is_active= TRUE; DBUG_PRINT("gtid-event-filter", - ("Window: Begin (%d-%d-%llu, %d-%d-%llu]", - PARAM_GTID(m_start), PARAM_GTID(m_stop))); + ("Window: Begin %d-%d-%llu", PARAM_GTID(m_start))); /* As the start of the range is exclusive, if this gtid is the start of @@ -3601,8 +3607,7 @@ my_bool Window_gtid_event_filter::exclude(rpl_gtid *gtid) { m_has_passed= TRUE; DBUG_PRINT("gtid-event-filter", - ("Window: End (%d-%d-%llu, %d-%d-%llu]", - PARAM_GTID(m_start), PARAM_GTID(m_stop))); + ("Window: End %d-%d-%llu", PARAM_GTID(m_stop))); } } } /* if (!m_is_active && !m_has_passed) */ @@ -3642,6 +3647,17 @@ my_bool Window_gtid_event_filter::has_finished() return m_has_stop ? 
m_has_passed : FALSE; } +bool Window_gtid_event_filter::verify_final_state() +{ + bool is_not_final= m_has_stop && !m_has_passed; + if (is_not_final) + Binlog_gtid_state_validator::warn(stderr, + "Did not reach stop position %u-%u-%llu before end of input", + PARAM_GTID(m_stop) + ); + return is_not_final; +} + void free_u32_gtid_filter_element(void *p) { gtid_filter_element *gfe= (gtid_filter_element *) p; @@ -3724,6 +3740,30 @@ my_bool Id_delegating_gtid_event_filter::has_finished() m_num_completed_filters == m_num_stateful_filters; } +/** + Iteration block for Id_delegating_gtid_event_filter::verify_final_state() +*/ +static my_bool +verify_subfilter_final_state(void *entry, void *is_any_not_final) +{ + if (entry && static_cast * + >(entry)->filter->verify_final_state()) + *static_cast(is_any_not_final)= true; + return false; // do not terminate early +} + +template +bool Id_delegating_gtid_event_filter::verify_final_state() +{ + if (has_finished()) // fast happy path + return false; + // If a user-defined filters is not deactivated, it may not be complete. 
+ bool is_any_not_final= false; + my_hash_iterate(&m_filters_by_id_hash, + verify_subfilter_final_state, &is_any_not_final); + return is_any_not_final; +} + template my_bool Id_delegating_gtid_event_filter::exclude(rpl_gtid *gtid) { @@ -4184,3 +4224,18 @@ my_bool Intersecting_gtid_event_filter::has_finished() } return FALSE; } + +bool Intersecting_gtid_event_filter::verify_final_state() +{ + bool is_any_not_final= false; + Gtid_event_filter *subfilter; + for (size_t i= 0; i < m_filters.elements; ++i) + { + subfilter= + *reinterpret_cast(dynamic_array_ptr(&m_filters, i)); + DBUG_ASSERT(subfilter); + if (subfilter->verify_final_state()) + is_any_not_final= true; + } + return is_any_not_final; +} diff --git a/sql/rpl_gtid.h b/sql/rpl_gtid.h index d5af90e029d..fe8e3751dbe 100644 --- a/sql/rpl_gtid.h +++ b/sql/rpl_gtid.h @@ -594,6 +594,24 @@ public: Returns TRUE when completed, and FALSE when the filter has not finished. */ virtual my_bool has_finished() = 0; + + /** + Check that this filter implementation is at a final, + completed state, and warn if it is not. + + For a filter that can maintain their own state, + this not only validates if the filter ::has_finished(), + but may also print specific warnings for its variety of non-final states. + + For a filter that manage multiple subfilters, this should iterate + through all of those to have each self-report any ineffectiveness. + This cumulative result may not correlate with the ::has_finished() state. + + @return + `false` if the filter is at a completed state, or `true` if it + warned incompleteness (This scheme is the opposite of has_finished()!) 
+ */ + virtual bool verify_final_state() { return false; } }; /* @@ -643,6 +661,7 @@ public: my_bool exclude(rpl_gtid*) override; my_bool has_finished() override; + bool verify_final_state() override; /* Set the GTID that begins this window (exclusive) @@ -688,15 +707,6 @@ public: m_stop= {0, 0, 0}; } -protected: - - /* - When processing GTID streams, the order in which they are processed should - be sequential with no gaps between events. If a gap is found within a - window, warn the user. - */ - void verify_gtid_is_expected(rpl_gtid *gtid); - private: enum warning_flags @@ -726,7 +736,7 @@ private: /* m_has_passed : Indicates whether or not the program is currently reading - events from within this window. + events from beyond this window. */ my_bool m_has_passed; @@ -763,6 +773,7 @@ public: my_bool exclude(rpl_gtid *gtid) override; my_bool has_finished() override; + bool verify_final_state() override; void set_default_filter(Gtid_event_filter *default_filter); uint32 get_filter_type() override { return DELEGATING_GTID_FILTER_TYPE; } @@ -944,6 +955,7 @@ public: */ my_bool has_finished() override; + bool verify_final_state() override; uint32 get_filter_type() override { return INTERSECTING_GTID_FILTER_TYPE; } /* diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index 92646b89e67..d7ed5c98954 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -104,6 +104,17 @@ handle_queued_pos_update(THD *thd, rpl_parallel_thread::queued_event *qev) (e->force_abort && !rli->stop_for_until)) return; + #ifdef ENABLED_DEBUG_SYNC + DBUG_EXECUTE_IF("pause_sql_thread_on_fde", + DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN( + "now SIGNAL paused_on_fde WAIT_FOR sql_thread_continue" + ))); + DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN( + "now SIGNAL main_sql_thread_continue" + ))); + ); + #endif + mysql_mutex_lock(&rli->data_lock); cmp= compare_log_name(rli->group_relay_log_name, qev->event_relay_log_name); if (cmp < 0) @@ -1541,6 +1552,14 @@ 
handle_rpl_parallel_thread(void *arg) else rgi->mark_start_commit(); DEBUG_SYNC(thd, "rpl_parallel_after_mark_start_commit"); +#ifdef ENABLED_DEBUG_SYNC + DBUG_EXECUTE_IF("halt_past_mark_start_commit", + { + DBUG_ASSERT(!debug_sync_set_action + (thd, STRING_WITH_LEN("now WAIT_FOR past_mark_continue"))); + DBUG_SET_INITIAL("-d,halt_past_mark_start_commit"); + };); +#endif } } @@ -3054,13 +3073,21 @@ rpl_parallel::stop_during_until() } -bool -rpl_parallel::workers_idle(Relay_log_info *rli) +bool Relay_log_info::are_sql_threads_caught_up() { - mysql_mutex_assert_owner(&rli->data_lock); - return !rli->last_inuse_relaylog || - rli->last_inuse_relaylog->queued_count == - rli->last_inuse_relaylog->dequeued_count; + mysql_mutex_assert_owner(&data_lock); + if (!sql_thread_caught_up) + return false; + /* + The SQL thread sets @ref worker_threads_caught_up to `false` but not `true`. + Therefore, this place needs to check if it can now be `true`. + */ + if (!worker_threads_caught_up && ( // No need to re-check if already `true`. + !last_inuse_relaylog || // `nullptr` case + last_inuse_relaylog->queued_count == last_inuse_relaylog->dequeued_count + )) + worker_threads_caught_up= true; // Refresh + return worker_threads_caught_up; } diff --git a/sql/rpl_parallel.h b/sql/rpl_parallel.h index ef872dec66f..4eb4b00699c 100644 --- a/sql/rpl_parallel.h +++ b/sql/rpl_parallel.h @@ -510,8 +510,6 @@ struct rpl_parallel { void stop_during_until(); int wait_for_workers_idle(THD *thd); int do_event(rpl_group_info *serial_rgi, Log_event *ev, ulonglong event_size); - - static bool workers_idle(Relay_log_info *rli); }; diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h index 7bbb5b33daf..432ae982762 100644 --- a/sql/rpl_rli.h +++ b/sql/rpl_rli.h @@ -257,6 +257,29 @@ public: Seconds_Behind_Master as zero while the SQL thread is so waiting. 
*/ bool sql_thread_caught_up; + /** + Simple setter for @ref worker_threads_caught_up; + sets it `false` to indicate new user events in queue + @pre @ref data_lock held to prevent race with are_sql_threads_caught_up() + */ + inline void unset_worker_threads_caught_up() + { + mysql_mutex_assert_owner(&data_lock); + worker_threads_caught_up= false; + } + /** + @return + `true` if both @ref sql_thread_caught_up and (refresh according to + @ref last_inuse_relaylog as needed) @ref worker_threads_caught_up + @pre Only meaningful if `mi->using_parallel()` + @pre @ref data_lock held to prevent race condition + @note + Parallel replication requires the idleness of the main SQL thread as well, + because after the thread sets its state to "busy" with `data_lock` held, + it enqueues events *without this lock*. Not to mention any event the main + thread processes itself without distribution, e.g., ignored ones. + */ + bool are_sql_threads_caught_up(); /* Last executed timestamp */ my_time_t last_master_timestamp; @@ -602,6 +625,22 @@ private: relay log. */ uint32 m_flags; + + /** + When `true`, this worker threads' copy of @ref sql_thread_caught_up + represents that __every__ worker thread is waiting for new events. + * The SQL driver thread sets this to `false` through + unset_worker_threads_caught_up() as it prepares an event + (either to enqueue a worker or, e.g., ignored events, process itself) + * For the main driver or any worker thread to refresh this state immediately + when it finishes, the procedure would have to be a critical section. + To avoid depending on a mutex, this state instead only returns to `true` + as part of its reader, are_sql_threads_caught_up(). + `Seconds_Behind_Master` of SHOW SLAVE STATUS uses this method (which also + reads `sql_thread_caught_up`) to know when all SQL threads are waiting. 
+ @pre Only meaningful if `mi->using_parallel()` + */ + bool worker_threads_caught_up= true; }; diff --git a/sql/semisync_master.cc b/sql/semisync_master.cc index 0e41dbd0c9c..93ed2c9f927 100644 --- a/sql/semisync_master.cc +++ b/sql/semisync_master.cc @@ -68,15 +68,20 @@ static ulonglong timespec_to_usec(const struct timespec *ts) return (ulonglong) ts->tv_sec * TIME_MILLION + ts->tv_nsec / TIME_THOUSAND; } -int signal_waiting_transaction(THD *waiting_thd, const char *binlog_file, - my_off_t binlog_pos) +static int +signal_waiting_transaction(THD *waiting_thd, bool thd_valid, + const char *binlog_file, my_off_t binlog_pos) { /* It is possible that the connection thd waiting for an ACK was killed. In such circumstance, the connection thread will nullify the thd member of its Active_tranx node. So before we try to signal, ensure the THD exists. + + The thd_valid is only set while the THD is waiting in commit_trx(); this + is defensive coding to not signal an invalid THD if we somewhere + accidentally did not remove the transaction from the list. 
*/ - if (waiting_thd) + if (waiting_thd && thd_valid) mysql_cond_signal(&waiting_thd->COND_wakeup_ready); return 0; } @@ -182,6 +187,7 @@ int Active_tranx::insert_tranx_node(THD *thd_to_wait, ins_node->log_name[FN_REFLEN-1] = 0; /* make sure it ends properly */ ins_node->log_pos = log_file_pos; ins_node->thd= thd_to_wait; + ins_node->thd_valid= false; if (!m_trx_front) { @@ -263,7 +269,8 @@ void Active_tranx::clear_active_tranx_nodes( if ((log_file_name != NULL) && compare(new_front, log_file_name, log_file_pos) > 0) break; - pre_delete_hook(new_front->thd, new_front->log_name, new_front->log_pos); + pre_delete_hook(new_front->thd, new_front->thd_valid, + new_front->log_name, new_front->log_pos); new_front = new_front->next; } @@ -355,13 +362,17 @@ void Active_tranx::unlink_thd_as_waiter(const char *log_file_name, } if (entry) + { entry->thd= NULL; + entry->thd_valid= false; + } DBUG_VOID_RETURN; } -bool Active_tranx::is_thd_waiter(THD *thd_to_check, const char *log_file_name, - my_off_t log_file_pos) +Tranx_node * +Active_tranx::is_thd_waiter(THD *thd_to_check, const char *log_file_name, + my_off_t log_file_pos) { DBUG_ENTER("Active_tranx::assert_thd_is_waiter"); mysql_mutex_assert_owner(m_lock); @@ -377,7 +388,7 @@ bool Active_tranx::is_thd_waiter(THD *thd_to_check, const char *log_file_name, entry = entry->hash_next; } - DBUG_RETURN(static_cast(entry)); + DBUG_RETURN(entry); } /******************************************************************************* @@ -863,6 +874,10 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name, if (!rpl_semi_sync_master_clients && !rpl_semi_sync_master_wait_no_slave) { + lock(); + m_active_tranxs->unlink_thd_as_waiter(trx_wait_binlog_name, + trx_wait_binlog_pos); + unlock(); rpl_semi_sync_master_no_transactions++; DBUG_RETURN(0); } @@ -922,6 +937,9 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name, } } + Tranx_node *tranx_entry= + m_active_tranxs->is_thd_waiter(thd, 
trx_wait_binlog_name, + trx_wait_binlog_pos); /* In between the binlogging of this transaction and this wait, it is * possible that our entry in Active_tranx was removed (i.e. if * semi-sync was switched off and on). It is also possible that the @@ -932,8 +950,7 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name, * rpl_semi_sync_master_yes/no_tx consistent with it, we check for a * semi-sync restart _after_ checking the reply state. */ - if (unlikely(!m_active_tranxs->is_thd_waiter(thd, trx_wait_binlog_name, - trx_wait_binlog_pos))) + if (unlikely(!tranx_entry)) { DBUG_EXECUTE_IF( "semisync_log_skip_trx_wait", @@ -952,6 +969,16 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name, break; } + /* + Mark that our THD is now valid for signalling to by the ack thread. + It is important to ensure that we can never leave a no longer valid + THD in the transaction list and signal it, eg. MDEV-36934. This way, + we ensure the THD will only be signalled while this function is + running, even in case of some incorrect error handling or similar + that might leave a dangling THD in the list. + */ + tranx_entry->thd_valid= true; + /* Let us update the info about the minimum binlog position of waiting * threads. */ @@ -1284,6 +1311,8 @@ int Repl_semi_sync_master::write_tranx_in_binlog(THD *thd, DBUG_ENTER("Repl_semi_sync_master::write_tranx_in_binlog"); + DEBUG_SYNC(current_thd, "semisync_at_write_tranx_in_binlog"); + lock(); /* This is the real check inside the mutex. 
*/ @@ -1317,7 +1346,8 @@ int Repl_semi_sync_master::write_tranx_in_binlog(THD *thd, m_commit_file_name_inited = true; } - if (is_on()) + if (is_on() && + (rpl_semi_sync_master_clients || rpl_semi_sync_master_wait_no_slave)) { DBUG_ASSERT(m_active_tranxs != NULL); if(m_active_tranxs->insert_tranx_node(thd, log_file_name, log_file_pos)) diff --git a/sql/semisync_master.h b/sql/semisync_master.h index c96b0404035..2b8f221fcff 100644 --- a/sql/semisync_master.h +++ b/sql/semisync_master.h @@ -30,6 +30,7 @@ extern PSI_cond_key key_COND_binlog_send; struct Tranx_node { char log_name[FN_REFLEN]; + bool thd_valid; /* thd is valid for signalling */ my_off_t log_pos; THD *thd; /* The thread awaiting an ACK */ struct Tranx_node *next; /* the next node in the sorted list */ @@ -126,7 +127,9 @@ public: trx_node= &(current_block->nodes[++last_node]); trx_node->log_name[0] = '\0'; + trx_node->thd_valid= false; trx_node->log_pos= 0; + trx_node->thd= nullptr; trx_node->next= 0; trx_node->hash_next= 0; return trx_node; @@ -298,7 +301,8 @@ private: its invocation. See the context in which it is called to know. */ -typedef int (*active_tranx_action)(THD *trx_thd, const char *log_file_name, +typedef int (*active_tranx_action)(THD *trx_thd, bool thd_valid, + const char *log_file_name, my_off_t trx_log_file_pos); /** @@ -381,8 +385,8 @@ public: * matches the thread of the respective Tranx_node::thd of the passed in * log_file_name and log_file_pos. */ - bool is_thd_waiter(THD *thd_to_check, const char *log_file_name, - my_off_t log_file_pos); + Tranx_node * is_thd_waiter(THD *thd_to_check, const char *log_file_name, + my_off_t log_file_pos); /* Given a position, check to see whether the position is an active * transaction's ending position by probing the hash table. 
diff --git a/sql/slave.cc b/sql/slave.cc index 7f995c62bd9..42d00ecce29 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -3021,21 +3021,10 @@ void store_master_info(THD *thd, Master_info *mi, TABLE *table, if (!stamp) idle= true; + else if (mi->using_parallel()) + idle= mi->rli.are_sql_threads_caught_up(); else - { idle= mi->rli.sql_thread_caught_up; - - /* - The idleness of the SQL thread is needed for the parallel slave - because events can be ignored before distribution to a worker thread. - That is, Seconds_Behind_Master should still be calculated and visible - while the slave is processing ignored events, such as those skipped - due to slave_skip_counter. - */ - if (mi->using_parallel() && idle && - !rpl_parallel::workers_idle(&mi->rli)) - idle= false; - } if (idle) time_diff= 0; else @@ -4125,20 +4114,19 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli, if (rli->mi->using_parallel()) { /* - rli->sql_thread_caught_up is checked and negated here to ensure that + Relay_log_info::are_sql_threads_caught_up() + is checked and its states are negated here to ensure that the value of Seconds_Behind_Master in SHOW SLAVE STATUS is consistent with the update of last_master_timestamp. It was previously unset immediately after reading an event from the relay log; however, for the duration between that unset and the time that LMT would be updated could lead to spikes in SBM. - The check for queued_count == dequeued_count ensures the worker threads - are all idle (i.e. all events have been executed). + The check also ensures the worker threads + are all practically idle (i.e. all user events have been executed). 
*/ if ((unlikely(rli->last_master_timestamp == 0) || - (rli->sql_thread_caught_up && - (rli->last_inuse_relaylog->queued_count == - rli->last_inuse_relaylog->dequeued_count))) && + rli->are_sql_threads_caught_up()) && event_can_update_last_master_timestamp(ev)) { /* @@ -4153,6 +4141,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli, rli->last_master_timestamp= ev->when; } rli->sql_thread_caught_up= false; + rli->unset_worker_threads_caught_up(); } int res= rli->parallel.do_event(serial_rgi, ev, event_size); @@ -4165,6 +4154,12 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli, rli->event_relay_log_pos= rli->future_event_relay_log_pos; if (res >= 0) { + DBUG_EXECUTE_IF("pause_sql_thread_on_fde", + if (typ == FORMAT_DESCRIPTION_EVENT) + DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN( + "now WAIT_FOR main_sql_thread_continue" + ))); + ); #ifdef WITH_WSREP wsrep_after_statement(thd); #endif /* WITH_WSREP */ @@ -4193,15 +4188,6 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli, { Gtid_log_event *gev= static_cast(ev); -#ifdef ENABLED_DEBUG_SYNC - DBUG_EXECUTE_IF( - "pause_sql_thread_on_relay_fde_after_trans", - { - DBUG_SET("-d,pause_sql_thread_on_relay_fde_after_trans"); - DBUG_SET("+d,pause_sql_thread_on_next_relay_fde"); - }); -#endif - /* For GTID, allocate a new sub_id for the given domain_id. The sub_id must be allocated in increasing order of binlog order. 
@@ -4353,16 +4339,13 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli, wsrep_after_statement(thd); #endif /* WITH_WSREP */ #ifdef ENABLED_DEBUG_SYNC - DBUG_EXECUTE_IF( - "pause_sql_thread_on_next_relay_fde", - if (ev && typ == FORMAT_DESCRIPTION_EVENT && - ((Format_description_log_event *) ev)->is_relay_log_event()) { - DBUG_ASSERT(!debug_sync_set_action( - thd, - STRING_WITH_LEN( - "now SIGNAL paused_on_fde WAIT_FOR sql_thread_continue"))); - DBUG_SET("-d,pause_sql_thread_on_next_relay_fde"); - }); + // Note: Parallel Replication does not hit this point. + DBUG_EXECUTE_IF("pause_sql_thread_on_fde", + if (typ == FORMAT_DESCRIPTION_EVENT) + DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN( + "now SIGNAL paused_on_fde WAIT_FOR sql_thread_continue" + ))); + ); #endif DBUG_RETURN(exec_res); @@ -7096,9 +7079,9 @@ static IO_CACHE *reopen_relay_log(Relay_log_info *rli, const char **errmsg) /** Reads next event from the relay log. Should be called from the - slave IO thread. + slave SQL thread. - @param rli Relay_log_info structure for the slave IO thread. + @param rgi rpl_group_info structure for the slave SQL thread. @return The event read, or NULL on error. 
If an error occurs, the error is reported through the sql_print_information() or diff --git a/sql/sp_head.cc b/sql/sp_head.cc index dbfe6aa6995..6e64f6d8833 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -601,8 +601,7 @@ sp_head::sp_head(MEM_ROOT *mem_root_arg, sp_package *parent, my_init_dynamic_array(key_memory_sp_head_main_root, &m_instr, sizeof(sp_instr *), 16, 8, MYF(0)); my_hash_init(key_memory_sp_head_main_root, &m_sptabs, - Lex_ident_routine::charset_info(), - 0, 0, 0, sp_table_key, 0, 0); + table_alias_charset, 0, 0, 0, sp_table_key, 0, 0); my_hash_init(key_memory_sp_head_main_root, &m_sroutines, Lex_ident_routine::charset_info(), 0, 0, 0, sp_sroutine_key, 0, 0); diff --git a/sql/sp_instr.cc b/sql/sp_instr.cc index ff217750a6d..88afd241524 100644 --- a/sql/sp_instr.cc +++ b/sql/sp_instr.cc @@ -41,6 +41,42 @@ static int cmp_rqp_locations(const void *a_, const void *b_) } +/** + Traverse the list of Item_param instances created on the first parsing of + SP instruction's statement and put them back into sp_lex_instr->free list + for releasing them on deallocating statement's resources to avoid + memory leaks. +*/ + +void +sp_lex_instr::put_back_item_params(THD *thd, LEX *lex, + const List& param_values) +{ + /* + Instance of Item_param must be ignored on re-parsing a statement + of failed SP instruction, therefore lex->param_list must be empty. + Instance of the class Item_param created on first (initial) parsing of + Prepared Statement is used for whole its life. + */ + DBUG_ASSERT(lex->param_list.is_empty()); + + for (auto it= param_values.begin(); + it != param_values.end(); ++it) + { + /* + Put retained instances of Item_param back into sp_lex_instr::free_list + to avoid leaking them. Original ordering of Item_param objects + is preserved since param_values contains items in reverse order. 
+ */ + Item_param *param_for_adding_to_free_list= it.operator->(); + + Item *prev_head= free_list; + free_list= param_for_adding_to_free_list; + param_for_adding_to_free_list->next= prev_head; + } +} + + /* StoredRoutinesBinlogging This paragraph applies only to statement-based binlogging. Row-based @@ -609,14 +645,30 @@ void sp_lex_instr::get_query(String *sql_query) const } -void sp_lex_instr::cleanup_before_parsing(enum_sp_type sp_type) +List +sp_lex_instr::cleanup_before_parsing(enum_sp_type sp_type) { Item *current= free_list; + List param_values{}; while (current) { Item *next= current->next; - current->delete_self(); + + if (current->is_stored_routine_parameter()) + /* + `current` points to an instance of the class Item_param. + Place an instance of the class Item_param into the list `param_values` + and skip the item in free_list (don't invoke the method delete_self() + on it). Since the `free_list` stores items in reverse order of creation + (that is the last created item is the one pointed by the `free_list`), + place items in the list `param_values` using push_front to save + original ordering of items + */ + param_values.push_front((Item_param*)current); + else + current->delete_self(); + current= next; } @@ -629,6 +681,8 @@ void sp_lex_instr::cleanup_before_parsing(enum_sp_type sp_type) dangling references. */ m_cur_trigger_stmt_items.empty(); + + return param_values; } @@ -787,10 +841,13 @@ LEX* sp_lex_instr::parse_expr(THD *thd, sp_head *sp, LEX *sp_instr_lex) m_cur_trigger_stmt_items.first->next_trig_field_list; /* - Clean up items owned by this SP instruction. + Clean up items owned by this SP instruction except instances of Item_param. + `sp_statement_param_values` stores instances of the class Item_param + associated with the SP instruction's statement before the statement + has been re-parsed. 
*/ - cleanup_before_parsing(sp->m_handler->type()); - + List sp_statement_param_values= + cleanup_before_parsing(sp->m_handler->type()); DBUG_ASSERT(mem_root != thd->mem_root); /* Back up the current free_list pointer and reset it to nullptr. @@ -829,10 +886,17 @@ LEX* sp_lex_instr::parse_expr(THD *thd, sp_head *sp, LEX *sp_instr_lex) if (parser_state.init(thd, sql_query.c_ptr(), sql_query.length())) return nullptr; + /* + Direct the parser to handle the '?' symbol in special way, that is as + a positional parameter inside a prepared statement. + */ + parser_state.m_lip.stmt_prepare_mode= true; + // Create a new LEX and initialize it. LEX *lex_saved= thd->lex; Item **cursor_free_list= nullptr; + st_lex_local *lex_local= nullptr; /* sp_instr_lex != nullptr for cursor relating SP instructions (sp_instr_cpush, @@ -840,7 +904,11 @@ LEX* sp_lex_instr::parse_expr(THD *thd, sp_head *sp, LEX *sp_instr_lex) */ if (sp_instr_lex == nullptr) { - thd->lex= new (thd->mem_root) st_lex_local; + lex_local= new (thd->mem_root) st_lex_local; + thd->lex= lex_local; + + lex_local->sp_statement_param_values= std::move(sp_statement_param_values); + lex_local->param_values_it= lex_local->sp_statement_param_values.begin(); lex_start(thd); if (sp->m_handler->type() == SP_TYPE_TRIGGER) { @@ -916,7 +984,17 @@ LEX* sp_lex_instr::parse_expr(THD *thd, sp_head *sp, LEX *sp_instr_lex) const char *m_tmp_query_bak= sp->m_tmp_query; sp->m_tmp_query= sql_query.c_ptr(); + /* + Hint the parser that re-parsing of a failed SP instruction is in progress + and instances of the class Item_param associated with SP instruction + should be handled carefully (re-used on re-parsing the instruction's + statement). 
+ @sa param_push_or_clone + @sa LEX::add_placeholder + */ + thd->reparsing_sp_stmt= true; bool parsing_failed= parse_sql(thd, &parser_state, nullptr); + thd->reparsing_sp_stmt= false; sp->m_tmp_query= m_tmp_query_bak; thd->m_digest= parent_digest; @@ -939,12 +1017,17 @@ LEX* sp_lex_instr::parse_expr(THD *thd, sp_head *sp, LEX *sp_instr_lex) */ *cursor_free_list= thd->free_list; else + { /* Assign the list of items created on re-parsing the statement to the current stored routine's instruction. */ free_list= thd->free_list; + put_back_item_params(thd, thd->lex, + lex_local->sp_statement_param_values); + } + thd->free_list= nullptr; } diff --git a/sql/sp_instr.h b/sql/sp_instr.h index 0b087d0cff7..6317b3913b4 100644 --- a/sql/sp_instr.h +++ b/sql/sp_instr.h @@ -500,8 +500,12 @@ private: /** Clean up items previously created on behalf of the current instruction. + + @return a list of Item_param instances representing position parameters + specified for the instruction that is a part of a prepared + statement */ - void cleanup_before_parsing(enum_sp_type sp_type); + List cleanup_before_parsing(enum_sp_type sp_type); /** @@ -524,6 +528,10 @@ private: bool setup_memroot_for_reparsing(sp_head *sphead, bool *new_memroot_allocated); + + void put_back_item_params(THD *thd, LEX *lex, + const List& param_values); + }; diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 17c18e492f6..1abaa838043 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1659,7 +1659,7 @@ class User_table_json: public User_table set_int_value("version_id", (longlong) MYSQL_VERSION_ID); } const char *unsafe_str(const char *s) const - { return s[0] ? s : NULL; } + { return s ? (s[0] ? 
s : NULL) : NULL; } SSL_type get_ssl_type () const override { return (SSL_type)get_int_value("ssl_type"); } @@ -1758,6 +1758,8 @@ class User_table_json: public User_table if (get_value(key, JSV_STRING, &value_start, &value_len)) return ""; char *ptr= (char*)alloca(value_len); + if (!ptr) + return NULL; int len= json_unescape(m_table->field[2]->charset(), (const uchar*)value_start, (const uchar*)value_start + value_len, @@ -8526,9 +8528,17 @@ bool check_grant(THD *thd, privilege_t want_access, TABLE_LIST *tables, Direct SELECT of a sequence table doesn't set t_ref->sequence, so privileges will be checked normally, as for any table. */ - if (t_ref->sequence && - !(want_access & ~(SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL))) - continue; + if (t_ref->sequence) + { + if (!(want_access & ~(SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL))) + continue; + /* + If it is ALTER..SET DEFAULT= nextval(sequence), also defer checks + until ::fix_fields(). + */ + if (tl != tables && want_access == ALTER_ACL) + continue; + } const ACL_internal_table_access *access= get_cached_table_access(&t_ref->grant.m_internal, @@ -14410,11 +14420,11 @@ static int server_mpvio_write_packet(MYSQL_PLUGIN_VIO *param, res= send_server_handshake_packet(mpvio, (char*) packet, packet_len); else if (mpvio->status == MPVIO_EXT::RESTART) res= send_plugin_request_packet(mpvio, packet, packet_len); - else if (packet_len > 0 && (*packet == 1 || *packet == 255 || *packet == 254)) + else if (packet_len > 0 && (*packet < 2 || *packet > 253)) { /* - we cannot allow plugin data packet to start from 255 or 254 - - as the client will treat it as an error or "change plugin" packet. + we cannot allow plugin data packet to start from 0, 255 or 254 - + as the client will treat it as an OK, ERROR or "change plugin" packet. We'll escape these bytes with \1. Consequently, we have to escape \1 byte too. 
*/ diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc index bcbc656b2f3..e00be250e31 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -73,9 +73,8 @@ static bool admin_recreate_table(THD *thd, TABLE_LIST *table_list, /* Ignore if there is more than one table in the list */ save_next_global= table_list->next_global; table_list->next_global= 0; - result_code= (thd->open_temporary_tables(table_list) || - mysql_recreate_table(thd, table_list, recreate_info, - table_copy)); + result_code= thd->check_and_open_tmp_table(table_list) || + mysql_recreate_table(thd, table_list, recreate_info, table_copy); table_list->next_global= save_next_global; reenable_binlog(thd); /* @@ -577,6 +576,9 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, Disable_wsrep_on_guard wsrep_on_guard(thd, disable_wsrep_on); #endif /* WITH_WSREP */ + if (thd->transaction->xid_state.check_has_uncommitted_xa()) + DBUG_RETURN(TRUE); + fill_check_table_metadata_fields(thd, &field_list); if (protocol->send_result_set_metadata(&field_list, diff --git a/sql/sql_base.cc b/sql/sql_base.cc index df54ab26f5e..af2578a78bc 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -4634,10 +4634,7 @@ bool open_tables(THD *thd, const DDL_options_st &options, if (!table->schema_table) { if (thd->transaction->xid_state.check_has_uncommitted_xa()) - { - thd->transaction->xid_state.er_xaer_rmfail(); DBUG_RETURN(true); - } else break; } @@ -5205,7 +5202,7 @@ bool DML_prelocking_strategy::handle_table(THD *thd, DBUG_ASSERT(table_list->lock_type >= TL_FIRST_WRITE || thd->lex->default_used); - if (table_list->trg_event_map) + if (table_list->trg_event_map && table_list->lock_type >= TL_FIRST_WRITE) { if (table->triggers) { diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 1aff7b51a02..5a3e1f5f4da 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -60,6 +60,7 @@ #include "lock.h" #include "wsrep_mysqld.h" #include "sql_connect.h" +#include "sql_cursor.h" //Select_materialize #ifdef WITH_WSREP #include 
"wsrep_thd.h" #include "wsrep_trans_observer.h" @@ -910,7 +911,6 @@ THD::THD(my_thread_id id, bool is_wsrep_applier) #ifdef WITH_WSREP mysql_cond_init(key_COND_wsrep_thd, &COND_wsrep_thd, NULL); - wsrep_info[sizeof(wsrep_info) - 1] = '\0'; /* make sure it is 0-terminated */ #endif /* Call to init() below requires fully initialized Open_tables_state. */ reset_open_tables_state(); @@ -8899,6 +8899,12 @@ void Charset_loader_server::raise_not_applicable_error(const char *cs, } +bool THD::is_cursor_execution() const +{ + return dynamic_cast(this->lex->result); +} + + LEX_CSTRING make_string(THD *thd, const char *start_ptr, const char *end_ptr) { diff --git a/sql/sql_class.h b/sql/sql_class.h index a6b9447af63..fa2ca6cbe52 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -5694,6 +5694,7 @@ public: TMP_TABLE_SHARE *find_tmp_table_share(const char *key, size_t key_length); bool open_temporary_table(TABLE_LIST *tl); + bool check_and_open_tmp_table(TABLE_LIST *tl); bool open_temporary_tables(TABLE_LIST *tl); bool close_temporary_tables(); @@ -5798,7 +5799,6 @@ public: uint32 wsrep_rand; rpl_group_info *wsrep_rgi; bool wsrep_converted_lock_session; - char wsrep_info[128]; /* string for dynamic proc info */ ulong wsrep_retry_counter; // of autocommit bool wsrep_PA_safe; char* wsrep_retry_query; @@ -6103,6 +6103,18 @@ public: return false; return !is_set_timestamp_forbidden(this); } + + /** + @brief + Return true if current statement uses cursor protocol for execution. + + @details + Cursor protocol execution is determined by checking if lex->result is a + Select_materialize object, which is exclusively used by the server for + cursor result set materialization. + */ + bool is_cursor_execution() const; + /* Return true if we are in stored procedure, not in a function or trigger. 
@@ -6113,6 +6125,8 @@ public: !(in_sub_stmt & (SUB_STMT_FUNCTION | SUB_STMT_TRIGGER))); } + bool reparsing_sp_stmt= {false}; + /* Data and methods for bulk multiple unit result reporting */ DYNAMIC_ARRAY *unit_results; void stop_collecting_unit_results(); diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc index ae5c99353d2..363c363ccd9 100644 --- a/sql/sql_cursor.cc +++ b/sql/sql_cursor.cc @@ -21,72 +21,6 @@ #include "probes_mysql.h" #include "sql_parse.h" // mysql_execute_command -/**************************************************************************** - Declarations. -****************************************************************************/ - -/** - Materialized_cursor -- an insensitive materialized server-side - cursor. The result set of this cursor is saved in a temporary - table at open. The cursor itself is simply an interface for the - handler of the temporary table. -*/ - -class Materialized_cursor: public Server_side_cursor -{ - MEM_ROOT main_mem_root; - /* A fake unit to supply to select_send when fetching */ - SELECT_LEX_UNIT fake_unit; - TABLE *table; - List item_list; - ulong fetch_limit; - ulong fetch_count; - bool is_rnd_inited; -public: - Materialized_cursor(select_result *result, TABLE *table); - - int send_result_set_metadata(THD *thd, List &send_result_set_metadata); - bool is_open() const override { return table != 0; } - int open(JOIN *join __attribute__((unused))) override; - void fetch(ulong num_rows) override; - void close() override; - bool export_structure(THD *thd, Row_definition_list *defs) override - { - return table->export_structure(thd, defs); - } - ~Materialized_cursor() override; - - void on_table_fill_finished(); -}; - - -/** - Select_materialize -- a mediator between a cursor query and the - protocol. In case we were not able to open a non-materialzed - cursor, it creates an internal temporary HEAP table, and insert - all rows into it. When the table reaches max_heap_table_size, - it's converted to a MyISAM table. 
Later this table is used to - create a Materialized_cursor. -*/ - -class Select_materialize: public select_unit -{ - select_result *result; /**< the result object of the caller (PS or SP) */ -public: - Materialized_cursor *materialized_cursor; - Select_materialize(THD *thd_arg, select_result *result_arg): - select_unit(thd_arg), result(result_arg), materialized_cursor(0) {} - bool send_result_set_metadata(List &list, uint flags) override; - bool send_eof() override { return false; } - bool view_structure_only() const override - { - return result->view_structure_only(); - } -}; - - -/**************************************************************************/ - /** Attempt to open a materialized cursor. diff --git a/sql/sql_cursor.h b/sql/sql_cursor.h index 5afb7d81e9b..e447ca512be 100644 --- a/sql/sql_cursor.h +++ b/sql/sql_cursor.h @@ -64,6 +64,66 @@ public: }; +/** + Materialized_cursor -- an insensitive materialized server-side + cursor. The result set of this cursor is saved in a temporary + table at open. The cursor itself is simply an interface for the + handler of the temporary table. +*/ + +class Materialized_cursor: public Server_side_cursor +{ + MEM_ROOT main_mem_root; + /* A fake unit to supply to select_send when fetching */ + SELECT_LEX_UNIT fake_unit; + TABLE *table; + List item_list; + ulong fetch_limit; + ulong fetch_count; + bool is_rnd_inited; +public: + Materialized_cursor(select_result *result, TABLE *table); + + int send_result_set_metadata(THD *thd, List &send_result_set_metadata); + bool is_open() const override { return table != 0; } + int open(JOIN *join __attribute__((unused))) override; + void fetch(ulong num_rows) override; + void close() override; + bool export_structure(THD *thd, Row_definition_list *defs) override + { + return table->export_structure(thd, defs); + } + ~Materialized_cursor() override; + + void on_table_fill_finished(); +}; + + +/** + Select_materialize -- a mediator between a cursor query and the + protocol. 
In case we were not able to open a non-materialzed + cursor, it creates an internal temporary HEAP table, and insert + all rows into it. When the table reaches max_heap_table_size, + it's converted to a MyISAM table. Later this table is used to + create a Materialized_cursor. +*/ + +class Select_materialize: public select_unit +{ + select_result *result; /**< the result object of the caller (PS or SP) */ +public: + Materialized_cursor *materialized_cursor; + Select_materialize(THD *thd_arg, select_result *result_arg): + select_unit(thd_arg), result(result_arg), materialized_cursor(0) {} + bool send_result_set_metadata(List &list, uint flags) override; + bool send_eof() override { return false; } + bool view_structure_only() const override + { + return result->view_structure_only(); + } +}; + + int mysql_open_cursor(THD *thd, select_result *result, Server_side_cursor **res); diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index cb37708a1e4..25f94609518 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1698,6 +1698,9 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list, DBUG_RETURN(insert_view_fields(thd, &fields, table_list)); } + if (table_list->table->check_sequence_privileges(thd)) + DBUG_RETURN(TRUE); + DBUG_RETURN(FALSE); } diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index e56ef99fef1..b175d8d80c3 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1,5 +1,5 @@ -/* Copyright (c) 2000, 2019, Oracle and/or its affiliates. - Copyright (c) 2009, 2022, MariaDB Corporation. +/* Copyright (c) 2000, 2025, Oracle and/or its affiliates. + Copyright (c) 2009, 2025, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -2568,6 +2568,8 @@ int Lex_input_stream::lex_one_token(YYSTYPE *yylval, THD *thd) state=MY_LEX_CHAR; break; case MY_LEX_END: + /* Unclosed special comments result in a syntax error */ + if (in_comment == DISCARD_COMMENT) return (ABORT_SYM); next_state= MY_LEX_END; return(0); // We found end of input last time @@ -8357,9 +8359,19 @@ Item *LEX::make_item_sysvar(THD *thd, static bool param_push_or_clone(THD *thd, LEX *lex, Item_param *item) { - return !lex->clone_spec_offset ? - lex->param_list.push_back(item, thd->mem_root) : - item->add_as_clone(thd); + if (lex->clone_spec_offset) + return item->add_as_clone(thd); + else + { + if (thd->reparsing_sp_stmt) + /* + Don't put an instance of Item_param in case a SP statement + being re-parsed. + */ + return false; + + return lex->param_list.push_back(item, thd->mem_root); + } } @@ -8377,8 +8389,40 @@ Item_param *LEX::add_placeholder(THD *thd, const LEX_CSTRING *name, return NULL; } Query_fragment pos(thd, sphead, start, end); - Item_param *item= new (thd->mem_root) Item_param(thd, name, - pos.pos(), pos.length()); + Item_param *item; + /* + Check whether re-parsing of a failed SP instruction is in progress. + In context of the method LEX::add_placeholder, the failed instruction + being re-parsed is a part of compound statement enclosed into + the BEGIN/END clauses. + */ + if (thd->reparsing_sp_stmt) + { + /* + Get a saved Item_param and reuse it instead of creating a new one. + st_lex_local stores instances of the class Item_param that were saved + before cleaning up SP instruction's free_list. So, the same instance of + Item_param will be used on every re-parsing of failed SP instruction + for each specific positional parameter. 
+ */ + st_lex_local *lex= (st_lex_local*)this; + DBUG_ASSERT(lex->param_values_it != lex->sp_statement_param_values.end()); + /* + For release build emit internal error in case the assert condition + fails + */ + if (lex->param_values_it == lex->sp_statement_param_values.end()) + { + my_error(ER_INTERNAL_ERROR, MYF(0), "no more Item_param for re-bind"); + return nullptr; + } + + item= lex->param_values_it.operator ->(); + lex->param_values_it++; + } + else + item= new (thd->mem_root) Item_param(thd, name, + pos.pos(), pos.length()); if (unlikely(!item) || unlikely(param_push_or_clone(thd, this, item))) { my_error(ER_OUT_OF_RESOURCES, MYF(0)); diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 63f3ea651d2..8ee61ca104c 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -5076,6 +5076,16 @@ digest_reduce_token(sql_digest_state *state, uint token_left, uint token_right); struct st_lex_local: public LEX, public Sql_alloc { + /** + List of Item_param instances that should be re-used on re-parsing of + a SP instruction's statement + */ + List sp_statement_param_values; + /** + Iterator to the next Item_param in the list above to be processed by + the method LEX::add_placeholder() + */ + List::iterator param_values_it; }; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 675ea86105e..c75deac31bf 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -54,6 +54,7 @@ #include "sql_test.h" // mysql_print_status #include "sql_select.h" // handle_select, mysql_select, // mysql_explain_union +#include "sql_cursor.h" // Select_materialzie #include "sql_load.h" // mysql_load #include "sql_servers.h" // create_servers, alter_servers, // drop_servers, servers_reload @@ -3067,11 +3068,6 @@ static bool do_execute_sp(THD *thd, sp_head *sp) ha_rows select_limit= thd->variables.select_limit; thd->variables.select_limit= HA_POS_ERROR; - /* - Reset current_select as it may point to random data as a - result of previous parsing. 
- */ - thd->lex->current_select= NULL; thd->lex->in_sum_func= 0; // For Item_field::fix_fields() /* @@ -5992,6 +5988,9 @@ finish: one of storage engines (e.g. due to deadlock). Rollback transaction in all storage engines including binary log. */ + auto &xid_state= thd->transaction->xid_state; + if (xid_state.is_explicit_XA()) + xid_state.set_rollback_only(); trans_rollback_implicit(thd); thd->release_transactional_locks(); } @@ -6208,7 +6207,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) } } /* Count number of empty select queries */ - if (!thd->get_sent_row_count() && !res) + if (!thd->is_cursor_execution() && !thd->get_sent_row_count() && !res) status_var_increment(thd->status_var.empty_queries); else status_var_add(thd->status_var.rows_sent, thd->get_sent_row_count()); diff --git a/sql/sql_parse.h b/sql/sql_parse.h index f280d3cce8a..2e5a875906d 100644 --- a/sql/sql_parse.h +++ b/sql/sql_parse.h @@ -189,20 +189,4 @@ check_table_access(THD *thd, privilege_t requirements,TABLE_LIST *tables, { return false; } #endif /*NO_EMBEDDED_ACCESS_CHECKS*/ - -/* - Allocating memory and *also* using it (reading and - writing from it) because some build instructions cause - compiler to optimize out stack_used_up. Since alloca() - here depends on stack_used_up, it doesnt get executed - correctly and causes json_debug_nonembedded to fail - ( --error ER_STACK_OVERRUN_NEED_MORE does not occur). 
-*/ -#define ALLOCATE_MEM_ON_STACK(A) do \ - { \ - uchar *array= (uchar*)alloca(A); \ - bzero(array, A); \ - my_checksum(0, array, A); \ - } while(0) - #endif /* SQL_PARSE_INCLUDED */ diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 5d1cf53afc1..4d2baef9605 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -7623,7 +7623,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, ERROR_INJECT("convert_partition_1") || write_log_drop_shadow_frm(lpt) || ERROR_INJECT("convert_partition_2") || - mysql_write_frm(lpt, WFRM_WRITE_SHADOW) || + mysql_write_frm(lpt, WFRM_WRITE_SHADOW|WFRM_ALTER_INFO_PREPARED) || ERROR_INJECT("convert_partition_3") || wait_while_table_is_used(thd, table, HA_EXTRA_NOT_USED) || ERROR_INJECT("convert_partition_4") || diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 58216a4ce6e..e4cb7f3f719 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -3513,6 +3513,9 @@ void mysqld_stmt_fetch(THD *thd, char *packet, uint packet_length) cursor->fetch(num_rows); + if (!thd->get_sent_row_count()) + status_var_increment(thd->status_var.empty_queries); + if (!cursor->is_open()) { stmt->close_cursor(); @@ -4354,6 +4357,8 @@ Prepared_statement::set_parameters(String *expanded_query, res= set_params_data(this, expanded_query); #endif } + lex->default_used= thd->lex->default_used; + thd->lex->default_used= false; if (res) { my_error(ER_WRONG_ARGUMENTS, MYF(0), diff --git a/sql/sql_select.cc b/sql/sql_select.cc index b1ec6825b16..d92b874d10b 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1316,6 +1316,7 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) } bool timestamps_only= table->table->versioned(VERS_TIMESTAMP); + bool update_this= update_conds; if (vers_conditions.is_set() && vers_conditions.type != SYSTEM_TIME_HISTORY) { @@ -1332,9 +1333,22 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) my_error(ER_VERS_ENGINE_UNSUPPORTED, MYF(0), table->table_name.str); 
DBUG_RETURN(-1); } + if (vers_conditions.has_param) + { + /* + PS parameter in history expression requires processing at execution + stage when parameters has values substituted. So at prepare continue + the loop, but at execution enter update_this. The second execution + is skipped on vers_conditions.type == SYSTEM_TIME_ALL condition. + */ + if (thd->stmt_arena->is_stmt_prepare()) + continue; + DBUG_ASSERT(thd->stmt_arena->is_stmt_execute()); + update_this= true; + } } - if (update_conds) + if (update_this) { vers_conditions.period = &table->table->s->vers; Item *cond= period_get_condition(thd, table, this, &vers_conditions, @@ -1351,6 +1365,8 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) else where= and_items(thd, where, cond); table->where= and_items(thd, table->where, cond); + if (where && vers_conditions.has_param && vers_conditions.delete_history) + prep_where= where->copy_andor_structure(thd); } table->vers_conditions.set_all(); @@ -2294,6 +2310,7 @@ JOIN::optimize_inner() SELECT_LEX *sel= select_lex; if (sel->first_cond_optimization) { + bool error= false; /* The following code will allocate the new items in a permanent MEMROOT for prepared statements and stored procedures. @@ -2311,7 +2328,7 @@ JOIN::optimize_inner() /* Convert all outer joins to inner joins if possible */ conds= simplify_joins(this, join_list, conds, TRUE, FALSE); - add_table_function_dependencies(join_list, table_map(-1)); + add_table_function_dependencies(join_list, table_map(-1), &error); if (thd->is_error() || (!select_lex->leaf_tables_saved && select_lex->save_leaf_tables(thd))) @@ -7333,7 +7350,13 @@ static bool add_key_part(DYNAMIC_ARRAY *keyuse_array, KEY_FIELD *key_field) } } } - if (field->hash_join_is_possible() && + /* + Compressed field cannot be part of a key. For optimizer temporary table + compressed fields are replaced by uncompressed, see + is_optimizer_tmp_table() and Field_*_compressed::make_new_field(). 
+ */ + if (!field->compression_method() && + field->hash_join_is_possible() && (key_field->optimize & KEY_OPTIMIZE_EQ) && key_field->val->used_tables()) { @@ -32129,6 +32152,8 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) } } str->append(STRING_WITH_LEN(" */ ")); + if (join && join->cleaned) // if this join has been cleaned + return; // the select_number printed above is all we have } if (sel_type == SELECT_CMD || @@ -32143,10 +32168,11 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) because temporary tables they pointed on could be freed. */ str->append('#'); - str->append(select_number); + str->append_ulonglong(select_number); return; } + /* First add options */ if (options & SELECT_STRAIGHT_JOIN) str->append(STRING_WITH_LEN("straight_join ")); diff --git a/sql/sql_sequence.cc b/sql/sql_sequence.cc index 7348c49c70d..79ccf5af636 100644 --- a/sql/sql_sequence.cc +++ b/sql/sql_sequence.cc @@ -1127,162 +1127,164 @@ bool Sql_cmd_alter_sequence::execute(THD *thd) SEQUENCE *seq; No_such_table_error_handler no_such_table_handler; DBUG_ENTER("Sql_cmd_alter_sequence::execute"); + { #if defined(HAVE_REPLICATION) - /* No wakeup():s of subsequent commits is allowed in this function. */ - wait_for_commit_raii suspend_wfc(thd); + /* No wakeup():s of subsequent commits is allowed in this function. 
*/ + wait_for_commit_raii suspend_wfc(thd); #endif - if (check_access(thd, ALTER_ACL, first_table->db.str, - &first_table->grant.privilege, - &first_table->grant.m_internal, - 0, 0)) - DBUG_RETURN(TRUE); /* purecov: inspected */ + if (check_access(thd, ALTER_ACL, first_table->db.str, + &first_table->grant.privilege, + &first_table->grant.m_internal, + 0, 0)) + DBUG_RETURN(TRUE); /* purecov: inspected */ - if (check_grant(thd, ALTER_ACL, first_table, FALSE, 1, FALSE)) - DBUG_RETURN(TRUE); /* purecov: inspected */ + if (check_grant(thd, ALTER_ACL, first_table, FALSE, 1, FALSE)) + DBUG_RETURN(TRUE); /* purecov: inspected */ #ifdef WITH_WSREP - if (WSREP(thd) && wsrep_thd_is_local(thd)) - { - const bool used_engine= lex->create_info.used_fields & HA_CREATE_USED_ENGINE; - if (wsrep_check_sequence(thd, new_seq, used_engine)) - DBUG_RETURN(TRUE); - - if (wsrep_to_isolation_begin(thd, first_table->db.str, - first_table->table_name.str, - first_table)) + if (WSREP(thd) && wsrep_thd_is_local(thd)) { - DBUG_RETURN(TRUE); + const bool used_engine= lex->create_info.used_fields & HA_CREATE_USED_ENGINE; + if (wsrep_check_sequence(thd, new_seq, used_engine)) + DBUG_RETURN(TRUE); + + if (wsrep_to_isolation_begin(thd, first_table->db.str, + first_table->table_name.str, + first_table)) + { + DBUG_RETURN(TRUE); + } } - } #endif /* WITH_WSREP */ - if (new_seq->used_fields & seq_field_used_as) - { - /* This should have been prevented during parsing. */ - DBUG_ASSERT(!(new_seq->used_fields - seq_field_used_as)); + if (new_seq->used_fields & seq_field_used_as) + { + /* This should have been prevented during parsing. 
*/ + DBUG_ASSERT(!(new_seq->used_fields - seq_field_used_as)); + + first_table->lock_type= TL_READ_NO_INSERT; + first_table->mdl_request.set_type(MDL_SHARED_NO_WRITE); + Alter_info alter_info; + alter_info.flags= ALTER_CHANGE_COLUMN; + if (new_seq->prepare_sequence_fields(&alter_info.create_list, true)) + DBUG_RETURN(TRUE); + Table_specification_st create_info; + create_info.init(); + create_info.alter_info= &alter_info; + if (if_exists()) + thd->push_internal_handler(&no_such_table_handler); + Recreate_info recreate_info; + error= mysql_alter_table(thd, &null_clex_str, &null_clex_str, + &create_info, first_table, &recreate_info, + &alter_info, 0, (ORDER *) 0, 0, 0); + if (if_exists()) + { + trapped_errors= no_such_table_handler.safely_trapped_errors(); + thd->pop_internal_handler(); + } + /* Do we need to store the sequence value in table share, like below? */ + DBUG_RETURN(error); + } - first_table->lock_type= TL_READ_NO_INSERT; - first_table->mdl_request.set_type(MDL_SHARED_NO_WRITE); - Alter_info alter_info; - alter_info.flags= ALTER_CHANGE_COLUMN; - if (new_seq->prepare_sequence_fields(&alter_info.create_list, true)) - DBUG_RETURN(TRUE); - Table_specification_st create_info; - create_info.init(); - create_info.alter_info= &alter_info; if (if_exists()) thd->push_internal_handler(&no_such_table_handler); - Recreate_info recreate_info; - error= mysql_alter_table(thd, &null_clex_str, &null_clex_str, - &create_info, first_table, &recreate_info, - &alter_info, 0, (ORDER *) 0, 0, 0); + error= open_and_lock_tables(thd, first_table, FALSE, 0); if (if_exists()) { trapped_errors= no_such_table_handler.safely_trapped_errors(); thd->pop_internal_handler(); } - /* Do we need to store the sequence value in table share, like below? 
*/ - DBUG_RETURN(error); - } - - if (if_exists()) - thd->push_internal_handler(&no_such_table_handler); - error= open_and_lock_tables(thd, first_table, FALSE, 0); - if (if_exists()) - { - trapped_errors= no_such_table_handler.safely_trapped_errors(); - thd->pop_internal_handler(); - } - if (unlikely(error)) - { - if (trapped_errors) + if (unlikely(error)) { - StringBuffer tbl_name; - tbl_name.append(&first_table->db); - tbl_name.append('.'); - tbl_name.append(&first_table->table_name); - push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, - ER_UNKNOWN_SEQUENCES, - ER_THD(thd, ER_UNKNOWN_SEQUENCES), - tbl_name.c_ptr_safe()); - my_ok(thd); - DBUG_RETURN(FALSE); + if (trapped_errors) + { + StringBuffer tbl_name; + tbl_name.append(&first_table->db); + tbl_name.append('.'); + tbl_name.append(&first_table->table_name); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_UNKNOWN_SEQUENCES, + ER_THD(thd, ER_UNKNOWN_SEQUENCES), + tbl_name.c_ptr_safe()); + my_ok(thd); + DBUG_RETURN(FALSE); + } + DBUG_RETURN(TRUE); } - DBUG_RETURN(TRUE); - } - table= first_table->table; - seq= table->s->sequence; + table= first_table->table; + seq= table->s->sequence; - seq->write_lock(table); - new_seq->reserved_until= seq->reserved_until; + seq->write_lock(table); + new_seq->reserved_until= seq->reserved_until; - /* Copy from old sequence those fields that the user didn't specified */ - if (!(new_seq->used_fields & seq_field_used_increment)) - new_seq->increment= seq->increment; - /* - We need to assign to foo_from_parser so that things get handled - properly in check_and_adjust() later - */ - if (!(new_seq->used_fields & seq_field_used_min_value)) - new_seq->min_value_from_parser= Longlong_hybrid(seq->min_value, seq->is_unsigned); - if (!(new_seq->used_fields & seq_field_used_max_value)) - new_seq->max_value_from_parser= Longlong_hybrid(seq->max_value, seq->is_unsigned); - if (!(new_seq->used_fields & seq_field_used_start)) - new_seq->start_from_parser= 
Longlong_hybrid(seq->start, seq->is_unsigned); - if (!(new_seq->used_fields & seq_field_used_cache)) - new_seq->cache= seq->cache; - if (!(new_seq->used_fields & seq_field_used_cycle)) - new_seq->cycle= seq->cycle; - /* This should have been prevented during parsing. */ - DBUG_ASSERT(!(new_seq->used_fields & seq_field_used_as)); - new_seq->value_type= seq->value_type; - new_seq->is_unsigned= seq->is_unsigned; - - /* If we should restart from a new value */ - if (new_seq->used_fields & seq_field_used_restart) - { - if (!(new_seq->used_fields & seq_field_used_restart_value)) - new_seq->restart_from_parser= new_seq->start_from_parser; + /* Copy from old sequence those fields that the user didn't specified */ + if (!(new_seq->used_fields & seq_field_used_increment)) + new_seq->increment= seq->increment; /* - Similar to start, we just need to truncate reserved_until and - the errors will be reported in check_and_adjust if truncation - happens on the wrong end. + We need to assign to foo_from_parser so that things get handled + properly in check_and_adjust() later */ - new_seq->reserved_until= - new_seq->truncate_value(new_seq->restart_from_parser); - } + if (!(new_seq->used_fields & seq_field_used_min_value)) + new_seq->min_value_from_parser= Longlong_hybrid(seq->min_value, seq->is_unsigned); + if (!(new_seq->used_fields & seq_field_used_max_value)) + new_seq->max_value_from_parser= Longlong_hybrid(seq->max_value, seq->is_unsigned); + if (!(new_seq->used_fields & seq_field_used_start)) + new_seq->start_from_parser= Longlong_hybrid(seq->start, seq->is_unsigned); + if (!(new_seq->used_fields & seq_field_used_cache)) + new_seq->cache= seq->cache; + if (!(new_seq->used_fields & seq_field_used_cycle)) + new_seq->cycle= seq->cycle; + /* This should have been prevented during parsing. 
*/ + DBUG_ASSERT(!(new_seq->used_fields & seq_field_used_as)); + new_seq->value_type= seq->value_type; + new_seq->is_unsigned= seq->is_unsigned; - /* Let check_and_adjust think all fields are used */ - new_seq->used_fields= ~0; - if (new_seq->check_and_adjust(thd, 0)) - { - my_error(ER_SEQUENCE_INVALID_DATA, MYF(0), - first_table->db.str, - first_table->table_name.str); - error= 1; + /* If we should restart from a new value */ + if (new_seq->used_fields & seq_field_used_restart) + { + if (!(new_seq->used_fields & seq_field_used_restart_value)) + new_seq->restart_from_parser= new_seq->start_from_parser; + /* + Similar to start, we just need to truncate reserved_until and + the errors will be reported in check_and_adjust if truncation + happens on the wrong end. + */ + new_seq->reserved_until= + new_seq->truncate_value(new_seq->restart_from_parser); + } + + /* Let check_and_adjust think all fields are used */ + new_seq->used_fields= ~0; + if (new_seq->check_and_adjust(thd, 0)) + { + my_error(ER_SEQUENCE_INVALID_DATA, MYF(0), + first_table->db.str, + first_table->table_name.str); + error= 1; + seq->write_unlock(table); + goto end; + } + + if (likely(!(error= new_seq->write(table, 1)))) + { + /* Store the sequence values in table share */ + seq->copy(new_seq); + } + else + table->file->print_error(error, MYF(0)); seq->write_unlock(table); - goto end; + if (trans_commit_stmt(thd)) + error= 1; + if (trans_commit_implicit(thd)) + error= 1; + DBUG_EXECUTE_IF("hold_worker_on_schedule", + { + /* delay binlogging of a parent trx in rpl_parallel_seq */ + my_sleep(100000); + }); } - - if (likely(!(error= new_seq->write(table, 1)))) - { - /* Store the sequence values in table share */ - seq->copy(new_seq); - } - else - table->file->print_error(error, MYF(0)); - seq->write_unlock(table); - if (trans_commit_stmt(thd)) - error= 1; - if (trans_commit_implicit(thd)) - error= 1; - DBUG_EXECUTE_IF("hold_worker_on_schedule", - { - /* delay binlogging of a parent trx in rpl_parallel_seq 
*/ - my_sleep(100000); - }); if (likely(!error)) error= write_bin_log(thd, 1, thd->query(), thd->query_length()); if (likely(!error)) diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 9c72fd07a86..ebaf224c602 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -721,16 +721,30 @@ uint build_table_shadow_filename(char *buff, size_t bufflen, lpt Struct carrying many parameters needed for this method flags Flags as defined below - WFRM_INITIAL_WRITE If set we need to prepare table before - creating the frm file - WFRM_INSTALL_SHADOW If set we should install the new frm - WFRM_KEEP_SHARE If set we know that the share is to be + WFRM_WRITE_SHADOW If set, we need to prepare the table before + creating the frm file. Note it is possible that + mysql_write_frm was already called with + WFRM_WRITE_CONVERTED_TO, which would have + already called mysql_prepare_create_table, in + which case, we can skip that specific step in + the preparation. + WFRM_INSTALL_SHADOW If set, we should install the new frm + WFRM_KEEP_SHARE If set, we know that the share is to be retained and thus we should ensure share object is correct, if not set we don't set the new partition syntax string since we know the share object is destroyed. - WFRM_PACK_FRM If set we should pack the frm file and delete - the frm file + WFRM_WRITE_CONVERTED_TO Similar to WFRM_WRITE_SHADOW but for + ALTER TABLE ... CONVERT PARTITION .. TO TABLE, + i.e., we need to prepare the table before + creating the frm file. Though in this case, + mysql_write_frm will be called again with + WFRM_WRITE_SHADOW, where the + prepare_create_table step will be skipped. + WFRM_BACKUP_ORIGINAL If set, will back up the existing frm file + before creating the new frm file. + WFRM_ALTER_INFO_PREPARED If set, the prepare_create_table step should be + skipped when WFRM_WRITE_SHADOW is set. 
RETURN VALUES TRUE Error @@ -775,7 +789,15 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) strxmov(shadow_frm_name, shadow_path, reg_ext, NullS); if (flags & WFRM_WRITE_SHADOW) { - if (mysql_prepare_create_table(lpt->thd, lpt->create_info, lpt->alter_info, + /* + It is possible mysql_prepare_create_table was already called in our + create/alter_info context and we don't need to call it again. That is, if + in the context of `ALTER TABLE ... CONVERT PARTITION .. TO TABLE` then + mysql_prepare_create_table would have already been called through a prior + invocation of mysql_write_frm with flag MFRM_WRITE_CONVERTED_TO. + */ + if (!(flags & WFRM_ALTER_INFO_PREPARED) && + mysql_prepare_create_table(lpt->thd, lpt->create_info, lpt->alter_info, &lpt->db_options, lpt->table->file, &lpt->key_info_buffer, &lpt->key_count, C_ALTER_TABLE)) @@ -850,6 +872,11 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) ERROR_INJECT("create_before_create_frm")) DBUG_RETURN(TRUE); + /* + For WFRM_WRITE_CONVERTED_TO, we always need to call + mysql_prepare_create_table + */ + DBUG_ASSERT(!(flags & WFRM_ALTER_INFO_PREPARED)); if (mysql_prepare_create_table(thd, create_info, lpt->alter_info, &lpt->db_options, file, &lpt->key_info_buffer, &lpt->key_count, @@ -3068,6 +3095,11 @@ my_bool init_key_info(THD *thd, Alter_info *alter_info, for (Key &key: alter_info->key_list) { + /* + Ensure we aren't re-initializing keys that were already initialized. 
+ */ + DBUG_ASSERT(!key.length); + if (key.type == Key::FOREIGN_KEY) continue; @@ -3575,8 +3607,6 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info, st_plugin_int *index_plugin= hton2plugin[create_info->db_type->slot]; ha_create_table_option *index_options= create_info->db_type->index_options; - bool is_hash_field_needed= key->key_create_info.algorithm - == HA_KEY_ALG_LONG_HASH; if (key->type == Key::IGNORE_KEY) { /* ignore redundant keys */ @@ -3587,6 +3617,9 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info, break; } + bool is_hash_field_needed= key->key_create_info.algorithm + == HA_KEY_ALG_LONG_HASH; + if (key_check_without_overlaps(thd, create_info, alter_info, *key)) DBUG_RETURN(true); @@ -3796,6 +3829,11 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info, auto_increment_key= sql_field; } + /* For SPATIAL, FULLTEXT and HASH indexes (anything other than B-tree), + ignore the ASC/DESC attribute of columns. 
*/ + if (key_info->algorithm > HA_KEY_ALG_BTREE) + column->asc= true; // ignore DESC + key_part_info->fieldnr= field; key_part_info->offset= (uint16) sql_field->offset; key_part_info->key_type=sql_field->pack_flag; @@ -4712,6 +4750,12 @@ int create_table_impl(THD *thd, goto err; } + TABLE_LIST table_list; + table_list.init_one_table(&db, &table_name, 0, TL_WRITE_ALLOW_WRITE); + int log_table= check_if_log_table(&table_list); + if (log_table && create_info->check_if_valid_log_table()) + goto err; + handlerton *db_type; if (!internal_tmp_table && ha_table_exists(thd, &db, &table_name, @@ -4728,12 +4772,13 @@ int create_table_impl(THD *thd, { (void) delete_statistics_for_table(thd, &db, &table_name); - TABLE_LIST table_list; - table_list.init_one_table(&db, &table_name, 0, TL_WRITE_ALLOW_WRITE); table_list.table= create_info->table; - if (check_if_log_table(&table_list, TRUE, "CREATE OR REPLACE")) + if (log_table && logger.is_log_table_enabled(log_table)) + { + my_error(ER_BAD_LOG_STATEMENT, MYF(0), "CREATE OR REPLACE"); goto err; + } /* Rollback the empty transaction started in mysql_create_table() @@ -6062,6 +6107,8 @@ int mysql_discard_or_import_tablespace(THD *thd, { Alter_table_prelocking_strategy alter_prelocking_strategy; int error; + TABLE *table; + enum_mdl_type mdl_downgrade= MDL_NOT_INITIALIZED; DBUG_ENTER("mysql_discard_or_import_tablespace"); mysql_audit_alter_table(thd, table_list); @@ -6094,24 +6141,38 @@ int mysql_discard_or_import_tablespace(THD *thd, DBUG_RETURN(-1); } - DBUG_ASSERT(table_list->table->s->hlindexes() <= 1); - for (uint i= table_list->table->s->keys; i < table_list->table->s->total_keys; i++) + table= table_list->table; + DBUG_ASSERT(table->mdl_ticket || table->s->tmp_table); + if (table->mdl_ticket && table->mdl_ticket->get_type() < MDL_EXCLUSIVE) { - if (table_list->table->hlindex_open(i)) + DBUG_ASSERT(thd->locked_tables_mode); + mdl_downgrade= table->mdl_ticket->get_type(); + if 
(thd->mdl_context.upgrade_shared_lock(table->mdl_ticket, MDL_EXCLUSIVE, + thd->variables.lock_wait_timeout)) + { + error= 1; + goto err; + } + } + + DBUG_ASSERT(table->s->hlindexes() <= 1); + for (uint i= table->s->keys; i < table->s->total_keys; i++) + { + if (table->hlindex_open(i)) { thd->tablespace_op= FALSE; DBUG_RETURN(-1); } } - for (uint i= table_list->table->s->keys; i < table_list->table->s->total_keys; i++) + for (uint i= table->s->keys; i < table->s->total_keys; i++) { - error= table_list->table->hlindex->file-> + error= table->hlindex->file-> ha_discard_or_import_tablespace(discard); if (unlikely(error)) goto err; } - error= table_list->table->file->ha_discard_or_import_tablespace(discard); + error= table->file->ha_discard_or_import_tablespace(discard); THD_STAGE_INFO(thd, stage_end); @@ -6119,7 +6180,7 @@ int mysql_discard_or_import_tablespace(THD *thd, goto err; if (discard) - table_list->table->s->tdc->flush(thd, true); + table->s->tdc->flush(thd, true); /* The 0 in the call below means 'not in a transaction', which means @@ -6137,13 +6198,16 @@ int mysql_discard_or_import_tablespace(THD *thd, err: thd->tablespace_op=FALSE; + if (mdl_downgrade > MDL_NOT_INITIALIZED) + table->mdl_ticket->downgrade_lock(mdl_downgrade); + if (likely(error == 0)) { my_ok(thd); DBUG_RETURN(0); } - table_list->table->file->print_error(error, MYF(0)); + table->file->print_error(error, MYF(0)); DBUG_RETURN(-1); } @@ -8790,7 +8854,8 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, } else { - if ((def->default_value= alter->default_value)) + if ((def->default_value= alter->default_value) || + !(def->flags & NOT_NULL_FLAG)) def->flags&= ~NO_DEFAULT_VALUE_FLAG; else def->flags|= NO_DEFAULT_VALUE_FLAG; @@ -10691,7 +10756,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, it is the case. TODO: this design is obsolete and will be removed. 
*/ - int table_kind= check_if_log_table(table_list, FALSE, NullS); + int table_kind= check_if_log_table(table_list); const bool used_engine= create_info->used_fields & HA_CREATE_USED_ENGINE; if (table_kind) @@ -10706,17 +10771,8 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, /* Disable alter of log tables to unsupported engine */ if ((used_engine) && (!create_info->db_type || /* unknown engine */ - !(create_info->db_type->flags & HTON_SUPPORT_LOG_TABLES))) - { - unsupported: - my_error(ER_UNSUPORTED_LOG_ENGINE, MYF(0), - hton_name(create_info->db_type)->str); + create_info->check_if_valid_log_table())) DBUG_RETURN(true); - } - - if (create_info->db_type == maria_hton && - create_info->transactional != HA_CHOICE_NO) - goto unsupported; #ifdef WITH_PARTITION_STORAGE_ENGINE if (alter_info->partition_flags & ALTER_PARTITION_INFO) @@ -10785,7 +10841,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, if (unlikely(error)) { - if (if_exists) + if (if_exists && thd->get_stmt_da()->is_error()) { int tmp_errno= thd->get_stmt_da()->sql_errno(); if (tmp_errno == ER_NO_SUCH_TABLE) @@ -11607,7 +11663,8 @@ do_continue:; thd->count_cuted_fields= CHECK_FIELD_EXPRESSION; altered_table.reset_default_fields(); if (altered_table.default_field && - altered_table.update_default_fields(true)) + (altered_table.check_sequence_privileges(thd) || + altered_table.update_default_fields(true))) { cleanup_table_after_inplace_alter(&altered_table); goto err_new_table_cleanup; @@ -13292,6 +13349,23 @@ bool check_engine(THD *thd, const char *db_name, my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "NO_ENGINE_SUBSTITUTION"); DBUG_RETURN(TRUE); } +#ifdef WITH_WSREP + /* @@enforce_storage_engine is local, if user has used + ENGINE=XXX we can't allow it in cluster in this + case as enf_engine != new _engine. 
This is because + original stmt is replicated including ENGINE=XXX and + here */ + if ((create_info->used_fields & HA_CREATE_USED_ENGINE) && + WSREP(thd)) + { + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "ENFORCE_STORAGE_ENGINE"); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_OPTION_PREVENTS_STATEMENT, + "Do not use ENGINE=x when @@enforce_storage_engine is set"); + + DBUG_RETURN(TRUE); + } +#endif *new_engine= enf_engine; } diff --git a/sql/sql_table.h b/sql/sql_table.h index b116df9af7f..9d02cf73d6e 100644 --- a/sql/sql_table.h +++ b/sql/sql_table.h @@ -56,11 +56,13 @@ enum enum_explain_filename_mode /* depends on errmsg.txt Database `db`, Table `t` ... */ #define EXPLAIN_FILENAME_MAX_EXTRA_LENGTH 63 +/* See mysql_write_frm function comment for explanations of these flags */ #define WFRM_WRITE_SHADOW 1 #define WFRM_INSTALL_SHADOW 2 #define WFRM_KEEP_SHARE 4 #define WFRM_WRITE_CONVERTED_TO 8 #define WFRM_BACKUP_ORIGINAL 16 +#define WFRM_ALTER_INFO_PREPARED 32 /* Flags for conversion functions. */ static constexpr uint FN_FROM_IS_TMP= 1 << 0; diff --git a/sql/sql_type_fixedbin.h b/sql/sql_type_fixedbin.h index c1a1567c03b..890d06be04c 100644 --- a/sql/sql_type_fixedbin.h +++ b/sql/sql_type_fixedbin.h @@ -1599,17 +1599,40 @@ public: - either by the most generic way in Item_func::fix_fields() - or by Item_func_xxx::fix_length_and_dec() before the call of Item_hybrid_func_fix_attributes() - IFNULL() is special. It does not need to test args[0]. + IFNULL and COALESCE are special- + If the first non-null arg can be safely converted to result type, + the result is guaranteed to be NOT NULL */ - uint first= dynamic_cast(attr) ? 
1 : 0; - for (uint i= first; i < nitems; i++) + bool not_null_on_conversion= false; + if (dynamic_cast(attr) || + dynamic_cast(attr)) { - if (Fbt::fix_fields_maybe_null_on_conversion_to_fbt(items[i])) + for (uint i= 0; i< nitems; i++) { - attr->set_type_maybe_null(true); - break; + if (!items[i]->maybe_null() && + !Fbt::fix_fields_maybe_null_on_conversion_to_fbt(items[i])) + { + not_null_on_conversion= true; + break; + } } } + else + { + not_null_on_conversion= true; + for (uint i= 0; i < nitems; i++) + { + if (Fbt::fix_fields_maybe_null_on_conversion_to_fbt(items[i])) + { + not_null_on_conversion= false; + break; + } + } + } + if (not_null_on_conversion) + attr->set_type_maybe_null(false); + else + attr->set_type_maybe_null(true); return false; } bool Item_func_min_max_fix_attributes(THD *thd, Item_func_min_max *func, diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 15ac871de1d..193096eee55 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -2586,9 +2586,7 @@ bool st_select_lex_unit::exec_inner() fake_select_lex->table_list.empty(); if (likely(!saved_error)) - { - thd->limit_found_rows = (ulonglong)table->file->stats.records + add_rows; - } + thd->limit_found_rows= (ulonglong)table->file->stats.records + add_rows; /* Mark for slow query log if any of the union parts didn't use indexes efficiently diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 91dba6c09ac..74cac2fac10 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1928,8 +1928,6 @@ int multi_update::prepare(List ¬_used_values, TABLE *table= table_ref->table; table->read_set= &table->def_read_set; bitmap_union(table->read_set, &table->tmp_set); - if (!(thd->lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_PREPARE)) - table->file->prepare_for_modify(true, true); } /* @@ -2145,6 +2143,8 @@ multi_update::initialize_tables(JOIN *join) ORDER group; TMP_TABLE_PARAM *tmp_param; + table->file->prepare_for_modify(true, true); + if (ignore) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); if 
(table == main_table) // First table in join @@ -3170,6 +3170,11 @@ bool Sql_cmd_update::prepare_inner(THD *thd) table_list->table->no_cache= true; } + { + List_iterator_fast fs(select_lex->item_list), vs(lex->value_list); + while (Item *f= fs++) + vs++->associate_with_target_field(thd, static_cast(f)); + } free_join= false; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index f323be9f88a..570b5362634 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -13834,6 +13834,7 @@ expr_or_ignore_or_default: | DEFAULT { $$= new (thd->mem_root) Item_default_specification(thd); + Lex->default_used= TRUE; if (unlikely($$ == NULL)) MYSQL_YYABORT; } @@ -13925,6 +13926,7 @@ update_elem: { Item *def= new (thd->mem_root) Item_default_value(thd, Lex->current_context(), $1, 1); + Lex->default_used= TRUE; if (!def || add_item_to_list(thd, $1) || add_value_to_list(thd, def)) MYSQL_YYABORT; } @@ -16114,7 +16116,6 @@ user_maybe_role: if (unlikely(!($$= thd->calloc(1)))) MYSQL_YYABORT; $$->user= current_user; - $$->auth= new (thd->mem_root) USER_AUTH(); } ; diff --git a/sql/table.cc b/sql/table.cc index e4ef77f133b..452f1a7cd07 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2881,6 +2881,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, hash_keypart->fieldnr= hash_field_used_no + 1; hash_field= share->field[hash_field_used_no]; hash_field->flags|= LONG_UNIQUE_HASH_FIELD;//Used in parse_vcol_defs + DBUG_ASSERT(hash_field->invisible == INVISIBLE_FULL); keyinfo->flags|= HA_NOSAME; share->virtual_fields++; share->stored_fields--; @@ -3897,6 +3898,19 @@ Vcol_expr_context::~Vcol_expr_context() } +bool TABLE::check_sequence_privileges(THD *thd) +{ + if (internal_tables) + for (Field **fp= field; *fp; fp++) + { + Virtual_column_info *vcol= (*fp)->default_value; + if (vcol && vcol->check_access(thd)) + return 1; + } + return 0; +} + + bool TABLE::vcol_fix_expr(THD *thd) { if (pos_in_table_list->placeholder() || vcol_refix_list.is_empty()) @@ -4033,6 +4047,13 @@ bool 
Virtual_column_info::fix_and_check_expr(THD *thd, TABLE *table) } +bool Virtual_column_info::check_access(THD *thd) +{ + return flags & VCOL_NEXTVAL && + expr->walk(&Item::check_sequence_privileges, 0, thd); +} + + /* @brief Unpack the definition of a virtual column from its linear representation @@ -8059,7 +8080,8 @@ void TABLE::mark_columns_needed_for_insert() } /* - Mark columns according the binlog row image option. + Mark columns according the binlog row image option + or mark virtual columns for slave. Columns to be written are stored in 'rpl_write_set' @@ -8090,6 +8112,10 @@ void TABLE::mark_columns_needed_for_insert() the read_set at binlogging time (for those cases that we only want to log a PK and we needed other fields for execution). + + If binlog row image is off on slave we mark virtual columns + for read as InnoDB requires correct field metadata which is set + by update_virtual_fields(). */ void TABLE::mark_columns_per_binlog_row_image() @@ -8099,9 +8125,6 @@ void TABLE::mark_columns_per_binlog_row_image() DBUG_ASSERT(read_set->bitmap); DBUG_ASSERT(write_set->bitmap); - /* If not using row format */ - rpl_write_set= write_set; - /** If in RBR we may need to mark some extra columns, depending on the binlog-row-image command line argument. 
@@ -8182,6 +8205,12 @@ void TABLE::mark_columns_per_binlog_row_image() } file->column_bitmaps_signal(); } + else + { + /* If not using row format */ + rpl_write_set= write_set; + file->column_bitmaps_signal(); + } DBUG_VOID_RETURN; } @@ -10809,8 +10838,8 @@ bool vers_select_conds_t::check_units(THD *thd) { DBUG_ASSERT(type != SYSTEM_TIME_UNSPECIFIED); DBUG_ASSERT(start.item); - return start.check_unit(thd) || - end.check_unit(thd); + return start.check_unit(thd, this) || + end.check_unit(thd, this); } bool vers_select_conds_t::eq(const vers_select_conds_t &conds) const @@ -10836,7 +10865,7 @@ bool vers_select_conds_t::eq(const vers_select_conds_t &conds) const } -bool Vers_history_point::check_unit(THD *thd) +bool Vers_history_point::check_unit(THD *thd, vers_select_conds_t *vers_conds) { if (!item) return false; @@ -10846,6 +10875,9 @@ bool Vers_history_point::check_unit(THD *thd) item->full_name(), "FOR SYSTEM_TIME"); return true; } + else if (item->with_param()) + vers_conds->has_param= true; + if (item->fix_fields_if_needed(thd, &item)) return true; const Type_handler *t= item->this_item()->real_type_handler(); diff --git a/sql/table.h b/sql/table.h index e627e4cbbd3..6547c19d597 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1158,6 +1158,11 @@ struct TABLE_SHARE return (tmp_table == SYSTEM_TMP_TABLE) ? 
0 : table_map_id; } + bool is_optimizer_tmp_table() + { + return tmp_table == INTERNAL_TMP_TABLE && !db.length && table_name.length; + } + bool visit_subgraph(Wait_for_flush *waiting_ticket, MDL_wait_for_graph_visitor *gvisitor); @@ -1845,6 +1850,7 @@ public: TABLE *tmp_table, TMP_TABLE_PARAM *tmp_table_param, bool with_cleanup); + bool check_sequence_privileges(THD *thd); bool vcol_fix_expr(THD *thd); bool vcol_cleanup_expr(THD *thd); Field *find_field_by_name(const LEX_CSTRING *str) const; @@ -2277,6 +2283,8 @@ struct vers_history_point_t Item *item; }; +struct vers_select_conds_t; + class Vers_history_point : public vers_history_point_t { void fix_item(); @@ -2297,7 +2305,8 @@ public: } void empty() { unit= VERS_TIMESTAMP; item= NULL; } void print(String *str, enum_query_type, const char *prefix, size_t plen) const; - bool check_unit(THD *thd); + bool check_unit(THD *thd, vers_select_conds_t *vers_conds); + bool has_param() const; bool eq(const vers_history_point_t &point) const; }; @@ -2307,6 +2316,7 @@ struct vers_select_conds_t vers_system_time_t orig_type; bool used:1; bool delete_history:1; + bool has_param:1; Vers_history_point start; Vers_history_point end; Lex_ident_column name; @@ -2322,6 +2332,7 @@ struct vers_select_conds_t orig_type= SYSTEM_TIME_UNSPECIFIED; used= false; delete_history= false; + has_param= false; start.empty(); end.empty(); } @@ -2336,6 +2347,7 @@ struct vers_select_conds_t used= false; delete_history= (type == SYSTEM_TIME_HISTORY || type == SYSTEM_TIME_BEFORE); + has_param= false; start= _start; end= _end; name= _name; diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc index f145e581b3f..1b05df67a1d 100644 --- a/sql/temporary_tables.cc +++ b/sql/temporary_tables.cc @@ -441,6 +441,17 @@ bool THD::open_temporary_table(TABLE_LIST *tl) } +bool THD::check_and_open_tmp_table(TABLE_LIST *tl) +{ + if (!has_temporary_tables() || + tl == lex->first_not_own_table() || + tl->derived || tl->schema_table) + return false; + + return 
open_temporary_table(tl); +} + + /** Pre-open temporary tables corresponding to table list elements. diff --git a/sql/transaction.cc b/sql/transaction.cc index 0dd5e1bebab..7b1dafd79b8 100644 --- a/sql/transaction.cc +++ b/sql/transaction.cc @@ -439,11 +439,11 @@ bool trans_rollback_implicit(THD *thd) DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS")); res= ha_rollback_trans(thd, true); /* - We don't reset OPTION_BEGIN flag below to simulate implicit start - of new transacton in @@autocommit=1 mode. This is necessary to - preserve backward compatibility. + Implicit rollback should reset OPTION_BEGIN flag to avoid starting a + new transaction implicitly in next statement. It makes the behaviour + uniform with direct commit and rollback. */ - thd->variables.option_bits&= ~(OPTION_BINLOG_THIS_TRX); + thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_BINLOG_THIS_TRX); thd->transaction->all.reset(); /* Rollback should clear transaction_rollback_request flag. */ diff --git a/sql/unireg.cc b/sql/unireg.cc index fca005cc990..4c31ce6c283 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -288,9 +288,8 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING &table, DBUG_ENTER("build_frm_image"); /* If fixed row records, we need one bit to check for deleted rows */ - if (!(create_info->table_options & HA_OPTION_PACK_RECORD)) - create_info->null_bits++; - data_offset= (create_info->null_bits + 7) / 8; + bool need_deleted_bit= !(create_info->table_options & HA_OPTION_PACK_RECORD); + data_offset= (create_info->null_bits + need_deleted_bit + 7) / 8; error= pack_vcols(thd, &vcols, create_fields, create_info->check_constraint_list); @@ -685,11 +684,6 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo, key->flags, key->user_defined_key_parts, key->key_part)); - /* For SPATIAL, FULLTEXT and HASH indexes (anything other than B-tree), - ignore the ASC/DESC attribute of columns. */ - const uchar ha_reverse_sort= key->algorithm > HA_KEY_ALG_BTREE - ? 
0 : HA_REVERSE_SORT; - for (key_part=key->key_part,key_part_end=key_part+key->user_defined_key_parts ; key_part != key_part_end ; key_part++) @@ -702,14 +696,17 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo, int2store(pos,key_part->fieldnr+1+FIELD_NAME_USED); offset= (uint) (key_part->offset+data_offset+1); int2store(pos+2, offset); - key_part->key_part_flag &= ha_reverse_sort; + key_part->key_part_flag &= HA_REVERSE_SORT; + /* DESC can be set only for BTREE indexes */ + DBUG_ASSERT(key_part->key_part_flag == 0 || + key->algorithm <= HA_KEY_ALG_BTREE); pos[4]= (uchar)(key_part->key_part_flag); int2store(pos+5,key_part->key_type); int2store(pos+7,key_part->length); pos+=9; } } - /* Save keynames */ + /* Save keynames */ keyname_pos=pos; *pos++=(uchar) NAMES_SEP_CHAR; for (key=keyinfo ; key != end ; key++) diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 052a0da2992..2ee0416c558 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -2838,9 +2838,17 @@ static int wsrep_TOI_begin(THD *thd, const char *db, const char *table, WSREP_DEBUG("TOI Begin: %s", wsrep_thd_query(thd)); DEBUG_SYNC(thd, "wsrep_before_toi_begin"); - if (wsrep_can_run_in_toi(thd, db, table, table_list, create_info) == false) + if (!wsrep_ready || + wsrep_can_run_in_toi(thd, db, table, table_list, create_info) == false) { WSREP_DEBUG("No TOI for %s", wsrep_thd_query(thd)); + if (!wsrep_ready) + { + my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0)); + push_warning_printf(thd, Sql_state_errno_level::WARN_LEVEL_WARN, + ER_GALERA_REPLICATION_NOT_SUPPORTED, + "Galera cluster is not ready to execute replication"); + } return 1; } diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index 3394ed6913c..3d7b7941d2e 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -27,7 +27,7 @@ #include #include "wsrep_trans_observer.h" #include "wsrep_server_state.h" -#include "wsrep_plugin.h" /* wsrep_provider_plugin_is_enabled() */ +#include "wsrep_plugin.h" /* 
wsrep_provider_plugin_enabled() */ ulong wsrep_reject_queries; @@ -133,6 +133,14 @@ bool wsrep_on_update (sys_var *self, THD* thd, enum_var_type var_type) saved_wsrep_on= false; } + if (!wsrep_ready_get()) + { + my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0)); + WSREP_INFO("Failed to start Galera replication. Please check your " + "configuration."); + saved_wsrep_on= false; + } + free(tmp); mysql_mutex_lock(&LOCK_global_system_variables); } diff --git a/sql/xa.cc b/sql/xa.cc index 730e312b746..d84affa4e9f 100644 --- a/sql/xa.cc +++ b/sql/xa.cc @@ -632,7 +632,7 @@ bool trans_xa_commit(THD *thd) if (!xid_state.is_explicit_XA() || !xid_state.xid_cache_element->xid.eq(thd->lex->xid)) { - if (thd->in_multi_stmt_transaction_mode()) + if (thd->in_multi_stmt_transaction_mode() || xid_state.xid_cache_element) { /* Not allow to commit from inside an not-"native" to xid @@ -805,7 +805,7 @@ bool trans_xa_rollback(THD *thd) if (!xid_state.is_explicit_XA() || !xid_state.xid_cache_element->xid.eq(thd->lex->xid)) { - if (thd->in_multi_stmt_transaction_mode()) + if (thd->in_multi_stmt_transaction_mode() || xid_state.xid_cache_element) { my_error(ER_XAER_OUTSIDE, MYF(0)); DBUG_RETURN(TRUE); diff --git a/storage/columnstore/columnstore b/storage/columnstore/columnstore index 9763b126517..f33e9ce86c7 160000 --- a/storage/columnstore/columnstore +++ b/storage/columnstore/columnstore @@ -1 +1 @@ -Subproject commit 9763b126517c8efb716e767fd5ba4eb2b5b405fc +Subproject commit f33e9ce86c73f606dbef2d50cac8b92e393344ef diff --git a/storage/connect/ioapi.h b/storage/connect/ioapi.h index 94b292ed78f..7971717ad36 100644 --- a/storage/connect/ioapi.h +++ b/storage/connect/ioapi.h @@ -21,9 +21,10 @@ #ifndef _ZLIBIOAPI64_H #define _ZLIBIOAPI64_H -#if defined(__linux__) +#if defined(__linux__) || defined (__GNU__) - // Linux needs this to support file operation on files larger then 4+GB + // Linux and Hurd needs this to support file operation on files larger + // than 4+GB. 
// But might need better if/def to select just the platforms that needs them. #ifndef __USE_FILE_OFFSET64 diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index a2c23eb580e..ca11ceb2e1b 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -1516,10 +1516,10 @@ error: check_opt The options for repair. We do not use it currently. DESCRIPTION - If the file is empty, change # of rows in the file and complete recovery. - Otherwise, scan the table looking for bad rows. If none were found, + Scan the table looking for bad rows. If none were found, we mark file as a good one and return. If a bad row was encountered, we truncate the datafile up to the last good row. + If the file is empty, then do nothing and complete recovery. TODO: Make repair more clever - it should try to recover subsequent rows (after the first bad one) as well. @@ -1537,10 +1537,7 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt) /* empty file */ if (!share->saved_data_file_length) - { - share->rows_recorded= 0; goto end; - } /* Don't assert in field::val() functions */ table->use_all_columns(); diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt index 439a7a445d3..3204b66721b 100644 --- a/storage/innobase/CMakeLists.txt +++ b/storage/innobase/CMakeLists.txt @@ -131,7 +131,6 @@ ENDIF() INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/storage/innobase/include ${CMAKE_SOURCE_DIR}/storage/innobase/handler ${CMAKE_SOURCE_DIR}/libbinlogevents/include) -INCLUDE_DIRECTORIES(${PROJECT_SOURCE_DIR}/tpool) SET(INNOBASE_SOURCES btr/btr0btr.cc @@ -435,14 +434,16 @@ MYSQL_ADD_PLUGIN(innobase ${INNOBASE_SOURCES} STORAGE_ENGINE ${ZLIB_LIBRARIES} ${NUMA_LIBRARY} ${LIBSYSTEMD} - ${LINKER_SCRIPT}) + ${LINKER_SCRIPT} + tpool + ) IF(NOT TARGET innobase) RETURN() ENDIF() -ADD_DEFINITIONS(${SSL_DEFINES} ${TPOOL_DEFINES}) +ADD_DEFINITIONS(${SSL_DEFINES}) # A GCC bug causes crash when compiling these files on ARM64 with -O1+ # Compile them with -O0 as a workaround. 
IF(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64" @@ -531,7 +532,7 @@ IF(INNODB_ENABLE_XAP_UNLOCK_UNMODIFIED_FOR_PRIMARY) ENDIF() IF(NOT (PLUGIN_INNOBASE STREQUAL DYNAMIC)) - TARGET_LINK_LIBRARIES(innobase tpool mysys) + TARGET_LINK_LIBRARIES(innobase mysys) ADD_SUBDIRECTORY(${CMAKE_SOURCE_DIR}/extra/mariabackup ${CMAKE_BINARY_DIR}/extra/mariabackup) ENDIF() diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 0e828ae4e3c..2cd1d7644e9 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -1532,6 +1532,27 @@ void buf_pool_t::close() noexcept { const size_t size{size_in_bytes}; +#ifdef __SANITIZE_ADDRESS__ + /* Sequence of operation which leads to use_after_poison error: + + mmap(); + __asan_poison_memory_region(); + munmap(); + mmap() reuses the same virtual address + Write into the memory region throws the error. + + Recent clang-18, gcc-13.3 doesn't detect this error. + Older like clang-14..clang-16 and gcc-10, gcc-11, gcc-12 detects + this error. 
Please check the reported bug + (https://github.com/google/sanitizers/issues/1705) + + Unpoison the whole buffer pool memory to avoid this error */ + #if (defined(__GNUC__) && !defined(__clang__) && (__GNUC__ < 14)) ||\ + (defined(__clang__) && (__clang_major__ < 18)) + MEM_MAKE_ADDRESSABLE(memory, size); + #endif /* __GNUC__ __clang */ +#endif /* __SANITIZE_ADDRESS__ */ + for (char *extent= memory, *end= memory + block_descriptors_in_bytes(n_blocks); extent < end; extent+= innodb_buffer_pool_extent_size) @@ -1761,6 +1782,11 @@ ATTRIBUTE_COLD buf_pool_t::shrink_status buf_pool_t::shrink(size_t size) buf_flush_relocate_on_flush_list(b, &block->page); mysql_mutex_unlock(&flush_list_mutex); } + else + { + ut_d(if (auto om= b->oldest_modification()) ut_ad(om == 2)); + b->oldest_modification_.store(0, std::memory_order_relaxed); + } } /* relocate LRU list */ @@ -1813,6 +1839,12 @@ ATTRIBUTE_COLD buf_pool_t::shrink_status buf_pool_t::shrink(size_t size) goto next; } + if (block) + buf_LRU_block_free_non_file_page(block); + + if (!UT_LIST_GET_LEN(LRU) && n_blocks_to_withdraw) + return SHRINK_ABORT; + if (UT_LIST_GET_LEN(free) + UT_LIST_GET_LEN(LRU) < usable_size() / 20) return SHRINK_ABORT; @@ -2085,10 +2117,11 @@ ATTRIBUTE_COLD void buf_pool_t::resize(size_t size, THD *thd) noexcept while (buf_page_t *b= UT_LIST_GET_FIRST(withdrawn)) { + ut_ad(!b->oldest_modification()); + ut_ad(b->state() == buf_page_t::NOT_USED); UT_LIST_REMOVE(withdrawn, b); UT_LIST_ADD_LAST(free, b); ut_d(b->in_free_list= true); - ut_ad(b->state() == buf_page_t::NOT_USED); b->lock.init(); #ifdef BTR_CUR_HASH_ADAPT /* Clear the AHI fields, because buf_block_init_low() expects diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index a70dc42cf6b..278bba2b006 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -950,12 +950,15 @@ MY_ATTRIBUTE((warn_unused_result)) @return number of pages written or hole-punched */ uint32_t 
fil_space_t::flush_freed(bool writable) noexcept { + mysql_mutex_assert_not_owner(&buf_pool.flush_list_mutex); + mysql_mutex_assert_not_owner(&buf_pool.mutex); + const bool punch_hole= chain.start->punch_hole == 1; if (!punch_hole && !srv_immediate_scrub_data_uncompressed) return 0; - - mysql_mutex_assert_not_owner(&buf_pool.flush_list_mutex); - mysql_mutex_assert_not_owner(&buf_pool.mutex); + if (srv_is_undo_tablespace(id)) + /* innodb_undo_log_truncate=ON can take care of these better */ + return 0; for (;;) { diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index 84b8a4435a4..594cfe2bdcc 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -864,6 +864,8 @@ retry: else if (table) table->acquire(); } + else if (!dict_locked) + dict_sys.unfreeze(); return table; } diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 2953b003994..0dc7ce4f8f5 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -2833,23 +2833,30 @@ fil_io_t fil_space_t::io(const IORequest &type, os_offset_t offset, size_t len, while (node->size <= p) { p -= node->size; - node = UT_LIST_GET_NEXT(chain, node); - if (!node) { + if (!UT_LIST_GET_NEXT(chain, node)) { fail: - if (type.type != IORequest::READ_ASYNC) { + switch (type.type) { + case IORequest::READ_ASYNC: + /* Read-ahead may be requested for + non-existing pages. Ignore such + requests. 
*/ + break; + default: fil_invalid_page_access_msg( node->name, offset, len, type.is_read()); - } #ifndef DBUG_OFF io_error: #endif - set_corrupted(); + set_corrupted(); + } + err = DB_CORRUPTION; node = nullptr; goto release; } + node = UT_LIST_GET_NEXT(chain, node); } offset = os_offset_t{p} << srv_page_size_shift; diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 61025c1fea1..a58ab75d7f2 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -122,8 +122,6 @@ simple_thread_local ha_handler_stats *mariadb_stats; #include // TT_FOR_UPGRADE #include "sql_type_vector.h" -#define thd_get_trx_isolation(X) ((enum_tx_isolation)thd_tx_isolation(X)) - extern "C" void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all); unsigned long long thd_get_query_id(const MYSQL_THD thd); void thd_clear_error(MYSQL_THD thd); @@ -329,6 +327,25 @@ This is the same as "myisam_stats_method_typelib" */ static TYPELIB innodb_stats_method_typelib = CREATE_TYPELIB_FOR(innodb_stats_method_names); +/** Possible values for system variable "innodb_linux_aio" */ +#ifdef __linux__ +const char* innodb_linux_aio_names[] = { + "auto", /* SRV_LINUX_AIO_AUTO */ + "io_uring", /* SRV_LINUX_AIO_IO_URING */ + "aio", /* SRV_LINUX_AIO_LIBAIO */ + NullS +}; + +/** Used to define an enumerate type of the system variable +innodb_linux_aio. Used by mariadb-backup too. 
*/ +TYPELIB innodb_linux_aio_typelib = { + array_elements(innodb_linux_aio_names) - 1, + "innodb_linux_aio_typelib", + innodb_linux_aio_names, + NULL +}; +#endif + /** Possible values of the parameter innodb_checksum_algorithm */ const char* innodb_checksum_algorithm_names[] = { "crc32", @@ -801,14 +818,16 @@ innodb_tmpdir_validate( return(0); } -/******************************************************************//** -Maps a MySQL trx isolation level code to the InnoDB isolation level code -@return InnoDB isolation level */ -static inline -uint -innobase_map_isolation_level( -/*=========================*/ - enum_tx_isolation iso); /*!< in: MySQL isolation level code */ +/** @return the current transaction isolation level */ +static inline uint innodb_isolation_level(const THD *thd) noexcept +{ + static_assert(ISO_REPEATABLE_READ == TRX_ISO_REPEATABLE_READ, ""); + static_assert(ISO_SERIALIZABLE == TRX_ISO_SERIALIZABLE, ""); + static_assert(ISO_READ_COMMITTED == TRX_ISO_READ_COMMITTED, ""); + static_assert(ISO_READ_UNCOMMITTED == TRX_ISO_READ_UNCOMMITTED, ""); + return high_level_read_only + ? ISO_READ_UNCOMMITTED : (thd_tx_isolation(thd) & 3); +} /** Gets field offset for a field in a table. @param[in] table MySQL table object @@ -4080,12 +4099,8 @@ static int innodb_init_params() skip_buffering_tweak: #endif -#if !defined LINUX_NATIVE_AIO && !defined HAVE_URING && !defined _WIN32 - /* Currently native AIO is supported only on windows and linux - and that also when the support is compiled in. In all other - cases, we ignore the setting of innodb_use_native_aio. */ - srv_use_native_aio= FALSE; -#endif + if (!tpool::supports_native_aio()) + srv_use_native_aio= FALSE; DBUG_RETURN(0); } @@ -4377,21 +4392,18 @@ innobase_start_trx_and_assign_read_view( trx_start_if_not_started_xa(trx, false); - /* Assign a read view if the transaction does not have it yet. - Do this only if transaction is using REPEATABLE READ isolation - level. 
*/ - trx->isolation_level = innobase_map_isolation_level( - thd_get_trx_isolation(thd)) & 3; + /* Assign a read view if the transaction does not have one yet. + Skip this for the READ UNCOMMITTED isolation level. */ + trx->isolation_level = innodb_isolation_level(thd) & 3; - if (trx->isolation_level == TRX_ISO_REPEATABLE_READ) { + if (trx->isolation_level != TRX_ISO_READ_UNCOMMITTED) { trx->read_view.open(trx); } else { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_UNSUPPORTED, "InnoDB: WITH CONSISTENT SNAPSHOT" - " was ignored because this phrase" - " can only be used with" - " REPEATABLE READ isolation level."); + " is ignored at READ UNCOMMITTED" + " isolation level."); } /* Set the MySQL flag to mark that there is an active transaction */ @@ -5000,7 +5012,7 @@ ha_innobase::table_flags() const called before prebuilt is inited. */ if (thd_tx_isolation(thd) <= ISO_READ_COMMITTED) { - return(flags); + return(flags | HA_CHECK_UNIQUE_AFTER_WRITE); } return(flags | HA_BINLOG_STMT_CAPABLE); @@ -14029,10 +14041,10 @@ int ha_innobase::truncate() trx); if (!err) { + trx->commit(deleted); m_prebuilt->table->acquire(); create_table_info_t::create_table_update_dict(m_prebuilt->table, m_user_thd, info, *table); - trx->commit(deleted); } else { @@ -16064,31 +16076,6 @@ ha_innobase::start_stmt( DBUG_RETURN(0); } -/******************************************************************//** -Maps a MySQL trx isolation level code to the InnoDB isolation level code -@return InnoDB isolation level */ -static inline -uint -innobase_map_isolation_level( -/*=========================*/ - enum_tx_isolation iso) /*!< in: MySQL isolation level code */ -{ - if (UNIV_UNLIKELY(srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN) - || UNIV_UNLIKELY(srv_read_only_mode)) { - return TRX_ISO_READ_UNCOMMITTED; - } - switch (iso) { - case ISO_REPEATABLE_READ: return(TRX_ISO_REPEATABLE_READ); - case ISO_READ_COMMITTED: return(TRX_ISO_READ_COMMITTED); - case ISO_SERIALIZABLE: 
return(TRX_ISO_SERIALIZABLE); - case ISO_READ_UNCOMMITTED: return(TRX_ISO_READ_UNCOMMITTED); - } - - ut_error; - - return(0); -} - /******************************************************************//** As MySQL will execute an external lock for every new table it uses when it starts to process an SQL statement (an exception is when MySQL calls @@ -16558,19 +16545,30 @@ ha_innobase::store_lock( Be careful to ignore TL_IGNORE if we are going to do something with only 'real' locks! */ - /* If no MySQL table is in use, we need to set the isolation level + /* If no table handle is open, we need to set the isolation level of the transaction. */ if (lock_type != TL_IGNORE && trx->n_mysql_tables_in_use == 0) { - trx->isolation_level = innobase_map_isolation_level( - (enum_tx_isolation) thd_tx_isolation(thd)) & 3; - - if (trx->isolation_level <= TRX_ISO_READ_COMMITTED) { - + switch ((trx->isolation_level + = innodb_isolation_level(thd) & 3)) { + case ISO_REPEATABLE_READ: + break; + case ISO_READ_COMMITTED: + case ISO_READ_UNCOMMITTED: /* At low transaction isolation levels we let each consistent read set its own snapshot */ trx->read_view.close(); + break; + case ISO_SERIALIZABLE: + auto trx_state = trx->state; + if (trx_state != TRX_STATE_NOT_STARTED) { + ut_ad(trx_state == TRX_STATE_ACTIVE); + } else if (trx->snapshot_isolation) { + trx->will_lock = true; + trx_start_if_not_started(trx, false); + trx->read_view.open(trx); + } } } @@ -19659,6 +19657,15 @@ static MYSQL_SYSVAR_BOOL(use_native_aio, srv_use_native_aio, "Use native AIO if supported on this platform", NULL, NULL, TRUE); +#ifdef __linux__ +static MYSQL_SYSVAR_ENUM(linux_aio, srv_linux_aio_method, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Specifies which Linux AIO implementation should be used." 
+ " Possible value are \"auto\" (default) to select io_uring" + " and fallback to aio, or explicit \"io_uring\" or \"aio\"", + nullptr, nullptr, SRV_LINUX_AIO_AUTO, &innodb_linux_aio_typelib); +#endif + #ifdef HAVE_LIBNUMA static MYSQL_SYSVAR_BOOL(numa_interleave, srv_numa_interleave, PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, @@ -20015,6 +20022,9 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(tmpdir), MYSQL_SYSVAR(autoinc_lock_mode), MYSQL_SYSVAR(use_native_aio), +#ifdef __linux__ + MYSQL_SYSVAR(linux_aio), +#endif #ifdef HAVE_LIBNUMA MYSQL_SYSVAR(numa_interleave), #endif /* HAVE_LIBNUMA */ diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index d05b89329ab..8b1d316437f 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -48,6 +48,22 @@ struct named_spaces_tag_t; using space_list_t= ilist; +/** Possible values of innodb_linux_aio */ +#ifdef __linux__ +enum srv_linux_aio_t +{ + /** auto, io_uring first and then aio */ + SRV_LINUX_AIO_AUTO, + /** io_uring */ + SRV_LINUX_AIO_IO_URING, + /** aio (libaio interface) */ + SRV_LINUX_AIO_LIBAIO +}; +#endif + +/** innodb_flush_method */ +extern ulong srv_file_flush_method; + /** Undo tablespaces starts with space_id. */ extern uint32_t srv_undo_space_id_start; /** The number of UNDO tablespaces that are open and ready to use. */ diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 4af1f891865..1ff4003428d 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -178,6 +178,12 @@ OS (provided we compiled Innobase with it in), otherwise we will use simulated aio. 
Currently we support native aio on windows and linux */ extern my_bool srv_use_native_aio; + +#ifdef __linux__ +/* This enum is defined which linux native io method to use */ +extern ulong srv_linux_aio_method; +#endif + extern my_bool srv_numa_interleave; /* Use atomic writes i.e disable doublewrite buffer */ diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index fe516ae7f02..a91635e85d0 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -283,11 +283,9 @@ remap: struct stat st; if (!fstat(file, &st)) { - MSAN_STAT_WORKAROUND(&st); const auto st_dev= st.st_dev; if (!stat("/dev/shm", &st)) { - MSAN_STAT_WORKAROUND(&st); is_pmem= st.st_dev == st_dev; if (!is_pmem) return ptr; /* MAP_FAILED */ diff --git a/storage/innobase/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff b/storage/innobase/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff index 2860d5cb0b8..b2251a7222a 100644 --- a/storage/innobase/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff +++ b/storage/innobase/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff @@ -1,14 +1,6 @@ --- suite/storage_engine/trx/cons_snapshot_serializable.result +++ suite/storage_engine/trx/cons_snapshot_serializable.reject -@@ -5,12 +5,15 @@ - CREATE TABLE t1 (a ) ENGINE= ; - SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; - START TRANSACTION WITH CONSISTENT SNAPSHOT; -+Warnings: -+Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level. 
- connection con2; - INSERT INTO t1 (a) VALUES (1); - connection con1; +@@ -11,6 +11,7 @@ # If consistent read works on this isolation level (SERIALIZABLE), the following SELECT should not return the value we inserted (1) SELECT a FROM t1; a diff --git a/storage/innobase/mysql-test/storage_engine/trx/level_read_committed.rdiff b/storage/innobase/mysql-test/storage_engine/trx/level_read_committed.rdiff deleted file mode 100644 index d0a846ee1f7..00000000000 --- a/storage/innobase/mysql-test/storage_engine/trx/level_read_committed.rdiff +++ /dev/null @@ -1,11 +0,0 @@ ---- suite/storage_engine/trx/level_read_committed.result -+++ suite/storage_engine/trx/level_read_committed.reject -@@ -77,6 +77,8 @@ - CREATE TABLE t1 (a ) ENGINE= ; - SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; - START TRANSACTION WITH CONSISTENT SNAPSHOT; -+Warnings: -+Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level. - connection con2; - INSERT INTO t1 (a) VALUES (1); - connection con1; diff --git a/storage/innobase/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff b/storage/innobase/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff index ee483dd64bb..756b8626f76 100644 --- a/storage/innobase/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff +++ b/storage/innobase/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff @@ -5,7 +5,7 @@ SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; START TRANSACTION WITH CONSISTENT SNAPSHOT; +Warnings: -+Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level. ++Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT is ignored at READ UNCOMMITTED isolation level. 
connection con2; INSERT INTO t1 (a) VALUES (1); connection con1; diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index 8eed3595c9f..f3c04027077 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -52,10 +52,6 @@ Created 10/21/1995 Heikki Tuuri #include -#ifdef LINUX_NATIVE_AIO -#include -#endif /* LINUX_NATIVE_AIO */ - #ifdef HAVE_FALLOC_PUNCH_HOLE_AND_KEEP_SIZE # include # include @@ -886,7 +882,6 @@ os_file_status_posix( if (!ret) { /* file exists, everything OK */ - MSAN_STAT_WORKAROUND(&statinfo); } else if (errno == ENOENT || errno == ENOTDIR || errno == ENAMETOOLONG) { /* file does not exist */ return(true); @@ -1051,7 +1046,6 @@ static ATTRIBUTE_COLD void os_file_log_buffered() /** @return whether the log file may work with unbuffered I/O. */ static ATTRIBUTE_COLD bool os_file_log_maybe_unbuffered(const struct stat &st) { - MSAN_STAT_WORKAROUND(&st); char b[20 + sizeof "/sys/dev/block/" ":" "/../queue/physical_block_size"]; if (snprintf(b, sizeof b, "/sys/dev/block/%u:%u/queue/physical_block_size", major(st.st_dev), minor(st.st_dev)) >= @@ -1436,7 +1430,6 @@ os_file_size_t os_file_get_size(const char *filename) noexcept int ret = stat(filename, &s); if (ret == 0) { - MSAN_STAT_WORKAROUND(&s); file_size.m_total_size = s.st_size; /* st_blocks is in 512 byte sized blocks */ file_size.m_alloc_size = s.st_blocks * 512; @@ -1481,8 +1474,6 @@ os_file_get_status_posix( return(DB_FAIL); } - MSAN_STAT_WORKAROUND(statinfo); - switch (statinfo->st_mode & S_IFMT) { case S_IFDIR: stat_info->type = OS_FILE_TYPE_DIR; @@ -3076,132 +3067,6 @@ static void write_io_callback(void *c) write_slots->release(cb); } -#ifdef LINUX_NATIVE_AIO -/** Checks if the system supports native linux aio. On some kernel -versions where native aio is supported it won't work on tmpfs. In such -cases we can't use native aio. - -@return: true if supported, false otherwise. 
*/ -static bool is_linux_native_aio_supported() -{ - File fd; - io_context_t io_ctx; - std::string log_file_path = get_log_file_path(); - - memset(&io_ctx, 0, sizeof(io_ctx)); - if (io_setup(1, &io_ctx)) { - - /* The platform does not support native aio. */ - - return(false); - - } - else if (!srv_read_only_mode) { - - /* Now check if tmpdir supports native aio ops. */ - fd = mysql_tmpfile("ib"); - - if (fd < 0) { - ib::warn() - << "Unable to create temp file to check" - " native AIO support."; - - int ret = io_destroy(io_ctx); - ut_a(ret != -EINVAL); - ut_ad(ret != -EFAULT); - - return(false); - } - } - else { - fd = my_open(log_file_path.c_str(), O_RDONLY | O_CLOEXEC, - MYF(0)); - - if (fd == -1) { - - ib::warn() << "Unable to open \"" << log_file_path - << "\" to check native" - << " AIO read support."; - - int ret = io_destroy(io_ctx); - ut_a(ret != EINVAL); - ut_ad(ret != EFAULT); - - return(false); - } - } - - struct io_event io_event; - - memset(&io_event, 0x0, sizeof(io_event)); - - byte* ptr = static_cast(aligned_malloc(srv_page_size, - srv_page_size)); - - struct iocb iocb; - - /* Suppress valgrind warning. */ - memset(ptr, 0, srv_page_size); - memset(&iocb, 0x0, sizeof(iocb)); - - struct iocb* p_iocb = &iocb; - - if (!srv_read_only_mode) { - - io_prep_pwrite(p_iocb, fd, ptr, srv_page_size, 0); - - } - else { - ut_a(srv_page_size >= 512); - io_prep_pread(p_iocb, fd, ptr, 512, 0); - } - - int err = io_submit(io_ctx, 1, &p_iocb); - - if (err >= 1) { - /* Now collect the submitted IO request. */ - err = io_getevents(io_ctx, 1, 1, &io_event, NULL); - } - - aligned_free(ptr); - my_close(fd, MYF(MY_WME)); - - switch (err) { - case 1: - { - int ret = io_destroy(io_ctx); - ut_a(ret != -EINVAL); - ut_ad(ret != -EFAULT); - - return(true); - } - - case -EINVAL: - case -ENOSYS: - ib::warn() - << "Linux Native AIO not supported. You can either" - " move " - << (srv_read_only_mode ? 
log_file_path : "tmpdir") - << " to a file system that supports native" - " AIO or you can set innodb_use_native_aio to" - " FALSE to avoid this message."; - - /* fall through. */ - default: - ib::warn() - << "Linux Native AIO check on " - << (srv_read_only_mode ? log_file_path : "tmpdir") - << "returned error[" << -err << "]"; - } - - int ret = io_destroy(io_ctx); - ut_a(ret != -EINVAL); - ut_ad(ret != -EFAULT); - - return(false); -} -#endif - int os_aio_init() noexcept { int max_write_events= int(srv_n_write_io_threads * @@ -3209,41 +3074,41 @@ int os_aio_init() noexcept int max_read_events= int(srv_n_read_io_threads * OS_AIO_N_PENDING_IOS_PER_THREAD); int max_events= max_read_events + max_write_events; - int ret; -#if LINUX_NATIVE_AIO - if (srv_use_native_aio && !is_linux_native_aio_supported()) - goto disable; -#endif + int ret= 1; - ret= srv_thread_pool->configure_aio(srv_use_native_aio, max_events); - -#ifdef LINUX_NATIVE_AIO - if (ret) + if (srv_use_native_aio) { - ut_ad(srv_use_native_aio); -disable: - ib::warn() << "Linux Native AIO disabled."; - srv_use_native_aio= false; - ret= srv_thread_pool->configure_aio(false, max_events); - } + tpool::aio_implementation aio_impl= tpool::OS_IO_DEFAULT; +#ifdef __linux__ + compile_time_assert(SRV_LINUX_AIO_IO_URING == (srv_linux_aio_t)tpool::OS_IO_URING); + compile_time_assert(SRV_LINUX_AIO_LIBAIO == (srv_linux_aio_t) tpool::OS_IO_LIBAIO); + compile_time_assert(SRV_LINUX_AIO_AUTO == (srv_linux_aio_t) tpool::OS_IO_DEFAULT); + aio_impl=(tpool::aio_implementation) srv_linux_aio_method; #endif -#ifdef HAVE_URING + ret= srv_thread_pool->configure_aio(srv_use_native_aio, max_events, + aio_impl); + if (ret) + { + srv_use_native_aio= false; + sql_print_warning("InnoDB: native AIO failed: falling back to" + " innodb_use_native_aio=OFF"); + } + else + sql_print_information("InnoDB: Using %s", srv_thread_pool + ->get_aio_implementation()); + } if (ret) - { - ut_ad(srv_use_native_aio); - ib::warn() - << "liburing disabled: 
falling back to innodb_use_native_aio=OFF"; - srv_use_native_aio= false; - ret= srv_thread_pool->configure_aio(false, max_events); - } -#endif - + ret= srv_thread_pool->configure_aio(false, max_events, + tpool::OS_IO_DEFAULT); if (!ret) { read_slots= new io_slots(max_read_events, srv_n_read_io_threads); write_slots= new io_slots(max_write_events, srv_n_write_io_threads); } + else + sql_print_error("InnoDB: Cannot initialize AIO sub-system"); + return ret; } @@ -3281,8 +3146,8 @@ int os_aio_resize(ulint n_reader_threads, ulint n_writer_threads) noexcept int max_write_events= int(n_writer_threads * OS_AIO_N_PENDING_IOS_PER_THREAD); int events= max_read_events + max_write_events; - /** Do the Linux AIO dance (this will try to create a new - io context with changed max_events ,etc*/ + /* Do the Linux AIO dance (this will try to create a new + io context with changed max_events, etc.) */ int ret= srv_thread_pool->reconfigure_aio(srv_use_native_aio, events); @@ -3738,7 +3603,6 @@ void fil_node_t::find_metadata(IF_WIN(,bool create)) noexcept struct stat statbuf; if (!fstat(file, &statbuf)) { - MSAN_STAT_WORKAROUND(&statbuf); block_size= statbuf.st_blksize; # ifdef __linux__ on_ssd= fil_system.is_ssd(statbuf.st_dev); diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc index 43475882943..1a42785d97f 100644 --- a/storage/innobase/page/page0page.cc +++ b/storage/innobase/page/page0page.cc @@ -956,8 +956,9 @@ page_delete_rec_list_end( size+= s; n_recs++; - if (scrub) - mtr->memset(block, rec2 - page, rec_offs_data_size(offsets), 0); + if (UNIV_LIKELY(!scrub)); + else if (size_t size= rec_offs_data_size(offsets)) + mtr->memset(block, rec2 - page, size, 0); rec2= page_rec_get_next(rec2); } diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc index 06393149738..4a55d328523 100644 --- a/storage/innobase/row/row0log.cc +++ b/storage/innobase/row/row0log.cc @@ -2660,7 +2660,8 @@ all_done: ut_ad((mrec == NULL) == 
(index->online_log->head.bytes == 0)); #ifdef UNIV_DEBUG - if (next_mrec_end == index->online_log->head.block + if (index->online_log->head.block && + next_mrec_end == index->online_log->head.block + srv_sort_buf_size) { /* If tail.bytes == 0, next_mrec_end can also be at the end of tail.block. */ @@ -2675,7 +2676,8 @@ all_done: ut_ad(index->online_log->tail.blocks > index->online_log->head.blocks); } - } else if (next_mrec_end == index->online_log->tail.block + } else if (index->online_log->tail.block && + next_mrec_end == index->online_log->tail.block + index->online_log->tail.bytes) { ut_ad(next_mrec == index->online_log->tail.block + index->online_log->head.bytes); diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 60d2614c4e8..0575053739c 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -135,6 +135,10 @@ OS (provided we compiled Innobase with it in), otherwise we will use simulated aio we build below with threads. Currently we support native aio on windows and linux */ my_bool srv_use_native_aio; +#ifdef __linux__ +/* This enum is defined which linux native io method to use */ +ulong srv_linux_aio_method; +#endif my_bool srv_numa_interleave; /** copy of innodb_use_atomic_writes; @see innodb_init_params() */ my_bool srv_use_atomic_writes; diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index a2413258741..bf76e81f23a 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -995,12 +995,19 @@ srv_open_tmp_tablespace(bool create_new_db) return(err); } -/** Shutdown background threads, except the page cleaner. */ -static void srv_shutdown_threads() +/** Shutdown background threads, except the page cleaner. 
+@param init_abort set to true when InnoDB startup aborted */ +static void srv_shutdown_threads(bool init_abort= false) { ut_ad(!srv_undo_sources); srv_master_timer.reset(); - srv_shutdown_state = SRV_SHUTDOWN_EXIT_THREADS; + /* In case of InnoDB start up aborted, Don't change + the srv_shutdown_state. Because innodb_shutdown() + does call innodb_preshutdown() which changes the + srv_shutdown_state back to SRV_SHUTDOWN_INITIATED */ + if (!init_abort) { + srv_shutdown_state = SRV_SHUTDOWN_EXIT_THREADS; + } if (purge_sys.enabled()) { srv_purge_shutdown(); @@ -1070,7 +1077,7 @@ srv_init_abort_low( } srv_shutdown_bg_undo_sources(); - srv_shutdown_threads(); + srv_shutdown_threads(true); return(err); } @@ -1368,22 +1375,9 @@ dberr_t srv_start(bool create_new_db) } if (os_aio_init()) { - ib::error() << "Cannot initialize AIO sub-system"; - return(srv_init_abort(DB_ERROR)); } -#ifdef LINUX_NATIVE_AIO - if (srv_use_native_aio) { - ib::info() << "Using Linux native AIO"; - } -#endif -#ifdef HAVE_URING - if (srv_use_native_aio) { - ib::info() << "Using liburing"; - } -#endif - fil_system.create(srv_file_per_table ? 50000 : 5000); if (buf_pool.create()) { diff --git a/storage/maria/ma_crypt.c b/storage/maria/ma_crypt.c index fe1ea09e3ad..4a8da54f8b2 100644 --- a/storage/maria/ma_crypt.c +++ b/storage/maria/ma_crypt.c @@ -166,7 +166,7 @@ ma_crypt_read(MARIA_SHARE* share, uchar *buff, my_bool silent) iv_length != sizeof(((MARIA_CRYPT_DATA*)1)->scheme.iv) + 4) { my_printf_error(HA_ERR_UNSUPPORTED, - "Unsupported crypt scheme type: %d iv_length: %d\n", + "Unsupported crypt scheme type: %d iv_length: %d", MYF(ME_ERROR_LOG | (silent ? 
ME_WARNING : ME_FATAL)), type, iv_length); return 0; @@ -514,7 +514,7 @@ static int ma_encrypt(MARIA_SHARE *share, MARIA_CRYPT_DATA *crypt_data, my_errno= HA_ERR_DECRYPTION_FAILED; my_printf_error(HA_ERR_DECRYPTION_FAILED, - "failed to encrypt '%s' rc: %d dstlen: %u size: %u\n", + "failed to encrypt '%s' rc: %d dstlen: %u size: %u", MYF(ME_FATAL|ME_ERROR_LOG), share->open_file_name.str, rc, dstlen, size); return 1; @@ -544,7 +544,7 @@ static int ma_decrypt(MARIA_SHARE *share, MARIA_CRYPT_DATA *crypt_data, my_errno= HA_ERR_DECRYPTION_FAILED; if (!share->silence_encryption_errors) my_printf_error(HA_ERR_DECRYPTION_FAILED, - "failed to decrypt '%s' rc: %d dstlen: %u size: %u\n", + "failed to decrypt '%s' rc: %d dstlen: %u size: %u", MYF(ME_FATAL|ME_ERROR_LOG), share->open_file_name.str, rc, dstlen, size); return 1; diff --git a/storage/myisam/mi_info.c b/storage/myisam/mi_info.c index 3e4105796d1..a4e22f5f6a5 100644 --- a/storage/myisam/mi_info.c +++ b/storage/myisam/mi_info.c @@ -86,10 +86,7 @@ int mi_status(MI_INFO *info, register MI_ISAMINFO *x, uint flag) x->index_file_name = share->index_file_name; } if ((flag & HA_STATUS_TIME) && !mysql_file_fstat(info->dfile, &state, MYF(0))) - { - MSAN_STAT_WORKAROUND(&state); x->update_time=state.st_mtime; - } else x->update_time=0; if (flag & HA_STATUS_AUTO) diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c index 1a34bb85602..956bcda536e 100644 --- a/storage/myisam/myisamchk.c +++ b/storage/myisam/myisamchk.c @@ -130,6 +130,17 @@ int main(int argc, char **argv) #endif } /* main */ + +/* Free memory and exit */ + +void __attribute__ ((noreturn)) my_exit(int exit_state) +{ + free_defaults(default_argv); + my_end(MY_CHECK_ERROR); + exit(exit_state); +} + + enum options_mc { OPT_CHARSETS_DIR=256, OPT_SET_COLLATION,OPT_START_CHECK_POS, OPT_CORRECT_CHECKSUM, OPT_CREATE_MISSING_KEYS, OPT_KEY_BUFFER_SIZE, @@ -653,7 +664,7 @@ get_one_option(const struct my_option *opt, fprintf(stderr, "The value of the sort key is 
bigger than max key: %d.\n", MI_MAX_KEY); - exit(1); + my_exit(1); } } break; @@ -687,7 +698,9 @@ get_one_option(const struct my_option *opt, break; case 'V': print_version(); - exit(0); + free_defaults(default_argv); + my_end(MY_CHECK_ERROR); + my_exit(0); case OPT_CORRECT_CHECKSUM: if (argument == disabled_my_option) check_param.testflag&= ~T_CALC_CHECKSUM; @@ -704,7 +717,7 @@ get_one_option(const struct my_option *opt, FIND_TYPE_BASIC)) <= 0) { fprintf(stderr, "Invalid value of stats_method: %s.\n", argument); - exit(1); + my_exit(1); } switch (method-1) { case 0: @@ -728,10 +741,10 @@ get_one_option(const struct my_option *opt, #endif case 'H': my_print_help(my_long_options); - exit(0); + my_exit(0); case '?': usage(); - exit(0); + my_exit(0); } return 0; } @@ -747,7 +760,7 @@ static void get_options(register int *argc,register char ***argv) check_param.testflag|=T_WRITE_LOOP; if ((ho_error=handle_options(argc, argv, my_long_options, get_one_option))) - exit(ho_error); + my_exit(ho_error); /* If using repair, then update checksum if one uses --update-state */ if ((check_param.testflag & T_UPDATE_STATE) && @@ -757,7 +770,7 @@ static void get_options(register int *argc,register char ***argv) if (*argc == 0) { usage(); - exit(-1); + my_exit(-1); } if ((check_param.testflag & T_UNPACK) && @@ -766,7 +779,7 @@ static void get_options(register int *argc,register char ***argv) (void) fprintf(stderr, "%s: --unpack can't be used with --quick or --sort-records\n", my_progname_short); - exit(1); + my_exit(1); } if ((check_param.testflag & T_READONLY) && (check_param.testflag & @@ -776,11 +789,11 @@ static void get_options(register int *argc,register char ***argv) (void) fprintf(stderr, "%s: Can't use --readonly when repairing or sorting\n", my_progname_short); - exit(1); + my_exit(1); } if (init_tmpdir(&myisamchk_tmpdir, opt_tmpdir)) - exit(1); + my_exit(1); check_param.tmpdir=&myisamchk_tmpdir; check_param.key_cache_block_size= opt_key_cache_block_size; @@ -788,7 +801,7 
@@ static void get_options(register int *argc,register char ***argv) if (set_collation_name) if (!(set_collation= get_charset_by_name(set_collation_name, MYF(MY_UTF8_IS_UTF8MB3 | MY_WME)))) - exit(1); + my_exit(1); myisam_block_size=(uint) 1 << my_bit_log2_uint64(opt_myisam_block_size); return; diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 8ce050c3f26..03603735c31 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -40,6 +40,11 @@ IF(HAVE_SCHED_GETCPU) ADD_DEFINITIONS(-DHAVE_SCHED_GETCPU=1 -DROCKSDB_SCHED_GETCPU_PRESENT) ENDIF() +CHECK_FUNCTION_EXISTS(getauxval HAVE_AUXV_GETAUXVAL) +IF(HAVE_AUXV_GETAUXVAL) + ADD_DEFINITIONS(-DROCKSDB_AUXV_GETAUXVAL_PRESENT) +ENDIF() + IF(WITH_VALGRIND) ADD_DEFINITIONS(-DROCKSDB_VALGRIND_RUN=1) ENDIF() @@ -72,15 +77,6 @@ IF(MSVC_ARM64) SKIP_ROCKSDB_PLUGIN("Windows ARM64 not supported") ENDIF() -# -# Also, disable on ARM64 when not Linux -# Requires submodule update to v6.16.3 -# containing commit https://github.com/facebook/rocksdb/commit/ee4bd4780b321ddb5f92a0f4eb956f2a2ebd60dc -# -IF(CMAKE_SYSTEM_PROCESSOR MATCHES "(arm64|aarch64)" AND NOT CMAKE_SYSTEM_NAME STREQUAL "Linux") - SKIP_ROCKSDB_PLUGIN("ARM64 disabled on all except Linux") -ENDIF() - # This plugin needs recent C++ compilers (it is using C++11 features) # Skip build for the old compilers SET(CXX11_FLAGS) diff --git a/storage/rocksdb/build_rocksdb.cmake b/storage/rocksdb/build_rocksdb.cmake index 3bb05c06caf..1cdbad49419 100644 --- a/storage/rocksdb/build_rocksdb.cmake +++ b/storage/rocksdb/build_rocksdb.cmake @@ -481,7 +481,7 @@ else() util/crc32c_ppc_asm.S) endif(CMAKE_SYSTEM_PROCESSOR MATCHES "ppc64") # aarch - if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64") + if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64") INCLUDE(CheckCXXCompilerFlag) CHECK_CXX_COMPILER_FLAG("-march=armv8-a+crc+crypto" HAS_ARMV8_CRC) if(HAS_ARMV8_CRC) @@ -490,7 +490,7 @@ else() list(APPEND ROCKSDB_SOURCES 
util/crc32c_arm64.cc) endif(HAS_ARMV8_CRC) - endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64") + endif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64") endif() SET(SOURCES) FOREACH(s ${ROCKSDB_SOURCES}) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result b/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result index 38481b3e49a..c33569af619 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result @@ -26,7 +26,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `pk` int(11) NOT NULL, - `a` int(11), + `a` int(11) DEFAULT NULL, `c` char(8) DEFAULT NULL, `b` int(11) DEFAULT NULL, PRIMARY KEY (`pk`) @@ -37,7 +37,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `b1` char(8) DEFAULT NULL, `pk` int(11) NOT NULL, - `a` int(11), + `a` int(11) DEFAULT NULL, `c` char(8) DEFAULT NULL, PRIMARY KEY (`pk`) ) ENGINE=ROCKSDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci @@ -46,7 +46,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `pk` int(11) NOT NULL, - `a` int(11), + `a` int(11) DEFAULT NULL, `c` char(8) DEFAULT NULL, `b` int(11) DEFAULT NULL, PRIMARY KEY (`pk`) @@ -56,7 +56,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `pk` int(11) NOT NULL, - `a` int(11), + `a` int(11) DEFAULT NULL, `c` char(8) DEFAULT NULL, `b` char(8) DEFAULT NULL, PRIMARY KEY (`pk`) @@ -66,7 +66,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `pk` int(11) NOT NULL, - `a` int(11), + `a` int(11) DEFAULT NULL, `c` char(8) DEFAULT NULL, `b` int(11) DEFAULT NULL, PRIMARY KEY (`pk`) @@ -77,7 +77,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `b` char(8) DEFAULT NULL, `pk` int(11) NOT NULL, - `a` int(11), + `a` int(11) DEFAULT NULL, `c` char(8) DEFAULT NULL, PRIMARY KEY (`pk`) ) ENGINE=ROCKSDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci @@ -86,7 +86,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( 
`pk` int(11) NOT NULL, - `a` int(11), + `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL, `c` char(8) DEFAULT NULL, PRIMARY KEY (`pk`) @@ -96,7 +96,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `pk` int(11) NOT NULL, - `a` int(11), + `a` int(11) DEFAULT NULL, `c` char(8) DEFAULT NULL, PRIMARY KEY (`pk`) ) ENGINE=ROCKSDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci @@ -107,7 +107,7 @@ SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( `pk` int(11) NOT NULL, - `a` int(11), + `a` int(11) DEFAULT NULL, `c` char(8) DEFAULT NULL, PRIMARY KEY (`pk`) ) ENGINE=ROCKSDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_32907.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_32907.result new file mode 100644 index 00000000000..19eb4a9034f --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_32907.result @@ -0,0 +1,23 @@ +for master_1 +for child2 +for child3 +set spider_same_server_link= 1; +CREATE SERVER srv FOREIGN DATA WRAPPER mysql +OPTIONS (SOCKET "$MASTER_1_MYSOCK", DATABASE 'test',user 'root'); +create table t2 (c int); +create table t1 (c int) ENGINE=Spider +COMMENT='WRAPPER "mysql", srv "srv",TABLE "t2"'; +Warnings: +Warning 138 Spider table params in COMMENT or CONNECTION strings have been deprecated and will be removed in a future release. Please use table options instead. 
+insert into t1 values (3), (NULL); +explain select nvl(sum(c), 0) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 +select nvl(sum(c), 0) from t1; +nvl(sum(c), 0) +3 +drop table t1, t2; +drop server srv; +for master_1 +for child2 +for child3 diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_32907.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_32907.test new file mode 100644 index 00000000000..50835f4e47d --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_32907.test @@ -0,0 +1,26 @@ +--disable_query_log +--disable_result_log +--source ../../t/test_init.inc +--enable_result_log +--enable_query_log +set spider_same_server_link= 1; +evalp CREATE SERVER srv FOREIGN DATA WRAPPER mysql +OPTIONS (SOCKET "$MASTER_1_MYSOCK", DATABASE 'test',user 'root'); + +create table t2 (c int); +create table t1 (c int) ENGINE=Spider +COMMENT='WRAPPER "mysql", srv "srv",TABLE "t2"'; + +insert into t1 values (3), (NULL); + +explain select nvl(sum(c), 0) from t1; +select nvl(sum(c), 0) from t1; +drop table t1, t2; + +drop server srv; + +--disable_query_log +--disable_result_log +--source ../../t/test_deinit.inc +--enable_result_log +--enable_query_log diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc index 6f9ae7c8dc7..f55458ab1e1 100644 --- a/storage/spider/spd_db_conn.cc +++ b/storage/spider/spd_db_conn.cc @@ -6899,11 +6899,21 @@ int spider_db_print_item_type( DBUG_ENTER("spider_db_print_item_type"); DBUG_PRINT("info",("spider COND type=%d", item->type())); - if (item->type() == Item::REF_ITEM && - ((Item_ref*)item)->ref_type() == Item_ref::DIRECT_REF) + if (item->type() == Item::REF_ITEM) { - item= item->real_item(); - DBUG_PRINT("info",("spider new COND type=%d", item->type())); + const auto rtype= ((Item_ref*)item)->ref_type(); + /* + The presence of an Item_aggregate_ref tends to lead to the query + being broken at the execution stage. 
+ */ + if (rtype == Item_ref::AGGREGATE_REF && !str) + DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM); + DBUG_ASSERT(rtype != Item_ref::AGGREGATE_REF); + if (rtype == Item_ref::DIRECT_REF) + { + item= item->real_item(); + DBUG_PRINT("info", ("spider new COND type=%d", item->type())); + } } switch (item->type()) { @@ -7331,6 +7341,10 @@ int spider_db_open_item_ref( } DBUG_RETURN(0); } + /* + TODO: MDEV-25116 is the same case as MDEV-32907 (having an + Item_aggregate_ref). Perhaps the following is redundant. + */ DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM); // MDEV-25116 } DBUG_RETURN(spider_db_open_item_ident((Item_ident *) item_ref, spider, str, diff --git a/storage/spider/spd_db_include.h b/storage/spider/spd_db_include.h index 19cb0646a40..aba9f1d9849 100644 --- a/storage/spider/spd_db_include.h +++ b/storage/spider/spd_db_include.h @@ -844,9 +844,6 @@ public: virtual void free_result() = 0; virtual SPIDER_DB_ROW *current_row() = 0; virtual SPIDER_DB_ROW *fetch_row(MY_BITMAP *skips = NULL) = 0; - virtual SPIDER_DB_ROW *fetch_row_from_result_buffer( - spider_db_result_buffer *spider_res_buf - ) = 0; virtual SPIDER_DB_ROW *fetch_row_from_tmp_table( TABLE *tmp_table ) = 0; diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc index 68ed931b11a..2c6f13d5d01 100644 --- a/storage/spider/spd_db_mysql.cc +++ b/storage/spider/spd_db_mysql.cc @@ -760,30 +760,6 @@ SPIDER_DB_ROW *spider_db_mbase_result::fetch_row(MY_BITMAP *skips) DBUG_RETURN((SPIDER_DB_ROW *) &row); } -SPIDER_DB_ROW *spider_db_mbase_result::fetch_row_from_result_buffer( - spider_db_result_buffer *spider_res_buf -) { - DBUG_ENTER("spider_db_mbase_result::fetch_row_from_result_buffer"); - DBUG_PRINT("info",("spider this=%p", this)); - if (!(row.row = mysql_fetch_row(db_result))) - { - if (mysql_errno(((spider_db_mbase *) db_conn)->db_conn)) - { - store_error_num = mysql_errno(((spider_db_mbase *) db_conn)->db_conn); - my_message(store_error_num, - mysql_error(((spider_db_mbase *) db_conn)->db_conn), 
MYF(0)); - } else - store_error_num = HA_ERR_END_OF_FILE; - DBUG_RETURN(NULL); - } - row.lengths = mysql_fetch_lengths(db_result); - row.field_count = mysql_num_fields(db_result); - row.row_first = row.row; - row.lengths_first = row.lengths; - row.record_size = 0; - DBUG_RETURN((SPIDER_DB_ROW *) &row); -} - SPIDER_DB_ROW *spider_db_mbase_result::fetch_row_from_tmp_table( TABLE *tmp_table ) { @@ -4847,13 +4823,9 @@ int spider_db_mbase_util::open_item_func( ) { DBUG_ENTER("spider_db_mbase_util::open_item_func"); - int error = check_item_func(item_func, spider, alias, - alias_length, use_fields, fields); - if (error) - DBUG_RETURN(error); if (!str) - DBUG_RETURN(0); - + DBUG_RETURN(check_item_func(item_func, spider, alias, + alias_length, use_fields, fields)); DBUG_RETURN(print_item_func(item_func, spider, str, alias, alias_length, use_fields, fields)); } @@ -5013,8 +4985,6 @@ int spider_db_mbase_util::print_item_func( int use_pushdown_udf, case_when_start, case_when_count; bool merge_func = FALSE, case_with_else; DBUG_ENTER("spider_db_mbase_util::print_item_func"); - DBUG_ASSERT(!check_item_func(item_func, spider, alias, alias_length, - use_fields, fields)); DBUG_ASSERT(str); if (str->reserve(SPIDER_SQL_OPEN_PAREN_LEN)) diff --git a/storage/spider/spd_db_mysql.h b/storage/spider/spd_db_mysql.h index 8e8d8ebd963..38f7af9dcfa 100644 --- a/storage/spider/spd_db_mysql.h +++ b/storage/spider/spd_db_mysql.h @@ -301,9 +301,6 @@ public: void free_result() override; SPIDER_DB_ROW *current_row() override; SPIDER_DB_ROW *fetch_row(MY_BITMAP *) override; - SPIDER_DB_ROW *fetch_row_from_result_buffer( - spider_db_result_buffer *spider_res_buf - ) override; SPIDER_DB_ROW *fetch_row_from_tmp_table( TABLE *tmp_table ) override; diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc index d614cec3a9c..84cb22898da 100644 --- a/storage/spider/spd_table.cc +++ b/storage/spider/spd_table.cc @@ -6261,8 +6261,8 @@ int spider_db_done( } } - 
spider_free_crd_threads(spider_table_crd_thread); - spider_free_sts_threads(spider_table_sts_thread); + spider_free_sts_crd_threads(spider_table_crd_thread); + spider_free_sts_crd_threads(spider_table_sts_thread); spider_free(NULL, spider_table_sts_thread, MYF(0)); for (roop_count= spider_udf_table_mon_mutex_count - 1; @@ -6742,9 +6742,9 @@ int spider_db_init( ) goto error_alloc_table_sts_crd_threads; - if ((error_num = spider_create_sts_threads(spider_table_sts_thread))) + if ((error_num = spider_create_sts_crd_threads(spider_table_sts_thread, 1))) goto error_init_table_sts_threads; - if ((error_num = spider_create_crd_threads(spider_table_crd_thread))) + if ((error_num = spider_create_sts_crd_threads(spider_table_crd_thread, 0))) goto error_init_table_crd_threads; /** Populates `spider_dbton` with available `SPIDER_DBTON`s */ @@ -6772,9 +6772,9 @@ error_init_dbton: spider_dbton[roop_count].deinit(); } error_init_table_crd_threads: - spider_free_crd_threads(spider_table_crd_thread); + spider_free_sts_crd_threads(spider_table_crd_thread); error_init_table_sts_threads: - spider_free_sts_threads(spider_table_sts_thread); + spider_free_sts_crd_threads(spider_table_sts_thread); error_alloc_table_sts_crd_threads: spider_free(NULL, spider_table_sts_thread, MYF(0)); roop_count= spider_udf_table_mon_mutex_count - 1; @@ -8784,32 +8784,46 @@ void spider_free_spider_object_for_share( DBUG_VOID_RETURN; } -int spider_create_sts_threads( - SPIDER_THREAD *spider_thread +int spider_create_sts_crd_threads( + SPIDER_THREAD *spider_thread, + bool is_sts ) { int error_num; - DBUG_ENTER("spider_create_sts_threads"); - if (mysql_mutex_init(spd_key_mutex_bg_stss, + DBUG_ENTER("spider_create_sts_crd_threads"); +#ifdef HAVE_PSI_INTERFACE + PSI_mutex_key mutex_bg= is_sts ? spd_key_mutex_bg_stss : + spd_key_mutex_bg_crds; + PSI_cond_key cond_bg= is_sts ? spd_key_cond_bg_stss : + spd_key_cond_bg_crds; + PSI_cond_key cond_bg_syncs= is_sts ? 
spd_key_cond_bg_sts_syncs : + spd_key_cond_bg_crd_syncs; +#endif + if (mysql_mutex_init(mutex_bg, &spider_thread->mutex, MY_MUTEX_INIT_FAST)) { error_num = HA_ERR_OUT_OF_MEM; goto error_mutex_init; } - if (mysql_cond_init(spd_key_cond_bg_stss, + if (mysql_cond_init(cond_bg, &spider_thread->cond, NULL)) { error_num = HA_ERR_OUT_OF_MEM; goto error_cond_init; } - if (mysql_cond_init(spd_key_cond_bg_sts_syncs, + if (mysql_cond_init(cond_bg_syncs, &spider_thread->sync_cond, NULL)) { error_num = HA_ERR_OUT_OF_MEM; goto error_sync_cond_init; } - if (mysql_thread_create(spd_key_thd_bg_stss, &spider_thread->thread, - &spider_pt_attr, spider_table_bg_sts_action, (void *) spider_thread) - ) + error_num = is_sts ? + mysql_thread_create(spd_key_thd_bg_stss, &spider_thread->thread, + &spider_pt_attr, spider_table_bg_sts_action, + (void *) spider_thread) : + mysql_thread_create(spd_key_thd_bg_crds, &spider_thread->thread, + &spider_pt_attr, spider_table_bg_crd_action, + (void *) spider_thread); + if (error_num) { error_num = HA_ERR_OUT_OF_MEM; goto error_thread_create; @@ -8826,11 +8840,11 @@ error_mutex_init: DBUG_RETURN(error_num); } -void spider_free_sts_threads( +void spider_free_sts_crd_threads( SPIDER_THREAD *spider_thread ) { bool thread_killed; - DBUG_ENTER("spider_free_sts_threads"); + DBUG_ENTER("spider_free_sts_crd_threads"); pthread_mutex_lock(&spider_thread->mutex); thread_killed = spider_thread->killed; spider_thread->killed = TRUE; @@ -8852,86 +8866,20 @@ void spider_free_sts_threads( DBUG_VOID_RETURN; } -int spider_create_crd_threads( - SPIDER_THREAD *spider_thread -) { - int error_num; - DBUG_ENTER("spider_create_crd_threads"); - if (mysql_mutex_init(spd_key_mutex_bg_crds, - &spider_thread->mutex, MY_MUTEX_INIT_FAST)) - { - error_num = HA_ERR_OUT_OF_MEM; - goto error_mutex_init; - } - if (mysql_cond_init(spd_key_cond_bg_crds, - &spider_thread->cond, NULL)) - { - error_num = HA_ERR_OUT_OF_MEM; - goto error_cond_init; - } - if 
(mysql_cond_init(spd_key_cond_bg_crd_syncs, - &spider_thread->sync_cond, NULL)) - { - error_num = HA_ERR_OUT_OF_MEM; - goto error_sync_cond_init; - } - if (mysql_thread_create(spd_key_thd_bg_crds, &spider_thread->thread, - &spider_pt_attr, spider_table_bg_crd_action, (void *) spider_thread) - ) - { - error_num = HA_ERR_OUT_OF_MEM; - goto error_thread_create; - } - DBUG_RETURN(0); - -error_thread_create: - pthread_cond_destroy(&spider_thread->sync_cond); -error_sync_cond_init: - pthread_cond_destroy(&spider_thread->cond); -error_cond_init: - pthread_mutex_destroy(&spider_thread->mutex); -error_mutex_init: - DBUG_RETURN(error_num); -} - -void spider_free_crd_threads( - SPIDER_THREAD *spider_thread -) { - bool thread_killed; - DBUG_ENTER("spider_free_crd_threads"); - pthread_mutex_lock(&spider_thread->mutex); - thread_killed = spider_thread->killed; - spider_thread->killed = TRUE; - if (!thread_killed) - { - if (spider_thread->thd_wait) - { - pthread_cond_signal(&spider_thread->cond); - } - pthread_cond_wait(&spider_thread->sync_cond, &spider_thread->mutex); - } - pthread_mutex_unlock(&spider_thread->mutex); - pthread_join(spider_thread->thread, NULL); - pthread_cond_destroy(&spider_thread->sync_cond); - pthread_cond_destroy(&spider_thread->cond); - pthread_mutex_destroy(&spider_thread->mutex); - spider_thread->thd_wait = FALSE; - spider_thread->killed = FALSE; - DBUG_VOID_RETURN; -} - -void *spider_table_bg_sts_action( - void *arg +static void *spider_table_bg_sts_crd_action( + void *arg, + bool is_sts ) { SPIDER_THREAD *thread = (SPIDER_THREAD *) arg; SPIDER_SHARE *share; SPIDER_TRX *trx; int error_num; ha_spider *spider; + TABLE *table; /* only needed for crd */ SPIDER_CONN **conns; THD *thd; my_thread_init(); - DBUG_ENTER("spider_table_bg_sts_action"); + DBUG_ENTER("spider_table_bg_sts_crd_action"); /* init start */ pthread_mutex_lock(&thread->mutex); if (!(thd = spider_create_sys_thd(thread))) @@ -8946,7 +8894,8 @@ void *spider_table_bg_sts_action( #ifdef 
HAVE_PSI_INTERFACE mysql_thread_set_psi_id(thd->thread_id); #endif - thd_proc_info(thd, "Spider table background statistics action handler"); + thd_proc_info(thd, "Spider table background statistics/cardinality" + " action handler"); if (!(trx = spider_get_trx(NULL, FALSE, &error_num))) { spider_destroy_sys_thd(thd); @@ -8962,10 +8911,6 @@ void *spider_table_bg_sts_action( trx->thd = thd; /* init end */ - if (thd->killed) - { - thread->killed = TRUE; - } if (thd->killed) { thread->killed = TRUE; @@ -8973,10 +8918,10 @@ void *spider_table_bg_sts_action( while (TRUE) { - DBUG_PRINT("info",("spider bg sts loop start")); + DBUG_PRINT("info",("spider bg sts/crd loop start")); if (thread->killed) { - DBUG_PRINT("info",("spider bg sts kill start")); + DBUG_PRINT("info",("spider bg sts/crd kill start")); trx->thd = NULL; spider_free_trx(trx, TRUE); spider_destroy_sys_thd(thd); @@ -8990,7 +8935,7 @@ void *spider_table_bg_sts_action( } if (!thread->queue_first) { - DBUG_PRINT("info",("spider bg sts has no job")); + DBUG_PRINT("info",("spider bg sts/crd has no job")); thread->thd_wait = TRUE; pthread_cond_wait(&thread->cond, &thread->mutex); thread->thd_wait = FALSE; @@ -8999,155 +8944,16 @@ void *spider_table_bg_sts_action( continue; } share = (SPIDER_SHARE *) thread->queue_first; - share->sts_working = TRUE; + if (is_sts) + share->sts_working = TRUE; + else + share->crd_working = TRUE; pthread_mutex_unlock(&thread->mutex); - - spider = share->sts_spider; - conns = spider->conns; - if (spider->search_link_idx < 0) - { - spider->wide_handler->trx = trx; - spider_trx_set_link_idx_for_all(spider); - spider->search_link_idx = spider_conn_first_link_idx(thd, - share->link_statuses, share->access_balances, spider->conn_link_idx, - share->link_count, SPIDER_LINK_STATUS_OK); - } - if (spider->search_link_idx >= 0) - { - DBUG_PRINT("info", - ("spider difftime=%f", - difftime(share->bg_sts_try_time, share->sts_get_time))); - DBUG_PRINT("info", - ("spider bg_sts_interval=%f", 
share->bg_sts_interval)); - if (difftime(share->bg_sts_try_time, share->sts_get_time) >= - share->bg_sts_interval) - { - if (!conns[spider->search_link_idx]) - { - spider_get_conn(share, spider->search_link_idx, - share->conn_keys[spider->search_link_idx], trx, - spider, FALSE, FALSE, &error_num); - if (conns[spider->search_link_idx]) - { - conns[spider->search_link_idx]->error_mode = 0; - } else { - spider->search_link_idx = -1; - } - } - DBUG_PRINT("info", - ("spider search_link_idx=%d", spider->search_link_idx)); - if (spider->search_link_idx >= 0 && conns[spider->search_link_idx]) - { - if (spider_get_sts(share, spider->search_link_idx, - share->bg_sts_try_time, spider, - share->bg_sts_interval, share->bg_sts_mode, - share->bg_sts_sync, - 2, HA_STATUS_CONST | HA_STATUS_VARIABLE)) - { - spider->search_link_idx = -1; - } - } - } - } - memset(spider->need_mons, 0, sizeof(int) * share->link_count); - pthread_mutex_lock(&thread->mutex); - if (thread->queue_first == thread->queue_last) - { - thread->queue_first = NULL; - thread->queue_last = NULL; - } else { - thread->queue_first = share->sts_next; - share->sts_next->sts_prev = NULL; - share->sts_next = NULL; - } - share->sts_working = FALSE; - share->sts_wait = FALSE; - if (thread->first_free_wait) - { - pthread_cond_signal(&thread->sync_cond); - pthread_cond_wait(&thread->cond, &thread->mutex); - if (thd->killed) - thread->killed = TRUE; - } - } -} - -void *spider_table_bg_crd_action( - void *arg -) { - SPIDER_THREAD *thread = (SPIDER_THREAD *) arg; - SPIDER_SHARE *share; - SPIDER_TRX *trx; - int error_num; - ha_spider *spider; - TABLE *table; - SPIDER_CONN **conns; - THD *thd; - my_thread_init(); - DBUG_ENTER("spider_table_bg_crd_action"); - /* init start */ - pthread_mutex_lock(&thread->mutex); - if (!(thd = spider_create_sys_thd(thread))) - { - thread->thd_wait = FALSE; - thread->killed = FALSE; - pthread_mutex_unlock(&thread->mutex); - my_thread_end(); - DBUG_RETURN(NULL); - } - SPIDER_set_next_thread_id(thd); 
-#ifdef HAVE_PSI_INTERFACE - mysql_thread_set_psi_id(thd->thread_id); -#endif - thd_proc_info(thd, "Spider table background cardinality action handler"); - if (!(trx = spider_get_trx(NULL, FALSE, &error_num))) - { - spider_destroy_sys_thd(thd); - thread->thd_wait = FALSE; - thread->killed = FALSE; - pthread_mutex_unlock(&thread->mutex); -#if !defined(MYSQL_DYNAMIC_PLUGIN) || !defined(_WIN32) - set_current_thd(nullptr); -#endif - my_thread_end(); - DBUG_RETURN(NULL); - } - trx->thd = thd; - /* init end */ - - while (TRUE) - { - DBUG_PRINT("info",("spider bg crd loop start")); - if (thread->killed) - { - DBUG_PRINT("info",("spider bg crd kill start")); - trx->thd = NULL; - spider_free_trx(trx, TRUE); - spider_destroy_sys_thd(thd); - pthread_cond_signal(&thread->sync_cond); - pthread_mutex_unlock(&thread->mutex); -#if !defined(MYSQL_DYNAMIC_PLUGIN) || !defined(_WIN32) - set_current_thd(nullptr); -#endif - my_thread_end(); - DBUG_RETURN(NULL); - } - if (!thread->queue_first) - { - DBUG_PRINT("info",("spider bg crd has no job")); - thread->thd_wait = TRUE; - pthread_cond_wait(&thread->cond, &thread->mutex); - thread->thd_wait = FALSE; - if (thd->killed) - thread->killed = TRUE; - continue; - } - share = (SPIDER_SHARE *) thread->queue_first; - share->crd_working = TRUE; - pthread_mutex_unlock(&thread->mutex); - table = &share->table; - spider = share->crd_spider; + if (is_sts) + spider = share->sts_spider; + else + spider = share->crd_spider; conns = spider->conns; if (spider->search_link_idx < 0) { @@ -9159,13 +8965,13 @@ void *spider_table_bg_crd_action( } if (spider->search_link_idx >= 0) { - DBUG_PRINT("info", - ("spider difftime=%f", - difftime(share->bg_crd_try_time, share->crd_get_time))); - DBUG_PRINT("info", - ("spider bg_crd_interval=%f", share->bg_crd_interval)); - if (difftime(share->bg_crd_try_time, share->crd_get_time) >= - share->bg_crd_interval) + double diff_time= is_sts ? 
+ difftime(share->bg_sts_try_time, share->sts_get_time) : + difftime(share->bg_crd_try_time, share->crd_get_time); + double interval= is_sts? share->bg_sts_interval : share->bg_crd_interval; + DBUG_PRINT("info", ("spider difftime=%f", diff_time)); + DBUG_PRINT("info", ("spider bg_sts_interval=%f", interval)); + if (diff_time >= interval) { if (!conns[spider->search_link_idx]) { @@ -9183,11 +8989,17 @@ void *spider_table_bg_crd_action( ("spider search_link_idx=%d", spider->search_link_idx)); if (spider->search_link_idx >= 0 && conns[spider->search_link_idx]) { - if (spider_get_crd(share, spider->search_link_idx, - share->bg_crd_try_time, spider, table, - share->bg_crd_interval, share->bg_crd_mode, - share->bg_crd_sync, - 2)) + int result = is_sts ? + spider_get_sts(share, spider->search_link_idx, + share->bg_sts_try_time, spider, + share->bg_sts_interval, share->bg_sts_mode, + share->bg_sts_sync, + 2, HA_STATUS_CONST | HA_STATUS_VARIABLE) : + spider_get_crd(share, spider->search_link_idx, + share->bg_crd_try_time, spider, table, + share->bg_crd_interval, share->bg_crd_mode, + share->bg_crd_sync, 2); + if (result) { spider->search_link_idx = -1; } @@ -9201,12 +9013,29 @@ void *spider_table_bg_crd_action( thread->queue_first = NULL; thread->queue_last = NULL; } else { - thread->queue_first = share->crd_next; - share->crd_next->crd_prev = NULL; - share->crd_next = NULL; + if (is_sts) + { + thread->queue_first = share->sts_next; + share->sts_next->sts_prev = NULL; + share->sts_next = NULL; + } + else + { + thread->queue_first = share->crd_next; + share->crd_next->crd_prev = NULL; + share->crd_next = NULL; + } + } + if (is_sts) + { + share->sts_working= FALSE; + share->sts_wait= FALSE; + } + else + { + share->crd_working= FALSE; + share->crd_wait= FALSE; } - share->crd_working = FALSE; - share->crd_wait = FALSE; if (thread->first_free_wait) { pthread_cond_signal(&thread->sync_cond); @@ -9217,6 +9046,18 @@ void *spider_table_bg_crd_action( } } +void 
*spider_table_bg_sts_action(void *arg) +{ + DBUG_ENTER("spider_table_bg_sts_action"); + DBUG_RETURN(spider_table_bg_sts_crd_action(arg, true)); +} + +void *spider_table_bg_crd_action(void *arg) +{ + DBUG_ENTER("spider_table_bg_crd_action"); + DBUG_RETURN(spider_table_bg_sts_crd_action(arg, false)); +} + void spider_table_add_share_to_sts_thread( SPIDER_SHARE *share ) { diff --git a/storage/spider/spd_table.h b/storage/spider/spd_table.h index 3c2cd7823ab..b48d8ada588 100644 --- a/storage/spider/spd_table.h +++ b/storage/spider/spd_table.h @@ -446,19 +446,12 @@ void spider_free_spider_object_for_share( ha_spider **spider ); -int spider_create_sts_threads( - SPIDER_THREAD *spider_thread +int spider_create_sts_crd_threads( + SPIDER_THREAD *spider_thread, + bool is_sts ); -void spider_free_sts_threads( - SPIDER_THREAD *spider_thread -); - -int spider_create_crd_threads( - SPIDER_THREAD *spider_thread -); - -void spider_free_crd_threads( +void spider_free_sts_crd_threads( SPIDER_THREAD *spider_thread ); diff --git a/strings/json_lib.c b/strings/json_lib.c index 4bf39dd7b0d..43bc29811c0 100644 --- a/strings/json_lib.c +++ b/strings/json_lib.c @@ -1656,13 +1656,13 @@ int json_unescape(CHARSET_INFO *json_cs, } if (c_len == MY_CS_ILUNI) { - return -1; + return JSON_ERROR_ILLEGAL_SYMBOL; } /* Result buffer is too small. */ - return -1; + return JSON_ERROR_OUT_OF_SPACE; } - return s.error==JE_EOS ? (int)(res - res_b) : -1; + return s.error==JE_EOS ? (int)(res - res_b) : JSON_ERROR_OUT_OF_SPACE; } diff --git a/strings/my_vsnprintf.c b/strings/my_vsnprintf.c index 4525cd06d0b..cc42255f526 100644 --- a/strings/my_vsnprintf.c +++ b/strings/my_vsnprintf.c @@ -723,13 +723,7 @@ size_t my_vsnprintf_ex(CHARSET_INFO *cs, char *to, size_t n, case 'g': { double d; -#if __has_feature(memory_sanitizer) /* QQ: MSAN has double trouble? */ - __msan_check_mem_is_initialized(ap, sizeof(double)); -#endif d= va_arg(ap, double); -#if __has_feature(memory_sanitizer) /* QQ: MSAN has double trouble? 
*/ - __msan_unpoison(&d, sizeof(double)); -#endif to= process_dbl_arg(to, end, width, d, arg_type); continue; } diff --git a/support-files/MacOSX/ReadMe.txt b/support-files/MacOSX/ReadMe.txt index 31fb0d97e14..efa10075834 100644 --- a/support-files/MacOSX/ReadMe.txt +++ b/support-files/MacOSX/ReadMe.txt @@ -80,7 +80,7 @@ Note lib Libraries man Unix manual pages mysql-test MySQL test suite - scripts Contains the mysql_install_db script + scripts Contains the mariadb-install-db script share/mysql Error message files sql-bench Benchmarks support-files Scripts and sample configuration files @@ -288,7 +288,7 @@ Note /usr/libexec The mariadbd server /usr/share/man Unix manual pages /usr/share/mysql/mysql-test MySQL test suite - /usr/share/mysql Contains the mysql_install_db script + /usr/share/mysql Contains the mariadb-install-db script /var/mysql/mysql.sock The location of the MySQL Unix socket Note diff --git a/support-files/MacOSX/postflight.sh b/support-files/MacOSX/postflight.sh index 752eddc4241..223ff0d657d 100644 --- a/support-files/MacOSX/postflight.sh +++ b/support-files/MacOSX/postflight.sh @@ -23,13 +23,13 @@ # installation has been performed. # # This script will install the MySQL privilege tables using the -# "mysql_install_db" script and will correct the ownerships of these files +# "mariadb-install-db" script and will correct the ownerships of these files # afterwards. # if cd @prefix@ ; then if [ ! -f data/mysql/db.frm ] ; then - ./scripts/mysql_install_db --rpm + ./scripts/mariadb-install-db --rpm fi if [ -d data ] ; then diff --git a/support-files/binary-configure.sh b/support-files/binary-configure.sh index 47123668683..98b26c161d3 100644 --- a/support-files/binary-configure.sh +++ b/support-files/binary-configure.sh @@ -23,9 +23,9 @@ for arg do esac done -if test ! -x ./scripts/mysql_install_db +if test ! -x ./scripts/mariadb-install-db then - echo "I didn't find the script './scripts/mysql_install_db'." 
+ echo "I didn't find the script './scripts/mariadb-install-db'." echo "Please execute this script in the mysql distribution directory!" exit 1; fi @@ -38,7 +38,7 @@ echo "and start the MySQL server for you. If you run into any trouble, please" echo "consult the MySQL manual, that you can find in the Docs directory." echo "" -./scripts/mysql_install_db --no-defaults +./scripts/mariadb-install-db --no-defaults if [ $? = 0 ] then echo "Starting the mariadbd server. You can test that it is up and running" diff --git a/support-files/mariadb.service.in b/support-files/mariadb.service.in index b3e076aba16..3e779d9a842 100644 --- a/support-files/mariadb.service.in +++ b/support-files/mariadb.service.in @@ -85,7 +85,7 @@ ExecStartPre=/bin/sh -c "[ ! -e @bindir@/galera_recovery ] && VAR= || \ && echo _WSREP_START_POSITION=$VAR > @mysqlunixdir@/wsrep-start-position || exit 1" # Needed to create system tables etc. -# ExecStartPre=@scriptdir@/mysql_install_db -u mysql +# ExecStartPre=@scriptdir@/mariadb-install-db -u mysql # Start main service # MYSQLD_OPTS here is for users to set in /etc/systemd/system/mariadb.service.d/MY_SPECIAL.conf diff --git a/support-files/mariadb@.service.in b/support-files/mariadb@.service.in index 40d87817c10..01859763eb0 100644 --- a/support-files/mariadb@.service.in +++ b/support-files/mariadb@.service.in @@ -88,7 +88,7 @@ # User=%I # ProtectHome=false # ExecStartPre= -# ExecStartPre=@scriptdir@/mysql_install_db $MYSQLD_MULTI_INSTANCE \ +# ExecStartPre=@scriptdir@/mariadb-install-db $MYSQLD_MULTI_INSTANCE \ # --auth-root-authentication-method=socket --auth-root-socket-user=%I # Environment=MYSQLD_MULTI_INSTANCE="--defaults-file=/home/%I/my%I.cnf \ # --datadir=/home/%I/mysqldatadir --skip-networking --socket=@mysql-%I" @@ -197,8 +197,8 @@ ProtectSystem=full # Prevent accessing /home, /root and /run/user ProtectHome=true -# Needed to create system tables etc. 
-ExecStartPre=@scriptdir@/mysql_install_db $MYSQLD_MULTI_INSTANCE +# Needed to create system tables etc. --rpm to not do pam permission changes. +ExecStartPre=@scriptdir@/mariadb-install-db $MYSQLD_MULTI_INSTANCE --rpm # Start main service # A few variables are here: diff --git a/support-files/rpm/server.cnf b/support-files/rpm/server.cnf index 8d0b32bff0c..62a30672550 100644 --- a/support-files/rpm/server.cnf +++ b/support-files/rpm/server.cnf @@ -55,4 +55,3 @@ # If you use the same .cnf file for MariaDB of different versions, # use this group for options that older servers don't understand [mariadb-11.8] - diff --git a/tpool/CMakeLists.txt b/tpool/CMakeLists.txt index cf35633b090..a6db19d2794 100644 --- a/tpool/CMakeLists.txt +++ b/tpool/CMakeLists.txt @@ -1,22 +1,36 @@ -INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR} ${PROJECT_SOURCE_DIR}/include) +ADD_LIBRARY(tpool STATIC + aio_simulated.cc + tpool_structs.h + CMakeLists.txt + tpool.h + tpool_generic.cc + task_group.cc + task.cc + wait_notification.cc +) + +TARGET_INCLUDE_DIRECTORIES(tpool PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}" + PRIVATE ${PROJECT_SOURCE_DIR}/include) + IF(WIN32) - SET(EXTRA_SOURCES tpool_win.cc aio_win.cc) + TARGET_SOURCES(tpool PRIVATE tpool_win.cc aio_win.cc) ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "Linux") + TARGET_SOURCES(tpool PRIVATE aio_linux.cc) OPTION(WITH_URING "Require that io_uring be used" OFF) - OPTION(WITH_LIBAIO "Require that libaio is used, unless uring is there" OFF) + OPTION(WITH_LIBAIO "Require that libaio is used" OFF) IF(WITH_URING) SET(URING_REQUIRED REQUIRED) - ELSEIF(WITH_LIBAIO) + ENDIF() + IF(WITH_LIBAIO) SET(LIBAIO_REQUIRED REQUIRED) ENDIF() FIND_PACKAGE(URING QUIET ${URING_REQUIRED}) IF(URING_FOUND) SET(URING_FOUND ${URING_FOUND} PARENT_SCOPE) - SET(TPOOL_DEFINES "-DHAVE_URING" PARENT_SCOPE) - ADD_DEFINITIONS(-DHAVE_URING) - LINK_LIBRARIES(${URING_LIBRARIES}) - INCLUDE_DIRECTORIES(${URING_INCLUDE_DIRS}) - SET(EXTRA_SOURCES aio_liburing.cc) + 
TARGET_COMPILE_DEFINITIONS(tpool PUBLIC "-DHAVE_URING") + TARGET_LINK_LIBRARIES(tpool PRIVATE ${URING_LIBRARIES}) + TARGET_INCLUDE_DIRECTORIES(tpool PUBLIC ${URING_INCLUDE_DIRS}) + TARGET_SOURCES(tpool PRIVATE aio_liburing.cc) SET(CMAKE_REQUIRED_INCLUDES_SAVE ${CMAKE_REQUIRED_INCLUDES}) SET(CMAKE_REQUIRED_LIBRARIES_SAVE ${CMAKE_REQUIRED_LIBRARIES}) SET(CMAKE_REQUIRED_INCLUDES ${URING_INCLUDE_DIRS}) @@ -27,30 +41,17 @@ ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "Linux") IF(HAVE_IO_URING_MLOCK_SIZE) SET_SOURCE_FILES_PROPERTIES(aio_liburing.cc PROPERTIES COMPILE_FLAGS "-DHAVE_IO_URING_MLOCK_SIZE") ENDIF() - ELSE() - FIND_PACKAGE(LIBAIO QUIET ${LIBAIO_REQUIRED}) - IF(LIBAIO_FOUND) - SET(TPOOL_DEFINES "-DLINUX_NATIVE_AIO" PARENT_SCOPE) - ADD_DEFINITIONS(-DLINUX_NATIVE_AIO) - INCLUDE_DIRECTORIES(${LIBAIO_INCLUDE_DIRS}) - LINK_LIBRARIES(${LIBAIO_LIBRARIES}) - SET(EXTRA_SOURCES aio_linux.cc) - ENDIF() + ENDIF() + + FIND_PACKAGE(LIBAIO QUIET ${LIBAIO_REQUIRED}) + IF(LIBAIO_FOUND) + TARGET_COMPILE_DEFINITIONS(tpool PUBLIC "-DHAVE_LIBAIO") + TARGET_INCLUDE_DIRECTORIES(tpool PUBLIC ${LIBAIO_INCLUDE_DIRS}) + TARGET_LINK_LIBRARIES(tpool PRIVATE ${LIBAIO_LIBRARIES}) + TARGET_SOURCES(tpool PRIVATE aio_libaio.cc) ENDIF() ENDIF() -ADD_LIBRARY(tpool STATIC - aio_simulated.cc - tpool_structs.h - CMakeLists.txt - tpool.h - tpool_generic.cc - task_group.cc - task.cc - wait_notification.cc - ${EXTRA_SOURCES} -) - IF(URING_FOUND) ADD_DEPENDENCIES(tpool GenError) ENDIF() diff --git a/tpool/aio_libaio.cc b/tpool/aio_libaio.cc new file mode 100644 index 00000000000..30a6c54e7cf --- /dev/null +++ b/tpool/aio_libaio.cc @@ -0,0 +1,204 @@ +/* Copyright (C) 2019, 2020, MariaDB Corporation. + +This program is free software; you can redistribute itand /or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111 - 1301 USA*/ + +#include "tpool.h" +#include +#include +#include +#include "my_valgrind.h" +#include +#include + +/** + Invoke the io_getevents() system call, without timeout parameter. + + @param ctx context from io_setup() + @param min_nr minimum number of completion events to wait for + @param nr maximum number of completion events to collect + @param ev the collected events + + In https://pagure.io/libaio/c/7cede5af5adf01ad26155061cc476aad0804d3fc + the io_getevents() implementation in libaio was "optimized" so that it + would elide the system call when there are no outstanding requests + and a timeout was specified. + + The libaio code for dereferencing ctx would occasionally trigger + SIGSEGV if io_destroy() was concurrently invoked from another thread. + Hence, we have to use the raw system call. + + WHY are we doing this at all? + Because we want io_destroy() from another thread to interrupt io_getevents(). + + And, WHY do we want io_destroy() from another thread to interrupt + io_getevents()? + + Because there is no documented, libaio-friendly and + race-condition-free way to interrupt io_getevents(). io_destroy() + coupled with raw syscall seemed to work for us so far. + + Historical note: in the past, we used io_getevents with + timeouts. We'd wake up periodically, check for shutdown flag, return + from the main routine. This was admittedly safer, yet it did cost + periodic wakeups, which we are not willing to do anymore. 
+ + @note we also rely on the undocumented property, that io_destroy(ctx) + will make this version of io_getevents return EINVAL. +*/ +static int my_getevents(io_context_t ctx, long min_nr, long nr, io_event *ev) + noexcept +{ + int saved_errno= errno; + int ret= syscall(__NR_io_getevents, reinterpret_cast(ctx), + min_nr, nr, ev, 0); + if (ret < 0) + { + ret= -errno; + errno= saved_errno; + } + return ret; +} + + +/* + Linux AIO implementation, based on native AIO. + Needs libaio.h and -laio at the compile time. + + io_submit() is used to submit async IO. + + A single thread will collect the completion notification + with io_getevents() and forward io completion callback to + the worker threadpool. +*/ +namespace +{ +using namespace tpool; + +class aio_libaio final : public aio +{ + thread_pool *m_pool; + io_context_t m_io_ctx; + std::thread m_getevent_thread; + static std::atomic shutdown_in_progress; + + static void getevent_thread_routine(aio_libaio *aio) + { + my_thread_set_name("my_getevents"); + /* + We collect events in small batches to hopefully reduce the + number of system calls. 
+ */ + constexpr unsigned MAX_EVENTS= 256; + + aio->m_pool->m_worker_init_callback(); + io_event events[MAX_EVENTS]; + for (;;) + { + switch (int ret= my_getevents(aio->m_io_ctx, 1, MAX_EVENTS, events)) { + case -EINTR: + continue; + case -EINVAL: + if (shutdown_in_progress) + goto end; + /* fall through */ + default: + if (ret < 0) + { + fprintf(stderr, "io_getevents returned %d\n", ret); + abort(); + goto end; + } +#if __has_feature(memory_sanitizer) + MEM_MAKE_DEFINED(events, ret * sizeof *events); +#endif + for (int i= 0; i < ret; i++) + { + const io_event &event= events[i]; + aiocb *iocb= reinterpret_cast(event.obj); + if (static_cast(event.res) < 0) + { + iocb->m_err= -event.res; + iocb->m_ret_len= 0; + } + else + { +#if __has_feature(memory_sanitizer) + if (iocb->m_opcode == aio_opcode::AIO_PREAD) + MEM_MAKE_DEFINED(iocb->m_buffer, event.res); +#endif + iocb->m_ret_len= event.res; + iocb->m_err= 0; + finish_synchronous(iocb); + } + iocb->m_internal_task.m_func= iocb->m_callback; + iocb->m_internal_task.m_arg= iocb; + iocb->m_internal_task.m_group= iocb->m_group; + aio->m_pool->submit_task(&iocb->m_internal_task); + } + } + } +end: + aio->m_pool->m_worker_destroy_callback(); + } + +public: + aio_libaio(io_context_t ctx, thread_pool *pool) + : m_pool(pool), m_io_ctx(ctx), + m_getevent_thread(getevent_thread_routine, this) + { + } + + ~aio_libaio() + { + shutdown_in_progress= true; + io_destroy(m_io_ctx); + m_getevent_thread.join(); + shutdown_in_progress= false; + } + + int submit_io(aiocb *cb) override + { + io_prep_pread(&cb->m_iocb, cb->m_fh, cb->m_buffer, cb->m_len, cb->m_offset); + if (cb->m_opcode != aio_opcode::AIO_PREAD) + cb->m_iocb.aio_lio_opcode= IO_CMD_PWRITE; + iocb *icb= &cb->m_iocb; + int ret= io_submit(m_io_ctx, 1, &icb); + if (ret == 1) + return 0; + errno= -ret; + return -1; + } + + int bind(native_file_handle&) override { return 0; } + int unbind(const native_file_handle&) override { return 0; } + const char *get_implementation() const 
override { return "Linux native AIO"; }; +}; + +std::atomic aio_libaio::shutdown_in_progress; +} + +namespace tpool +{ +aio *create_libaio(thread_pool *pool, int max_io) +{ + io_context_t ctx; + memset(&ctx, 0, sizeof ctx); + if (int ret= io_setup(max_io, &ctx)) + { + fprintf(stderr, "io_setup(%d) returned %d\n", max_io, ret); + return nullptr; + } + return new aio_libaio(ctx, pool); +} +} diff --git a/tpool/aio_liburing.cc b/tpool/aio_liburing.cc index b9c4af7c367..fe3da892d5d 100644 --- a/tpool/aio_liburing.cc +++ b/tpool/aio_liburing.cc @@ -62,7 +62,10 @@ public: case EPERM: my_printf_error(ER_UNKNOWN_ERROR, "io_uring_queue_init() failed with EPERM:" - " sysctl kernel.io_uring_disabled has the value 2, or 1 and the user of the process is not a member of sysctl kernel.io_uring_group. (see man 2 io_uring_setup).", + " sysctl kernel.io_uring_disabled has the value 2, " + "or 1 and the user of the process is not a member of " + "sysctl kernel.io_uring_group. (see man 2 " + "io_uring_setup).", ME_ERROR_LOG | ME_WARNING); break; default: @@ -81,8 +84,9 @@ public: thread_= std::thread(thread_routine, this); } + const char *get_implementation() const override { return "io_uring"; }; - ~aio_uring() noexcept + ~aio_uring() noexcept override { { std::lock_guard _(mutex_); @@ -94,7 +98,7 @@ public: { my_printf_error(ER_UNKNOWN_ERROR, "io_uring_submit() returned %d during shutdown:" - " this may cause a hang\n", + " this may cause a hang", ME_ERROR_LOG | ME_FATAL, ret); abort(); } @@ -105,8 +109,8 @@ public: int submit_io(tpool::aiocb *cb) final { - cb->iov_base= cb->m_buffer; - cb->iov_len= cb->m_len; + cb->m_iovec.iov_base= cb->m_buffer; + cb->m_iovec.iov_len= cb->m_len; // The whole operation since io_uring_get_sqe() and till io_uring_submit() // must be atomical. This is because liburing provides thread-unsafe calls. 
@@ -114,11 +118,9 @@ public: io_uring_sqe *sqe= io_uring_get_sqe(&uring_); if (cb->m_opcode == tpool::aio_opcode::AIO_PREAD) - io_uring_prep_readv(sqe, cb->m_fh, static_cast(cb), 1, - cb->m_offset); + io_uring_prep_readv(sqe, cb->m_fh, &cb->m_iovec, 1, cb->m_offset); else - io_uring_prep_writev(sqe, cb->m_fh, static_cast(cb), 1, - cb->m_offset); + io_uring_prep_writev(sqe, cb->m_fh, &cb->m_iovec, 1, cb->m_offset); io_uring_sqe_set_data(sqe, cb); return io_uring_submit(&uring_) == 1 ? 0 : -1; @@ -156,7 +158,7 @@ private: if (ret == -EINTR) continue; my_printf_error(ER_UNKNOWN_ERROR, - "io_uring_wait_cqe() returned %d\n", + "io_uring_wait_cqe() returned %d", ME_ERROR_LOG | ME_FATAL, ret); abort(); } @@ -205,12 +207,11 @@ private: namespace tpool { - -aio *create_linux_aio(thread_pool *pool, int max_aio) +aio *create_uring(thread_pool *pool, int max_aio) { try { return new aio_uring(pool, max_aio); - } catch (std::runtime_error& error) { + } catch (std::runtime_error&) { return nullptr; } } diff --git a/tpool/aio_linux.cc b/tpool/aio_linux.cc index fdce7e35063..88107a24c7c 100644 --- a/tpool/aio_linux.cc +++ b/tpool/aio_linux.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2019, 2020, MariaDB Corporation. +/* Copyright (C) 2025 MariaDB Corporation. This program is free software; you can redistribute itand /or modify it under the terms of the GNU General Public License as published by @@ -13,182 +13,61 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111 - 1301 USA*/ -#include "tpool_structs.h" -#include "tpool.h" - -# include -# include -# include -# include -# include -# include -# include -/** - Invoke the io_getevents() system call, without timeout parameter. 
- - @param ctx context from io_setup() - @param min_nr minimum number of completion events to wait for - @param nr maximum number of completion events to collect - @param ev the collected events - - In https://pagure.io/libaio/c/7cede5af5adf01ad26155061cc476aad0804d3fc - the io_getevents() implementation in libaio was "optimized" so that it - would elide the system call when there are no outstanding requests - and a timeout was specified. - - The libaio code for dereferencing ctx would occasionally trigger - SIGSEGV if io_destroy() was concurrently invoked from another thread. - Hence, we have to use the raw system call. - - WHY are we doing this at all? - Because we want io_destroy() from another thread to interrupt io_getevents(). - - And, WHY do we want io_destroy() from another thread to interrupt - io_getevents()? - - Because there is no documented, libaio-friendly and race-condition-free way to - interrupt io_getevents(). io_destroy() coupled with raw syscall seemed to work - for us so far. - - Historical note : in the past, we used io_getevents with timeouts. We'd wake - up periodically, check for shutdown flag, return from the main routine. - This was admittedly safer, yet it did cost periodic wakeups, which we are not - willing to do anymore. - - @note we also rely on the undocumented property, that io_destroy(ctx) - will make this version of io_getevents return EINVAL. -*/ -static int my_getevents(io_context_t ctx, long min_nr, long nr, io_event *ev) -{ - int saved_errno= errno; - int ret= syscall(__NR_io_getevents, reinterpret_cast(ctx), - min_nr, nr, ev, 0); - if (ret < 0) - { - ret= -errno; - errno= saved_errno; - } - return ret; -} - - /* - Linux AIO implementation, based on native AIO. - Needs libaio.h and -laio at the compile time. + This file exports create_linux_aio() function which is used to create + an asynchronous IO implementation for Linux (currently either libaio or + uring). + */ - io_submit() is used to submit async IO. 
- - A single thread will collect the completion notification - with io_getevents() and forward io completion callback to - the worker threadpool. -*/ +#include "tpool.h" +#include namespace tpool { -class aio_linux final : public aio +// Forward declarations of aio implementations +#ifdef HAVE_LIBAIO +// defined in aio_libaio.cc +aio *create_libaio(thread_pool *pool, int max_io); +#endif +#if defined HAVE_URING +// defined in aio_uring.cc +aio *create_uring(thread_pool *pool, int max_io); +#endif + +/* + @brief + Choose native linux aio implementation based on availability and user + preference. + + @param pool - thread pool to use for aio operations + @param max_io - maximum number of concurrent io operations + @param impl - implementation to use, can be one of the following: + + @returns + A pointer to the aio implementation object, or nullptr if no suitable + implementation is available. + + If impl is OS_IO_DEFAULT, it will try uring first, fallback to libaio + If impl is OS_IO_URING or OS_IO_LIBAIO, it won't fallback +*/ +aio *create_linux_aio(thread_pool *pool, int max_io, aio_implementation impl) { - thread_pool *m_pool; - io_context_t m_io_ctx; - std::thread m_getevent_thread; - static std::atomic shutdown_in_progress; - - static void getevent_thread_routine(aio_linux *aio) +#ifdef HAVE_URING + if (impl != OS_IO_LIBAIO) { - my_thread_set_name("my_getevents"); - /* - We collect events in small batches to hopefully reduce the - number of system calls. 
- */ - constexpr unsigned MAX_EVENTS= 256; - - aio->m_pool->m_worker_init_callback(); - io_event events[MAX_EVENTS]; - for (;;) - { - switch (int ret= my_getevents(aio->m_io_ctx, 1, MAX_EVENTS, events)) { - case -EINTR: - continue; - case -EINVAL: - if (shutdown_in_progress) - goto end; - /* fall through */ - default: - if (ret < 0) - { - fprintf(stderr, "io_getevents returned %d\n", ret); - abort(); - goto end; - } - for (int i= 0; i < ret; i++) - { - const io_event &event= events[i]; - aiocb *iocb= static_cast(event.obj); - if (static_cast(event.res) < 0) - { - iocb->m_err= -event.res; - iocb->m_ret_len= 0; - } - else - { - iocb->m_ret_len= event.res; - iocb->m_err= 0; - finish_synchronous(iocb); - } - iocb->m_internal_task.m_func= iocb->m_callback; - iocb->m_internal_task.m_arg= iocb; - iocb->m_internal_task.m_group= iocb->m_group; - aio->m_pool->submit_task(&iocb->m_internal_task); - } - } - } -end: - aio->m_pool->m_worker_destroy_callback(); + aio *ret= create_uring(pool, max_io); + if (ret) + return ret; + else if (impl != OS_IO_DEFAULT) + return nullptr; // uring is not available + else + fprintf(stderr, "create_uring failed: falling back to libaio\n"); } - -public: - aio_linux(io_context_t ctx, thread_pool *pool) - : m_pool(pool), m_io_ctx(ctx), - m_getevent_thread(getevent_thread_routine, this) - { - } - - ~aio_linux() - { - shutdown_in_progress= true; - io_destroy(m_io_ctx); - m_getevent_thread.join(); - shutdown_in_progress= false; - } - - int submit_io(aiocb *cb) override - { - io_prep_pread(static_cast(cb), cb->m_fh, cb->m_buffer, cb->m_len, - cb->m_offset); - if (cb->m_opcode != aio_opcode::AIO_PREAD) - cb->aio_lio_opcode= IO_CMD_PWRITE; - iocb *icb= static_cast(cb); - int ret= io_submit(m_io_ctx, 1, &icb); - if (ret == 1) - return 0; - errno= -ret; - return -1; - } - - int bind(native_file_handle&) override { return 0; } - int unbind(const native_file_handle&) override { return 0; } -}; - -std::atomic aio_linux::shutdown_in_progress; - -aio 
*create_linux_aio(thread_pool *pool, int max_io) -{ - io_context_t ctx; - memset(&ctx, 0, sizeof ctx); - if (int ret= io_setup(max_io, &ctx)) - { - fprintf(stderr, "io_setup(%d) returned %d\n", max_io, ret); - return nullptr; - } - return new aio_linux(ctx, pool); -} +#endif +#ifdef HAVE_LIBAIO + if (impl != OS_IO_URING) + return create_libaio(pool, max_io); +#endif + return nullptr; } +} // namespace tpool diff --git a/tpool/aio_simulated.cc b/tpool/aio_simulated.cc index cf1e7dca4c9..5a18fb03457 100644 --- a/tpool/aio_simulated.cc +++ b/tpool/aio_simulated.cc @@ -154,6 +154,7 @@ public: int bind(native_file_handle &fd) override { return 0; } int unbind(const native_file_handle &fd) override { return 0; } + const char *get_implementation() const override { return "simulated"; } }; aio *create_simulated_aio(thread_pool *tp) diff --git a/tpool/aio_win.cc b/tpool/aio_win.cc index f483e3ca1e1..3c9143a51a0 100644 --- a/tpool/aio_win.cc +++ b/tpool/aio_win.cc @@ -131,6 +131,7 @@ public: : GetLastError(); } int unbind(const native_file_handle& fd) override { return 0; } + const char *get_implementation() const override { return "completion ports"; } }; aio* create_win_aio(thread_pool* pool, int max_io) diff --git a/tpool/tpool.h b/tpool/tpool.h index 4dc29981366..1dc3dbb015b 100644 --- a/tpool/tpool.h +++ b/tpool/tpool.h @@ -19,7 +19,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111 - 1301 USA*/ #include #include #include -#ifdef LINUX_NATIVE_AIO +#ifdef HAVE_LIBAIO #include #endif #ifdef HAVE_URING @@ -143,12 +143,21 @@ constexpr size_t MAX_AIO_USERDATA_LEN= 4 * sizeof(void*); struct aiocb #ifdef _WIN32 :OVERLAPPED -#elif defined LINUX_NATIVE_AIO - :iocb -#elif defined HAVE_URING - :iovec #endif { +#if defined HAVE_LIBAIO || defined HAVE_URING + union { +# ifdef HAVE_LIBAIO + /** The context between io_submit() and io_getevents(); + must be the first data member! 
*/ + iocb m_iocb; +# endif +# ifdef HAVE_URING + /** The context between io_uring_submit() and io_uring_wait_cqe() */ + iovec m_iovec; +# endif + }; +#endif native_file_handle m_fh; aio_opcode m_opcode; unsigned long long m_offset; @@ -188,6 +197,7 @@ public: virtual int bind(native_file_handle &fd)= 0; /** "Unind" file to AIO handler (used on Windows only) */ virtual int unbind(const native_file_handle &fd)= 0; + virtual const char *get_implementation() const=0; virtual ~aio(){}; protected: static void synchronous(aiocb *cb); @@ -217,12 +227,22 @@ class thread_pool; extern aio *create_simulated_aio(thread_pool *tp); +enum aio_implementation +{ + OS_IO_DEFAULT +#ifdef __linux__ + , OS_IO_URING + , OS_IO_LIBAIO +#endif +}; + class thread_pool { protected: /* AIO handler */ - std::unique_ptr m_aio; - virtual aio *create_native_aio(int max_io)= 0; + std::unique_ptr m_aio{}; + aio_implementation m_aio_impl= OS_IO_DEFAULT; + virtual aio *create_native_aio(int max_io, aio_implementation)= 0; public: /** @@ -232,10 +252,7 @@ public: void (*m_worker_init_callback)(void)= [] {}; void (*m_worker_destroy_callback)(void)= [] {}; - thread_pool() - : m_aio() - { - } + thread_pool()= default; virtual void submit_task(task *t)= 0; virtual timer* create_timer(callback_func func, void *data=nullptr) = 0; void set_thread_callbacks(void (*init)(), void (*destroy)()) @@ -245,10 +262,13 @@ public: m_worker_init_callback= init; m_worker_destroy_callback= destroy; } - int configure_aio(bool use_native_aio, int max_io) + int configure_aio(bool use_native_aio, int max_io, aio_implementation impl) { if (use_native_aio) - m_aio.reset(create_native_aio(max_io)); + { + m_aio.reset(create_native_aio(max_io, impl)); + m_aio_impl= impl; + } else m_aio.reset(create_simulated_aio(this)); return !m_aio ? 
-1 : 0; @@ -259,7 +279,7 @@ public: assert(m_aio); if (use_native_aio) { - auto new_aio = create_native_aio(max_io); + auto new_aio= create_native_aio(max_io, m_aio_impl); if (!new_aio) return -1; m_aio.reset(new_aio); @@ -271,6 +291,10 @@ public: { m_aio.reset(); } + const char *get_aio_implementation() const + { + return m_aio->get_implementation(); + } /** Tweaks how fast worker threads are created, or how often they are signaled. @@ -296,6 +320,19 @@ public: virtual void wait_end() {}; virtual ~thread_pool() {} }; + +/** Return true if compiled with native AIO support.*/ +constexpr bool supports_native_aio() +{ +#ifdef _WIN32 + return true; +#elif defined(__linux__) && (defined(HAVE_LIBAIO) || defined(HAVE_URING)) + return true; +#else + return false; +#endif +} + const int DEFAULT_MIN_POOL_THREADS= 1; const int DEFAULT_MAX_POOL_THREADS= 500; extern thread_pool * diff --git a/tpool/tpool_generic.cc b/tpool/tpool_generic.cc index e7c83fa172d..be37231e8cc 100644 --- a/tpool/tpool_generic.cc +++ b/tpool/tpool_generic.cc @@ -37,16 +37,10 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111 - 1301 USA*/ namespace tpool { - #ifdef __linux__ -#if defined(HAVE_URING) || defined(LINUX_NATIVE_AIO) - extern aio* create_linux_aio(thread_pool* tp, int max_io); -#else - aio *create_linux_aio(thread_pool *, int) { return nullptr; }; -#endif -#endif -#ifdef _WIN32 - extern aio* create_win_aio(thread_pool* tp, int max_io); + aio *create_linux_aio(thread_pool* tp, int max_io, aio_implementation); +#elif defined _WIN32 + aio *create_win_aio(thread_pool* tp, int max_io); #endif static const std::chrono::milliseconds LONG_TASK_DURATION = std::chrono::milliseconds(500); @@ -299,16 +293,15 @@ public: void wait_begin() override; void wait_end() override; void submit_task(task *task) override; - aio *create_native_aio(int max_io) override - { #ifdef _WIN32 - return create_win_aio(this, max_io); -#elif defined(__linux__) - return create_linux_aio(this,max_io); + aio 
*create_native_aio(int max_io, aio_implementation) override + { return create_win_aio(this, max_io); } +#elif defined __linux__ + aio *create_native_aio(int max_io, aio_implementation impl) override + { return create_linux_aio(this, max_io, impl); } #else - return nullptr; + aio *create_native_aio(int, aio_implementation) override { return nullptr; } #endif - } class timer_generic : public thr_timer_t, public timer { diff --git a/tpool/tpool_win.cc b/tpool/tpool_win.cc index 30867b8885c..a5539edbdd7 100644 --- a/tpool/tpool_win.cc +++ b/tpool/tpool_win.cc @@ -206,6 +206,11 @@ class thread_pool_win : public thread_pool CloseThreadpoolIo(fd.m_ptp_io); return 0; } + + /** + Expose implementation. + */ + const char *get_implementation() const override { return "ThreadPool"; } }; PTP_POOL m_ptp_pool; @@ -268,7 +273,7 @@ public: abort(); } - aio *create_native_aio(int max_io) override + aio *create_native_aio(int max_io, aio_implementation) override { return new native_aio(*this, max_io); } diff --git a/unittest/mysys/CMakeLists.txt b/unittest/mysys/CMakeLists.txt index b0cdae738b9..e56b97e7fb0 100644 --- a/unittest/mysys/CMakeLists.txt +++ b/unittest/mysys/CMakeLists.txt @@ -13,12 +13,27 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA -MY_ADD_TESTS(bitmap base64 my_atomic my_rdtsc lf my_malloc my_getopt dynstring - byte_order my_tzinfo dynamic_array - queues stacktrace stack_allocation crc32 LINK_LIBRARIES mysys) +MY_ADD_TESTS( + base64 + bitmap + byte_order + crc32 + dynamic_array + dynstring + lf + my_atomic + my_getopt + my_malloc + my_rdtsc + my_tzinfo + queues + stack_allocation + stacktrace + waiting_threads + LINK_LIBRARIES mysys) MY_ADD_TESTS(my_vsnprintf LINK_LIBRARIES strings mysys) -MY_ADD_TESTS(aes LINK_LIBRARIES mysys mysys_ssl) ADD_DEFINITIONS(${SSL_DEFINES}) +MY_ADD_TESTS(aes LINK_LIBRARIES mysys mysys_ssl) INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIRS}) 
MY_ADD_TESTS(ma_dyncol LINK_LIBRARIES mysys) diff --git a/unittest/mysys/lf-t.c b/unittest/mysys/lf-t.c index dfb0d31b42b..b390987ef8a 100644 --- a/unittest/mysys/lf-t.c +++ b/unittest/mysys/lf-t.c @@ -91,7 +91,7 @@ pthread_handler_t test_lf_alloc(void *arg) lf_alloc_free(pins, node2); } lf_alloc_put_pins(pins); - pthread_mutex_lock(&mutex); + mysql_mutex_lock(&mutex); bad+= y; if (--N == 0) @@ -102,7 +102,7 @@ pthread_handler_t test_lf_alloc(void *arg) bad|= lf_allocator.mallocs - lf_alloc_pool_count(&lf_allocator); #endif } - pthread_mutex_unlock(&mutex); + mysql_mutex_unlock(&mutex); if (with_my_thread_init) my_thread_end(); @@ -157,7 +157,7 @@ pthread_handler_t test_lf_hash(void *arg) } } lf_hash_put_pins(pins); - pthread_mutex_lock(&mutex); + mysql_mutex_lock(&mutex); bad+= sum; inserts+= ins; @@ -168,7 +168,7 @@ pthread_handler_t test_lf_hash(void *arg) lf_hash.size, inserts, scans); bad|= lf_hash.count; } - pthread_mutex_unlock(&mutex); + mysql_mutex_unlock(&mutex); if (with_my_thread_init) my_thread_end(); return 0; diff --git a/unittest/mysys/thr_template.c b/unittest/mysys/thr_template.c index 7a406863d05..8c6265ab62f 100644 --- a/unittest/mysys/thr_template.c +++ b/unittest/mysys/thr_template.c @@ -19,7 +19,7 @@ #include volatile uint32 bad; -pthread_mutex_t mutex; +mysql_mutex_t mutex; void do_tests(); @@ -57,7 +57,7 @@ int main(int argc __attribute__((unused)), char **argv) if (argv[1] && *argv[1]) DBUG_SET_INITIAL(argv[1]); - pthread_mutex_init(&mutex, 0); + mysql_mutex_init(PSI_NOT_INSTRUMENTED, &mutex, 0); #define CYCLES 30000 #define THREADS 30 @@ -66,7 +66,7 @@ int main(int argc __attribute__((unused)), char **argv) do_tests(); - pthread_mutex_destroy(&mutex); + mysql_mutex_destroy(&mutex); my_end(0); return exit_status(); } diff --git a/unittest/mysys/waiting_threads-t.c b/unittest/mysys/waiting_threads-t.c index 02cc0cac7c9..a43ce8afe84 100644 --- a/unittest/mysys/waiting_threads-t.c +++ b/unittest/mysys/waiting_threads-t.c @@ -16,15 +16,16 @@ 
#include "thr_template.c" #include #include +#include struct test_wt_thd { WT_THD thd; - pthread_mutex_t lock; + mysql_mutex_t lock; } thds[THREADS]; uint i, cnt; -pthread_mutex_t lock; -pthread_cond_t thread_sync; +mysql_mutex_t lock; +mysql_cond_t thread_sync; ulong wt_timeout_short=100, wt_deadlock_search_depth_short=4; ulong wt_timeout_long=10000, wt_deadlock_search_depth_long=15; @@ -49,7 +50,7 @@ pthread_handler_t test_wt(void *arg) my_thread_init(); - pthread_mutex_lock(&mutex); + mysql_mutex_lock(&mutex); id= cnt++; wt_thd_lazy_init(& thds[id].thd, & wt_deadlock_search_depth_short, & wt_timeout_short, @@ -57,11 +58,11 @@ pthread_handler_t test_wt(void *arg) /* now, wait for everybody to be ready to run */ if (cnt >= THREADS) - pthread_cond_broadcast(&thread_sync); + mysql_cond_broadcast(&thread_sync); else while (cnt < THREADS) - pthread_cond_wait(&thread_sync, &mutex); - pthread_mutex_unlock(&mutex); + mysql_cond_wait(&thread_sync, &mutex); + mysql_mutex_unlock(&mutex); my_rnd_init(&rand, (ulong)(intptr)&m, id); if (kill_strategy == YOUNGEST) @@ -72,7 +73,7 @@ pthread_handler_t test_wt(void *arg) for (m= *(int *)arg; m ; m--) { WT_RESOURCE_ID resid; - int blockers[THREADS/10], j, k; + int blockers[THREADS/10]={0}, j, k; resid.value= id; resid.type= &restype; @@ -94,25 +95,25 @@ retry: if (kill_strategy == RANDOM) thds[id].thd.weight= rnd(); - pthread_mutex_lock(& thds[i].lock); + mysql_mutex_lock(& thds[i].lock); res= wt_thd_will_wait_for(& thds[id].thd, & thds[i].thd, &resid); - pthread_mutex_unlock(& thds[i].lock); + mysql_mutex_unlock(& thds[i].lock); } if (!res) { - pthread_mutex_lock(&lock); + mysql_mutex_lock(&lock); res= wt_thd_cond_timedwait(& thds[id].thd, &lock); - pthread_mutex_unlock(&lock); + mysql_mutex_unlock(&lock); } if (res) { - pthread_mutex_lock(& thds[id].lock); - pthread_mutex_lock(&lock); + mysql_mutex_lock(& thds[id].lock); + mysql_mutex_lock(&lock); wt_thd_release_all(& thds[id].thd); - pthread_mutex_unlock(&lock); - 
pthread_mutex_unlock(& thds[id].lock); + mysql_mutex_unlock(&lock); + mysql_mutex_unlock(& thds[id].lock); if (kill_strategy == LOCKS) thds[id].thd.weight= 0; if (kill_strategy == YOUNGEST) @@ -122,21 +123,21 @@ retry: thds[id].thd.weight++; } - pthread_mutex_lock(&mutex); + mysql_mutex_lock(&mutex); /* wait for everybody to finish */ if (!--cnt) - pthread_cond_broadcast(&thread_sync); + mysql_cond_broadcast(&thread_sync); else while (cnt) - pthread_cond_wait(&thread_sync, &mutex); + mysql_cond_wait(&thread_sync, &mutex); - pthread_mutex_lock(& thds[id].lock); - pthread_mutex_lock(&lock); + mysql_mutex_lock(& thds[id].lock); + mysql_mutex_lock(&lock); wt_thd_release_all(& thds[id].thd); - pthread_mutex_unlock(&lock); - pthread_mutex_unlock(& thds[id].lock); + mysql_mutex_unlock(&lock); + mysql_mutex_unlock(& thds[id].lock); wt_thd_destroy(& thds[id].thd); - pthread_mutex_unlock(&mutex); + mysql_mutex_unlock(&mutex); DBUG_PRINT("wt", ("exiting")); my_thread_end(); @@ -152,7 +153,8 @@ void do_one_test() reset(wt_wait_stats); wt_success_stats=0; cnt=0; - test_concurrently("waiting_threads", test_wt, THREADS, CYCLES); + test_concurrently("waiting_threads", test_wt, THREADS, + CYCLES/(skip_big_tests?500:10)); sum=sum0=0; for (cnt=0; cnt < WT_CYCLE_STATS; cnt++) @@ -179,21 +181,16 @@ void do_one_test() void do_tests() { DBUG_ENTER("do_tests"); - if (skip_big_tests) - { - skip(1, "Big test skipped"); - return; - } plan(13); compile_time_assert(THREADS >= 4); DBUG_PRINT("wt", ("================= initialization ===================")); - pthread_cond_init(&thread_sync, 0); - pthread_mutex_init(&lock, 0); + mysql_cond_init(PSI_NOT_INSTRUMENTED, &thread_sync, 0); + mysql_mutex_init(PSI_NOT_INSTRUMENTED, &lock, 0); wt_init(); for (cnt=0; cnt < THREADS; cnt++) - pthread_mutex_init(& thds[cnt].lock, 0); + mysql_mutex_init(PSI_NOT_INSTRUMENTED, & thds[cnt].lock, 0); { WT_RESOURCE_ID resid[4]; for (i=0; i < array_elements(resid); i++) @@ -218,16 +215,16 @@ void do_tests() 
ok_wait(0,2,0); ok_wait(0,3,0); - pthread_mutex_lock(&lock); + mysql_mutex_lock(&lock); bad= wt_thd_cond_timedwait(& thds[0].thd, &lock); - pthread_mutex_unlock(&lock); + mysql_mutex_unlock(&lock); ok(bad == WT_TIMEOUT, "timeout test returned %d", bad); ok_wait(0,1,0); ok_wait(1,2,1); ok_deadlock(2,0,2); - pthread_mutex_lock(&lock); + mysql_mutex_lock(&lock); ok(wt_thd_cond_timedwait(& thds[0].thd, &lock) == WT_TIMEOUT, "as always"); ok(wt_thd_cond_timedwait(& thds[1].thd, &lock) == WT_TIMEOUT, "as always"); wt_thd_release_all(& thds[0].thd); @@ -240,7 +237,7 @@ void do_tests() wt_thd_release_all(& thds[i].thd); wt_thd_destroy(& thds[i].thd); } - pthread_mutex_unlock(&lock); + mysql_mutex_unlock(&lock); } wt_deadlock_search_depth_short=6; @@ -277,9 +274,9 @@ void do_tests() DBUG_PRINT("wt", ("================= cleanup ===================")); for (cnt=0; cnt < THREADS; cnt++) - pthread_mutex_destroy(& thds[cnt].lock); + mysql_mutex_destroy(& thds[cnt].lock); wt_end(); - pthread_mutex_destroy(&lock); - pthread_cond_destroy(&thread_sync); + mysql_mutex_destroy(&lock); + mysql_cond_destroy(&thread_sync); DBUG_VOID_RETURN; } diff --git a/win/packaging/create_msi.cmake b/win/packaging/create_msi.cmake index 57262e10d7d..20b139cf5d1 100644 --- a/win/packaging/create_msi.cmake +++ b/win/packaging/create_msi.cmake @@ -357,6 +357,11 @@ SET(CPACK_WIX_INCLUDES ) ENDFOREACH() +LIST(LENGTH WIX_FEATURE_AlwaysInstall_COMPONENTS len) +IF (len LESS_EQUAL 0) + MESSAGE(FATAL_ERROR "AlwaysInstall_COMPONENTS is empty") +ENDIF() +LIST(JOIN WIX_FEATURE_AlwaysInstall_COMPONENTS "," MANDATORY_FEATURES) CONFIGURE_FILE(${SRCDIR}/mysql_server.wxs.in ${CMAKE_CURRENT_BINARY_DIR}/mysql_server.wxs) diff --git a/win/packaging/extra.wxs.in b/win/packaging/extra.wxs.in index 10de2b06f15..3876f0af65f 100644 --- a/win/packaging/extra.wxs.in +++ b/win/packaging/extra.wxs.in @@ -64,6 +64,21 @@ + + + + ADDLOCAL + + + ADDLOCAL + - + diff --git a/win/packaging/heidisql.cmake b/win/packaging/heidisql.cmake 
index 157e5517594..33d3e84b875 100644 --- a/win/packaging/heidisql.cmake +++ b/win/packaging/heidisql.cmake @@ -1,4 +1,4 @@ -SET(HEIDISQL_BASE_NAME "HeidiSQL_12.10_32_Portable") +SET(HEIDISQL_BASE_NAME "HeidiSQL_12.11_64_Portable") SET(HEIDISQL_ZIP "${HEIDISQL_BASE_NAME}.zip") SET(HEIDISQL_URL "https://www.heidisql.com/downloads/releases/${HEIDISQL_ZIP}") SET(HEIDISQL_DOWNLOAD_DIR ${THIRD_PARTY_DOWNLOAD_LOCATION}/${HEIDISQL_BASE_NAME}) diff --git a/win/packaging/heidisql.wxi.in b/win/packaging/heidisql.wxi.in index 45bd6c4b218..e4089be7204 100644 --- a/win/packaging/heidisql.wxi.in +++ b/win/packaging/heidisql.wxi.in @@ -1,6 +1,15 @@ - + + + + - + - +