mirror of https://github.com/MariaDB/server.git synced 2025-08-07 00:04:31 +03:00

Merge branch '11.4' into 11.8

Author: Sergei Golubchik
Date:   2025-07-28 20:16:25 +02:00
380 changed files with 11138 additions and 2776 deletions


@@ -341,7 +341,13 @@ SET(WITH_SAFEMALLOC "AUTO" CACHE STRING "Use safemalloc memory debugger. Will re
 IF(WITH_SAFEMALLOC MATCHES "ON")
   ADD_DEFINITIONS( -DSAFEMALLOC)
-ELSEIF(WITH_SAFEMALLOC MATCHES "AUTO" AND NOT WIN32 AND NOT WITH_VALGRIND)
+ELSEIF(WITH_SAFEMALLOC MATCHES "AUTO"
+       AND NOT WIN32
+       AND NOT WITH_VALGRIND
+       AND NOT WITH_ASAN
+       AND NOT WITH_UBSAN
+       AND NOT WITH_TSAN
+       AND NOT WITH_MSAN)
   SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC")
   SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC")
 ENDIF()


@@ -3212,6 +3212,26 @@ static int reconnect(void)
   return 0;
 }
 
+#ifndef EMBEDDED_LIBRARY
+static void status_info_cb(void *data, enum enum_mariadb_status_info type, ...)
+{
+  va_list ap;
+  va_start(ap, type);
+  if (type == SESSION_TRACK_TYPE && va_arg(ap, int) == SESSION_TRACK_SCHEMA)
+  {
+    MARIADB_CONST_STRING *val= va_arg(ap, MARIADB_CONST_STRING *);
+    my_free(current_db);
+    if (val->length)
+      current_db= my_strndup(PSI_NOT_INSTRUMENTED, val->str, val->length, MYF(MY_FAE));
+    else
+      current_db= NULL;
+  }
+  va_end(ap);
+}
+#else
+#define mysql_optionsv(A,B,C,D) do { } while(0)
+#endif
+
 static void get_current_db()
 {
   MYSQL_RES *res;
@@ -5025,6 +5045,8 @@ sql_real_connect(char *host,char *database,char *user,char *password,
     mysql_close(&mysql);
   }
   mysql_init(&mysql);
+  if (!one_database)
+    mysql_optionsv(&mysql, MARIADB_OPT_STATUS_CALLBACK, status_info_cb, NULL);
   if (opt_init_command)
     mysql_options(&mysql, MYSQL_INIT_COMMAND, opt_init_command);
   if (opt_connect_timeout)
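
The hunk above keeps the client's current_db in sync through the connector's session-tracking callback instead of re-querying the server. Below is a minimal standalone sketch of the same mechanism, assuming MariaDB Connector/C (which provides mysql_optionsv() and MARIADB_OPT_STATUS_CALLBACK); the callback and buffer names are illustrative, not the client's actual code:

#include <stdarg.h>
#include <stdio.h>
#include <mysql.h>

static char tracked_db[256];          /* last schema reported by the server */

/* Same signature as status_info_cb above: a varargs callback the connector
   invokes while it parses OK packets carrying session-track data. */
static void track_schema_cb(void *data, enum enum_mariadb_status_info type, ...)
{
  va_list ap;
  (void) data;
  va_start(ap, type);
  if (type == SESSION_TRACK_TYPE && va_arg(ap, int) == SESSION_TRACK_SCHEMA)
  {
    /* The tracked schema arrives as a length-delimited string. */
    MARIADB_CONST_STRING *val= va_arg(ap, MARIADB_CONST_STRING *);
    snprintf(tracked_db, sizeof(tracked_db), "%.*s", (int) val->length, val->str);
  }
  va_end(ap);
}

int main(void)
{
  MYSQL mysql;
  mysql_init(&mysql);
  /* Must be registered before mysql_real_connect(), as in the hunk above. */
  mysql_optionsv(&mysql, MARIADB_OPT_STATUS_CALLBACK, track_schema_cb, NULL);
  if (!mysql_real_connect(&mysql, "localhost", "root", "", NULL, 0, NULL, 0))
    return 1;
  mysql_query(&mysql, "USE test");    /* server reports the schema change */
  printf("current schema: %s\n", tracked_db);
  mysql_close(&mysql);
  return 0;
}

Note that the hunk registers the callback only when --one-database is not in effect.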


@@ -1033,9 +1033,16 @@ Exit_status process_event(PRINT_EVENT_INFO *print_event_info, Log_event *ev,
       binlog, even if they have a server_id. Also, we have to read
       the format_description event so that we can parse subsequent
       events.
+      Don't skip Unknown events either since we don't know their `server_id`s.
     */
-    if (ev_type != ROTATE_EVENT && is_server_id_excluded(ev->server_id))
-      goto end;
+    switch (ev_type) {
+    case ROTATE_EVENT:
+    case UNKNOWN_EVENT:
+      break;
+    default:
+      if (is_server_id_excluded(ev->server_id))
+        goto end;
+    }
   }
   if ((stop_datetime_given && ev->when >= stop_datetime)
       || (pos >= stop_position_mot))
@@ -3400,7 +3407,8 @@ int main(int argc, char** argv)
   }
   /*
-    Emit a warning if we finished processing input before reaching the stop
+    Emit warning(s) (in Gtid_event_filter::verify_completed_state() for GTID(s))
+    if we finished processing input before reaching the stop
     boundaries indicated by --stop-datetime or --stop-position.
   */
   if (stop_datetime_given && stop_datetime > last_processed_ev.datetime)
@@ -3410,6 +3418,8 @@ int main(int argc, char** argv)
       stop_position > last_processed_ev.position)
     warning("Did not reach stop position %llu before end of input",
             stop_position);
+  if (position_gtid_filter)
+    position_gtid_filter->verify_final_state();
   /*
     If enable flashback, need to print the events from the end to the


@@ -184,7 +184,7 @@ static DYNAMIC_STRING extended_row;
static DYNAMIC_STRING dynamic_where; static DYNAMIC_STRING dynamic_where;
static MYSQL_RES *get_table_name_result= NULL; static MYSQL_RES *get_table_name_result= NULL;
static MEM_ROOT glob_root; static MEM_ROOT glob_root;
static MYSQL_RES *routine_res, *routine_list_res; static MYSQL_RES *routine_res, *routine_list_res, *slave_status_res= NULL;
#include <sslopt-vars.h> #include <sslopt-vars.h>
@@ -1996,6 +1996,8 @@ static void free_resources()
mysql_free_result(routine_res); mysql_free_result(routine_res);
if (routine_list_res) if (routine_list_res)
mysql_free_result(routine_list_res); mysql_free_result(routine_list_res);
if (slave_status_res)
mysql_free_result(slave_status_res);
if (mysql) if (mysql)
{ {
mysql_close(mysql); mysql_close(mysql);
@@ -6432,17 +6434,19 @@ static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos,
static int do_stop_slave_sql(MYSQL *mysql_con) static int do_stop_slave_sql(MYSQL *mysql_con)
{ {
MYSQL_RES *slave;
MYSQL_ROW row; MYSQL_ROW row;
DBUG_ASSERT(
!slave_status_res // do_stop_slave_sql() should only be called once
);
if (mysql_query_with_error_report(mysql_con, &slave, if (mysql_query_with_error_report(mysql_con, &slave_status_res,
multi_source ? multi_source ?
"SHOW ALL SLAVES STATUS" : "SHOW ALL SLAVES STATUS" :
"SHOW SLAVE STATUS")) "SHOW SLAVE STATUS"))
return(1); return(1);
/* Loop over all slaves */ /* Loop over all slaves */
while ((row= mysql_fetch_row(slave))) while ((row= mysql_fetch_row(slave_status_res)))
{ {
if (row[11 + multi_source]) if (row[11 + multi_source])
{ {
@@ -6457,13 +6461,11 @@ static int do_stop_slave_sql(MYSQL *mysql_con)
if (mysql_query_with_error_report(mysql_con, 0, query)) if (mysql_query_with_error_report(mysql_con, 0, query))
{ {
mysql_free_result(slave);
return 1; return 1;
} }
} }
} }
} }
mysql_free_result(slave);
return(0); return(0);
} }
@@ -6587,32 +6589,35 @@ static int do_show_slave_status(MYSQL *mysql_con, int have_mariadb_gtid,
static int do_start_slave_sql(MYSQL *mysql_con) static int do_start_slave_sql(MYSQL *mysql_con)
{ {
MYSQL_RES *slave;
MYSQL_ROW row; MYSQL_ROW row;
int error= 0; int error= 0;
DBUG_ENTER("do_start_slave_sql"); DBUG_ENTER("do_start_slave_sql");
/* We need to check if the slave sql is stopped in the first place */ /*
if (mysql_query_with_error_report(mysql_con, &slave, do_start_slave_sql() should normally be called
multi_source ? sometime after do_stop_slave_sql() succeeds
"SHOW ALL SLAVES STATUS" : */
"SHOW SLAVE STATUS")) if (!slave_status_res)
DBUG_RETURN(1); DBUG_RETURN(error);
mysql_data_seek(slave_status_res, 0);
while ((row= mysql_fetch_row(slave))) while ((row= mysql_fetch_row(slave_status_res)))
{ {
DBUG_PRINT("info", ("Connection: '%s' status: '%s'", DBUG_PRINT("info", ("Connection: '%s' status: '%s'",
multi_source ? row[0] : "", row[11 + multi_source])); multi_source ? row[0] : "", row[11 + multi_source]));
if (row[11 + multi_source]) if (row[11 + multi_source])
{ {
/* if SLAVE SQL is not running, we don't start it */ /*
if (strcmp(row[11 + multi_source], "Yes")) If SLAVE_SQL was not running but is now,
we start it anyway to warn the unexpected state change.
*/
if (strcmp(row[11 + multi_source], "No"))
{ {
char query[160]; char query[160];
if (multi_source) if (multi_source)
sprintf(query, "START SLAVE '%.80s'", row[0]); sprintf(query, "START SLAVE '%.80s' SQL_THREAD", row[0]);
else else
strmov(query, "START SLAVE"); strmov(query, "START SLAVE SQL_THREAD");
if (mysql_query_with_error_report(mysql_con, 0, query)) if (mysql_query_with_error_report(mysql_con, 0, query))
{ {
@@ -6623,7 +6628,6 @@ static int do_start_slave_sql(MYSQL *mysql_con)
} }
} }
} }
mysql_free_result(slave);
DBUG_RETURN(error); DBUG_RETURN(error);
} }
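
The mysqldump change above stores the SHOW [ALL] SLAVES STATUS result in the global slave_status_res so that do_start_slave_sql() can rewind and re-read it with mysql_data_seek() instead of issuing the query a second time, and free_resources() releases it exactly once. A small hedged sketch of that buffered-result reuse pattern follows; connection setup is omitted and the function names are illustrative:

#include <stdio.h>
#include <mysql.h>

/* Results from mysql_store_result() are buffered on the client, so they can
   be rewound with mysql_data_seek() and scanned again later. */
static MYSQL_RES *saved_status= NULL;

static int fetch_slave_status(MYSQL *conn)
{
  if (mysql_query(conn, "SHOW SLAVE STATUS"))
    return 1;
  saved_status= mysql_store_result(conn);
  return saved_status == NULL;
}

static void report_sql_threads(void)
{
  MYSQL_ROW row;
  if (!saved_status)
    return;
  mysql_data_seek(saved_status, 0);          /* rewind to the first row */
  while ((row= mysql_fetch_row(saved_status)))
    /* column 11 is Slave_SQL_Running when not using multi-source,
       as in the hunk above */
    printf("Slave_SQL_Running: %s\n", row[11] ? row[11] : "NULL");
}

Because the result is stored rather than streamed, it must be freed exactly once, which is why the hunk moves the mysql_free_result() call into free_resources().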


@@ -4,19 +4,27 @@ MACRO (SKIP_AWS_SDK MSG)
RETURN() RETURN()
ENDMACRO() ENDMACRO()
FUNCTION (CHECK_AWS_SDK RETVAL REASON) FUNCTION (CHECK_AWS_SDK COMPONENTS RETVAL REASON)
# AWS_SDK_EXTERNAL_PROJECT must be ON # AWS_SDK_EXTERNAL_PROJECT must be ON
IF(NOT AWS_SDK_EXTERNAL_PROJECT) IF(NOT AWS_SDK_EXTERNAL_PROJECT)
SKIP_AWS_SDK("AWS_SDK_EXTERNAL_PROJECT is not ON") FOREACH(comp ${COMPONENTS})
FIND_PACKAGE(aws-cpp-sdk-${comp} CONFIG QUIET)
IF (NOT aws-cpp-sdk-${comp}_FOUND)
SKIP_AWS_SDK("AWS_SDK_EXTERNAL_PROJECT is not ON and aws-cpp-sdk-${comp} not found")
ENDIF()
ENDFOREACH()
SET(${RETVAL} ON PARENT_SCOPE)
ENDIF() ENDIF()
IF(NOT NOT_FOR_DISTRIBUTION) IF(NOT NOT_FOR_DISTRIBUTION)
SKIP_AWS_SDK("AWS SDK has Apache 2.0 License which is not compatible with GPLv2. Set -DNOT_FOR_DISTRIBUTION=ON if you need it") SKIP_AWS_SDK("AWS SDK has Apache 2.0 License which is not compatible with GPLv2. Set -DNOT_FOR_DISTRIBUTION=ON if you need it")
ENDIF() ENDIF()
IF(CMAKE_VERSION VERSION_LESS "3.15")
SKIP_AWS_SDK("CMake too old")
ENDIF()
# Check compiler support # Check compiler support
IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU") IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
EXECUTE_PROCESS(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION) IF (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9)
IF (GCC_VERSION VERSION_LESS 4.8) SKIP_AWS_SDK("GCC VERSION too old (${GCC_VERSION}, required is 4.9 or later")
SKIP_AWS_SDK("GCC VERSION too old (${GCC_VERSION}, required is 4.8 or later")
ENDIF() ENDIF()
ELSEIF (CMAKE_CXX_COMPILER_ID MATCHES "Clang") ELSEIF (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
IF ((CMAKE_CXX_COMPILER_VERSION AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.3) OR IF ((CMAKE_CXX_COMPILER_VERSION AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.3) OR
@@ -36,35 +44,27 @@ FUNCTION (CHECK_AWS_SDK RETVAL REASON)
SKIP_AWS_SDK("OS unsupported by AWS SDK") SKIP_AWS_SDK("OS unsupported by AWS SDK")
ENDIF() ENDIF()
# Build from source, using ExternalProject_Add
# AWS C++ SDK requires cmake 2.8.12
IF(CMAKE_VERSION VERSION_LESS "2.8.12")
SKIP_AWS_SDK("CMake is too old")
ENDIF()
IF(UNIX) IF(UNIX)
# Check librairies required for building SDK IF("${WITH_ZLIB}" STREQUAL "bundled")
FIND_PACKAGE(CURL) # Breaks FIND_PACKAGE(ZLIB)
SET_PACKAGE_PROPERTIES(CURL PROPERTIES TYPE REQUIRED) SKIP_AWS_SDK("Incompatible with WITH_ZLIB=bundled")
IF(NOT CURL_FOUND)
SKIP_AWS_SDK("AWS C++ SDK requires libcurl development package")
ENDIF() ENDIF()
FIND_PATH(UUID_INCLUDE_DIR uuid/uuid.h) # Check libraries required for building SDK
IF(NOT UUID_INCLUDE_DIR) FOREACH(pkg CURL ZLIB OpenSSL)
SKIP_AWS_SDK("AWS C++ SDK requires uuid development package") FIND_PACKAGE(${pkg})
ENDIF() IF(NOT ${pkg}_FOUND)
IF(NOT APPLE) SKIP_AWS_SDK("AWS C++ SDK requires ${pkg}")
FIND_LIBRARY(UUID_LIBRARIES uuid)
SET_PACKAGE_PROPERTIES(UUID_LIBRARIES PROPERTIES TYPE REQUIRED)
IF(NOT UUID_LIBRARIES)
SKIP_AWS_SDK("AWS C++ SDK requires uuid development package")
ENDIF() ENDIF()
FIND_PACKAGE(OpenSSL) SET_PACKAGE_PROPERTIES(${pkg} PROPERTIES TYPE REQUIRED)
SET_PACKAGE_PROPERTIES(OpenSSL PROPERTIES TYPE REQUIRED) ENDFOREACH()
IF(NOT OPENSSL_FOUND) # Also check for required libraries explicitely - they might be
SKIP_AWS_SDK("AWS C++ SDK requires openssl development package") # missing, even if check above succeeds, e.g when using own copy
# of zlib
FOREACH(lib OpenSSL::Crypto ZLIB::ZLIB CURL::libcurl)
IF(NOT TARGET ${lib})
SKIP_AWS_SDK("AWS C++ SDK requires ${lib}")
ENDIF() ENDIF()
ENDIF() ENDFOREACH()
ENDIF() ENDIF()
SET(${RETVAL} ON PARENT_SCOPE) SET(${RETVAL} ON PARENT_SCOPE)
ENDFUNCTION() ENDFUNCTION()
@@ -85,14 +85,4 @@ FUNCTION(USE_AWS_SDK_LIBS)
SET_PROPERTY(GLOBAL PROPERTY AWS_SDK_LIBS ${comp} APPEND) SET_PROPERTY(GLOBAL PROPERTY AWS_SDK_LIBS ${comp} APPEND)
TARGET_LINK_LIBRARIES(${target} aws-cpp-sdk-${comp}) TARGET_LINK_LIBRARIES(${target} aws-cpp-sdk-${comp})
ENDFOREACH() ENDFOREACH()
TARGET_LINK_LIBRARIES(${target} aws-cpp-sdk-core)
TARGET_INCLUDE_DIRECTORIES(${target} PRIVATE ${PROJECT_BINARY_DIR}/extra/aws_sdk/aws_sdk_cpp/include)
# Link OS libraries that AWS SDK depends on
IF(WIN32)
TARGET_LINK_LIBRARIES(${target} bcrypt winhttp wininet userenv version)
ELSE()
FIND_PACKAGE(CURL REQUIRED)
FIND_PACKAGE(OpenSSL REQUIRED)
TARGET_LINK_LIBRARIES(${target} ${OPENSSL_LIBRARIES} ${CURL_LIBRARIES} ${UUID_LIBRARIES})
ENDIF()
ENDFUNCTION() ENDFUNCTION()


@@ -120,7 +120,10 @@ ELSEIF(DEB)
   SET(WITH_ZLIB system CACHE STRING "")
   SET(WITH_LIBWRAP ON)
   SET(HAVE_EMBEDDED_PRIVILEGE_CONTROL ON)
-  SET(PLUGIN_AUTH_SOCKET YES CACHE STRING "")
+  # No hurd implementation
+  IF(NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "i686-AT386")
+    SET(PLUGIN_AUTH_SOCKET YES CACHE STRING "")
+  ENDIF()
   SET(WITH_EMBEDDED_SERVER ON CACHE BOOL "")
   SET(WITH_PCRE system CACHE STRING "")
   SET(CLIENT_PLUGIN_ZSTD OFF)


@@ -255,6 +255,7 @@ FUNCTION(INSTALL_RUNTIME_DEPS)
   GET_PROPERTY(installed_targets GLOBAL PROPERTY INSTALLED_TARGETS)
   # Exclude all dependencies that are shared libraries from the
   # same build.
+  FILE(TO_CMAKE_PATH "$ENV{PATH}" _path_list)
   FOREACH(tgt ${installed_targets})
     SET(exclude_libs)
     GET_TARGET_PROPERTY(link_libraries ${tgt} LINK_LIBRARIES)
@@ -286,8 +287,9 @@ FUNCTION(INSTALL_RUNTIME_DEPS)
         ".*system32/.*\\.dll" # Windows stuff
         POST_INCLUDE_REGEXES
         DIRECTORIES
-          ${VCPKG_INSTALLED_DIR}/${VCPKG_TARGET_TRIPLET}/bin
-          $<$<CONFIG:Debug>:${VCPKG_INSTALLED_DIR}/${VCPKG_TARGET_TRIPLET}/debug/bin>
+          $<$<BOOL:${VCPKG_INSTALLED_DIR}>:${VCPKG_INSTALLED_DIR}/${VCPKG_TARGET_TRIPLET}/bin>
+          $<$<AND:$<CONFIG:Debug>,$<BOOL:${VCPKG_INSTALLED_DIR}>>:${VCPKG_INSTALLED_DIR}/${VCPKG_TARGET_TRIPLET}/debug/bin>
+          ${_path_list}
       )
   ENDFOREACH()
 ENDFUNCTION()


@@ -361,4 +361,5 @@ SET(HAVE_LINUX_UNISTD_H CACHE INTERNAL "")
 SET(OFF64_T CACHE INTERNAL "")
 SET(Z_HAVE_UNISTD_H CACHE INTERNAL "")
 SET(HAVE_OFF64_T CACHE FALSE INTERNAL "")
+SET(HAVE_AUXV_GETAUXVAL CACHE INTERNAL "")
 ENDIF(MSVC)


@@ -161,6 +161,9 @@ MACRO(MYSQL_ADD_PLUGIN)
         PROPERTIES COMPILE_DEFINITIONS "EMBEDDED_LIBRARY${version_string}")
     ENDIF()
     ADD_DEPENDENCIES(${target}_embedded GenError ${ARG_DEPENDS})
+    IF(ARG_LINK_LIBRARIES)
+      TARGET_LINK_LIBRARIES (${target}_embedded ${ARG_LINK_LIBRARIES})
+    ENDIF()
   ENDIF()
 ENDIF()


@@ -49,8 +49,6 @@ Then restart the build.
 SET(WSREP_VERSION "${WSREP_API_VERSION}.${WSREP_PATCH_VERSION}"
     CACHE INTERNAL "WSREP version")
-SET(WSREP_PROC_INFO ${WITH_WSREP})
 SET(WSREP_PATCH_VERSION "wsrep_${WSREP_VERSION}")
 if (NOT WITH_WSREP_ALL)
   SET(WSREP_LIB_WITH_UNIT_TESTS OFF CACHE BOOL


@@ -542,7 +542,6 @@ this is the case with Microsoft Windows VirtualFree(MEM_DECOMMIT) */
 #ifndef EMBEDDED_LIBRARY
 #cmakedefine WSREP_INTERFACE_VERSION "@WSREP_INTERFACE_VERSION@"
 #cmakedefine WITH_WSREP 1
-#cmakedefine WSREP_PROC_INFO 1
 #endif
 #if !defined(__STDC_FORMAT_MACROS)


@@ -84,7 +84,13 @@ function check_root_accounts() {
   logger -p daemon.info -i -t"$0" "Checking for insecure root accounts."
-  ret=$(echo "SELECT count(*) FROM mysql.user WHERE user='root' and password='' and password_expired='N' and plugin in ('', 'mysql_native_password', 'mysql_old_password');" | $MARIADB --skip-column-names)
+  ret=$(echo "
+  SELECT count(*) FROM mysql.global_priv
+  WHERE user='root' AND
+  JSON_VALUE(priv, '$.plugin') in ('mysql_native_password', 'mysql_old_password', 'parsec') AND
+  JSON_VALUE(priv, '$.authentication_string') = '' AND
+  JSON_VALUE(priv, '$.password_last_changed') != 0
+  " | $MARIADB --skip-column-names)
   if [ "$ret" -ne "0" ]
   then
     logger -p daemon.warn -i -t"$0" "WARNING: mysql.user contains $ret root accounts without password!"


@@ -44,11 +44,10 @@ add_lsb_base_depends()
   sed -e 's#lsof #lsb-base (>= 3.0-10),\n lsof #' -i debian/control
 }
 
-replace_uring_with_aio()
+remove_uring()
 {
-  sed 's/liburing-dev/libaio-dev/g' -i debian/control
-  sed -e '/-DIGNORE_AIO_CHECK=ON/d' \
-      -e '/-DWITH_URING=ON/d' -i debian/rules
+  sed -e '/liburing-dev/d' -i debian/control
+  sed -e '/-DWITH_URING=ON/d' -i debian/rules
 }
 
 disable_libfmt()
@@ -96,7 +95,7 @@ in
   # Debian
   "buster")
     disable_libfmt
-    replace_uring_with_aio
+    remove_uring
     ;&
   "bullseye")
     add_lsb_base_depends
@@ -107,7 +106,7 @@ in
     # so no removal is necessary.
     if [[ ! "$architecture" =~ amd64|arm64|armel|armhf|i386|mips64el|mipsel|ppc64el|s390x ]]
     then
-      replace_uring_with_aio
+      remove_uring
     fi
     ;&
   "trixie"|"forky"|"sid")
@@ -116,8 +115,8 @@ in
     ;;
   # Ubuntu
   "focal")
-    replace_uring_with_aio
     disable_libfmt
+    remove_uring
     ;&
   "jammy"|"kinetic")
     add_lsb_base_depends

debian/control

@@ -9,17 +9,17 @@ Build-Depends: bison,
                default-jdk,
                dh-exec,
                dh-package-notes,
-               flex [amd64],
+               flex [amd64 arm64],
                gdb <!nocheck>,
                libaio-dev [linux-any],
-               libboost-atomic-dev [amd64],
-               libboost-chrono-dev [amd64],
-               libboost-date-time-dev [amd64],
+               libboost-atomic-dev [amd64 arm64],
+               libboost-chrono-dev [amd64 arm64],
+               libboost-date-time-dev [amd64 arm64],
                libboost-dev,
-               libboost-filesystem-dev [amd64],
-               libboost-regex-dev [amd64],
-               libboost-system-dev [amd64],
-               libboost-thread-dev [amd64],
+               libboost-filesystem-dev [amd64 arm64],
+               libboost-regex-dev [amd64 arm64],
+               libboost-system-dev [amd64 arm64],
+               libboost-thread-dev [amd64 arm64],
                libbz2-dev,
                libcrack2-dev (>= 2.9.0),
                libcurl4-openssl-dev | libcurl4-dev,


@@ -1,6 +1,3 @@
-# These should be moved, see https://jira.mariadb.org/browse/MDEV-21654
-arch-dependent-file-in-usr-share [usr/share/mariadb/mariadb-test/suite/plugins/pam/pam_mariadb_mtr.so]
-arch-independent-package-contains-binary-or-object [usr/share/mariadb/mariadb-test/suite/plugins/pam/pam_mariadb_mtr.so]
 # Mainly for support for *BSD family. Not right way to do but this is test package and not for production
 incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mariadb/mariadb-test/std_data/checkDBI_DBD-MariaDB.pl]
 incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mariadb/mariadb-test/suite/engines/rr_trx/run_stress_tx_rr.pl]

debian/rules

@@ -87,9 +87,6 @@ endif
 # quality standards in Debian. Also building it requires an extra 4 GB of disk
 # space which makes native Debian builds fail as the total disk space needed
 # for MariaDB becomes over 10 GB. Only build CS via autobake-deb.sh.
-#
-# Note: Don't use '-DWITH_URING=ON' as some Buildbot builders are missing it
-# and would fail permanently.
 	PATH=$${MYSQL_BUILD_PATH:-"/usr/lib/ccache:/usr/local/bin:/usr/bin:/bin"} \
 	dh_auto_configure --builddirectory=$(BUILDDIR) -- \
 	-DCMAKE_BUILD_TYPE=RelWithDebInfo \
@@ -103,6 +100,8 @@ endif
 	-DPLUGIN_AWS_KEY_MANAGEMENT=NO \
 	-DPLUGIN_COLUMNSTORE=NO \
 	-DIGNORE_AIO_CHECK=ON \
+	-DWITH_URING=ON \
+	-DWITH_LIBAIO=ON \
 	-DDEB=$(DEB_VENDOR)
 # This is needed, otherwise 'make test' will run before binaries have been built


@@ -8,19 +8,46 @@ IF(SDK_LIBS_COUNT EQUAL 0)
RETURN() RETURN()
ENDIF() ENDIF()
CHECK_AWS_SDK(RETVAL REASON) CHECK_AWS_SDK("${SDK_LIBS}" RETVAL REASON)
IF(NOT RETVAL) IF(NOT RETVAL)
MESSAGE(FATAL_ERROR MESSAGE(FATAL_ERROR
"AWS C++ will not be built (${REASON}), but dependency on following components is found ${SDK_LIBS}. "AWS C++ will not be built (${REASON}), but dependency on following components is found ${SDK_LIBS}.
Use CHECK_AWS_SDK() function before trying to build with SDK components") Use CHECK_AWS_SDK() function before trying to build with SDK components")
ENDIF() ENDIF()
IF(NOT AWS_SDK_EXTERNAL_PROJECT)
# System aws sdk libraries found
RETURN()
ENDIF()
SET(all_libs
aws-crt-cpp
aws-c-event-stream
aws-c-common
aws-c-http
aws-c-mqtt
aws-c-cal
aws-c-auth
aws-c-io
aws-checksums
aws-c-s3
aws-c-sdkutils
aws-c-compression
aws-cpp-sdk-core
)
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux")
LIST(APPEND all_libs s2n)
ENDIF()
FOREACH(lib ${SDK_LIBS})
LIST(APPEND all_libs aws-cpp-sdk-${lib})
ENDFOREACH()
LIST(REMOVE_DUPLICATES all_libs)
SET(byproducts) SET(byproducts)
MAKE_DIRECTORY(${PROJECT_BINARY_DIR}/extra/aws_sdk/aws_sdk_cpp/include)
FOREACH(lib ${SDK_LIBS} core) FOREACH(lib ${all_libs})
SET(lib aws-cpp-sdk-${lib}) ADD_LIBRARY(${lib} UNKNOWN IMPORTED GLOBAL)
ADD_LIBRARY(${lib} STATIC IMPORTED GLOBAL)
ADD_DEPENDENCIES(${lib} aws_sdk_cpp) ADD_DEPENDENCIES(${lib} aws_sdk_cpp)
SET (loc "${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}") SET (loc "${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}")
@@ -28,47 +55,85 @@ FOREACH(lib ${SDK_LIBS} core)
SET(byproducts ${byproducts} BUILD_BYPRODUCTS ${loc}) SET(byproducts ${byproducts} BUILD_BYPRODUCTS ${loc})
ENDIF() ENDIF()
SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LOCATION ${loc}) SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LOCATION ${loc})
TARGET_INCLUDE_DIRECTORIES(${lib} INTERFACE ${PROJECT_BINARY_DIR}/extra/aws_sdk/aws_sdk_cpp/include)
ENDFOREACH() ENDFOREACH()
# To be compatible with older cmake, we use older version of the SDK # Define link dependencies between AWS C libs
IF(CMAKE_VERSION LESS "3.0") TARGET_LINK_LIBRARIES(aws-cpp-sdk-core INTERFACE aws-crt-cpp aws-c-event-stream aws-c-common)
SET(GIT_TAG "1.0.8") TARGET_LINK_LIBRARIES(aws-crt-cpp INTERFACE aws-c-http aws-c-mqtt aws-c-cal aws-c-auth aws-c-common aws-c-io
aws-checksums aws-c-event-stream aws-c-s3 aws-c-sdkutils)
TARGET_LINK_LIBRARIES(aws-c-event-stream INTERFACE aws-c-io aws-c-common aws-checksums)
TARGET_LINK_LIBRARIES(aws-c-http INTERFACE aws-c-io aws-c-compression aws-c-cal aws-c-common)
TARGET_LINK_LIBRARIES(aws-c-mqtt INTERFACE aws-c-http aws-c-io aws-c-common aws-c-cal aws-c-common)
TARGET_LINK_LIBRARIES(aws-c-auth INTERFACE aws-c-sdkutils aws-c-cal aws-c-http aws-c-io aws-c-common)
TARGET_LINK_LIBRARIES(aws-c-io INTERFACE aws-c-common aws-c-cal)
TARGET_LINK_LIBRARIES(aws-checksums INTERFACE aws-c-common)
TARGET_LINK_LIBRARIES(aws-c-compression INTERFACE aws-c-common)
TARGET_LINK_LIBRARIES(aws-c-s3 INTERFACE aws-c-auth aws-checksums aws-c-http aws-c-io aws-c-cal aws-c-common aws-c-sdkutils aws-c-common)
TARGET_LINK_LIBRARIES(aws-c-sdkutils INTERFACE aws-c-common)
IF(WIN32)
TARGET_LINK_LIBRARIES(aws-c-io INTERFACE bcrypt crypt32 secur32 ncrypt ws2_32)
TARGET_LINK_LIBRARIES(aws-c-common INTERFACE shlwapi)
TARGET_LINK_LIBRARIES(aws-cpp-sdk-core INTERFACE winhttp wininet version userenv)
ELSE() ELSE()
SET(GIT_TAG "1.8.29") TARGET_LINK_LIBRARIES(aws-c-cal INTERFACE OpenSSL::Crypto)
TARGET_LINK_LIBRARIES(aws-cpp-sdk-core INTERFACE ZLIB::ZLIB CURL::libcurl)
# Dependencies below are from CMakeLists.txt for aws-c-common
SET(THREADS_PREFER_PTHREAD_FLAG ON)
FIND_PACKAGE(Threads REQUIRED)
TARGET_LINK_LIBRARIES(aws-c-common INTERFACE ${CMAKE_DL_LIBS} Threads::Threads)
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux")
TARGET_LINK_LIBRARIES(aws-c-common INTERFACE s2n m rt)
ELSEIF(APPLE)
TARGET_LINK_LIBRARIES(aws-c-common INTERFACE "-framework CoreFoundation")
TARGET_LINK_LIBRARIES(aws-c-io INTERFACE "-framework Security" "-framework Network")
ENDIF()
ENDIF() ENDIF()
IF(MSVC_CRT_TYPE MATCHES "/MD") FOREACH(lib ${SDK_LIBS})
SET(FORCE_SHARED_CRT ON) TARGET_LINK_LIBRARIES(aws-cpp-sdk-${lib} INTERFACE aws-cpp-sdk-core)
ELSE() ENDFOREACH()
SET(FORCE_SHARED_CRT OFF)
ENDIF() SET(GIT_TAG "1.11.582")
LIST(REMOVE_DUPLICATES SDK_LIBS) LIST(REMOVE_DUPLICATES SDK_LIBS)
STRING( REPLACE ";" "!" SDK_LIBS_STR "${SDK_LIBS}") STRING( REPLACE ";" "!" SDK_LIBS_STR "${SDK_LIBS}")
#MESSAGE("SDK_LIBS_STR=${SDK_LIBS_STR}")
IF(MSVC)
SET(BUILD_AND_INSTALL_COMMANDS
BUILD_COMMAND ${CMAKE_COMMAND} --build <BINARY_DIR> --config RelWithDebInfo --verbose --parallel
INSTALL_COMMAND ${CMAKE_COMMAND} --install <BINARY_DIR> --config RelWithDebInfo)
SET(BUILD_TYPE -DCMAKE_BUILD_TYPE=RelWithDebInfo)
ELSE()
SET(BUILD_AND_INSTALL_COMMANDS)
SET(BUILD_TYPE)
ENDIF()
ExternalProject_Add( ExternalProject_Add(
aws_sdk_cpp aws_sdk_cpp
GIT_REPOSITORY "https://github.com/awslabs/aws-sdk-cpp.git" GIT_REPOSITORY "https://github.com/awslabs/aws-sdk-cpp.git"
GIT_TAG ${GIT_TAG} GIT_TAG ${GIT_TAG}
GIT_SHALLOW TRUE
UPDATE_COMMAND "" UPDATE_COMMAND ""
SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/aws-sdk-cpp" SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/aws-sdk-cpp"
LIST_SEPARATOR ! LIST_SEPARATOR !
${byproducts} ${byproducts}
CMAKE_ARGS CMAKE_ARGS
${BUILD_TYPE}
-DCMAKE_BUILD_TYPE=RelWithDebInfo
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DBUILD_ONLY=${SDK_LIBS_STR} -DBUILD_ONLY=${SDK_LIBS_STR}
-DBUILD_SHARED_LIBS=OFF -DBUILD_SHARED_LIBS=OFF
-DFORCE_SHARED_CRT=${FORCE_SHARED_CRT} -DFORCE_SHARED_CRT=ON
-DENABLE_TESTING=OFF -DENABLE_TESTING=OFF
"-DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG} ${PIC_FLAG}" -DAWS_WARNINGS_ARE_ERRORS=OFF
"-DCMAKE_CXX_FLAGS_RELWITHDEBINFO=${CMAKE_CXX_FLAGS_RELWITHDEBINFO} ${PIC_FLAG}"
"-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE} ${PIC_FLAG}"
"-DCMAKE_CXX_FLAGS_MINSIZEREL=${CMAKE_CXX_FLAGS_MINSIZEREL} ${PIC_FLAG}"
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
${EXTRA_SDK_CMAKE_FLAGS} ${EXTRA_SDK_CMAKE_FLAGS}
-DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp -DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp
-DCMAKE_INSTALL_LIBDIR=lib -DCMAKE_INSTALL_LIBDIR=lib
TEST_COMMAND "" TEST_COMMAND ""
${BUILD_AND_INSTALL_COMMANDS}
) )
SET_TARGET_PROPERTIES(aws_sdk_cpp PROPERTIES EXCLUDE_FROM_ALL TRUE) SET_TARGET_PROPERTIES(aws_sdk_cpp PROPERTIES EXCLUDE_FROM_ALL TRUE)


@@ -107,6 +107,8 @@ MYSQL_ADD_EXECUTABLE(mbstream
 TARGET_LINK_LIBRARIES(mbstream
   mysys
   )
+TARGET_INCLUDE_DIRECTORIES(mbstream PRIVATE ${PROJECT_SOURCE_DIR}/tpool)
 ADD_DEPENDENCIES(mbstream GenError)
 
 IF(MSVC)


@@ -1349,6 +1349,8 @@ backup_files(ds_ctxt *ds_data, const char *from)
} }
} }
} }
if (!backup_mroonga_files_from_datadir(ds_data, from))
goto out;
msg("Finished backing up non-InnoDB tables and files"); msg("Finished backing up non-InnoDB tables and files");
out: out:
datadir_iter_free(it); datadir_iter_free(it);
@@ -1515,7 +1517,9 @@ ibx_copy_incremental_over_full()
"aws-kms-key")) || "aws-kms-key")) ||
!(ret = backup_files_from_datadir(ds_data, !(ret = backup_files_from_datadir(ds_data,
xtrabackup_incremental_dir, xtrabackup_incremental_dir,
"aria_log"))) "aria_log")) ||
!(ret = backup_mroonga_files_from_datadir(ds_data,
xtrabackup_incremental_dir)))
goto cleanup; goto cleanup;
/* copy supplementary files */ /* copy supplementary files */
@@ -2078,6 +2082,47 @@ bool backup_files_from_datadir(ds_ctxt_t *ds_data,
return ret; return ret;
} }
bool backup_mroonga_files_from_datadir(ds_ctxt_t *ds_data,
const char *dir_path)
{
os_file_dir_t dir= os_file_opendir(dir_path);
if (dir == IF_WIN(INVALID_HANDLE_VALUE, nullptr)) return false;
os_file_stat_t info;
bool ret= true;
while (os_file_readdir_next_file(dir_path, dir, &info) == 0)
{
if (info.type != OS_FILE_TYPE_FILE)
continue;
const char *pname = strrchr(info.name, '/');
#ifdef _WIN32
if (const char *last = strrchr(info.name, '\\'))
{
if (!pname || last > pname)
pname = last;
}
#endif
if (!pname)
pname = info.name;
if (!strstr(pname, ".mrn"))
continue;
if (xtrabackup_prepare && xtrabackup_incremental_dir &&
file_exists(info.name))
unlink(info.name);
std::string full_path(dir_path);
full_path.append(1, '/').append(info.name);
if (!(ret = ds_data->copy_file(full_path.c_str() , info.name, 1)))
break;
}
os_file_closedir(dir);
return ret;
}
static int rocksdb_remove_checkpoint_directory() static int rocksdb_remove_checkpoint_directory()
{ {


@@ -60,6 +60,8 @@ const char *trim_dotslash(const char *path);
 bool backup_files_from_datadir(ds_ctxt_t *ds_data,
                                const char *dir_path,
                                const char *prefix);
+bool backup_mroonga_files_from_datadir(ds_ctxt_t *ds_data,
+                                       const char *dir_path);
 bool is_system_table(const char *dbname, const char *tablename);
 
 std::unique_ptr<std::vector<std::string>>


@@ -563,12 +563,36 @@ select_incremental_lsn_from_history(lsn_t *incremental_lsn)
{ {
MYSQL_RES *mysql_result; MYSQL_RES *mysql_result;
char query[1000]; char query[1000];
char buf[100]; char buf[NAME_LEN*2+3];
size_t opt_incremental_history_name_len= 0;
size_t opt_incremental_history_uuid_len= 0;
if (opt_incremental_history_name)
opt_incremental_history_name_len=
strlen(opt_incremental_history_name);
if (opt_incremental_history_uuid)
opt_incremental_history_uuid_len=
strlen(opt_incremental_history_uuid);
if (opt_incremental_history_name_len*2 > sizeof(buf))
die("Incremental history table name '%s' is too long.",
opt_incremental_history_name);
if (opt_incremental_history_uuid_len*2 > sizeof(buf))
die("Incremental history uuid '%s' is too long.",
opt_incremental_history_uuid);
if (opt_incremental_history_name && opt_incremental_history_name[0]
&& opt_incremental_history_uuid && opt_incremental_history_uuid[0])
die("It is allowed to use either --incremental-history-name "
"or --incremental-history-uuid, but not both.");
if (opt_incremental_history_name) { if (opt_incremental_history_name) {
mysql_real_escape_string(mysql_connection, buf, mysql_real_escape_string(mysql_connection, buf,
opt_incremental_history_name, opt_incremental_history_name,
(unsigned long)strlen(opt_incremental_history_name)); (unsigned long) opt_incremental_history_name_len);
snprintf(query, sizeof(query), snprintf(query, sizeof(query),
"SELECT innodb_to_lsn " "SELECT innodb_to_lsn "
"FROM " XB_HISTORY_TABLE " " "FROM " XB_HISTORY_TABLE " "
@@ -577,11 +601,10 @@ select_incremental_lsn_from_history(lsn_t *incremental_lsn)
"ORDER BY innodb_to_lsn DESC LIMIT 1", "ORDER BY innodb_to_lsn DESC LIMIT 1",
buf); buf);
} }
else if (opt_incremental_history_uuid) {
if (opt_incremental_history_uuid) {
mysql_real_escape_string(mysql_connection, buf, mysql_real_escape_string(mysql_connection, buf,
opt_incremental_history_uuid, opt_incremental_history_uuid,
(unsigned long)strlen(opt_incremental_history_uuid)); (unsigned long) opt_incremental_history_uuid_len);
snprintf(query, sizeof(query), snprintf(query, sizeof(query),
"SELECT innodb_to_lsn " "SELECT innodb_to_lsn "
"FROM " XB_HISTORY_TABLE " " "FROM " XB_HISTORY_TABLE " "
@@ -591,6 +614,8 @@ select_incremental_lsn_from_history(lsn_t *incremental_lsn)
buf); buf);
} }
/* xb_mysql_query(..,.., true) will die on error, so
mysql_result can't be nullptr */
mysql_result = xb_mysql_query(mysql_connection, query, true); mysql_result = xb_mysql_query(mysql_connection, query, true);
ut_ad(mysql_num_fields(mysql_result) == 1); ut_ad(mysql_num_fields(mysql_result) == 1);
@@ -1691,7 +1716,7 @@ write_xtrabackup_info(ds_ctxt *datasink,
char buf_end_time[100]; char buf_end_time[100];
tm tm; tm tm;
std::ostringstream oss; std::ostringstream oss;
const char *xb_stream_name[] = {"file", "tar", "xbstream"}; const char *xb_stream_name[] = {"file", "mbstream"};
uuid = read_mysql_one_value(connection, "SELECT UUID()"); uuid = read_mysql_one_value(connection, "SELECT UUID()");
server_version = read_mysql_one_value(connection, "SELECT VERSION()"); server_version = read_mysql_one_value(connection, "SELECT VERSION()");
@@ -1774,6 +1799,10 @@ write_xtrabackup_info(ds_ctxt *datasink,
goto cleanup; goto cleanup;
} }
xb_mysql_query(connection,
"ALTER TABLE IF EXISTS " XB_HISTORY_TABLE
" MODIFY format ENUM('file', 'tar', 'mbstream') DEFAULT NULL", false);
xb_mysql_query(connection, xb_mysql_query(connection,
"CREATE TABLE IF NOT EXISTS " XB_HISTORY_TABLE "(" "CREATE TABLE IF NOT EXISTS " XB_HISTORY_TABLE "("
"uuid VARCHAR(40) NOT NULL PRIMARY KEY," "uuid VARCHAR(40) NOT NULL PRIMARY KEY,"
@@ -1791,7 +1820,7 @@ write_xtrabackup_info(ds_ctxt *datasink,
"innodb_to_lsn BIGINT UNSIGNED DEFAULT NULL," "innodb_to_lsn BIGINT UNSIGNED DEFAULT NULL,"
"partial ENUM('Y', 'N') DEFAULT NULL," "partial ENUM('Y', 'N') DEFAULT NULL,"
"incremental ENUM('Y', 'N') DEFAULT NULL," "incremental ENUM('Y', 'N') DEFAULT NULL,"
"format ENUM('file', 'tar', 'xbstream') DEFAULT NULL," "format ENUM('file', 'tar', 'mbstream') DEFAULT NULL,"
"compressed ENUM('Y', 'N') DEFAULT NULL" "compressed ENUM('Y', 'N') DEFAULT NULL"
") CHARACTER SET utf8 ENGINE=innodb", false); ") CHARACTER SET utf8 ENGINE=innodb", false);
@@ -1942,7 +1971,7 @@ void
capture_tool_command(int argc, char **argv) capture_tool_command(int argc, char **argv)
{ {
/* capture tool name tool args */ /* capture tool name tool args */
tool_name = strrchr(argv[0], '/'); tool_name = strrchr(argv[0], IF_WIN('\\', '/'));
tool_name = tool_name ? tool_name + 1 : argv[0]; tool_name = tool_name ? tool_name + 1 : argv[0];
make_argv(tool_args, sizeof(tool_args), argc, argv); make_argv(tool_args, sizeof(tool_args), argc, argv);
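
The select_incremental_lsn_from_history() change above sizes the escape buffer as NAME_LEN*2+3 because mysql_real_escape_string() may emit up to two bytes per input byte plus a terminating NUL, and it now rejects inputs that would overflow that buffer before escaping them. A minimal sketch of that sizing rule follows; the table and limit are illustrative placeholders, not mariabackup's actual history table or query:

#include <stdio.h>
#include <string.h>
#include <mysql.h>

#define MAX_NAME 64                       /* illustrative input limit */

static void lookup_history_row(MYSQL *conn, const char *name)
{
  char escaped[MAX_NAME * 2 + 1];         /* worst case: every byte escaped, plus NUL */
  size_t len= strlen(name);

  if (len > MAX_NAME)                     /* refuse what the buffer cannot hold */
  {
    fprintf(stderr, "name '%s' is too long\n", name);
    return;
  }
  mysql_real_escape_string(conn, escaped, name, (unsigned long) len);

  char query[256];
  snprintf(query, sizeof(query),
           "SELECT innodb_to_lsn FROM backup_history"
           " WHERE name = '%s' ORDER BY innodb_to_lsn DESC LIMIT 1",
           escaped);
  mysql_query(conn, query);               /* error handling omitted */
}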


@@ -186,7 +186,6 @@ xb_fil_cur_open(
 	}
 #else
 	err = fstat(cursor->file.m_file, &cursor->statinfo);
-	MSAN_STAT_WORKAROUND(&cursor->statinfo);
 #endif
 	if (max_file_size < (ulonglong)cursor->statinfo.st_size) {
 		cursor->statinfo.st_size = (ulonglong)max_file_size;


@@ -377,6 +377,10 @@ static my_bool opt_check_privileges;
extern const char *innodb_checksum_algorithm_names[]; extern const char *innodb_checksum_algorithm_names[];
extern TYPELIB innodb_checksum_algorithm_typelib; extern TYPELIB innodb_checksum_algorithm_typelib;
extern TYPELIB innodb_flush_method_typelib; extern TYPELIB innodb_flush_method_typelib;
#ifdef __linux__
extern const char *innodb_linux_aio_names[];
extern TYPELIB innodb_linux_aio_typelib;
#endif
extern TYPELIB innodb_doublewrite_typelib; extern TYPELIB innodb_doublewrite_typelib;
/** Ignored option */ /** Ignored option */
static ulong innodb_flush_method; static ulong innodb_flush_method;
@@ -1147,29 +1151,63 @@ static void backup_file_op(uint32_t space_id, int type,
} }
} }
static bool check_if_fts_table(const char *file_name) { /** Check whether the spacename belongs to internal FTS table
const char *table_name_start = strrchr(file_name, '/'); @param space_name space name to be checked
@return true if it is fts table or false otherwise */
static bool check_if_fts_table(const char *space_name) {
/* There are two types of FTS internal table
1) FTS common tables (FTS_<space_id>_<fts_common_tables>
2) FTS INDEX auxiliary table (FTS_<space_id>_<index_id>_<aux_table> */
const char *table_name_start = strrchr(space_name, '/');
if (table_name_start) if (table_name_start)
++table_name_start; ++table_name_start;
else else
table_name_start = file_name; table_name_start = space_name;
if (!starts_with(table_name_start,"FTS_"))
return false;
const char *table_name_end = strrchr(table_name_start, '.'); const char *table_name_end = strrchr(table_name_start, '.');
if (!table_name_end) if (!table_name_end)
table_name_end = table_name_start + strlen(table_name_start); table_name_end =
ptrdiff_t table_name_len = table_name_end - table_name_end; table_name_start + strlen(table_name_start) - 1;
if (!starts_with(table_name_start,"FTS_"))
return false;
for (const char **suffix = fts_common_tables; *suffix; ++suffix) /* Skip FTS_ */
if (!strncmp(table_name_start, *suffix, table_name_len)) const char *table_name_suffix = strchr(table_name_start, '_');
if (!table_name_suffix ||
table_name_suffix == table_name_end) {
return false;
}
table_name_suffix++;
/* Skip <table_id>_ */
table_name_suffix = strchr(table_name_suffix, '_');
if (!table_name_suffix ||
table_name_end == table_name_suffix) {
return false;
}
table_name_suffix++;
ptrdiff_t table_name_len = table_name_end - table_name_suffix;
/* Compare only common tables */
for (const char **suffix = fts_common_tables; *suffix; ++suffix) {
if (!strncmp(table_name_suffix, *suffix, table_name_len))
return true; return true;
}
/* Skip index_id on fts table name */
table_name_suffix = strchr(table_name_suffix, '_');
if (!table_name_suffix ||
table_name_suffix == table_name_end) {
return false;
}
table_name_suffix++;
table_name_len = table_name_end - table_name_suffix;
for (size_t i = 0; fts_index_selector[i].suffix; ++i) for (size_t i = 0; fts_index_selector[i].suffix; ++i)
if (!strncmp(table_name_start, fts_index_selector[i].suffix, if (!strncmp(table_name_suffix, fts_index_selector[i].suffix,
table_name_len)) table_name_len))
return true; return true;
return false; return false;
} }
@@ -1194,7 +1232,20 @@ static void backup_file_op_fail(uint32_t space_id, int type,
msg("DDL tracking : create %" PRIu32 " \"%.*s\"", msg("DDL tracking : create %" PRIu32 " \"%.*s\"",
space_id, int(len), name); space_id, int(len), name);
fail = !check_if_skip_table(spacename.c_str()); fail = !check_if_skip_table(spacename.c_str());
error= "create"; if (!opt_no_lock && fail &&
check_if_fts_table(spacename.c_str())) {
/* Ignore the FTS internal table because InnoDB does
create intermediate table and their associative FTS
internal table when table is being rebuilt during
prepare phase. Also, backup_set_alter_copy_lock()
downgrades the MDL_BACKUP_DDL before prepare phase
of alter. This leads to the FTS internal table being
created in the late phase of backup.
mariabackup --prepare should be able to handle
this case. */
fail = false;
}
error= "create";
break; break;
case FILE_MODIFY: case FILE_MODIFY:
break; break;
@@ -1335,6 +1386,9 @@ enum options_xtrabackup
OPT_INNODB_READ_IO_THREADS, OPT_INNODB_READ_IO_THREADS,
OPT_INNODB_WRITE_IO_THREADS, OPT_INNODB_WRITE_IO_THREADS,
OPT_INNODB_USE_NATIVE_AIO, OPT_INNODB_USE_NATIVE_AIO,
#ifdef __linux__
OPT_INNODB_LINUX_AIO,
#endif
OPT_INNODB_PAGE_SIZE, OPT_INNODB_PAGE_SIZE,
OPT_INNODB_BUFFER_POOL_FILENAME, OPT_INNODB_BUFFER_POOL_FILENAME,
OPT_INNODB_LOCK_WAIT_TIMEOUT, OPT_INNODB_LOCK_WAIT_TIMEOUT,
@@ -1947,6 +2001,14 @@ struct my_option xb_server_options[] =
(G_PTR*) &srv_use_native_aio, (G_PTR*) &srv_use_native_aio,
(G_PTR*) &srv_use_native_aio, 0, GET_BOOL, NO_ARG, (G_PTR*) &srv_use_native_aio, 0, GET_BOOL, NO_ARG,
TRUE, 0, 0, 0, 0, 0}, TRUE, 0, 0, 0, 0, 0},
#ifdef __linux__
{"innodb_linux_aio", OPT_INNODB_LINUX_AIO,
"Which linux AIO implementation to use, auto (io_uring, failing to aio) or explicit",
(G_PTR*) &srv_linux_aio_method,
(G_PTR*) &srv_linux_aio_method,
&innodb_linux_aio_typelib, GET_ENUM, REQUIRED_ARG,
SRV_LINUX_AIO_AUTO, 0, 0, 0, 0, 0},
#endif
{"innodb_page_size", OPT_INNODB_PAGE_SIZE, {"innodb_page_size", OPT_INNODB_PAGE_SIZE,
"The universal page size of the database.", "The universal page size of the database.",
(G_PTR*) &innobase_page_size, (G_PTR*) &innobase_page_size, 0, (G_PTR*) &innobase_page_size, (G_PTR*) &innobase_page_size, 0,
@@ -2517,26 +2579,7 @@ static bool innodb_init_param()
ut_ad(DATA_MYSQL_BINARY_CHARSET_COLL == my_charset_bin.number); ut_ad(DATA_MYSQL_BINARY_CHARSET_COLL == my_charset_bin.number);
#ifdef _WIN32 srv_use_native_aio= tpool::supports_native_aio();
srv_use_native_aio = TRUE;
#elif defined(LINUX_NATIVE_AIO)
if (srv_use_native_aio) {
msg("InnoDB: Using Linux native AIO");
}
#elif defined(HAVE_URING)
if (srv_use_native_aio) {
msg("InnoDB: Using liburing");
}
#else
/* Currently native AIO is supported only on windows and linux
and that also when the support is compiled in. In all other
cases, we ignore the setting of innodb_use_native_aio. */
srv_use_native_aio = FALSE;
#endif
/* Assign the default value to srv_undo_dir if it's not specified, as /* Assign the default value to srv_undo_dir if it's not specified, as
my_getopt does not support default values for string options. We also my_getopt does not support default values for string options. We also
@@ -2571,9 +2614,6 @@ static bool innodb_init_param()
} }
} }
#ifdef _WIN32
srv_use_native_aio = TRUE;
#endif
return false; return false;
error: error:
@@ -2581,6 +2621,7 @@ error:
return true; return true;
} }
alignas(8)
static byte log_hdr_buf[log_t::START_OFFSET + SIZE_OF_FILE_CHECKPOINT]; static byte log_hdr_buf[log_t::START_OFFSET + SIZE_OF_FILE_CHECKPOINT];
/** Initialize an InnoDB log file header in log_hdr_buf[] */ /** Initialize an InnoDB log file header in log_hdr_buf[] */
@@ -4182,7 +4223,6 @@ next_file:
return(-1); return(-1);
} }
MSAN_STAT_WORKAROUND(&statinfo);
info->size = statinfo.st_size; info->size = statinfo.st_size;
if (S_ISDIR(statinfo.st_mode)) { if (S_ISDIR(statinfo.st_mode)) {
@@ -4544,7 +4584,7 @@ xb_register_filter_entry(
databases_hash->cell_get(my_crc32c(0, name, p - name)) databases_hash->cell_get(my_crc32c(0, name, p - name))
->search(&xb_filter_entry_t::name_hash, ->search(&xb_filter_entry_t::name_hash,
[dbname](xb_filter_entry_t* f) [dbname](xb_filter_entry_t* f)
{ return f && !strcmp(f->name, dbname); }); { return !f || !strcmp(f->name, dbname); });
if (!*prev) { if (!*prev) {
(*prev = xb_new_filter_entry(dbname)) (*prev = xb_new_filter_entry(dbname))
->has_tables = TRUE; ->has_tables = TRUE;
@@ -4678,7 +4718,7 @@ xb_load_list_file(
FILE* fp; FILE* fp;
/* read and store the filenames */ /* read and store the filenames */
fp = fopen(filename, "r"); fp = fopen(filename, "rt");
if (!fp) { if (!fp) {
die("Can't open %s", die("Can't open %s",
filename); filename);
@@ -5090,7 +5130,7 @@ class BackupStages {
bool stage_start(Backup_datasinks &backup_datasinks, bool stage_start(Backup_datasinks &backup_datasinks,
CorruptedPages &corrupted_pages) { CorruptedPages &corrupted_pages) {
msg("BACKUP STAGE START"); msg("Starting BACKUP STAGE START");
if (!opt_no_lock) { if (!opt_no_lock) {
if (opt_safe_slave_backup) { if (opt_safe_slave_backup) {
if (!wait_for_safe_slave(mysql_connection)) { if (!wait_for_safe_slave(mysql_connection)) {
@@ -5104,6 +5144,7 @@ class BackupStages {
msg("Error on BACKUP STAGE START query execution"); msg("Error on BACKUP STAGE START query execution");
return(false); return(false);
} }
msg("Acquired locks for BACKUP STAGE START");
} }
InnodbDataCopier innodb_data_copier(backup_datasinks, InnodbDataCopier innodb_data_copier(backup_datasinks,
@@ -5134,14 +5175,18 @@ class BackupStages {
DBUG_MARIABACKUP_EVENT_LOCK("after_aria_background", {}); DBUG_MARIABACKUP_EVENT_LOCK("after_aria_background", {});
msg("Finished BACKUP STAGE START");
return true; return true;
} }
bool stage_flush() { bool stage_flush() {
msg("BACKUP STAGE FLUSH"); msg("Starting BACKUP STAGE FLUSH");
if (!opt_no_lock && !lock_for_backup_stage_flush(m_bs_con)) { if (!opt_no_lock) {
msg("Error on BACKUP STAGE FLUSH query execution"); if (!lock_for_backup_stage_flush(m_bs_con)) {
return false; msg("Error on BACKUP STAGE FLUSH query execution");
return false;
}
msg("Acquired locks for BACKUP STAGE FLUSH");
} }
auto tables_in_use = get_tables_in_use(mysql_connection); auto tables_in_use = get_tables_in_use(mysql_connection);
// Copy non-stats-log non-in-use tables of non-InnoDB-Aria-RocksDB engines // Copy non-stats-log non-in-use tables of non-InnoDB-Aria-RocksDB engines
@@ -5189,17 +5234,20 @@ class BackupStages {
xb_mysql_query(mysql_connection, xb_mysql_query(mysql_connection,
"SET debug_sync='now WAIT_FOR copy_started'", false, true); "SET debug_sync='now WAIT_FOR copy_started'", false, true);
); );
msg("Finished BACKUP STAGE FLUSH");
return true; return true;
} }
bool stage_block_ddl(Backup_datasinks &backup_datasinks, bool stage_block_ddl(Backup_datasinks &backup_datasinks,
CorruptedPages &corrupted_pages) { CorruptedPages &corrupted_pages) {
msg("Started BACKUP STAGE BLOCK_DDL");
if (!opt_no_lock) { if (!opt_no_lock) {
if (!lock_for_backup_stage_block_ddl(m_bs_con)) { if (!lock_for_backup_stage_block_ddl(m_bs_con)) {
msg("BACKUP STAGE BLOCK_DDL"); msg("Error on BACKUP STAGE BLOCK_DDL "
"query execution");
return false; return false;
} }
msg("Acquired locks for BACKUP STAGE BLOCK_DDL");
if (have_galera_enabled) if (have_galera_enabled)
{ {
xb_mysql_query(mysql_connection, "SET SESSION wsrep_sync_wait=0", false); xb_mysql_query(mysql_connection, "SET SESSION wsrep_sync_wait=0", false);
@@ -5261,14 +5309,18 @@ class BackupStages {
DBUG_MARIABACKUP_EVENT_LOCK("after_stage_block_ddl", {}); DBUG_MARIABACKUP_EVENT_LOCK("after_stage_block_ddl", {});
msg("Finished BACKUP STAGE BLOCK_DDL");
return true; return true;
} }
bool stage_block_commit(Backup_datasinks &backup_datasinks) { bool stage_block_commit(Backup_datasinks &backup_datasinks) {
msg("BACKUP STAGE BLOCK_COMMIT"); msg("Starting BACKUP STAGE BLOCK_COMMIT");
if (!opt_no_lock && !lock_for_backup_stage_commit(m_bs_con)) { if (!opt_no_lock) {
msg("Error on BACKUP STAGE BLOCK_COMMIT query execution"); if (!lock_for_backup_stage_commit(m_bs_con)) {
return false; msg("Error on BACKUP STAGE BLOCK_COMMIT query execution");
return false;
}
msg("Acquired locks for BACKUP STAGE BLOCK_COMMIT");
} }
// Copy log tables tail // Copy log tables tail
@@ -5368,11 +5420,13 @@ class BackupStages {
"FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS", false); "FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS", false);
} }
return backup_datasinks.backup_low(); bool res= backup_datasinks.backup_low();
msg("Finishing BACKUP STAGE BLOCK_COMMIT");
return res;
} }
bool stage_end(Backup_datasinks &backup_datasinks) { bool stage_end(Backup_datasinks &backup_datasinks) {
msg("BACKUP STAGE END"); msg("Starting BACKUP STAGE END");
/* release all locks */ /* release all locks */
if (!opt_no_lock) { if (!opt_no_lock) {
unlock_all(m_bs_con); unlock_all(m_bs_con);
@@ -5495,7 +5549,6 @@ fail:
xb_fil_io_init(); xb_fil_io_init();
if (os_aio_init()) { if (os_aio_init()) {
msg("Error: cannot initialize AIO subsystem");
goto fail; goto fail;
} }
@@ -5543,10 +5596,6 @@ fail:
backup_datasinks.init(); backup_datasinks.init();
if (!select_history()) {
goto fail;
}
/* open the log file */ /* open the log file */
memset(&stat_info, 0, sizeof(MY_STAT)); memset(&stat_info, 0, sizeof(MY_STAT));
dst_log_file = ds_open(backup_datasinks.m_redo, LOG_FILE_NAME, &stat_info); dst_log_file = ds_open(backup_datasinks.m_redo, LOG_FILE_NAME, &stat_info);
@@ -5561,6 +5610,11 @@ fail:
if (innodb_log_checkpoint_now != false) { if (innodb_log_checkpoint_now != false) {
mysql_read_query_result(mysql_connection); mysql_read_query_result(mysql_connection);
} }
if (!select_history()) {
goto fail;
}
/* label it */ /* label it */
recv_sys.file_checkpoint = log_sys.next_checkpoint_lsn; recv_sys.file_checkpoint = log_sys.next_checkpoint_lsn;
log_hdr_init(); log_hdr_init();


@@ -138,6 +138,8 @@ if(MSVC)
   remove_definitions(-DHAVE_CONFIG_H)
   target_compile_definitions(wolfssl PRIVATE
     WOLFSSL_HAVE_MIN WOLFSSL_HAVE_MAX)
+  # Workaround https://github.com/wolfSSL/wolfssl/issues/9004
+  target_compile_definitions(wolfssl PRIVATE WOLFSSL_NO_SOCK SOCKET_INVALID=-1)
 endif()
 set_property(TARGET wolfssl PROPERTY POSITION_INDEPENDENT_CODE ON)


@@ -44,7 +44,7 @@
 #define SD_LISTEN_FDS_START (0)
 #define sd_notify(X, Y)
 #define sd_notifyf(E, F, ...)
-#ifdef _WIN32
+#if defined (_WIN32) && !defined(EMBEDDED_LIBRARY)
 #define service_manager_extend_timeout(I, F, ...) \
   mysqld_win_extend_service_timeout(I)
 #else


@@ -37,11 +37,6 @@
 # define MEM_GET_VBITS(a,b,len) __msan_copy_shadow(b,a,len)
 # define MEM_SET_VBITS(a,b,len) __msan_copy_shadow(a,b,len)
 # define REDZONE_SIZE 8
-# ifdef __linux__
-# define MSAN_STAT_WORKAROUND(st) MEM_MAKE_DEFINED(st, sizeof(*st))
-# else
-# define MSAN_STAT_WORKAROUND(st) ((void) 0)
-# endif
 #elif defined(HAVE_VALGRIND_MEMCHECK_H) && defined(HAVE_valgrind)
 # include <valgrind/memcheck.h>
 # define HAVE_MEM_CHECK
@@ -54,7 +49,6 @@
 # define MEM_GET_VBITS(a,b,len) VALGRIND_GET_VBITS(a,b,len)
 # define MEM_SET_VBITS(a,b,len) VALGRIND_SET_VBITS(a,b,len)
 # define REDZONE_SIZE 8
-# define MSAN_STAT_WORKAROUND(st) ((void) 0)
 #elif defined(__SANITIZE_ADDRESS__) && (!defined(_MSC_VER) || defined (__clang__))
 # include <sanitizer/asan_interface.h>
 /* How to do manual poisoning:
@@ -68,7 +62,6 @@ https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning */
 # define MEM_CHECK_DEFINED(a,len) ((void) 0)
 # define MEM_GET_VBITS(a,b,len) ((void) 0)
 # define MEM_SET_VBITS(a,b,len) ((void) 0)
-# define MSAN_STAT_WORKAROUND(st) ((void) 0)
 # define REDZONE_SIZE 8
 #else
 # define MEM_UNDEFINED(a,len) ((void) 0)
@@ -80,7 +73,6 @@ https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning */
 # define MEM_GET_VBITS(a,b,len) ((void) 0)
 # define MEM_SET_VBITS(a,b,len) ((void) 0)
 # define REDZONE_SIZE 0
-# define MSAN_STAT_WORKAROUND(st) ((void) 0)
 #endif /* __has_feature(memory_sanitizer) */
 
 #ifdef TRASH_FREED_MEMORY


@@ -55,6 +55,10 @@
 #ifdef HAVE_WOLFSSL
 #undef ERR_remove_state
 #define ERR_remove_state(x) do {} while(0)
+#undef SSL_get_cipher
+#define SSL_get_cipher(ssl) (SSL_version(ssl) == TLS1_3_VERSION ? wolfSSL_get_cipher(ssl) : wolfSSL_get_cipher_name(ssl))
+#undef SSL_get_cipher_list
+#define SSL_get_cipher_list(ctx,i) wolfSSL_get_cipher_list(i)
 #elif defined (HAVE_ERR_remove_thread_state)
 #define ERR_remove_state(X) ERR_remove_thread_state(NULL)
 #endif /* HAVE_ERR_remove_thread_state */


@@ -21,7 +21,6 @@ INCLUDE_DIRECTORIES(
   ${CMAKE_SOURCE_DIR}/include
   ${CMAKE_SOURCE_DIR}/libmysqld
   ${CMAKE_SOURCE_DIR}/sql
-  ${CMAKE_SOURCE_DIR}/tpool
   ${CMAKE_BINARY_DIR}/sql
   ${PCRE_INCLUDE_DIRS}
   ${LIBFMT_INCLUDE_DIR}


@@ -3099,6 +3099,42 @@ alter table t1 drop constraint t1_fk_t2_id, drop t2_id, drop t2_id;
ERROR 42000: Can't DROP COLUMN `t2_id`; check that it exists
drop table t1, t2;
#
# MDEV-29001 DROP DEFAULT makes SHOW CREATE non-idempotent
#
SET @save_sql_mode=@@sql_mode;
SET sql_mode=strict_all_tables;
create table t1 (
a int,
b int default 0,
c int not null,
d int not null default 1);
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT 0,
`c` int(11) NOT NULL,
`d` int(11) NOT NULL DEFAULT 1
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
alter table t1
alter a drop default,
alter b drop default,
alter c drop default,
alter d drop default;
SHOW create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL,
`c` int(11) NOT NULL,
`d` int(11) NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
insert t1 values (default, default, default, default);
ERROR HY000: Field 'c' doesn't have a default value
insert t1 values (default, default, 0, 0);
drop table t1;
set sql_mode= @save_sql_mode;
#
# End of 10.6 tests
#
#


@@ -2403,6 +2403,30 @@ create table t1(id int primary key, t2_id int, constraint t1_fk_t2_id foreign ke
alter table t1 drop constraint t1_fk_t2_id, drop t2_id, drop t2_id;
drop table t1, t2;
--echo #
--echo # MDEV-29001 DROP DEFAULT makes SHOW CREATE non-idempotent
--echo #
SET @save_sql_mode=@@sql_mode;
SET sql_mode=strict_all_tables;
create table t1 (
a int,
b int default 0,
c int not null,
d int not null default 1);
show create table t1;
alter table t1
alter a drop default,
alter b drop default,
alter c drop default,
alter d drop default;
SHOW create table t1;
--error ER_NO_DEFAULT_FOR_FIELD
insert t1 values (default, default, default, default);
insert t1 values (default, default, 0, 0);
drop table t1;
set sql_mode= @save_sql_mode;
--echo #
--echo # End of 10.6 tests
--echo #


@@ -1,3 +1,6 @@
#
# MDEV-16110 ALTER with ALGORITHM=INPLACE breaks temporary table with virtual columns
#
create table t (a int, v int as (a)) engine=innodb;
alter table t change column a b tinyint, algorithm=inplace;
ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
@@ -28,8 +31,13 @@ t2 CREATE TEMPORARY TABLE `t2` (
`v` int(11) GENERATED ALWAYS AS (`a`) VIRTUAL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
drop temporary table t1, t2;
unlock tables;
#
# MDEV-18083 ASAN heap-use-after-free in Field::set_warning_truncated_wrong_value upon inserting into temporary table
#
create temporary table t1 (a int);
alter table t1 add column f text;
insert into t1 values ('x','foo');
ERROR 22007: Incorrect integer value: 'x' for column `test`.`t1`.`a` at row 1
drop temporary table t1;
# End of 10.2 tests

View File

@@ -1,8 +1,8 @@
--source include/have_innodb.inc --source include/have_innodb.inc
# --echo #
# MDEV-16110 ALTER with ALGORITHM=INPLACE breaks temporary table with virtual columns --echo # MDEV-16110 ALTER with ALGORITHM=INPLACE breaks temporary table with virtual columns
# --echo #
create table t (a int, v int as (a)) engine=innodb; create table t (a int, v int as (a)) engine=innodb;
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON --error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
alter table t change column a b tinyint, algorithm=inplace; alter table t change column a b tinyint, algorithm=inplace;
@@ -20,12 +20,15 @@ lock table t2 write;
alter table t2 change column a b int, algorithm=inplace; alter table t2 change column a b int, algorithm=inplace;
show create table t2; show create table t2;
drop temporary table t1, t2; drop temporary table t1, t2;
unlock tables;
# --echo #
# MDEV-18083 ASAN heap-use-after-free in Field::set_warning_truncated_wrong_value upon inserting into temporary table --echo # MDEV-18083 ASAN heap-use-after-free in Field::set_warning_truncated_wrong_value upon inserting into temporary table
# --echo #
create temporary table t1 (a int); create temporary table t1 (a int);
alter table t1 add column f text; alter table t1 add column f text;
--error ER_TRUNCATED_WRONG_VALUE_FOR_FIELD --error ER_TRUNCATED_WRONG_VALUE_FOR_FIELD
insert into t1 values ('x','foo'); insert into t1 values ('x','foo');
drop temporary table t1; drop temporary table t1;
--echo # End of 10.2 tests

View File

@@ -60,3 +60,22 @@ drop table t1;
# #
# End of 10.11 tests # End of 10.11 tests
# #
#
# MDEV-35611 Assertion failure in Diagnostics_area::sql_errno upon interrupted ALTER
#
CREATE TABLE t (a INT) ENGINE=MyISAM;
INSERT INTO t VALUES (1);
LOCK TABLE t READ;
connection con1;
SET max_statement_time=0.001;
ALTER TABLE t FORCE;
ERROR 70100: Query execution was interrupted (max_statement_time exceeded)
ALTER TABLE IF EXISTS t FORCE;
ERROR 70100: Query execution was interrupted (max_statement_time exceeded)
disconnect con1;
connection default;
UNLOCK TABLES;
DROP TABLE t;
#
# End of 11.4 tests
#

View File

@@ -78,3 +78,25 @@ drop table t1;
--echo # --echo #
--echo # End of 10.11 tests --echo # End of 10.11 tests
--echo # --echo #
--echo #
--echo # MDEV-35611 Assertion failure in Diagnostics_area::sql_errno upon interrupted ALTER
--echo #
CREATE TABLE t (a INT) ENGINE=MyISAM;
INSERT INTO t VALUES (1);
LOCK TABLE t READ;
--connection con1
SET max_statement_time=0.001;
--error ER_STATEMENT_TIMEOUT
ALTER TABLE t FORCE;
--error ER_STATEMENT_TIMEOUT
ALTER TABLE IF EXISTS t FORCE;
# Cleanup
--disconnect con1
--connection default
UNLOCK TABLES;
DROP TABLE t;
--echo #
--echo # End of 11.4 tests
--echo #

View File

@@ -10,6 +10,7 @@ a
1 1
2 2
drop table t1; drop table t1;
FLUSH SLOW LOGS;
# explain: id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra # explain: id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
# explain: 1 INSERT t1 ALL NULL NULL NULL NULL NULL NULL 100.00 100.00 NULL # explain: 1 INSERT t1 ALL NULL NULL NULL NULL NULL NULL 100.00 100.00 NULL
# explain: id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra # explain: id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra

View File

@@ -26,6 +26,7 @@ select * from t1 where a<3;
--enable_cursor_protocol --enable_cursor_protocol
drop table t1; drop table t1;
let SLOW_LOG_FILE= `select @@slow_query_log_file`; let SLOW_LOG_FILE= `select @@slow_query_log_file`;
FLUSH SLOW LOGS;
# select @@slow_query_log_file; # select @@slow_query_log_file;

View File

@@ -1,4 +1,3 @@
drop table if exists t1, t2;
select CASE "b" when "a" then 1 when "b" then 2 END as exp; select CASE "b" when "a" then 1 when "b" then 2 END as exp;
exp exp
2 2
@@ -165,7 +164,7 @@ t1 CREATE TABLE `t1` (
`COALESCE(1,1.0)` decimal(2,1) NOT NULL, `COALESCE(1,1.0)` decimal(2,1) NOT NULL,
`COALESCE(1,'1')` varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci NOT NULL, `COALESCE(1,'1')` varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci NOT NULL,
`COALESCE(1.1,'1')` varchar(4) CHARACTER SET latin1 COLLATE latin1_swedish_ci NOT NULL, `COALESCE(1.1,'1')` varchar(4) CHARACTER SET latin1 COLLATE latin1_swedish_ci NOT NULL,
`COALESCE('a' COLLATE latin1_bin,'b')` varchar(1) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL `COALESCE('a' COLLATE latin1_bin,'b')` varchar(1) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
DROP TABLE t1; DROP TABLE t1;
CREATE TABLE t1 SELECT IFNULL('a' COLLATE latin1_swedish_ci, 'b' COLLATE latin1_bin); CREATE TABLE t1 SELECT IFNULL('a' COLLATE latin1_swedish_ci, 'b' COLLATE latin1_bin);
@@ -572,6 +571,22 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
Warnings: Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where case `test`.`t1`.`a` when `test`.`t1`.`b` then 1 end = 1 and case when `test`.`t1`.`a` then `test`.`t1`.`b` else 1 end = 3 Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where case `test`.`t1`.`a` when `test`.`t1`.`b` then 1 end = 1 and case when `test`.`t1`.`a` then `test`.`t1`.`b` else 1 end = 3
DROP TABLE t1; DROP TABLE t1;
#
# End of 10.3 test # End of 10.3 test
# #
# MDEV-25415 CASE function handles NULL inconsistently
#
select case 'X' when null then 1 when 'X' then 2 else 3 end;
case 'X' when null then 1 when 'X' then 2 else 3 end
2
select case 'X' when 1/1 then 1 when 'X' then 2 else 3 end;
case 'X' when 1/1 then 1 when 'X' then 2 else 3 end
2
Warnings:
Warning 1292 Truncated incorrect DOUBLE value: 'X'
select case 'X' when 1/0 then 1 when 'X' then 2 else 3 end;
case 'X' when 1/0 then 1 when 'X' then 2 else 3 end
2
Warnings:
Warning 1292 Truncated incorrect DOUBLE value: 'X'
Warning 1365 Division by 0
# End of 10.11 test

View File

@@ -2,11 +2,6 @@
# Testing of CASE # Testing of CASE
# #
--disable_warnings
drop table if exists t1, t2;
--enable_warnings
select CASE "b" when "a" then 1 when "b" then 2 END as exp; select CASE "b" when "a" then 1 when "b" then 2 END as exp;
select CASE "c" when "a" then 1 when "b" then 2 END as exp; select CASE "c" when "a" then 1 when "b" then 2 END as exp;
select CASE "c" when "a" then 1 when "b" then 2 ELSE 3 END as exp; select CASE "c" when "a" then 1 when "b" then 2 ELSE 3 END as exp;
@@ -412,7 +407,13 @@ SELECT * FROM t1 WHERE
DROP TABLE t1; DROP TABLE t1;
--echo # End of 10.3 test
--echo # --echo #
--echo # End of 10.3 test --echo # MDEV-25415 CASE function handles NULL inconsistently
--echo # --echo #
select case 'X' when null then 1 when 'X' then 2 else 3 end;
select case 'X' when 1/1 then 1 when 'X' then 2 else 3 end;
select case 'X' when 1/0 then 1 when 'X' then 2 else 3 end;
--echo # End of 10.11 test

View File

@@ -317,9 +317,63 @@ insert t1 (a) values (1);
insert t1 (a) values (-1); insert t1 (a) values (-1);
ERROR 23000: CONSTRAINT `t1.b` failed for `test`.`t1` ERROR 23000: CONSTRAINT `t1.b` failed for `test`.`t1`
drop table t1; drop table t1;
#
# End of 10.4 tests # End of 10.4 tests
# #
# MDEV-36662 CHECK constraint does not repeat in case of error
#
create table t1 (d date check (d > 2020-01-01));
insert into t1 values ('2023-12-05');
ERROR 22007: Truncated incorrect datetime value: '2018'
INSERT into t1 values ('2024-12-05');
ERROR 22007: Truncated incorrect datetime value: '2018'
create or replace table t1 (d time check (d > "a"));
insert into t1 values ('22:30');
ERROR 22007: Truncated incorrect time value: 'a'
insert into t1 values ('23:30');
ERROR 22007: Truncated incorrect time value: 'a'
create or replace table t1 (d datetime check (d > "a"));
insert into t1 values ('2023-12-05');
ERROR 22007: Truncated incorrect datetime value: 'a'
insert into t1 values ('2024-12-05');
ERROR 22007: Truncated incorrect datetime value: 'a'
create or replace table t1 (d timestamp check (d > "a"));
insert into t1 values ('2023-12-05');
ERROR 22007: Truncated incorrect datetime value: 'a'
insert into t1 values ('2024-12-05');
ERROR 22007: Truncated incorrect datetime value: 'a'
create or replace table t1 (d year check (d > "a"));
insert into t1 values ('2023');
ERROR 22007: Truncated incorrect DECIMAL value: 'a'
insert into t1 values ('2024');
ERROR 22007: Truncated incorrect DECIMAL value: 'a'
create or replace table t1 (d int check (d > "a"));
insert into t1 values (0);
ERROR 22007: Truncated incorrect DECIMAL value: 'a'
insert into t1 values (1);
ERROR 22007: Truncated incorrect DECIMAL value: 'a'
create or replace table t1 (d real check (d > "a"));
insert into t1 values (0.1);
ERROR 22007: Truncated incorrect DOUBLE value: 'a'
insert into t1 values (1.1);
ERROR 22007: Truncated incorrect DOUBLE value: 'a'
create or replace table t1 (d decimal check (d > "a"));
insert into t1 values (0);
ERROR 22007: Truncated incorrect DECIMAL value: 'a'
insert into t1 values (1);
ERROR 22007: Truncated incorrect DECIMAL value: 'a'
create or replace table t1 (d bool check (d != "a"));
insert into t1 values (0);
ERROR 22007: Truncated incorrect DECIMAL value: 'a'
insert into t1 values (1);
ERROR 22007: Truncated incorrect DECIMAL value: 'a'
drop table t1;
create or replace table t1 (d varchar(30) check (d != 1));
insert into t1 values ("a");
ERROR 22007: Truncated incorrect DECIMAL value: 'a'
insert into t1 values ("b");
ERROR 22007: Truncated incorrect DECIMAL value: 'b'
drop table t1;
# End of 10.11 tests
# #
# MDEV-32439 INSERT IGNORE VALUES (one row) errors on constraint # MDEV-32439 INSERT IGNORE VALUES (one row) errors on constraint
# #
@@ -376,6 +430,4 @@ SELECT * FROM t1;
v1 v2 v1 v2
1 2 1 2
DROP TABLE t1; DROP TABLE t1;
#
# End of 11.4 tests # End of 11.4 tests
#

View File

@@ -244,9 +244,65 @@ insert t1 (a) values (1);
insert t1 (a) values (-1); insert t1 (a) values (-1);
drop table t1; drop table t1;
--echo #
--echo # End of 10.4 tests --echo # End of 10.4 tests
--echo # --echo #
--echo # MDEV-36662 CHECK constraint does not repeat in case of error
--echo #
create table t1 (d date check (d > 2020-01-01));
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values ('2023-12-05');
--error ER_TRUNCATED_WRONG_VALUE
INSERT into t1 values ('2024-12-05');
create or replace table t1 (d time check (d > "a"));
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values ('22:30');
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values ('23:30');
create or replace table t1 (d datetime check (d > "a"));
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values ('2023-12-05');
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values ('2024-12-05');
create or replace table t1 (d timestamp check (d > "a"));
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values ('2023-12-05');
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values ('2024-12-05');
create or replace table t1 (d year check (d > "a"));
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values ('2023');
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values ('2024');
create or replace table t1 (d int check (d > "a"));
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values (0);
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values (1);
create or replace table t1 (d real check (d > "a"));
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values (0.1);
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values (1.1);
create or replace table t1 (d decimal check (d > "a"));
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values (0);
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values (1);
create or replace table t1 (d bool check (d != "a"));
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values (0);
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values (1);
drop table t1;
create or replace table t1 (d varchar(30) check (d != 1));
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values ("a");
--error ER_TRUNCATED_WRONG_VALUE
insert into t1 values ("b");
drop table t1;
--echo # End of 10.11 tests
--echo # --echo #
--echo # MDEV-32439 INSERT IGNORE VALUES (one row) errors on constraint --echo # MDEV-32439 INSERT IGNORE VALUES (one row) errors on constraint
@@ -276,6 +332,4 @@ SHOW WARNINGS;
SELECT * FROM t1; SELECT * FROM t1;
DROP TABLE t1; DROP TABLE t1;
--echo #
--echo # End of 11.4 tests --echo # End of 11.4 tests
--echo #

View File

@@ -3018,4 +3018,60 @@ SELECT GROUP_CONCAT( b, a ORDER BY 2 ) AS f FROM t1;
f f
nc,mmmmmmmmmmd nc,mmmmmmmmmmd
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-24726 Assertion `0' failed in Field_varstring_compressed::key_cmp
#
# VARCHAR
create table t1 (a varchar(8) compressed) character set utf8mb4;
create algorithm=temptable view v1 as select * from t1;
insert into t1 values ('foo'),('bar'),('foo');
select * from v1 where a in (select a from t1);
a
foo
foo
bar
drop view v1;
drop table t1;
create table t1 (f1 varchar(8)) charset=eucjpms collate=eucjpms_nopad_bin;
insert into t1 values ('');
create table t2 (f2 varchar(8) compressed) charset=eucjpms collate=eucjpms_nopad_bin;
insert into t2 values ('a'),('b');
select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1;
f1
drop table t1, t2;
# BLOB
create table t1 (a text compressed) character set utf8mb4;
create algorithm=temptable view v1 as select * from t1;
insert into t1 values ('foo'),('bar'),('foo');
select * from v1 where a in (select a from t1);
a
foo
foo
bar
drop view v1;
drop table t1;
create table t1 (f1 text) charset=eucjpms collate=eucjpms_nopad_bin;
insert into t1 values ('');
create table t2 (f2 text compressed) charset=eucjpms collate=eucjpms_nopad_bin;
insert into t2 values ('a'),('b');
select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1;
f1
drop table t1, t2;
#
# MDEV-16808 Assertion on compressed blob as key field
#
set join_cache_level= 3;
create table t1 (col_blob text) engine=innodb;
create table t2 (col_blob text compressed) engine=innodb;
select * from t1 join t2 using ( col_blob );
col_blob
drop tables t1, t2;
create table t (a text compressed,b text) engine=innodb;
create table t4 like t;
set session join_cache_level=3;
select * from (select * from t) as t natural join (select * from t) as t1;
a b
drop tables t, t4;
# End of 10.5 tests # End of 10.5 tests

View File

@@ -524,4 +524,57 @@ INSERT INTO t1 VALUES ('c','n'),('d','mmmmmmmmmm');
SELECT GROUP_CONCAT( b, a ORDER BY 2 ) AS f FROM t1; SELECT GROUP_CONCAT( b, a ORDER BY 2 ) AS f FROM t1;
DROP TABLE t1; DROP TABLE t1;
--echo #
--echo # MDEV-24726 Assertion `0' failed in Field_varstring_compressed::key_cmp
--echo #
--echo # VARCHAR
create table t1 (a varchar(8) compressed) character set utf8mb4;
create algorithm=temptable view v1 as select * from t1;
insert into t1 values ('foo'),('bar'),('foo');
select * from v1 where a in (select a from t1);
# cleanup
drop view v1;
drop table t1;
create table t1 (f1 varchar(8)) charset=eucjpms collate=eucjpms_nopad_bin;
insert into t1 values ('');
create table t2 (f2 varchar(8) compressed) charset=eucjpms collate=eucjpms_nopad_bin;
insert into t2 values ('a'),('b');
select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1;
# cleanup
drop table t1, t2;
--echo # BLOB
create table t1 (a text compressed) character set utf8mb4;
create algorithm=temptable view v1 as select * from t1;
insert into t1 values ('foo'),('bar'),('foo');
select * from v1 where a in (select a from t1);
# cleanup
drop view v1;
drop table t1;
create table t1 (f1 text) charset=eucjpms collate=eucjpms_nopad_bin;
insert into t1 values ('');
create table t2 (f2 text compressed) charset=eucjpms collate=eucjpms_nopad_bin;
insert into t2 values ('a'),('b');
select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1;
# cleanup
drop table t1, t2;
--echo #
--echo # MDEV-16808 Assertion on compressed blob as key field
--echo #
set join_cache_level= 3;
create table t1 (col_blob text) engine=innodb;
create table t2 (col_blob text compressed) engine=innodb;
select * from t1 join t2 using ( col_blob );
drop tables t1, t2;
create table t (a text compressed,b text) engine=innodb;
create table t4 like t;
set session join_cache_level=3;
select * from (select * from t) as t natural join (select * from t) as t1;
drop tables t, t4;
--echo # End of 10.5 tests --echo # End of 10.5 tests

View File

@@ -127,7 +127,6 @@ ALTER TABLE t1 MODIFY COLUMN a VARCHAR(1000);
ALTER TABLE t1 ALTER COLUMN a DROP DEFAULT; ALTER TABLE t1 ALTER COLUMN a DROP DEFAULT;
INSERT INTO t1 VALUES (REPEAT('b',100),11); INSERT INTO t1 VALUES (REPEAT('b',100),11);
INSERT INTO t1 VALUES (default,10); INSERT INTO t1 VALUES (default,10);
ERROR HY000: Field 'a' doesn't have a default value
ALTER TABLE t1 MODIFY COLUMN a VARCHAR(1000) COMPRESSED; ALTER TABLE t1 MODIFY COLUMN a VARCHAR(1000) COMPRESSED;
SHOW CREATE TABLE t1; SHOW CREATE TABLE t1;
Table Create Table Table Create Table
@@ -139,6 +138,7 @@ t1 CREATE TABLE `t1` (
(PARTITION `p0` VALUES LESS THAN (100,'sss') ENGINE = MyISAM) (PARTITION `p0` VALUES LESS THAN (100,'sss') ENGINE = MyISAM)
SELECT * from t1 ORDER BY id; SELECT * from t1 ORDER BY id;
a id a id
NULL 10
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 11 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 11
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 23 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 23
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 24 zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 24

View File

@@ -72,7 +72,6 @@ ALTER TABLE t1 ALTER COLUMN a DROP DEFAULT;
ALTER TABLE t1 MODIFY COLUMN a VARCHAR(1000); ALTER TABLE t1 MODIFY COLUMN a VARCHAR(1000);
ALTER TABLE t1 ALTER COLUMN a DROP DEFAULT; ALTER TABLE t1 ALTER COLUMN a DROP DEFAULT;
INSERT INTO t1 VALUES (REPEAT('b',100),11); INSERT INTO t1 VALUES (REPEAT('b',100),11);
--error ER_NO_DEFAULT_FOR_FIELD
INSERT INTO t1 VALUES (default,10); INSERT INTO t1 VALUES (default,10);
ALTER TABLE t1 MODIFY COLUMN a VARCHAR(1000) COMPRESSED; ALTER TABLE t1 MODIFY COLUMN a VARCHAR(1000) COMPRESSED;

View File

@@ -2621,7 +2621,7 @@ GROUP_CONCAT(CASE WHEN a THEN a ELSE '' END)
1234567 1234567
SELECT COALESCE(a,'') FROM t1 GROUP BY 1; SELECT COALESCE(a,'') FROM t1 GROUP BY 1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def COALESCE(a,'') 253 9 7 Y 128 39 63 def COALESCE(a,'') 253 9 7 N 129 39 63
COALESCE(a,'') COALESCE(a,'')
1234567 1234567
# All columns must be VARCHAR(9) with the same length: # All columns must be VARCHAR(9) with the same length:
@@ -2640,7 +2640,7 @@ t2 CREATE TABLE `t2` (
`IFNULL(a,'')` varbinary(9) NOT NULL, `IFNULL(a,'')` varbinary(9) NOT NULL,
`IF(a,a,'')` varbinary(9) DEFAULT NULL, `IF(a,a,'')` varbinary(9) DEFAULT NULL,
`CASE WHEN a THEN a ELSE '' END` varbinary(9) DEFAULT NULL, `CASE WHEN a THEN a ELSE '' END` varbinary(9) DEFAULT NULL,
`COALESCE(a,'')` varbinary(9) DEFAULT NULL `COALESCE(a,'')` varbinary(9) NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
DROP TABLE t2; DROP TABLE t2;
CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1; CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1;

View File

@@ -3033,7 +3033,7 @@ GROUP_CONCAT(CASE WHEN a THEN a ELSE '' END)
1234567 1234567
SELECT COALESCE(a,'') FROM t1 GROUP BY 1; SELECT COALESCE(a,'') FROM t1 GROUP BY 1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def COALESCE(a,'') 253 9 7 Y 0 39 51 def COALESCE(a,'') 253 9 7 N 1 39 51
COALESCE(a,'') COALESCE(a,'')
1234567 1234567
# All columns must be VARCHAR(9) with the same length: # All columns must be VARCHAR(9) with the same length:
@@ -3052,7 +3052,7 @@ t2 CREATE TABLE `t2` (
`IFNULL(a,'')` varchar(9) CHARACTER SET cp1251 COLLATE cp1251_general_ci NOT NULL, `IFNULL(a,'')` varchar(9) CHARACTER SET cp1251 COLLATE cp1251_general_ci NOT NULL,
`IF(a,a,'')` varchar(9) CHARACTER SET cp1251 COLLATE cp1251_general_ci DEFAULT NULL, `IF(a,a,'')` varchar(9) CHARACTER SET cp1251 COLLATE cp1251_general_ci DEFAULT NULL,
`CASE WHEN a THEN a ELSE '' END` varchar(9) CHARACTER SET cp1251 COLLATE cp1251_general_ci DEFAULT NULL, `CASE WHEN a THEN a ELSE '' END` varchar(9) CHARACTER SET cp1251 COLLATE cp1251_general_ci DEFAULT NULL,
`COALESCE(a,'')` varchar(9) CHARACTER SET cp1251 COLLATE cp1251_general_ci DEFAULT NULL `COALESCE(a,'')` varchar(9) CHARACTER SET cp1251 COLLATE cp1251_general_ci NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
DROP TABLE t2; DROP TABLE t2;
CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1; CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1;

View File

@@ -3342,7 +3342,7 @@ GROUP_CONCAT(CASE WHEN a THEN a ELSE '' END)
1234567 1234567
SELECT COALESCE(a,'') FROM t1 GROUP BY 1; SELECT COALESCE(a,'') FROM t1 GROUP BY 1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def COALESCE(a,'') 253 9 7 Y 0 39 8 def COALESCE(a,'') 253 9 7 N 1 39 8
COALESCE(a,'') COALESCE(a,'')
1234567 1234567
# All columns must be VARCHAR(9) with the same length: # All columns must be VARCHAR(9) with the same length:
@@ -3361,7 +3361,7 @@ t2 CREATE TABLE `t2` (
`IFNULL(a,'')` varchar(9) CHARACTER SET latin1 COLLATE latin1_swedish_ci NOT NULL, `IFNULL(a,'')` varchar(9) CHARACTER SET latin1 COLLATE latin1_swedish_ci NOT NULL,
`IF(a,a,'')` varchar(9) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL, `IF(a,a,'')` varchar(9) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL,
`CASE WHEN a THEN a ELSE '' END` varchar(9) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL, `CASE WHEN a THEN a ELSE '' END` varchar(9) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL,
`COALESCE(a,'')` varchar(9) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL `COALESCE(a,'')` varchar(9) CHARACTER SET latin1 COLLATE latin1_swedish_ci NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
DROP TABLE t2; DROP TABLE t2;
CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1; CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1;

View File

@@ -4224,7 +4224,7 @@ GROUP_CONCAT(CASE WHEN a THEN a ELSE '' END)
1234567 1234567
SELECT COALESCE(a,'') FROM t1 GROUP BY 1; SELECT COALESCE(a,'') FROM t1 GROUP BY 1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def COALESCE(a,'') 253 9 7 Y 0 39 8 def COALESCE(a,'') 253 9 7 N 1 39 8
COALESCE(a,'') COALESCE(a,'')
1234567 1234567
# All columns must be VARCHAR(9) with the same length: # All columns must be VARCHAR(9) with the same length:
@@ -4243,7 +4243,7 @@ t2 CREATE TABLE `t2` (
`IFNULL(a,'')` varchar(9) CHARACTER SET ucs2 COLLATE ucs2_general_ci NOT NULL, `IFNULL(a,'')` varchar(9) CHARACTER SET ucs2 COLLATE ucs2_general_ci NOT NULL,
`IF(a,a,'')` varchar(9) CHARACTER SET ucs2 COLLATE ucs2_general_ci DEFAULT NULL, `IF(a,a,'')` varchar(9) CHARACTER SET ucs2 COLLATE ucs2_general_ci DEFAULT NULL,
`CASE WHEN a THEN a ELSE '' END` varchar(9) CHARACTER SET ucs2 COLLATE ucs2_general_ci DEFAULT NULL, `CASE WHEN a THEN a ELSE '' END` varchar(9) CHARACTER SET ucs2 COLLATE ucs2_general_ci DEFAULT NULL,
`COALESCE(a,'')` varchar(9) CHARACTER SET ucs2 COLLATE ucs2_general_ci DEFAULT NULL `COALESCE(a,'')` varchar(9) CHARACTER SET ucs2 COLLATE ucs2_general_ci NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
DROP TABLE t2; DROP TABLE t2;
CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1; CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1;

View File

@@ -4970,7 +4970,7 @@ GROUP_CONCAT(CASE WHEN a THEN a ELSE '' END)
1234567 1234567
SELECT COALESCE(a,'') FROM t1 GROUP BY 1; SELECT COALESCE(a,'') FROM t1 GROUP BY 1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def COALESCE(a,'') 253 27 7 Y 0 39 33 def COALESCE(a,'') 253 27 7 N 1 39 33
COALESCE(a,'') COALESCE(a,'')
1234567 1234567
# All columns must be VARCHAR(9) with the same length: # All columns must be VARCHAR(9) with the same length:
@@ -4989,7 +4989,7 @@ t2 CREATE TABLE `t2` (
`IFNULL(a,'')` varchar(9) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL, `IFNULL(a,'')` varchar(9) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL,
`IF(a,a,'')` varchar(9) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL, `IF(a,a,'')` varchar(9) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL,
`CASE WHEN a THEN a ELSE '' END` varchar(9) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL, `CASE WHEN a THEN a ELSE '' END` varchar(9) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL,
`COALESCE(a,'')` varchar(9) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL `COALESCE(a,'')` varchar(9) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
DROP TABLE t2; DROP TABLE t2;
CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1; CREATE TABLE t2 AS SELECT CONCAT_WS(1,2,3) FROM t1;

View File

@@ -320,3 +320,30 @@ SHOW VARIABLES LIKE 'DEBUG_SYNC';
Variable_name Value Variable_name Value
debug_sync ON - current signals: 's2,s7,s1,s5' debug_sync ON - current signals: 's2,s7,s1,s5'
SET DEBUG_SYNC= 'RESET'; SET DEBUG_SYNC= 'RESET';
#
# MDEV-30364 Assertion MDL_EXCLUSIVE on DISCARD TABLESPACE in LOCK TABLE mode
#
create table t (c int) engine=innodb;
connect con1,localhost,root;
set debug_sync='get_schema_column WAIT_FOR go';
select column_name from information_schema.columns
where table_schema='test' and table_name='t';
connection default;
lock table t write;
alter table t discard tablespace;
connect con2,localhost,root;
disconnect con2;
connection default;
ERROR 70100: Query execution was interrupted
set debug_sync='now SIGNAL go';
connection con1;
column_name
c
disconnect con1;
connection default;
unlock tables;
drop table t;
set debug_sync= 'reset';
#
# End of 10.6 tests
#

View File

@@ -18,6 +18,7 @@
# We need the Debug Sync Facility. # We need the Debug Sync Facility.
# #
--source include/have_debug_sync.inc --source include/have_debug_sync.inc
--source include/have_innodb.inc
# #
# We are checking privileges, which the embedded server cannot do. # We are checking privileges, which the embedded server cannot do.
@@ -448,3 +449,42 @@ SHOW VARIABLES LIKE 'DEBUG_SYNC';
# #
SET DEBUG_SYNC= 'RESET'; SET DEBUG_SYNC= 'RESET';
--echo #
--echo # MDEV-30364 Assertion MDL_EXCLUSIVE on DISCARD TABLESPACE in LOCK TABLE mode
--echo #
create table t (c int) engine=innodb;
--connect con1,localhost,root
set debug_sync='get_schema_column WAIT_FOR go';
send select column_name from information_schema.columns
where table_schema='test' and table_name='t';
--connection default
let $wait_condition=select 1 from information_schema.processlist where state like 'debug sync point%';
--source include/wait_condition.inc
let $connid=`select connection_id()`;
lock table t write;
send alter table t discard tablespace;
--connect con2,localhost,root
--disable_query_log
--eval kill query $connid
--enable_query_log
--disconnect con2
--connection default
--error ER_QUERY_INTERRUPTED
reap;
set debug_sync='now SIGNAL go';
--connection con1
reap;
--disconnect con1
--connection default
unlock tables;
drop table t;
set debug_sync= 'reset';
--echo #
--echo # End of 10.6 tests
--echo #
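The MDEV-30364 test above layers LOCK TABLES, KILL QUERY and information_schema on top of a standard mysqltest synchronisation idiom: send a statement asynchronously, wait until it is parked on a debug_sync point, signal it, then reap. A minimal sketch of just that idiom, assuming a debug build with the Debug Sync facility; connection and signal names here are illustrative, not taken from the patch:

--connect (worker,localhost,root,,)
# park the worker on a user-defined debug_sync signal
send SET debug_sync= 'now WAIT_FOR go';
--connection default
# make sure the worker really reached the sync point before continuing
let $wait_condition= select 1 from information_schema.processlist where state like 'debug sync point%';
--source include/wait_condition.inc
SET debug_sync= 'now SIGNAL go';
--connection worker
# collect the result of the statement that was sent asynchronously
reap;
--disconnect worker
--connection default
SET debug_sync= 'RESET';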

View File

@@ -3433,10 +3433,8 @@ DEFAULT(a) CASE a WHEN 0 THEN 1 ELSE 2 END
NULL 2 NULL 2
DROP TABLE t; DROP TABLE t;
DROP VIEW v; DROP VIEW v;
#
# End of 10.2 test # End of 10.2 test
# #
#
# MDEV-22703 DEFAULT() on a BLOB column can overwrite the default # MDEV-22703 DEFAULT() on a BLOB column can overwrite the default
# record, which can cause crashes when accessing already released # record, which can cause crashes when accessing already released
# memory. # memory.
@@ -3451,10 +3449,8 @@ length(DEFAULT(h))
25 25
INSERT INTO t1 () VALUES (); INSERT INTO t1 () VALUES ();
drop table t1; drop table t1;
#
# End of 10.3 test # End of 10.3 test
# #
#
# MDEV-26423: MariaDB server crash in Create_tmp_table::finalize # MDEV-26423: MariaDB server crash in Create_tmp_table::finalize
# #
CREATE TABLE t1 (pk text DEFAULT length(uuid())); CREATE TABLE t1 (pk text DEFAULT length(uuid()));
@@ -3484,7 +3480,15 @@ column_name column_default has_default is_nullable
a NULL 1 YES a NULL 1 YES
drop view v1; drop view v1;
drop table t1; drop table t1;
#
# End of 10.4 test # End of 10.4 test
# #
# MDEV-37320 ASAN errors in Field::is_null / Item_param::assign_default
#
create table t1 (f01 timestamp, f03 timestamp);
insert into t1 () values ();
create trigger tr before insert on t1 for each row set @a=1;
prepare stmt from "update t1 set f03 = ?";
execute stmt using default;
drop table t1;
# End of 10.6 test
ALTER DATABASE test CHARACTER SET utf8mb4 COLLATE utf8mb4_uca1400_ai_ci; ALTER DATABASE test CHARACTER SET utf8mb4 COLLATE utf8mb4_uca1400_ai_ci;

View File

@@ -2140,9 +2140,8 @@ CREATE ALGORITHM=TEMPTABLE VIEW v AS SELECT * FROM t;
SELECT DISTINCT DEFAULT(a), CASE a WHEN 0 THEN 1 ELSE 2 END FROM v GROUP BY a WITH ROLLUP; SELECT DISTINCT DEFAULT(a), CASE a WHEN 0 THEN 1 ELSE 2 END FROM v GROUP BY a WITH ROLLUP;
DROP TABLE t; DROP TABLE t;
DROP VIEW v; DROP VIEW v;
--echo #
--echo # End of 10.2 test --echo # End of 10.2 test
--echo #
--echo # --echo #
--echo # MDEV-22703 DEFAULT() on a BLOB column can overwrite the default --echo # MDEV-22703 DEFAULT() on a BLOB column can overwrite the default
@@ -2160,9 +2159,7 @@ SELECT length(DEFAULT(h)) FROM t1;
INSERT INTO t1 () VALUES (); INSERT INTO t1 () VALUES ();
drop table t1; drop table t1;
--echo #
--echo # End of 10.3 test --echo # End of 10.3 test
--echo #
--echo # --echo #
--echo # MDEV-26423: MariaDB server crash in Create_tmp_table::finalize --echo # MDEV-26423: MariaDB server crash in Create_tmp_table::finalize
@@ -2186,9 +2183,18 @@ select column_name, column_default, column_default is not null as 'has_default',
drop view v1; drop view v1;
drop table t1; drop table t1;
--echo #
--echo # End of 10.4 test --echo # End of 10.4 test
--echo #
--echo #
--echo # MDEV-37320 ASAN errors in Field::is_null / Item_param::assign_default
--echo #
create table t1 (f01 timestamp, f03 timestamp);
insert into t1 () values ();
create trigger tr before insert on t1 for each row set @a=1;
prepare stmt from "update t1 set f03 = ?";
execute stmt using default;
drop table t1;
--echo # End of 10.6 test
--source include/test_db_charset_restore.inc --source include/test_db_charset_restore.inc
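For context, the statement exercised by the MDEV-37320 case above, EXECUTE ... USING DEFAULT, binds the placeholder to the default value of the column it is assigned to. A minimal standalone sketch, with made-up table and statement names:

create table demo (a int default 7, b int default 8);
insert into demo () values ();
prepare p from "update demo set b = ?";
# DEFAULT as an execute argument makes the server use column b's default (8)
execute p using default;
select * from demo;
deallocate prepare p;
drop table demo;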

View File

@@ -1204,6 +1204,7 @@ drop table t1,t2,t3;
--echo # Tests from the bug report --echo # Tests from the bug report
--disable_view_protocol
CREATE TABLE t (pk INT PRIMARY KEY); CREATE TABLE t (pk INT PRIMARY KEY);
INSERT INTO t VALUES (1), (2), (3); INSERT INTO t VALUES (1), (2), (3);
@@ -1258,6 +1259,7 @@ SHOW CREATE VIEW v1;
DROP VIEW v_t, v1; DROP VIEW v_t, v1;
DROP TABLE t; DROP TABLE t;
--enable_view_protocol
--echo # Tests on views created using SELECT statements that contain derived columns --echo # Tests on views created using SELECT statements that contain derived columns

View File

@@ -22727,8 +22727,8 @@ where dt.a=t1.a and t3.a < 3
from t1 limit 5; from t1 limit 5;
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 1000 1 PRIMARY t1 ALL NULL NULL NULL NULL 1000
2 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 2 Using where
2 DEPENDENT SUBQUERY <derived3> ref key0 key0 5 test.t1.a 1 2 DEPENDENT SUBQUERY <derived3> ref key0 key0 5 test.t1.a 1
2 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
3 LATERAL DERIVED t2 ref a a 5 test.t1.a 10 3 LATERAL DERIVED t2 ref a a 5 test.t1.a 10
select select
a, a,

View File

@@ -969,4 +969,311 @@ cnt
6 6
DROP TABLE t1; DROP TABLE t1;
# End of 10.4 tests # End of 10.4 tests
#
# MDEV-30711: Crash in add_keyuses_for_splitting() when joining with a derived table
#
create table t1 (a int);
insert into t1 values (1),(2);
create table t2 (a int, index(a));
insert into t2 values (1),(3);
create view v1 as
select
nullif(tbl2.COL1,123) as COL10
from
t1 left join
(select 1 as COL1, a from t2) tbl2 on t1.a=tbl2.a;
create table t10 (grp_id int, a int, index(grp_id));
insert into t10 select A.seq, B.seq from seq_1_to_100 A, seq_1_to_100 B;
analyze table t10;
Table Op Msg_type Msg_text
test.t10 analyze status Engine-independent statistics collected
test.t10 analyze status Table is already up to date
explain
select * from
v1,
(select grp_id, count(*) from t10 group by grp_id) T
where
T.grp_id=v1.COL10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
1 PRIMARY t2 ref a a 5 test.t1.a 1 Using where; Using index
1 PRIMARY <derived2> ref key0 key0 5 func 10 Using where
2 DERIVED t10 index grp_id grp_id 5 NULL 10000 Using index; Using temporary; Using filesort
drop table t1,t2, t10;
drop view v1;
# End of 10.11 tests
#
# MDEV-37057 Wrong result with LATERAL DERIVED
#
CREATE TABLE t1 (
a int NOT NULL,
b int default null,
amount decimal DEFAULT NULL,
KEY t1_IDX (a,b) USING BTREE
) ENGINE=INNODB;
CREATE TABLE t2 (
a int NOT NULL,
b int default null,
name varchar(50) DEFAULT NULL,
KEY t2_IDX (a,b) USING BTREE
) ENGINE=INNODB;
INSERT INTO t1 VALUES
(1, NULL, 10.0000), (2, 2, 20.0000), (3, 3, 30.0000), (4, 4, 40.0000),
(5, 5, NULL), (6, 6, NULL), (7, 7, 70.0000), (8, 8, 80.0000);
INSERT INTO t2 VALUES
(1, NULL, 'A'), (2,2, 'B'), (3,3, 'C'), (4,4, 'D'), (5,5, NULL), (6,6, NULL),
(7,7, 'E'), (8,8, 'F'), (9,9, 'G'), (10,10,'H'), (11,11, NULL), (12,12, NULL);
# Must use Split-Materialized:
explain $query;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 range t2_IDX t2_IDX 4 NULL 1 Using index condition
1 PRIMARY <derived2> ref key0 key0 10 test.t2.a,test.t2.b 1 Using where
2 LATERAL DERIVED t1 ref t1_IDX t1_IDX 9 test.t2.a,test.t2.b 1 Using index condition
$query;
a b name total_amt
1 NULL A 10
# Compare with correct result:
set statement optimizer_switch='split_materialized=off' for $query;
a b name total_amt
1 NULL A 10
DROP TABLE t1,t2;
#
# MDEV-37230 Incorrect handling of NULL join conditions when using
# split-materialized
#
create table t1
(
a int not null,
b int,
c int,
d int,
amount decimal,
key t1_ix1 (a,b)
) engine=innodb;
insert into t1 values (0, NULL, 0, NULL, 10.0000), (1, 1, 1, 1, 10.0000),
(2, 2, 2, 2, 20.0000), (3, 3, 3, 3, 30.0000), (4, 4, 4, 4, 40.0000),
(5, 5, 5, 5, NULL), (6, 6, 6, 6, NULL), (7, 7, 7, 7, 70.0000),
(8, 8, 8, 8, 80.0000);
create table t2
(
a int NOT NULL,
b int,
name varchar(50),
key t2_ix1 (a,b)
) engine=innodb;
insert into t2 values (0, NULL, 'a'), (1, NULL, 'A'), (2, 2, 'B'), (3,3, 'C'),
(4,4, 'D'), (5,5, NULL), (6,6, NULL), (7,7, 'E'), (8,8, 'F'), (9,9, 'G'),
(10,10,'H'), (11,11, NULL), (12,12, NULL);
create table t3
(
a int not null,
b int,
description varchar(50),
key t3_ix1 (a,b)
);
insert into t3 values (1, 1, 'bar'),(2,2,'buz'),(0,NULL, 'gold');
insert into t3 select seq, seq, 'junk' from seq_3_to_13;
analyze table t1, t2, t3;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
set optimizer_switch='default';
set statement optimizer_switch='split_materialized=on' for explain format=json select * from t1
join t2 on t1.a = t2.a and t1.b <=> t2.b
join
(
select a, b, description from t3 group by a, b
) dt on dt.a = t1.a and dt.b <=> t1.b and dt.b <=> t2.b
where dt.a < 1;
EXPLAIN
{
"query_block": {
"select_id": 1,
"cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "range",
"possible_keys": ["t1_ix1"],
"key": "t1_ix1",
"key_length": "4",
"used_key_parts": ["a"],
"loops": 1,
"rows": 1,
"cost": "REPLACED",
"filtered": 100,
"index_condition": "t1.a < 1"
}
},
{
"table": {
"table_name": "t2",
"access_type": "ref",
"possible_keys": ["t2_ix1"],
"key": "t2_ix1",
"key_length": "9",
"used_key_parts": ["a", "b"],
"ref": ["test.t1.a", "test.t1.b"],
"loops": 1,
"rows": 1,
"cost": "REPLACED",
"filtered": 100,
"index_condition": "t1.b <=> t2.b"
}
},
{
"table": {
"table_name": "<derived3>",
"access_type": "ref",
"possible_keys": ["key0"],
"key": "key0",
"key_length": "9",
"used_key_parts": ["a", "b"],
"ref": ["test.t1.a", "test.t1.b"],
"loops": 1,
"rows": 1,
"cost": "REPLACED",
"filtered": 100,
"attached_condition": "dt.b <=> t1.b and dt.b <=> t2.b",
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 3,
"cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ref",
"possible_keys": ["t3_ix1"],
"key": "t3_ix1",
"key_length": "9",
"used_key_parts": ["a", "b"],
"ref": ["test.t1.a", "test.t1.b"],
"loops": 1,
"rows": 1,
"cost": "REPLACED",
"filtered": 100,
"index_condition": "t3.a < 1 and t3.b <=> t1.b and t3.b <=> t2.b and t3.a = t2.a"
}
}
]
}
}
}
}
]
}
}
set statement optimizer_switch='split_materialized=on' for select * from t1
join t2 on t1.a = t2.a and t1.b <=> t2.b
join
(
select a, b, description from t3 group by a, b
) dt on dt.a = t1.a and dt.b <=> t1.b and dt.b <=> t2.b
where dt.a < 1;
a b c d amount a b name a b description
0 NULL 0 NULL 10 0 NULL a 0 NULL gold
set statement optimizer_switch='split_materialized=off' for explain format=json select * from t1
join t2 on t1.a = t2.a and t1.b <=> t2.b
join
(
select a, b, description from t3 group by a, b
) dt on dt.a = t1.a and dt.b <=> t1.b and dt.b <=> t2.b
where dt.a < 1;
EXPLAIN
{
"query_block": {
"select_id": 1,
"cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "range",
"possible_keys": ["t1_ix1"],
"key": "t1_ix1",
"key_length": "4",
"used_key_parts": ["a"],
"loops": 1,
"rows": 1,
"cost": "REPLACED",
"filtered": 100,
"index_condition": "t1.a < 1"
}
},
{
"table": {
"table_name": "t2",
"access_type": "ref",
"possible_keys": ["t2_ix1"],
"key": "t2_ix1",
"key_length": "9",
"used_key_parts": ["a", "b"],
"ref": ["test.t1.a", "test.t1.b"],
"loops": 1,
"rows": 1,
"cost": "REPLACED",
"filtered": 100,
"index_condition": "t1.b <=> t2.b"
}
},
{
"table": {
"table_name": "<derived3>",
"access_type": "ref",
"possible_keys": ["key0"],
"key": "key0",
"key_length": "9",
"used_key_parts": ["a", "b"],
"ref": ["test.t1.a", "test.t1.b"],
"loops": 1,
"rows": 1,
"cost": "REPLACED",
"filtered": 100,
"attached_condition": "dt.b <=> t1.b and dt.b <=> t2.b",
"materialized": {
"query_block": {
"select_id": 3,
"cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "range",
"possible_keys": ["t3_ix1"],
"key": "t3_ix1",
"key_length": "4",
"used_key_parts": ["a"],
"loops": 1,
"rows": 1,
"cost": "REPLACED",
"filtered": 100,
"index_condition": "t3.a < 1"
}
}
]
}
}
}
}
]
}
}
set statement optimizer_switch='split_materialized=off' for select * from t1
join t2 on t1.a = t2.a and t1.b <=> t2.b
join
(
select a, b, description from t3 group by a, b
) dt on dt.a = t1.a and dt.b <=> t1.b and dt.b <=> t2.b
where dt.a < 1;
a b c d amount a b name a b description
0 NULL 0 NULL 10 0 NULL a 0 NULL gold
drop table t1, t2, t3;
# End of 11.4 tests
SET GLOBAL innodb_stats_persistent=@save_innodb_stats_persistent; SET GLOBAL innodb_stats_persistent=@save_innodb_stats_persistent;

View File

@@ -561,4 +561,141 @@ DROP TABLE t1;
--echo # End of 10.4 tests --echo # End of 10.4 tests
--echo #
--echo # MDEV-30711: Crash in add_keyuses_for_splitting() when joining with a derived table
--echo #
create table t1 (a int);
insert into t1 values (1),(2);
create table t2 (a int, index(a));
insert into t2 values (1),(3);
create view v1 as
select
nullif(tbl2.COL1,123) as COL10
from
t1 left join
(select 1 as COL1, a from t2) tbl2 on t1.a=tbl2.a;
create table t10 (grp_id int, a int, index(grp_id));
insert into t10 select A.seq, B.seq from seq_1_to_100 A, seq_1_to_100 B;
analyze table t10;
explain
select * from
v1,
(select grp_id, count(*) from t10 group by grp_id) T
where
T.grp_id=v1.COL10;
drop table t1,t2, t10;
drop view v1;
--echo # End of 10.11 tests
--echo #
--echo # MDEV-37057 Wrong result with LATERAL DERIVED
--echo #
CREATE TABLE t1 (
a int NOT NULL,
b int default null,
amount decimal DEFAULT NULL,
KEY t1_IDX (a,b) USING BTREE
) ENGINE=INNODB;
CREATE TABLE t2 (
a int NOT NULL,
b int default null,
name varchar(50) DEFAULT NULL,
KEY t2_IDX (a,b) USING BTREE
) ENGINE=INNODB;
INSERT INTO t1 VALUES
(1, NULL, 10.0000), (2, 2, 20.0000), (3, 3, 30.0000), (4, 4, 40.0000),
(5, 5, NULL), (6, 6, NULL), (7, 7, 70.0000), (8, 8, 80.0000);
INSERT INTO t2 VALUES
(1, NULL, 'A'), (2,2, 'B'), (3,3, 'C'), (4,4, 'D'), (5,5, NULL), (6,6, NULL),
(7,7, 'E'), (8,8, 'F'), (9,9, 'G'), (10,10,'H'), (11,11, NULL), (12,12, NULL);
let $query=
SELECT t2.a,t2.b,t2.name,t.total_amt FROM t2
LEFT JOIN
(
SELECT a, b, sum(amount) total_amt FROM t1 GROUP BY a, b
) AS t ON t2.a=t.a and t2.b<=>t.b
WHERE t2.a < 2;
--echo # Must use Split-Materialized:
evalp explain $query;
evalp $query;
--echo # Compare with correct result:
evalp set statement optimizer_switch='split_materialized=off' for $query;
DROP TABLE t1,t2;
--echo #
--echo # MDEV-37230 Incorrect handling of NULL join conditions when using
--echo # split-materialized
--echo #
create table t1
(
a int not null,
b int,
c int,
d int,
amount decimal,
key t1_ix1 (a,b)
) engine=innodb;
insert into t1 values (0, NULL, 0, NULL, 10.0000), (1, 1, 1, 1, 10.0000),
(2, 2, 2, 2, 20.0000), (3, 3, 3, 3, 30.0000), (4, 4, 4, 4, 40.0000),
(5, 5, 5, 5, NULL), (6, 6, 6, 6, NULL), (7, 7, 7, 7, 70.0000),
(8, 8, 8, 8, 80.0000);
create table t2
(
a int NOT NULL,
b int,
name varchar(50),
key t2_ix1 (a,b)
) engine=innodb;
insert into t2 values (0, NULL, 'a'), (1, NULL, 'A'), (2, 2, 'B'), (3,3, 'C'),
(4,4, 'D'), (5,5, NULL), (6,6, NULL), (7,7, 'E'), (8,8, 'F'), (9,9, 'G'),
(10,10,'H'), (11,11, NULL), (12,12, NULL);
create table t3
(
a int not null,
b int,
description varchar(50),
key t3_ix1 (a,b)
);
insert into t3 values (1, 1, 'bar'),(2,2,'buz'),(0,NULL, 'gold');
insert into t3 select seq, seq, 'junk' from seq_3_to_13;
let $q=
select * from t1
join t2 on t1.a = t2.a and t1.b <=> t2.b
join
(
select a, b, description from t3 group by a, b
) dt on dt.a = t1.a and dt.b <=> t1.b and dt.b <=> t2.b
where dt.a < 1;
analyze table t1, t2, t3;
set optimizer_switch='default';
--source include/analyze-format.inc
eval set statement optimizer_switch='split_materialized=on' for explain format=json $q;
eval set statement optimizer_switch='split_materialized=on' for $q;
--source include/analyze-format.inc
eval set statement optimizer_switch='split_materialized=off' for explain format=json $q;
eval set statement optimizer_switch='split_materialized=off' for $q;
drop table t1, t2, t3;
--echo # End of 11.4 tests
SET GLOBAL innodb_stats_persistent=@save_innodb_stats_persistent; SET GLOBAL innodb_stats_persistent=@save_innodb_stats_persistent;
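Both new cases above use the same mtr recipe for verifying the optimization: keep the statement in a variable, then run it with split_materialized enabled and disabled so the plans and row sets can be compared. A minimal sketch of the recipe; query and table names are placeholders, not part of the patch:

let $q= select * from t1 join (select a, count(*) cnt from t2 group by a) dt on dt.a= t1.a;
# plan and rows with the optimization enabled
eval set statement optimizer_switch='split_materialized=on' for explain $q;
eval set statement optimizer_switch='split_materialized=on' for $q;
# reference rows with it disabled, for comparison
eval set statement optimizer_switch='split_materialized=off' for $q;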

View File

@@ -113,13 +113,13 @@ min(a)
2 2
explain select max(200 - a) from t1; explain select max(200 - a) from t1;
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL a 5 NULL 100 Using index 1 SIMPLE t1 index NULL a 5 NULL # Using index
select max(200 - a) from t1; select max(200 - a) from t1;
max(200 - a) max(200 - a)
198 198
explain select min(200 - a) from t1; explain select min(200 - a) from t1;
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL a 5 NULL 100 Using index 1 SIMPLE t1 index NULL a 5 NULL # Using index
select min(200 - a) from t1; select min(200 - a) from t1;
min(200 - a) min(200 - a)
0 0
@@ -174,6 +174,4 @@ SELECT MAX(a) FROM t1 WHERE a <= 0.6789;
MAX(a) MAX(a)
0.6789 0.6789
drop table t1; drop table t1;
#
# end of test 11.4 # end of test 11.4
#

View File

@@ -102,13 +102,13 @@ eval $query;
# double reversion # double reversion
let $query= let $query=
select max(200 - a) from t1; select max(200 - a) from t1;
replace_column 9 100; replace_column 9 #;
eval explain $query; eval explain $query;
eval $query; eval $query;
let $query= let $query=
select min(200 - a) from t1; select min(200 - a) from t1;
replace_column 9 100; replace_column 9 #;
eval explain $query; eval explain $query;
eval $query; eval $query;
@@ -162,6 +162,5 @@ eval $query;
# Cleanup # Cleanup
drop table t1; drop table t1;
--echo #
--echo # end of test 11.4 --echo # end of test 11.4
--echo #
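The switch from replace_column 9 100 to replace_column 9 # simply masks the optimizer's row estimate in the next statement's EXPLAIN output, so the recorded result no longer depends on exact statistics. As a reminder of how the directive is used (table name is illustrative):

# column 9 of EXPLAIN (rows) is replaced by '#' in the recorded output
--replace_column 9 #
explain select max(a) from t1;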

View File

@@ -3430,7 +3430,7 @@ CASE WHEN TRUE THEN COALESCE(NULL) ELSE 4 END
NULL NULL
SELECT COALESCE(COALESCE(NULL), 1.1) AS c0, IF(0, COALESCE(NULL), 1.1) AS c1; SELECT COALESCE(COALESCE(NULL), 1.1) AS c0, IF(0, COALESCE(NULL), 1.1) AS c1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def c0 246 4 3 Y 32896 1 63 def c0 246 4 3 N 32897 1 63
def c1 246 4 3 Y 32896 1 63 def c1 246 4 3 Y 32896 1 63
c0 c1 c0 c1
1.1 1.1 1.1 1.1
@@ -3795,8 +3795,8 @@ FROM t1;
SHOW CREATE TABLE t2; SHOW CREATE TABLE t2;
Table Create Table Table Create Table
t2 CREATE TABLE `t2` ( t2 CREATE TABLE `t2` (
`f0` decimal(1,0) DEFAULT NULL, `f0` decimal(1,0) NOT NULL,
`f1` decimal(1,0) DEFAULT NULL `f1` decimal(1,0) NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
DROP TABLE t1, t2; DROP TABLE t1, t2;
SET sql_mode=DEFAULT; SET sql_mode=DEFAULT;
@@ -4366,3 +4366,146 @@ LEAST( CAST( 0 AS CHAR ), OLD_PASSWORD( 1 ) )
# #
# End of 10.6 tests # End of 10.6 tests
# #
#
# MDEV-36581: COALESCE() returns nullable column while IFNULL() does not
#
CREATE OR REPLACE VIEW test_coalesce_vs_ifnull AS
SELECT
COALESCE(operation_date, '1970-01-01 00:00:00') AS coalesced_date,
IFNULL(operation_date, '1970-01-01 00:00:00') AS ifnull_date
FROM (
SELECT NULL AS operation_date
) AS t;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_NAME = 'test_coalesce_vs_ifnull';
COLUMN_NAME IS_NULLABLE COLUMN_TYPE
coalesced_date NO varchar(19)
ifnull_date NO varchar(19)
DROP VIEW test_coalesce_vs_ifnull;
CREATE VIEW v2 as SELECT COALESCE(c, NULL) AS c_col, IFNULL(c, 10) AS i_col FROM (SELECT NULL AS c) AS t;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v2';
COLUMN_NAME IS_NULLABLE COLUMN_TYPE
c_col YES binary(0)
i_col NO varchar(2)
DROP VIEW v2;
CREATE VIEW v3 as SELECT COALESCE(c, 10, NULL) AS c_col, IFNULL(c, 10) AS i_col FROM (SELECT NULL AS c) AS t;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v3';
COLUMN_NAME IS_NULLABLE COLUMN_TYPE
c_col NO varchar(2)
i_col NO varchar(2)
DROP VIEW v3;
CREATE VIEW v4 AS SELECT COALESCE(c, NULL, NULL) as c_col FROM (SELECT NULL AS c) AS t;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v4';
COLUMN_NAME IS_NULLABLE COLUMN_TYPE
c_col YES binary(0)
DROP VIEW v4;
CREATE VIEW v5 AS SELECT COALESCE(c, COALESCE(NULL, 10), NULL) as c_col FROM (SELECT NULL AS c) AS t;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v5';
COLUMN_NAME IS_NULLABLE COLUMN_TYPE
c_col NO varchar(2)
DROP VIEW v5;
CREATE TABLE t (c1 INT, c2 DOUBLE, c3 VARCHAR(5), c4 DATE);
INSERT INTO t values (1, 2.3, 'four', '2025-05-06');
SELECT COALESCE(c1, 10) AS coalesced_c1, IFNULL(c1, 10) AS ifnull_c1 FROM t;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def coalesced_c1 3 11 1 N 32897 0 63
def ifnull_c1 3 11 1 N 32897 0 63
coalesced_c1 ifnull_c1
1 1
SELECT COALESCE(c1, NULL) AS coalesced_c1, IFNULL(c1, NULL) AS ifnull_c1 FROM t;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def coalesced_c1 3 11 1 Y 32896 0 63
def ifnull_c1 3 11 1 Y 32896 0 63
coalesced_c1 ifnull_c1
1 1
SELECT COALESCE(c2, NULL) AS coalesced_c2, IFNULL(c2, NULL) as ifnull_c2 FROM t;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def coalesced_c2 5 22 3 Y 32896 31 63
def ifnull_c2 5 22 3 Y 32896 31 63
coalesced_c2 ifnull_c2
2.3 2.3
SELECT COALESCE(c3, 'two') as coalesced_c1, COALESCE(c4, '2025-07-08') AS coalesced_date FROM t;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def coalesced_c1 253 5 4 N 1 39 8
def coalesced_date 253 10 10 N 1 39 8
coalesced_c1 coalesced_date
four 2025-05-06
INSERT INTO t values (2, 3.4, NULL, NULL);
SELECT COALESCE(c3, 'two') AS coalesced_c3, IFNULL(c3, 'three') AS ifnull_c3 FROM t WHERE c1 = 2;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def coalesced_c3 253 5 3 N 1 39 8
def ifnull_c3 253 5 5 N 1 39 8
coalesced_c3 ifnull_c3
two three
SELECT COALESCE(c3, 'four', NULL) AS coalesced_c3, COALESCE(COALESCE(c3, NULL), NULL) AS coalesced_c3_null FROM t WHERE c1 = 2;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def coalesced_c3 253 5 4 N 1 39 8
def coalesced_c3_null 253 5 0 Y 0 39 8
coalesced_c3 coalesced_c3_null
four NULL
SELECT COALESCE(c4, COALESCE('2025-05-06', NULL)) AS coalesced_date FROM t WHERE c1 = 2;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def coalesced_date 253 10 10 N 1 39 8
coalesced_date
2025-05-06
DROP TABLE t;
SET sql_mode='';
CREATE TABLE t1 (a UUID, b VARCHAR(32) NOT NULL);
INSERT INTO t1 VALUES (NULL, '1');
CREATE TABLE t2 AS SELECT COALESCE(a, b), IFNULL(a, b) FROM t1;
Warnings:
Warning 1292 Incorrect uuid value: '1'
Warning 1292 Incorrect uuid value: '1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
`COALESCE(a, b)` uuid DEFAULT NULL,
`IFNULL(a, b)` uuid DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't2';
COLUMN_NAME IS_NULLABLE COLUMN_TYPE
COALESCE(a, b) YES uuid
IFNULL(a, b) YES uuid
DROP TABLE t1, t2;
CREATE TABLE t (c1 INET6, c2 INET4);
INSERT INTO t VALUES ('::', '0.0.0.0'), (NULL, NULL);
CREATE TABLE t1 AS
SELECT
COALESCE(c1, '::1') AS inet6_c1_c, IFNULL(c1, '::1') AS inet6_c1_i,
COALESCE(c2, '0.0.0.0') AS inet4_c2_c, IFNULL(c2, '0.0.0.0') AS inet4_c2_i
FROM t;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
COLUMN_NAME IS_NULLABLE COLUMN_TYPE
inet6_c1_c NO inet6
inet6_c1_i NO inet6
inet4_c2_c NO inet4
inet4_c2_i NO inet4
CREATE TABLE t2 AS
SELECT
COALESCE(c1, 'ipv6') AS inet6_c1_c, IFNULL(c1, 'ipv6') AS inet6_c1_i,
COALESCE(c2, 'ipv4') AS inet4_c2_c, IFNULL(c2, 'ipv4') AS inet4_c2_i
FROM t;
Warnings:
Warning 1292 Incorrect inet6 value: 'ipv6'
Warning 1292 Incorrect inet6 value: 'ipv6'
Warning 1292 Incorrect inet4 value: 'ipv4'
Warning 1292 Incorrect inet4 value: 'ipv4'
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't2';
COLUMN_NAME IS_NULLABLE COLUMN_TYPE
inet6_c1_c YES inet6
inet6_c1_i YES inet6
inet4_c2_c YES inet4
inet4_c2_i YES inet4
CREATE TABLE t3 AS SELECT COALESCE(c1, '::1') AS inet4_c1_c, IFNULL(c1, '::1') as inet6_c1_i FROM t WHERE c1 IS NULL;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't3';
COLUMN_NAME IS_NULLABLE COLUMN_TYPE
inet4_c1_c NO inet6
inet6_c1_i NO inet6
CREATE TABLE t4 AS SELECT COALESCE(c1, 'foo') AS inet4_c1_c, IFNULL(c1, 'bar') as inet6_c1_i FROM t WHERE c1 IS NOT NULL;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't3';
COLUMN_NAME IS_NULLABLE COLUMN_TYPE
inet4_c1_c NO inet6
inet6_c1_i NO inet6
DROP TABLE t, t1, t2, t3, t4;
# End of 10.11 tests

View File

@@ -1164,3 +1164,110 @@ SELECT LEAST( CAST( 0 AS CHAR ), OLD_PASSWORD( 1 ) );
--echo # --echo #
--echo # End of 10.6 tests --echo # End of 10.6 tests
--echo # --echo #
--echo #
--echo # MDEV-36581: COALESCE() returns nullable column while IFNULL() does not
--echo #
CREATE OR REPLACE VIEW test_coalesce_vs_ifnull AS
SELECT
COALESCE(operation_date, '1970-01-01 00:00:00') AS coalesced_date,
IFNULL(operation_date, '1970-01-01 00:00:00') AS ifnull_date
FROM (
SELECT NULL AS operation_date
) AS t;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_NAME = 'test_coalesce_vs_ifnull';
DROP VIEW test_coalesce_vs_ifnull;
# Tests on views
CREATE VIEW v2 as SELECT COALESCE(c, NULL) AS c_col, IFNULL(c, 10) AS i_col FROM (SELECT NULL AS c) AS t;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v2';
DROP VIEW v2;
CREATE VIEW v3 as SELECT COALESCE(c, 10, NULL) AS c_col, IFNULL(c, 10) AS i_col FROM (SELECT NULL AS c) AS t;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v3';
DROP VIEW v3;
CREATE VIEW v4 AS SELECT COALESCE(c, NULL, NULL) as c_col FROM (SELECT NULL AS c) AS t;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v4';
DROP VIEW v4;
CREATE VIEW v5 AS SELECT COALESCE(c, COALESCE(NULL, 10), NULL) as c_col FROM (SELECT NULL AS c) AS t;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'v5';
DROP VIEW v5;
# Tests on tables
CREATE TABLE t (c1 INT, c2 DOUBLE, c3 VARCHAR(5), c4 DATE);
INSERT INTO t values (1, 2.3, 'four', '2025-05-06');
--enable_metadata
--disable_ps_protocol
--disable_view_protocol
SELECT COALESCE(c1, 10) AS coalesced_c1, IFNULL(c1, 10) AS ifnull_c1 FROM t;
SELECT COALESCE(c1, NULL) AS coalesced_c1, IFNULL(c1, NULL) AS ifnull_c1 FROM t;
SELECT COALESCE(c2, NULL) AS coalesced_c2, IFNULL(c2, NULL) as ifnull_c2 FROM t;
SELECT COALESCE(c3, 'two') as coalesced_c1, COALESCE(c4, '2025-07-08') AS coalesced_date FROM t;
--enable_view_protocol
--enable_ps_protocol
--disable_metadata
INSERT INTO t values (2, 3.4, NULL, NULL);
--enable_metadata
--disable_ps_protocol
--disable_view_protocol
SELECT COALESCE(c3, 'two') AS coalesced_c3, IFNULL(c3, 'three') AS ifnull_c3 FROM t WHERE c1 = 2;
SELECT COALESCE(c3, 'four', NULL) AS coalesced_c3, COALESCE(COALESCE(c3, NULL), NULL) AS coalesced_c3_null FROM t WHERE c1 = 2;
SELECT COALESCE(c4, COALESCE('2025-05-06', NULL)) AS coalesced_date FROM t WHERE c1 = 2;
--enable_view_protocol
--enable_ps_protocol
--disable_metadata
DROP TABLE t;
# Case when one type cannot always be converted to another safely
SET sql_mode='';
CREATE TABLE t1 (a UUID, b VARCHAR(32) NOT NULL);
INSERT INTO t1 VALUES (NULL, '1');
CREATE TABLE t2 AS SELECT COALESCE(a, b), IFNULL(a, b) FROM t1;
SHOW CREATE TABLE t2;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't2';
DROP TABLE t1, t2;
CREATE TABLE t (c1 INET6, c2 INET4);
INSERT INTO t VALUES ('::', '0.0.0.0'), (NULL, NULL);
CREATE TABLE t1 AS
SELECT
COALESCE(c1, '::1') AS inet6_c1_c, IFNULL(c1, '::1') AS inet6_c1_i,
COALESCE(c2, '0.0.0.0') AS inet4_c2_c, IFNULL(c2, '0.0.0.0') AS inet4_c2_i
FROM t;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
CREATE TABLE t2 AS
SELECT
COALESCE(c1, 'ipv6') AS inet6_c1_c, IFNULL(c1, 'ipv6') AS inet6_c1_i,
COALESCE(c2, 'ipv4') AS inet4_c2_c, IFNULL(c2, 'ipv4') AS inet4_c2_i
FROM t;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't2';
CREATE TABLE t3 AS SELECT COALESCE(c1, '::1') AS inet4_c1_c, IFNULL(c1, '::1') as inet6_c1_i FROM t WHERE c1 IS NULL;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't3';
CREATE TABLE t4 AS SELECT COALESCE(c1, 'foo') AS inet4_c1_c, IFNULL(c1, 'bar') as inet6_c1_i FROM t WHERE c1 IS NOT NULL;
SELECT COLUMN_NAME, IS_NULLABLE, COLUMN_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't3';
DROP TABLE t, t1, t2, t3, t4;
--echo # End of 10.11 tests

View File

@@ -978,10 +978,8 @@ FROM (SELECT * FROM json_test) AS json_test_values;
json_object("a", json_compact(a), "b", json_compact(b)) json_object("a", json_compact(a), "b", json_compact(b))
{"a": [1,2,3], "b": {"a":"foo"}} {"a": [1,2,3], "b": {"a":"foo"}}
DROP TABLE json_test; DROP TABLE json_test;
#
# End of 10.2 tests # End of 10.2 tests
# #
#
# MDEV-12854 Synchronize CREATE..SELECT data type and result set metadata data type for INT functions # MDEV-12854 Synchronize CREATE..SELECT data type and result set metadata data type for INT functions
# #
SELECT SELECT
@@ -1513,10 +1511,8 @@ JSON_VALID(' {"number": 01E-4}')
select JSON_VALID(' {"number": 0E-4.0}'); select JSON_VALID(' {"number": 0E-4.0}');
JSON_VALID(' {"number": 0E-4.0}') JSON_VALID(' {"number": 0E-4.0}')
0 0
#
# End of 10.4 tests # End of 10.4 tests
# #
#
# MDEV-16620 JSON_ARRAYAGG # MDEV-16620 JSON_ARRAYAGG
# #
CREATE TABLE t1 (a INT); CREATE TABLE t1 (a INT);
@@ -1748,10 +1744,8 @@ NULL
Warnings: Warnings:
Warning 4036 Character disallowed in JSON in argument 1 to function 'json_extract' at position 2 Warning 4036 Character disallowed in JSON in argument 1 to function 'json_extract' at position 2
SET @@collation_connection= @save_collation_connection; SET @@collation_connection= @save_collation_connection;
#
# End of 10.5 tests # End of 10.5 tests
# #
#
# MDEV-26054 Server crashes in Item_func_json_arrayagg::get_str_from_field # MDEV-26054 Server crashes in Item_func_json_arrayagg::get_str_from_field
# #
CREATE TABLE t (a VARCHAR(8)); CREATE TABLE t (a VARCHAR(8));
@@ -1787,6 +1781,15 @@ FROM JSON_TABLE (@data, '$[*]' COLUMNS (data text PATH '$.Data')) AS t;
data data
<root language="de"></root> <root language="de"></root>
# #
# MDEV-21530: json_extract STILL crashes in Item_func_json_extract::read_json
#
select null<=>json_extract('1',json_object(null,'{ }',null,null),'{}');
null<=>json_extract('1',json_object(null,'{ }',null,null),'{}')
1
Warnings:
Warning 4042 Syntax error in JSON path in argument 2 to function 'json_extract' at position 1
# End of 10.6 tests
#
# MDEV-35614 JSON_UNQUOTE doesn't work with emojis # MDEV-35614 JSON_UNQUOTE doesn't work with emojis
# #
SELECT HEX(JSON_UNQUOTE('"\\ud83d\\ude0a"')) as hex_smiley; SELECT HEX(JSON_UNQUOTE('"\\ud83d\\ude0a"')) as hex_smiley;
@@ -1824,9 +1827,6 @@ show warnings;
Level Code Message Level Code Message
Warning 4035 Broken JSON string in argument 1 to function 'json_unquote' at position 13 Warning 4035 Broken JSON string in argument 1 to function 'json_unquote' at position 13
# #
# End of 10.6 tests
#
#
# MDEV-31147 json_normalize does not work correctly with MSAN build # MDEV-31147 json_normalize does not work correctly with MSAN build
# #
CREATE TABLE t1 (val JSON); CREATE TABLE t1 (val JSON);
@@ -1836,10 +1836,8 @@ SELECT * FROM t1;
val normalized_json val normalized_json
15 1.5E1 15 1.5E1
DROP TABLE t1; DROP TABLE t1;
#
# End of 10.8 tests # End of 10.8 tests
# #
#
# MDEV-27677: Implement JSON_OVERLAPS() # MDEV-27677: Implement JSON_OVERLAPS()
# #
# Testing scalar json datatypes # Testing scalar json datatypes
@@ -2691,10 +2689,8 @@ SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1');
JSON_VALUE(@json,'$.A[last-1][last-1].key1') JSON_VALUE(@json,'$.A[last-1][last-1].key1')
NULL NULL
SET @@collation_connection= @save_collation_connection; SET @@collation_connection= @save_collation_connection;
#
# End of 10.9 Test # End of 10.9 Test
# #
#
# MDEV-32007: JSON_VALUE and JSON_EXTRACT doesn't handle dash (-) # MDEV-32007: JSON_VALUE and JSON_EXTRACT doesn't handle dash (-)
# as first character in key # as first character in key
# #
@@ -4883,7 +4879,6 @@ SELECT JSON_SCHEMA_VALID(@a,'{}');
JSON_SCHEMA_VALID(@a,'{}') JSON_SCHEMA_VALID(@a,'{}')
NULL NULL
# End of 11.1 test # End of 11.1 test
# Beginning of 11.2
# #
# MDEV-30145: JSON_TABLE: allow to retrieve the key when iterating on JSON objects # MDEV-30145: JSON_TABLE: allow to retrieve the key when iterating on JSON objects
# #
@@ -5074,9 +5069,6 @@ FROM JSON_TABLE(
JSON_KEY_VALUE('{"key1":{"a":1, "b": [1,2,3, {"some_key":"some_val", "c":3}]}, "key2":"val2"}', '$.key1.b[0]'), '$[*]' JSON_KEY_VALUE('{"key1":{"a":1, "b": [1,2,3, {"some_key":"some_val", "c":3}]}, "key2":"val2"}', '$.key1.b[0]'), '$[*]'
COLUMNS (k VARCHAR(20) PATH '$.key', v VARCHAR(20) PATH '$.value', id FOR ORDINALITY)) AS jt; COLUMNS (k VARCHAR(20) PATH '$.key', v VARCHAR(20) PATH '$.value', id FOR ORDINALITY)) AS jt;
k v id k v id
# End of 11.2 test
#
# Beginning of 11.2 tests
# #
# MDEV-26182: Implement json_array_intersect() # MDEV-26182: Implement json_array_intersect()
# #
@@ -5273,6 +5265,4 @@ SET @obj1='{ "a": 1,"b": 2,"c": 3}';
SELECT JSON_OBJECT_FILTER_KEYS (@obj1,@arr1); SELECT JSON_OBJECT_FILTER_KEYS (@obj1,@arr1);
JSON_OBJECT_FILTER_KEYS (@obj1,@arr1) JSON_OBJECT_FILTER_KEYS (@obj1,@arr1)
NULL NULL
#
# End of 11.2 Test # End of 11.2 Test
#

View File

@@ -619,9 +619,7 @@ SELECT json_object("a", json_compact(a), "b", json_compact(b))
FROM (SELECT * FROM json_test) AS json_test_values; FROM (SELECT * FROM json_test) AS json_test_values;
DROP TABLE json_test; DROP TABLE json_test;
--echo #
--echo # End of 10.2 tests --echo # End of 10.2 tests
--echo #
--echo # --echo #
--echo # MDEV-12854 Synchronize CREATE..SELECT data type and result set metadata data type for INT functions --echo # MDEV-12854 Synchronize CREATE..SELECT data type and result set metadata data type for INT functions
@@ -983,9 +981,7 @@ select JSON_VALID(' {"number": 00E-4}');
select JSON_VALID(' {"number": 01E-4}'); select JSON_VALID(' {"number": 01E-4}');
select JSON_VALID(' {"number": 0E-4.0}'); select JSON_VALID(' {"number": 0E-4.0}');
--echo #
--echo # End of 10.4 tests --echo # End of 10.4 tests
--echo #
-- echo # -- echo #
-- echo # MDEV-16620 JSON_ARRAYAGG -- echo # MDEV-16620 JSON_ARRAYAGG
@@ -1207,9 +1203,7 @@ SELECT JSON_EXTRACT('{"a": 1,"b": 2}','$.a');
SET @@collation_connection= @save_collation_connection; SET @@collation_connection= @save_collation_connection;
--echo #
--echo # End of 10.5 tests --echo # End of 10.5 tests
--echo #
--echo # --echo #
--echo # MDEV-26054 Server crashes in Item_func_json_arrayagg::get_str_from_field --echo # MDEV-26054 Server crashes in Item_func_json_arrayagg::get_str_from_field
@@ -1245,6 +1239,14 @@ SELECT
FROM JSON_TABLE (@data, '$[*]' COLUMNS (data text PATH '$.Data')) AS t; FROM JSON_TABLE (@data, '$[*]' COLUMNS (data text PATH '$.Data')) AS t;
--echo #
--echo # MDEV-21530: json_extract STILL crashes in Item_func_json_extract::read_json
--echo #
select null<=>json_extract('1',json_object(null,'{ }',null,null),'{}');
--echo # End of 10.6 tests
--echo # --echo #
--echo # MDEV-35614 JSON_UNQUOTE doesn't work with emojis --echo # MDEV-35614 JSON_UNQUOTE doesn't work with emojis
--echo # --echo #
@@ -1265,10 +1267,6 @@ select json_unquote(json_extract(@v,'$.color')) as unquoted, collation(json_unqu
SELECT JSON_UNQUOTE('"\\uc080\\ude0a"') as invalid_utf8mb4; SELECT JSON_UNQUOTE('"\\uc080\\ude0a"') as invalid_utf8mb4;
show warnings; show warnings;
--echo #
--echo # End of 10.6 tests
--echo #
--echo # --echo #
--echo # MDEV-31147 json_normalize does not work correctly with MSAN build --echo # MDEV-31147 json_normalize does not work correctly with MSAN build
--echo # --echo #
@@ -1278,9 +1276,7 @@ INSERT INTO t1 (val) VALUES ('15');
SELECT * FROM t1; SELECT * FROM t1;
DROP TABLE t1; DROP TABLE t1;
--echo #
--echo # End of 10.8 tests --echo # End of 10.8 tests
--echo #
--echo # --echo #
--echo # MDEV-27677: Implement JSON_OVERLAPS() --echo # MDEV-27677: Implement JSON_OVERLAPS()
@@ -1954,9 +1950,7 @@ SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1');
SET @@collation_connection= @save_collation_connection; SET @@collation_connection= @save_collation_connection;
--echo #
--echo # End of 10.9 Test --echo # End of 10.9 Test
--echo #
--echo # --echo #
--echo # MDEV-32007: JSON_VALUE and JSON_EXTRACT doesn't handle dash (-) --echo # MDEV-32007: JSON_VALUE and JSON_EXTRACT doesn't handle dash (-)
@@ -3837,8 +3831,6 @@ SELECT JSON_SCHEMA_VALID(@a,'{}');
--echo # End of 11.1 test --echo # End of 11.1 test
--echo # Beginning of 11.2
--echo # --echo #
--echo # MDEV-30145: JSON_TABLE: allow to retrieve the key when iterating on JSON objects --echo # MDEV-30145: JSON_TABLE: allow to retrieve the key when iterating on JSON objects
--echo # --echo #
@@ -3988,16 +3980,10 @@ FROM JSON_TABLE(
JSON_KEY_VALUE('{"key1":{"a":1, "b": [1,2,3, {"some_key":"some_val", "c":3}]}, "key2":"val2"}', '$.key1.b[0]'), '$[*]' JSON_KEY_VALUE('{"key1":{"a":1, "b": [1,2,3, {"some_key":"some_val", "c":3}]}, "key2":"val2"}', '$.key1.b[0]'), '$[*]'
COLUMNS (k VARCHAR(20) PATH '$.key', v VARCHAR(20) PATH '$.value', id FOR ORDINALITY)) AS jt; COLUMNS (k VARCHAR(20) PATH '$.key', v VARCHAR(20) PATH '$.value', id FOR ORDINALITY)) AS jt;
--echo # End of 11.2 test
--echo #
--echo # Beginning of 11.2 tests
--echo # --echo #
--echo # MDEV-26182: Implement json_array_intersect() --echo # MDEV-26182: Implement json_array_intersect()
--echo # --echo #
--echo # JSON_ARRAY_INTERSECT() --echo # JSON_ARRAY_INTERSECT()
--echo # Scalar as elements --echo # Scalar as elements
@@ -4176,6 +4162,4 @@ SET CHARACTER SET utf8;
SET @obj1='{ "a": 1,"b": 2,"c": 3}'; SET @obj1='{ "a": 1,"b": 2,"c": 3}';
SELECT JSON_OBJECT_FILTER_KEYS (@obj1,@arr1); SELECT JSON_OBJECT_FILTER_KEYS (@obj1,@arr1);
--echo #
--echo # End of 11.2 Test --echo # End of 11.2 Test
--echo #

View File

@@ -5005,8 +5005,8 @@ LEAST(POINT(1,1),0x60);
SHOW CREATE TABLE t1; SHOW CREATE TABLE t1;
Table Create Table Table Create Table
t1 CREATE TABLE `t1` ( t1 CREATE TABLE `t1` (
`COALESCE(0x60,POINT(1,1))` longblob DEFAULT NULL, `COALESCE(0x60,POINT(1,1))` longblob NOT NULL,
`COALESCE(POINT(1,1),0x60)` longblob DEFAULT NULL, `COALESCE(POINT(1,1),0x60)` longblob NOT NULL,
`LEAST(0x60,POINT(1,1))` longblob DEFAULT NULL, `LEAST(0x60,POINT(1,1))` longblob DEFAULT NULL,
`LEAST(POINT(1,1),0x60)` longblob DEFAULT NULL `LEAST(POINT(1,1),0x60)` longblob DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci

View File

@@ -6304,3 +6304,165 @@ SELECT LOAD_FILE('') AS f, a FROM t1 GROUP BY f, a HAVING f = a;
f a f a
DROP TABLE t1; DROP TABLE t1;
End of 10.5 tests End of 10.5 tests
#
# MDEV-19269 Pushdown into IN subquery is not made on the second
# execution of stmt
#
create table t1 (a int, b int);
create table t2 (x int, y int);
insert into t1 values (1,1),(2,2);
insert into t2 values (1,1),(2,2),(2,3);
prepare stmt from "
EXPLAIN FORMAT=JSON
SELECT * FROM t1
WHERE a = b
AND (a,b) IN (SELECT t2.x, COUNT(t2.y) FROM t2 WHERE @a=1 GROUP BY t2.x);";
set @a=2;
execute stmt;
EXPLAIN
{
"query_block": {
"select_id": 1,
"table": {
"message": "Impossible WHERE noticed after reading const tables"
},
"subqueries": [
{
"materialization": {
"query_block": {
"select_id": 2,
"table": {
"message": "Impossible WHERE"
}
}
}
}
]
}
}
set @a=1;
# we expect to see having_condition in both of the statements below
execute stmt;
EXPLAIN
{
"query_block": {
"select_id": 1,
"cost": 0.021147833,
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
"loops": 1,
"rows": 2,
"cost": 0.01034841,
"filtered": 100,
"attached_condition": "t1.b = t1.a and t1.a is not null and t1.a is not null"
}
},
{
"table": {
"table_name": "<subquery2>",
"access_type": "eq_ref",
"possible_keys": ["distinct_key"],
"key": "distinct_key",
"key_length": "12",
"used_key_parts": ["x", "COUNT(t2.y)"],
"ref": ["test.t1.a", "test.t1.a"],
"loops": 2,
"rows": 1,
"cost": 0.010799423,
"filtered": 100,
"attached_condition": "t1.a = `<subquery2>`.`COUNT(t2.y)`",
"materialized": {
"unique": 1,
"materialization": {
"query_block": {
"select_id": 2,
"cost": 0.012403489,
"having_condition": "`COUNT(t2.y)` = t2.x",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
"loops": 1,
"rows": 3,
"cost": 0.010504815,
"filtered": 100
}
}
]
}
}
}
}
}
}
]
}
}
execute stmt;
EXPLAIN
{
"query_block": {
"select_id": 1,
"cost": 0.021147833,
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
"loops": 1,
"rows": 2,
"cost": 0.01034841,
"filtered": 100,
"attached_condition": "t1.b = t1.a and t1.a is not null and t1.a is not null"
}
},
{
"table": {
"table_name": "<subquery2>",
"access_type": "eq_ref",
"possible_keys": ["distinct_key"],
"key": "distinct_key",
"key_length": "12",
"used_key_parts": ["x", "COUNT(t2.y)"],
"ref": ["test.t1.a", "test.t1.a"],
"loops": 2,
"rows": 1,
"cost": 0.010799423,
"filtered": 100,
"attached_condition": "t1.a = `<subquery2>`.`COUNT(t2.y)`",
"materialized": {
"unique": 1,
"materialization": {
"query_block": {
"select_id": 2,
"cost": 0.012403489,
"having_condition": "`COUNT(t2.y)` = t2.x",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
"loops": 1,
"rows": 3,
"cost": 0.010504815,
"filtered": 100
}
}
]
}
}
}
}
}
}
]
}
}
drop table t1, t2;
End of 10.11 tests

View File

@@ -1751,3 +1751,31 @@ SELECT LOAD_FILE('') AS f, a FROM t1 GROUP BY f, a HAVING f = a;
DROP TABLE t1; DROP TABLE t1;
--echo End of 10.5 tests --echo End of 10.5 tests
--echo #
--echo # MDEV-19269 Pushdown into IN subquery is not made on the second
--echo # execution of stmt
--echo #
create table t1 (a int, b int);
create table t2 (x int, y int);
insert into t1 values (1,1),(2,2);
insert into t2 values (1,1),(2,2),(2,3);
prepare stmt from "
EXPLAIN FORMAT=JSON
SELECT * FROM t1
WHERE a = b
AND (a,b) IN (SELECT t2.x, COUNT(t2.y) FROM t2 WHERE @a=1 GROUP BY t2.x);";
set @a=2;
execute stmt;
set @a=1;
--echo # we expect to see having_condition in both of the statements below
execute stmt;
execute stmt;
drop table t1, t2;
--echo End of 10.11 tests

View File

@@ -0,0 +1,102 @@
# MDEV-36410 wrong result with index_merge on indexes having descending primary key
#
set optimizer_trace='enabled=on';
SET @save_sort_buffer_size=@@sort_buffer_size;
SET SESSION sort_buffer_size = 1024*16;
CREATE TABLE t1 (
id bigint(20) NOT NULL,
title varchar(255) NOT NULL,
status tinyint(4) DEFAULT 0,
country_code varchar(5) DEFAULT NULL,
PRIMARY KEY (id),
KEY idx_status (status),
KEY idx_country_code_status_id (country_code,status,id DESC)
) ENGINE=InnoDB;
INSERT INTO t1(id,title,status,country_code)
SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500;
# This must not use index_merge:
EXPLAIN
SELECT * FROM t1 WHERE country_code ='C1' and `status` =1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref idx_status,idx_country_code_status_id idx_status 2 const 50 Using where
set @trace= (select JSON_EXTRACT(trace, '$**.range_scan_alternatives[*]')
from INFORMATION_SCHEMA.OPTIMIZER_TRACE);
select json_detailed(json_extract(@trace, '$[*].index')) as INDEXES;
INDEXES
[
"idx_status",
"idx_country_code_status_id"
]
select json_detailed(json_extract(@trace, '$[*].rowid_ordered')) as ROR;
ROR
[
true,
false
]
DROP table t1;
# Now, try with indexes using ASC ordering and PK using DESC
CREATE TABLE t1 (
id bigint(20) NOT NULL,
title varchar(255) NOT NULL,
status tinyint(4) DEFAULT 0,
country_code varchar(5) DEFAULT NULL,
PRIMARY KEY (id DESC),
KEY idx_status (status),
KEY idx_country_code_status_id (country_code,status,id)
) ENGINE=InnoDB;
INSERT INTO t1(id,title,status,country_code)
SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500;
# Must not use index_merge:
EXPLAIN
SELECT * FROM t1 WHERE country_code ='C1' and status = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref idx_status,idx_country_code_status_id idx_status 2 const 50 Using where
set @trace= (select JSON_EXTRACT(trace, '$**.range_scan_alternatives[*]')
from INFORMATION_SCHEMA.OPTIMIZER_TRACE);
select json_detailed(json_extract(@trace, '$[*].index')) as INDEXES;
INDEXES
[
"idx_status",
"idx_country_code_status_id"
]
select json_detailed(json_extract(@trace, '$[*].rowid_ordered')) as ROR;
ROR
[
true,
false
]
DROP TABLE t1;
# Now, try with indexes using DESC ordering and PK using DESC
CREATE TABLE t1 (
id bigint(20) NOT NULL,
title varchar(255) NOT NULL,
status tinyint(4) DEFAULT 0,
country_code varchar(5) DEFAULT NULL,
PRIMARY KEY (id DESC),
KEY idx_status (status),
KEY idx_country_code_status_id (country_code,status,id DESC)
) ENGINE=InnoDB;
INSERT INTO t1(id,title,status,country_code)
SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500;
# Must not use index_merge:
EXPLAIN
SELECT * FROM t1 WHERE country_code ='C1' and status = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref idx_status,idx_country_code_status_id idx_status 2 const 50 Using where
set @trace= (select JSON_EXTRACT(trace, '$**.range_scan_alternatives[*]')
from INFORMATION_SCHEMA.OPTIMIZER_TRACE);
select json_detailed(json_extract(@trace, '$[*].index')) as INDEXES;
INDEXES
[
"idx_status",
"idx_country_code_status_id"
]
select json_detailed(json_extract(@trace, '$[*].rowid_ordered')) as ROR;
ROR
[
true,
false
]
DROP TABLE t1;
SET sort_buffer_size= @save_sort_buffer_size;

View File

@@ -0,0 +1,90 @@
--source include/have_innodb.inc
--source include/have_sequence.inc
--source include/not_embedded.inc
--echo
--echo # MDEV-36410 wrong result with index_merge on indexes having descending primary key
--echo #
set optimizer_trace='enabled=on';
SET @save_sort_buffer_size=@@sort_buffer_size;
SET SESSION sort_buffer_size = 1024*16;
CREATE TABLE t1 (
id bigint(20) NOT NULL,
title varchar(255) NOT NULL,
status tinyint(4) DEFAULT 0,
country_code varchar(5) DEFAULT NULL,
PRIMARY KEY (id),
KEY idx_status (status),
KEY idx_country_code_status_id (country_code,status,id DESC)
) ENGINE=InnoDB;
INSERT INTO t1(id,title,status,country_code)
SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500;
--echo # This must not use index_merge:
EXPLAIN
SELECT * FROM t1 WHERE country_code ='C1' and `status` =1;
set @trace= (select JSON_EXTRACT(trace, '$**.range_scan_alternatives[*]')
from INFORMATION_SCHEMA.OPTIMIZER_TRACE);
select json_detailed(json_extract(@trace, '$[*].index')) as INDEXES;
select json_detailed(json_extract(@trace, '$[*].rowid_ordered')) as ROR;
#select JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives[*].index')) AS JS from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
DROP table t1;
--echo # Now, try with indexes using ASC ordering and PK using DESC
CREATE TABLE t1 (
id bigint(20) NOT NULL,
title varchar(255) NOT NULL,
status tinyint(4) DEFAULT 0,
country_code varchar(5) DEFAULT NULL,
PRIMARY KEY (id DESC),
KEY idx_status (status),
KEY idx_country_code_status_id (country_code,status,id)
) ENGINE=InnoDB;
INSERT INTO t1(id,title,status,country_code)
SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500;
--echo # Must not use index_merge:
EXPLAIN
SELECT * FROM t1 WHERE country_code ='C1' and status = 1;
set @trace= (select JSON_EXTRACT(trace, '$**.range_scan_alternatives[*]')
from INFORMATION_SCHEMA.OPTIMIZER_TRACE);
select json_detailed(json_extract(@trace, '$[*].index')) as INDEXES;
select json_detailed(json_extract(@trace, '$[*].rowid_ordered')) as ROR;
DROP TABLE t1;
--echo # Now, try with indexes using DESC ordering and PK using DESC
CREATE TABLE t1 (
id bigint(20) NOT NULL,
title varchar(255) NOT NULL,
status tinyint(4) DEFAULT 0,
country_code varchar(5) DEFAULT NULL,
PRIMARY KEY (id DESC),
KEY idx_status (status),
KEY idx_country_code_status_id (country_code,status,id DESC)
) ENGINE=InnoDB;
INSERT INTO t1(id,title,status,country_code)
SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500;
--echo # Must not use index_merge:
EXPLAIN
SELECT * FROM t1 WHERE country_code ='C1' and status = 1;
set @trace= (select JSON_EXTRACT(trace, '$**.range_scan_alternatives[*]')
from INFORMATION_SCHEMA.OPTIMIZER_TRACE);
select json_detailed(json_extract(@trace, '$[*].index')) as INDEXES;
select json_detailed(json_extract(@trace, '$[*].rowid_ordered')) as ROR;
DROP TABLE t1;
SET sort_buffer_size= @save_sort_buffer_size;

View File

@@ -3,6 +3,8 @@
# #
SET @saved_dbug = @@debug_dbug; SET @saved_dbug = @@debug_dbug;
SET debug_dbug='+d,json_check_min_stack_requirement'; SET debug_dbug='+d,json_check_min_stack_requirement';
SELECT * from JSON_TABLE('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
ERROR HY000: Thread stack overrun: 'used bytes' used of a 'available' byte stack, and 'X' bytes needed. Consider increasing the thread_stack system variable.
SET @json1= '{"key1":"val1"}'; SET @json1= '{"key1":"val1"}';
SET @json2= '{"key1":"val1"}'; SET @json2= '{"key1":"val1"}';
SELECT JSON_OVERLAPS(@json1, @json2); SELECT JSON_OVERLAPS(@json1, @json2);

View File

@@ -1,6 +1,5 @@
-- source include/not_embedded.inc -- source include/not_embedded.inc
--source include/have_debug.inc --source include/have_debug.inc
--source include/not_asan.inc
--echo # --echo #
--echo # MDEV-28762: recursive call of some json functions without stack control --echo # MDEV-28762: recursive call of some json functions without stack control
@@ -9,6 +8,10 @@
SET @saved_dbug = @@debug_dbug; SET @saved_dbug = @@debug_dbug;
SET debug_dbug='+d,json_check_min_stack_requirement'; SET debug_dbug='+d,json_check_min_stack_requirement';
--replace_regex /overrun: [0-9]* bytes used of a [0-9]* byte stack, and [0-9]* bytes needed/overrun: 'used bytes' used of a 'available' byte stack, and 'X' bytes needed/
--error ER_STACK_OVERRUN_NEED_MORE
SELECT * from JSON_TABLE('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
SET @json1= '{"key1":"val1"}'; SET @json1= '{"key1":"val1"}';
SET @json2= '{"key1":"val1"}'; SET @json2= '{"key1":"val1"}';

View File

@@ -840,28 +840,3 @@ SET DEBUG_SYNC="RESET";
disconnect con1; disconnect con1;
disconnect con2; disconnect con2;
DROP TABLES t1, t2; DROP TABLES t1, t2;
#
# MDEV-28567 Assertion `0' in open_tables upon function-related operation
#
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (b INT);
CREATE TRIGGER tr1 BEFORE INSERT ON t1 FOR EACH ROW UPDATE t2 SET b = 0;
CREATE TRIGGER tr2 BEFORE INSERT ON t2 FOR EACH ROW UPDATE t1 SET a = 6;
CREATE VIEW v1 AS SELECT * FROM t1;
SET AUTOCOMMIT=OFF;
SELECT * FROM t1;
a
DROP TRIGGER tr1;
INSERT INTO t2 SELECT * FROM t2;
SELECT f() FROM t2;
ERROR 42000: FUNCTION test.f does not exist
set debug_sync= 'after_open_table_mdl_shared signal s1';
ALTER VIEW v1 AS SELECT f() FROM t1;
CREATE FUNCTION f() RETURNS INT RETURN 1;
set debug_sync= 'now wait_for s1';
SELECT * FROM ( SELECT * FROM v1 ) sq;
COMMIT;
DROP VIEW v1;
DROP FUNCTION IF EXISTS f;
DROP TABLE t1, t2;
set debug_sync= 'reset';

View File

@@ -1079,123 +1079,3 @@ DROP TABLES t1, t2;
# Check that all connections opened by test cases in this file are really # Check that all connections opened by test cases in this file are really
# gone so execution of other tests won't be affected by their presence. # gone so execution of other tests won't be affected by their presence.
--source include/wait_until_count_sessions.inc --source include/wait_until_count_sessions.inc
--echo #
--echo # MDEV-28567 Assertion `0' in open_tables upon function-related operation
--echo #
# To get an MDL trace, run this case like this:
# mtr --mysqld=--debug-dbug=d,mdl,query:i:o,/tmp/mdl.log ...
# Cleanup trace like this:
# sed -i -re '/(mysql|performance_schema|sys|mtr)\// d; /MDL_BACKUP_|MDL_INTENTION_/ d; /\/(t2|tr1|tr2)/ d' /tmp/mdl.log
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (b INT);
CREATE TRIGGER tr1 BEFORE INSERT ON t1 FOR EACH ROW UPDATE t2 SET b = 0;
CREATE TRIGGER tr2 BEFORE INSERT ON t2 FOR EACH ROW UPDATE t1 SET a = 6;
CREATE VIEW v1 AS SELECT * FROM t1;
SET AUTOCOMMIT=OFF;
SELECT * FROM t1;
# T@6
# Seized: test/t1 (MDL_SHARED_READ)
--connect (con1,localhost,root,,test)
--send
DROP TRIGGER tr1;
# T@7
# Seized: test/t1 (MDL_SHARED_NO_WRITE)
# Waiting: test/t1 (MDL_EXCLUSIVE)
# Waiting: test/t1 (MDL_SHARED_WRITE)
# Deadlock: test/t1 (MDL_SHARED_WRITE)
--connection default
--error 0, ER_LOCK_DEADLOCK
INSERT INTO t2 SELECT * FROM t2;
# T@6
# Released: test/t1 (MDL_SHARED_READ)
# T@7
# Acquired: test/t1 (MDL_EXCLUSIVE) (good)
--error ER_SP_DOES_NOT_EXIST
SELECT f() FROM t2;
# T@6
# Seized: test/f (MDL_SHARED)
# T@7
# Released: test/t1 (MDL_EXCLUSIVE)
# Good1: continue T@6 below
# Bad1: continue T@8 below
# Now we hold test/f; the code below creates concurrent
# waiting of 3 threads for test/f, which leads to a deadlock (Bad)
# To achieve Good, comment out 'now wait_for s1' below and run multiple times.
--connect (con2,localhost,root,,test)
set debug_sync= 'after_open_table_mdl_shared signal s1';
--send
ALTER VIEW v1 AS SELECT f() FROM t1;
# T@8
# Good2: Waiting: test/v1 (MDL_EXCLUSIVE)
# Good2-3: continue T@7 below
# Good5: Acquired: test/v1 (MDL_EXCLUSIVE)
# Good5: Seized: test/v1 (MDL_EXCLUSIVE)
# Good5-6: continue T@7 below
# Good7: Seized: test/t1 (MDL_SHARED_READ)
# Good7: Waiting: test/f (MDL_SHARED)
# Good7-8: continue T@7 below
# Good9: Acquired: test/f (MDL_SHARED)
# Good9: Released: test/f (MDL_SHARED)
# Good9: Released: test/t1 (MDL_SHARED_READ)
# Good9: Released: test/v1 (MDL_EXCLUSIVE)
# Good9: command finished without error
# Bad1: Seized: test/v1 (MDL_EXCLUSIVE)
# Bad1: Seized: test/v1 (MDL_EXCLUSIVE)
# Bad1: Seized: test/t1 (MDL_SHARED_READ)
# Bad1-2: continue T@6 below
# Bad4: Waiting: test/f (MDL_SHARED)
# Bad4: Deadlock: test/f (MDL_SHARED)
# Bad4: command finished with error
--connection con1
--reap
--send
CREATE FUNCTION f() RETURNS INT RETURN 1;
# T@7
# Good3: Waiting: test/f (MDL_EXCLUSIVE)
# Good3-4: continue T@6 below
# Good6: Acquired: test/f (MDL_EXCLUSIVE)
# Good6-7: continue T@8 above
# Good8: Released: test/f (MDL_EXCLUSIVE)
# Good8-9: continue T@8 above
# Bad3: Waiting: test/f (MDL_EXCLUSIVE)
# Bad3-4: continue T@8 above
--connection default
set debug_sync= 'now wait_for s1';
--disable_result_log
SELECT * FROM ( SELECT * FROM v1 ) sq;
--enable_result_log
# T@6
# Good1: Seized: test/v1 (MDL_SHARED_READ)
# Good1-2: continue T@8 above
# Good4: Seized: test/t1 (MDL_SHARED_READ)
# Bad2: Waiting: test/v1 (MDL_SHARED_READ)
# Bad2-3: continue T@7 above
# Cleanup
COMMIT;
# Good4: Released: test/t1 (MDL_SHARED_READ)
# Good4: Released: test/v1 (MDL_SHARED_READ)
# Good4: Released: test/f (MDL_SHARED)
# Good4-5: continue T@8 above
--connection con2
--error 0, ER_SP_DOES_NOT_EXIST
--reap
--disconnect con1
--disconnect con2
--connection default
--source include/wait_until_count_sessions.inc
DROP VIEW v1;
DROP FUNCTION IF EXISTS f;
DROP TABLE t1, t2;
set debug_sync= 'reset';

View File

@@ -313,6 +313,137 @@ disconnect con2;
disconnect con1; disconnect con1;
connection default; connection default;
drop procedure p1; drop procedure p1;
#
# MDEV-35353 Rows_examined is always 0 in the slow query log
# for union all queries
#
SET GLOBAL log_output = "TABLE";
SET GLOBAL slow_query_log = ON;
SET GLOBAL long_query_time = 0.0;
TRUNCATE TABLE mysql.slow_log;
create table t1(a int, b int);
insert into t1 select seq, seq from seq_1_to_20;
connect con2,localhost,root,,;
select sum(a) from t1
union all
select sum(b) from t1;
sum(a)
210
210
SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%union all%';
rows_examined sql_text
40 select sum(a) from t1
union all
select sum(b) from t1
TRUNCATE TABLE mysql.slow_log;
select sum(a) from t1
union
select sum(b) from t1;
sum(a)
210
SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%union%';
rows_examined sql_text
41 select sum(a) from t1
union
select sum(b) from t1
TRUNCATE TABLE mysql.slow_log;
select sum(a) from t1
except all
select sum(b) from t1;
sum(a)
SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%except all%';
rows_examined sql_text
40 select sum(a) from t1
except all
select sum(b) from t1
TRUNCATE TABLE mysql.slow_log;
select sum(a) from t1
except
select sum(b) from t1;
sum(a)
SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%except%';
rows_examined sql_text
40 select sum(a) from t1
except
select sum(b) from t1
TRUNCATE TABLE mysql.slow_log;
select sum(a) from t1
intersect all
select sum(b) from t1;
sum(a)
210
SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%intersect all%';
rows_examined sql_text
41 select sum(a) from t1
intersect all
select sum(b) from t1
TRUNCATE TABLE mysql.slow_log;
select sum(a) from t1
intersect
select sum(b) from t1;
sum(a)
210
SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%intersect%';
rows_examined sql_text
41 select sum(a) from t1
intersect
select sum(b) from t1
disconnect con2;
connection default;
DROP TABLE t1;
TRUNCATE TABLE mysql.slow_log;
#
# MDEV-37195 Rows_examined is always 0 in the slow query log
# for queries with a subquery and degenerate select
#
SET GLOBAL log_output = "TABLE";
SET GLOBAL slow_query_log = ON;
SET GLOBAL long_query_time = 0.0;
TRUNCATE TABLE mysql.slow_log;
CREATE TABLE t1 (id INT);
INSERT INTO t1(id) SELECT seq FROM seq_1_to_10;
connect con2,localhost,root,,;
SELECT 100 in (SELECT id FROM t1) AS res;
res
0
SELECT rows_examined,sql_text FROM mysql.slow_log WHERE sql_text LIKE '%SELECT 100%';
rows_examined sql_text
10 SELECT 100 in (SELECT id FROM t1) AS res
TRUNCATE TABLE mysql.slow_log;
SELECT 100 in (
SELECT id FROM t1
UNION
SELECT id FROM t1
) AS res;
res
0
SELECT rows_examined,sql_text FROM mysql.slow_log WHERE sql_text LIKE '%SELECT 100%';
rows_examined sql_text
20 SELECT 100 in (
SELECT id FROM t1
UNION
SELECT id FROM t1
) AS res
TRUNCATE TABLE mysql.slow_log;
SELECT 100 in (
SELECT id FROM t1
UNION ALL
SELECT id FROM t1
) AS res;
res
0
SELECT rows_examined,sql_text FROM mysql.slow_log WHERE sql_text LIKE '%SELECT 100%';
rows_examined sql_text
20 SELECT 100 in (
SELECT id FROM t1
UNION ALL
SELECT id FROM t1
) AS res
disconnect con2;
connection default;
DROP TABLE t1;
TRUNCATE TABLE mysql.slow_log;
End of 10.11 tests
SET GLOBAL long_query_time = @save_long_query_time; SET GLOBAL long_query_time = @save_long_query_time;
SET GLOBAL log_output = @old_log_output; SET GLOBAL log_output = @old_log_output;
SET global general_log = @old_general_log; SET global general_log = @old_general_log;

View File

@@ -3,6 +3,7 @@
--source include/not_embedded.inc --source include/not_embedded.inc
--source include/have_csv.inc --source include/have_csv.inc
--source include/have_sequence.inc
call mtr.add_suppression("options .* --log_slow_queries is not set"); call mtr.add_suppression("options .* --log_slow_queries is not set");
@@ -370,6 +371,130 @@ disconnect con1;
connection default; connection default;
drop procedure p1; drop procedure p1;
###########################################################################
--echo #
--echo # MDEV-35353 Rows_examined is always 0 in the slow query log
--echo # for union all queries
--echo #
SET GLOBAL log_output = "TABLE";
SET GLOBAL slow_query_log = ON;
SET GLOBAL long_query_time = 0.0;
# clear slow_log of any residual slow queries
TRUNCATE TABLE mysql.slow_log;
create table t1(a int, b int);
insert into t1 select seq, seq from seq_1_to_20;
connect (con2,localhost,root,,);
--disable_ps_protocol
select sum(a) from t1
union all
select sum(b) from t1;
SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%union all%';
TRUNCATE TABLE mysql.slow_log;
select sum(a) from t1
union
select sum(b) from t1;
SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%union%';
TRUNCATE TABLE mysql.slow_log;
select sum(a) from t1
except all
select sum(b) from t1;
SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%except all%';
TRUNCATE TABLE mysql.slow_log;
select sum(a) from t1
except
select sum(b) from t1;
SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%except%';
TRUNCATE TABLE mysql.slow_log;
select sum(a) from t1
intersect all
select sum(b) from t1;
SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%intersect all%';
TRUNCATE TABLE mysql.slow_log;
select sum(a) from t1
intersect
select sum(b) from t1;
SELECT rows_examined,sql_text FROM mysql.slow_log where sql_text LIKE '%intersect%';
disconnect con2;
connection default;
DROP TABLE t1;
TRUNCATE TABLE mysql.slow_log;
###########################################################################
--echo #
--echo # MDEV-37195 Rows_examined is always 0 in the slow query log
--echo # for queries with a subquery and degenerate select
--echo #
SET GLOBAL log_output = "TABLE";
SET GLOBAL slow_query_log = ON;
SET GLOBAL long_query_time = 0.0;
# clear slow_log of any residual slow queries
TRUNCATE TABLE mysql.slow_log;
CREATE TABLE t1 (id INT);
INSERT INTO t1(id) SELECT seq FROM seq_1_to_10;
connect (con2,localhost,root,,);
--disable_ps_protocol
SELECT 100 in (SELECT id FROM t1) AS res;
SELECT rows_examined,sql_text FROM mysql.slow_log WHERE sql_text LIKE '%SELECT 100%';
TRUNCATE TABLE mysql.slow_log;
SELECT 100 in (
SELECT id FROM t1
UNION
SELECT id FROM t1
) AS res;
SELECT rows_examined,sql_text FROM mysql.slow_log WHERE sql_text LIKE '%SELECT 100%';
TRUNCATE TABLE mysql.slow_log;
SELECT 100 in (
SELECT id FROM t1
UNION ALL
SELECT id FROM t1
) AS res;
SELECT rows_examined,sql_text FROM mysql.slow_log WHERE sql_text LIKE '%SELECT 100%';
disconnect con2;
connection default;
DROP TABLE t1;
TRUNCATE TABLE mysql.slow_log;
--echo End of 10.11 tests
###########################################################################
# Reset global system variables to initial values if forgotten somewhere above. # Reset global system variables to initial values if forgotten somewhere above.
SET GLOBAL long_query_time = @save_long_query_time; SET GLOBAL long_query_time = @save_long_query_time;
SET GLOBAL log_output = @old_log_output; SET GLOBAL log_output = @old_log_output;

View File

@@ -1022,6 +1022,37 @@ select 'evil-doing', sleep(1.1)
select 'after evil-doing', sleep(0.2) select 'after evil-doing', sleep(0.2)
set global log_output=default; set global log_output=default;
drop user u@localhost; drop user u@localhost;
# End of 10.5 tests
#
# MDEV-34928 CREATE TABLE does not check valid engine for log tables
#
set global general_log='on';
show create table mysql.general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
`event_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6),
`user_host` mediumtext NOT NULL,
`thread_id` bigint(21) unsigned NOT NULL,
`server_id` int(10) unsigned NOT NULL,
`command_type` varchar(64) NOT NULL,
`argument` mediumtext NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci COMMENT='General log'
create or replace table mysql.general_log (a int) engine=innodb;
ERROR HY000: Storage engine InnoDB cannot be used for log tables
create or replace table mysql.slow_log (a int) engine=innodb;
ERROR HY000: Storage engine InnoDB cannot be used for log tables
create temporary table t (c int) engine=innodb;
insert into t values (1);
set global log_output='table';
set session autocommit=0;
update t set c=0;
truncate t;
select a;
ERROR 42S22: Unknown column 'a' in 'SELECT'
drop temporary table t;
set @@global.log_output= @old_log_output;
set @@global.general_log= @old_general_log;
# End of 10.6 tests
SET @@global.log_output= @old_log_output; SET @@global.log_output= @old_log_output;
SET @@global.slow_query_log= @old_slow_query_log; SET @@global.slow_query_log= @old_slow_query_log;
SET @@global.general_log= @old_general_log; SET @@global.general_log= @old_general_log;

View File

@@ -2,6 +2,7 @@
-- source include/not_embedded.inc -- source include/not_embedded.inc
--source include/have_csv.inc --source include/have_csv.inc
--source include/have_innodb.inc
SET SQL_MODE=""; SET SQL_MODE="";
SET @old_log_output= @@global.log_output; SET @old_log_output= @@global.log_output;
@@ -1060,6 +1061,31 @@ set global log_output=default;
drop user u@localhost; drop user u@localhost;
--enable_cursor_protocol --enable_cursor_protocol
--echo # End of 10.5 tests
--echo #
--echo # MDEV-34928 CREATE TABLE does not check valid engine for log tables
--echo #
set global general_log='on';
show create table mysql.general_log;
--error ER_UNSUPORTED_LOG_ENGINE
create or replace table mysql.general_log (a int) engine=innodb;
--error ER_UNSUPORTED_LOG_ENGINE
create or replace table mysql.slow_log (a int) engine=innodb;
create temporary table t (c int) engine=innodb;
insert into t values (1);
set global log_output='table';
set session autocommit=0;
update t set c=0;
truncate t;
--error ER_BAD_FIELD_ERROR
select a;
drop temporary table t;
set @@global.log_output= @old_log_output;
set @@global.general_log= @old_general_log;
--echo # End of 10.6 tests
SET @@global.log_output= @old_log_output; SET @@global.log_output= @old_log_output;
SET @@global.slow_query_log= @old_slow_query_log; SET @@global.slow_query_log= @old_slow_query_log;
SET @@global.general_log= @old_general_log; SET @@global.general_log= @old_general_log;

View File

@@ -1242,6 +1242,7 @@ t1 CREATE TABLE `t1` (
insert into t1 value(concat(repeat('s',3000),'1')); insert into t1 value(concat(repeat('s',3000),'1'));
insert into t1 value(concat(repeat('s',3000),'2')); insert into t1 value(concat(repeat('s',3000),'2'));
ERROR 23000: Duplicate entry 'sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss...' for key 'a' ERROR 23000: Duplicate entry 'sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss...' for key 'a'
update t1 set a= concat(repeat('s',3000),'2');
insert into t1 value(concat(repeat('a',3000),'2')); insert into t1 value(concat(repeat('a',3000),'2'));
drop table t1; drop table t1;
create table t1(a varchar(4000), b longblob , c varchar(5000), d longblob, create table t1(a varchar(4000), b longblob , c varchar(5000), d longblob,

View File

@@ -405,6 +405,7 @@ show create table t1;
insert into t1 value(concat(repeat('s',3000),'1')); insert into t1 value(concat(repeat('s',3000),'1'));
--error ER_DUP_ENTRY --error ER_DUP_ENTRY
insert into t1 value(concat(repeat('s',3000),'2')); insert into t1 value(concat(repeat('s',3000),'2'));
update t1 set a= concat(repeat('s',3000),'2');
insert into t1 value(concat(repeat('a',3000),'2')); insert into t1 value(concat(repeat('a',3000),'2'));
drop table t1; drop table t1;

View File

@@ -356,6 +356,7 @@ ERROR 42000: Specified key was too long; max key length is 2300 bytes
# #
create table t1(a int, unique(a) using hash); create table t1(a int, unique(a) using hash);
#BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES) #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES)
insert into t1 values(200),(150),(149),(148),(147),(146),(145),(144),(143),(142),(141),(140),(139),(138),(137),(136),(135),(134),(133),(132),(131),(130),(129),(128),(127),(126),(125),(124),(123),(122),(121),(120),(119),(118),(117),(116),(115),(114),(113),(112),(111),(110),(109),(108),(107),(106),(105),(104),(103),(102),(101),(100),(99),(98),(97),(96),(95),(94),(93),(92),(91),(90),(89),(88),(87),(86),(85),(84),(83),(82),(81),(80),(79),(78),(77),(76),(75),(74),(73),(72),(71),(70),(69),(68),(67),(66),(65),(64),(63),(62),(61),(60),(59),(58),(57),(56),(55),(54),(53),(52),(51),(50),(49),(48),(47),(46),(45),(44),(43),(42),(41),(40),(39),(38),(37),(36),(35),(34),(33),(32),(31),(30),(29),(28),(27),(26),(25),(24),(23),(22),(21),(20),(19),(18),(17),(16),(15),(14),(13),(12),(11),(10),(9),(8),(7),(6),(5),(4),(3),(2),(1);
drop table t1; drop table t1;
# #
# MDEV-21804 Assertion `marked_for_read()' failed upon INSERT into table with long unique blob under binlog_row_image=NOBLOB # MDEV-21804 Assertion `marked_for_read()' failed upon INSERT into table with long unique blob under binlog_row_image=NOBLOB
@@ -809,3 +810,36 @@ hex(c1) hex(c2) c3 hex(c4)
NULL NULL NULL NULL NULL NULL NULL NULL
drop table t1; drop table t1;
# End of 10.5 tests # End of 10.5 tests
#
# MDEV-36852 Table definition gets corrupt after adding unique hash key
#
create table t1 (a text, b int, foreign key(a) references x(x)) engine=myisam;
Warnings:
Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` text DEFAULT NULL,
`b` int(11) DEFAULT NULL,
KEY `a` (`a`(250))
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
alter table t1 add unique(a), add key(a);
Warnings:
Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` text DEFAULT NULL,
`b` int(11) DEFAULT NULL,
UNIQUE KEY `a` (`a`) USING HASH,
KEY `a_2` (`a`(250))
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci
drop table t1;
#
# MDEV-37203 UBSAN: applying zero offset to null pointer in strings/ctype-uca.inl | my_uca_strnncollsp_onelevel_utf8mb4 | handler::check_duplicate_long_entries_update
#
create table t (a int,b text unique key);
insert into t (a) values (1);
update t set a=2;
drop table t;
# End of 10.6 tests

View File

@@ -332,17 +332,8 @@ CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria;
--echo # MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes --echo # MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes
--echo # --echo #
create table t1(a int, unique(a) using hash); create table t1(a int, unique(a) using hash);
--let $count=150
--let insert_stmt= insert into t1 values(200)
while ($count)
{
--let $insert_stmt=$insert_stmt,($count)
--dec $count
}
--disable_query_log
--echo #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES) --echo #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES)
--eval $insert_stmt insert into t1 values(200),(150),(149),(148),(147),(146),(145),(144),(143),(142),(141),(140),(139),(138),(137),(136),(135),(134),(133),(132),(131),(130),(129),(128),(127),(126),(125),(124),(123),(122),(121),(120),(119),(118),(117),(116),(115),(114),(113),(112),(111),(110),(109),(108),(107),(106),(105),(104),(103),(102),(101),(100),(99),(98),(97),(96),(95),(94),(93),(92),(91),(90),(89),(88),(87),(86),(85),(84),(83),(82),(81),(80),(79),(78),(77),(76),(75),(74),(73),(72),(71),(70),(69),(68),(67),(66),(65),(64),(63),(62),(61),(60),(59),(58),(57),(56),(55),(54),(53),(52),(51),(50),(49),(48),(47),(46),(45),(44),(43),(42),(41),(40),(39),(38),(37),(36),(35),(34),(33),(32),(31),(30),(29),(28),(27),(26),(25),(24),(23),(22),(21),(20),(19),(18),(17),(16),(15),(14),(13),(12),(11),(10),(9),(8),(7),(6),(5),(4),(3),(2),(1);
--enable_query_log
drop table t1; drop table t1;
--echo # --echo #
@@ -756,3 +747,23 @@ select hex(c1), hex(c2), c3, hex(c4) from t1;
drop table t1; drop table t1;
--echo # End of 10.5 tests --echo # End of 10.5 tests
--echo #
--echo # MDEV-36852 Table definition gets corrupt after adding unique hash key
--echo #
create table t1 (a text, b int, foreign key(a) references x(x)) engine=myisam;
show create table t1;
alter table t1 add unique(a), add key(a);
show create table t1;
drop table t1;
--echo #
--echo # MDEV-37203 UBSAN: applying zero offset to null pointer in strings/ctype-uca.inl | my_uca_strnncollsp_onelevel_utf8mb4 | handler::check_duplicate_long_entries_update
--echo #
create table t (a int,b text unique key);
insert into t (a) values (1);
update t set a=2;
drop table t;
--echo # End of 10.6 tests

View File

@@ -137,3 +137,39 @@ disconnect con2;
# MDEV-20131 Assertion `!pk->has_virtual()' failed # MDEV-20131 Assertion `!pk->has_virtual()' failed
create table t1 (a text, primary key(a(1871))) engine=innodb; create table t1 (a text, primary key(a(1871))) engine=innodb;
ERROR 42000: Specified key was too long; max key length is 1536 bytes ERROR 42000: Specified key was too long; max key length is 1536 bytes
# End of 10.4 tests
#
# MDEV-37268 ER_NOT_KEYFILE or assertion failure upon REPLACE into table with unique hash under READ-COMMITTED
#
create table t1 (id int not null primary key, f varchar(100), unique(f) using hash) engine=innodb;
insert t1 values (1,'x');
set transaction isolation level read committed;
replace t1 values (2,'x');
select * from t1;
id f
2 x
drop table t1;
create table t1 (id int, f longtext, primary key (id), unique(f)) engine=innodb partition by key (id) partitions 9;
insert t1 (id) values (1),(2);
set transaction isolation level read committed;
update ignore t1 set f = 'x';
ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported
select * from t1;
id f
1 NULL
2 NULL
drop table t1;
#
# MDEV-37310 Non-debug failing assertion node->pcur->rel_pos == BTR_PCUR_ON upon violating long unique under READ-COMMITTED
#
create table t1 (id int, f blob, unique(id,f)) engine=innodb partition by key(id) partitions 2;
insert t1 values (1,'foo'),(2,'foo');
set transaction isolation level read committed;
update ignore t1 set id = 2;
ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported
select * from t1;
id f
1 foo
2 foo
drop table t1;
# End of 10.6 tests

View File

@@ -1,4 +1,5 @@
--source include/have_innodb.inc --source include/have_innodb.inc
--source include/have_partition.inc
# #
# MDEV-371 Unique indexes for blobs # MDEV-371 Unique indexes for blobs
@@ -144,3 +145,36 @@ disconnect con2;
--error ER_TOO_LONG_KEY --error ER_TOO_LONG_KEY
create table t1 (a text, primary key(a(1871))) engine=innodb; create table t1 (a text, primary key(a(1871))) engine=innodb;
--echo # End of 10.4 tests
--echo #
--echo # MDEV-37268 ER_NOT_KEYFILE or assertion failure upon REPLACE into table with unique hash under READ-COMMITTED
--echo #
create table t1 (id int not null primary key, f varchar(100), unique(f) using hash) engine=innodb;
insert t1 values (1,'x');
set transaction isolation level read committed;
replace t1 values (2,'x');
select * from t1;
drop table t1;
create table t1 (id int, f longtext, primary key (id), unique(f)) engine=innodb partition by key (id) partitions 9;
insert t1 (id) values (1),(2);
set transaction isolation level read committed;
--error ER_NOT_SUPPORTED_YET
update ignore t1 set f = 'x';
select * from t1;
drop table t1;
--echo #
--echo # MDEV-37310 Non-debug failing assertion node->pcur->rel_pos == BTR_PCUR_ON upon violating long unique under READ-COMMITTED
--echo #
create table t1 (id int, f blob, unique(id,f)) engine=innodb partition by key(id) partitions 2;
insert t1 values (1,'foo'),(2,'foo');
set transaction isolation level read committed;
--error ER_NOT_SUPPORTED_YET
update ignore t1 set id = 2;
select * from t1;
drop table t1;
--echo # End of 10.6 tests

View File

@@ -0,0 +1,255 @@
#
# MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records
#
## INSERT
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
# Keep a Read View open to prevent purge
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
# Create delete marked secondary index Record ('a', 10)
insert t1 values(10, 'a');
delete from t1;
# Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum
set transaction isolation level read committed;
set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
insert t1 values(15, 'a');
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
# Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10)
set transaction isolation level read committed;
insert t1 values(5, 'a');
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 23000: Duplicate entry 'a' for key 'col2'
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
5 a
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## INSERT, row-level locking proof
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
# Keep a Read View open to prevent purge
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
# Create delete marked secondary index Record ('a', 10)
insert t1 values(10, 'a');
delete from t1;
# Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum
set transaction isolation level read committed;
set debug_sync="ha_write_row_end SIGNAL checked_duplicate WAIT_FOR do_insert";
insert t1 values(15, 'a');
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
# Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10)
set session innodb_lock_wait_timeout= 1;
set transaction isolation level read committed;
insert t1 values(5, 'a');
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
set debug_sync="now SIGNAL do_insert";
connection con1;
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
15 a
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## UPDATE
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
update t1 set col2='a' where col1=5;
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 23000: Duplicate entry 'a' for key 'col2'
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
5 b
15 a
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## INSERT IGNORE
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
insert t1 values(10, 'a');
delete from t1;
set transaction isolation level read committed;
set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
insert ignore t1 values(15, 'a'), (16, 'b');
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
insert t1 values(5, 'a');
set debug_sync="now SIGNAL do_insert";
connection con1;
Warnings:
Warning 1062 Duplicate entry 'a' for key 'col2'
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
5 a
16 b
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## UPDATE IGNORE
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
insert into t1 values( 9, 'd');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
update ignore t1 set col2=chr(92+col1) where col1<=9;
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
5 b
9 d
15 a
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## UPDATE modifying PK
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
update t1 set col2='a', col1=4 where col1=5;
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 23000: Duplicate entry 'a' for key 'col2'
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
5 b
15 a
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## UPDATE IGNORE modifying PK
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
insert into t1 values( 9, 'd');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
update ignore t1 set col2=chr(92+col1), col1=col1-1 where col1<=9;
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
5 b
9 d
15 a
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
# End of 10.6 tests

View File

@@ -0,0 +1,242 @@
--source include/have_innodb.inc
--source include/have_debug_sync.inc
--echo #
--echo # MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records
--echo #
--disable_view_protocol
--echo ## INSERT
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
--echo # Keep a Read View open to prevent purge
start transaction;
select * from t1;
--connect con1,localhost,root
--echo # Create delete marked secondary index Record ('a', 10)
insert t1 values(10, 'a');
delete from t1;
--echo # Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum
set transaction isolation level read committed;
set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
--send insert t1 values(15, 'a')
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
--echo # Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10)
set transaction isolation level read committed;
insert t1 values(5, 'a');
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_DUP_ENTRY
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## INSERT, row-level locking proof
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
--echo # Keep a Read View open to prevent purge
start transaction;
select * from t1;
--connect con1,localhost,root
--echo # Create delete marked secondary index Record ('a', 10)
insert t1 values(10, 'a');
delete from t1;
--echo # Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum
set transaction isolation level read committed;
set debug_sync="ha_write_row_end SIGNAL checked_duplicate WAIT_FOR do_insert";
--send insert t1 values(15, 'a')
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
--echo # Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10)
set session innodb_lock_wait_timeout= 1;
set transaction isolation level read committed;
--error ER_LOCK_WAIT_TIMEOUT
insert t1 values(5, 'a');
set debug_sync="now SIGNAL do_insert";
--connection con1
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## UPDATE
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
--send update t1 set col2='a' where col1=5
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_DUP_ENTRY
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## INSERT IGNORE
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert t1 values(10, 'a');
delete from t1;
set transaction isolation level read committed;
set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
--send insert ignore t1 values(15, 'a'), (16, 'b')
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
insert t1 values(5, 'a');
set debug_sync="now SIGNAL do_insert";
--connection con1
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## UPDATE IGNORE
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
insert into t1 values( 9, 'd');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
--send update ignore t1 set col2=chr(92+col1) where col1<=9
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_NOT_SUPPORTED_YET
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## UPDATE modifying PK
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
--send update t1 set col2='a', col1=4 where col1=5
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_DUP_ENTRY
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## UPDATE IGNORE modifying PK
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
insert into t1 values( 9, 'd');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
--send update ignore t1 set col2=chr(92+col1), col1=col1-1 where col1<=9
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_NOT_SUPPORTED_YET
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--enable_view_protocol
--echo # End of 10.6 tests

View File

@@ -274,6 +274,23 @@ CREATE TABLE t1 (a mariadb_schema.date);
DROP TABLE t1; DROP TABLE t1;
# End of 10.3 tests # End of 10.3 tests
# #
# MDEV-36979 Same alias name with different case on same table is not working in functions
#
create table t1 (a int);
insert t1 values (1);
create table T1 (b int);
insert T1 values (10);
create function t1test(val int) returns int
begin
return (select a from t1 where exists (select b from T1 where b > val));
end//
select t1test(1);
t1test(1)
1
drop function t1test;
drop table t1, T1;
# End of 10.11 tests
#
# MDEV-32973 SHOW TABLES LIKE shows temporary tables with non-matching names # MDEV-32973 SHOW TABLES LIKE shows temporary tables with non-matching names
# #
create temporary table t1 (a int); create temporary table t1 (a int);

View File

@@ -149,6 +149,25 @@ DROP TABLE t1;
--echo # End of 10.3 tests --echo # End of 10.3 tests
--echo #
--echo # MDEV-36979 Same alias name with different case on same table is not working in functions
--echo #
create table t1 (a int);
insert t1 values (1);
create table T1 (b int);
insert T1 values (10);
delimiter //;
create function t1test(val int) returns int
begin
return (select a from t1 where exists (select b from T1 where b > val));
end//
delimiter ;//
select t1test(1);
drop function t1test;
drop table t1, T1;
--echo # End of 10.11 tests
--echo # --echo #
--echo # MDEV-32973 SHOW TABLES LIKE shows temporary tables with non-matching names --echo # MDEV-32973 SHOW TABLES LIKE shows temporary tables with non-matching names
--echo # --echo #

View File

@@ -3015,3 +3015,34 @@ connection default;
SET debug_sync='RESET'; SET debug_sync='RESET';
DROP TABLE t1; DROP TABLE t1;
disconnect con1; disconnect con1;
#
# MDEV-28567 Assertion `0' in open_tables upon function-related operation
#
CREATE VIEW v1 AS SELECT 1;
CREATE FUNCTION f1() RETURNS INT RETURN (SELECT * FROM v1);
connect con1, localhost, root;
SET debug_sync='open_and_process_table SIGNAL ready1 WAIT_FOR go1';
ALTER VIEW v1 AS SELECT f1();
connect con2, localhost, root;
SET debug_sync='now WAIT_FOR ready1';
SET debug_sync='mdl_after_find_deadlock SIGNAL ready2';
SELECT f1();
connect con3, localhost, root;
SET debug_sync='now WAIT_FOR ready2';
SET debug_sync='mdl_after_find_deadlock SIGNAL ready3';
DROP FUNCTION f1;
connection default;
SET debug_sync='now WAIT_FOR ready3';
SET debug_sync='now SIGNAL go1';
connection con3;
disconnect con3;
connection con2;
f1()
1
disconnect con2;
connection con1;
ERROR 42000: FUNCTION test.f1 does not exist
disconnect con1;
connection default;
DROP VIEW v1;
SET debug_sync='reset';

View File

@@ -4010,6 +4010,58 @@ DROP TABLE t1;
disconnect con1; disconnect con1;
--echo #
--echo # MDEV-28567 Assertion `0' in open_tables upon function-related operation
--echo #
#
# This test covers the deadlock recovery code in open_tables() after
# open_and_process_routine(). The ALTER VIEW v1 connection must be chosen as
# the deadlock victim to hit this code.
#
# The default connection cannot be used since its deadlock weight may have
# been inflated by the "MDL deadlock overweight" feature after previous
# deadlocks. In that case ALTER VIEW v1 would not be chosen as the deadlock
# victim and the test would not cover the intended code.
#
CREATE VIEW v1 AS SELECT 1;
CREATE FUNCTION f1() RETURNS INT RETURN (SELECT * FROM v1);
connect con1, localhost, root;
SET debug_sync='open_and_process_table SIGNAL ready1 WAIT_FOR go1';
send ALTER VIEW v1 AS SELECT f1(); # X v1, S f1
connect con2, localhost, root;
SET debug_sync='now WAIT_FOR ready1';
SET debug_sync='mdl_after_find_deadlock SIGNAL ready2';
send SELECT f1(); # S f1, SR v1
connect con3, localhost, root;
SET debug_sync='now WAIT_FOR ready2';
SET debug_sync='mdl_after_find_deadlock SIGNAL ready3';
send DROP FUNCTION f1; # X f1
connection default;
SET debug_sync='now WAIT_FOR ready3';
SET debug_sync='now SIGNAL go1';
connection con3;
reap;
disconnect con3;
connection con2;
reap;
disconnect con2;
connection con1;
--error ER_SP_DOES_NOT_EXIST
reap;
disconnect con1;
connection default;
DROP VIEW v1;
SET debug_sync='reset';
# Check that all connections opened by test cases in this file are really # Check that all connections opened by test cases in this file are really
# gone so execution of other tests won't be affected by their presence. # gone so execution of other tests won't be affected by their presence.
--source include/wait_until_count_sessions.inc --source include/wait_until_count_sessions.inc

View File

@@ -24,3 +24,41 @@ MariaDB [(none)]> select 1;
MariaDB [(none)]> exit MariaDB [(none)]> exit
Bye Bye
# End of 10.4 tests
#
# MDEV-36701 command line client doesn't check session_track information
#
create database db1;
use db1;
drop database db1;
create database db1;
execute immediate "use db1";
execute immediate "drop database db1";
exit
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is X
Server version: Y
Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> create database db1;
Query OK, 1 row affected
MariaDB [(none)]> use db1;
Database changed
MariaDB [db1]> drop database db1;
Query OK, 0 rows affected
MariaDB [(none)]> create database db1;
Query OK, 1 row affected
MariaDB [(none)]> execute immediate "use db1";
Query OK, 0 rows affected
MariaDB [db1]> execute immediate "drop database db1";
Query OK, 0 rows affected
MariaDB [(none)]> exit
Bye
# End of 10.11 tests

View File

@@ -22,3 +22,27 @@ if ($sys_errno == 127)
skip no socat; skip no socat;
} }
remove_file $MYSQL_TMP_DIR/mysql_in; remove_file $MYSQL_TMP_DIR/mysql_in;
--echo # End of 10.4 tests
--echo #
--echo # MDEV-36701 command line client doesn't check session_track information
--echo #
# test the old behavior (make sure session tracking didn't break it)
# and the new one, which didn't work before
write_file $MYSQL_TMP_DIR/mysql_in;
create database db1;
use db1;
drop database db1;
create database db1;
execute immediate "use db1";
execute immediate "drop database db1";
exit
EOF
let TERM=dumb;
replace_regex /id is \d+/id is X/ /Server version: .*/Server version: Y/ / \(\d+\.\d+ sec\)//;
error 0,127;
exec socat -t10 EXEC:"$MYSQL",pty STDIO < $MYSQL_TMP_DIR/mysql_in;
remove_file $MYSQL_TMP_DIR/mysql_in;
--echo # End of 10.11 tests
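# Editorial note on what the new case exercises: the client now picks up the
# current database from the session-tracking data the server sends back, which
# is why `execute immediate "use db1"` switches the prompt to [db1] above. The
# server-side switch for this is assumed here to be session_track_schema
# (ON by default):
#   set session session_track_schema=ON;   -- schema changes are reported back in the OK packet
#   set session session_track_schema=OFF;  -- the client is no longer told about schema changes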

View File

@@ -1,5 +1,7 @@
# Embedded server doesn't support external clients # Embedded server doesn't support external clients
--source include/not_embedded.inc --source include/not_embedded.inc
# MDEV-37169 - msan unknown failure
--source include/not_msan.inc
# #
# Test "mysqladmin ping" # Test "mysqladmin ping"
# #

View File

@@ -263,11 +263,11 @@ t1 CREATE TABLE `t1` (
`c01` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c01` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL,
`c02` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c02` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL,
`c03` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci NOT NULL, `c03` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci NOT NULL,
`c04` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c04` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci NOT NULL,
`c05` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c05` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL,
`c06` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c06` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL,
`c07` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c07` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci NOT NULL,
`c08` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c08` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci NOT NULL,
`c09` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c09` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL,
`c10` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c10` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL,
`c11` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL, `c11` varchar(6) CHARACTER SET latin2 COLLATE latin2_general_ci DEFAULT NULL,

View File

@@ -12203,6 +12203,23 @@ exp1 exp2
] 1 ] 1
DROP TABLE t1; DROP TABLE t1;
# #
# MDEV-30334 Optimizer trace produces invalid JSON with WHERE subquery
# Simple code rearrangement to stop it displaying an unsigned int in a String.
#
SET optimizer_trace= 'enabled=on';
CREATE TABLE t1 (id INT PRIMARY KEY);
INSERT INTO t1 VALUES (1),(2);
CREATE TABLE t2 (a INT);
INSERT INTO t2 VALUES (3),(4);
SELECT * FROM t1 WHERE id < ( SELECT SUM(a) FROM t2 );
id
1
2
SELECT JSON_VALID(trace) FROM information_schema.optimizer_trace;
JSON_VALID(trace)
1
DROP TABLE t1, t2;
#
# End of 10.4 tests # End of 10.4 tests
# #
set optimizer_trace='enabled=on'; set optimizer_trace='enabled=on';
@@ -12921,9 +12938,9 @@ SUBQ a
985 1 985 1
985 2 985 2
# The trace must be empty: # The trace must be empty:
select json_detailed(json_extract(trace, '$**.range-checked-for-each-record')) select json_detailed(json_extract(trace, '$**.range-checked-for-each-record')) as TRACE
from information_schema.optimizer_trace; from information_schema.optimizer_trace;
json_detailed(json_extract(trace, '$**.range-checked-for-each-record')) TRACE
NULL NULL
# The trace must be empty: # The trace must be empty:
select json_detailed(json_extract(trace, '$**.join_execution')) select json_detailed(json_extract(trace, '$**.join_execution'))

View File

@@ -855,6 +855,23 @@ SELECT b, a FROM t1 WHERE b <> 'p' OR a = 4 GROUP BY b, a HAVING a <= 7; SELECT
DROP TABLE t1; DROP TABLE t1;
--enable_view_protocol --enable_view_protocol
--echo #
--echo # MDEV-30334 Optimizer trace produces invalid JSON with WHERE subquery
--echo # Simple code rearrangement to stop it displaying an unsigned int in a String.
--echo #
SET optimizer_trace= 'enabled=on';
CREATE TABLE t1 (id INT PRIMARY KEY);
INSERT INTO t1 VALUES (1),(2);
CREATE TABLE t2 (a INT);
INSERT INTO t2 VALUES (3),(4);
SELECT * FROM t1 WHERE id < ( SELECT SUM(a) FROM t2 );
SELECT JSON_VALID(trace) FROM information_schema.optimizer_trace;
DROP TABLE t1, t2;
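# Editorial note: JSON_VALID over the full trace is the broadest check; if it
# ever fails again, individual nodes can be pulled out the same way later cases
# in this file do, e.g. (sketch, not executed here):
#   select json_detailed(json_extract(trace, '$**.join_optimization'))
#   from information_schema.optimizer_trace;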
--echo # --echo #
--echo # End of 10.4 tests --echo # End of 10.4 tests
--echo # --echo #
@@ -1214,7 +1231,7 @@ select
from t3; from t3;
--echo # The trace must be empty: --echo # The trace must be empty:
select json_detailed(json_extract(trace, '$**.range-checked-for-each-record')) select json_detailed(json_extract(trace, '$**.range-checked-for-each-record')) as TRACE
from information_schema.optimizer_trace; from information_schema.optimizer_trace;
--echo # The trace must be empty: --echo # The trace must be empty:
select json_detailed(json_extract(trace, '$**.join_execution')) select json_detailed(json_extract(trace, '$**.join_execution'))

View File

@@ -5965,9 +5965,145 @@ t1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VI
# Clean up # Clean up
DEALLOCATE PREPARE stmt; DEALLOCATE PREPARE stmt;
DROP VIEW t1; DROP VIEW t1;
#
# End of 10.4 tests # End of 10.4 tests
# #
# MDEV-35330: Assertion `marked_for_read()` failed in VSec9::VSec9 | Item_func_from_unixtime::get_date
#
CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a));
INSERT INTO t VALUES (1,'2025-07-18 18:37:10');
SELECT * FROM t;
a b
1 2025-07-18 18:37:10
EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT;
SELECT * FROM t;
a b
1 1970-01-01 09:00:01
DROP TABLE t;
CREATE TABLE t (a INT, b INT DEFAULT (a+5));
INSERT INTO t values (1,2), (2,DEFAULT);
EXECUTE IMMEDIATE 'INSERT INTO t VALUES (3,4), (4,?)' USING DEFAULT;
SELECT * FROM t;
a b
1 2
2 7
3 4
4 9
EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT;
SELECT * FROM t;
a b
1 6
2 7
3 8
4 9
DROP TABLE t;
CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a));
INSERT INTO t VALUES (1,'2025-07-18 18:37:10');
SELECT * FROM t;
a b
1 2025-07-18 18:37:10
PREPARE s FROM 'UPDATE t SET b=?';
EXECUTE s USING DEFAULT;
SELECT * FROM t;
a b
1 1970-01-01 09:00:01
DROP TABLE t;
CREATE TABLE t (a INT, b TIMESTAMP DEFAULT FROM_UNIXTIME(a), c INT DEFAULT (a+5));
INSERT INTO t VALUES (1,'2025-07-18 18:37:10',3);
SELECT * FROM t;
a b c
1 2025-07-18 18:37:10 3
EXECUTE IMMEDIATE 'UPDATE t SET b=?, c=?' USING DEFAULT, DEFAULT;
SELECT * FROM t;
a b c
1 1970-01-01 09:00:01 6
DROP TABLE t;
# End of 10.6 tests
#
# MDEV-34322: ASAN heap-buffer-overflow in Field::is_null / Item_param::assign_default or bogus ER_BAD_NULL_ERROR
#
CREATE TABLE t1 (a INT NOT NULL DEFAULT '0', b INT);
INSERT INTO t1 VALUES (1,11);
CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL;
EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT;
DROP TABLE t1;
CREATE TABLE t1 (a INT NOT NULL DEFAULT (30 + 100), b INT);
INSERT INTO t1 VALUES (1,11);
CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL;
EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT;
DROP TABLE t1;
CREATE TABLE t1 (a INT NOT NULL DEFAULT (b + 100), b INT);
INSERT INTO t1 VALUES (1,11);
CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL;
EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT;
DROP TABLE t1;
CREATE TABLE t1 (a INT NOT NULL DEFAULT (FLOOR(RAND()*100)), b INT);
INSERT INTO t1 VALUES (1,11);
CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL;
EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT;
DROP TABLE t1;
#
# MDEV-32694: ASAN errors in Binary_string::alloced_length / reset_stmt_params
#
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7);
PREPARE stmt FROM 'BEGIN NOT ATOMIC SELECT * FROM t1 LIMIT ?; SELECT * FROM t1 LIMIT ?; END';
# Expected output is the row (1) produced by the first query and
# the rows (1), (2), (3) produced by the second one
EXECUTE stmt USING 1, 3;
a
1
a
1
2
3
ALTER TABLE t1 ADD COLUMN f INT;
# Because the metadata of the table t1 has been changed,
# the second execution of the same prepared statement should result in
# re-compilation of the first statement enclosed in the BEGIN / END block,
# and since different actual values are provided to the positional parameters
# on the second execution, the expected output is the rows (1), (2), (3)
# produced by the first query and the rows (1), (2), (3), (4), (5)
# produced by the second one
EXECUTE stmt USING 3, 5;
a f
1 NULL
2 NULL
3 NULL
a f
1 NULL
2 NULL
3 NULL
4 NULL
5 NULL
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
# Check that the order of parameters is preserved after re-compilation of a
# failed statement inside an anonymous BEGIN / END block.
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1);
PREPARE stmt FROM 'BEGIN NOT ATOMIC SELECT a, ?, ? FROM t1; END';
# Expected output is the row (1, 10, 20)
EXECUTE stmt USING 10, 20;
a ? ?
1 10 20
ALTER TABLE t1 ADD COLUMN b INT;
# Expected output is the row (1, 300, 400)
EXECUTE stmt USING 300, 400;
a ? ?
1 300 400
ALTER TABLE t1 DROP COLUMN b;
# Expected output is the row (1, 500, 700)
EXECUTE stmt USING 500, 700;
a ? ?
1 500 700
ALTER TABLE t1 ADD COLUMN b INT;
# Expected output is the row (1, 700, 900)
EXECUTE stmt USING 700, 900;
a ? ?
1 700 900
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
# End of 11.4 tests
# #
# MDEV-25008 Delete query gets stuck on mariadb , same query works on MySQL 8.0.21 # MDEV-25008 Delete query gets stuck on mariadb , same query works on MySQL 8.0.21
# #
@@ -5986,7 +6122,5 @@ execute stmt using @var2;
delete from t1 where a=1 or b=2; delete from t1 where a=1 or b=2;
ERROR HY000: You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column ERROR HY000: You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column
drop table t1, t2; drop table t1, t2;
#
# End of 11.7 tests # End of 11.7 tests
#
ALTER DATABASE test CHARACTER SET utf8mb4 COLLATE utf8mb4_uca1400_ai_ci; ALTER DATABASE test CHARACTER SET utf8mb4 COLLATE utf8mb4_uca1400_ai_ci;

View File

@@ -5447,9 +5447,137 @@ EXECUTE stmt;
DEALLOCATE PREPARE stmt; DEALLOCATE PREPARE stmt;
DROP VIEW t1; DROP VIEW t1;
--echo #
--echo # End of 10.4 tests --echo # End of 10.4 tests
--echo # --echo #
--echo # MDEV-35330: Assertion `marked_for_read()` failed in VSec9::VSec9 | Item_func_from_unixtime::get_date
--echo #
CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a));
INSERT INTO t VALUES (1,'2025-07-18 18:37:10');
SELECT * FROM t;
EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT;
SELECT * FROM t;
DROP TABLE t;
CREATE TABLE t (a INT, b INT DEFAULT (a+5));
INSERT INTO t values (1,2), (2,DEFAULT);
EXECUTE IMMEDIATE 'INSERT INTO t VALUES (3,4), (4,?)' USING DEFAULT;
SELECT * FROM t;
EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT;
SELECT * FROM t;
DROP TABLE t;
CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a));
INSERT INTO t VALUES (1,'2025-07-18 18:37:10');
SELECT * FROM t;
PREPARE s FROM 'UPDATE t SET b=?';
EXECUTE s USING DEFAULT;
SELECT * FROM t;
DROP TABLE t;
CREATE TABLE t (a INT, b TIMESTAMP DEFAULT FROM_UNIXTIME(a), c INT DEFAULT (a+5));
INSERT INTO t VALUES (1,'2025-07-18 18:37:10',3);
SELECT * FROM t;
EXECUTE IMMEDIATE 'UPDATE t SET b=?, c=?' USING DEFAULT, DEFAULT;
SELECT * FROM t;
DROP TABLE t;
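# Editorial note: binding DEFAULT to a placeholder makes EXECUTE behave as if
# the statement used the DEFAULT keyword directly, so the UPDATEs above are
# equivalent to, e.g.:
#   UPDATE t SET b=DEFAULT;
# i.e. the column is recomputed from its DEFAULT expression (FROM_UNIXTIME(a),
# a+5) at update time.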
--echo # End of 10.6 tests
--echo #
--echo # MDEV-34322: ASAN heap-buffer-overflow in Field::is_null / Item_param::assign_default or bogus ER_BAD_NULL_ERROR
--echo #
CREATE TABLE t1 (a INT NOT NULL DEFAULT '0', b INT);
INSERT INTO t1 VALUES (1,11);
CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL;
EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT;
# Cleanup
DROP TABLE t1;
CREATE TABLE t1 (a INT NOT NULL DEFAULT (30 + 100), b INT);
INSERT INTO t1 VALUES (1,11);
CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL;
EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT;
# Cleanup
DROP TABLE t1;
CREATE TABLE t1 (a INT NOT NULL DEFAULT (b + 100), b INT);
INSERT INTO t1 VALUES (1,11);
CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL;
EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT;
# Cleanup
DROP TABLE t1;
CREATE TABLE t1 (a INT NOT NULL DEFAULT (FLOOR(RAND()*100)), b INT);
INSERT INTO t1 VALUES (1,11);
CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW SET @x = NULL;
EXECUTE IMMEDIATE "UPDATE t1 SET a = ?" USING DEFAULT;
# Cleanup
DROP TABLE t1;
--echo #
--echo # MDEV-32694: ASAN errors in Binary_string::alloced_length / reset_stmt_params
--echo #
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7);
PREPARE stmt FROM 'BEGIN NOT ATOMIC SELECT * FROM t1 LIMIT ?; SELECT * FROM t1 LIMIT ?; END';
--echo # Expected output is the row (1) produced by the first query and
--echo # the rows (1), (2), (3) produced by the second one
EXECUTE stmt USING 1, 3;
ALTER TABLE t1 ADD COLUMN f INT;
--echo # Because the metadata of the table t1 has been changed,
--echo # the second execution of the same prepared statement should result in
--echo # re-compilation of the first statement enclosed in the BEGIN / END block,
--echo # and since different actual values are provided to the positional parameters
--echo # on the second execution, the expected output is the rows (1), (2), (3)
--echo # produced by the first query and the rows (1), (2), (3), (4), (5)
--echo # produced by the second one
EXECUTE stmt USING 3, 5;
# Cleanup
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
--echo # Check that the order of parameters is preserved after re-compilation of a
--echo # failed statement inside an anonymous BEGIN / END block.
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1);
PREPARE stmt FROM 'BEGIN NOT ATOMIC SELECT a, ?, ? FROM t1; END';
--echo # Expected output is the row (1, 10, 20)
EXECUTE stmt USING 10, 20;
ALTER TABLE t1 ADD COLUMN b INT;
--echo # Expected output is the row (1, 300, 400)
EXECUTE stmt USING 300, 400;
ALTER TABLE t1 DROP COLUMN b;
--echo # Expected output is the row (1, 500, 700)
EXECUTE stmt USING 500, 700;
ALTER TABLE t1 ADD COLUMN b INT;
--echo # Expected output is the row (1, 700, 900)
EXECUTE stmt USING 700, 900;
# Cleanup
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
--echo # End of 11.4 tests
--echo # --echo #
--echo # MDEV-25008 Delete query gets stuck on mariadb , same query works on MySQL 8.0.21 --echo # MDEV-25008 Delete query gets stuck on mariadb , same query works on MySQL 8.0.21
@@ -5473,8 +5601,6 @@ execute stmt using @var2;
delete from t1 where a=1 or b=2; delete from t1 where a=1 or b=2;
drop table t1, t2; drop table t1, t2;
--echo #
--echo # End of 11.7 tests --echo # End of 11.7 tests
--echo #
--source include/test_db_charset_restore.inc --source include/test_db_charset_restore.inc

View File

@@ -265,6 +265,26 @@ test.t1 repair status OK
set myisam_repair_threads = default; set myisam_repair_threads = default;
drop table t1; drop table t1;
# End of 10.2 tests # End of 10.2 tests
USE test;
CREATE TEMPORARY TABLE t(c INT NOT NULL) ENGINE=CSV;
INSERT INTO t VALUES(1);
REPAIR TABLE t;
Table Op Msg_type Msg_text
test.t repair status OK
DELETE FROM t;
#
# MDEV-23207 Assertion `tl->table == __null' failed in THD::open_temporary_table
#
create table t1 (pk int primary key) engine=innodb partition by hash(pk) partitions 10;
create table t2 (c int) engine=innodb;
create temporary table t3 (c int);
repair table t1, t2, t3;
Table Op Msg_type Msg_text
test.t1 repair status OK
test.t2 repair status OK
test.t3 repair status OK
drop tables t1, t2;
# End of 10.11 tests
# #
# MDEV-33737 The way of ignoring alter-algorithm is inconsistent with # MDEV-33737 The way of ignoring alter-algorithm is inconsistent with
# other options and with itself # other options and with itself

View File

@@ -5,6 +5,7 @@
--source include/have_sequence.inc --source include/have_sequence.inc
--source include/default_charset.inc --source include/default_charset.inc
--source include/have_innodb.inc --source include/have_innodb.inc
--source include/have_partition.inc
call mtr.add_suppression("character set is multi-byte"); call mtr.add_suppression("character set is multi-byte");
call mtr.add_suppression("exists only for compatibility"); call mtr.add_suppression("exists only for compatibility");
@@ -281,6 +282,23 @@ drop table t1;
--echo # End of 10.2 tests --echo # End of 10.2 tests
USE test;
CREATE TEMPORARY TABLE t(c INT NOT NULL) ENGINE=CSV;
INSERT INTO t VALUES(1);
REPAIR TABLE t;
DELETE FROM t;
--echo #
--echo # MDEV-23207 Assertion `tl->table == __null' failed in THD::open_temporary_table
--echo #
create table t1 (pk int primary key) engine=innodb partition by hash(pk) partitions 10;
create table t2 (c int) engine=innodb;
create temporary table t3 (c int);
repair table t1, t2, t3;
drop tables t1, t2;
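# Editorial note: t3 (a temporary table) needs no explicit cleanup here;
# temporary tables are dropped automatically when the session ends.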
--echo # End of 10.11 tests
--echo # --echo #
--echo # MDEV-33737 The way of ignoring alter-algorithm is inconsistent with --echo # MDEV-33737 The way of ignoring alter-algorithm is inconsistent with
--echo # other options and with itself --echo # other options and with itself

Some files were not shown because too many files have changed in this diff.