From 114476f2ec8244bd02b0f51fdaca05e055a2c1dc Mon Sep 17 00:00:00 2001 From: Haidong Ji Date: Mon, 7 Mar 2022 21:17:39 -0600 Subject: [PATCH 01/14] MDEV-27978 fix wrong name in error when max_session_mem_used exceeded Fixed typo in my_malloc_size_cb_func. There is no max-thread-mem-used sys variable in MariaDB, only max-session-mem-used. The relevant entry in sys_vars.cc is also fixed. Added a fallback case in case we could allocate the 256 bytes for the error message containing the exact setting. --- mysql-test/r/error_simulation.result | 10 ++++++++++ mysql-test/r/truncate_notembedded.result | 2 +- mysql-test/t/error_simulation.test | 13 +++++++++++++ sql/mysqld.cc | 7 ++++++- sql/sys_vars.cc | 2 +- 5 files changed, 31 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/error_simulation.result b/mysql-test/r/error_simulation.result index 457e5c8ec9c..680937accfd 100644 --- a/mysql-test/r/error_simulation.result +++ b/mysql-test/r/error_simulation.result @@ -128,3 +128,13 @@ SELECT f1(1); Got one of the listed errors DROP FUNCTION f1; SET debug_dbug= @saved_dbug; +# +# MDEV-27978 wrong option name in error when exceeding max_session_mem_used +# +SET SESSION max_session_mem_used = 8192; +SELECT * FROM information_schema.processlist; +ERROR HY000: The MariaDB server is running with the --max-session-mem-used=8192 option so it cannot execute this statement +SET SESSION max_session_mem_used = DEFAULT; +# +# End of 10.2 tests +# diff --git a/mysql-test/r/truncate_notembedded.result b/mysql-test/r/truncate_notembedded.result index dabd5474141..90f27fd2688 100644 --- a/mysql-test/r/truncate_notembedded.result +++ b/mysql-test/r/truncate_notembedded.result @@ -13,7 +13,7 @@ a UNLOCK TABLES; connection con1; TRUNCATE TABLE t1; -ERROR HY000: The MariaDB server is running with the --max-thread-mem-used=45500 option so it cannot execute this statement +ERROR HY000: The MariaDB server is running with the --max-session-mem-used=45500 option so it cannot execute this statement disconnect con1; connection default; DROP TABLE t1; diff --git a/mysql-test/t/error_simulation.test b/mysql-test/t/error_simulation.test index f713e2da6ba..2c155bc9a22 100644 --- a/mysql-test/t/error_simulation.test +++ b/mysql-test/t/error_simulation.test @@ -158,3 +158,16 @@ SET SESSION debug_dbug="+d,simulate_create_virtual_tmp_table_out_of_memory"; SELECT f1(1); DROP FUNCTION f1; SET debug_dbug= @saved_dbug; + +--echo # +--echo # MDEV-27978 wrong option name in error when exceeding max_session_mem_used +--echo # +SET SESSION max_session_mem_used = 8192; +--error ER_OPTION_PREVENTS_STATEMENT +SELECT * FROM information_schema.processlist; +SET SESSION max_session_mem_used = DEFAULT; + + +--echo # +--echo # End of 10.2 tests +--echo # diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 6a7ea117c84..7f18f758d13 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -4090,13 +4090,18 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific) /* Ensure we don't get called here again */ char buf[50], *buf2; thd->set_killed(KILL_QUERY); - my_snprintf(buf, sizeof(buf), "--max-thread-mem-used=%llu", + my_snprintf(buf, sizeof(buf), "--max-session-mem-used=%llu", thd->variables.max_mem_used); if ((buf2= (char*) thd->alloc(256))) { my_snprintf(buf2, 256, ER_THD(thd, ER_OPTION_PREVENTS_STATEMENT), buf); thd->set_killed(KILL_QUERY, ER_OPTION_PREVENTS_STATEMENT, buf2); } + else + { + thd->set_killed(KILL_QUERY, ER_OPTION_PREVENTS_STATEMENT, + "--max-session-mem-used"); + } } DBUG_ASSERT((longlong) 
thd->status_var.local_memory_used >= 0 || !debug_assert_on_not_freed_memory); diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index ee862e4936e..6a35176bd99 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -5719,7 +5719,7 @@ static Sys_var_ulong Sys_log_tc_size( BLOCK_SIZE(my_getpagesize())); #endif -static Sys_var_ulonglong Sys_max_thread_mem( +static Sys_var_ulonglong Sys_max_session_mem_used( "max_session_mem_used", "Amount of memory a single user session " "is allowed to allocate. This limits the value of the " "session variable MEM_USED", SESSION_VAR(max_mem_used), From 6de482a6fefac0c21daf33ed465644151cdf879f Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Fri, 4 Mar 2022 16:46:06 +1100 Subject: [PATCH 02/14] MDEV-28011: debian autobake cleanup Travis is dead to us so we don't need all the conditions around it. Remove depends for no longer supported versions Debian Jessies, and Ubuntu Trusty, Xenial, Wily are all eol as far as we are concerned. The dependancy on an apt cache when running autobake broke the 10.2 aarch64 packages (MDEV-28014). Lets reduce the risk here. --- debian/autobake-deb.sh | 110 +++++------------------------------------ debian/control | 5 +- 2 files changed, 13 insertions(+), 102 deletions(-) diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh index 21ab4d8fe22..a9715dc30fd 100755 --- a/debian/autobake-deb.sh +++ b/debian/autobake-deb.sh @@ -9,31 +9,7 @@ set -e # On Buildbot, don't run the mysql-test-run test suite as part of build. # It takes a lot of time, and we will do a better test anyway in # Buildbot, running the test suite from installed .debs on a clean VM. -# On Travis-CI we want to simulate the full build, including tests. -# Also on Travis-CI it is useful not to override the DEB_BUILD_OPTIONS -# at this stage at all. -if [[ ! $TRAVIS ]] -then - export DEB_BUILD_OPTIONS="nocheck" -fi - -# Travis-CI optimizations -if [[ $TRAVIS ]] -then - # On Travis-CI, the log must stay under 4MB so make the build less verbose - sed -i -e '/Add support for verbose builds/,+2d' debian/rules - - # Don't include test suite package on Travis-CI to make the build time shorter - sed '/Package: mariadb-test-data/,/^$/d' -i debian/control - sed '/Package: mariadb-test/,/^$/d' -i debian/control - sed '/Package: mariadb-plugin-tokudb/,/^$/d' -i debian/control - sed '/Package: mariadb-plugin-mroonga/,/^$/d' -i debian/control - sed '/Package: mariadb-plugin-spider/,/^$/d' -i debian/control - sed '/Package: mariadb-plugin-oqgraph/,/^$/d' -i debian/control - export MYSQL_COMPILER_LAUNCHER=ccache - sed 's|-DDEB|-DPLUGIN_TOKUDB=NO -DPLUGIN_MROONGA=NO -DPLUGIN_SPIDER=NO -DPLUGIN_OQGRAPH=NO -DPLUGIN_PERFSCHEMA=NO -WITH_EMBEDDED_SERVER=OFF -DDEB|' -i debian/rules -fi - +export DEB_BUILD_OPTIONS="nocheck" # Look up distro-version specific stuff # @@ -41,52 +17,8 @@ fi # Debian policy and targetting Debian Sid. Then case-by-case run in autobake-deb.sh # tests for backwards compatibility and strip away parts on older builders. -# If iproute2 is not available (before Debian Jessie and Ubuntu Trusty) -# fall back to the old iproute package. -if ! apt-cache madison iproute2 | grep 'iproute2 *|' >/dev/null 2>&1 -then - sed 's/iproute2/iproute/' -i debian/control -fi - -# If libcrack2 (>= 2.9.0) is not available (before Debian Jessie and Ubuntu Trusty) -# clean away the cracklib stanzas so the package can build without them. -if ! 
apt-cache madison libcrack2-dev | grep 'libcrack2-dev *| *2\.9' >/dev/null 2>&1 -then - sed '/libcrack2-dev/d' -i debian/control - sed '/Package: mariadb-plugin-cracklib/,/^$/d' -i debian/control -fi - -# If libpcre3-dev (>= 2:8.35-3.2~) is not available (before Debian Jessie or Ubuntu Wily) -# clean away the PCRE3 stanzas so the package can build without them. -# Update check when version 2:8.40 or newer is available. -if ! apt-cache madison libpcre3-dev | grep 'libpcre3-dev *| *2:8\.3[2-9]' >/dev/null 2>&1 -then - sed '/libpcre3-dev/d' -i debian/control -fi - -# If libsystemd-dev is not available (before Debian Jessie or Ubuntu Wily) -# clean away the systemd stanzas so the package can build without them. -if ! apt-cache madison libsystemd-dev | grep 'libsystemd-dev' >/dev/null 2>&1 -then - sed '/dh-systemd/d' -i debian/control - sed '/libsystemd-dev/d' -i debian/control - sed 's/ --with systemd//' -i debian/rules - sed '/systemd/d' -i debian/rules - sed '/\.service/d' -i debian/rules - sed '/galera_new_cluster/d' -i debian/mariadb-server-10.2.install - sed '/galera_recovery/d' -i debian/mariadb-server-10.2.install - sed '/mariadb-service-convert/d' -i debian/mariadb-server-10.2.install -fi - -# Convert gcc version to numberical value. Format is Mmmpp where M is Major -# version, mm is minor version and p is patch. -# -dumpfullversion & -dumpversion to make it uniform across old and new (>=7) -GCCVERSION=$(gcc -dumpfullversion -dumpversion | sed -e 's/\.\([0-9][0-9]\)/\1/g' \ - -e 's/\.\([0-9]\)/0\1/g' \ - -e 's/^[0-9]\{3,4\}$/&00/') -# Don't build rocksdb package if gcc version is less than 4.8 or we are running on -# x86 32 bit. -if [[ $GCCVERSION -lt 40800 ]] || [[ $(arch) =~ i[346]86 ]] +# Don't build rocksdb package on x86 32 bit. +if [[ $(arch) =~ i[346]86 ]] then sed '/Package: mariadb-plugin-rocksdb/,/^$/d' -i debian/control fi @@ -106,15 +38,6 @@ then sed '/Package: mariadb-plugin-cassandra/,/^$/d' -i debian/control fi -# From Debian Stretch/Ubuntu Bionic onwards dh-systemd is just an empty -# transitional metapackage and the functionality was merged into debhelper. -# In Ubuntu Hirsute is was completely removed, so it can't be referenced anymore. -# Keep using it only on Debian Jessie and Ubuntu Xenial. -if apt-cache madison dh-systemd | grep 'dh-systemd' >/dev/null 2>&1 -then - sed 's/debhelper (>= 9.20160709~),/debhelper (>= 9), dh-systemd,/' -i debian/control -fi - # Adjust changelog, add new version echo "Incrementing changelog and starting build scripts" @@ -133,13 +56,6 @@ dch -b -D ${CODENAME} -v "${EPOCH}${UPSTREAM}${PATCHLEVEL}~${CODENAME}" "Automat echo "Creating package version ${EPOCH}${UPSTREAM}${PATCHLEVEL}~${CODENAME} ... " -# On Travis CI, use -b to build binary only packages as there is no need to -# waste time on generating the source package. -if [[ $TRAVIS ]] -then - BUILDPACKAGE_FLAGS="-b" -fi - # Build the package # Pass -I so that .git and other unnecessary temporary and source control files # will be ignored by dpkg-source when creating the tar.gz source package. @@ -148,17 +64,13 @@ fakeroot dpkg-buildpackage -us -uc -I $BUILDPACKAGE_FLAGS # If the step above fails due to missing dependencies, you can manually run # sudo mk-build-deps debian/control -r -i -# Don't log package contents on Travis-CI to save time and log size -if [[ ! $TRAVIS ]] -then - echo "List package contents ..." - cd .. 
- for package in `ls *.deb` - do - echo $package | cut -d '_' -f 1 - dpkg-deb -c $package | awk '{print $1 " " $2 " " $6}' | sort -k 3 - echo "------------------------------------------------" - done -fi +echo "List package contents ..." +cd .. +for package in `ls *.deb` +do + echo $package | cut -d '_' -f 1 + dpkg-deb -c $package | awk '{print $1 " " $2 " " $6}' | sort -k 3 + echo "------------------------------------------------" +done echo "Build complete" diff --git a/debian/control b/debian/control index 372abafe205..bcae798e4c1 100644 --- a/debian/control +++ b/debian/control @@ -5,9 +5,8 @@ Maintainer: MariaDB Developers Build-Depends: bison, chrpath, cmake (>= 2.7), - debhelper (>= 9.20160709~), + debhelper (>= 10), dh-apparmor, - dh-systemd, dpatch, gdb, libaio-dev [linux-any], @@ -353,7 +352,7 @@ Depends: bsdutils, findutils, galera-3 (>=25.3), gawk, - iproute | iproute2, + iproute2, libdbi-perl, lsb-base (>= 3.0-10), lsof, From ed6e271f786504916dbcbd3d55ee17cd3f2566ef Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Mon, 14 Mar 2022 00:59:44 +0200 Subject: [PATCH 03/14] MDEV-28036 gcol.gcol_supported_sql_funcs_xxx fail in FIPS mode --- mysql-test/suite/gcol/inc/gcol_supported_sql_funcs_main.inc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mysql-test/suite/gcol/inc/gcol_supported_sql_funcs_main.inc b/mysql-test/suite/gcol/inc/gcol_supported_sql_funcs_main.inc index 88268ddd6c4..3f48f86ce7c 100644 --- a/mysql-test/suite/gcol/inc/gcol_supported_sql_funcs_main.inc +++ b/mysql-test/suite/gcol/inc/gcol_supported_sql_funcs_main.inc @@ -12,6 +12,9 @@ # Change Date: # # Change: # ################################################################################ + +--source include/have_des.inc + set time_zone="+03:00"; --echo # --echo # NUMERIC FUNCTIONS From bfed2c7d57a7ca34936d6ef0688af7357592dc40 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 11 Mar 2022 20:18:22 +0100 Subject: [PATCH 04/14] MDEV-27753 Incorrect ENGINE type of table after crash for CONNECT table whenever possible, partitioning should use the full partition plugin name, not the one byte legacy code. Normally, ha_partition can get the engine plugin from table_share->default_part_plugin. But in some cases, e.g. in DROP TABLE, the table isn't opened, table_share is NULL, and ha_partition has to parse the frm, much like dd_frm_type() does. temporary_tables.cc, sql_table.cc: When dropping a table, it must be deleted in the engine first, then frm file. Because frm can be the only true source of metadata that the engine might need for DROP. table.cc: when opening a partitioned table, if the engine for partitions is not found, do not fallback to MyISAM. 
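In outline, the quick_rm_table() change in this patch swaps the two deletions so that the engine drop runs before the .frm removal (condensed from the sql_table.cc hunk below; surrounding flag handling omitted):

    if (!(flags & (FRM_ONLY|NO_HA_TABLE)))
      error|= ha_delete_table(current_thd, base, path, db, table_name, 0);

    /* Only after the engine has dropped the table, remove the .frm file:
       the frm can be the only source of metadata the engine needs for
       DROP, e.g. the default partition engine. */
    memcpy(path + path_length, reg_ext, reg_ext_length + 1);
    if (mysql_file_delete(key_file_frm, path, MYF(0)))
      error= 1;

THD::rm_temporary_table() in temporary_tables.cc is reordered the same way.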
--- mysql-test/main/drop_bad_db_type.result | 54 +++++++----- mysql-test/main/drop_bad_db_type.test | 19 +++-- .../main/partition_not_blackhole.result | 2 +- mysql-test/main/partition_not_blackhole.test | 2 +- sql/ha_partition.cc | 84 +++++++++++++++++-- sql/ha_partition.h | 3 +- sql/sql_table.cc | 12 +-- sql/table.cc | 2 +- sql/temporary_tables.cc | 13 +-- sql/unireg.h | 15 ++++ 10 files changed, 159 insertions(+), 47 deletions(-) diff --git a/mysql-test/main/drop_bad_db_type.result b/mysql-test/main/drop_bad_db_type.result index ae6fe708e60..97869a39aa3 100644 --- a/mysql-test/main/drop_bad_db_type.result +++ b/mysql-test/main/drop_bad_db_type.result @@ -3,34 +3,50 @@ SET debug_dbug='+d,unstable_db_type'; install soname 'ha_archive'; create table t1 (a int) engine=archive; insert t1 values (1),(2),(3); +create table t2 (a int) engine=archive partition by hash(a) partitions 3; flush tables; uninstall soname 'ha_archive'; -select table_schema, table_name from information_schema.tables where table_name like 't1'; -table_schema test -table_name t1 -select table_schema, table_name, engine, version from information_schema.tables where table_name like 't1'; -table_schema test -table_name t1 -engine ARCHIVE -version NULL +select table_schema, table_name from information_schema.tables where table_name like 't_' order by 1,2; +table_schema table_name +test t1 +test t2 +select table_schema, table_name, engine, version from information_schema.tables where table_name like 't_' order by 1,2; +table_schema table_name engine version +test t1 ARCHIVE NULL +test t2 NULL NULL Warnings: -Level Warning -Code 1286 -Message Unknown storage engine 'ARCHIVE' -select table_schema, table_name, engine, row_format from information_schema.tables where table_name like 't1'; -table_schema test -table_name t1 -engine ARCHIVE -row_format NULL +Warning 1033 Incorrect information in file: './test/t2.frm' +Warning 1286 Unknown storage engine 'ARCHIVE' +select table_schema, table_name, engine, row_format from information_schema.tables where table_name like 't_' order by 1,2; +table_schema table_name engine row_format +test t1 ARCHIVE NULL +test t2 NULL NULL Warnings: -Level Warning -Code 1286 -Message Unknown storage engine 'ARCHIVE' +Warning 1033 Incorrect information in file: './test/t2.frm' +Warning 1286 Unknown storage engine 'ARCHIVE' install soname 'ha_archive'; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL +) ENGINE=ARCHIVE DEFAULT CHARSET=latin1 +show create table t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) DEFAULT NULL +) ENGINE=ARCHIVE DEFAULT CHARSET=latin1 + PARTITION BY HASH (`a`) +PARTITIONS 3 db.opt t1.ARZ t1.frm +t2#P#p0.ARZ +t2#P#p1.ARZ +t2#P#p2.ARZ +t2.frm +t2.par drop table t1; +drop table t2; db.opt uninstall soname 'ha_archive'; SET debug_dbug=@saved_dbug; diff --git a/mysql-test/main/drop_bad_db_type.test b/mysql-test/main/drop_bad_db_type.test index ebc732104d3..0fb5fe5edf4 100644 --- a/mysql-test/main/drop_bad_db_type.test +++ b/mysql-test/main/drop_bad_db_type.test @@ -1,4 +1,4 @@ - +--source include/have_partition.inc --source include/have_debug.inc if (!$HA_ARCHIVE_SO) { @@ -13,18 +13,25 @@ SET debug_dbug='+d,unstable_db_type'; install soname 'ha_archive'; create table t1 (a int) engine=archive; insert t1 values (1),(2),(3); + +create table t2 (a int) engine=archive partition by hash(a) partitions 3; + flush tables; uninstall soname 'ha_archive'; ---vertical_results -select table_schema, table_name from information_schema.tables 
where table_name like 't1'; -select table_schema, table_name, engine, version from information_schema.tables where table_name like 't1'; -select table_schema, table_name, engine, row_format from information_schema.tables where table_name like 't1'; ---horizontal_results +select table_schema, table_name from information_schema.tables where table_name like 't_' order by 1,2; +--replace_result $mysqld_datadir ./ +select table_schema, table_name, engine, version from information_schema.tables where table_name like 't_' order by 1,2; +--replace_result $mysqld_datadir ./ +select table_schema, table_name, engine, row_format from information_schema.tables where table_name like 't_' order by 1,2; install soname 'ha_archive'; +show create table t1; +show create table t2; + --list_files $mysqld_datadir/test drop table t1; +drop table t2; --list_files $mysqld_datadir/test uninstall soname 'ha_archive'; diff --git a/mysql-test/main/partition_not_blackhole.result b/mysql-test/main/partition_not_blackhole.result index ff1e51df892..7759f947c32 100644 --- a/mysql-test/main/partition_not_blackhole.result +++ b/mysql-test/main/partition_not_blackhole.result @@ -9,7 +9,7 @@ SHOW TABLES; Tables_in_test t1 SHOW CREATE TABLE t1; -ERROR HY000: Failed to read from the .par file +ERROR HY000: Incorrect information in file: './test/t1.frm' DROP TABLE t1; ERROR HY000: Got error 1 "Operation not permitted" from storage engine partition t1.frm diff --git a/mysql-test/main/partition_not_blackhole.test b/mysql-test/main/partition_not_blackhole.test index d9e653b5252..fe7452432b2 100644 --- a/mysql-test/main/partition_not_blackhole.test +++ b/mysql-test/main/partition_not_blackhole.test @@ -17,7 +17,7 @@ let $MYSQLD_DATADIR= `SELECT @@datadir`; --copy_file std_data/parts/t1_blackhole.par $MYSQLD_DATADIR/test/t1.par SHOW TABLES; --replace_result $MYSQLD_DATADIR ./ ---error ER_FAILED_READ_FROM_PAR_FILE +--error ER_NOT_FORM_FILE SHOW CREATE TABLE t1; # The replace is needed for Solaris diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index bf6fb816b5d..dc4ec0407db 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -3059,11 +3059,12 @@ err1: @retval true Failure */ -bool ha_partition::setup_engine_array(MEM_ROOT *mem_root) +bool ha_partition::setup_engine_array(MEM_ROOT *mem_root, + handlerton* first_engine) { uint i; uchar *buff; - handlerton **engine_array, *first_engine; + handlerton **engine_array; enum legacy_db_type db_type, first_db_type; DBUG_ASSERT(!m_file); @@ -3073,11 +3074,8 @@ bool ha_partition::setup_engine_array(MEM_ROOT *mem_root) DBUG_RETURN(true); buff= (uchar *) (m_file_buffer + PAR_ENGINES_OFFSET); - first_db_type= (enum legacy_db_type) buff[0]; - first_engine= ha_resolve_by_legacy_type(ha_thd(), first_db_type); - if (!first_engine) - goto err; + first_db_type= (enum legacy_db_type) buff[0]; if (!(m_engine_array= (plugin_ref*) alloc_root(&m_mem_root, m_tot_parts * sizeof(plugin_ref)))) goto err; @@ -3118,6 +3116,74 @@ err: } +handlerton *ha_partition::get_def_part_engine(const char *name) +{ + if (table_share) + { + if (table_share->default_part_plugin) + return plugin_data(table_share->default_part_plugin, handlerton *); + } + else + { + // DROP TABLE, for example + char buff[FN_REFLEN]; + File file; + MY_STAT state; + uchar *frm_image= 0; + handlerton *hton= 0; + bool use_legacy_type= false; + + fn_format(buff, name, "", reg_ext, MY_APPEND_EXT); + + file= mysql_file_open(key_file_frm, buff, O_RDONLY | O_SHARE, MYF(0)); + if (file < 0) + return NULL; + + if (mysql_file_fstat(file, &state, 
MYF(MY_WME))) + goto err; + if (state.st_size <= 64) + goto err; + if (!(frm_image= (uchar*)my_malloc(state.st_size, MYF(MY_WME)))) + goto err; + if (mysql_file_read(file, frm_image, state.st_size, MYF(MY_NABP))) + goto err; + + if (frm_image[64] != '/') + { + const uchar *e2= frm_image + 64; + const uchar *e2end = e2 + uint2korr(frm_image + 4); + if (e2end > frm_image + state.st_size) + goto err; + while (e2 + 3 < e2end) + { + uchar type= *e2++; + size_t length= extra2_read_len(&e2, e2end); + if (!length) + goto err; + if (type == EXTRA2_DEFAULT_PART_ENGINE) + { + LEX_CSTRING name= { (char*)e2, length }; + plugin_ref plugin= ha_resolve_by_name(ha_thd(), &name, false); + if (plugin) + hton= plugin_data(plugin, handlerton *); + goto err; + } + e2+= length; + } + } + use_legacy_type= true; +err: + my_free(frm_image); + mysql_file_close(file, MYF(0)); + if (!use_legacy_type) + return hton; + } + + return ha_resolve_by_legacy_type(ha_thd(), + (enum legacy_db_type)m_file_buffer[PAR_ENGINES_OFFSET]); +} + + /** Get info about partition engines and their names from the .par file @@ -3145,7 +3211,11 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root, if (read_par_file(name)) DBUG_RETURN(true); - if (!is_clone && setup_engine_array(mem_root)) + handlerton *default_engine= get_def_part_engine(name); + if (!default_engine) + DBUG_RETURN(true); + + if (!is_clone && setup_engine_array(mem_root, default_engine)) DBUG_RETURN(true); DBUG_RETURN(false); diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 48552301f55..36e18d15c0a 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -580,8 +580,9 @@ private: And one method to read it in. */ bool create_handler_file(const char *name); - bool setup_engine_array(MEM_ROOT *mem_root); + bool setup_engine_array(MEM_ROOT *mem_root, handlerton *first_engine); bool read_par_file(const char *name); + handlerton *get_def_part_engine(const char *name); bool get_from_handler_file(const char *name, MEM_ROOT *mem_root, bool is_clone); bool new_handlers_from_part_info(MEM_ROOT *mem_root); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index f06ae3d05dc..07adda33ad3 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2802,15 +2802,13 @@ bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db, const LEX_CSTRING *table_name, uint flags, const char *table_path) { char path[FN_REFLEN + 1]; + const size_t pathmax = sizeof(path) - 1 - reg_ext_length; int error= 0; DBUG_ENTER("quick_rm_table"); size_t path_length= table_path ? 
- (strxnmov(path, sizeof(path) - 1, table_path, reg_ext, NullS) - path) : - build_table_filename(path, sizeof(path)-1, db->str, table_name->str, reg_ext, flags); - if (mysql_file_delete(key_file_frm, path, MYF(0))) - error= 1; /* purecov: inspected */ - path[path_length - reg_ext_length]= '\0'; // Remove reg_ext + (strxnmov(path, pathmax, table_path, NullS) - path) : + build_table_filename(path, pathmax, db->str, table_name->str, "", flags); if (flags & NO_HA_TABLE) { handler *file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, base); @@ -2822,6 +2820,10 @@ bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db, if (!(flags & (FRM_ONLY|NO_HA_TABLE))) error|= ha_delete_table(current_thd, base, path, db, table_name, 0); + memcpy(path + path_length, reg_ext, reg_ext_length + 1); + if (mysql_file_delete(key_file_frm, path, MYF(0))) + error= 1; + if (likely(error == 0)) { PSI_CALL_drop_table_share(flags & FN_IS_TMP, db->str, (uint)db->length, diff --git a/sql/table.cc b/sql/table.cc index 9fda107a021..5e30732fc82 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1370,7 +1370,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if (frm_image[61] && !share->default_part_plugin) { enum legacy_db_type db_type= (enum legacy_db_type) (uint) frm_image[61]; - share->default_part_plugin= ha_lock_engine(NULL, ha_checktype(thd, db_type)); + share->default_part_plugin= ha_lock_engine(NULL, ha_checktype(thd, db_type, 1)); if (!share->default_part_plugin) goto err; } diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc index e957488e3db..26036fba7e0 100644 --- a/sql/temporary_tables.cc +++ b/sql/temporary_tables.cc @@ -698,19 +698,20 @@ bool THD::rm_temporary_table(handlerton *base, const char *path) char frm_path[FN_REFLEN + 1]; strxnmov(frm_path, sizeof(frm_path) - 1, path, reg_ext, NullS); - if (mysql_file_delete(key_file_frm, frm_path, MYF(0))) - { - error= true; - } - file= get_new_handler((TABLE_SHARE*) 0, current_thd->mem_root, base); + + file= get_new_handler((TABLE_SHARE*) 0, mem_root, base); if (file && file->ha_delete_table(path)) { error= true; sql_print_warning("Could not remove temporary table: '%s', error: %d", path, my_errno); } - delete file; + + if (mysql_file_delete(key_file_frm, frm_path, MYF(0))) + { + error= true; + } DBUG_RETURN(error); } diff --git a/sql/unireg.h b/sql/unireg.h index e9c62334e86..03d63d0fd06 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -188,6 +188,21 @@ enum extra2_field_flags { VERS_OPTIMIZED_UPDATE= 1 << INVISIBLE_MAX_BITS }; +static inline size_t extra2_read_len(const uchar **extra2, const uchar *end) +{ + size_t length= *(*extra2)++; + if (length) + return length; + + if ((*extra2) + 2 >= end) + return 0; + length= uint2korr(*extra2); + (*extra2)+= 2; + if (length < 256 || *extra2 + length > end) + return 0; + return length; +} + int rea_create_table(THD *thd, LEX_CUSTRING *frm, const char *path, const char *db, const char *table_name, HA_CREATE_INFO *create_info, handler *file, From 03c3dc63655aabcfc309208188e44c200f680404 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Sat, 12 Mar 2022 15:38:44 +0400 Subject: [PATCH 05/14] MDEV-23210 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on ALTER TABLE, SELECT and INSERT Problem: Parse-time conversion from binary to tricky character sets like utf32 produced ill-formed strings. So, later a chash happened in debug builds, or a wrong SHOW CREATE TABLE was returned in release builds. Fix: 1. 
Backporting a few methods from 10.3: - THD::check_string_for_wellformedness() - THD::convert_string() overloads - THD::make_text_string_connection() 2. Adding a new method THD::reinterpret_string_from_binary(), which makes sure to either returns a well-formed string (optionally prepending with zero bytes), or returns an error. --- mysql-test/r/ctype_utf32.result | 23 ++++++++++ mysql-test/r/ctype_utf32_uca.result | 15 +++++++ mysql-test/t/ctype_utf32.test | 19 ++++++++ mysql-test/t/ctype_utf32_uca.test | 13 ++++++ sql/sql_class.cc | 69 ++++++++++++++++++++++++++++- sql/sql_class.h | 63 +++++++++++++++++++++++++- sql/sql_yacc.yy | 10 +---- 7 files changed, 202 insertions(+), 10 deletions(-) diff --git a/mysql-test/r/ctype_utf32.result b/mysql-test/r/ctype_utf32.result index 584ca12f8c3..143fff9e419 100644 --- a/mysql-test/r/ctype_utf32.result +++ b/mysql-test/r/ctype_utf32.result @@ -2890,5 +2890,28 @@ HEX(c1) 0000006100000063 DROP TABLE t1; # +# MDEV-23210 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on ALTER TABLE, SELECT and INSERT +# +CREATE TABLE t1 (a CHAR(1)); +SET COLLATION_CONNECTION=utf32_general_ci, CHARACTER_SET_CLIENT=binary; +ALTER TABLE t1 CHANGE a a ENUM('a','a') CHARACTER SET utf32; +ERROR HY000: Column 'a' has duplicated value 'a' in ENUM +ALTER TABLE t1 CHANGE a a ENUM('aaa') CHARACTER SET utf32; +ERROR HY000: Invalid utf32 character string: '\x00aaa' +ALTER TABLE t1 CHANGE a a ENUM('aa') CHARACTER SET utf32; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` enum('慡') CHARACTER SET utf32 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +ALTER TABLE t1 CHANGE a a ENUM('a','b') CHARACTER SET utf32; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` enum('a','b') CHARACTER SET utf32 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +SET NAMES utf8; +# # End of 10.2 tests # diff --git a/mysql-test/r/ctype_utf32_uca.result b/mysql-test/r/ctype_utf32_uca.result index 46ca6e7baee..2f6e44dc402 100644 --- a/mysql-test/r/ctype_utf32_uca.result +++ b/mysql-test/r/ctype_utf32_uca.result @@ -7941,5 +7941,20 @@ EXECUTE s; DEALLOCATE PREPARE s; SET NAMES utf8; # +# MDEV-23210 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on ALTER TABLE, SELECT and INSERT +# +CREATE TABLE t1 (a CHAR(1)); +SET COLLATION_CONNECTION=utf32_myanmar_ci, CHARACTER_SET_CLIENT=binary; +ALTER TABLE t1 CHANGE a a ENUM('a','a') CHARACTER SET utf32; +ERROR HY000: Column 'a' has duplicated value 'a' in ENUM +ALTER TABLE t1 CHANGE a a ENUM('a','b') CHARACTER SET utf32; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` enum('a','b') CHARACTER SET utf32 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +SET NAMES utf8; +# # End of 10.2 tests # diff --git a/mysql-test/t/ctype_utf32.test b/mysql-test/t/ctype_utf32.test index 891fd14d15f..46ff333b5f7 100644 --- a/mysql-test/t/ctype_utf32.test +++ b/mysql-test/t/ctype_utf32.test @@ -1048,6 +1048,25 @@ INSERT INTO t1 (c1) VALUES (1),(2),(3); SELECT HEX(c1) FROM t1 ORDER BY c1; DROP TABLE t1; + +--echo # +--echo # MDEV-23210 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on ALTER TABLE, SELECT and INSERT +--echo # + +CREATE TABLE t1 (a CHAR(1)); +SET COLLATION_CONNECTION=utf32_general_ci, CHARACTER_SET_CLIENT=binary; +--error ER_DUPLICATED_VALUE_IN_TYPE +ALTER TABLE t1 CHANGE a a ENUM('a','a') CHARACTER SET utf32; +--error ER_INVALID_CHARACTER_STRING +ALTER TABLE t1 CHANGE a a ENUM('aaa') CHARACTER SET utf32; +ALTER TABLE t1 CHANGE a 
a ENUM('aa') CHARACTER SET utf32; +SHOW CREATE TABLE t1; +ALTER TABLE t1 CHANGE a a ENUM('a','b') CHARACTER SET utf32; +SHOW CREATE TABLE t1; +DROP TABLE t1; +SET NAMES utf8; + + --echo # --echo # End of 10.2 tests --echo # diff --git a/mysql-test/t/ctype_utf32_uca.test b/mysql-test/t/ctype_utf32_uca.test index 2969480b0ef..9073d8c57f5 100644 --- a/mysql-test/t/ctype_utf32_uca.test +++ b/mysql-test/t/ctype_utf32_uca.test @@ -290,6 +290,19 @@ EXECUTE s; DEALLOCATE PREPARE s; SET NAMES utf8; +--echo # +--echo # MDEV-23210 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on ALTER TABLE, SELECT and INSERT +--echo # + +CREATE TABLE t1 (a CHAR(1)); +SET COLLATION_CONNECTION=utf32_myanmar_ci, CHARACTER_SET_CLIENT=binary; +--error ER_DUPLICATED_VALUE_IN_TYPE +ALTER TABLE t1 CHANGE a a ENUM('a','a') CHARACTER SET utf32; +ALTER TABLE t1 CHANGE a a ENUM('a','b') CHARACTER SET utf32; +SHOW CREATE TABLE t1; +DROP TABLE t1; +SET NAMES utf8; + --echo # --echo # End of 10.2 tests diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 479578679f1..4edf573e596 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -2148,7 +2148,7 @@ void THD::cleanup_after_query() */ bool THD::convert_string(LEX_STRING *to, CHARSET_INFO *to_cs, - const char *from, uint from_length, + const char *from, size_t from_length, CHARSET_INFO *from_cs) { DBUG_ENTER("THD::convert_string"); @@ -2170,6 +2170,58 @@ bool THD::convert_string(LEX_STRING *to, CHARSET_INFO *to_cs, } +/* + Reinterpret a binary string to a character string + + @param[OUT] to The result will be written here, + either the original string as is, + or a newly alloced fixed string with + some zero bytes prepended. + @param cs The destination character set + @param str The binary string + @param length The length of the binary string + + @return false on success + @return true on error +*/ + +bool THD::reinterpret_string_from_binary(LEX_CSTRING *to, CHARSET_INFO *cs, + const char *str, size_t length) +{ + /* + When reinterpreting from binary to tricky character sets like + UCS2, UTF16, UTF32, we may need to prepend some zero bytes. + This is possible in scenarios like this: + SET COLLATION_CONNECTION=utf32_general_ci, CHARACTER_SET_CLIENT=binary; + This code is similar to String::copy_aligned(). + */ + size_t incomplete= length % cs->mbminlen; // Bytes in an incomplete character + if (incomplete) + { + size_t zeros= cs->mbminlen - incomplete; + size_t aligned_length= zeros + length; + char *dst= (char*) alloc(aligned_length + 1); + if (!dst) + { + to->str= NULL; // Safety + to->length= 0; + return true; + } + bzero(dst, zeros); + memcpy(dst + zeros, str, length); + dst[aligned_length]= '\0'; + to->str= dst; + to->length= aligned_length; + } + else + { + to->str= str; + to->length= length; + } + return check_string_for_wellformedness(to->str, to->length, cs); +} + + /* Convert a string between two character sets. dstcs and srccs cannot be &my_charset_bin. 
@@ -2274,6 +2326,21 @@ bool THD::convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs) } +bool THD::check_string_for_wellformedness(const char *str, + size_t length, + CHARSET_INFO *cs) const +{ + size_t wlen= Well_formed_prefix(cs, str, length).length(); + if (wlen < length) + { + ErrConvString err(str, length, &my_charset_bin); + my_error(ER_INVALID_CHARACTER_STRING, MYF(0), cs->csname, err.ptr()); + return true; + } + return false; +} + + /* Update some cache variables when character set changes */ diff --git a/sql/sql_class.h b/sql/sql_class.h index 3f0fba8fc10..a748def9b56 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3503,8 +3503,31 @@ public: return true; // EOM } bool convert_string(LEX_STRING *to, CHARSET_INFO *to_cs, - const char *from, uint from_length, + const char *from, size_t from_length, CHARSET_INFO *from_cs); + bool reinterpret_string_from_binary(LEX_CSTRING *to, CHARSET_INFO *to_cs, + const char *from, size_t from_length); + bool convert_string(LEX_CSTRING *to, CHARSET_INFO *to_cs, + const char *from, size_t from_length, + CHARSET_INFO *from_cs) + { + LEX_STRING tmp; + bool rc= convert_string(&tmp, to_cs, from, from_length, from_cs); + to->str= tmp.str; + to->length= tmp.length; + return rc; + } + bool convert_string(LEX_CSTRING *to, CHARSET_INFO *tocs, + const LEX_CSTRING *from, CHARSET_INFO *fromcs, + bool simple_copy_is_possible) + { + if (!simple_copy_is_possible) + return unlikely(convert_string(to, tocs, from->str, from->length, fromcs)); + if (fromcs == &my_charset_bin) + return reinterpret_string_from_binary(to, tocs, from->str, from->length); + *to= *from; + return false; + } /* Convert a strings between character sets. Uses my_convert_fix(), which uses an mb_wc .. mc_mb loop internally. @@ -3540,6 +3563,44 @@ public: bool convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs); + /* + Check if the string is wellformed, raise an error if not wellformed. + @param str - The string to check. + @param length - the string length. + */ + bool check_string_for_wellformedness(const char *str, + size_t length, + CHARSET_INFO *cs) const; + + bool make_text_string_connection(LEX_CSTRING *to, + const LEX_CSTRING *from) + { + return convert_string(to, variables.collation_connection, + from, charset(), charset_is_collation_connection); + } +#if MYSQL_VERSION_ID < 100300 + /* + A wrapper method for 10.2. It fixes the problem + that various fields in bison %union use LEX_STRING. + In 10.3 those fields are fixed to use LEX_CSTRING. + Please remove this wrapper when mering to 10.3. 
+ */ + bool make_text_string_connection(LEX_STRING *to, + const LEX_STRING *from) + { + LEX_CSTRING cto; + LEX_CSTRING cfrom; + bool rc; + cfrom.str= from->str; + cfrom.length= from->length; + rc= make_text_string_connection(&cto, &cfrom); + to->str= (char*) cto.str; + to->length= cto.length; + return rc; + } +#else +#error Remove the above wrapper +#endif void add_changed_table(TABLE *table); void add_changed_table(const char *key, long key_length); CHANGED_TABLE_LIST * changed_table_dup(const char *key, long key_length); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 6f3274aced5..34f37efafdb 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -14571,14 +14571,8 @@ TEXT_STRING_sys: TEXT_STRING_literal: TEXT_STRING { - if (thd->charset_is_collation_connection) - $$= $1; - else - { - if (thd->convert_string(&$$, thd->variables.collation_connection, - $1.str, $1.length, thd->charset())) - MYSQL_YYABORT; - } + if (thd->make_text_string_connection(&$$, &$1)) + MYSQL_YYABORT; } ; From 99837c61a6efd27a270c45bd055fac110d1a6947 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Wed, 23 Feb 2022 10:10:01 +1100 Subject: [PATCH 06/14] MDEV-23915 ER_KILL_DENIED_ERROR not passed a thread id The 10.5 test error main.grant_kill showed up a incorrect thread id on a big endian architecture. The cause of this is the sql_kill_user function assumed the error was ER_OUT_OF_RESOURCES, when the the actual error was ER_KILL_DENIED_ERROR. ER_KILL_DENIED_ERROR as an error message requires a thread id to be passed as unsigned long, however a user/host was passed. ER_OUT_OF_RESOURCES doesn't even take a user/host, despite the optimistic comment. We remove this being passed as an argument to the function so that when MDEV-21978 is implemented one less compiler format warning is generated (which would have caught this error sooner). Thanks Otto for reporting and Marko for analysis. 
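Condensed from the sql_parse.cc hunk below: sql_kill_user() now picks the error arguments per error code instead of assuming ER_OUT_OF_RESOURCES:

    switch (error= kill_threads_for_user(thd, user, state, &rows))
    {
    case 0:
      my_ok(thd, rows);
      break;
    case ER_KILL_DENIED_ERROR:
      /* This message is formatted with a thread id, not a user/host pair. */
      my_error(error, MYF(0), (unsigned long) thd->thread_id);
      break;
    case ER_OUT_OF_RESOURCES:
    default:
      my_error(error, MYF(0));
    }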
--- .../suite/galera/r/galera_kill_applier.result | 4 ++++ .../suite/galera/t/galera_kill_applier.test | 6 ++++-- sql/sql_parse.cc | 18 ++++++++++-------- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_kill_applier.result b/mysql-test/suite/galera/r/galera_kill_applier.result index 075cbe6f702..0b1a0c12d0d 100644 --- a/mysql-test/suite/galera/r/galera_kill_applier.result +++ b/mysql-test/suite/galera/r/galera_kill_applier.result @@ -1,8 +1,12 @@ connection node_2; SET GLOBAL wsrep_slave_threads=2; +KILL ID; Got one of the listed errors +KILL QUERY ID; Got one of the listed errors +KILL ID; Got one of the listed errors +KILL QUERY ID; Got one of the listed errors SET GLOBAL wsrep_slave_threads=1; connection node_1; diff --git a/mysql-test/suite/galera/t/galera_kill_applier.test b/mysql-test/suite/galera/t/galera_kill_applier.test index 4136bac5dc6..c9ff795bab6 100644 --- a/mysql-test/suite/galera/t/galera_kill_applier.test +++ b/mysql-test/suite/galera/t/galera_kill_applier.test @@ -15,21 +15,23 @@ SET GLOBAL wsrep_slave_threads=2; --let $applier_thread = `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'wsrep applier idle' LIMIT 1` ---disable_query_log +--replace_result $applier_thread ID --error ER_KILL_DENIED_ERROR,ER_KILL_DENIED_ERROR --eval KILL $applier_thread +--replace_result $applier_thread ID --error ER_KILL_DENIED_ERROR,ER_KILL_DENIED_ERROR --eval KILL QUERY $applier_thread --let $aborter_thread = `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'wsrep aborter idle' LIMIT 1` +--replace_result $aborter_thread ID --error ER_KILL_DENIED_ERROR,ER_KILL_DENIED_ERROR --eval KILL $aborter_thread +--replace_result $aborter_thread ID --error ER_KILL_DENIED_ERROR,ER_KILL_DENIED_ERROR --eval KILL QUERY $aborter_thread ---enable_query_log SET GLOBAL wsrep_slave_threads=1; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 67e2fcd6764..a9d928f2c2a 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -9117,15 +9117,17 @@ void sql_kill_user(THD *thd, LEX_USER *user, killed_state state) WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL) } #endif /* WITH_WSREP */ - if (!(error= kill_threads_for_user(thd, user, state, &rows))) - my_ok(thd, rows); - else + switch (error= kill_threads_for_user(thd, user, state, &rows)) { - /* - This is probably ER_OUT_OF_RESOURCES, but in the future we may - want to write the name of the user we tried to kill - */ - my_error(error, MYF(0), user->host.str, user->user.str); + case 0: + my_ok(thd, rows); + break; + case ER_KILL_DENIED_ERROR: + my_error(error, MYF(0), (unsigned long) thd->thread_id); + break; + case ER_OUT_OF_RESOURCES: + default: + my_error(error, MYF(0)); } #ifdef WITH_WSREP return; From 57dbe8785d14a4b9e5b9dc17625cd00f615e136d Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Tue, 15 Mar 2022 13:30:46 +1100 Subject: [PATCH 07/14] MDEV-23915 ER_KILL_DENIED_ERROR not passed a thread id (part 2) Per Marko's comment in JIRA, sql_kill is passing the thread id as long long. We change the format of the error messages to match, and cast the thread id to long long in sql_kill_user. 
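After this follow-up the English template and the call site agree on the argument width (the errmsg-utf8.txt hunk below updates all translations the same way):

    eng "You are not owner of thread %lld"

    my_error(ER_KILL_DENIED_ERROR, MYF(0), (long long) thd->thread_id);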
--- sql/share/errmsg-utf8.txt | 48 +++++++++++++++++++-------------------- sql/sql_parse.cc | 2 +- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 2d4e0562d8f..814716b0a97 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -2231,30 +2231,30 @@ ER_NO_SUCH_THREAD swe "Finns ingen tråd med id %lu" ukr "Невідомий ідентифікатор гілки: %lu" ER_KILL_DENIED_ERROR - cze "Nejste vlastníkem threadu %lu" - dan "Du er ikke ejer af tråden %lu" - nla "U bent geen bezitter van thread %lu" - eng "You are not owner of thread %lu" - est "Ei ole lõime %lu omanik" - fre "Vous n'êtes pas propriétaire de la tâche no: %lu" - ger "Sie sind nicht Eigentümer von Thread %lu" - greek "Δεν είσθε owner του thread %lu" - hindi "आप थ्रेड %lu के OWNER नहीं हैं" - hun "A %lu thread-nek mas a tulajdonosa" - ita "Utente non proprietario del thread %lu" - jpn "スレッド %lu のオーナーではありません。" - kor "쓰레드(Thread) %lu의 소유자가 아닙니다." - nor "Du er ikke eier av tråden %lu" - norwegian-ny "Du er ikkje eigar av tråd %lu" - pol "Nie jeste? wła?cicielem w?tku %lu" - por "Você não é proprietário da 'thread' %lu" - rum "Nu sinteti proprietarul threadului %lu" - rus "Вы не являетесь владельцем потока %lu" - serbian "Vi niste vlasnik thread-a %lu" - slo "Nie ste vlastníkom vlákna %lu" - spa "Tu no eres el propietario del thread%lu" - swe "Du är inte ägare till tråd %lu" - ukr "Ви не володар гілки %lu" + cze "Nejste vlastníkem threadu %lld" + dan "Du er ikke ejer af tråden %lld" + nla "U bent geen bezitter van thread %lld" + eng "You are not owner of thread %lld" + est "Ei ole lõime %lld omanik" + fre "Vous n'êtes pas propriétaire de la tâche no: %lld" + ger "Sie sind nicht Eigentümer von Thread %lld" + greek "Δεν είσθε owner του thread %lld" + hindi "आप थ्रेड %lld के OWNER नहीं हैं" + hun "A %lld thread-nek mas a tulajdonosa" + ita "Utente non proprietario del thread %lld" + jpn "スレッド %lld のオーナーではありません。" + kor "쓰레드(Thread) %lld의 소유자가 아닙니다." + nor "Du er ikke eier av tråden %lld" + norwegian-ny "Du er ikkje eigar av tråd %lld" + pol "Nie jeste? wła?cicielem w?tku %lld" + por "Você não é proprietário da 'thread' %lld" + rum "Nu sinteti proprietarul threadului %lld" + rus "Вы не являетесь владельцем потока %lld" + serbian "Vi niste vlasnik thread-a %lld" + slo "Nie ste vlastníkom vlákna %lld" + spa "Tu no eres el propietario del thread%lld" + swe "Du är inte ägare till tråd %lld" + ukr "Ви не володар гілки %lld" ER_NO_TABLES_USED cze "Nejsou použity žádné tabulky" dan "Ingen tabeller i brug" diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index a9d928f2c2a..989ca0c8803 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -9123,7 +9123,7 @@ void sql_kill_user(THD *thd, LEX_USER *user, killed_state state) my_ok(thd, rows); break; case ER_KILL_DENIED_ERROR: - my_error(error, MYF(0), (unsigned long) thd->thread_id); + my_error(error, MYF(0), (long long) thd->thread_id); break; case ER_OUT_OF_RESOURCES: default: From b2c81e06b042025663ea01fa98dac0ff536c7706 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Mon, 28 Feb 2022 10:16:26 +1100 Subject: [PATCH 08/14] MDEV-27955 main.func_json_notembedded test fails on out-of-memory Uses 500M+ of memory by repeating an 8 byte sequence 62.5M times. Reduce the number of repeats on string reduced by 100 times. Tested by applying against the reverted MDEV-24909 code. 1000 times reduction was too much, but 100 still managed to trigger the bug. 
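For scale: the old test built two strings of 125000000/2 = 62.5M repeats of an 8-byte unit, about 500 MB each (length() = 500000009); with 1250000/2 = 625K repeats each string is about 5 MB (length() = 5000009), which the updated result file below confirms and which is still enough to hit the max_statement_time interruption the test expects.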
--- mysql-test/main/func_json_notembedded.result | 6 +++--- mysql-test/main/func_json_notembedded.test | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/mysql-test/main/func_json_notembedded.result b/mysql-test/main/func_json_notembedded.result index be879dfc9d6..756d2e85f7c 100644 --- a/mysql-test/main/func_json_notembedded.result +++ b/mysql-test/main/func_json_notembedded.result @@ -4,11 +4,11 @@ connect u,localhost,root; # MDEV-24909 JSON functions don't respect KILL QUERY / max_statement_time limit # set group_concat_max_len= 4294967295; -set @obj=concat_ws('','{', repeat('"a":"b",', 125000000/2), '"c":"d"}'); -set @arr=concat_ws('','[', repeat('1234567,', 125000000/2), '2345678]'); +set @obj=concat_ws('','{', repeat('"a":"b",', 1250000/2), '"c":"d"}'); +set @arr=concat_ws('','[', repeat('1234567,', 1250000/2), '2345678]'); select length(@obj), length(@arr); length(@obj) length(@arr) -500000009 500000009 +5000009 5000009 set max_statement_time=0.0001; select json_array_append(@arr, '$[0]', 1); ERROR 70100: Query execution was interrupted (max_statement_time exceeded) diff --git a/mysql-test/main/func_json_notembedded.test b/mysql-test/main/func_json_notembedded.test index 328d9974c77..b33615060b4 100644 --- a/mysql-test/main/func_json_notembedded.test +++ b/mysql-test/main/func_json_notembedded.test @@ -9,8 +9,8 @@ connect u,localhost,root; --echo # set group_concat_max_len= 4294967295; -set @obj=concat_ws('','{', repeat('"a":"b",', 125000000/2), '"c":"d"}'); -set @arr=concat_ws('','[', repeat('1234567,', 125000000/2), '2345678]'); +set @obj=concat_ws('','{', repeat('"a":"b",', 1250000/2), '"c":"d"}'); +set @arr=concat_ws('','[', repeat('1234567,', 1250000/2), '2345678]'); select length(@obj), length(@arr); set max_statement_time=0.0001; From 31ad9277fea8b8a9414f9495eeae7e0424275cd6 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Wed, 16 Mar 2022 14:30:36 +0530 Subject: [PATCH 09/14] MDEV-28079 Shutdown hangs after altering innodb partition fts table - InnoDB purge waits at resume_FTS() while shutting down. This happens after altering the FTS innodb partition table. stop_FTS() has been called for each partition, but it calls resume_FTS() only once and it leads to hang during shutdown. This issue was introduced by commit 1bd681c8b3c5213ce1f7976940a7dc38b48a0d39(MDEV-25506). 
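The shape of the fix, condensed from the commit_inplace_alter_table() hunk below (assuming the usual ha_innobase_inplace_ctx cast for *pctx): only the first partition actually stops the purge threads, so the single resume_FTS() done later balances exactly one stop:

    bool already_stopped= false;
    for (inplace_alter_handler_ctx **pctx= ctx_array; *pctx; pctx++)
    {
      auto ctx= static_cast<ha_innobase_inplace_ctx*>(*pctx);
      if (fts_exist)
      {
        /* Stops purge only on the first iteration; later partitions just
           release their FTS auxiliary table references. */
        purge_sys.stop_FTS(*ctx->old_table, already_stopped);
        already_stopped= true;
      }
    }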
--- mysql-test/suite/parts/r/partition_alter_innodb.result | 8 ++++++++ mysql-test/suite/parts/t/partition_alter_innodb.test | 8 ++++++++ storage/innobase/fts/fts0fts.cc | 9 ++++++--- storage/innobase/handler/handler0alter.cc | 4 +++- storage/innobase/include/trx0purge.h | 6 ++++-- 5 files changed, 29 insertions(+), 6 deletions(-) diff --git a/mysql-test/suite/parts/r/partition_alter_innodb.result b/mysql-test/suite/parts/r/partition_alter_innodb.result index f3921a1db26..fad8434989f 100644 --- a/mysql-test/suite/parts/r/partition_alter_innodb.result +++ b/mysql-test/suite/parts/r/partition_alter_innodb.result @@ -48,3 +48,11 @@ alter table t1 add partition (partition p0 values less than (20)); ERROR HY000: Duplicate partition name p0 alter table t1 add partition (partition p1 values less than (20)) /* comment */; drop table t1; +# +# MDEV-28079 Shutdown hangs after altering innodb partition fts table +# +CREATE TABLE t1(f1 INT, f2 CHAR(100))ENGINE=InnoDB PARTITION BY HASH(f1) PARTITIONS 2; +ALTER TABLE t1 ADD FULLTEXT(f2); +InnoDB 0 transactions not purged +DROP TABLE t1; +# End of 10.6 tests diff --git a/mysql-test/suite/parts/t/partition_alter_innodb.test b/mysql-test/suite/parts/t/partition_alter_innodb.test index 4ea3a0da88c..844b2084531 100644 --- a/mysql-test/suite/parts/t/partition_alter_innodb.test +++ b/mysql-test/suite/parts/t/partition_alter_innodb.test @@ -9,3 +9,11 @@ SET GLOBAL innodb_read_only_compressed=OFF; --disable_query_log SET GLOBAL innodb_read_only_compressed=@save_innodb_read_only_compressed; --enable_query_log +--echo # +--echo # MDEV-28079 Shutdown hangs after altering innodb partition fts table +--echo # +CREATE TABLE t1(f1 INT, f2 CHAR(100))ENGINE=InnoDB PARTITION BY HASH(f1) PARTITIONS 2; +ALTER TABLE t1 ADD FULLTEXT(f2); +--source ../innodb/include/wait_all_purged.inc +DROP TABLE t1; +--echo # End of 10.6 tests diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 7302f436918..c8aa6aab35a 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -1567,10 +1567,13 @@ static void fts_table_no_ref_count(const char *table_name) /** Stop the purge thread and check n_ref_count of all auxiliary and common table associated with the fts table. 
-@param table parent FTS table */ -void purge_sys_t::stop_FTS(const dict_table_t &table) +@param table parent FTS table +@param already_stopped True indicates purge threads were + already stopped*/ +void purge_sys_t::stop_FTS(const dict_table_t &table, bool already_stopped) { - purge_sys.stop_FTS(); + if (!already_stopped) + purge_sys.stop_FTS(); fts_table_t fts_table; char table_name[MAX_FULL_NAME_LEN]; diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 5a13240249c..ff069777ec4 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -10903,12 +10903,14 @@ ha_innobase::commit_inplace_alter_table( } } + bool already_stopped= false; for (inplace_alter_handler_ctx** pctx = ctx_array; *pctx; pctx++) { auto ctx = static_cast(*pctx); dberr_t error = DB_SUCCESS; if (fts_exist) { - purge_sys.stop_FTS(*ctx->old_table); + purge_sys.stop_FTS(*ctx->old_table, already_stopped); + already_stopped = true; } if (new_clustered && ctx->old_table->fts) { diff --git a/storage/innobase/include/trx0purge.h b/storage/innobase/include/trx0purge.h index dc032cdf73a..b3f2fbeedf3 100644 --- a/storage/innobase/include/trx0purge.h +++ b/storage/innobase/include/trx0purge.h @@ -286,8 +286,10 @@ public: /** Stop the purge thread and check n_ref_count of all auxiliary and common table associated with the fts table. - @param table parent FTS table */ - void stop_FTS(const dict_table_t &table); + @param table parent FTS table + @param already_stopped True indicates purge threads were + already stopped */ + void stop_FTS(const dict_table_t &table, bool already_stopped=false); }; /** The global data structure coordinating a purge */ From ee80c19633dd5d4e781b3e675d2ece52a5b0f9ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 16 Mar 2022 17:19:13 +0200 Subject: [PATCH 10/14] MDEV-26551 InnoDB crash on multiple concurrent SHOW TABLE STATUS dict_get_and_save_data_dir_path(): Protect the operation with dict_table_t::lock_mutex and avoid unnecessary memory allocation. --- storage/innobase/dict/dict0load.cc | 84 ++++++--------------------- storage/innobase/handler/ha_innodb.cc | 4 +- storage/innobase/include/dict0load.h | 7 +-- storage/innobase/row/row0import.cc | 6 +- storage/innobase/srv/srv0start.cc | 2 +- 5 files changed, 27 insertions(+), 76 deletions(-) diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc index 71dedd48c29..155f2e55057 100644 --- a/storage/innobase/dict/dict0load.cc +++ b/storage/innobase/dict/dict0load.cc @@ -2078,74 +2078,26 @@ const char *dict_load_table_low(const span &name, return(NULL); } -/********************************************************************//** -Using the table->heap, copy the null-terminated filepath into -table->data_dir_path and replace the 'databasename/tablename.ibd' -portion with 'tablename'. -This allows SHOW CREATE TABLE to return the correct DATA DIRECTORY path. -Make this data directory path only if it has not yet been saved. */ -static -void -dict_save_data_dir_path( -/*====================*/ - dict_table_t* table, /*!< in/out: table */ - const char* filepath) /*!< in: filepath of tablespace */ -{ - ut_ad(dict_sys.frozen()); - ut_a(DICT_TF_HAS_DATA_DIR(table->flags)); - - ut_a(!table->data_dir_path); - ut_a(filepath); - - /* Be sure this filepath is not the default filepath. 
*/ - if (char* default_filepath = fil_make_filepath(nullptr, table->name, - IBD, false)) { - if (0 != strcmp(filepath, default_filepath)) { - ulint pathlen = strlen(filepath); - ut_a(pathlen < OS_FILE_MAX_PATH); - ut_a(0 == strcmp(filepath + pathlen - 4, DOT_IBD)); - - table->data_dir_path = mem_heap_strdup( - table->heap, filepath); - os_file_make_data_dir_path(table->data_dir_path); - } - - ut_free(default_filepath); - } -} - /** Make sure the data_file_name is saved in dict_table_t if needed. -@param[in,out] table Table object -@param[in] dict_locked dict_sys.frozen() */ -void dict_get_and_save_data_dir_path(dict_table_t* table, bool dict_locked) +@param[in,out] table Table object */ +void dict_get_and_save_data_dir_path(dict_table_t *table) { - ut_ad(!table->is_temporary()); - ut_ad(!table->space || table->space->id == table->space_id); + ut_ad(!table->is_temporary()); + ut_ad(!table->space || table->space->id == table->space_id); - if (!table->data_dir_path && table->space_id && table->space) { - if (!dict_locked) { - dict_sys.freeze(SRW_LOCK_CALL); - } - - table->flags |= 1 << DICT_TF_POS_DATA_DIR - & ((1U << DICT_TF_BITS) - 1); - dict_save_data_dir_path(table, - table->space->chain.start->name); - - if (table->data_dir_path == NULL) { - /* Since we did not set the table data_dir_path, - unset the flag. This does not change - SYS_TABLES or FSP_SPACE_FLAGS on the header page - of the tablespace, but it makes dict_table_t - consistent. */ - table->flags &= ~DICT_TF_MASK_DATA_DIR - & ((1U << DICT_TF_BITS) - 1); - } - - if (!dict_locked) { - dict_sys.unfreeze(); - } - } + if (!table->data_dir_path && table->space_id && table->space) + { + const char *filepath= table->space->chain.start->name; + if (strncmp(fil_path_to_mysql_datadir, filepath, + strlen(fil_path_to_mysql_datadir))) + { + table->lock_mutex_lock(); + table->flags|= 1 << DICT_TF_POS_DATA_DIR & ((1U << DICT_TF_BITS) - 1); + table->data_dir_path= mem_heap_strdup(table->heap, filepath); + os_file_make_data_dir_path(table->data_dir_path); + table->lock_mutex_unlock(); + } + } } /** Opens a tablespace for dict_load_table_one() @@ -2199,7 +2151,7 @@ dict_load_tablespace( char* filepath = NULL; if (DICT_TF_HAS_DATA_DIR(table->flags)) { /* This will set table->data_dir_path from fil_system */ - dict_get_and_save_data_dir_path(table, true); + dict_get_and_save_data_dir_path(table); if (table->data_dir_path) { filepath = fil_make_filepath( diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 7b51c97b753..ba48cc4a92e 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -11427,7 +11427,7 @@ ha_innobase::update_create_info( return; } - dict_get_and_save_data_dir_path(m_prebuilt->table, false); + dict_get_and_save_data_dir_path(m_prebuilt->table); if (m_prebuilt->table->data_dir_path) { create_info->data_file_name = m_prebuilt->table->data_dir_path; @@ -13842,7 +13842,7 @@ int ha_innobase::truncate() mem_heap_t* heap = mem_heap_create(1000); - dict_get_and_save_data_dir_path(ib_table, false); + dict_get_and_save_data_dir_path(ib_table); info.data_file_name = ib_table->data_dir_path; const char* temp_name = dict_mem_create_temporary_tablename( heap, ib_table->name.m_name, ib_table->id); diff --git a/storage/innobase/include/dict0load.h b/storage/innobase/include/dict0load.h index 072773694a9..43e732263fd 100644 --- a/storage/innobase/include/dict0load.h +++ b/storage/innobase/include/dict0load.h @@ -1,7 +1,7 @@ 
/***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2021, MariaDB Corporation. +Copyright (c) 2017, 2022, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -53,9 +53,8 @@ We also scan the biggest space id, and store it to fil_system. */ void dict_check_tablespaces_and_store_max_id(); /** Make sure the data_file_name is saved in dict_table_t if needed. -@param[in,out] table Table object -@param[in] dict_locked dict_sys.frozen() */ -void dict_get_and_save_data_dir_path(dict_table_t* table, bool dict_locked); +@param[in,out] table Table object */ +void dict_get_and_save_data_dir_path(dict_table_t* table); /***********************************************************************//** Loads a table object based on the table id. diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index aa5bfb099b4..e9034c05b89 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -3128,7 +3128,7 @@ and apply it to dict_table_t static dberr_t handle_instant_metadata(dict_table_t *table, const row_import &cfg) { - dict_get_and_save_data_dir_path(table, false); + dict_get_and_save_data_dir_path(table); char *filepath; if (DICT_TF_HAS_DATA_DIR(table->flags)) @@ -4167,7 +4167,7 @@ fil_tablespace_iterate( return(DB_CORRUPTION);); /* Make sure the data_dir_path is set. */ - dict_get_and_save_data_dir_path(table, false); + dict_get_and_save_data_dir_path(table); ut_ad(!DICT_TF_HAS_DATA_DIR(table->flags) || table->data_dir_path); @@ -4488,7 +4488,7 @@ row_import_for_mysql( /* If the table is stored in a remote tablespace, we need to determine that filepath from the link file and system tables. Find the space ID in SYS_TABLES since this is an ALTER TABLE. */ - dict_get_and_save_data_dir_path(table, true); + dict_get_and_save_data_dir_path(table); ut_ad(!DICT_TF_HAS_DATA_DIR(table->flags) || table->data_dir_path); const char *data_dir_path = DICT_TF_HAS_DATA_DIR(table->flags) diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 830ccbcaa56..b6341bc8aea 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -2072,7 +2072,7 @@ srv_get_meta_data_filename( char* path; /* Make sure the data_dir_path is set. */ - dict_get_and_save_data_dir_path(table, false); + dict_get_and_save_data_dir_path(table); const char* data_dir_path = DICT_TF_HAS_DATA_DIR(table->flags) ? table->data_dir_path : nullptr; From 06e3bc4390ee286fd20b34526510768f1b32ed7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 17 Mar 2022 10:33:06 +0200 Subject: [PATCH 11/14] MDEV-17841 fixup: GCC -Wmaybe-uninitialized In commit ab38b7511bad8cc03a67f0d43e7169e6dfcac9fa an added "goto err" would seemingly cause a read of an uninitialized variable old_info if errpos>=5. However, because we would have errpos=0 at that point, there was no real error. 
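Concretely, the fix initializes the pointer at its declaration in maria_open() and drops the later old_info= 0; assignment, so the path GCC warns about is provably harmless:

    MARIA_HA info, *UNINIT_VAR(m_info), *old_info= NULL;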
--- storage/maria/ma_open.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c index db15778bc23..cf5bf2a5fea 100644 --- a/storage/maria/ma_open.c +++ b/storage/maria/ma_open.c @@ -1,5 +1,5 @@ /* Copyright (C) 2006 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - Copyright (c) 2009, 2021, MariaDB Corporation Ab + Copyright (c) 2009, 2022, MariaDB Corporation Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -276,7 +276,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags, char name_buff[FN_REFLEN], org_name[FN_REFLEN], index_name[FN_REFLEN], data_name[FN_REFLEN]; uchar *UNINIT_VAR(disk_cache), *disk_pos, *end_pos; - MARIA_HA info, *UNINIT_VAR(m_info), *old_info; + MARIA_HA info, *UNINIT_VAR(m_info), *old_info= NULL; MARIA_SHARE share_buff,*share; double *rec_per_key_part; ulong *nulls_per_key_part; @@ -327,7 +327,6 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags, } #endif /* WITH_S3_STORAGE_ENGINE */ - old_info= 0; if (!internal_table) mysql_mutex_lock(&THR_LOCK_maria); if ((open_flags & HA_OPEN_COPY) || From 8840583a92243f6ac543689148ca79c85fa0a09d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 18 Mar 2022 10:52:08 +0200 Subject: [PATCH 12/14] MDEV-27909 InnoDB: Failing assertion: state == TRX_STATE_NOT_STARTED ... on DDL The fix in commit 6e390a62baa9dfd92d2776d28c97fd9525422295 (MDEV-26772) was a step to the right direction, but implemented incorrectly. When an InnoDB persistent statistics table cannot be locked immediately, we must not let row_mysql_handle_errors() to roll back the transaction. lock_table_for_trx(): Add the parameter no_wait (default false) for an immediate return of DB_LOCK_WAIT in case of a conflict. ha_innobase::delete_table(), ha_innobase::rename_table(): Pass no_wait=true to lock_table_for_trx() when needed, instead of temporarily setting THDVAR(thd, lock_wait_timeout) to 0. --- storage/innobase/handler/ha_innodb.cc | 28 ++++------ storage/innobase/include/lock0lock.h | 18 +++---- storage/innobase/lock/lock0lock.cc | 78 +++++++++++++-------------- 3 files changed, 55 insertions(+), 69 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index ba48cc4a92e..72300f83c9c 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -13547,29 +13547,26 @@ int ha_innobase::delete_table(const char *name) dict_sys.unfreeze(); } - auto &timeout= THDVAR(thd, lock_wait_timeout); - const auto save_timeout= timeout; - if (table->name.is_temporary()) - timeout= 0; + const bool skip_wait{table->name.is_temporary()}; if (table_stats && index_stats && !strcmp(table_stats->name.m_name, TABLE_STATS_NAME) && !strcmp(index_stats->name.m_name, INDEX_STATS_NAME) && - !(err= lock_table_for_trx(table_stats, trx, LOCK_X))) - err= lock_table_for_trx(index_stats, trx, LOCK_X); + !(err= lock_table_for_trx(table_stats, trx, LOCK_X, skip_wait))) + err= lock_table_for_trx(index_stats, trx, LOCK_X, skip_wait); - if (err != DB_SUCCESS && !timeout) + if (err != DB_SUCCESS && skip_wait) { /* We may skip deleting statistics if we cannot lock the tables, when the table carries a temporary name. 
*/ + ut_ad(err == DB_LOCK_WAIT); + ut_ad(trx->error_state == DB_SUCCESS); err= DB_SUCCESS; dict_table_close(table_stats, false, thd, mdl_table); dict_table_close(index_stats, false, thd, mdl_index); table_stats= nullptr; index_stats= nullptr; } - - timeout= save_timeout; } if (err == DB_SUCCESS) @@ -14075,17 +14072,15 @@ ha_innobase::rename_table( if (error == DB_SUCCESS && table_stats && index_stats && !strcmp(table_stats->name.m_name, TABLE_STATS_NAME) && !strcmp(index_stats->name.m_name, INDEX_STATS_NAME)) { - auto &timeout = THDVAR(thd, lock_wait_timeout); - const auto save_timeout = timeout; - if (from_temp) { - timeout = 0; - } - error = lock_table_for_trx(table_stats, trx, LOCK_X); + error = lock_table_for_trx(table_stats, trx, LOCK_X, + from_temp); if (error == DB_SUCCESS) { error = lock_table_for_trx(index_stats, trx, - LOCK_X); + LOCK_X, from_temp); } if (error != DB_SUCCESS && from_temp) { + ut_ad(error == DB_LOCK_WAIT); + ut_ad(trx->error_state == DB_SUCCESS); error = DB_SUCCESS; /* We may skip renaming statistics if we cannot lock the tables, when the @@ -14098,7 +14093,6 @@ ha_innobase::rename_table( table_stats = nullptr; index_stats = nullptr; } - timeout = save_timeout; } } diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index 28d75517d45..e4ceff6dec2 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -394,15 +394,13 @@ lock_table( void lock_table_resurrect(dict_table_t *table, trx_t *trx, lock_mode mode); /** Sets a lock on a table based on the given mode. -@param[in] table table to lock -@param[in,out] trx transaction -@param[in] mode LOCK_X or LOCK_S -@return error code or DB_SUCCESS. */ -dberr_t -lock_table_for_trx( - dict_table_t* table, - trx_t* trx, - enum lock_mode mode) +@param table table to lock +@param trx transaction +@param mode LOCK_X or LOCK_S +@param no_wait whether to skip handling DB_LOCK_WAIT +@return error code */ +dberr_t lock_table_for_trx(dict_table_t *table, trx_t *trx, lock_mode mode, + bool no_wait= false) MY_ATTRIBUTE((nonnull, warn_unused_result)); /** Exclusively lock the data dictionary tables. @@ -915,10 +913,8 @@ public: @param page whether to discard also from lock_sys.prdt_hash */ void prdt_page_free_from_discard(const page_id_t id, bool all= false); -#ifdef WITH_WSREP /** Cancel possible lock waiting for a transaction */ static void cancel_lock_wait_for_trx(trx_t *trx); -#endif /* WITH_WSREP */ }; /** The lock system */ diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index fa1ea357fe6..f920ac1ac95 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -3627,52 +3627,50 @@ static void lock_table_dequeue(lock_t *in_lock, bool owns_wait_mutex) } } + /** Sets a lock on a table based on the given mode. -@param[in] table table to lock -@param[in,out] trx transaction -@param[in] mode LOCK_X or LOCK_S -@return error code or DB_SUCCESS. 
*/ -dberr_t -lock_table_for_trx( - dict_table_t* table, - trx_t* trx, - enum lock_mode mode) +@param table table to lock +@param trx transaction +@param mode LOCK_X or LOCK_S +@param no_wait whether to skip handling DB_LOCK_WAIT +@return error code */ +dberr_t lock_table_for_trx(dict_table_t *table, trx_t *trx, lock_mode mode, + bool no_wait) { - mem_heap_t* heap; - que_thr_t* thr; - dberr_t err; - sel_node_t* node; - heap = mem_heap_create(512); + mem_heap_t *heap= mem_heap_create(512); + sel_node_t *node= sel_node_create(heap); + que_thr_t *thr= pars_complete_graph_for_exec(node, trx, heap, nullptr); + thr->graph->state= QUE_FORK_ACTIVE; - node = sel_node_create(heap); - thr = pars_complete_graph_for_exec(node, trx, heap, NULL); - thr->graph->state = QUE_FORK_ACTIVE; - - /* We use the select query graph as the dummy graph needed - in the lock module call */ - - thr = static_cast( - que_fork_get_first_thr( - static_cast(que_node_get_parent(thr)))); + thr= static_cast + (que_fork_get_first_thr(static_cast + (que_node_get_parent(thr)))); run_again: - thr->run_node = thr; - thr->prev_node = thr->common.parent; + thr->run_node= thr; + thr->prev_node= thr->common.parent; + dberr_t err= lock_table(table, mode, thr); - err = lock_table(table, mode, thr); + switch (err) { + case DB_SUCCESS: + break; + case DB_LOCK_WAIT: + if (no_wait) + { + lock_sys.cancel_lock_wait_for_trx(trx); + break; + } + /* fall through */ + default: + trx->error_state= err; + if (row_mysql_handle_errors(&err, trx, thr, nullptr)) + goto run_again; + } - trx->error_state = err; + que_graph_free(thr->graph); + trx->op_info= ""; - if (UNIV_UNLIKELY(err != DB_SUCCESS)) { - if (row_mysql_handle_errors(&err, trx, thr, NULL)) { - goto run_again; - } - } - - que_graph_free(thr->graph); - trx->op_info = ""; - - return(err); + return err; } /** Exclusively lock the data dictionary tables. @@ -5639,8 +5637,7 @@ static void lock_cancel_waiting_and_release(lock_t *lock) lock_wait_end(trx); trx->mutex_unlock(); } -#ifdef WITH_WSREP -TRANSACTIONAL_TARGET + void lock_sys_t::cancel_lock_wait_for_trx(trx_t *trx) { lock_sys.wr_lock(SRW_LOCK_CALL); @@ -5654,7 +5651,6 @@ void lock_sys_t::cancel_lock_wait_for_trx(trx_t *trx) lock_sys.wr_unlock(); mysql_mutex_unlock(&lock_sys.wait_mutex); } -#endif /* WITH_WSREP */ /** Cancel a waiting lock request. @tparam check_victim whether to check for DB_DEADLOCK From 35725df6e2791d19bebf0301bb9fcb6200f5b00d Mon Sep 17 00:00:00 2001 From: Julius Goryavsky Date: Tue, 22 Mar 2022 03:23:32 +0100 Subject: [PATCH 13/14] MDEV-27524 addendum: fix for bug introduced by automatic migration --- scripts/wsrep_sst_common.sh | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/scripts/wsrep_sst_common.sh b/scripts/wsrep_sst_common.sh index 084854c8dcb..cc5db9e9e01 100644 --- a/scripts/wsrep_sst_common.sh +++ b/scripts/wsrep_sst_common.sh @@ -1214,16 +1214,6 @@ verify_ca_matches_cert() return fi - local readable=1; [ ! -r "$cert" ] && readable=0 - [ -n "$ca" ] && [ ! -r "$ca" ] && readable=0 - [ -n "$cap" ] && [ ! 
-r "$cap" ] && readable=0 - - if [ readable -eq 0 ]; then - wsrep_log_error \ - "Both PEM file and CA file (or path) must be readable" - exit 22 - fi - local not_match=0 local errmsg errmsg=$("$OPENSSL_BINARY" verify -verbose \ From 5ccd845d51d80a2f94dc796037f6d2aef106f75d Mon Sep 17 00:00:00 2001 From: Andrei Date: Thu, 10 Feb 2022 19:17:06 +0200 Subject: [PATCH 14/14] MDEV-27760 event may non stop replicate in circular semisync setup MDEV-21117 had to relax own events acceptance condition for a case when a former semisync master server recovers after crash as the semisync slave. That however admitted a possibility for endless event "orbiting" in the non-strict slave gtid mode of semisync circular setup. The same server-id event termination is restored now for the non-strict gtid mode to follow regular rules (that is it's ignored unless @@global.replicate_same_server_id allows it in). To address MDEV-21117 recovery agenda, in the strict gtid mode and the transaction's gtid ordered strictly greater than the current slave gtid state, the same server-id transaction is accepted. The gtid strict mode is safe to accept transactions even if the slave state were not set correct by the user, e.g at the former master. An added test shows a typical out-of-order error at execution so no data corruption is guaranteed in such a case. --- .../suite/rpl/r/rpl_circular_semi_sync.result | 76 ++++++++++++ .../suite/rpl/t/rpl_circular_semi_sync.cnf | 11 ++ .../suite/rpl/t/rpl_circular_semi_sync.test | 115 ++++++++++++++++++ sql/slave.cc | 24 ++-- 4 files changed, 217 insertions(+), 9 deletions(-) create mode 100644 mysql-test/suite/rpl/r/rpl_circular_semi_sync.result create mode 100644 mysql-test/suite/rpl/t/rpl_circular_semi_sync.cnf create mode 100644 mysql-test/suite/rpl/t/rpl_circular_semi_sync.test diff --git a/mysql-test/suite/rpl/r/rpl_circular_semi_sync.result b/mysql-test/suite/rpl/r/rpl_circular_semi_sync.result new file mode 100644 index 00000000000..dcced9833ca --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_circular_semi_sync.result @@ -0,0 +1,76 @@ +include/master-slave.inc +[connection master] +# Master server_1 and Slave server_2 initialiation ... +connection server_2; +include/stop_slave.inc +connection server_1; +set @@sql_log_bin = off; +call mtr.add_suppression("Slave: An attempt was made to binlog GTID 10-1-1 which would create an out-of-order sequence number with existing GTID"); +set @@sql_log_bin = on; +RESET MASTER; +set @@session.gtid_domain_id=10; +set @@global.rpl_semi_sync_master_enabled = 1; +set @@global.rpl_semi_sync_master_wait_point=AFTER_SYNC; +connection server_2; +RESET MASTER; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +set @@session.gtid_domain_id=20; +set @@global.rpl_semi_sync_slave_enabled = 1; +# a 1948 warning is expected +set @@global.gtid_slave_pos = ""; +Warnings: +Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-1. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos +CHANGE MASTER TO master_use_gtid= slave_pos; +include/start_slave.inc +# ... server_1 -> server_2 is set up +connection server_1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=Innodb; +INSERT INTO t1 VALUES (1); +connection server_2; +# Circular configuration server_2 -> server_1 initialiation ... +connection server_1; +# A. ... first when server_1 is in gtid strict mode... 
+set @@global.gtid_strict_mode = true; +set @@global.rpl_semi_sync_slave_enabled = 1; +CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_2, master_user='root', master_use_gtid=SLAVE_POS; +# ... only for it to fail 'cos if its inconsistent (empty) slave's gtid state: +SELECT @@global.gtid_slave_pos; +@@global.gtid_slave_pos + +START SLAVE; +include/wait_for_slave_sql_error.inc [errno=1950] +# B. ... Resume on the circular setup with the server_id now in the non-strict mode ... +set @@global.gtid_strict_mode = false; +include/start_slave.inc +# ... to have succeeded. +connection server_2; +INSERT INTO t1 VALUES (2); +connection server_1; +INSERT INTO t1 VALUES (3); +connection server_2; +# The gtid states on server_2 must be equal to ... +SHOW VARIABLES LIKE 'gtid_binlog_pos'; +Variable_name Value +gtid_binlog_pos 0-2-1,10-1-3,20-2-1 +SHOW VARIABLES LIKE 'gtid_slave_pos'; +Variable_name Value +gtid_slave_pos 0-2-1,10-1-3,20-2-1 +connection server_1; +# ... the gtid states on server_1 +SHOW VARIABLES LIKE 'gtid_slave_pos'; +Variable_name Value +gtid_slave_pos 0-2-1,10-1-3,20-2-1 +SHOW VARIABLES LIKE 'gtid_binlog_pos'; +Variable_name Value +gtid_binlog_pos 0-2-1,10-1-3,20-2-1 +# Cleanup +connection server_1; +include/stop_slave.inc +set @@global.rpl_semi_sync_master_enabled = default; +set @@global.rpl_semi_sync_slave_enabled = default; +set @@global.rpl_semi_sync_master_wait_point=default; +DROP TABLE t1; +connection server_2; +set @@global.rpl_semi_sync_master_enabled = default; +set @@global.rpl_semi_sync_slave_enabled = default; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_circular_semi_sync.cnf b/mysql-test/suite/rpl/t/rpl_circular_semi_sync.cnf new file mode 100644 index 00000000000..be39fea91d8 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_circular_semi_sync.cnf @@ -0,0 +1,11 @@ +!include suite/rpl/rpl_1slave_base.cnf +!include include/default_client.cnf + + +[mysqld.1] +log-slave-updates +sync-binlog=1 + +[mysqld.2] +log-slave-updates +sync-binlog=1 diff --git a/mysql-test/suite/rpl/t/rpl_circular_semi_sync.test b/mysql-test/suite/rpl/t/rpl_circular_semi_sync.test new file mode 100644 index 00000000000..51fa5a242ea --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_circular_semi_sync.test @@ -0,0 +1,115 @@ +# ==== References ==== +# +# MDEV-27760 event may non stop replicate in circular semisync setup +# +--source include/have_innodb.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +# The following tests prove +# A. out-of-order gtid error when the stict gtid mode semisync slave +# receives the same server-id gtid event inconsistent +# (rpl_semi_sync_fail_over tests the consistent case) with its state; +# B. in the non-strict mode the same server-id events remains ignored +# by default as usual. +# +--echo # Master server_1 and Slave server_2 initialiation ... 
+--connection server_2 +--source include/stop_slave.inc + +# Initial master +--connection server_1 +set @@sql_log_bin = off; +call mtr.add_suppression("Slave: An attempt was made to binlog GTID 10-1-1 which would create an out-of-order sequence number with existing GTID"); +set @@sql_log_bin = on; + +RESET MASTER; + +set @@session.gtid_domain_id=10; + +set @@global.rpl_semi_sync_master_enabled = 1; +set @@global.rpl_semi_sync_master_wait_point=AFTER_SYNC; + +--connection server_2 +RESET MASTER; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; + +set @@session.gtid_domain_id=20; + +set @@global.rpl_semi_sync_slave_enabled = 1; +--echo # a 1948 warning is expected +set @@global.gtid_slave_pos = ""; +CHANGE MASTER TO master_use_gtid= slave_pos; +--source include/start_slave.inc +--echo # ... server_1 -> server_2 is set up + +--connection server_1 +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=Innodb; +INSERT INTO t1 VALUES (1); +--save_master_pos + +--connection server_2 +--sync_with_master + +--echo # Circular configuration server_2 -> server_1 initialiation ... +--connection server_1 +--echo # A. ... first when server_1 is in gtid strict mode... +set @@global.gtid_strict_mode = true; +set @@global.rpl_semi_sync_slave_enabled = 1; + +evalp CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_2, master_user='root', master_use_gtid=SLAVE_POS; + +--echo # ... only for it to fail 'cos if its inconsistent (empty) slave's gtid state: +SELECT @@global.gtid_slave_pos; +START SLAVE; +# ER_GTID_STRICT_OUT_OF_ORDER +--let $slave_sql_errno = 1950 +--source include/wait_for_slave_sql_error.inc + +--echo # B. ... Resume on the circular setup with the server_id now in the non-strict mode ... +set @@global.gtid_strict_mode = false; +--source include/start_slave.inc + +--echo # ... to have succeeded. + +--connection server_2 +INSERT INTO t1 VALUES (2); +--save_master_pos + +--connection server_1 +--sync_with_master + +INSERT INTO t1 VALUES (3); +--save_master_pos + +--connection server_2 +--sync_with_master +--echo # The gtid states on server_2 must be equal to ... +--let $wait_condition=select @@gtid_slave_pos=@@gtid_binlog_pos +--source include/wait_condition.inc +SHOW VARIABLES LIKE 'gtid_binlog_pos'; +SHOW VARIABLES LIKE 'gtid_slave_pos'; + +--connection server_1 +--echo # ... the gtid states on server_1 +--let $wait_condition=select @@gtid_slave_pos=@@gtid_binlog_pos +--source include/wait_condition.inc +SHOW VARIABLES LIKE 'gtid_slave_pos'; +SHOW VARIABLES LIKE 'gtid_binlog_pos'; + +--echo # Cleanup +--connection server_1 +--source include/stop_slave.inc +set @@global.rpl_semi_sync_master_enabled = default; +set @@global.rpl_semi_sync_slave_enabled = default; +set @@global.rpl_semi_sync_master_wait_point=default; + +DROP TABLE t1; +--save_master_pos + +--connection server_2 +--sync_with_master +set @@global.rpl_semi_sync_master_enabled = default; +set @@global.rpl_semi_sync_slave_enabled = default; + +--source include/rpl_end.inc diff --git a/sql/slave.cc b/sql/slave.cc index c0eef02ca7a..a6578be199a 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -6186,13 +6186,13 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) bool is_rows_event= false; /* The flag has replicate_same_server_id semantics and is raised to accept - a same-server-id event on the semisync slave, for both the gtid and legacy - connection modes. - Such events can appear as result of this server recovery so the event - was created there and replicated elsewhere right before the crash. 
At recovery - it could be evicted from the server's binlog. - */ - bool do_accept_own_server_id= false; + a same-server-id event group by the gtid strict mode semisync slave. + Own server-id events can appear as result of this server crash-recovery: + the transaction was created on this server then being master, got replicated + elsewhere right before the crash before commit; + finally at recovery the transaction gets evicted from the server's binlog. + */ + bool do_accept_own_server_id; /* FD_q must have been prepared for the first R_a event inside get_master_version_and_clock() @@ -6281,6 +6281,8 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) dbug_rows_event_count = 0; };); #endif + s_id= uint4korr(buf + SERVER_ID_OFFSET); + mysql_mutex_lock(&mi->data_lock); switch (buf[EVENT_TYPE_OFFSET]) { @@ -6722,6 +6724,7 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) ++mi->events_queued_since_last_gtid; inc_pos= event_len; + } break; /* @@ -6864,6 +6867,10 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) break; } + do_accept_own_server_id= (s_id == global_system_variables.server_id + && rpl_semi_sync_slave_enabled && opt_gtid_strict_mode + && mi->using_gtid != Master_info::USE_GTID_NO); + /* Integrity of Rows- event group check. A sequence of Rows- events must end with STMT_END_F flagged one. @@ -6909,7 +6916,6 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) */ mysql_mutex_lock(log_lock); - s_id= uint4korr(buf + SERVER_ID_OFFSET); /* Write the event to the relay log, unless we reconnected in the middle of an event group and now need to skip the initial part of the group that @@ -6955,7 +6961,7 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) else if ((s_id == global_system_variables.server_id && !(mi->rli.replicate_same_server_id || - (do_accept_own_server_id= rpl_semi_sync_slave_enabled))) || + do_accept_own_server_id)) || event_that_should_be_ignored(buf) || /* the following conjunction deals with IGNORE_SERVER_IDS, if set