diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh index 7b1efbeab6f..e53040aa61a 100755 --- a/debian/autobake-deb.sh +++ b/debian/autobake-deb.sh @@ -11,26 +11,17 @@ # Exit immediately on any error set -e -source ./VERSION - -CODENAME="$(lsb_release -sc)" -case "${CODENAME}" in - stretch) - # MDEV-28022 libzstd-dev-1.1.3 minimum version - sed -i -e '/libzstd-dev/d' debian/control - ;; -esac - -# This file is invoked from Buildbot and Travis-CI to build deb packages. -# As both of those CI systems have many parallel jobs that include different -# parts of the test suite, we don't need to run the mysql-test-run at all when -# building the deb packages here. +# On Buildbot, don't run the mysql-test-run test suite as part of build. +# It takes a lot of time, and we will do a better test anyway in +# Buildbot, running the test suite from installed .debs on a clean VM. export DEB_BUILD_OPTIONS="nocheck $DEB_BUILD_OPTIONS" +source ./VERSION + # General CI optimizations to keep build output smaller -if [[ $TRAVIS ]] || [[ $GITLAB_CI ]] +if [[ $GITLAB_CI ]] then - # On both Travis and Gitlab the output log must stay under 4MB so make the + # On Gitlab the output log must stay under 4MB so make the # build less verbose sed '/Add support for verbose builds/,/^$/d' -i debian/rules elif [ -d storage/columnstore/columnstore/debian ] @@ -45,41 +36,38 @@ then sed "s/10.6/${MYSQL_VERSION_MAJOR}.${MYSQL_VERSION_MINOR}/" > debian/control fi -# Don't build or try to put files in a package for selected plugins and components on Travis-CI -# in order to keep build small (in both duration and disk space) -if [[ $TRAVIS ]] -then - # Test suite package not relevant on Travis-CI - sed 's|DINSTALL_MYSQLTESTDIR=share/mysql/mysql-test|DINSTALL_MYSQLTESTDIR=false|' -i debian/rules - sed '/Package: mariadb-test-data/,/^$/d' -i debian/control - sed '/Package: mariadb-test$/,/^$/d' -i debian/control +# Look up distro-version specific stuff +# +# Always keep the actual packaging as up-to-date as possible following the latest +# Debian policy and targeting Debian Sid. Then case-by-case run in autobake-deb.sh +# tests for backwards compatibility and strip away parts on older builders. - # Extra plugins such as Mroonga, Spider, OQgraph, Sphinx and the embedded build can safely be skipped - sed 's|-DDEB|-DPLUGIN_MROONGA=NO -DPLUGIN_ROCKSDB=NO -DPLUGIN_SPIDER=NO -DPLUGIN_OQGRAPH=NO -DPLUGIN_PERFSCHEMA=NO -DPLUGIN_SPHINX=NO -DWITH_EMBEDDED_SERVER=OFF -DDEB|' -i debian/rules - sed "/Package: mariadb-plugin-mroonga/,/^$/d" -i debian/control - sed "/Package: mariadb-plugin-rocksdb/,/^$/d" -i debian/control - sed "/Package: mariadb-plugin-spider/,/^$/d" -i debian/control - sed "/Package: mariadb-plugin-oqgraph/,/^$/d" -i debian/control - sed "/ha_sphinx.so/d" -i debian/mariadb-server-10.7.install - sed "/Package: libmariadbd19/,/^$/d" -i debian/control - sed "/Package: libmariadbd-dev/,/^$/d" -i debian/control -fi - -# If rocksdb-tools is not available (before Debian Buster and Ubuntu Disco) -# remove the dependency from the RocksDB plugin so it can install properly -# and instead ship the one built from MariaDB sources -if ! apt-cache madison rocksdb-tools | grep 'rocksdb-tools' >/dev/null 2>&1 -then +remove_rocksdb_tools() +{ sed '/rocksdb-tools/d' -i debian/control sed '/sst_dump/d' -i debian/not-installed - echo "usr/bin/sst_dump" >> debian/mariadb-plugin-rocksdb.install -fi + if ! 
grep -q sst_dump debian/mariadb-plugin-rocksdb.install + then + echo "usr/bin/sst_dump" >> debian/mariadb-plugin-rocksdb.install + fi +} -# If libcurl4 is not available (before Debian Buster and Ubuntu Bionic) -# use older libcurl3 instead -if ! apt-cache madison libcurl4 | grep 'libcurl4' >/dev/null 2>&1 +CODENAME="$(lsb_release -sc)" +case "${CODENAME}" in + stretch) + # MDEV-28022 libzstd-dev-1.1.3 minimum version + sed -i -e '/libzstd-dev/d' \ + -e 's/libcurl4/libcurl3/g' -i debian/control + remove_rocksdb_tools + ;; + bionic) + remove_rocksdb_tools + ;; +esac + +if [[ ! "$(dpkg-architecture -q DEB_BUILD_ARCH)" =~ amd64|arm64|ppc64el|s390x ]] then - sed 's/libcurl4/libcurl3/g' -i debian/control + remove_rocksdb_tools fi # From Debian Bullseye/Ubuntu Groovy, liburing replaces libaio @@ -98,15 +86,6 @@ then sed '/-DWITH_PMEM=yes/d' -i debian/rules fi -# Debian stretch doesn't support the zstd version 1.1.3 required -# for RocksDB. zstd isn't enabled in Mroonga even though code exists -# for it. If someone happens to have a non-default zstd installed -# (not 1.1.2), assume its a backport and build with it. -if [ "$(lsb_release -sc)" = stretch ] && [ "$(apt-cache madison 'libzstd-dev' | grep -v 1.1.2)" = '' ] -then - sed '/libzstd-dev/d' -i debian/control -fi - # Adjust changelog, add new version echo "Incrementing changelog and starting build scripts" @@ -121,9 +100,9 @@ dch -b -D "${CODENAME}" -v "${VERSION}" "Automatic build with ${LOGSTRING}." --c echo "Creating package version ${VERSION} ... " -# On Travis CI and Gitlab-CI, use -b to build binary only packages as there is +# On Gitlab-CI, use -b to build binary only packages as there is # no need to waste time on generating the source package. -if [[ $TRAVIS ]] +if [[ $GITLAB_CI ]] then BUILDPACKAGE_FLAGS="-b" fi @@ -143,8 +122,8 @@ fakeroot $BUILDPACKAGE_PREPEND dpkg-buildpackage -us -uc -I $BUILDPACKAGE_FLAGS # If the step above fails due to missing dependencies, you can manually run # sudo mk-build-deps debian/control -r -i -# Don't log package contents on Travis-CI or Gitlab-CI to save time and log size -if [[ ! $TRAVIS ]] && [[ ! $GITLAB_CI ]] +# Don't log package contents on Gitlab-CI to save time and log size +if [[ ! $GITLAB_CI ]] then echo "List package contents ..." cd .. 
diff --git a/debian/control b/debian/control index ada91550726..2ce173d21bb 100644 --- a/debian/control +++ b/debian/control @@ -5,7 +5,7 @@ Maintainer: MariaDB Developers Build-Depends: bison, cmake, cracklib-runtime , - debhelper (>= 9.20160709~), + debhelper (>= 10), dh-exec, flex [amd64], gdb , diff --git a/mysql-test/main/ctype_utf32.result b/mysql-test/main/ctype_utf32.result index d7717ece210..cf9db875290 100644 --- a/mysql-test/main/ctype_utf32.result +++ b/mysql-test/main/ctype_utf32.result @@ -2890,5 +2890,28 @@ HEX(c1) 0000006100000063 DROP TABLE t1; # +# MDEV-23210 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on ALTER TABLE, SELECT and INSERT +# +CREATE TABLE t1 (a CHAR(1)); +SET COLLATION_CONNECTION=utf32_general_ci, CHARACTER_SET_CLIENT=binary; +ALTER TABLE t1 CHANGE a a ENUM('a','a') CHARACTER SET utf32; +ERROR HY000: Column 'a' has duplicated value 'a' in ENUM +ALTER TABLE t1 CHANGE a a ENUM('aaa') CHARACTER SET utf32; +ERROR HY000: Invalid utf32 character string: '\x00aaa' +ALTER TABLE t1 CHANGE a a ENUM('aa') CHARACTER SET utf32; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` enum('慡') CHARACTER SET utf32 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +ALTER TABLE t1 CHANGE a a ENUM('a','b') CHARACTER SET utf32; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` enum('a','b') CHARACTER SET utf32 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +SET NAMES utf8; +# # End of 10.2 tests # diff --git a/mysql-test/main/ctype_utf32.test b/mysql-test/main/ctype_utf32.test index 59cc13015af..6944fdb30be 100644 --- a/mysql-test/main/ctype_utf32.test +++ b/mysql-test/main/ctype_utf32.test @@ -1048,6 +1048,25 @@ INSERT INTO t1 (c1) VALUES (1),(2),(3); SELECT HEX(c1) FROM t1 ORDER BY c1; DROP TABLE t1; + +--echo # +--echo # MDEV-23210 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on ALTER TABLE, SELECT and INSERT +--echo # + +CREATE TABLE t1 (a CHAR(1)); +SET COLLATION_CONNECTION=utf32_general_ci, CHARACTER_SET_CLIENT=binary; +--error ER_DUPLICATED_VALUE_IN_TYPE +ALTER TABLE t1 CHANGE a a ENUM('a','a') CHARACTER SET utf32; +--error ER_INVALID_CHARACTER_STRING +ALTER TABLE t1 CHANGE a a ENUM('aaa') CHARACTER SET utf32; +ALTER TABLE t1 CHANGE a a ENUM('aa') CHARACTER SET utf32; +SHOW CREATE TABLE t1; +ALTER TABLE t1 CHANGE a a ENUM('a','b') CHARACTER SET utf32; +SHOW CREATE TABLE t1; +DROP TABLE t1; +SET NAMES utf8; + + --echo # --echo # End of 10.2 tests --echo # diff --git a/mysql-test/main/ctype_utf32_uca.result b/mysql-test/main/ctype_utf32_uca.result index 32ecf49dc70..27aa934cf00 100644 --- a/mysql-test/main/ctype_utf32_uca.result +++ b/mysql-test/main/ctype_utf32_uca.result @@ -7941,6 +7941,21 @@ EXECUTE s; DEALLOCATE PREPARE s; SET NAMES utf8; # +# MDEV-23210 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on ALTER TABLE, SELECT and INSERT +# +CREATE TABLE t1 (a CHAR(1)); +SET COLLATION_CONNECTION=utf32_myanmar_ci, CHARACTER_SET_CLIENT=binary; +ALTER TABLE t1 CHANGE a a ENUM('a','a') CHARACTER SET utf32; +ERROR HY000: Column 'a' has duplicated value 'a' in ENUM +ALTER TABLE t1 CHANGE a a ENUM('a','b') CHARACTER SET utf32; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` enum('a','b') CHARACTER SET utf32 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +SET NAMES utf8; +# # End of 10.2 tests # # diff --git a/mysql-test/main/ctype_utf32_uca.test b/mysql-test/main/ctype_utf32_uca.test index 0abc6a73fc8..9a6fc3260b0 100644 --- 
a/mysql-test/main/ctype_utf32_uca.test +++ b/mysql-test/main/ctype_utf32_uca.test @@ -290,6 +290,19 @@ EXECUTE s; DEALLOCATE PREPARE s; SET NAMES utf8; +--echo # +--echo # MDEV-23210 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on ALTER TABLE, SELECT and INSERT +--echo # + +CREATE TABLE t1 (a CHAR(1)); +SET COLLATION_CONNECTION=utf32_myanmar_ci, CHARACTER_SET_CLIENT=binary; +--error ER_DUPLICATED_VALUE_IN_TYPE +ALTER TABLE t1 CHANGE a a ENUM('a','a') CHARACTER SET utf32; +ALTER TABLE t1 CHANGE a a ENUM('a','b') CHARACTER SET utf32; +SHOW CREATE TABLE t1; +DROP TABLE t1; +SET NAMES utf8; + --echo # --echo # End of 10.2 tests diff --git a/mysql-test/main/drop_bad_db_type.result b/mysql-test/main/drop_bad_db_type.result index ae6fe708e60..97869a39aa3 100644 --- a/mysql-test/main/drop_bad_db_type.result +++ b/mysql-test/main/drop_bad_db_type.result @@ -3,34 +3,50 @@ SET debug_dbug='+d,unstable_db_type'; install soname 'ha_archive'; create table t1 (a int) engine=archive; insert t1 values (1),(2),(3); +create table t2 (a int) engine=archive partition by hash(a) partitions 3; flush tables; uninstall soname 'ha_archive'; -select table_schema, table_name from information_schema.tables where table_name like 't1'; -table_schema test -table_name t1 -select table_schema, table_name, engine, version from information_schema.tables where table_name like 't1'; -table_schema test -table_name t1 -engine ARCHIVE -version NULL +select table_schema, table_name from information_schema.tables where table_name like 't_' order by 1,2; +table_schema table_name +test t1 +test t2 +select table_schema, table_name, engine, version from information_schema.tables where table_name like 't_' order by 1,2; +table_schema table_name engine version +test t1 ARCHIVE NULL +test t2 NULL NULL Warnings: -Level Warning -Code 1286 -Message Unknown storage engine 'ARCHIVE' -select table_schema, table_name, engine, row_format from information_schema.tables where table_name like 't1'; -table_schema test -table_name t1 -engine ARCHIVE -row_format NULL +Warning 1033 Incorrect information in file: './test/t2.frm' +Warning 1286 Unknown storage engine 'ARCHIVE' +select table_schema, table_name, engine, row_format from information_schema.tables where table_name like 't_' order by 1,2; +table_schema table_name engine row_format +test t1 ARCHIVE NULL +test t2 NULL NULL Warnings: -Level Warning -Code 1286 -Message Unknown storage engine 'ARCHIVE' +Warning 1033 Incorrect information in file: './test/t2.frm' +Warning 1286 Unknown storage engine 'ARCHIVE' install soname 'ha_archive'; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL +) ENGINE=ARCHIVE DEFAULT CHARSET=latin1 +show create table t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) DEFAULT NULL +) ENGINE=ARCHIVE DEFAULT CHARSET=latin1 + PARTITION BY HASH (`a`) +PARTITIONS 3 db.opt t1.ARZ t1.frm +t2#P#p0.ARZ +t2#P#p1.ARZ +t2#P#p2.ARZ +t2.frm +t2.par drop table t1; +drop table t2; db.opt uninstall soname 'ha_archive'; SET debug_dbug=@saved_dbug; diff --git a/mysql-test/main/drop_bad_db_type.test b/mysql-test/main/drop_bad_db_type.test index ebc732104d3..0fb5fe5edf4 100644 --- a/mysql-test/main/drop_bad_db_type.test +++ b/mysql-test/main/drop_bad_db_type.test @@ -1,4 +1,4 @@ - +--source include/have_partition.inc --source include/have_debug.inc if (!$HA_ARCHIVE_SO) { @@ -13,18 +13,25 @@ SET debug_dbug='+d,unstable_db_type'; install soname 'ha_archive'; create table t1 (a int) engine=archive; insert t1 values (1),(2),(3); + 
+create table t2 (a int) engine=archive partition by hash(a) partitions 3; + flush tables; uninstall soname 'ha_archive'; ---vertical_results -select table_schema, table_name from information_schema.tables where table_name like 't1'; -select table_schema, table_name, engine, version from information_schema.tables where table_name like 't1'; -select table_schema, table_name, engine, row_format from information_schema.tables where table_name like 't1'; ---horizontal_results +select table_schema, table_name from information_schema.tables where table_name like 't_' order by 1,2; +--replace_result $mysqld_datadir ./ +select table_schema, table_name, engine, version from information_schema.tables where table_name like 't_' order by 1,2; +--replace_result $mysqld_datadir ./ +select table_schema, table_name, engine, row_format from information_schema.tables where table_name like 't_' order by 1,2; install soname 'ha_archive'; +show create table t1; +show create table t2; + --list_files $mysqld_datadir/test drop table t1; +drop table t2; --list_files $mysqld_datadir/test uninstall soname 'ha_archive'; diff --git a/mysql-test/main/error_simulation.result b/mysql-test/main/error_simulation.result index 457e5c8ec9c..680937accfd 100644 --- a/mysql-test/main/error_simulation.result +++ b/mysql-test/main/error_simulation.result @@ -128,3 +128,13 @@ SELECT f1(1); Got one of the listed errors DROP FUNCTION f1; SET debug_dbug= @saved_dbug; +# +# MDEV-27978 wrong option name in error when exceeding max_session_mem_used +# +SET SESSION max_session_mem_used = 8192; +SELECT * FROM information_schema.processlist; +ERROR HY000: The MariaDB server is running with the --max-session-mem-used=8192 option so it cannot execute this statement +SET SESSION max_session_mem_used = DEFAULT; +# +# End of 10.2 tests +# diff --git a/mysql-test/main/error_simulation.test b/mysql-test/main/error_simulation.test index f713e2da6ba..2c155bc9a22 100644 --- a/mysql-test/main/error_simulation.test +++ b/mysql-test/main/error_simulation.test @@ -158,3 +158,16 @@ SET SESSION debug_dbug="+d,simulate_create_virtual_tmp_table_out_of_memory"; SELECT f1(1); DROP FUNCTION f1; SET debug_dbug= @saved_dbug; + +--echo # +--echo # MDEV-27978 wrong option name in error when exceeding max_session_mem_used +--echo # +SET SESSION max_session_mem_used = 8192; +--error ER_OPTION_PREVENTS_STATEMENT +SELECT * FROM information_schema.processlist; +SET SESSION max_session_mem_used = DEFAULT; + + +--echo # +--echo # End of 10.2 tests +--echo # diff --git a/mysql-test/main/func_json_notembedded.result b/mysql-test/main/func_json_notembedded.result index be879dfc9d6..756d2e85f7c 100644 --- a/mysql-test/main/func_json_notembedded.result +++ b/mysql-test/main/func_json_notembedded.result @@ -4,11 +4,11 @@ connect u,localhost,root; # MDEV-24909 JSON functions don't respect KILL QUERY / max_statement_time limit # set group_concat_max_len= 4294967295; -set @obj=concat_ws('','{', repeat('"a":"b",', 125000000/2), '"c":"d"}'); -set @arr=concat_ws('','[', repeat('1234567,', 125000000/2), '2345678]'); +set @obj=concat_ws('','{', repeat('"a":"b",', 1250000/2), '"c":"d"}'); +set @arr=concat_ws('','[', repeat('1234567,', 1250000/2), '2345678]'); select length(@obj), length(@arr); length(@obj) length(@arr) -500000009 500000009 +5000009 5000009 set max_statement_time=0.0001; select json_array_append(@arr, '$[0]', 1); ERROR 70100: Query execution was interrupted (max_statement_time exceeded) diff --git a/mysql-test/main/func_json_notembedded.test 
b/mysql-test/main/func_json_notembedded.test index 328d9974c77..b33615060b4 100644 --- a/mysql-test/main/func_json_notembedded.test +++ b/mysql-test/main/func_json_notembedded.test @@ -9,8 +9,8 @@ connect u,localhost,root; --echo # set group_concat_max_len= 4294967295; -set @obj=concat_ws('','{', repeat('"a":"b",', 125000000/2), '"c":"d"}'); -set @arr=concat_ws('','[', repeat('1234567,', 125000000/2), '2345678]'); +set @obj=concat_ws('','{', repeat('"a":"b",', 1250000/2), '"c":"d"}'); +set @arr=concat_ws('','[', repeat('1234567,', 1250000/2), '2345678]'); select length(@obj), length(@arr); set max_statement_time=0.0001; diff --git a/mysql-test/main/partition_not_blackhole.result b/mysql-test/main/partition_not_blackhole.result index 6cb8dea80c8..9753cd2bae2 100644 --- a/mysql-test/main/partition_not_blackhole.result +++ b/mysql-test/main/partition_not_blackhole.result @@ -9,7 +9,7 @@ SHOW TABLES; Tables_in_test t1 SHOW CREATE TABLE t1; -ERROR HY000: Failed to read from the .par file +ERROR HY000: Incorrect information in file: './test/t1.frm' DROP TABLE t1; ERROR HY000: Got error 175 "File too short; Expected more data in file" from storage engine partition t1.frm diff --git a/mysql-test/main/partition_not_blackhole.test b/mysql-test/main/partition_not_blackhole.test index d9e653b5252..fe7452432b2 100644 --- a/mysql-test/main/partition_not_blackhole.test +++ b/mysql-test/main/partition_not_blackhole.test @@ -17,7 +17,7 @@ let $MYSQLD_DATADIR= `SELECT @@datadir`; --copy_file std_data/parts/t1_blackhole.par $MYSQLD_DATADIR/test/t1.par SHOW TABLES; --replace_result $MYSQLD_DATADIR ./ ---error ER_FAILED_READ_FROM_PAR_FILE +--error ER_NOT_FORM_FILE SHOW CREATE TABLE t1; # The replace is needed for Solaris diff --git a/mysql-test/main/truncate_notembedded.result b/mysql-test/main/truncate_notembedded.result index cf6498427d0..e8bf6c2575b 100644 --- a/mysql-test/main/truncate_notembedded.result +++ b/mysql-test/main/truncate_notembedded.result @@ -13,7 +13,7 @@ a UNLOCK TABLES; connection con1; TRUNCATE TABLE t1; -ERROR HY000: The MariaDB server is running with the --max-thread-mem-used=8192 option so it cannot execute this statement +ERROR HY000: The MariaDB server is running with the --max-session-mem-used=8192 option so it cannot execute this statement disconnect con1; connection default; DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_kill_applier.result b/mysql-test/suite/galera/r/galera_kill_applier.result index 78a91d2638f..a47f486b5fb 100644 --- a/mysql-test/suite/galera/r/galera_kill_applier.result +++ b/mysql-test/suite/galera/r/galera_kill_applier.result @@ -5,9 +5,13 @@ SELECT @@wsrep_slave_threads; @@wsrep_slave_threads 1 SET GLOBAL wsrep_slave_threads=2; +KILL ID; Got one of the listed errors +KILL QUERY ID; Got one of the listed errors +KILL ID; Got one of the listed errors +KILL QUERY ID; Got one of the listed errors SET GLOBAL wsrep_slave_threads=DEFAULT; connection node_1; diff --git a/mysql-test/suite/galera/t/galera_kill_applier.test b/mysql-test/suite/galera/t/galera_kill_applier.test index 3a285822613..88ec55ed0c1 100644 --- a/mysql-test/suite/galera/t/galera_kill_applier.test +++ b/mysql-test/suite/galera/t/galera_kill_applier.test @@ -16,21 +16,23 @@ SET GLOBAL wsrep_slave_threads=2; --let $applier_thread = `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'wsrep applier idle' LIMIT 1` ---disable_query_log +--replace_result $applier_thread ID --error ER_KILL_DENIED_ERROR,ER_KILL_DENIED_ERROR --eval KILL $applier_thread 
+--replace_result $applier_thread ID --error ER_KILL_DENIED_ERROR,ER_KILL_DENIED_ERROR --eval KILL QUERY $applier_thread --let $aborter_thread = `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'wsrep aborter idle' LIMIT 1` +--replace_result $aborter_thread ID --error ER_KILL_DENIED_ERROR,ER_KILL_DENIED_ERROR --eval KILL $aborter_thread +--replace_result $aborter_thread ID --error ER_KILL_DENIED_ERROR,ER_KILL_DENIED_ERROR --eval KILL QUERY $aborter_thread ---enable_query_log SET GLOBAL wsrep_slave_threads=DEFAULT; diff --git a/mysql-test/suite/gcol/inc/gcol_supported_sql_funcs_main.inc b/mysql-test/suite/gcol/inc/gcol_supported_sql_funcs_main.inc index 88268ddd6c4..3f48f86ce7c 100644 --- a/mysql-test/suite/gcol/inc/gcol_supported_sql_funcs_main.inc +++ b/mysql-test/suite/gcol/inc/gcol_supported_sql_funcs_main.inc @@ -12,6 +12,9 @@ # Change Date: # # Change: # ################################################################################ + +--source include/have_des.inc + set time_zone="+03:00"; --echo # --echo # NUMERIC FUNCTIONS diff --git a/mysql-test/suite/parts/r/partition_alter_innodb.result b/mysql-test/suite/parts/r/partition_alter_innodb.result index f3921a1db26..fad8434989f 100644 --- a/mysql-test/suite/parts/r/partition_alter_innodb.result +++ b/mysql-test/suite/parts/r/partition_alter_innodb.result @@ -48,3 +48,11 @@ alter table t1 add partition (partition p0 values less than (20)); ERROR HY000: Duplicate partition name p0 alter table t1 add partition (partition p1 values less than (20)) /* comment */; drop table t1; +# +# MDEV-28079 Shutdown hangs after altering innodb partition fts table +# +CREATE TABLE t1(f1 INT, f2 CHAR(100))ENGINE=InnoDB PARTITION BY HASH(f1) PARTITIONS 2; +ALTER TABLE t1 ADD FULLTEXT(f2); +InnoDB 0 transactions not purged +DROP TABLE t1; +# End of 10.6 tests diff --git a/mysql-test/suite/parts/t/partition_alter_innodb.test b/mysql-test/suite/parts/t/partition_alter_innodb.test index 4ea3a0da88c..844b2084531 100644 --- a/mysql-test/suite/parts/t/partition_alter_innodb.test +++ b/mysql-test/suite/parts/t/partition_alter_innodb.test @@ -9,3 +9,11 @@ SET GLOBAL innodb_read_only_compressed=OFF; --disable_query_log SET GLOBAL innodb_read_only_compressed=@save_innodb_read_only_compressed; --enable_query_log +--echo # +--echo # MDEV-28079 Shutdown hangs after altering innodb partition fts table +--echo # +CREATE TABLE t1(f1 INT, f2 CHAR(100))ENGINE=InnoDB PARTITION BY HASH(f1) PARTITIONS 2; +ALTER TABLE t1 ADD FULLTEXT(f2); +--source ../innodb/include/wait_all_purged.inc +DROP TABLE t1; +--echo # End of 10.6 tests diff --git a/mysql-test/suite/rpl/r/rpl_circular_semi_sync.result b/mysql-test/suite/rpl/r/rpl_circular_semi_sync.result new file mode 100644 index 00000000000..dcced9833ca --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_circular_semi_sync.result @@ -0,0 +1,76 @@ +include/master-slave.inc +[connection master] +# Master server_1 and Slave server_2 initialiation ... 
+connection server_2; +include/stop_slave.inc +connection server_1; +set @@sql_log_bin = off; +call mtr.add_suppression("Slave: An attempt was made to binlog GTID 10-1-1 which would create an out-of-order sequence number with existing GTID"); +set @@sql_log_bin = on; +RESET MASTER; +set @@session.gtid_domain_id=10; +set @@global.rpl_semi_sync_master_enabled = 1; +set @@global.rpl_semi_sync_master_wait_point=AFTER_SYNC; +connection server_2; +RESET MASTER; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +set @@session.gtid_domain_id=20; +set @@global.rpl_semi_sync_slave_enabled = 1; +# a 1948 warning is expected +set @@global.gtid_slave_pos = ""; +Warnings: +Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-1. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos +CHANGE MASTER TO master_use_gtid= slave_pos; +include/start_slave.inc +# ... server_1 -> server_2 is set up +connection server_1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=Innodb; +INSERT INTO t1 VALUES (1); +connection server_2; +# Circular configuration server_2 -> server_1 initialiation ... +connection server_1; +# A. ... first when server_1 is in gtid strict mode... +set @@global.gtid_strict_mode = true; +set @@global.rpl_semi_sync_slave_enabled = 1; +CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_2, master_user='root', master_use_gtid=SLAVE_POS; +# ... only for it to fail 'cos if its inconsistent (empty) slave's gtid state: +SELECT @@global.gtid_slave_pos; +@@global.gtid_slave_pos + +START SLAVE; +include/wait_for_slave_sql_error.inc [errno=1950] +# B. ... Resume on the circular setup with the server_id now in the non-strict mode ... +set @@global.gtid_strict_mode = false; +include/start_slave.inc +# ... to have succeeded. +connection server_2; +INSERT INTO t1 VALUES (2); +connection server_1; +INSERT INTO t1 VALUES (3); +connection server_2; +# The gtid states on server_2 must be equal to ... +SHOW VARIABLES LIKE 'gtid_binlog_pos'; +Variable_name Value +gtid_binlog_pos 0-2-1,10-1-3,20-2-1 +SHOW VARIABLES LIKE 'gtid_slave_pos'; +Variable_name Value +gtid_slave_pos 0-2-1,10-1-3,20-2-1 +connection server_1; +# ... 
the gtid states on server_1 +SHOW VARIABLES LIKE 'gtid_slave_pos'; +Variable_name Value +gtid_slave_pos 0-2-1,10-1-3,20-2-1 +SHOW VARIABLES LIKE 'gtid_binlog_pos'; +Variable_name Value +gtid_binlog_pos 0-2-1,10-1-3,20-2-1 +# Cleanup +connection server_1; +include/stop_slave.inc +set @@global.rpl_semi_sync_master_enabled = default; +set @@global.rpl_semi_sync_slave_enabled = default; +set @@global.rpl_semi_sync_master_wait_point=default; +DROP TABLE t1; +connection server_2; +set @@global.rpl_semi_sync_master_enabled = default; +set @@global.rpl_semi_sync_slave_enabled = default; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_circular_semi_sync.cnf b/mysql-test/suite/rpl/t/rpl_circular_semi_sync.cnf new file mode 100644 index 00000000000..be39fea91d8 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_circular_semi_sync.cnf @@ -0,0 +1,11 @@ +!include suite/rpl/rpl_1slave_base.cnf +!include include/default_client.cnf + + +[mysqld.1] +log-slave-updates +sync-binlog=1 + +[mysqld.2] +log-slave-updates +sync-binlog=1 diff --git a/mysql-test/suite/rpl/t/rpl_circular_semi_sync.test b/mysql-test/suite/rpl/t/rpl_circular_semi_sync.test new file mode 100644 index 00000000000..51fa5a242ea --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_circular_semi_sync.test @@ -0,0 +1,115 @@ +# ==== References ==== +# +# MDEV-27760 event may non stop replicate in circular semisync setup +# +--source include/have_innodb.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +# The following tests prove +# A. out-of-order gtid error when the stict gtid mode semisync slave +# receives the same server-id gtid event inconsistent +# (rpl_semi_sync_fail_over tests the consistent case) with its state; +# B. in the non-strict mode the same server-id events remains ignored +# by default as usual. +# +--echo # Master server_1 and Slave server_2 initialiation ... +--connection server_2 +--source include/stop_slave.inc + +# Initial master +--connection server_1 +set @@sql_log_bin = off; +call mtr.add_suppression("Slave: An attempt was made to binlog GTID 10-1-1 which would create an out-of-order sequence number with existing GTID"); +set @@sql_log_bin = on; + +RESET MASTER; + +set @@session.gtid_domain_id=10; + +set @@global.rpl_semi_sync_master_enabled = 1; +set @@global.rpl_semi_sync_master_wait_point=AFTER_SYNC; + +--connection server_2 +RESET MASTER; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; + +set @@session.gtid_domain_id=20; + +set @@global.rpl_semi_sync_slave_enabled = 1; +--echo # a 1948 warning is expected +set @@global.gtid_slave_pos = ""; +CHANGE MASTER TO master_use_gtid= slave_pos; +--source include/start_slave.inc +--echo # ... server_1 -> server_2 is set up + +--connection server_1 +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=Innodb; +INSERT INTO t1 VALUES (1); +--save_master_pos + +--connection server_2 +--sync_with_master + +--echo # Circular configuration server_2 -> server_1 initialiation ... +--connection server_1 +--echo # A. ... first when server_1 is in gtid strict mode... +set @@global.gtid_strict_mode = true; +set @@global.rpl_semi_sync_slave_enabled = 1; + +evalp CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_2, master_user='root', master_use_gtid=SLAVE_POS; + +--echo # ... only for it to fail 'cos if its inconsistent (empty) slave's gtid state: +SELECT @@global.gtid_slave_pos; +START SLAVE; +# ER_GTID_STRICT_OUT_OF_ORDER +--let $slave_sql_errno = 1950 +--source include/wait_for_slave_sql_error.inc + +--echo # B. ... 
Resume on the circular setup with the server_id now in the non-strict mode ... +set @@global.gtid_strict_mode = false; +--source include/start_slave.inc + +--echo # ... to have succeeded. + +--connection server_2 +INSERT INTO t1 VALUES (2); +--save_master_pos + +--connection server_1 +--sync_with_master + +INSERT INTO t1 VALUES (3); +--save_master_pos + +--connection server_2 +--sync_with_master +--echo # The gtid states on server_2 must be equal to ... +--let $wait_condition=select @@gtid_slave_pos=@@gtid_binlog_pos +--source include/wait_condition.inc +SHOW VARIABLES LIKE 'gtid_binlog_pos'; +SHOW VARIABLES LIKE 'gtid_slave_pos'; + +--connection server_1 +--echo # ... the gtid states on server_1 +--let $wait_condition=select @@gtid_slave_pos=@@gtid_binlog_pos +--source include/wait_condition.inc +SHOW VARIABLES LIKE 'gtid_slave_pos'; +SHOW VARIABLES LIKE 'gtid_binlog_pos'; + +--echo # Cleanup +--connection server_1 +--source include/stop_slave.inc +set @@global.rpl_semi_sync_master_enabled = default; +set @@global.rpl_semi_sync_slave_enabled = default; +set @@global.rpl_semi_sync_master_wait_point=default; + +DROP TABLE t1; +--save_master_pos + +--connection server_2 +--sync_with_master +set @@global.rpl_semi_sync_master_enabled = default; +set @@global.rpl_semi_sync_slave_enabled = default; + +--source include/rpl_end.inc diff --git a/scripts/wsrep_sst_common.sh b/scripts/wsrep_sst_common.sh index 084854c8dcb..cc5db9e9e01 100644 --- a/scripts/wsrep_sst_common.sh +++ b/scripts/wsrep_sst_common.sh @@ -1214,16 +1214,6 @@ verify_ca_matches_cert() return fi - local readable=1; [ ! -r "$cert" ] && readable=0 - [ -n "$ca" ] && [ ! -r "$ca" ] && readable=0 - [ -n "$cap" ] && [ ! -r "$cap" ] && readable=0 - - if [ readable -eq 0 ]; then - wsrep_log_error \ - "Both PEM file and CA file (or path) must be readable" - exit 22 - fi - local not_match=0 local errmsg errmsg=$("$OPENSSL_BINARY" verify -verbose \ diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 06be234fb13..2f0891e5914 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -3193,11 +3193,12 @@ err1: @retval true Failure */ -bool ha_partition::setup_engine_array(MEM_ROOT *mem_root) +bool ha_partition::setup_engine_array(MEM_ROOT *mem_root, + handlerton* first_engine) { uint i; uchar *buff; - handlerton **engine_array, *first_engine; + handlerton **engine_array; enum legacy_db_type db_type, first_db_type; DBUG_ASSERT(!m_file); @@ -3207,11 +3208,8 @@ bool ha_partition::setup_engine_array(MEM_ROOT *mem_root) DBUG_RETURN(true); buff= (uchar *) (m_file_buffer + PAR_ENGINES_OFFSET); - first_db_type= (enum legacy_db_type) buff[0]; - first_engine= ha_resolve_by_legacy_type(ha_thd(), first_db_type); - if (!first_engine) - goto err; + first_db_type= (enum legacy_db_type) buff[0]; if (!(m_engine_array= (plugin_ref*) alloc_root(&m_mem_root, m_tot_parts * sizeof(plugin_ref)))) goto err; @@ -3252,6 +3250,75 @@ err: } +handlerton *ha_partition::get_def_part_engine(const char *name) +{ + if (table_share) + { + if (table_share->default_part_plugin) + return plugin_data(table_share->default_part_plugin, handlerton *); + } + else + { + // DROP TABLE, for example + char buff[FN_REFLEN]; + File file; + MY_STAT state; + uchar *frm_image= 0; + handlerton *hton= 0; + bool use_legacy_type= false; + + fn_format(buff, name, "", reg_ext, MY_APPEND_EXT); + + file= mysql_file_open(key_file_frm, buff, O_RDONLY | O_SHARE, MYF(0)); + if (file < 0) + return NULL; + + if (mysql_file_fstat(file, &state, MYF(MY_WME))) + goto err; + if (state.st_size <= 64) + 
goto err; + if (!(frm_image= (uchar*)my_malloc(key_memory_Partition_share, + state.st_size, MYF(MY_WME)))) + goto err; + if (mysql_file_read(file, frm_image, state.st_size, MYF(MY_NABP))) + goto err; + + if (frm_image[64] != '/') + { + const uchar *e2= frm_image + 64; + const uchar *e2end = e2 + uint2korr(frm_image + 4); + if (e2end > frm_image + state.st_size) + goto err; + while (e2 + 3 < e2end) + { + uchar type= *e2++; + size_t length= extra2_read_len(&e2, e2end); + if (!length) + goto err; + if (type == EXTRA2_DEFAULT_PART_ENGINE) + { + LEX_CSTRING name= { (char*)e2, length }; + plugin_ref plugin= ha_resolve_by_name(ha_thd(), &name, false); + if (plugin) + hton= plugin_data(plugin, handlerton *); + goto err; + } + e2+= length; + } + } + use_legacy_type= true; +err: + my_free(frm_image); + mysql_file_close(file, MYF(0)); + if (!use_legacy_type) + return hton; + } + + return ha_resolve_by_legacy_type(ha_thd(), + (enum legacy_db_type)m_file_buffer[PAR_ENGINES_OFFSET]); +} + + /** Get info about partition engines and their names from the .par file @@ -3279,7 +3346,11 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root, if (read_par_file(name)) DBUG_RETURN(true); - if (!is_clone && setup_engine_array(mem_root)) + handlerton *default_engine= get_def_part_engine(name); + if (!default_engine) + DBUG_RETURN(true); + + if (!is_clone && setup_engine_array(mem_root, default_engine)) DBUG_RETURN(true); DBUG_RETURN(false); diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 14f68b36c0b..dd14cd7a6d4 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -590,8 +590,9 @@ private: And one method to read it in. */ bool create_handler_file(const char *name); - bool setup_engine_array(MEM_ROOT *mem_root); + bool setup_engine_array(MEM_ROOT *mem_root, handlerton *first_engine); bool read_par_file(const char *name); + handlerton *get_def_part_engine(const char *name); bool get_from_handler_file(const char *name, MEM_ROOT *mem_root, bool is_clone); bool new_handlers_from_part_info(MEM_ROOT *mem_root); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 3ced6803a77..90bb9a74be6 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -3670,13 +3670,18 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific) /* Ensure we don't get called here again */ char buf[50], *buf2; thd->set_killed(KILL_QUERY); - my_snprintf(buf, sizeof(buf), "--max-thread-mem-used=%llu", + my_snprintf(buf, sizeof(buf), "--max-session-mem-used=%llu", thd->variables.max_mem_used); if ((buf2= (char*) thd->alloc(256))) { my_snprintf(buf2, 256, ER_THD(thd, ER_OPTION_PREVENTS_STATEMENT), buf); thd->set_killed(KILL_QUERY, ER_OPTION_PREVENTS_STATEMENT, buf2); } + else + { + thd->set_killed(KILL_QUERY, ER_OPTION_PREVENTS_STATEMENT, + "--max-session-mem-used"); + } } DBUG_ASSERT((longlong) thd->status_var.local_memory_used >= 0 || !debug_assert_on_not_freed_memory); diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index cb191ee6455..453ce5be4e0 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -2234,30 +2234,30 @@ ER_NO_SUCH_THREAD swe "Finns ingen tråd med id %lu" ukr "Невідомий ідентифікатор гілки: %lu" ER_KILL_DENIED_ERROR - cze "Nejste vlastníkem threadu %lu" - dan "Du er ikke ejer af tråden %lu" - nla "U bent geen bezitter van thread %lu" - eng "You are not owner of thread %lu" - est "Ei ole lõime %lu omanik" - fre "Vous n'êtes pas propriétaire de la tâche no: %lu" - ger "Sie sind nicht Eigentümer von Thread %lu" - greek "Δεν είσθε owner του 
thread %lu" - hindi "आप थ्रेड %lu के OWNER नहीं हैं" - hun "A %lu thread-nek mas a tulajdonosa" - ita "Utente non proprietario del thread %lu" - jpn "スレッド %lu のオーナーではありません。" - kor "쓰레드(Thread) %lu의 소유자가 아닙니다." - nor "Du er ikke eier av tråden %lu" - norwegian-ny "Du er ikkje eigar av tråd %lu" - pol "Nie jeste? wła?cicielem w?tku %lu" - por "Você não é proprietário da 'thread' %lu" - rum "Nu sinteti proprietarul threadului %lu" - rus "Вы не являетесь владельцем потока %lu" - serbian "Vi niste vlasnik thread-a %lu" - slo "Nie ste vlastníkom vlákna %lu" - spa "No eres el propietario del hilo (thread) %lu" - swe "Du är inte ägare till tråd %lu" - ukr "Ви не володар гілки %lu" + cze "Nejste vlastníkem threadu %lld" + dan "Du er ikke ejer af tråden %lld" + nla "U bent geen bezitter van thread %lld" + eng "You are not owner of thread %lld" + est "Ei ole lõime %lld omanik" + fre "Vous n'êtes pas propriétaire de la tâche no: %lld" + ger "Sie sind nicht Eigentümer von Thread %lld" + greek "Δεν είσθε owner του thread %lld" + hindi "आप थ्रेड %lld के OWNER नहीं हैं" + hun "A %lld thread-nek mas a tulajdonosa" + ita "Utente non proprietario del thread %lld" + jpn "スレッド %lld のオーナーではありません。" + kor "쓰레드(Thread) %lld의 소유자가 아닙니다." + nor "Du er ikke eier av tråden %lld" + norwegian-ny "Du er ikkje eigar av tråd %lld" + pol "Nie jeste? wła?cicielem w?tku %lld" + por "Você não é proprietário da 'thread' %lld" + rum "Nu sinteti proprietarul threadului %lld" + rus "Вы не являетесь владельцем потока %lld" + serbian "Vi niste vlasnik thread-a %lld" + slo "Nie ste vlastníkom vlákna %lld" + spa "No eres el propietario del hilo (thread) %lld" + swe "Du är inte ägare till tråd %lld" + ukr "Ви не володар гілки %lld" ER_NO_TABLES_USED cze "Nejsou použity žádné tabulky" dan "Ingen tabeller i brug" diff --git a/sql/slave.cc b/sql/slave.cc index e25ad565b07..91adb9fceb9 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -6186,13 +6186,13 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) bool is_rows_event= false; /* The flag has replicate_same_server_id semantics and is raised to accept - a same-server-id event on the semisync slave, for both the gtid and legacy - connection modes. - Such events can appear as result of this server recovery so the event - was created there and replicated elsewhere right before the crash. At recovery - it could be evicted from the server's binlog. - */ - bool do_accept_own_server_id= false; + a same-server-id event group by the gtid strict mode semisync slave. + Own server-id events can appear as result of this server crash-recovery: + the transaction was created on this server then being master, got replicated + elsewhere right before the crash before commit; + finally at recovery the transaction gets evicted from the server's binlog. 
+ */ + bool do_accept_own_server_id; /* FD_q must have been prepared for the first R_a event inside get_master_version_and_clock() @@ -6281,6 +6281,8 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) dbug_rows_event_count = 0; };); #endif + s_id= uint4korr(buf + SERVER_ID_OFFSET); + mysql_mutex_lock(&mi->data_lock); switch (buf[EVENT_TYPE_OFFSET]) { @@ -6722,6 +6724,7 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) ++mi->events_queued_since_last_gtid; inc_pos= event_len; + } break; /* @@ -6864,6 +6867,10 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) break; } + do_accept_own_server_id= (s_id == global_system_variables.server_id + && rpl_semi_sync_slave_enabled && opt_gtid_strict_mode + && mi->using_gtid != Master_info::USE_GTID_NO); + /* Integrity of Rows- event group check. A sequence of Rows- events must end with STMT_END_F flagged one. @@ -6909,7 +6916,6 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) */ mysql_mutex_lock(log_lock); - s_id= uint4korr(buf + SERVER_ID_OFFSET); /* Write the event to the relay log, unless we reconnected in the middle of an event group and now need to skip the initial part of the group that @@ -6955,7 +6961,7 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) else if ((s_id == global_system_variables.server_id && !(mi->rli.replicate_same_server_id || - (do_accept_own_server_id= rpl_semi_sync_slave_enabled))) || + do_accept_own_server_id)) || event_that_should_be_ignored(buf) || /* the following conjunction deals with IGNORE_SERVER_IDS, if set diff --git a/sql/sql_class.cc b/sql/sql_class.cc index b40c00356eb..e053be8acde 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -2353,6 +2353,58 @@ bool THD::convert_string(LEX_STRING *to, CHARSET_INFO *to_cs, } +/* + Reinterpret a binary string to a character string + + @param[OUT] to The result will be written here, + either the original string as is, + or a newly alloced fixed string with + some zero bytes prepended. + @param cs The destination character set + @param str The binary string + @param length The length of the binary string + + @return false on success + @return true on error +*/ + +bool THD::reinterpret_string_from_binary(LEX_CSTRING *to, CHARSET_INFO *cs, + const char *str, size_t length) +{ + /* + When reinterpreting from binary to tricky character sets like + UCS2, UTF16, UTF32, we may need to prepend some zero bytes. + This is possible in scenarios like this: + SET COLLATION_CONNECTION=utf32_general_ci, CHARACTER_SET_CLIENT=binary; + This code is similar to String::copy_aligned(). + */ + size_t incomplete= length % cs->mbminlen; // Bytes in an incomplete character + if (incomplete) + { + size_t zeros= cs->mbminlen - incomplete; + size_t aligned_length= zeros + length; + char *dst= (char*) alloc(aligned_length + 1); + if (!dst) + { + to->str= NULL; // Safety + to->length= 0; + return true; + } + bzero(dst, zeros); + memcpy(dst + zeros, str, length); + dst[aligned_length]= '\0'; + to->str= dst; + to->length= aligned_length; + } + else + { + to->str= str; + to->length= length; + } + return check_string_for_wellformedness(to->str, to->length, cs); +} + + /* Convert a string between two character sets. dstcs and srccs cannot be &my_charset_bin. 
diff --git a/sql/sql_class.h b/sql/sql_class.h index a8c37a23fcd..1ce5b18060f 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -4155,6 +4155,8 @@ public: bool convert_string(LEX_STRING *to, CHARSET_INFO *to_cs, const char *from, size_t from_length, CHARSET_INFO *from_cs); + bool reinterpret_string_from_binary(LEX_CSTRING *to, CHARSET_INFO *to_cs, + const char *from, size_t from_length); bool convert_string(LEX_CSTRING *to, CHARSET_INFO *to_cs, const char *from, size_t from_length, CHARSET_INFO *from_cs) @@ -4171,6 +4173,8 @@ public: { if (!simple_copy_is_possible) return unlikely(convert_string(to, tocs, from->str, from->length, fromcs)); + if (fromcs == &my_charset_bin) + return reinterpret_string_from_binary(to, tocs, from->str, from->length); *to= *from; return false; } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index fe2307a2e91..77c55e3daf3 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -9391,15 +9391,17 @@ sql_kill_user(THD *thd, LEX_USER *user, killed_state state) { uint error; ha_rows rows; - if (likely(!(error= kill_threads_for_user(thd, user, state, &rows)))) - my_ok(thd, rows); - else + switch (error= kill_threads_for_user(thd, user, state, &rows)) { - /* - This is probably ER_OUT_OF_RESOURCES, but in the future we may - want to write the name of the user we tried to kill - */ - my_error(error, MYF(0), user->host.str, user->user.str); + case 0: + my_ok(thd, rows); + break; + case ER_KILL_DENIED_ERROR: + my_error(error, MYF(0), (long long) thd->thread_id); + break; + case ER_OUT_OF_RESOURCES: + default: + my_error(error, MYF(0)); } } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 398a635c927..d5c1250fbda 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1999,17 +1999,13 @@ bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db, const char *table_path) { char path[FN_REFLEN + 1]; + const size_t pathmax = sizeof(path) - 1 - reg_ext_length; int error= 0; DBUG_ENTER("quick_rm_table"); size_t path_length= table_path ? 
- (strxnmov(path, sizeof(path) - 1, table_path, reg_ext, NullS) - path) : - build_table_filename(path, sizeof(path)-1, db->str, table_name->str, - reg_ext, flags); - if (!(flags & NO_FRM_RENAME)) - if (mysql_file_delete(key_file_frm, path, MYF(0))) - error= 1; /* purecov: inspected */ - path[path_length - reg_ext_length]= '\0'; // Remove reg_ext + (strxnmov(path, pathmax, table_path, NullS) - path) : + build_table_filename(path, pathmax, db->str, table_name->str, "", flags); if ((flags & (NO_HA_TABLE | NO_PAR_TABLE)) == NO_HA_TABLE) { handler *file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, base); @@ -2019,8 +2015,14 @@ bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db, delete file; } if (!(flags & (FRM_ONLY|NO_HA_TABLE))) - if (ha_delete_table(thd, base, path, db, table_name, 0) > 0) - error= 1; + error|= ha_delete_table(thd, base, path, db, table_name, 0) > 0; + + if (!(flags & NO_FRM_RENAME)) + { + memcpy(path + path_length, reg_ext, reg_ext_length + 1); + if (mysql_file_delete(key_file_frm, path, MYF(0))) + error= 1; /* purecov: inspected */ + } if (likely(error == 0)) { diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 37cc94de3bf..f06e3fe03b6 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -6691,7 +6691,7 @@ static Sys_var_ulong Sys_log_tc_size( DEFAULT(my_getpagesize() * 6), BLOCK_SIZE(my_getpagesize())); #endif -static Sys_var_ulonglong Sys_max_thread_mem( +static Sys_var_ulonglong Sys_max_session_mem_used( "max_session_mem_used", "Amount of memory a single user session " "is allowed to allocate. This limits the value of the " "session variable MEM_USED", SESSION_VAR(max_mem_used), diff --git a/sql/table.cc b/sql/table.cc index 4e858cdb24c..19fc912e437 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1553,21 +1553,6 @@ bool TABLE_SHARE::init_period_from_extra2(period_info_t *period, } -static size_t extra2_read_len(const uchar **extra2, const uchar *extra2_end) -{ - size_t length= *(*extra2)++; - if (length) - return length; - - if ((*extra2) + 2 >= extra2_end) - return 0; - length= uint2korr(*extra2); - (*extra2)+= 2; - if (length < 256 || *extra2 + length > extra2_end) - return 0; - return length; -} - static bool read_extra2_section_once(const uchar *extra2, size_t len, LEX_CUSTRING *section) { @@ -1867,7 +1852,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if (frm_image[61] && !share->default_part_plugin) { enum legacy_db_type db_type= (enum legacy_db_type) (uint) frm_image[61]; - share->default_part_plugin= ha_lock_engine(NULL, ha_checktype(thd, db_type)); + share->default_part_plugin= ha_lock_engine(NULL, ha_checktype(thd, db_type, 1)); if (!share->default_part_plugin) goto err; } diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc index 981958abdb2..f3264a4c809 100644 --- a/sql/temporary_tables.cc +++ b/sql/temporary_tables.cc @@ -697,9 +697,7 @@ bool THD::rm_temporary_table(handlerton *base, const char *path) char frm_path[FN_REFLEN + 1]; strxnmov(frm_path, sizeof(frm_path) - 1, path, reg_ext, NullS); - if (mysql_file_delete(key_file_frm, frm_path, - MYF(MY_WME | MY_IGNORE_ENOENT))) - error= true; + if (base->drop_table(base, path) > 0) { error= true; @@ -707,6 +705,10 @@ bool THD::rm_temporary_table(handlerton *base, const char *path) path, my_errno); } + if (mysql_file_delete(key_file_frm, frm_path, + MYF(MY_WME | MY_IGNORE_ENOENT))) + error= true; + DBUG_RETURN(error); } diff --git a/sql/unireg.h b/sql/unireg.h index 0edb0a50ebd..01c1a5a284e 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -192,6 
+192,22 @@ enum extra2_index_flags { EXTRA2_IGNORED_KEY }; + +static inline size_t extra2_read_len(const uchar **extra2, const uchar *end) +{ + size_t length= *(*extra2)++; + if (length) + return length; + + if ((*extra2) + 2 >= end) + return 0; + length= uint2korr(*extra2); + (*extra2)+= 2; + if (length < 256 || *extra2 + length > end) + return 0; + return length; +} + LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING &table, HA_CREATE_INFO *create_info, List &create_fields, diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc index 727efe6e8ac..48bc2ca3ca8 100644 --- a/storage/innobase/dict/dict0load.cc +++ b/storage/innobase/dict/dict0load.cc @@ -2072,74 +2072,26 @@ const char *dict_load_table_low(const span &name, return(NULL); } -/********************************************************************//** -Using the table->heap, copy the null-terminated filepath into -table->data_dir_path and replace the 'databasename/tablename.ibd' -portion with 'tablename'. -This allows SHOW CREATE TABLE to return the correct DATA DIRECTORY path. -Make this data directory path only if it has not yet been saved. */ -static -void -dict_save_data_dir_path( -/*====================*/ - dict_table_t* table, /*!< in/out: table */ - const char* filepath) /*!< in: filepath of tablespace */ -{ - ut_ad(dict_sys.frozen()); - ut_a(DICT_TF_HAS_DATA_DIR(table->flags)); - - ut_a(!table->data_dir_path); - ut_a(filepath); - - /* Be sure this filepath is not the default filepath. */ - if (char* default_filepath = fil_make_filepath(nullptr, table->name, - IBD, false)) { - if (0 != strcmp(filepath, default_filepath)) { - ulint pathlen = strlen(filepath); - ut_a(pathlen < OS_FILE_MAX_PATH); - ut_a(0 == strcmp(filepath + pathlen - 4, DOT_IBD)); - - table->data_dir_path = mem_heap_strdup( - table->heap, filepath); - os_file_make_data_dir_path(table->data_dir_path); - } - - ut_free(default_filepath); - } -} - /** Make sure the data_file_name is saved in dict_table_t if needed. -@param[in,out] table Table object -@param[in] dict_locked dict_sys.frozen() */ -void dict_get_and_save_data_dir_path(dict_table_t* table, bool dict_locked) +@param[in,out] table Table object */ +void dict_get_and_save_data_dir_path(dict_table_t *table) { - ut_ad(!table->is_temporary()); - ut_ad(!table->space || table->space->id == table->space_id); + ut_ad(!table->is_temporary()); + ut_ad(!table->space || table->space->id == table->space_id); - if (!table->data_dir_path && table->space_id && table->space) { - if (!dict_locked) { - dict_sys.freeze(SRW_LOCK_CALL); - } - - table->flags |= 1 << DICT_TF_POS_DATA_DIR - & ((1U << DICT_TF_BITS) - 1); - dict_save_data_dir_path(table, - table->space->chain.start->name); - - if (table->data_dir_path == NULL) { - /* Since we did not set the table data_dir_path, - unset the flag. This does not change - SYS_TABLES or FSP_SPACE_FLAGS on the header page - of the tablespace, but it makes dict_table_t - consistent. 
*/ - table->flags &= ~DICT_TF_MASK_DATA_DIR - & ((1U << DICT_TF_BITS) - 1); - } - - if (!dict_locked) { - dict_sys.unfreeze(); - } - } + if (!table->data_dir_path && table->space_id && table->space) + { + const char *filepath= table->space->chain.start->name; + if (strncmp(fil_path_to_mysql_datadir, filepath, + strlen(fil_path_to_mysql_datadir))) + { + table->lock_mutex_lock(); + table->flags|= 1 << DICT_TF_POS_DATA_DIR & ((1U << DICT_TF_BITS) - 1); + table->data_dir_path= mem_heap_strdup(table->heap, filepath); + os_file_make_data_dir_path(table->data_dir_path); + table->lock_mutex_unlock(); + } + } } /** Opens a tablespace for dict_load_table_one() @@ -2193,7 +2145,7 @@ dict_load_tablespace( char* filepath = NULL; if (DICT_TF_HAS_DATA_DIR(table->flags)) { /* This will set table->data_dir_path from fil_system */ - dict_get_and_save_data_dir_path(table, true); + dict_get_and_save_data_dir_path(table); if (table->data_dir_path) { filepath = fil_make_filepath( diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 7302f436918..c8aa6aab35a 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -1567,10 +1567,13 @@ static void fts_table_no_ref_count(const char *table_name) /** Stop the purge thread and check n_ref_count of all auxiliary and common table associated with the fts table. -@param table parent FTS table */ -void purge_sys_t::stop_FTS(const dict_table_t &table) +@param table parent FTS table +@param already_stopped True indicates purge threads were + already stopped*/ +void purge_sys_t::stop_FTS(const dict_table_t &table, bool already_stopped) { - purge_sys.stop_FTS(); + if (!already_stopped) + purge_sys.stop_FTS(); fts_table_t fts_table; char table_name[MAX_FULL_NAME_LEN]; diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index a062f94e75f..a0922bb370e 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -11411,7 +11411,7 @@ ha_innobase::update_create_info( return; } - dict_get_and_save_data_dir_path(m_prebuilt->table, false); + dict_get_and_save_data_dir_path(m_prebuilt->table); if (m_prebuilt->table->data_dir_path) { create_info->data_file_name = m_prebuilt->table->data_dir_path; @@ -13531,29 +13531,26 @@ int ha_innobase::delete_table(const char *name) dict_sys.unfreeze(); } - auto &timeout= THDVAR(thd, lock_wait_timeout); - const auto save_timeout= timeout; - if (table->name.is_temporary()) - timeout= 0; + const bool skip_wait{table->name.is_temporary()}; if (table_stats && index_stats && !strcmp(table_stats->name.m_name, TABLE_STATS_NAME) && !strcmp(index_stats->name.m_name, INDEX_STATS_NAME) && - !(err= lock_table_for_trx(table_stats, trx, LOCK_X))) - err= lock_table_for_trx(index_stats, trx, LOCK_X); + !(err= lock_table_for_trx(table_stats, trx, LOCK_X, skip_wait))) + err= lock_table_for_trx(index_stats, trx, LOCK_X, skip_wait); - if (err != DB_SUCCESS && !timeout) + if (err != DB_SUCCESS && skip_wait) { /* We may skip deleting statistics if we cannot lock the tables, when the table carries a temporary name. 
*/ + ut_ad(err == DB_LOCK_WAIT); + ut_ad(trx->error_state == DB_SUCCESS); err= DB_SUCCESS; dict_table_close(table_stats, false, thd, mdl_table); dict_table_close(index_stats, false, thd, mdl_index); table_stats= nullptr; index_stats= nullptr; } - - timeout= save_timeout; } if (err == DB_SUCCESS) @@ -13826,7 +13823,7 @@ int ha_innobase::truncate() mem_heap_t* heap = mem_heap_create(1000); - dict_get_and_save_data_dir_path(ib_table, false); + dict_get_and_save_data_dir_path(ib_table); info.data_file_name = ib_table->data_dir_path; const char* temp_name = dict_mem_create_temporary_tablename( heap, ib_table->name.m_name, ib_table->id); @@ -14059,17 +14056,15 @@ ha_innobase::rename_table( if (error == DB_SUCCESS && table_stats && index_stats && !strcmp(table_stats->name.m_name, TABLE_STATS_NAME) && !strcmp(index_stats->name.m_name, INDEX_STATS_NAME)) { - auto &timeout = THDVAR(thd, lock_wait_timeout); - const auto save_timeout = timeout; - if (from_temp) { - timeout = 0; - } - error = lock_table_for_trx(table_stats, trx, LOCK_X); + error = lock_table_for_trx(table_stats, trx, LOCK_X, + from_temp); if (error == DB_SUCCESS) { error = lock_table_for_trx(index_stats, trx, - LOCK_X); + LOCK_X, from_temp); } if (error != DB_SUCCESS && from_temp) { + ut_ad(error == DB_LOCK_WAIT); + ut_ad(trx->error_state == DB_SUCCESS); error = DB_SUCCESS; /* We may skip renaming statistics if we cannot lock the tables, when the @@ -14082,7 +14077,6 @@ ha_innobase::rename_table( table_stats = nullptr; index_stats = nullptr; } - timeout = save_timeout; } } diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index ca13b21803c..bcd20c402c0 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -10903,12 +10903,14 @@ ha_innobase::commit_inplace_alter_table( } } + bool already_stopped= false; for (inplace_alter_handler_ctx** pctx = ctx_array; *pctx; pctx++) { auto ctx = static_cast(*pctx); dberr_t error = DB_SUCCESS; if (fts_exist) { - purge_sys.stop_FTS(*ctx->old_table); + purge_sys.stop_FTS(*ctx->old_table, already_stopped); + already_stopped = true; } if (new_clustered && ctx->old_table->fts) { diff --git a/storage/innobase/include/dict0load.h b/storage/innobase/include/dict0load.h index 072773694a9..43e732263fd 100644 --- a/storage/innobase/include/dict0load.h +++ b/storage/innobase/include/dict0load.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2021, MariaDB Corporation. +Copyright (c) 2017, 2022, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -53,9 +53,8 @@ We also scan the biggest space id, and store it to fil_system. */ void dict_check_tablespaces_and_store_max_id(); /** Make sure the data_file_name is saved in dict_table_t if needed. -@param[in,out] table Table object -@param[in] dict_locked dict_sys.frozen() */ -void dict_get_and_save_data_dir_path(dict_table_t* table, bool dict_locked); +@param[in,out] table Table object */ +void dict_get_and_save_data_dir_path(dict_table_t* table); /***********************************************************************//** Loads a table object based on the table id. 
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index fa1ea357fe6..f920ac1ac95 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -3627,52 +3627,50 @@ static void lock_table_dequeue(lock_t *in_lock, bool owns_wait_mutex)
 	}
 }
 
+
 /** Sets a lock on a table based on the given mode.
-@param[in]	table	table to lock
-@param[in,out]	trx	transaction
-@param[in]	mode	LOCK_X or LOCK_S
-@return error code or DB_SUCCESS. */
-dberr_t
-lock_table_for_trx(
-	dict_table_t*	table,
-	trx_t*		trx,
-	enum lock_mode	mode)
+@param table	table to lock
+@param trx	transaction
+@param mode	LOCK_X or LOCK_S
+@param no_wait	whether to skip handling DB_LOCK_WAIT
+@return error code */
+dberr_t lock_table_for_trx(dict_table_t *table, trx_t *trx, lock_mode mode,
                           bool no_wait)
 {
-	mem_heap_t*	heap;
-	que_thr_t*	thr;
-	dberr_t		err;
-	sel_node_t*	node;
-	heap = mem_heap_create(512);
+  mem_heap_t *heap= mem_heap_create(512);
+  sel_node_t *node= sel_node_create(heap);
+  que_thr_t *thr= pars_complete_graph_for_exec(node, trx, heap, nullptr);
+  thr->graph->state= QUE_FORK_ACTIVE;
 
-	node = sel_node_create(heap);
-	thr = pars_complete_graph_for_exec(node, trx, heap, NULL);
-	thr->graph->state = QUE_FORK_ACTIVE;
-
-	/* We use the select query graph as the dummy graph needed
-	in the lock module call */
-
-	thr = static_cast<que_thr_t*>(
-		que_fork_get_first_thr(
-			static_cast<que_fork_t*>(que_node_get_parent(thr))));
+  thr= static_cast<que_thr_t*>
+    (que_fork_get_first_thr(static_cast<que_fork_t*>
+                            (que_node_get_parent(thr))));
 
 run_again:
-	thr->run_node = thr;
-	thr->prev_node = thr->common.parent;
+  thr->run_node= thr;
+  thr->prev_node= thr->common.parent;
+  dberr_t err= lock_table(table, mode, thr);
 
-	err = lock_table(table, mode, thr);
+  switch (err) {
+  case DB_SUCCESS:
+    break;
+  case DB_LOCK_WAIT:
+    if (no_wait)
+    {
+      lock_sys.cancel_lock_wait_for_trx(trx);
+      break;
+    }
+    /* fall through */
+  default:
+    trx->error_state= err;
+    if (row_mysql_handle_errors(&err, trx, thr, nullptr))
+      goto run_again;
+  }
 
-	trx->error_state = err;
+  que_graph_free(thr->graph);
+  trx->op_info= "";
 
-	if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
-		if (row_mysql_handle_errors(&err, trx, thr, NULL)) {
-			goto run_again;
-		}
-	}
-
-	que_graph_free(thr->graph);
-	trx->op_info = "";
-
-	return(err);
+  return err;
 }
 
 /** Exclusively lock the data dictionary tables.
@@ -5639,8 +5637,7 @@ static void lock_cancel_waiting_and_release(lock_t *lock)
   lock_wait_end(trx);
   trx->mutex_unlock();
 }
-#ifdef WITH_WSREP
-TRANSACTIONAL_TARGET
+
 void lock_sys_t::cancel_lock_wait_for_trx(trx_t *trx)
 {
   lock_sys.wr_lock(SRW_LOCK_CALL);
@@ -5654,7 +5651,6 @@ void lock_sys_t::cancel_lock_wait_for_trx(trx_t *trx)
   lock_sys.wr_unlock();
   mysql_mutex_unlock(&lock_sys.wait_mutex);
 }
-#endif /* WITH_WSREP */
 
 /** Cancel a waiting lock request.
 @tparam check_victim whether to check for DB_DEADLOCK
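Note (illustration, not part of the patch): the rewritten lock_table_for_trx() above handles DB_LOCK_WAIT specially when no_wait is set: the pending request is cancelled via lock_sys.cancel_lock_wait_for_trx() and the error is returned instead of suspending the thread, which is what lets the ha_innodb.cc callers skip statistics updates for temporary-named tables. A compilable C++ toy model of that control flow follows; the error codes and helpers are placeholders, not the real lock system.

#include <cstdio>

// Hypothetical error codes and helpers; not the real InnoDB API.
enum dberr_t { DB_SUCCESS, DB_LOCK_WAIT };

static dberr_t request_lock(int attempt)      // pretends the first attempt must wait
{ return attempt == 0 ? DB_LOCK_WAIT : DB_SUCCESS; }

static void cancel_waiting_request()          // stands in for cancel_lock_wait_for_trx()
{ std::printf("lock wait cancelled\n"); }

static dberr_t lock_with_optional_wait(bool no_wait)
{
  int attempt= 0;
run_again:
  dberr_t err= request_lock(attempt++);
  switch (err) {
  case DB_SUCCESS:
    break;
  case DB_LOCK_WAIT:
    if (no_wait)
    {
      cancel_waiting_request();   // give up instead of suspending the thread
      break;                      // caller sees DB_LOCK_WAIT and may skip the work
    }
    /* fall through */
  default:
    goto run_again;               // the real code retries via row_mysql_handle_errors()
  }
  return err;
}

int main()
{
  std::printf("no_wait=true  -> %d\n", lock_with_optional_wait(true));   // 1 (DB_LOCK_WAIT)
  std::printf("no_wait=false -> %d\n", lock_with_optional_wait(false));  // 0 (DB_SUCCESS)
}

With no_wait=false the DB_LOCK_WAIT case falls through to the retry path, matching the row_mysql_handle_errors() loop in the real code.
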
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index ca6c85cf60f..7252d557772 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -3114,7 +3114,7 @@ and apply it to dict_table_t
 static dberr_t handle_instant_metadata(dict_table_t *table,
                                        const row_import &cfg)
 {
-  dict_get_and_save_data_dir_path(table, false);
+  dict_get_and_save_data_dir_path(table);
 
   char *filepath;
   if (DICT_TF_HAS_DATA_DIR(table->flags))
@@ -4149,7 +4149,7 @@ fil_tablespace_iterate(
 			return(DB_CORRUPTION););
 
 	/* Make sure the data_dir_path is set. */
-	dict_get_and_save_data_dir_path(table, false);
+	dict_get_and_save_data_dir_path(table);
 
 	ut_ad(!DICT_TF_HAS_DATA_DIR(table->flags)
 	      || table->data_dir_path);
@@ -4470,7 +4470,7 @@ row_import_for_mysql(
 	/* If the table is stored in a remote tablespace, we need to
 	determine that filepath from the link file and system tables.
 	Find the space ID in SYS_TABLES since this is an ALTER TABLE. */
-	dict_get_and_save_data_dir_path(table, true);
+	dict_get_and_save_data_dir_path(table);
 	ut_ad(!DICT_TF_HAS_DATA_DIR(table->flags)
 	      || table->data_dir_path);
 	const char *data_dir_path = DICT_TF_HAS_DATA_DIR(table->flags)
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index ff7fc3e28db..14d35a5c96b 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -2075,7 +2075,7 @@ srv_get_meta_data_filename(
 	char*	path;
 
 	/* Make sure the data_dir_path is set. */
-	dict_get_and_save_data_dir_path(table, false);
+	dict_get_and_save_data_dir_path(table);
 
 	const char* data_dir_path = DICT_TF_HAS_DATA_DIR(table->flags)
 		? table->data_dir_path : nullptr;
diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c
index 3e166e99b91..981f32370b3 100644
--- a/storage/maria/ma_open.c
+++ b/storage/maria/ma_open.c
@@ -1,5 +1,5 @@
 /* Copyright (C) 2006 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-   Copyright (c) 2009, 2021, MariaDB Corporation Ab
+   Copyright (c) 2009, 2022, MariaDB Corporation Ab
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -271,7 +271,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags,
   char name_buff[FN_REFLEN], org_name[FN_REFLEN], index_name[FN_REFLEN],
        data_name[FN_REFLEN];
   uchar *UNINIT_VAR(disk_cache), *disk_pos, *end_pos;
-  MARIA_HA info, *UNINIT_VAR(m_info), *old_info;
+  MARIA_HA info, *UNINIT_VAR(m_info), *old_info= NULL;
   MARIA_SHARE share_buff,*share;
   double *rec_per_key_part;
   ulong *nulls_per_key_part;
@@ -322,7 +322,6 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags,
   }
 #endif /* WITH_S3_STORAGE_ENGINE */
 
-  old_info= 0;
   if (!internal_table)
     mysql_mutex_lock(&THR_LOCK_maria);
   if ((open_flags & HA_OPEN_COPY) ||