
Merge 10.6 into 10.7

Daniel Black
2022-03-24 18:57:07 +11:00
48 changed files with 739 additions and 338 deletions

View File

@@ -11,26 +11,17 @@
 # Exit immediately on any error
 set -e
 
-source ./VERSION
-
-CODENAME="$(lsb_release -sc)"
-case "${CODENAME}" in
-  stretch)
-    # MDEV-28022 libzstd-dev-1.1.3 minimum version
-    sed -i -e '/libzstd-dev/d' debian/control
-    ;;
-esac
-
-# This file is invoked from Buildbot and Travis-CI to build deb packages.
-# As both of those CI systems have many parallel jobs that include different
-# parts of the test suite, we don't need to run the mysql-test-run at all when
-# building the deb packages here.
+# On Buildbot, don't run the mysql-test-run test suite as part of build.
+# It takes a lot of time, and we will do a better test anyway in
+# Buildbot, running the test suite from installed .debs on a clean VM.
 export DEB_BUILD_OPTIONS="nocheck $DEB_BUILD_OPTIONS"
 
+source ./VERSION
+
 # General CI optimizations to keep build output smaller
-if [[ $TRAVIS ]] || [[ $GITLAB_CI ]]
+if [[ $GITLAB_CI ]]
 then
-  # On both Travis and Gitlab the output log must stay under 4MB so make the
+  # On Gitlab the output log must stay under 4MB so make the
   # build less verbose
   sed '/Add support for verbose builds/,/^$/d' -i debian/rules
 elif [ -d storage/columnstore/columnstore/debian ]
@@ -45,41 +36,38 @@ then
   sed "s/10.6/${MYSQL_VERSION_MAJOR}.${MYSQL_VERSION_MINOR}/" <storage/columnstore/columnstore/debian/control >> debian/control
 fi
 
-# Don't build or try to put files in a package for selected plugins and components on Travis-CI
-# in order to keep build small (in both duration and disk space)
-if [[ $TRAVIS ]]
-then
-  # Test suite package not relevant on Travis-CI
-  sed 's|DINSTALL_MYSQLTESTDIR=share/mysql/mysql-test|DINSTALL_MYSQLTESTDIR=false|' -i debian/rules
-  sed '/Package: mariadb-test-data/,/^$/d' -i debian/control
-  sed '/Package: mariadb-test$/,/^$/d' -i debian/control
-
-  # Extra plugins such as Mroonga, Spider, OQgraph, Sphinx and the embedded build can safely be skipped
-  sed 's|-DDEB|-DPLUGIN_MROONGA=NO -DPLUGIN_ROCKSDB=NO -DPLUGIN_SPIDER=NO -DPLUGIN_OQGRAPH=NO -DPLUGIN_PERFSCHEMA=NO -DPLUGIN_SPHINX=NO -DWITH_EMBEDDED_SERVER=OFF -DDEB|' -i debian/rules
-  sed "/Package: mariadb-plugin-mroonga/,/^$/d" -i debian/control
-  sed "/Package: mariadb-plugin-rocksdb/,/^$/d" -i debian/control
-  sed "/Package: mariadb-plugin-spider/,/^$/d" -i debian/control
-  sed "/Package: mariadb-plugin-oqgraph/,/^$/d" -i debian/control
-  sed "/ha_sphinx.so/d" -i debian/mariadb-server-10.7.install
-  sed "/Package: libmariadbd19/,/^$/d" -i debian/control
-  sed "/Package: libmariadbd-dev/,/^$/d" -i debian/control
-fi
-
-# If rocksdb-tools is not available (before Debian Buster and Ubuntu Disco)
-# remove the dependency from the RocksDB plugin so it can install properly
-# and instead ship the one built from MariaDB sources
-if ! apt-cache madison rocksdb-tools | grep 'rocksdb-tools' >/dev/null 2>&1
-then
+# Look up distro-version specific stuff
+#
+# Always keep the actual packaging as up-to-date as possible following the latest
+# Debian policy and targeting Debian Sid. Then case-by-case run in autobake-deb.sh
+# tests for backwards compatibility and strip away parts on older builders.
+
+remove_rocksdb_tools()
+{
   sed '/rocksdb-tools/d' -i debian/control
   sed '/sst_dump/d' -i debian/not-installed
-  echo "usr/bin/sst_dump" >> debian/mariadb-plugin-rocksdb.install
-fi
+  if ! grep -q sst_dump debian/mariadb-plugin-rocksdb.install
+  then
+    echo "usr/bin/sst_dump" >> debian/mariadb-plugin-rocksdb.install
+  fi
+}
 
-# If libcurl4 is not available (before Debian Buster and Ubuntu Bionic)
-# use older libcurl3 instead
-if ! apt-cache madison libcurl4 | grep 'libcurl4' >/dev/null 2>&1
+CODENAME="$(lsb_release -sc)"
+case "${CODENAME}" in
+  stretch)
+    # MDEV-28022 libzstd-dev-1.1.3 minimum version
+    sed -i -e '/libzstd-dev/d' \
+        -e 's/libcurl4/libcurl3/g' -i debian/control
+    remove_rocksdb_tools
+    ;;
+  bionic)
+    remove_rocksdb_tools
+    ;;
+esac
+
+if [[ ! "$(dpkg-architecture -q DEB_BUILD_ARCH)" =~ amd64|arm64|ppc64el|s390x ]]
 then
-  sed 's/libcurl4/libcurl3/g' -i debian/control
+  remove_rocksdb_tools
 fi
 
 # From Debian Bullseye/Ubuntu Groovy, liburing replaces libaio
@@ -98,15 +86,6 @@ then
   sed '/-DWITH_PMEM=yes/d' -i debian/rules
 fi
 
-# Debian stretch doesn't support the zstd version 1.1.3 required
-# for RocksDB. zstd isn't enabled in Mroonga even though code exists
-# for it. If someone happens to have a non-default zstd installed
-# (not 1.1.2), assume its a backport and build with it.
-if [ "$(lsb_release -sc)" = stretch ] && [ "$(apt-cache madison 'libzstd-dev' | grep -v 1.1.2)" = '' ]
-then
-  sed '/libzstd-dev/d' -i debian/control
-fi
-
 # Adjust changelog, add new version
 echo "Incrementing changelog and starting build scripts"
@@ -121,9 +100,9 @@ dch -b -D "${CODENAME}" -v "${VERSION}" "Automatic build with ${LOGSTRING}." --c
 echo "Creating package version ${VERSION} ... "
 
-# On Travis CI and Gitlab-CI, use -b to build binary only packages as there is
+# On Gitlab-CI, use -b to build binary only packages as there is
 # no need to waste time on generating the source package.
-if [[ $TRAVIS ]]
+if [[ $GITLAB_CI ]]
 then
   BUILDPACKAGE_FLAGS="-b"
 fi
@@ -143,8 +122,8 @@ fakeroot $BUILDPACKAGE_PREPEND dpkg-buildpackage -us -uc -I $BUILDPACKAGE_FLAGS
 # If the step above fails due to missing dependencies, you can manually run
 #   sudo mk-build-deps debian/control -r -i
 
-# Don't log package contents on Travis-CI or Gitlab-CI to save time and log size
-if [[ ! $TRAVIS ]] && [[ ! $GITLAB_CI ]]
+# Don't log package contents on Gitlab-CI to save time and log size
+if [[ ! $GITLAB_CI ]]
 then
   echo "List package contents ..."
   cd ..

debian/control vendored
View File

@@ -5,7 +5,7 @@ Maintainer: MariaDB Developers <maria-developers@lists.launchpad.net>
 Build-Depends: bison,
                cmake,
                cracklib-runtime <!nocheck>,
-               debhelper (>= 9.20160709~),
+               debhelper (>= 10),
                dh-exec,
                flex [amd64],
                gdb <!nocheck>,

View File

@@ -2890,5 +2890,28 @@ HEX(c1)
 0000006100000063
 DROP TABLE t1;
 #
+# MDEV-23210 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on ALTER TABLE, SELECT and INSERT
+#
+CREATE TABLE t1 (a CHAR(1));
+SET COLLATION_CONNECTION=utf32_general_ci, CHARACTER_SET_CLIENT=binary;
+ALTER TABLE t1 CHANGE a a ENUM('a','a') CHARACTER SET utf32;
+ERROR HY000: Column 'a' has duplicated value 'a' in ENUM
+ALTER TABLE t1 CHANGE a a ENUM('aaa') CHARACTER SET utf32;
+ERROR HY000: Invalid utf32 character string: '\x00aaa'
+ALTER TABLE t1 CHANGE a a ENUM('aa') CHARACTER SET utf32;
+SHOW CREATE TABLE t1;
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `a` enum('慡') CHARACTER SET utf32 DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+ALTER TABLE t1 CHANGE a a ENUM('a','b') CHARACTER SET utf32;
+SHOW CREATE TABLE t1;
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `a` enum('a','b') CHARACTER SET utf32 DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+SET NAMES utf8;
+#
 # End of 10.2 tests
 #

View File

@@ -1048,6 +1048,25 @@ INSERT INTO t1 (c1) VALUES (1),(2),(3);
 SELECT HEX(c1) FROM t1 ORDER BY c1;
 DROP TABLE t1;
 
+--echo #
+--echo # MDEV-23210 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on ALTER TABLE, SELECT and INSERT
+--echo #
+
+CREATE TABLE t1 (a CHAR(1));
+SET COLLATION_CONNECTION=utf32_general_ci, CHARACTER_SET_CLIENT=binary;
+--error ER_DUPLICATED_VALUE_IN_TYPE
+ALTER TABLE t1 CHANGE a a ENUM('a','a') CHARACTER SET utf32;
+--error ER_INVALID_CHARACTER_STRING
+ALTER TABLE t1 CHANGE a a ENUM('aaa') CHARACTER SET utf32;
+ALTER TABLE t1 CHANGE a a ENUM('aa') CHARACTER SET utf32;
+SHOW CREATE TABLE t1;
+ALTER TABLE t1 CHANGE a a ENUM('a','b') CHARACTER SET utf32;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+SET NAMES utf8;
+
 --echo #
 --echo # End of 10.2 tests
 --echo #

View File

@@ -7941,6 +7941,21 @@ EXECUTE s;
 DEALLOCATE PREPARE s;
 SET NAMES utf8;
 #
+# MDEV-23210 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on ALTER TABLE, SELECT and INSERT
+#
+CREATE TABLE t1 (a CHAR(1));
+SET COLLATION_CONNECTION=utf32_myanmar_ci, CHARACTER_SET_CLIENT=binary;
+ALTER TABLE t1 CHANGE a a ENUM('a','a') CHARACTER SET utf32;
+ERROR HY000: Column 'a' has duplicated value 'a' in ENUM
+ALTER TABLE t1 CHANGE a a ENUM('a','b') CHARACTER SET utf32;
+SHOW CREATE TABLE t1;
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `a` enum('a','b') CHARACTER SET utf32 DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+SET NAMES utf8;
+#
 # End of 10.2 tests
 #
 #

View File

@@ -290,6 +290,19 @@ EXECUTE s;
 DEALLOCATE PREPARE s;
 SET NAMES utf8;
 
+--echo #
+--echo # MDEV-23210 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on ALTER TABLE, SELECT and INSERT
+--echo #
+
+CREATE TABLE t1 (a CHAR(1));
+SET COLLATION_CONNECTION=utf32_myanmar_ci, CHARACTER_SET_CLIENT=binary;
+--error ER_DUPLICATED_VALUE_IN_TYPE
+ALTER TABLE t1 CHANGE a a ENUM('a','a') CHARACTER SET utf32;
+ALTER TABLE t1 CHANGE a a ENUM('a','b') CHARACTER SET utf32;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+SET NAMES utf8;
+
 --echo #
 --echo # End of 10.2 tests

View File

@@ -3,34 +3,50 @@ SET debug_dbug='+d,unstable_db_type';
 install soname 'ha_archive';
 create table t1 (a int) engine=archive;
 insert t1 values (1),(2),(3);
+create table t2 (a int) engine=archive partition by hash(a) partitions 3;
 flush tables;
 uninstall soname 'ha_archive';
-select table_schema, table_name from information_schema.tables where table_name like 't1';
-table_schema test
-table_name t1
-select table_schema, table_name, engine, version from information_schema.tables where table_name like 't1';
-table_schema test
-table_name t1
-engine ARCHIVE
-version NULL
+select table_schema, table_name from information_schema.tables where table_name like 't_' order by 1,2;
+table_schema table_name
+test t1
+test t2
+select table_schema, table_name, engine, version from information_schema.tables where table_name like 't_' order by 1,2;
+table_schema table_name engine version
+test t1 ARCHIVE NULL
+test t2 NULL NULL
 Warnings:
-Level Warning
-Code 1286
-Message Unknown storage engine 'ARCHIVE'
-select table_schema, table_name, engine, row_format from information_schema.tables where table_name like 't1';
-table_schema test
-table_name t1
-engine ARCHIVE
-row_format NULL
+Warning 1033 Incorrect information in file: './test/t2.frm'
+Warning 1286 Unknown storage engine 'ARCHIVE'
+select table_schema, table_name, engine, row_format from information_schema.tables where table_name like 't_' order by 1,2;
+table_schema table_name engine row_format
+test t1 ARCHIVE NULL
+test t2 NULL NULL
 Warnings:
-Level Warning
-Code 1286
-Message Unknown storage engine 'ARCHIVE'
+Warning 1033 Incorrect information in file: './test/t2.frm'
+Warning 1286 Unknown storage engine 'ARCHIVE'
 install soname 'ha_archive';
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+  `a` int(11) DEFAULT NULL
+) ENGINE=ARCHIVE DEFAULT CHARSET=latin1
+show create table t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+  `a` int(11) DEFAULT NULL
+) ENGINE=ARCHIVE DEFAULT CHARSET=latin1
+ PARTITION BY HASH (`a`)
+PARTITIONS 3
 db.opt
 t1.ARZ
 t1.frm
+t2#P#p0.ARZ
+t2#P#p1.ARZ
+t2#P#p2.ARZ
+t2.frm
+t2.par
 drop table t1;
+drop table t2;
 db.opt
 uninstall soname 'ha_archive';
 SET debug_dbug=@saved_dbug;

View File

@@ -1,4 +1,4 @@
+--source include/have_partition.inc
 --source include/have_debug.inc
 
 if (!$HA_ARCHIVE_SO) {
@@ -13,18 +13,25 @@ SET debug_dbug='+d,unstable_db_type';
 install soname 'ha_archive';
 create table t1 (a int) engine=archive;
 insert t1 values (1),(2),(3);
+create table t2 (a int) engine=archive partition by hash(a) partitions 3;
 flush tables;
 uninstall soname 'ha_archive';
---vertical_results
-select table_schema, table_name from information_schema.tables where table_name like 't1';
-select table_schema, table_name, engine, version from information_schema.tables where table_name like 't1';
-select table_schema, table_name, engine, row_format from information_schema.tables where table_name like 't1';
---horizontal_results
+select table_schema, table_name from information_schema.tables where table_name like 't_' order by 1,2;
+--replace_result $mysqld_datadir ./
+select table_schema, table_name, engine, version from information_schema.tables where table_name like 't_' order by 1,2;
+--replace_result $mysqld_datadir ./
+select table_schema, table_name, engine, row_format from information_schema.tables where table_name like 't_' order by 1,2;
 install soname 'ha_archive';
+show create table t1;
+show create table t2;
 --list_files $mysqld_datadir/test
 drop table t1;
+drop table t2;
 --list_files $mysqld_datadir/test
 uninstall soname 'ha_archive';

View File

@@ -128,3 +128,13 @@ SELECT f1(1);
 Got one of the listed errors
 DROP FUNCTION f1;
 SET debug_dbug= @saved_dbug;
+#
+# MDEV-27978 wrong option name in error when exceeding max_session_mem_used
+#
+SET SESSION max_session_mem_used = 8192;
+SELECT * FROM information_schema.processlist;
+ERROR HY000: The MariaDB server is running with the --max-session-mem-used=8192 option so it cannot execute this statement
+SET SESSION max_session_mem_used = DEFAULT;
+#
+# End of 10.2 tests
+#

View File

@@ -158,3 +158,16 @@ SET SESSION debug_dbug="+d,simulate_create_virtual_tmp_table_out_of_memory";
 SELECT f1(1);
 DROP FUNCTION f1;
 SET debug_dbug= @saved_dbug;
+
+--echo #
+--echo # MDEV-27978 wrong option name in error when exceeding max_session_mem_used
+--echo #
+
+SET SESSION max_session_mem_used = 8192;
+--error ER_OPTION_PREVENTS_STATEMENT
+SELECT * FROM information_schema.processlist;
+SET SESSION max_session_mem_used = DEFAULT;
+
+--echo #
+--echo # End of 10.2 tests
+--echo #

View File

@@ -4,11 +4,11 @@ connect u,localhost,root;
 # MDEV-24909 JSON functions don't respect KILL QUERY / max_statement_time limit
 #
 set group_concat_max_len= 4294967295;
-set @obj=concat_ws('','{', repeat('"a":"b",', 125000000/2), '"c":"d"}');
-set @arr=concat_ws('','[', repeat('1234567,', 125000000/2), '2345678]');
+set @obj=concat_ws('','{', repeat('"a":"b",', 1250000/2), '"c":"d"}');
+set @arr=concat_ws('','[', repeat('1234567,', 1250000/2), '2345678]');
 select length(@obj), length(@arr);
 length(@obj)	length(@arr)
-500000009	500000009
+5000009	5000009
 set max_statement_time=0.0001;
 select json_array_append(@arr, '$[0]', 1);
 ERROR 70100: Query execution was interrupted (max_statement_time exceeded)
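The reduced repeat count keeps the test meaningful while shrinking the data: 1250000/2 = 625000 copies of the 8-byte chunk plus 9 bytes of wrapper text give 625000 × 8 + 9 = 5000009 bytes, the length reported above, versus 500000009 bytes (roughly 500 MB) with the old 125000000/2 count.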

View File

@@ -9,8 +9,8 @@ connect u,localhost,root;
 --echo #
 
 set group_concat_max_len= 4294967295;
-set @obj=concat_ws('','{', repeat('"a":"b",', 125000000/2), '"c":"d"}');
-set @arr=concat_ws('','[', repeat('1234567,', 125000000/2), '2345678]');
+set @obj=concat_ws('','{', repeat('"a":"b",', 1250000/2), '"c":"d"}');
+set @arr=concat_ws('','[', repeat('1234567,', 1250000/2), '2345678]');
 select length(@obj), length(@arr);
 
 set max_statement_time=0.0001;

View File

@@ -9,7 +9,7 @@ SHOW TABLES;
 Tables_in_test
 t1
 SHOW CREATE TABLE t1;
-ERROR HY000: Failed to read from the .par file
+ERROR HY000: Incorrect information in file: './test/t1.frm'
 DROP TABLE t1;
 ERROR HY000: Got error 175 "File too short; Expected more data in file" from storage engine partition
 t1.frm

View File

@@ -17,7 +17,7 @@ let $MYSQLD_DATADIR= `SELECT @@datadir`;
 --copy_file std_data/parts/t1_blackhole.par $MYSQLD_DATADIR/test/t1.par
 SHOW TABLES;
 --replace_result $MYSQLD_DATADIR ./
---error ER_FAILED_READ_FROM_PAR_FILE
+--error ER_NOT_FORM_FILE
 SHOW CREATE TABLE t1;
 # The replace is needed for Solaris

View File

@@ -13,7 +13,7 @@ a
 UNLOCK TABLES;
 connection con1;
 TRUNCATE TABLE t1;
-ERROR HY000: The MariaDB server is running with the --max-thread-mem-used=8192 option so it cannot execute this statement
+ERROR HY000: The MariaDB server is running with the --max-session-mem-used=8192 option so it cannot execute this statement
 disconnect con1;
 connection default;
 DROP TABLE t1;

View File

@@ -5,9 +5,13 @@ SELECT @@wsrep_slave_threads;
 @@wsrep_slave_threads
 1
 SET GLOBAL wsrep_slave_threads=2;
+KILL ID;
 Got one of the listed errors
+KILL QUERY ID;
 Got one of the listed errors
+KILL ID;
 Got one of the listed errors
+KILL QUERY ID;
 Got one of the listed errors
 SET GLOBAL wsrep_slave_threads=DEFAULT;
 connection node_1;

View File

@@ -16,21 +16,23 @@ SET GLOBAL wsrep_slave_threads=2;
 --let $applier_thread = `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'wsrep applier idle' LIMIT 1`
 
---disable_query_log
-
+--replace_result $applier_thread ID
 --error ER_KILL_DENIED_ERROR,ER_KILL_DENIED_ERROR
 --eval KILL $applier_thread
 
+--replace_result $applier_thread ID
 --error ER_KILL_DENIED_ERROR,ER_KILL_DENIED_ERROR
 --eval KILL QUERY $applier_thread
 
 --let $aborter_thread = `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'wsrep aborter idle' LIMIT 1`
 
+--replace_result $aborter_thread ID
 --error ER_KILL_DENIED_ERROR,ER_KILL_DENIED_ERROR
 --eval KILL $aborter_thread
 
+--replace_result $aborter_thread ID
 --error ER_KILL_DENIED_ERROR,ER_KILL_DENIED_ERROR
 --eval KILL QUERY $aborter_thread
 
---enable_query_log
-
 SET GLOBAL wsrep_slave_threads=DEFAULT;

View File

@@ -12,6 +12,9 @@
 # Change Date: #
 # Change: #
 ################################################################################
+
+--source include/have_des.inc
+
 set time_zone="+03:00";
 --echo #
 --echo # NUMERIC FUNCTIONS

View File

@@ -48,3 +48,11 @@ alter table t1 add partition (partition p0 values less than (20));
 ERROR HY000: Duplicate partition name p0
 alter table t1 add partition (partition p1 values less than (20)) /* comment */;
 drop table t1;
+#
+# MDEV-28079 Shutdown hangs after altering innodb partition fts table
+#
+CREATE TABLE t1(f1 INT, f2 CHAR(100))ENGINE=InnoDB PARTITION BY HASH(f1) PARTITIONS 2;
+ALTER TABLE t1 ADD FULLTEXT(f2);
+InnoDB		0 transactions not purged
+DROP TABLE t1;
+# End of 10.6 tests

View File

@@ -9,3 +9,11 @@ SET GLOBAL innodb_read_only_compressed=OFF;
 --disable_query_log
 SET GLOBAL innodb_read_only_compressed=@save_innodb_read_only_compressed;
 --enable_query_log
+--echo #
+--echo # MDEV-28079 Shutdown hangs after altering innodb partition fts table
+--echo #
+CREATE TABLE t1(f1 INT, f2 CHAR(100))ENGINE=InnoDB PARTITION BY HASH(f1) PARTITIONS 2;
+ALTER TABLE t1 ADD FULLTEXT(f2);
+--source ../innodb/include/wait_all_purged.inc
+DROP TABLE t1;
+--echo # End of 10.6 tests

View File

@@ -0,0 +1,76 @@
include/master-slave.inc
[connection master]
# Master server_1 and Slave server_2 initialiation ...
connection server_2;
include/stop_slave.inc
connection server_1;
set @@sql_log_bin = off;
call mtr.add_suppression("Slave: An attempt was made to binlog GTID 10-1-1 which would create an out-of-order sequence number with existing GTID");
set @@sql_log_bin = on;
RESET MASTER;
set @@session.gtid_domain_id=10;
set @@global.rpl_semi_sync_master_enabled = 1;
set @@global.rpl_semi_sync_master_wait_point=AFTER_SYNC;
connection server_2;
RESET MASTER;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
set @@session.gtid_domain_id=20;
set @@global.rpl_semi_sync_slave_enabled = 1;
# a 1948 warning is expected
set @@global.gtid_slave_pos = "";
Warnings:
Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-1. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos
CHANGE MASTER TO master_use_gtid= slave_pos;
include/start_slave.inc
# ... server_1 -> server_2 is set up
connection server_1;
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=Innodb;
INSERT INTO t1 VALUES (1);
connection server_2;
# Circular configuration server_2 -> server_1 initialiation ...
connection server_1;
# A. ... first when server_1 is in gtid strict mode...
set @@global.gtid_strict_mode = true;
set @@global.rpl_semi_sync_slave_enabled = 1;
CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_2, master_user='root', master_use_gtid=SLAVE_POS;
# ... only for it to fail 'cos if its inconsistent (empty) slave's gtid state:
SELECT @@global.gtid_slave_pos;
@@global.gtid_slave_pos
START SLAVE;
include/wait_for_slave_sql_error.inc [errno=1950]
# B. ... Resume on the circular setup with the server_id now in the non-strict mode ...
set @@global.gtid_strict_mode = false;
include/start_slave.inc
# ... to have succeeded.
connection server_2;
INSERT INTO t1 VALUES (2);
connection server_1;
INSERT INTO t1 VALUES (3);
connection server_2;
# The gtid states on server_2 must be equal to ...
SHOW VARIABLES LIKE 'gtid_binlog_pos';
Variable_name Value
gtid_binlog_pos 0-2-1,10-1-3,20-2-1
SHOW VARIABLES LIKE 'gtid_slave_pos';
Variable_name Value
gtid_slave_pos 0-2-1,10-1-3,20-2-1
connection server_1;
# ... the gtid states on server_1
SHOW VARIABLES LIKE 'gtid_slave_pos';
Variable_name Value
gtid_slave_pos 0-2-1,10-1-3,20-2-1
SHOW VARIABLES LIKE 'gtid_binlog_pos';
Variable_name Value
gtid_binlog_pos 0-2-1,10-1-3,20-2-1
# Cleanup
connection server_1;
include/stop_slave.inc
set @@global.rpl_semi_sync_master_enabled = default;
set @@global.rpl_semi_sync_slave_enabled = default;
set @@global.rpl_semi_sync_master_wait_point=default;
DROP TABLE t1;
connection server_2;
set @@global.rpl_semi_sync_master_enabled = default;
set @@global.rpl_semi_sync_slave_enabled = default;
include/rpl_end.inc

View File

@@ -0,0 +1,11 @@
!include suite/rpl/rpl_1slave_base.cnf
!include include/default_client.cnf
[mysqld.1]
log-slave-updates
sync-binlog=1
[mysqld.2]
log-slave-updates
sync-binlog=1

View File

@@ -0,0 +1,115 @@
# ==== References ====
#
# MDEV-27760 event may non stop replicate in circular semisync setup
#
--source include/have_innodb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
# The following tests prove
# A. out-of-order gtid error when the stict gtid mode semisync slave
# receives the same server-id gtid event inconsistent
# (rpl_semi_sync_fail_over tests the consistent case) with its state;
# B. in the non-strict mode the same server-id events remains ignored
# by default as usual.
#
--echo # Master server_1 and Slave server_2 initialiation ...
--connection server_2
--source include/stop_slave.inc
# Initial master
--connection server_1
set @@sql_log_bin = off;
call mtr.add_suppression("Slave: An attempt was made to binlog GTID 10-1-1 which would create an out-of-order sequence number with existing GTID");
set @@sql_log_bin = on;
RESET MASTER;
set @@session.gtid_domain_id=10;
set @@global.rpl_semi_sync_master_enabled = 1;
set @@global.rpl_semi_sync_master_wait_point=AFTER_SYNC;
--connection server_2
RESET MASTER;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
set @@session.gtid_domain_id=20;
set @@global.rpl_semi_sync_slave_enabled = 1;
--echo # a 1948 warning is expected
set @@global.gtid_slave_pos = "";
CHANGE MASTER TO master_use_gtid= slave_pos;
--source include/start_slave.inc
--echo # ... server_1 -> server_2 is set up
--connection server_1
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=Innodb;
INSERT INTO t1 VALUES (1);
--save_master_pos
--connection server_2
--sync_with_master
--echo # Circular configuration server_2 -> server_1 initialiation ...
--connection server_1
--echo # A. ... first when server_1 is in gtid strict mode...
set @@global.gtid_strict_mode = true;
set @@global.rpl_semi_sync_slave_enabled = 1;
evalp CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_2, master_user='root', master_use_gtid=SLAVE_POS;
--echo # ... only for it to fail 'cos if its inconsistent (empty) slave's gtid state:
SELECT @@global.gtid_slave_pos;
START SLAVE;
# ER_GTID_STRICT_OUT_OF_ORDER
--let $slave_sql_errno = 1950
--source include/wait_for_slave_sql_error.inc
--echo # B. ... Resume on the circular setup with the server_id now in the non-strict mode ...
set @@global.gtid_strict_mode = false;
--source include/start_slave.inc
--echo # ... to have succeeded.
--connection server_2
INSERT INTO t1 VALUES (2);
--save_master_pos
--connection server_1
--sync_with_master
INSERT INTO t1 VALUES (3);
--save_master_pos
--connection server_2
--sync_with_master
--echo # The gtid states on server_2 must be equal to ...
--let $wait_condition=select @@gtid_slave_pos=@@gtid_binlog_pos
--source include/wait_condition.inc
SHOW VARIABLES LIKE 'gtid_binlog_pos';
SHOW VARIABLES LIKE 'gtid_slave_pos';
--connection server_1
--echo # ... the gtid states on server_1
--let $wait_condition=select @@gtid_slave_pos=@@gtid_binlog_pos
--source include/wait_condition.inc
SHOW VARIABLES LIKE 'gtid_slave_pos';
SHOW VARIABLES LIKE 'gtid_binlog_pos';
--echo # Cleanup
--connection server_1
--source include/stop_slave.inc
set @@global.rpl_semi_sync_master_enabled = default;
set @@global.rpl_semi_sync_slave_enabled = default;
set @@global.rpl_semi_sync_master_wait_point=default;
DROP TABLE t1;
--save_master_pos
--connection server_2
--sync_with_master
set @@global.rpl_semi_sync_master_enabled = default;
set @@global.rpl_semi_sync_slave_enabled = default;
--source include/rpl_end.inc

View File

@@ -1214,16 +1214,6 @@ verify_ca_matches_cert()
         return
     fi
 
-    local readable=1; [ ! -r "$cert" ] && readable=0
-    [ -n "$ca"  ] && [ ! -r "$ca"  ] && readable=0
-    [ -n "$cap" ] && [ ! -r "$cap" ] && readable=0
-
-    if [ readable -eq 0 ]; then
-        wsrep_log_error \
-            "Both PEM file and CA file (or path) must be readable"
-        exit 22
-    fi
-
     local not_match=0
     local errmsg
     errmsg=$("$OPENSSL_BINARY" verify -verbose \

View File

@@ -3193,11 +3193,12 @@ err1:
   @retval true   Failure
 */
 
-bool ha_partition::setup_engine_array(MEM_ROOT *mem_root)
+bool ha_partition::setup_engine_array(MEM_ROOT *mem_root,
+                                      handlerton* first_engine)
 {
   uint i;
   uchar *buff;
-  handlerton **engine_array, *first_engine;
+  handlerton **engine_array;
   enum legacy_db_type db_type, first_db_type;
 
   DBUG_ASSERT(!m_file);
@@ -3207,11 +3208,8 @@ bool ha_partition::setup_engine_array(MEM_ROOT *mem_root)
     DBUG_RETURN(true);
 
   buff= (uchar *) (m_file_buffer + PAR_ENGINES_OFFSET);
-  first_db_type= (enum legacy_db_type) buff[0];
-  first_engine= ha_resolve_by_legacy_type(ha_thd(), first_db_type);
-  if (!first_engine)
-    goto err;
 
+  first_db_type= (enum legacy_db_type) buff[0];
   if (!(m_engine_array= (plugin_ref*)
         alloc_root(&m_mem_root, m_tot_parts * sizeof(plugin_ref))))
     goto err;
@@ -3252,6 +3250,75 @@ err:
 }
 
+
+handlerton *ha_partition::get_def_part_engine(const char *name)
+{
+  if (table_share)
+  {
+    if (table_share->default_part_plugin)
+      return plugin_data(table_share->default_part_plugin, handlerton *);
+  }
+  else
+  {
+    // DROP TABLE, for example
+    char buff[FN_REFLEN];
+    File file;
+    MY_STAT state;
+    uchar *frm_image= 0;
+    handlerton *hton= 0;
+    bool use_legacy_type= false;
+
+    fn_format(buff, name, "", reg_ext, MY_APPEND_EXT);
+
+    file= mysql_file_open(key_file_frm, buff, O_RDONLY | O_SHARE, MYF(0));
+    if (file < 0)
+      return NULL;
+
+    if (mysql_file_fstat(file, &state, MYF(MY_WME)))
+      goto err;
+    if (state.st_size <= 64)
+      goto err;
+    if (!(frm_image= (uchar*)my_malloc(key_memory_Partition_share,
+                                       state.st_size, MYF(MY_WME))))
+      goto err;
+    if (mysql_file_read(file, frm_image, state.st_size, MYF(MY_NABP)))
+      goto err;
+
+    if (frm_image[64] != '/')
+    {
+      const uchar *e2= frm_image + 64;
+      const uchar *e2end = e2 + uint2korr(frm_image + 4);
+      if (e2end > frm_image + state.st_size)
+        goto err;
+      while (e2 + 3 < e2end)
+      {
+        uchar type= *e2++;
+        size_t length= extra2_read_len(&e2, e2end);
+        if (!length)
+          goto err;
+        if (type == EXTRA2_DEFAULT_PART_ENGINE)
+        {
+          LEX_CSTRING name= { (char*)e2, length };
+          plugin_ref plugin= ha_resolve_by_name(ha_thd(), &name, false);
+          if (plugin)
+            hton= plugin_data(plugin, handlerton *);
+          goto err;
+        }
+        e2+= length;
+      }
+    }
+    use_legacy_type= true;
+err:
+    my_free(frm_image);
+    mysql_file_close(file, MYF(0));
+    if (!use_legacy_type)
+      return hton;
+  }
+  return ha_resolve_by_legacy_type(ha_thd(),
+           (enum legacy_db_type)m_file_buffer[PAR_ENGINES_OFFSET]);
+}
+
+
 /**
   Get info about partition engines and their names from the .par file
@@ -3279,7 +3346,11 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root,
   if (read_par_file(name))
     DBUG_RETURN(true);
 
-  if (!is_clone && setup_engine_array(mem_root))
+  handlerton *default_engine= get_def_part_engine(name);
+  if (!default_engine)
+    DBUG_RETURN(true);
+
+  if (!is_clone && setup_engine_array(mem_root, default_engine))
     DBUG_RETURN(true);
 
   DBUG_RETURN(false);

View File

@@ -590,8 +590,9 @@ private:
     And one method to read it in.
   */
   bool create_handler_file(const char *name);
-  bool setup_engine_array(MEM_ROOT *mem_root);
+  bool setup_engine_array(MEM_ROOT *mem_root, handlerton *first_engine);
   bool read_par_file(const char *name);
+  handlerton *get_def_part_engine(const char *name);
   bool get_from_handler_file(const char *name, MEM_ROOT *mem_root,
                              bool is_clone);
   bool new_handlers_from_part_info(MEM_ROOT *mem_root);

View File

@@ -3670,13 +3670,18 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific)
       /* Ensure we don't get called here again */
       char buf[50], *buf2;
       thd->set_killed(KILL_QUERY);
-      my_snprintf(buf, sizeof(buf), "--max-thread-mem-used=%llu",
+      my_snprintf(buf, sizeof(buf), "--max-session-mem-used=%llu",
                   thd->variables.max_mem_used);
       if ((buf2= (char*) thd->alloc(256)))
       {
         my_snprintf(buf2, 256, ER_THD(thd, ER_OPTION_PREVENTS_STATEMENT), buf);
         thd->set_killed(KILL_QUERY, ER_OPTION_PREVENTS_STATEMENT, buf2);
       }
+      else
+      {
+        thd->set_killed(KILL_QUERY, ER_OPTION_PREVENTS_STATEMENT,
+                        "--max-session-mem-used");
+      }
     }
     DBUG_ASSERT((longlong) thd->status_var.local_memory_used >= 0 ||
                 !debug_assert_on_not_freed_memory);

View File

@@ -2234,30 +2234,30 @@ ER_NO_SUCH_THREAD
         swe "Finns ingen tråd med id %lu"
         ukr "Невідомий ідентифікатор гілки: %lu"
 ER_KILL_DENIED_ERROR
-        cze "Nejste vlastníkem threadu %lu"
-        dan "Du er ikke ejer af tråden %lu"
-        nla "U bent geen bezitter van thread %lu"
-        eng "You are not owner of thread %lu"
-        est "Ei ole lõime %lu omanik"
-        fre "Vous n'êtes pas propriétaire de la tâche no: %lu"
-        ger "Sie sind nicht Eigentümer von Thread %lu"
-        greek "Δεν είσθε owner του thread %lu"
-        hindi "आप थ्रेड %lu के OWNER नहीं हैं"
-        hun "A %lu thread-nek mas a tulajdonosa"
-        ita "Utente non proprietario del thread %lu"
-        jpn "スレッド %lu のオーナーではありません。"
-        kor "쓰레드(Thread) %lu의 소유자가 아닙니다."
-        nor "Du er ikke eier av tråden %lu"
-        norwegian-ny "Du er ikkje eigar av tråd %lu"
-        pol "Nie jeste? wła?cicielem w?tku %lu"
-        por "Você não é proprietário da 'thread' %lu"
-        rum "Nu sinteti proprietarul threadului %lu"
-        rus "Вы не являетесь владельцем потока %lu"
-        serbian "Vi niste vlasnik thread-a %lu"
-        slo "Nie ste vlastníkom vlákna %lu"
-        spa "No eres el propietario del hilo (thread) %lu"
-        swe "Du är inte ägare till tråd %lu"
-        ukr "Ви не володар гілки %lu"
+        cze "Nejste vlastníkem threadu %lld"
+        dan "Du er ikke ejer af tråden %lld"
+        nla "U bent geen bezitter van thread %lld"
+        eng "You are not owner of thread %lld"
+        est "Ei ole lõime %lld omanik"
+        fre "Vous n'êtes pas propriétaire de la tâche no: %lld"
+        ger "Sie sind nicht Eigentümer von Thread %lld"
+        greek "Δεν είσθε owner του thread %lld"
+        hindi "आप थ्रेड %lld के OWNER नहीं हैं"
+        hun "A %lld thread-nek mas a tulajdonosa"
+        ita "Utente non proprietario del thread %lld"
+        jpn "スレッド %lld のオーナーではありません。"
+        kor "쓰레드(Thread) %lld의 소유자가 아닙니다."
+        nor "Du er ikke eier av tråden %lld"
+        norwegian-ny "Du er ikkje eigar av tråd %lld"
+        pol "Nie jeste? wła?cicielem w?tku %lld"
+        por "Você não é proprietário da 'thread' %lld"
+        rum "Nu sinteti proprietarul threadului %lld"
+        rus "Вы не являетесь владельцем потока %lld"
+        serbian "Vi niste vlasnik thread-a %lld"
+        slo "Nie ste vlastníkom vlákna %lld"
+        spa "No eres el propietario del hilo (thread) %lld"
+        swe "Du är inte ägare till tråd %lld"
+        ukr "Ви не володар гілки %lld"
 ER_NO_TABLES_USED
         cze "Nejsou použity žádné tabulky"
         dan "Ingen tabeller i brug"

View File

@@ -6186,13 +6186,13 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)
   bool is_rows_event= false;
   /*
     The flag has replicate_same_server_id semantics and is raised to accept
-    a same-server-id event on the semisync slave, for both the gtid and legacy
-    connection modes.
-    Such events can appear as result of this server recovery so the event
-    was created there and replicated elsewhere right before the crash. At recovery
-    it could be evicted from the server's binlog.
+    a same-server-id event group by the gtid strict mode semisync slave.
+    Own server-id events can appear as result of this server crash-recovery:
+    the transaction was created on this server then being master, got replicated
+    elsewhere right before the crash before commit;
+    finally at recovery the transaction gets evicted from the server's binlog.
   */
-  bool do_accept_own_server_id= false;
+  bool do_accept_own_server_id;
   /*
     FD_q must have been prepared for the first R_a event
     inside get_master_version_and_clock()
@@ -6281,6 +6281,8 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)
     dbug_rows_event_count = 0;
   };);
 #endif
 
+  s_id= uint4korr(buf + SERVER_ID_OFFSET);
+
   mysql_mutex_lock(&mi->data_lock);
 
   switch (buf[EVENT_TYPE_OFFSET]) {
@@ -6722,6 +6724,7 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)
       ++mi->events_queued_since_last_gtid;
       inc_pos= event_len;
     }
+
     break;
 
   /*
@@ -6864,6 +6867,10 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)
     break;
   }
 
+  do_accept_own_server_id= (s_id == global_system_variables.server_id
+    && rpl_semi_sync_slave_enabled && opt_gtid_strict_mode
+    && mi->using_gtid != Master_info::USE_GTID_NO);
+
   /*
     Integrity of Rows- event group check.
     A sequence of Rows- events must end with STMT_END_F flagged one.
@@ -6909,7 +6916,6 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)
   */
   mysql_mutex_lock(log_lock);
 
-  s_id= uint4korr(buf + SERVER_ID_OFFSET);
   /*
     Write the event to the relay log, unless we reconnected in the middle
     of an event group and now need to skip the initial part of the group that
@@ -6955,7 +6961,7 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)
   else
   if ((s_id == global_system_variables.server_id &&
        !(mi->rli.replicate_same_server_id ||
-         (do_accept_own_server_id= rpl_semi_sync_slave_enabled))) ||
+         do_accept_own_server_id)) ||
       event_that_should_be_ignored(buf) ||
       /*
         the following conjunction deals with IGNORE_SERVER_IDS, if set

View File

@@ -2353,6 +2353,58 @@ bool THD::convert_string(LEX_STRING *to, CHARSET_INFO *to_cs,
 }
 
 
+/*
+  Reinterpret a binary string to a character string
+
+  @param[OUT] to    The result will be written here,
+                    either the original string as is,
+                    or a newly alloced fixed string with
+                    some zero bytes prepended.
+  @param cs         The destination character set
+  @param str        The binary string
+  @param length     The length of the binary string
+
+  @return           false on success
+  @return           true on error
+*/
+bool THD::reinterpret_string_from_binary(LEX_CSTRING *to, CHARSET_INFO *cs,
+                                         const char *str, size_t length)
+{
+  /*
+    When reinterpreting from binary to tricky character sets like
+    UCS2, UTF16, UTF32, we may need to prepend some zero bytes.
+    This is possible in scenarios like this:
+      SET COLLATION_CONNECTION=utf32_general_ci, CHARACTER_SET_CLIENT=binary;
+    This code is similar to String::copy_aligned().
+  */
+  size_t incomplete= length % cs->mbminlen; // Bytes in an incomplete character
+  if (incomplete)
+  {
+    size_t zeros= cs->mbminlen - incomplete;
+    size_t aligned_length= zeros + length;
+    char *dst= (char*) alloc(aligned_length + 1);
+    if (!dst)
+    {
+      to->str= NULL; // Safety
+      to->length= 0;
+      return true;
+    }
+    bzero(dst, zeros);
+    memcpy(dst + zeros, str, length);
+    dst[aligned_length]= '\0';
+    to->str= dst;
+    to->length= aligned_length;
+  }
+  else
+  {
+    to->str= str;
+    to->length= length;
+  }
+  return check_string_for_wellformedness(to->str, to->length, cs);
+}
+
+
 /*
   Convert a string between two character sets.
   dstcs and srccs cannot be &my_charset_bin.
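A minimal standalone sketch (not part of the commit; plain C++ with no server headers) of the padding rule above, assuming utf32 with mbminlen == 4 and big-endian code points. It reproduces why ENUM('aa') becomes U+6161 ('慡') in the ctype_utf32 result file while ENUM('aaa') pads to '\x00aaa', which is not a valid utf32 character:

#include <cstdio>
#include <cstring>

int main()
{
  const char *inputs[]= {"aa", "aaa"};
  for (const char *s : inputs)
  {
    size_t length= strlen(s);
    size_t mbminlen= 4;                          // utf32: every character is 4 bytes
    size_t incomplete= length % mbminlen;
    size_t zeros= incomplete ? mbminlen - incomplete : 0;
    unsigned char buf[4]= {0, 0, 0, 0};
    memcpy(buf + zeros, s, length);              // zero bytes prepended, as above
    unsigned long cp= ((unsigned long) buf[0] << 24) | ((unsigned long) buf[1] << 16) |
                      ((unsigned long) buf[2] << 8) | buf[3];
    printf("'%s' -> U+%04lX (%s)\n", s, cp,
           cp <= 0x10FFFF ? "valid utf32" : "invalid utf32");
  }
  return 0;
}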

View File

@@ -4155,6 +4155,8 @@ public:
   bool convert_string(LEX_STRING *to, CHARSET_INFO *to_cs,
                       const char *from, size_t from_length,
                       CHARSET_INFO *from_cs);
+  bool reinterpret_string_from_binary(LEX_CSTRING *to, CHARSET_INFO *to_cs,
+                                      const char *from, size_t from_length);
   bool convert_string(LEX_CSTRING *to, CHARSET_INFO *to_cs,
                       const char *from, size_t from_length,
                       CHARSET_INFO *from_cs)
@@ -4171,6 +4173,8 @@ public:
   {
     if (!simple_copy_is_possible)
       return unlikely(convert_string(to, tocs, from->str, from->length, fromcs));
+    if (fromcs == &my_charset_bin)
+      return reinterpret_string_from_binary(to, tocs, from->str, from->length);
     *to= *from;
     return false;
   }

View File

@@ -9391,15 +9391,17 @@ sql_kill_user(THD *thd, LEX_USER *user, killed_state state)
 {
   uint error;
   ha_rows rows;
-  if (likely(!(error= kill_threads_for_user(thd, user, state, &rows))))
-    my_ok(thd, rows);
-  else
+  switch (error= kill_threads_for_user(thd, user, state, &rows))
   {
-    /*
-      This is probably ER_OUT_OF_RESOURCES, but in the future we may
-      want to write the name of the user we tried to kill
-    */
-    my_error(error, MYF(0), user->host.str, user->user.str);
+  case 0:
+    my_ok(thd, rows);
+    break;
+  case ER_KILL_DENIED_ERROR:
+    my_error(error, MYF(0), (long long) thd->thread_id);
+    break;
+  case ER_OUT_OF_RESOURCES:
+  default:
+    my_error(error, MYF(0));
   }
 }

View File

@@ -1999,17 +1999,13 @@ bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db,
                     const char *table_path)
 {
   char path[FN_REFLEN + 1];
+  const size_t pathmax = sizeof(path) - 1 - reg_ext_length;
   int error= 0;
   DBUG_ENTER("quick_rm_table");
 
   size_t path_length= table_path ?
-    (strxnmov(path, sizeof(path) - 1, table_path, reg_ext, NullS) - path) :
-    build_table_filename(path, sizeof(path)-1, db->str, table_name->str,
-                         reg_ext, flags);
-  if (!(flags & NO_FRM_RENAME))
-    if (mysql_file_delete(key_file_frm, path, MYF(0)))
-      error= 1; /* purecov: inspected */
-  path[path_length - reg_ext_length]= '\0'; // Remove reg_ext
+    (strxnmov(path, pathmax, table_path, NullS) - path) :
+    build_table_filename(path, pathmax, db->str, table_name->str, "", flags);
   if ((flags & (NO_HA_TABLE | NO_PAR_TABLE)) == NO_HA_TABLE)
   {
     handler *file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, base);
@@ -2019,8 +2015,14 @@ bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db,
     delete file;
   }
   if (!(flags & (FRM_ONLY|NO_HA_TABLE)))
-    if (ha_delete_table(thd, base, path, db, table_name, 0) > 0)
-      error= 1;
+    error|= ha_delete_table(thd, base, path, db, table_name, 0) > 0;
+
+  if (!(flags & NO_FRM_RENAME))
+  {
+    memcpy(path + path_length, reg_ext, reg_ext_length + 1);
+    if (mysql_file_delete(key_file_frm, path, MYF(0)))
+      error= 1; /* purecov: inspected */
+  }
 
   if (likely(error == 0))
   {

View File

@@ -6691,7 +6691,7 @@ static Sys_var_ulong Sys_log_tc_size(
        DEFAULT(my_getpagesize() * 6), BLOCK_SIZE(my_getpagesize()));
 #endif
 
-static Sys_var_ulonglong Sys_max_thread_mem(
+static Sys_var_ulonglong Sys_max_session_mem_used(
        "max_session_mem_used", "Amount of memory a single user session "
        "is allowed to allocate. This limits the value of the "
        "session variable MEM_USED", SESSION_VAR(max_mem_used),

View File

@@ -1553,21 +1553,6 @@ bool TABLE_SHARE::init_period_from_extra2(period_info_t *period,
 }
 
 
-static size_t extra2_read_len(const uchar **extra2, const uchar *extra2_end)
-{
-  size_t length= *(*extra2)++;
-  if (length)
-    return length;
-
-  if ((*extra2) + 2 >= extra2_end)
-    return 0;
-  length= uint2korr(*extra2);
-  (*extra2)+= 2;
-  if (length < 256 || *extra2 + length > extra2_end)
-    return 0;
-  return length;
-}
-
 static
 bool read_extra2_section_once(const uchar *extra2, size_t len, LEX_CUSTRING *section)
 {
@@ -1867,7 +1852,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
     if (frm_image[61] && !share->default_part_plugin)
     {
       enum legacy_db_type db_type= (enum legacy_db_type) (uint) frm_image[61];
-      share->default_part_plugin= ha_lock_engine(NULL, ha_checktype(thd, db_type));
+      share->default_part_plugin= ha_lock_engine(NULL, ha_checktype(thd, db_type, 1));
       if (!share->default_part_plugin)
         goto err;
     }

View File

@@ -697,9 +697,7 @@ bool THD::rm_temporary_table(handlerton *base, const char *path)
   char frm_path[FN_REFLEN + 1];
 
   strxnmov(frm_path, sizeof(frm_path) - 1, path, reg_ext, NullS);
-  if (mysql_file_delete(key_file_frm, frm_path,
-                        MYF(MY_WME | MY_IGNORE_ENOENT)))
-    error= true;
+
   if (base->drop_table(base, path) > 0)
   {
     error= true;
@@ -707,6 +705,10 @@ bool THD::rm_temporary_table(handlerton *base, const char *path)
                       path, my_errno);
   }
 
+  if (mysql_file_delete(key_file_frm, frm_path,
+                        MYF(MY_WME | MY_IGNORE_ENOENT)))
+    error= true;
+
   DBUG_RETURN(error);
 }

View File

@@ -192,6 +192,22 @@ enum extra2_index_flags {
   EXTRA2_IGNORED_KEY
 };
 
+
+static inline size_t extra2_read_len(const uchar **extra2, const uchar *end)
+{
+  size_t length= *(*extra2)++;
+  if (length)
+    return length;
+
+  if ((*extra2) + 2 >= end)
+    return 0;
+  length= uint2korr(*extra2);
+  (*extra2)+= 2;
+  if (length < 256 || *extra2 + length > end)
+    return 0;
+  return length;
+}
+
 LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING &table,
                              HA_CREATE_INFO *create_info,
                              List<Create_field> &create_fields,
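For reference, a small standalone sketch (not part of the commit) of the length encoding this helper parses: a length below 256 is stored as a single non-zero byte, while larger lengths are stored as a zero byte followed by a 2-byte little-endian value (what uint2korr reads). Bounds checks are trimmed here:

#include <cstdio>

// Decode one extra2 length field the way extra2_read_len() above does
// (bounds checks trimmed): a non-zero first byte is the length, otherwise a
// zero byte is followed by a little-endian uint16 that must be >= 256.
static size_t decode_len(const unsigned char **p)
{
  size_t length= *(*p)++;
  if (length)
    return length;
  length= (size_t) (*p)[0] | ((size_t) (*p)[1] << 8);   // uint2korr()
  (*p)+= 2;
  return length < 256 ? 0 : length;
}

int main()
{
  const unsigned char small_field[]= {7};                // length 7
  const unsigned char large_field[]= {0, 0x2C, 0x01};    // 0, then 300 (0x012C) LE
  const unsigned char *p= small_field;
  printf("small: %zu\n", decode_len(&p));
  p= large_field;
  printf("large: %zu\n", decode_len(&p));
  return 0;
}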

View File

@@ -2072,74 +2072,26 @@ const char *dict_load_table_low(const span<const char> &name,
 	return(NULL);
 }
 
-/********************************************************************//**
-Using the table->heap, copy the null-terminated filepath into
-table->data_dir_path and replace the 'databasename/tablename.ibd'
-portion with 'tablename'.
-This allows SHOW CREATE TABLE to return the correct DATA DIRECTORY path.
-Make this data directory path only if it has not yet been saved. */
-static
-void
-dict_save_data_dir_path(
-/*====================*/
-	dict_table_t*	table,		/*!< in/out: table */
-	const char*	filepath)	/*!< in: filepath of tablespace */
-{
-	ut_ad(dict_sys.frozen());
-	ut_a(DICT_TF_HAS_DATA_DIR(table->flags));
-
-	ut_a(!table->data_dir_path);
-	ut_a(filepath);
-
-	/* Be sure this filepath is not the default filepath. */
-	if (char* default_filepath = fil_make_filepath(nullptr, table->name,
-						       IBD, false)) {
-		if (0 != strcmp(filepath, default_filepath)) {
-			ulint pathlen = strlen(filepath);
-			ut_a(pathlen < OS_FILE_MAX_PATH);
-			ut_a(0 == strcmp(filepath + pathlen - 4, DOT_IBD));
-
-			table->data_dir_path = mem_heap_strdup(
-				table->heap, filepath);
-			os_file_make_data_dir_path(table->data_dir_path);
-		}
-
-		ut_free(default_filepath);
-	}
-}
-
 /** Make sure the data_file_name is saved in dict_table_t if needed.
-@param[in,out]	table		Table object
-@param[in]	dict_locked	dict_sys.frozen() */
-void dict_get_and_save_data_dir_path(dict_table_t* table, bool dict_locked)
+@param[in,out]	table		Table object */
+void dict_get_and_save_data_dir_path(dict_table_t *table)
 {
 	ut_ad(!table->is_temporary());
 	ut_ad(!table->space || table->space->id == table->space_id);
 
-	if (!table->data_dir_path && table->space_id && table->space) {
-		if (!dict_locked) {
-			dict_sys.freeze(SRW_LOCK_CALL);
-		}
-
-		table->flags |= 1 << DICT_TF_POS_DATA_DIR
-			& ((1U << DICT_TF_BITS) - 1);
-
-		dict_save_data_dir_path(table,
-					table->space->chain.start->name);
-
-		if (table->data_dir_path == NULL) {
-			/* Since we did not set the table data_dir_path,
-			unset the flag.  This does not change
-			SYS_TABLES or FSP_SPACE_FLAGS on the header page
-			of the tablespace, but it makes dict_table_t
-			consistent. */
-			table->flags &= ~DICT_TF_MASK_DATA_DIR
-				& ((1U << DICT_TF_BITS) - 1);
-		}
-
-		if (!dict_locked) {
-			dict_sys.unfreeze();
-		}
-	}
+	if (!table->data_dir_path && table->space_id && table->space)
+	{
+		const char *filepath= table->space->chain.start->name;
+		if (strncmp(fil_path_to_mysql_datadir, filepath,
+			    strlen(fil_path_to_mysql_datadir)))
+		{
+			table->lock_mutex_lock();
+			table->flags|= 1 << DICT_TF_POS_DATA_DIR & ((1U << DICT_TF_BITS) - 1);
+			table->data_dir_path= mem_heap_strdup(table->heap, filepath);
+			os_file_make_data_dir_path(table->data_dir_path);
+			table->lock_mutex_unlock();
+		}
+	}
 }
 
 /** Opens a tablespace for dict_load_table_one()
@@ -2193,7 +2145,7 @@ dict_load_tablespace(
 	char*	filepath = NULL;
 	if (DICT_TF_HAS_DATA_DIR(table->flags)) {
 		/* This will set table->data_dir_path from fil_system */
-		dict_get_and_save_data_dir_path(table, true);
+		dict_get_and_save_data_dir_path(table);
 
 		if (table->data_dir_path) {
 			filepath = fil_make_filepath(

View File

@@ -1567,10 +1567,13 @@ static void fts_table_no_ref_count(const char *table_name)
 
 /** Stop the purge thread and check n_ref_count of all auxiliary
 and common table associated with the fts table.
-@param	table	parent FTS table */
-void purge_sys_t::stop_FTS(const dict_table_t &table)
+@param	table		parent FTS table
+@param	already_stopped	True indicates purge threads were
+			already stopped*/
+void purge_sys_t::stop_FTS(const dict_table_t &table, bool already_stopped)
 {
-  purge_sys.stop_FTS();
+  if (!already_stopped)
+    purge_sys.stop_FTS();
   fts_table_t fts_table;
   char table_name[MAX_FULL_NAME_LEN];

View File

@@ -11411,7 +11411,7 @@ ha_innobase::update_create_info(
 		return;
 	}
 
-	dict_get_and_save_data_dir_path(m_prebuilt->table, false);
+	dict_get_and_save_data_dir_path(m_prebuilt->table);
 
 	if (m_prebuilt->table->data_dir_path) {
 		create_info->data_file_name = m_prebuilt->table->data_dir_path;
@@ -13531,29 +13531,26 @@ int ha_innobase::delete_table(const char *name)
       dict_sys.unfreeze();
     }
 
-    auto &timeout= THDVAR(thd, lock_wait_timeout);
-    const auto save_timeout= timeout;
-    if (table->name.is_temporary())
-      timeout= 0;
+    const bool skip_wait{table->name.is_temporary()};
 
     if (table_stats && index_stats &&
         !strcmp(table_stats->name.m_name, TABLE_STATS_NAME) &&
         !strcmp(index_stats->name.m_name, INDEX_STATS_NAME) &&
-        !(err= lock_table_for_trx(table_stats, trx, LOCK_X)))
-      err= lock_table_for_trx(index_stats, trx, LOCK_X);
+        !(err= lock_table_for_trx(table_stats, trx, LOCK_X, skip_wait)))
+      err= lock_table_for_trx(index_stats, trx, LOCK_X, skip_wait);
 
-    if (err != DB_SUCCESS && !timeout)
+    if (err != DB_SUCCESS && skip_wait)
     {
       /* We may skip deleting statistics if we cannot lock the tables,
       when the table carries a temporary name. */
+      ut_ad(err == DB_LOCK_WAIT);
+      ut_ad(trx->error_state == DB_SUCCESS);
       err= DB_SUCCESS;
       dict_table_close(table_stats, false, thd, mdl_table);
       dict_table_close(index_stats, false, thd, mdl_index);
       table_stats= nullptr;
       index_stats= nullptr;
     }
-    timeout= save_timeout;
   }
 
   if (err == DB_SUCCESS)
@@ -13826,7 +13823,7 @@ int ha_innobase::truncate()
 	mem_heap_t*	heap = mem_heap_create(1000);
 
-	dict_get_and_save_data_dir_path(ib_table, false);
+	dict_get_and_save_data_dir_path(ib_table);
 	info.data_file_name = ib_table->data_dir_path;
 	const char* temp_name = dict_mem_create_temporary_tablename(
 		heap, ib_table->name.m_name, ib_table->id);
@@ -14059,17 +14056,15 @@ ha_innobase::rename_table(
 	if (error == DB_SUCCESS && table_stats && index_stats
 	    && !strcmp(table_stats->name.m_name, TABLE_STATS_NAME)
 	    && !strcmp(index_stats->name.m_name, INDEX_STATS_NAME)) {
-		auto &timeout = THDVAR(thd, lock_wait_timeout);
-		const auto save_timeout = timeout;
-		if (from_temp) {
-			timeout = 0;
-		}
-		error = lock_table_for_trx(table_stats, trx, LOCK_X);
+		error = lock_table_for_trx(table_stats, trx, LOCK_X,
+					   from_temp);
 		if (error == DB_SUCCESS) {
 			error = lock_table_for_trx(index_stats, trx,
-						   LOCK_X);
+						   LOCK_X, from_temp);
 		}
 		if (error != DB_SUCCESS && from_temp) {
+			ut_ad(error == DB_LOCK_WAIT);
+			ut_ad(trx->error_state == DB_SUCCESS);
 			error = DB_SUCCESS;
 			/* We may skip renaming statistics if
 			we cannot lock the tables, when the
@@ -14082,7 +14077,6 @@ ha_innobase::rename_table(
 			table_stats = nullptr;
 			index_stats = nullptr;
 		}
-		timeout = save_timeout;
 	}
 }

View File

@@ -10903,12 +10903,14 @@ ha_innobase::commit_inplace_alter_table(
 		}
 	}
 
+	bool already_stopped= false;
 	for (inplace_alter_handler_ctx** pctx = ctx_array; *pctx; pctx++) {
 		auto ctx = static_cast<ha_innobase_inplace_ctx*>(*pctx);
 		dberr_t error = DB_SUCCESS;
 
 		if (fts_exist) {
-			purge_sys.stop_FTS(*ctx->old_table);
+			purge_sys.stop_FTS(*ctx->old_table, already_stopped);
+			already_stopped = true;
 		}
 
 		if (new_clustered && ctx->old_table->fts) {
View File

@@ -1,7 +1,7 @@
 /*****************************************************************************
 Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2021, MariaDB Corporation.
+Copyright (c) 2017, 2022, MariaDB Corporation.
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -53,9 +53,8 @@ We also scan the biggest space id, and store it to fil_system. */
 void dict_check_tablespaces_and_store_max_id();
 /** Make sure the data_file_name is saved in dict_table_t if needed.
-@param[in,out]	table		Table object
-@param[in]	dict_locked	dict_sys.frozen() */
-void dict_get_and_save_data_dir_path(dict_table_t* table, bool dict_locked);
+@param[in,out]	table		Table object */
+void dict_get_and_save_data_dir_path(dict_table_t* table);
 /***********************************************************************//**
 Loads a table object based on the table id.

View File

@@ -394,15 +394,13 @@ lock_table(
 void lock_table_resurrect(dict_table_t *table, trx_t *trx, lock_mode mode);
 /** Sets a lock on a table based on the given mode.
-@param[in]	table	table to lock
-@param[in,out]	trx	transaction
-@param[in]	mode	LOCK_X or LOCK_S
-@return error code or DB_SUCCESS. */
-dberr_t
-lock_table_for_trx(
-	dict_table_t*	table,
-	trx_t*		trx,
-	enum lock_mode	mode)
+@param table	table to lock
+@param trx	transaction
+@param mode	LOCK_X or LOCK_S
+@param no_wait	whether to skip handling DB_LOCK_WAIT
+@return error code */
+dberr_t lock_table_for_trx(dict_table_t *table, trx_t *trx, lock_mode mode,
+                           bool no_wait= false)
 	MY_ATTRIBUTE((nonnull, warn_unused_result));
 /** Exclusively lock the data dictionary tables.
@@ -915,10 +913,8 @@ public:
   @param page whether to discard also from lock_sys.prdt_hash */
   void prdt_page_free_from_discard(const page_id_t id, bool all= false);
-#ifdef WITH_WSREP
   /** Cancel possible lock waiting for a transaction */
   static void cancel_lock_wait_for_trx(trx_t *trx);
-#endif /* WITH_WSREP */
 };
 /** The lock system */
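
Note that no_wait defaults to false in the declaration above, so existing call sites compile unchanged and keep the blocking behaviour; only the statistics-table paths shown earlier pass true. A toy illustration of that default-argument compatibility, with a stand-in function rather than the real declaration:

// Hypothetical illustration of the default-argument compatibility.
#include <iostream>

enum dberr_t { DB_SUCCESS, DB_LOCK_WAIT };

// Shaped like the new declaration: the last parameter defaults to false.
static dberr_t lock_table_for_trx_model(const char *table, int /*trx*/,
                                        int /*mode*/, bool no_wait= false)
{
  std::cout << table << (no_wait ? ": no-wait\n" : ": may wait\n");
  return DB_SUCCESS;
}

int main()
{
  lock_table_for_trx_model("t1", 1, 0);   // old-style call still compiles
  lock_table_for_trx_model("mysql/innodb_table_stats", 1, 0, true);
}
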

View File

@@ -286,8 +286,10 @@ public:
   /** Stop the purge thread and check n_ref_count of all auxiliary
   and common table associated with the fts table.
-  @param table	parent FTS table */
-  void stop_FTS(const dict_table_t &table);
+  @param table	parent FTS table
+  @param already_stopped	True indicates purge threads were
+				already stopped */
+  void stop_FTS(const dict_table_t &table, bool already_stopped=false);
 };
 /** The global data structure coordinating a purge */
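
The new already_stopped argument lets a caller that iterates over several ALTER TABLE contexts stop the purge threads only on the first call, as commit_inplace_alter_table() does in the hunk shown earlier. A rough standalone sketch of that stop-once pattern; purge_control below is an invented stand-in for purge_sys, not the real class:

// Hypothetical sketch of the stop-once pattern enabled by already_stopped.
#include <cstdio>
#include <vector>

struct purge_control
{
  // Models purge_sys.stop_FTS(table, already_stopped): the expensive
  // "stop purge threads" step runs only when already_stopped is false.
  void stop_FTS(int table_id, bool already_stopped= false)
  {
    if (!already_stopped)
      std::puts("stopping purge threads");
    std::printf("checking FTS auxiliary tables of table %d\n", table_id);
  }
};

int main()
{
  purge_control purge;
  const std::vector<int> altered_tables{1, 2, 3};
  bool already_stopped= false;
  for (int id : altered_tables)
  {
    purge.stop_FTS(id, already_stopped);  // threads are stopped only once
    already_stopped= true;
  }
}
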

View File

@@ -3627,52 +3627,50 @@ static void lock_table_dequeue(lock_t *in_lock, bool owns_wait_mutex)
 	}
 }
 /** Sets a lock on a table based on the given mode.
-@param[in]	table	table to lock
-@param[in,out]	trx	transaction
-@param[in]	mode	LOCK_X or LOCK_S
-@return error code or DB_SUCCESS. */
-dberr_t
-lock_table_for_trx(
-	dict_table_t*	table,
-	trx_t*		trx,
-	enum lock_mode	mode)
+@param table	table to lock
+@param trx	transaction
+@param mode	LOCK_X or LOCK_S
+@param no_wait	whether to skip handling DB_LOCK_WAIT
+@return error code */
+dberr_t lock_table_for_trx(dict_table_t *table, trx_t *trx, lock_mode mode,
+                           bool no_wait)
 {
-	mem_heap_t*	heap;
-	que_thr_t*	thr;
-	dberr_t		err;
-	sel_node_t*	node;
-	heap = mem_heap_create(512);
-	node = sel_node_create(heap);
-	thr = pars_complete_graph_for_exec(node, trx, heap, NULL);
-	thr->graph->state = QUE_FORK_ACTIVE;
-	/* We use the select query graph as the dummy graph needed
-	in the lock module call */
-	thr = static_cast<que_thr_t*>(
-		que_fork_get_first_thr(
-			static_cast<que_fork_t*>(que_node_get_parent(thr))));
+  mem_heap_t *heap= mem_heap_create(512);
+  sel_node_t *node= sel_node_create(heap);
+  que_thr_t *thr= pars_complete_graph_for_exec(node, trx, heap, nullptr);
+  thr->graph->state= QUE_FORK_ACTIVE;
+  thr= static_cast<que_thr_t*>
+    (que_fork_get_first_thr(static_cast<que_fork_t*>
+                            (que_node_get_parent(thr))));
 run_again:
-	thr->run_node = thr;
-	thr->prev_node = thr->common.parent;
-	err = lock_table(table, mode, thr);
-	trx->error_state = err;
-	if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
-		if (row_mysql_handle_errors(&err, trx, thr, NULL)) {
-			goto run_again;
-		}
-	}
-	que_graph_free(thr->graph);
-	trx->op_info = "";
-	return(err);
+  thr->run_node= thr;
+  thr->prev_node= thr->common.parent;
+  dberr_t err= lock_table(table, mode, thr);
+  switch (err) {
+  case DB_SUCCESS:
+    break;
+  case DB_LOCK_WAIT:
+    if (no_wait)
+    {
+      lock_sys.cancel_lock_wait_for_trx(trx);
+      break;
+    }
+    /* fall through */
+  default:
+    trx->error_state= err;
+    if (row_mysql_handle_errors(&err, trx, thr, nullptr))
+      goto run_again;
+  }
+  que_graph_free(thr->graph);
+  trx->op_info= "";
+  return err;
 }
 /** Exclusively lock the data dictionary tables.
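
In the rewritten function above, a caller that passes no_wait=true no longer suspends on DB_LOCK_WAIT: the waiting request is cancelled through lock_sys.cancel_lock_wait_for_trx() (which is why its WITH_WSREP guard is dropped below) and DB_LOCK_WAIT is returned for the caller to handle. A self-contained model of that branch, where try_lock(), cancel_wait() and handle_errors() are simplified stand-ins for lock_table(), lock_sys.cancel_lock_wait_for_trx() and row_mysql_handle_errors():

// Hypothetical, self-contained model of the no_wait branch above.
#include <cstdio>

enum dberr_t { DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK };

static dberr_t try_lock() { return DB_LOCK_WAIT; }
static void cancel_wait() { std::puts("waiting lock request cancelled"); }
static bool handle_errors(dberr_t&) { return false; }  // no retry in this model

static dberr_t lock_for_trx_model(bool no_wait)
{
run_again:
  dberr_t err= try_lock();
  switch (err) {
  case DB_SUCCESS:
    break;
  case DB_LOCK_WAIT:
    if (no_wait)
    {
      cancel_wait();   // give up immediately; err stays DB_LOCK_WAIT
      break;
    }
    /* fall through */
  default:
    if (handle_errors(err))
      goto run_again;
  }
  return err;
}

int main() { return lock_for_trx_model(true) == DB_LOCK_WAIT ? 0 : 1; }
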
@@ -5639,8 +5637,7 @@ static void lock_cancel_waiting_and_release(lock_t *lock)
   lock_wait_end(trx);
   trx->mutex_unlock();
 }
-#ifdef WITH_WSREP
+TRANSACTIONAL_TARGET
 void lock_sys_t::cancel_lock_wait_for_trx(trx_t *trx)
 {
   lock_sys.wr_lock(SRW_LOCK_CALL);
@@ -5654,7 +5651,6 @@ void lock_sys_t::cancel_lock_wait_for_trx(trx_t *trx)
   lock_sys.wr_unlock();
   mysql_mutex_unlock(&lock_sys.wait_mutex);
 }
-#endif /* WITH_WSREP */
 /** Cancel a waiting lock request.
 @tparam check_victim  whether to check for DB_DEADLOCK

View File

@@ -3114,7 +3114,7 @@ and apply it to dict_table_t
 static dberr_t handle_instant_metadata(dict_table_t *table,
                                        const row_import &cfg)
 {
-  dict_get_and_save_data_dir_path(table, false);
+  dict_get_and_save_data_dir_path(table);
   char *filepath;
   if (DICT_TF_HAS_DATA_DIR(table->flags))
@@ -4149,7 +4149,7 @@ fil_tablespace_iterate(
 			return(DB_CORRUPTION););
 	/* Make sure the data_dir_path is set. */
-	dict_get_and_save_data_dir_path(table, false);
+	dict_get_and_save_data_dir_path(table);
 	ut_ad(!DICT_TF_HAS_DATA_DIR(table->flags) || table->data_dir_path);
@@ -4470,7 +4470,7 @@ row_import_for_mysql(
 	/* If the table is stored in a remote tablespace, we need to
 	determine that filepath from the link file and system tables.
 	Find the space ID in SYS_TABLES since this is an ALTER TABLE. */
-	dict_get_and_save_data_dir_path(table, true);
+	dict_get_and_save_data_dir_path(table);
 	ut_ad(!DICT_TF_HAS_DATA_DIR(table->flags) || table->data_dir_path);
 	const char *data_dir_path = DICT_TF_HAS_DATA_DIR(table->flags)

View File

@@ -2075,7 +2075,7 @@ srv_get_meta_data_filename(
 	char*		path;
 	/* Make sure the data_dir_path is set. */
-	dict_get_and_save_data_dir_path(table, false);
+	dict_get_and_save_data_dir_path(table);
 	const char*	data_dir_path = DICT_TF_HAS_DATA_DIR(table->flags)
 		? table->data_dir_path : nullptr;

View File

@@ -1,5 +1,5 @@
 /* Copyright (C) 2006 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-   Copyright (c) 2009, 2021, MariaDB Corporation Ab
+   Copyright (c) 2009, 2022, MariaDB Corporation Ab
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -271,7 +271,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags,
   char name_buff[FN_REFLEN], org_name[FN_REFLEN], index_name[FN_REFLEN],
        data_name[FN_REFLEN];
   uchar *UNINIT_VAR(disk_cache), *disk_pos, *end_pos;
-  MARIA_HA info, *UNINIT_VAR(m_info), *old_info;
+  MARIA_HA info, *UNINIT_VAR(m_info), *old_info= NULL;
   MARIA_SHARE share_buff,*share;
   double *rec_per_key_part;
   ulong *nulls_per_key_part;
@@ -322,7 +322,6 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags,
   }
 #endif /* WITH_S3_STORAGE_ENGINE */
-  old_info= 0;
   if (!internal_table)
     mysql_mutex_lock(&THR_LOCK_maria);
   if ((open_flags & HA_OPEN_COPY) ||