
Merge 10.6 into 10.7

Marko Mäkelä
2021-08-19 13:03:48 +03:00
98 changed files with 1887 additions and 578 deletions

View File

@@ -30,13 +30,13 @@ INCLUDE(CMakeParseArguments)
# [CONFIG cnf_file_name]
# [VERSION version_string]
# [LINK_LIBRARIES lib1...libN]
# [DEPENDENCIES target1...targetN]
# [DEPENDS target1...targetN]
MACRO(MYSQL_ADD_PLUGIN)
CMAKE_PARSE_ARGUMENTS(ARG
"STORAGE_ENGINE;STATIC_ONLY;MODULE_ONLY;MANDATORY;DEFAULT;DISABLED;NOT_EMBEDDED;RECOMPILE_FOR_EMBEDDED;CLIENT"
"MODULE_OUTPUT_NAME;STATIC_OUTPUT_NAME;COMPONENT;CONFIG;VERSION"
"LINK_LIBRARIES;DEPENDENCIES"
"LINK_LIBRARIES;DEPENDS"
${ARGN}
)
IF(NOT WITHOUT_SERVER OR ARG_CLIENT)
@@ -115,8 +115,8 @@ MACRO(MYSQL_ADD_PLUGIN)
ENDIF()
UNSET(${with_var} CACHE)
IF(NOT ARG_DEPENDENCIES)
SET(ARG_DEPENDENCIES)
IF(NOT ARG_DEPENDS)
SET(ARG_DEPENDS)
ENDIF()
IF(ARG_VERSION)
@@ -146,7 +146,7 @@ MACRO(MYSQL_ADD_PLUGIN)
ADD_LIBRARY(${target} STATIC ${SOURCES})
DTRACE_INSTRUMENT(${target})
ADD_DEPENDENCIES(${target} GenError ${ARG_DEPENDENCIES})
ADD_DEPENDENCIES(${target} GenError ${ARG_DEPENDS})
RESTRICT_SYMBOL_EXPORTS(${target})
IF(WITH_EMBEDDED_SERVER AND (NOT ARG_NOT_EMBEDDED))
# Embedded library should contain PIC code and be linkable
@@ -160,7 +160,7 @@ MACRO(MYSQL_ADD_PLUGIN)
SET_TARGET_PROPERTIES(${target}_embedded
PROPERTIES COMPILE_DEFINITIONS "EMBEDDED_LIBRARY${version_string}")
ENDIF()
ADD_DEPENDENCIES(${target}_embedded GenError)
ADD_DEPENDENCIES(${target}_embedded GenError ${ARG_DEPENDS})
ENDIF()
ENDIF()
@@ -235,7 +235,7 @@ MACRO(MYSQL_ADD_PLUGIN)
TARGET_LINK_LIBRARIES (${target} "-Wl,--no-undefined")
ENDIF()
ADD_DEPENDENCIES(${target} GenError ${ARG_DEPENDENCIES})
ADD_DEPENDENCIES(${target} GenError ${ARG_DEPENDS})
SET_TARGET_PROPERTIES(${target} PROPERTIES
OUTPUT_NAME "${ARG_MODULE_OUTPUT_NAME}")

View File

@@ -17,29 +17,17 @@ set -e
# building the deb packages here.
export DEB_BUILD_OPTIONS="nocheck $DEB_BUILD_OPTIONS"
# Take the files and part of control from MCS directory
if [[ -d storage/columnstore/columnstore/debian ]]
then
cp -v storage/columnstore/columnstore/debian/mariadb-plugin-columnstore.* debian/
echo >> debian/control
cat storage/columnstore/columnstore/debian/control >> debian/control
# ColumnStore is explicitly disabled in the native build, so allow it now
# when the build is triggered by autobake-deb.sh
sed '/-DPLUGIN_COLUMNSTORE=NO/d' -i debian/rules
fi
# General CI optimizations to keep build output smaller
if [[ $TRAVIS ]] || [[ $GITLAB_CI ]]
then
# On both Travis and Gitlab the output log must stay under 4MB so make the
# build less verbose
sed '/Add support for verbose builds/,/^$/d' -i debian/rules
# MCOL-4149: ColumnStore builds are so slow and big that they must be skipped on
# both Travis-CI and Gitlab-CI
sed 's|$(CMAKEFLAGS)|$(CMAKEFLAGS) -DPLUGIN_COLUMNSTORE=NO|' -i debian/rules
sed "/Package: mariadb-plugin-columnstore/,/^$/d" -i debian/control
sed -e '/Add support for verbose builds/,/^$/d' \
-e '/ColumnStore is part of the build/,/^$/d' \
-e 's|$(CMAKEFLAGS)|$(CMAKEFLAGS) -DPLUGIN_COLUMNSTORE=NO|' \
-i debian/rules
fi
# Don't build or try to put files in a package for selected plugins and components on Travis-CI

debian/rules
View File

@@ -38,11 +38,6 @@ else
NUMJOBS = 1
endif
# RocksDB cannot build on 32-bit platforms
ifeq (32,$(DEB_HOST_ARCH_BITS))
CMAKEFLAGS += -DPLUGIN_ROCKSDB=NO
endif
# Cross building requires stack direction instruction
ifneq ($(DEB_BUILD_ARCH),$(DEB_HOST_ARCH))
ifneq (,$(filter $(DEB_HOST_ARCH_CPU),alpha amd64 arm arm64 i386 ia64 m68k mips64el mipsel powerpc ppc64 ppc64el riscv64 s390x sh4 sparc64))
@@ -59,14 +54,6 @@ ifneq (,$(filter $(DEB_HOST_ARCH_CPU),amd64 arm64 ppc64el))
CMAKEFLAGS += -DWITH_PMEM=yes
endif
# Add extra flag to avoid WolfSSL code crashing the entire mariadbd on s390x. This
# can be removed once upstream has made the code s390x compatible, see
# https://jira.mariadb.org/browse/MDEV-21705 and
# https://github.com/wolfSSL/wolfssl/issues/2828
ifeq ($(DEB_HOST_ARCH),s390x)
CFLAGS += -DWC_NO_CACHE_RESISTANT
endif
# Add support for verbose builds
MAKEFLAGS += VERBOSE=1
@@ -85,15 +72,25 @@ ifneq ($(DEB_BUILD_ARCH),$(DEB_HOST_ARCH))
dpkg-architecture -a$(DEB_BUILD_ARCH) -f -c dh_auto_configure --builddirectory=builddir-native
dh_auto_build --builddirectory=builddir-native -- import_executables
endif
# ColumnStore is part of the build
ifneq (32,$(DEB_HOST_ARCH_BITS))
# Take the files and part of control from MCS directory
cp -v storage/columnstore/columnstore/debian/mariadb-plugin-columnstore.* debian/
# Don't include twice
grep -q '^Package: mariadb-plugin-columnstore$$' debian/control || \
( echo && cat storage/columnstore/columnstore/debian/control ) >> debian/control
endif
echo "server:Version=$(DEB_VERSION)" >> debian/substvars
# Don't build ColumnStore as part of the native build, only build it when
# triggered by autobake-deb.sh. Saves build time and disk space.
# RocksDB and Column Store cannot build on 32-bit platforms
PATH=$${MYSQL_BUILD_PATH:-"/usr/lib/ccache:/usr/local/bin:/usr/bin:/bin"} \
NO_UPDATE_BUILD_VERSION=1 \
dh_auto_configure --builddirectory=$(BUILDDIR) -- \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
$(CMAKEFLAGS) \
$(if $(findstring $(DEB_HOST_ARCH_BITS),32),-DPLUGIN_ROCKSDB=NO -DPLUGIN_COLUMNSTORE=NO) \
$(if $(filter $(DEB_BUILD_ARCH),$(DEB_HOST_ARCH)),,-DIMPORT_EXECUTABLES=$(CURDIR)/builddir-native/import_executables.cmake) \
-DCOMPILATION_COMMENT="mariadb.org binary distribution" \
-DMYSQL_SERVER_SUFFIX="-$(DEB_VERSION_REVISION)" \
@@ -102,7 +99,6 @@ endif
-DBUILD_CONFIG=mysql_release \
-DCONC_DEFAULT_CHARSET=utf8mb4 \
-DPLUGIN_AWS_KEY_MANAGEMENT=NO \
-DPLUGIN_COLUMNSTORE=NO \
-DIGNORE_AIO_CHECK=YES \
-DWITH_URING=yes \
-DDEB=$(DEB_VENDOR)

View File

@@ -3451,7 +3451,7 @@ next_file:
if (err == ERROR_NO_MORE_FILES) {
status = 1;
} else {
msg("readdir_next_file in %s returned %lu", dir, err);
msg("FindNextFile in %s returned %lu", dirname, err);
status = -1;
}
}

View File

@@ -1,6 +1,7 @@
call mtr.add_suppression("Can.t init tc log");
call mtr.add_suppression("Aborting");
RESET MASTER;
SET @@global.sync_binlog=1;
CREATE TABLE t (f INT) ENGINE=INNODB;
CREATE TABLE t2 (f INT) ENGINE=INNODB;
CREATE TABLE tm (f INT) ENGINE=Aria;
@@ -28,7 +29,7 @@ connection default;
disconnect master1;
disconnect master2;
disconnect master3;
# restart: --rpl-semi-sync-slave-enabled=1
# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
FOUND 1 /Successfully truncated.*to remove transactions starting from GTID 0-1-6/ in mysqld.1.err
Pre-crash binlog file content:
include/show_binlog_events.inc
@@ -77,7 +78,7 @@ connection default;
disconnect master1;
disconnect master2;
disconnect master3;
# restart: --rpl-semi-sync-slave-enabled=1
# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
FOUND 1 /Successfully truncated.*to remove transactions starting from GTID 0-1-10/ in mysqld.1.err
Pre-crash binlog file content:
include/show_binlog_events.inc
@@ -133,7 +134,7 @@ connection default;
disconnect master1;
disconnect master2;
disconnect master3;
# restart: --rpl-semi-sync-slave-enabled=1
# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
FOUND 1 /Successfully truncated.*to remove transactions starting from GTID 0-1-14/ in mysqld.1.err
Pre-crash binlog file content:
include/show_binlog_events.inc
@@ -195,7 +196,7 @@ connection default;
disconnect master1;
disconnect master2;
disconnect master3;
# restart: --rpl-semi-sync-slave-enabled=1
# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
FOUND 1 /Successfully truncated.*to remove transactions starting from GTID 0-1-20/ in mysqld.1.err
Pre-crash binlog file content:
include/show_binlog_events.inc
@@ -234,4 +235,5 @@ DELETE FROM t;
DROP PROCEDURE sp_xa;
# Cleanup
DROP TABLE t,t2,tm;
SET @@global.sync_binlog= default;
# End of the tests
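These recovery tests now also force binlog durability. With sync_binlog=1 every commit group is fsynced to the binary log, so after the simulated crash the binlog reliably contains the tail of transactions that the truncation logic must examine; the restarted server gets the same guarantee via --sync-binlog=1. A minimal sketch of the pattern the tests adopt (the workload itself is a placeholder):

SET @@global.sync_binlog= 1;
# ... run the workload, kill the server, restart it with --sync-binlog=1 ...
SET @@global.sync_binlog= default;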

View File

@@ -1,5 +1,6 @@
call mtr.add_suppression("Can.t init tc log");
call mtr.add_suppression("Aborting");
SET @@global.sync_binlog= 1;
CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
CREATE TABLE t2 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=rocksdb;
#
@@ -30,9 +31,9 @@ Log_name File_size
master-bin.000001 #
master-bin.000002 #
master-bin.000003 #
# restart the server with --rpl-semi-sync-slave-enabled=1
# restart the server with --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
# the server is restarted
# restart: --rpl-semi-sync-slave-enabled=1
# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
connection default;
#
# *** Summary: 1 row should be present in both tables; binlog is truncated; number of binlogs at reconnect - 3:
@@ -97,7 +98,7 @@ INSERT INTO t2 VALUES (2, REPEAT("x", 4100));
INSERT INTO t1 VALUES (2, REPEAT("x", 4100));
COMMIT;
connection default;
# restart: --rpl-semi-sync-slave-enabled=1
# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
connection default;
#
# *** Summary: 2 rows should be present in both tables; no binlog truncation; one extra binlog file compare with A; number of binlogs at reconnect - 4:
@@ -154,9 +155,9 @@ Log_name File_size
master-bin.000001 #
master-bin.000002 #
master-bin.000003 #
# restart the server with --rpl-semi-sync-slave-enabled=1
# restart the server with --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
# the server is restarted
# restart: --rpl-semi-sync-slave-enabled=1
# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
connection default;
#
# *** Summary: 2 rows should be present in both tables; no binlog truncation; the same # of binlog files as in B; number of binlogs at reconnect - 4:
@@ -186,4 +187,5 @@ DELETE FROM t2;
disconnect con1;
#
DROP TABLE t1, t2;
SET @@global.sync_binlog= default;
# End of the tests

View File

@@ -1,6 +1,7 @@
call mtr.add_suppression("Can.t init tc log");
call mtr.add_suppression("Aborting");
SET @@global.max_binlog_size= 4096;
SET @@global.sync_binlog= 1;
RESET MASTER;
FLUSH LOGS;
CREATE TABLE ti (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
@@ -13,11 +14,11 @@ master-bin.000001 #
master-bin.000002 #
INSERT INTO ti VALUES(1,"I am gonna survive");
INSERT INTO tm VALUES(1,"me too!");
SET DEBUG_SYNC= "commit_after_release_LOCK_after_binlog_sync SIGNAL master1_ready WAIT_FOR con1_go";
SET DEBUG_SYNC= "commit_after_release_LOCK_after_binlog_sync SIGNAL master1_ready WAIT_FOR master1_go_never_arrives";
INSERT INTO ti VALUES (2, REPEAT("x", 4100));
connect master2,localhost,root,,;
SET DEBUG_SYNC= "now WAIT_FOR master1_ready";
SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL master2_ready WAIT_FOR master2_go";
SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL master2_ready WAIT_FOR master2_go_never_arrives";
INSERT INTO ti VALUES (3, "not gonna survive");
connection default;
SET DEBUG_SYNC= "now WAIT_FOR master2_ready";
@@ -35,7 +36,7 @@ connection default;
# Kill the server
disconnect master1;
disconnect master2;
# restart: --rpl-semi-sync-slave-enabled=1
# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
FOUND 1 /truncated binlog file:.*master.*000002/ in mysqld.1.err
"One record should be present in table"
SELECT * FROM ti;
@@ -50,4 +51,5 @@ SELECT @@global.gtid_binlog_pos;
0-1-4
# Cleanup
DROP TABLE ti;
SET @@global.sync_binlog= default;
# End of the tests

View File

@@ -12,6 +12,7 @@ call mtr.add_suppression("mysqld: Table.*tm.*is marked as crashed");
call mtr.add_suppression("Checking table.*tm");
RESET MASTER;
FLUSH LOGS;
SET @@global.sync_binlog=1;
CREATE TABLE ti (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
CREATE TABLE tm (f INT) ENGINE=MYISAM;
INSERT INTO tm VALUES(1);
@@ -19,10 +20,11 @@ connect master1,localhost,root,,;
connect master2,localhost,root,,;
connect master3,localhost,root,,;
connection master1;
SET DEBUG_SYNC= "commit_after_release_LOCK_after_binlog_sync SIGNAL master1_ready WAIT_FOR master1_go";
SET DEBUG_SYNC= "commit_after_release_LOCK_after_binlog_sync SIGNAL master1_ready WAIT_FOR master1_go_never_arrives";
INSERT INTO ti VALUES (5 - 1, REPEAT("x", 4100));
connection master2;
SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL master2_ready WAIT_FOR master2_go";
SET DEBUG_SYNC= "now WAIT_FOR master1_ready";
SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL master2_ready WAIT_FOR master2_go_never_arrives";
INSERT INTO ti VALUES (5, REPEAT("x", 1));
connection master3;
SET DEBUG_SYNC= "now WAIT_FOR master2_ready";

View File

@@ -2,9 +2,10 @@ connect con1,localhost,root,,;
FLUSH LOGS;
FLUSH LOGS;
FLUSH LOGS;
SET DEBUG_SYNC= "at_after_lock_index WAIT_FOR con1_go";
SET DEBUG_SYNC= "at_after_lock_index SIGNAL con1_ready WAIT_FOR con1_go";
SHOW BINARY LOGS;
connect con2,localhost,root,,;
SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
RESET MASTER;
FLUSH LOGS;
SET DEBUG_SYNC= "now SIGNAL con1_go";

View File

@@ -31,7 +31,7 @@ SELECT @@global.gtid_binlog_pos as 'Before the crash';
#
# Server restart
#
--let $restart_parameters= --rpl-semi-sync-slave-enabled=1
--let $restart_parameters= --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
--source include/start_mysqld.inc
# Check error log for a successful truncate message.

View File

@@ -22,6 +22,7 @@ call mtr.add_suppression("Aborting");
# C. Similarly to A, with the XA blank transaction
RESET MASTER;
SET @@global.sync_binlog=1;
CREATE TABLE t (f INT) ENGINE=INNODB;
CREATE TABLE t2 (f INT) ENGINE=INNODB;
CREATE TABLE tm (f INT) ENGINE=Aria;
@@ -98,5 +99,5 @@ DROP PROCEDURE sp_xa;
--echo # Cleanup
DROP TABLE t,t2,tm;
SET @@global.sync_binlog= default;
--echo # End of the tests

View File

@@ -18,6 +18,7 @@ call mtr.add_suppression("Can.t init tc log");
call mtr.add_suppression("Aborting");
--let $MYSQLD_DATADIR= `SELECT @@datadir`
SET @@global.sync_binlog= 1;
CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
CREATE TABLE t2 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=rocksdb;
@@ -27,7 +28,7 @@ CREATE TABLE t2 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=rocksdb;
# The transaction is killed along with the server after that.
--let $shutdown_timeout=0
--let $debug_sync_action = "commit_after_release_LOCK_log SIGNAL con1_ready WAIT_FOR signal_no_signal"
--let $restart_parameters = --rpl-semi-sync-slave-enabled=1
--let $restart_parameters = --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
--let $test_outcome= 1 row should be present in both tables; binlog is truncated; number of binlogs at reconnect - 3
--source binlog_truncate_multi_engine.inc
--echo Proof of the truncated binlog file is readable (two transactions must be seen):
@@ -40,7 +41,7 @@ CREATE TABLE t2 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=rocksdb;
--let $debug_sync_action = ""
# Both debug_sync and debug-dbug are required to make sure Engines remember the commit state
# debug_sync alone will not help.
--let $restart_parameters = --rpl-semi-sync-slave-enabled=1
--let $restart_parameters = --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
--let $test_outcome= 2 rows should be present in both tables; no binlog truncation; one extra binlog file compare with A; number of binlogs at reconnect - 4
--source binlog_truncate_multi_engine.inc
@@ -49,12 +50,10 @@ CREATE TABLE t2 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=rocksdb;
--let $debug_sync_action = "commit_after_run_commit_ordered SIGNAL con1_ready WAIT_FOR signal_no_signal"
# Hold off after both engines have committed. The server is shut down.
--let $shutdown_timeout=
--let $restart_parameters = --rpl-semi-sync-slave-enabled=1
--let $restart_parameters = --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
--let $test_outcome= 2 rows should be present in both tables; no binlog truncation; the same # of binlog files as in B; number of binlogs at reconnect - 4
--source binlog_truncate_multi_engine.inc
DROP TABLE t1, t2;
SET @@global.sync_binlog= default;
--echo # End of the tests

View File

@@ -13,7 +13,7 @@ call mtr.add_suppression("Can.t init tc log");
call mtr.add_suppression("Aborting");
SET @@global.max_binlog_size= 4096;
SET @@global.sync_binlog= 1;
RESET MASTER;
FLUSH LOGS;
CREATE TABLE ti (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
@@ -29,13 +29,13 @@ INSERT INTO ti VALUES(1,"I am gonna survive");
INSERT INTO tm VALUES(1,"me too!");
# hold on near engine commit
SET DEBUG_SYNC= "commit_after_release_LOCK_after_binlog_sync SIGNAL master1_ready WAIT_FOR con1_go";
SET DEBUG_SYNC= "commit_after_release_LOCK_after_binlog_sync SIGNAL master1_ready WAIT_FOR master1_go_never_arrives";
--send INSERT INTO ti VALUES (2, REPEAT("x", 4100))
connect(master2,localhost,root,,);
# The 2nd trx for recovery, it does not rotate binlog
SET DEBUG_SYNC= "now WAIT_FOR master1_ready";
SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL master2_ready WAIT_FOR master2_go";
SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL master2_ready WAIT_FOR master2_go_never_arrives";
--send INSERT INTO ti VALUES (3, "not gonna survive")
--connection default
@@ -53,7 +53,7 @@ SELECT @@global.gtid_binlog_state;
#
# Server restart
#
--let $restart_parameters= --rpl-semi-sync-slave-enabled=1
--let $restart_parameters= --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
--source include/start_mysqld.inc
# Check error log for a successful truncate message.
@@ -73,5 +73,5 @@ SELECT @@global.gtid_binlog_pos;
--echo # Cleanup
DROP TABLE ti;
SET @@global.sync_binlog= default;
--echo # End of the tests

View File

@@ -45,6 +45,7 @@ call mtr.add_suppression("Checking table.*tm");
RESET MASTER;
FLUSH LOGS;
SET @@global.sync_binlog=1;
CREATE TABLE ti (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
CREATE TABLE tm (f INT) ENGINE=MYISAM;
@@ -66,13 +67,13 @@ connect(master3,localhost,root,,);
--connection master1
# The 1st trx binlogs, rotate binlog and hold on before committing at engine
SET DEBUG_SYNC= "commit_after_release_LOCK_after_binlog_sync SIGNAL master1_ready WAIT_FOR master1_go";
SET DEBUG_SYNC= "commit_after_release_LOCK_after_binlog_sync SIGNAL master1_ready WAIT_FOR master1_go_never_arrives";
--send_eval INSERT INTO ti VALUES ($row_count - 1, REPEAT("x", 4100))
--connection master2
SET DEBUG_SYNC= "now WAIT_FOR master1_ready";
# The 2nd trx for recovery, it does not rotate binlog
SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL master2_ready WAIT_FOR master2_go";
SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL master2_ready WAIT_FOR master2_go_never_arrives";
--send_eval INSERT INTO ti VALUES ($row_count, REPEAT("x", 1))
--connection master3

View File

@@ -8,10 +8,15 @@ FLUSH LOGS;
FLUSH LOGS;
FLUSH LOGS;
SET DEBUG_SYNC= "at_after_lock_index WAIT_FOR con1_go";
# This forced synchronization pattern ensures con1 will execute its retry
# path. More specifically, con1 should see that the cache of log files it
# creates during SHOW BINARY LOGS becomes invalidated after con2 completes
# RESET MASTER.
SET DEBUG_SYNC= "at_after_lock_index SIGNAL con1_ready WAIT_FOR con1_go";
--send SHOW BINARY LOGS
connect(con2,localhost,root,,);
SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
RESET MASTER;
FLUSH LOGS;
SET DEBUG_SYNC= "now SIGNAL con1_go";

View File

@@ -0,0 +1,21 @@
#
# MDEV-26131 SEGV in ha_innobase::discard_or_import_tablespace
#
CREATE TABLE t1(f1 int,f2 text)ENGINE=InnoDB;
INSERT INTO t1 VALUES(1, "InnoDB");
CREATE TABLE t2 LIKE t1;
ALTER TABLE t2 ADD KEY idx (f2(13));
ALTER TABLE t2 DISCARD TABLESPACE;
FLUSH TABLES t1 FOR EXPORT;
UNLOCK TABLES;
ALTER TABLE t2 IMPORT TABLESPACE;
ERROR HY000: Internal error: Drop all secondary indexes before importing table test/t2 when .cfg file is missing.
ALTER TABLE t2 DROP KEY idx;
ALTER TABLE t2 IMPORT TABLESPACE;
Warnings:
Warning 1814 Tablespace has been discarded for table `t2`
Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t2.cfg', will attempt to import without schema verification
SELECT * FROM t2;
f1 f2
1 InnoDB
DROP TABLE t1, t2;

View File

@@ -0,0 +1,7 @@
[page_compressed]
innodb-compression-default=1
[encryption]
innodb-encrypt-tables=1
[page_compressed_encryption]
innodb-compression-default=1
innodb-encrypt-tables=1

View File

@@ -0,0 +1 @@
--innodb-checksum-algorithm=crc32

View File

@@ -0,0 +1,23 @@
--source include/have_innodb.inc
--source include/have_example_key_management_plugin.inc
--source include/innodb_checksum_algorithm.inc
--echo #
--echo # MDEV-26131 SEGV in ha_innobase::discard_or_import_tablespace
--echo #
let $MYSQLD_DATADIR = `SELECT @@datadir`;
CREATE TABLE t1(f1 int,f2 text)ENGINE=InnoDB;
INSERT INTO t1 VALUES(1, "InnoDB");
CREATE TABLE t2 LIKE t1;
ALTER TABLE t2 ADD KEY idx (f2(13));
ALTER TABLE t2 DISCARD TABLESPACE;
FLUSH TABLES t1 FOR EXPORT;
--copy_file $MYSQLD_DATADIR/test/t1.ibd $MYSQLD_DATADIR/test/t2.ibd
UNLOCK TABLES;
--error ER_INTERNAL_ERROR
ALTER TABLE t2 IMPORT TABLESPACE;
ALTER TABLE t2 DROP KEY idx;
--replace_regex /opening '.*\/test\//opening '.\/test\//
ALTER TABLE t2 IMPORT TABLESPACE;
SELECT * FROM t2;
DROP TABLE t1, t2;
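For contrast, a sketch of the warning-free import path, assuming a hypothetical table t3 with a definition identical to t1 (no extra index). The .cfg must be copied while the export lock is held, because UNLOCK TABLES removes it again:

CREATE TABLE t3 LIKE t1;
ALTER TABLE t3 DISCARD TABLESPACE;
# FLUSH ... FOR EXPORT quiesces t1 and writes t1.cfg next to t1.ibd
FLUSH TABLES t1 FOR EXPORT;
--copy_file $MYSQLD_DATADIR/test/t1.ibd $MYSQLD_DATADIR/test/t3.ibd
--copy_file $MYSQLD_DATADIR/test/t1.cfg $MYSQLD_DATADIR/test/t3.cfg
UNLOCK TABLES;
# With the .cfg present the table definition is verified against the file,
# so no secondary indexes have to be dropped before the import
ALTER TABLE t3 IMPORT TABLESPACE;
DROP TABLE t3;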

View File

@@ -47,6 +47,42 @@ connection slave;
drop table federated.t1_1;
drop table federated.t1_2;
End of 5.1 tests
#
# MDEV-18734 ASAN heap-use-after-free upon sorting by blob column from partitioned table
#
connection slave;
use federated;
create table t1_1 (x int, b text, key(x));
create table t1_2 (x int, b text, key(x));
connection master;
create table t1 (x int, b text, key(x)) engine=federated
partition by range columns (x) (
partition p1 values less than (40) connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1_1',
partition pn values less than (maxvalue) connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1_2'
);
insert t1 values (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8);
insert t1 select x + 8, x + 8 from t1;
insert t1 select x + 16, x + 16 from t1;
insert t1 select x + 49, repeat(x + 49, 100) from t1;
flush tables;
# This produces wrong result before MDEV-17573
select x, left(b, 10) from t1 where x > 30 and x < 60 order by b;
x left(b, 10)
31 31
32 32
50 5050505050
51 5151515151
52 5252525252
53 5353535353
54 5454545454
55 5555555555
56 5656565656
57 5757575757
58 5858585858
59 5959595959
drop table t1;
connection slave;
drop table t1_1, t1_2;
connection master;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;

View File

@@ -51,4 +51,29 @@ drop table federated.t1_2;
--echo End of 5.1 tests
--echo #
--echo # MDEV-18734 ASAN heap-use-after-free upon sorting by blob column from partitioned table
--echo #
connection slave;
use federated;
create table t1_1 (x int, b text, key(x));
create table t1_2 (x int, b text, key(x));
connection master;
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval create table t1 (x int, b text, key(x)) engine=federated
partition by range columns (x) (
partition p1 values less than (40) connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1_1',
partition pn values less than (maxvalue) connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1_2'
);
insert t1 values (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8);
insert t1 select x + 8, x + 8 from t1;
insert t1 select x + 16, x + 16 from t1;
insert t1 select x + 49, repeat(x + 49, 100) from t1;
flush tables;
--echo # This produces wrong result before MDEV-17573
select x, left(b, 10) from t1 where x > 30 and x < 60 order by b;
drop table t1;
connection slave;
drop table t1_1, t1_2;
source include/federated_cleanup.inc;

View File

@@ -84,11 +84,21 @@ SET GLOBAL wsrep_provider_options = 'dbug=';
SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync';
SET DEBUG_SYNC = "RESET";
connection node_2a;
set session wsrep_sync_wait=15;
SELECT COUNT(*) = 1 FROM test.t1 WHERE f2 = 'e';
COUNT(*) = 1
1
set session wsrep_sync_wait=0;
SELECT * from test.t1;
f1 f2
1 a
2 b
3 e
4 d
connection node_1;
SELECT * from test.t1;
f1 f2
1 a
2 b
3 e
4 d
connection node_2a;
STOP SLAVE;
RESET SLAVE;
DROP TABLE t1;

View File

@@ -185,11 +185,17 @@ SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
SET DEBUG_SYNC = "RESET";
--connection node_2a
set session wsrep_sync_wait=15;
SELECT COUNT(*) = 1 FROM test.t1 WHERE f2 = 'e';
set session wsrep_sync_wait=0;
--let $wait_condition = SELECT COUNT(*) = 1 FROM test.t1 where f2 = 'e'
--source include/wait_condition.inc
SELECT * from test.t1;
--connection node_1
--let $wait_condition = SELECT COUNT(*) = 1 FROM test.t1 where f2 = 'e'
--source include/wait_condition.inc
SELECT * from test.t1;
--connection node_2a
STOP SLAVE;
RESET SLAVE;

View File

@@ -0,0 +1,47 @@
connection node_2;
connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB;
INSERT INTO t1 VALUES (1), (2), (3);
connection node_2;
SET SESSION wsrep_trx_fragment_size = 1;
START TRANSACTION;
INSERT INTO t1 VALUES (4);
connection node_1;
SELECT COUNT(*) FROM t1;
COUNT(*)
3
connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2a;
SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_toi";
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1a;
SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_bf_abort";
connection node_1;
TRUNCATE TABLE t1;
connection node_1a;
SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_bf_abort_reached";
connection node_2a;
SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_toi_reached";
connection node_2;
INSERT INTO t1 VALUES (5);
connection node_2a;
SET SESSION wsrep_sync_wait = 0;
SET SESSION wsrep_sync_wait = DEFAULT;
SET GLOBAL DEBUG_DBUG = "";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_toi";
connection node_2;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
connection node_1a;
SET SESSION wsrep_sync_wait=0;
SET GLOBAL DEBUG_DBUG = "+d,sync.wsrep_log_dummy_write_set";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_bf_abort";
SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_log_dummy_write_set_reached";
connection node_1;
connection node_2;
SET GLOBAL DEBUG_DBUG = "";
SET DEBUG_SYNC = "RESET";
connection node_1;
SET GLOBAL DEBUG_DBUG = "";
SET DEBUG_SYNC = "RESET";
DROP TABLE t1;

View File

@@ -0,0 +1,113 @@
#
# MDEV-25717 Assertion `owning_thread_id_ == wsrep::this_thread::get_id()'
#
# This test exposes a race condition between rollbacker thread and rollback
# fragment processing.
#
--source include/galera_cluster.inc
--source include/have_debug_sync.inc
--connection node_1
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB;
INSERT INTO t1 VALUES (1), (2), (3);
#
# On node_2 we start an SR transaction; it is going to
# be BF aborted later on
#
--connection node_2
SET SESSION wsrep_trx_fragment_size = 1;
START TRANSACTION;
INSERT INTO t1 VALUES (4);
--connection node_1
SELECT COUNT(*) FROM t1; # Sync wait
#
# Issue a conflicting TRUNCATE statement on node_1:
# - on node_2, block it before it is going to apply
# - on node_1, block the TRUNCATE before it BF aborts the INSERT
#
--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--connection node_2a
SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_toi";
--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connection node_1a
SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_bf_abort";
--connection node_1
--send TRUNCATE TABLE t1
--connection node_1a
SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_bf_abort_reached";
--connection node_2a
SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_toi_reached";
#
# Generate one more fragment on the SR transaction.
# This is going to fail certification and results
# in a rollback fragment.
#
--connection node_2
--let $expected_cert_failures = `SELECT VARIABLE_VALUE + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_cert_failures'`
--send INSERT INTO t1 VALUES (5)
#
# Wait until after certify and observe the certification
# failure. Let both continue and we are done on node_2.
#
--connection node_2a
SET SESSION wsrep_sync_wait = 0;
--let $wait_condition = SELECT VARIABLE_VALUE = $expected_cert_failures FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_cert_failures'
--source include/wait_condition.inc
SET SESSION wsrep_sync_wait = DEFAULT;
SET GLOBAL DEBUG_DBUG = "";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_toi";
--connection node_2
--error ER_LOCK_DEADLOCK
--reap
#
# On node_1 we expect the following things:
# - the TRUNCATE should successfully bf abort the transaction
# - A rollback fragment should be delivered as a result of
# certification failure. We expect the rollback fragment to
# be delivered after TRUNCATE has bf aborted, therefore rollback
# fragment logs a dummy writeset.
#
--connection node_1a
SET SESSION wsrep_sync_wait=0;
SET GLOBAL DEBUG_DBUG = "+d,sync.wsrep_log_dummy_write_set";
# Signal the TRUNCATE to continue and observe the BF abort
--let $expected_bf_aborts = `SELECT VARIABLE_VALUE + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'`
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_bf_abort";
# Expect a timeout if bug is present
--let $wait_condition = SELECT VARIABLE_VALUE = $expected_bf_aborts FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'
--source include/wait_condition.inc
# Observe logging of dummy writeset
SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_log_dummy_write_set_reached";
# TRUNCATE succeeds
--connection node_1
--reap
#
# Cleanup
#
--connection node_2
SET GLOBAL DEBUG_DBUG = "";
SET DEBUG_SYNC = "RESET";
--connection node_1
SET GLOBAL DEBUG_DBUG = "";
SET DEBUG_SYNC = "RESET";
DROP TABLE t1;

View File

@@ -36,19 +36,21 @@ db.opt
t1.frm
restore: t1 .ibd and .cfg files
ALTER TABLE t1 IMPORT TABLESPACE;
ERROR HY000: Internal error: Drop all secondary indexes before importing table test/t1 when .cfg file is missing.
ALTER TABLE t1 DROP INDEX b;
ALTER TABLE t1 IMPORT TABLESPACE;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL AUTO_INCREMENT,
`b` blob DEFAULT NULL,
`c` blob DEFAULT NULL,
PRIMARY KEY (`a`),
KEY `b` (`b`(200))
PRIMARY KEY (`a`)
) ENGINE=InnoDB AUTO_INCREMENT=46 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
UPDATE t1 set b = repeat("de", 100) where b = repeat("cd", 200);
explain SELECT a FROM t1 where b = repeat("de", 100);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref b b 203 const # Using where
1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where
SELECT a FROM t1 where b = repeat("de", 100);
a
3
@@ -112,14 +114,19 @@ ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
ALTER TABLE t1 DISCARD TABLESPACE;
restore: t1 .ibd and .cfg files
ALTER TABLE t1 IMPORT TABLESPACE;
ERROR HY000: Internal error: Drop all secondary indexes before importing table test/t1 when .cfg file is missing.
ALTER TABLE t1 DROP INDEX idx1;
ALTER TABLE t1 IMPORT TABLESPACE;
Warnings:
Warning 1814 Tablespace has been discarded for table `t1`
Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t1.cfg', will attempt to import without schema verification
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`c1` int(11) NOT NULL AUTO_INCREMENT,
`c2` point NOT NULL,
`c3` linestring NOT NULL,
PRIMARY KEY (`c1`),
SPATIAL KEY `idx1` (`c2`)
PRIMARY KEY (`c1`)
) ENGINE=InnoDB AUTO_INCREMENT=14325 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
UPDATE t1 SET C2 = ST_GeomFromText('POINT(0 0)');
SELECT COUNT(*) FROM t1;

View File

@@ -0,0 +1,17 @@
CREATE TABLE t(a INT UNSIGNED PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t VALUES (10), (30);
connect con1,localhost,root,,;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
BEGIN;
INSERT INTO t VALUES (20);
SELECT * FROM t WHERE a BETWEEN 10 AND 30;
a
10
20
30
connection default;
SET session innodb_lock_wait_timeout=1;
INSERT INTO t VALUES (15);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
disconnect con1;
DROP TABLE t;
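Why the INSERT times out: under SERIALIZABLE a plain SELECT is executed as a locking read, so con1's range scan leaves shared next-key locks on the scanned rows and the gaps between them. A condensed sketch of the conflict:

# con1 keeps its transaction open after the locking range scan:
SELECT * FROM t WHERE a BETWEEN 10 AND 30;
# the default connection needs an insert intention lock inside the locked
# gap (10, 20), waits, and hits innodb_lock_wait_timeout=1:
INSERT INTO t VALUES (15);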

View File

@@ -0,0 +1,30 @@
call mtr.add_suppression("Table `test`.`t2` should have 2 indexes but the tablespace has 1 indexes");
call mtr.add_suppression("Index for table 't2' is corrupt; try to repair it");
call mtr.add_suppression("Trying to read .* bytes at .* outside the bounds of the file: \\..test.t2\\.ibd");
CREATE TABLE t1 (
id INT AUTO_INCREMENT PRIMARY KEY,
not_id INT,
data CHAR(255),
data2 BLOB
) ENGINE=INNODB;
ALTER TABLE t1 MODIFY not_id INT UNIQUE KEY;
connect purge_control,localhost,root,,;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
DELETE FROM t1 WHERE id % 2 = 1;
FLUSH TABLES t1 FOR EXPORT;
UNLOCK TABLES;
connection purge_control;
COMMIT;
connection default;
DROP TABLE t1;
CREATE TABLE t2 (
id INT AUTO_INCREMENT PRIMARY KEY,
not_id INT UNIQUE KEY,
data CHAR(255),
data2 BLOB
) ENGINE=INNODB;
ALTER TABLE t2 DISCARD TABLESPACE;
ALTER TABLE t2 IMPORT TABLESPACE;
ERROR HY000: Index for table 't2' is corrupt; try to repair it
DROP TABLE t2;

View File

@@ -129,7 +129,7 @@ create table t2(a char(20), key(a), foreign key(a) references t1(f1)) engine=inn
ERROR HY000: Can't create table `test`.`t2` (errno: 150 "Foreign key constraint is incorrectly formed")
show warnings;
Level Code Message
Warning 150 Create table `test`.`t2` with foreign key (a) constraint failed. Field type or character set for column 'a' does not mach referenced column 'f1'.
Warning 150 Create table `test`.`t2` with foreign key (a) constraint failed. Field type or character set for column 'a' does not match referenced column 'f1'.
Error 1005 Can't create table `test`.`t2` (errno: 150 "Foreign key constraint is incorrectly formed")
Warning 1215 Cannot add foreign key constraint for `t2`
drop table t1;

View File

@@ -45,7 +45,7 @@ trx_last_foreign_key_error varchar(256) YES NULL
trx_is_read_only int(1) NO 0
trx_autocommit_non_locking int(1) NO 0
trx_state trx_weight trx_tables_in_use trx_tables_locked trx_rows_locked trx_rows_modified trx_concurrency_tickets trx_isolation_level trx_unique_checks trx_foreign_key_checks
RUNNING 3 0 1 5 1 0 REPEATABLE READ 1 1
RUNNING 3 0 1 6 1 0 REPEATABLE READ 1 1
trx_isolation_level trx_unique_checks trx_foreign_key_checks
SERIALIZABLE 0 0
trx_state trx_isolation_level trx_last_foreign_key_error

View File

@@ -1 +0,0 @@
--innodb_checksum_algorithm=full_crc32

View File

@@ -53,6 +53,9 @@ ib_restore_tablespaces("test", "t1");
EOF
--remove_file $MYSQLD_DATADIR/test/t1.cfg
--error ER_INTERNAL_ERROR
ALTER TABLE t1 IMPORT TABLESPACE;
ALTER TABLE t1 DROP INDEX b;
--disable_warnings
ALTER TABLE t1 IMPORT TABLESPACE;
--enable_warnings
@@ -131,9 +134,13 @@ ib_restore_tablespaces("test", "t1");
EOF
--remove_file $MYSQLD_DATADIR/test/t1.cfg
--disable_warnings
--error ER_INTERNAL_ERROR
ALTER TABLE t1 IMPORT TABLESPACE;
--enable_warnings
ALTER TABLE t1 DROP INDEX idx1;
--replace_regex /opening '.*\/test\//opening '.\/test\//
ALTER TABLE t1 IMPORT TABLESPACE;
--disable_warnings
SHOW CREATE TABLE t1;
UPDATE t1 SET C2 = ST_GeomFromText('POINT(0 0)');
SELECT COUNT(*) FROM t1;

View File

@@ -0,0 +1,21 @@
--source include/have_innodb.inc
--source include/count_sessions.inc
CREATE TABLE t(a INT UNSIGNED PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t VALUES (10), (30);
--connect (con1,localhost,root,,)
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
BEGIN;
INSERT INTO t VALUES (20);
SELECT * FROM t WHERE a BETWEEN 10 AND 30;
--connection default
SET session innodb_lock_wait_timeout=1;
--error ER_LOCK_WAIT_TIMEOUT
INSERT INTO t VALUES (15);
--disconnect con1
DROP TABLE t;
--source include/wait_until_count_sessions.inc
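The count_sessions.inc / wait_until_count_sessions.inc pair is the usual MTR bracket for tests that open extra connections: record the session count up front, then wait at the end until it drops back, so the asynchronous cleanup of con1 cannot leak into a later test. The idiom in isolation:

--source include/count_sessions.inc
# ... test body that connects and disconnects extra sessions ...
--source include/wait_until_count_sessions.inc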

View File

@@ -0,0 +1,69 @@
--source include/have_innodb.inc
call mtr.add_suppression("Table `test`.`t2` should have 2 indexes but the tablespace has 1 indexes");
call mtr.add_suppression("Index for table 't2' is corrupt; try to repair it");
call mtr.add_suppression("Trying to read .* bytes at .* outside the bounds of the file: \\..test.t2\\.ibd");
let MYSQLD_DATADIR = `SELECT @@datadir`;
CREATE TABLE t1 (
id INT AUTO_INCREMENT PRIMARY KEY,
not_id INT,
data CHAR(255),
data2 BLOB
) ENGINE=INNODB;
--disable_query_log
--let i = 0
while ($i != 1000) {
eval INSERT INTO t1 VALUES (DEFAULT, $i, REPEAT('b', 255), REPEAT('a', 5000));
--inc $i
}
--enable_query_log
ALTER TABLE t1 MODIFY not_id INT UNIQUE KEY;
connect (purge_control,localhost,root,,);
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
DELETE FROM t1 WHERE id % 2 = 1;
FLUSH TABLES t1 FOR EXPORT;
--copy_file $MYSQLD_DATADIR/test/t1.ibd $MYSQLD_DATADIR/test/tmp.ibd
--copy_file $MYSQLD_DATADIR/test/t1.cfg $MYSQLD_DATADIR/test/tmp.cfg
perl;
use strict;
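# Truncate the copied tablespace to 23 InnoDB pages (16 KiB each with the
# default page size) so that part of it, including secondary index pages,
# lies beyond the end of the file; the IMPORT below must then fail with
# a corruption error.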
die unless open(FILE, "+<$ENV{MYSQLD_DATADIR}/test/tmp.ibd");
die unless truncate(FILE, 16384*23);
close(FILE);
EOF
UNLOCK TABLES;
connection purge_control;
COMMIT;
connection default;
DROP TABLE t1;
CREATE TABLE t2 (
id INT AUTO_INCREMENT PRIMARY KEY,
not_id INT UNIQUE KEY,
data CHAR(255),
data2 BLOB
) ENGINE=INNODB;
ALTER TABLE t2 DISCARD TABLESPACE;
--copy_file $MYSQLD_DATADIR/test/tmp.ibd $MYSQLD_DATADIR/test/t2.ibd
--copy_file $MYSQLD_DATADIR/test/tmp.cfg $MYSQLD_DATADIR/test/t2.cfg
--error ER_NOT_KEYFILE
ALTER TABLE t2 IMPORT TABLESPACE;
DROP TABLE t2;
--remove_file $MYSQLD_DATADIR/test/t2.ibd
--remove_file $MYSQLD_DATADIR/test/tmp.ibd
--remove_file $MYSQLD_DATADIR/test/tmp.cfg

View File

@@ -145,3 +145,13 @@ id title
2 database
3 good
DROP TABLE t1;
#
# MDEV-26273 InnoDB fts DDL fails when
# innodb_force_recovery is set to 2
#
# restart: --innodb_force_recovery=2
CREATE TABLE t1 (FTS_DOC_ID BIGINT UNSIGNED KEY,
f1 CHAR(200)) ENGINE=InnoDB;
ALTER TABLE t1 ADD FULLTEXT INDEX(f1);
DROP TABLE t1;
# restart

View File

@@ -170,3 +170,17 @@ SET GLOBAL innodb_ft_aux_table=default;
SELECT * FROM t1 WHERE MATCH(title) AGAINST ('mysql database good');
DROP TABLE t1;
--echo #
--echo # MDEV-26273 InnoDB fts DDL fails when
--echo # innodb_force_recovery is set to 2
--echo #
let $restart_parameters=--innodb_force_recovery=2;
--source include/restart_mysqld.inc
CREATE TABLE t1 (FTS_DOC_ID BIGINT UNSIGNED KEY,
f1 CHAR(200)) ENGINE=InnoDB;
ALTER TABLE t1 ADD FULLTEXT INDEX(f1);
DROP TABLE t1;
let $restart_parameters=;
--source include/restart_mysqld.inc

View File

@@ -252,6 +252,16 @@ UNLOCK TABLES;
ALTER TABLE tab DISCARD TABLESPACE;
SELECT c1,ST_Astext(c2),ST_Astext(c4) FROM tab;
ERROR HY000: Tablespace has been discarded for table `tab`
ERROR HY000: Internal error: Drop all secondary indexes before importing table test/tab when .cfg file is missing.
Table Create Table
tab CREATE TABLE `tab` (
`c1` int(11) NOT NULL,
`c2` point NOT NULL,
`c3` linestring NOT NULL,
`c4` polygon NOT NULL,
`c5` geometry NOT NULL,
PRIMARY KEY (`c2`(25))
) ENGINE=InnoDB DEFAULT CHARSET=latin1
CHECK TABLE tab;
Table Op Msg_type Msg_text
test.tab check status OK
@@ -282,9 +292,6 @@ INSERT INTO tab SELECT * FROM tab1;
ALTER TABLE tab DROP PRIMARY KEY;
affected rows: 1
info: Records: 1 Duplicates: 0 Warnings: 0
ALTER TABLE tab DROP INDEX idx2;
affected rows: 0
info: Records: 0 Duplicates: 0 Warnings: 0
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
CREATE TEMPORARY TABLE temp_tab AS SELECT * FROM tab where c1 = c2;
ERROR HY000: Illegal parameter data types int and point for operation '='
@@ -325,18 +332,10 @@ tab CREATE TABLE `tab` (
`c2` point NOT NULL,
`c3` linestring NOT NULL,
`c4` polygon NOT NULL,
`c5` geometry NOT NULL,
SPATIAL KEY `idx3` (`c3`),
SPATIAL KEY `idx4` (`c4`) COMMENT 'testing spatial index on Polygon',
SPATIAL KEY `idx5` (`c5`) COMMENT 'testing spatial index on Geometry',
KEY `idx6` (`c4`(10)) USING BTREE
`c5` geometry NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
SHOW INDEX FROM tab;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Ignored
tab 1 idx3 1 c3 A # 32 NULL SPATIAL NO
tab 1 idx4 1 c4 A # 32 NULL SPATIAL testing spatial index on Polygon NO
tab 1 idx5 1 c5 A # 32 NULL SPATIAL testing spatial index on Geometry NO
tab 1 idx6 1 c4 A # 10 NULL BTREE NO
DELETE FROM tab;
ALTER TABLE tab ADD PRIMARY KEY(c2);
affected rows: 0
@@ -357,20 +356,12 @@ tab CREATE TABLE `tab` (
`c5` geometry NOT NULL,
PRIMARY KEY (`c2`(25)),
UNIQUE KEY `const_1` (`c2`(25)),
SPATIAL KEY `idx3` (`c3`),
SPATIAL KEY `idx4` (`c4`) COMMENT 'testing spatial index on Polygon',
SPATIAL KEY `idx5` (`c5`) COMMENT 'testing spatial index on Geometry',
KEY `idx6` (`c4`(10)) USING BTREE,
SPATIAL KEY `idx2` (`c2`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
SHOW INDEX FROM tab;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Ignored
tab 0 PRIMARY 1 c2 A # 25 NULL BTREE NO
tab 0 const_1 1 c2 A # 25 NULL BTREE NO
tab 1 idx3 1 c3 A # 32 NULL SPATIAL NO
tab 1 idx4 1 c4 A # 32 NULL SPATIAL testing spatial index on Polygon NO
tab 1 idx5 1 c5 A # 32 NULL SPATIAL testing spatial index on Geometry NO
tab 1 idx6 1 c4 A # 10 NULL BTREE NO
tab 1 idx2 1 c2 A # 32 NULL SPATIAL NO
INSERT INTO tab(c1,c2,c3,c4,c5)
VALUES(1,ST_GeomFromText('POINT(10 10)'),ST_GeomFromText('LINESTRING(5 5,20 20,30 30)'),
@@ -399,20 +390,12 @@ tab CREATE TABLE `tab` (
`c5` geometry NOT NULL,
PRIMARY KEY (`c5`(10)),
UNIQUE KEY `const_1` (`c5`(10)),
SPATIAL KEY `idx3` (`c3`),
SPATIAL KEY `idx4` (`c4`) COMMENT 'testing spatial index on Polygon',
SPATIAL KEY `idx5` (`c5`) COMMENT 'testing spatial index on Geometry',
KEY `idx6` (`c4`(10)) USING BTREE,
SPATIAL KEY `idx2` (`c2`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
SHOW INDEX FROM tab;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Ignored
tab 0 PRIMARY 1 c5 A # 10 NULL BTREE NO
tab 0 const_1 1 c5 A # 10 NULL BTREE NO
tab 1 idx3 1 c3 A # 32 NULL SPATIAL NO
tab 1 idx4 1 c4 A # 32 NULL SPATIAL testing spatial index on Polygon NO
tab 1 idx5 1 c5 A # 32 NULL SPATIAL testing spatial index on Geometry NO
tab 1 idx6 1 c4 A # 10 NULL BTREE NO
tab 1 idx2 1 c2 A # 32 NULL SPATIAL NO
INSERT INTO tab(c1,c2,c3,c4,c5)
VALUES(1,ST_GeomFromText('POINT(10 10)'),ST_GeomFromText('LINESTRING(5 5,20 20,30 30)'),

View File

@@ -277,8 +277,17 @@ SELECT c1,ST_Astext(c2),ST_Astext(c4) FROM tab;
--disable_query_log
--error ER_INTERNAL_ERROR
ALTER TABLE tab IMPORT TABLESPACE;
ALTER TABLE tab DROP INDEX idx2;
ALTER TABLE tab DROP INDEX idx3;
ALTER TABLE tab DROP INDEX idx4;
ALTER TABLE tab DROP INDEX idx5;
ALTER TABLE tab DROP INDEX idx6;
SHOW CREATE TABLE tab;
ALTER TABLE tab IMPORT TABLESPACE;
--enable_query_log
CHECK TABLE tab;
@@ -308,7 +317,6 @@ INSERT INTO tab SELECT * FROM tab1;
--enable_info
ALTER TABLE tab DROP PRIMARY KEY;
ALTER TABLE tab DROP INDEX idx2;
--disable_info
# Check spatial index on temp tables

View File

@@ -16,12 +16,25 @@ connection server_1;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
set @@global.rpl_semi_sync_master_enabled = 1;
set @@global.rpl_semi_sync_master_wait_point=AFTER_SYNC;
call mtr.add_suppression("Can.t init tc log");
call mtr.add_suppression("Aborting");
call mtr.add_suppression("1 client is using or hasn.t closed the table properly");
call mtr.add_suppression("Table './mtr/test_suppressions' is marked as crashed and should be repaired");
CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
INSERT INTO t1 VALUES (1, 'dummy1');
connection server_2;
connection server_1;
#
# Case:1
#
# CRASH the original master, and FAILOVER to the new
# INSERT INTO t1 VALUES (2, REPEAT("x", 4100))
# Row - 2 will be in master's binlog but not committed, gets replicated
# to slave and applied. On crash master should have 1 row and slave
# should have 2 rows.
#
# Expected State post crash:
#=================================================================
# Master | Slave |
# 0-1-4 (Not committed) | 0-1-4 (Received through semi-sync |
# | replication and applied) |
#=================================================================
connect conn_client,127.0.0.1,root,,test,$SERVER_MYPORT_1,;
SET DEBUG_SYNC= "commit_after_release_LOCK_after_binlog_sync SIGNAL con1_ready WAIT_FOR con1_go";
INSERT INTO t1 VALUES (2, REPEAT("x", 4100));
@@ -29,12 +42,15 @@ connection server_1;
SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
# Kill the server
connection server_2;
include/wait_for_slave_param.inc [Slave_SQL_Running_State]
include/stop_slave.inc
include/assert.inc [Table t1 should have 2 rows.]
SELECT @@GLOBAL.gtid_current_pos;
@@GLOBAL.gtid_current_pos
0-1-8
# restart: --rpl-semi-sync-slave-enabled=1
0-1-4
# restart: --skip-slave-start=1 --rpl-semi-sync-slave-enabled=1
connection server_1;
include/assert.inc [Table t1 should have 1 rows.]
FOUND 1 /truncated binlog file:.*master.*000001/ in mysqld.1.err
disconnect conn_client;
connection server_2;
@@ -45,12 +61,18 @@ CHANGE MASTER TO master_host='127.0.0.1', master_port=$new_master_port, master_u
set global rpl_semi_sync_slave_enabled = 1;
set @@global.gtid_slave_pos=@@global.gtid_binlog_pos;
include/start_slave.inc
#
# Server_2 promoted as master will send 0-1-4 to new slave Server_1
#
connection server_2;
INSERT INTO t1 VALUES (3, 'dummy3');
# The gtid state on current master must be equal to ...
SHOW VARIABLES LIKE 'gtid_binlog_pos';
Variable_name Value
gtid_binlog_pos 0-2-9
gtid_binlog_pos 0-2-5
SHOW VARIABLES LIKE 'gtid_slave_pos';
Variable_name Value
gtid_slave_pos 0-1-4
connection server_1;
SELECT COUNT(*) = 3 as 'true' FROM t1;
true
@@ -58,29 +80,50 @@ true
# ... the gtid states on the slave:
SHOW VARIABLES LIKE 'gtid_slave_pos';
Variable_name Value
gtid_slave_pos 0-2-9
gtid_slave_pos 0-2-5
SHOW VARIABLES LIKE 'gtid_binlog_pos';
Variable_name Value
gtid_binlog_pos 0-2-9
gtid_binlog_pos 0-2-5
connection server_2;
#
# Case:2
#
# CRASH the new master, and FAILOVER back to the original
# INSERT INTO t1 VALUES (4, REPEAT("x", 4100))
# INSERT INTO t1 VALUES (5, REPEAT("x", 4100))
# Rows 4 and 5 will be in master's binlog but not committed, they get
# replicated to slave and applied. On crash master should have 3 rows
# and slave should have 5 rows.
#
# Expected State post crash:
#=================================================================
# Master | Slave |
# 0-2-6 (Not commited) | 0-2-6 (Received through semi-sync |
# | replication and applied) |
# 0-2-7 (Not commited) | 0-2-7 (Received through semi-sync |
# | replication and applied) |
#=================================================================
connect conn_client,127.0.0.1,root,,test,$SERVER_MYPORT_2,;
SET DEBUG_SYNC= "commit_after_release_LOCK_after_binlog_sync SIGNAL con1_ready WAIT_FOR con1_go";
SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL con1_ready WAIT_FOR con1_go";
INSERT INTO t1 VALUES (4, REPEAT("x", 4100));
connect conn_client_2,127.0.0.1,root,,test,$SERVER_MYPORT_2,;
SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
SET DEBUG_SYNC= "commit_after_release_LOCK_log SIGNAL con1_ready WAIT_FOR con2_go";
SET GLOBAL debug_dbug="d,Notify_binlog_EOF";
INSERT INTO t1 VALUES (5, REPEAT("x", 4100));
connection server_2;
SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
SET DEBUG_SYNC= "now WAIT_FOR eof_reached";
# Kill the server
connection server_1;
include/wait_for_slave_param.inc [Slave_SQL_Running_State]
include/stop_slave.inc
include/assert.inc [Table t1 should have 5 rows.]
SELECT @@GLOBAL.gtid_current_pos;
@@GLOBAL.gtid_current_pos
0-2-11
# restart: --rpl-semi-sync-slave-enabled=1
0-2-7
# restart: --skip-slave-start=1 --rpl-semi-sync-slave-enabled=1
connection server_2;
NOT FOUND /truncated binlog file:.*slave.*000001/ in mysqld.2.err
include/assert.inc [Table t1 should have 3 rows.]
FOUND 1 /truncated binlog file:.*slave.*000002/ in mysqld.2.err
disconnect conn_client;
connection server_1;
set global rpl_semi_sync_master_enabled = 1;
@@ -90,12 +133,18 @@ CHANGE MASTER TO master_host='127.0.0.1', master_port=$new_master_port, master_u
set global rpl_semi_sync_slave_enabled = 1;
set @@global.gtid_slave_pos=@@global.gtid_binlog_pos;
include/start_slave.inc
#
# Server_1 promoted as master will send 0-2-6 and 0-2-7 to slave Server_2
#
connection server_1;
INSERT INTO t1 VALUES (6, 'Done');
INSERT INTO t1 VALUES (6, 'dummy6');
# The gtid state on current master must be equal to ...
SHOW VARIABLES LIKE 'gtid_binlog_pos';
Variable_name Value
gtid_binlog_pos 0-1-12
gtid_binlog_pos 0-1-8
SHOW VARIABLES LIKE 'gtid_slave_pos';
Variable_name Value
gtid_slave_pos 0-2-7
connection server_2;
SELECT COUNT(*) = 6 as 'true' FROM t1;
true
@@ -103,27 +152,104 @@ true
# ... the gtid states on the slave:
SHOW VARIABLES LIKE 'gtid_slave_pos';
Variable_name Value
gtid_slave_pos 0-1-12
gtid_slave_pos 0-1-8
SHOW VARIABLES LIKE 'gtid_binlog_pos';
Variable_name Value
gtid_binlog_pos 0-1-12
gtid_binlog_pos 0-1-8
include/diff_tables.inc [server_1:t1, server_2:t1]
connection server_1;
#
# Case:3
#
# CRASH the master and FAILOVER to slave
# INSERT INTO t1 VALUES (7, REPEAT("x", 4100))
# INSERT INTO t1 VALUES (8, REPEAT("x", 4100))
# Rows 7 and 8 will be in master's binlog but not committed, only 7
# gets replicated to slave and applied. On crash master should have 6
# rows and slave should have 7 rows.
#
# Expected State post crash:
#=================================================================
# Master | Slave |
# 0-1-9 (Not commited) | 0-1-9 (Received through semi-sync |
# | replication and applied) |
# 0-1-10 (Not commited - | |
# never sent to slave) | |
#=================================================================
connect conn_client,127.0.0.1,root,,test,$SERVER_MYPORT_1,;
SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL con1_ready WAIT_FOR con1_go";
INSERT INTO t1 VALUES (7, REPEAT("x", 4100));
connect conn_client_3,127.0.0.1,root,,test,$SERVER_MYPORT_1,;
SET DEBUG_SYNC= "commit_before_update_binlog_end_pos SIGNAL con3_ready WAIT_FOR con1_go";
INSERT INTO t1 VALUES (8, REPEAT("x", 4100));
connection server_1;
SET DEBUG_SYNC= "now WAIT_FOR con3_ready";
# Kill the server
connection server_2;
include/wait_for_slave_param.inc [Slave_SQL_Running_State]
include/stop_slave.inc
include/assert.inc [Table t1 should have 7 rows.]
SELECT @@GLOBAL.gtid_current_pos;
@@GLOBAL.gtid_current_pos
0-1-9
# restart: --skip-slave-start=1 --rpl-semi-sync-slave-enabled=1
connection server_1;
include/assert.inc [Table t1 should have 6 rows.]
NOT FOUND /truncated binlog file:.*master.*000003/ in mysqld.1.err
disconnect conn_client;
connection server_2;
set global rpl_semi_sync_master_enabled = 1;
set global rpl_semi_sync_master_wait_point=AFTER_SYNC;
connection server_1;
CHANGE MASTER TO master_host='127.0.0.1', master_port=$new_master_port, master_user='root', master_use_gtid=SLAVE_POS;
set global rpl_semi_sync_slave_enabled = 1;
set @@global.gtid_slave_pos=@@global.gtid_binlog_pos;
include/start_slave.inc
#
# Server_2 promoted as master will send 0-1-9 to slave Server_1
#
connection server_2;
INSERT INTO t1 VALUES (8, 'Done');
include/save_master_gtid.inc
# The gtid state on current master must be equal to ...
SHOW VARIABLES LIKE 'gtid_binlog_pos';
Variable_name Value
gtid_binlog_pos 0-2-10
SHOW VARIABLES LIKE 'gtid_slave_pos';
Variable_name Value
gtid_slave_pos 0-1-9
connection server_1;
include/sync_with_master_gtid.inc
SELECT COUNT(*) = 8 as 'true' FROM t1;
true
1
# ... the gtid states on the slave:
SHOW VARIABLES LIKE 'gtid_slave_pos';
Variable_name Value
gtid_slave_pos 0-2-10
SHOW VARIABLES LIKE 'gtid_binlog_pos';
Variable_name Value
gtid_binlog_pos 0-2-10
#
# Cleanup
#
include/stop_slave.inc
set global rpl_semi_sync_slave_enabled = 0;
set global rpl_semi_sync_master_enabled = 0;
set global rpl_semi_sync_master_wait_point=default;
RESET MASTER;
RESET SLAVE;
connection server_2;
RESET MASTER;
RESET SLAVE;
set @@global.rpl_semi_sync_master_enabled = 0;
set @@global.rpl_semi_sync_slave_enabled = 0;
set @@global.rpl_semi_sync_master_wait_point=default;
CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1, master_user='root', master_use_gtid=SLAVE_POS;
set @@global.gtid_slave_pos=@@global.gtid_binlog_pos;
include/start_slave.inc
connection server_1;
DROP TABLE t1;
connection server_2;
include/stop_slave.inc
connection server_1;
set @@global.rpl_semi_sync_master_enabled = 0;
set @@global.rpl_semi_sync_slave_enabled = 0;
set @@global.rpl_semi_sync_master_wait_point=default;
RESET SLAVE;
RESET MASTER;
connection server_2;
set @@global.rpl_semi_sync_master_enabled = 0;
set @@global.rpl_semi_sync_slave_enabled = 0;
set @@global.rpl_semi_sync_master_wait_point=default;
CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1, master_user='root', master_use_gtid=no;
include/start_slave.inc
connection default;
include/rpl_end.inc

View File

@@ -20,36 +20,70 @@ if (!$failover_to_slave)
# Hold insert after write to binlog and before "run_commit_ordered" in engine
SET DEBUG_SYNC= "commit_after_release_LOCK_after_binlog_sync SIGNAL con1_ready WAIT_FOR con1_go";
--send_eval $query_to_crash
if ($case == 1)
{
SET DEBUG_SYNC= "commit_after_release_LOCK_after_binlog_sync SIGNAL con1_ready WAIT_FOR con1_go";
--send_eval $query_to_crash
--connection server_$server_to_crash
SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
--source include/kill_mysqld.inc
}
# complicate recovery with an extra binlog file
if (!$failover_to_slave)
if ($case == 2)
{
SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL con1_ready WAIT_FOR con1_go";
--send_eval $query_to_crash
--connect (conn_client_2,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
# use the same signal names as $query_to_crash
SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
SET DEBUG_SYNC= "commit_after_release_LOCK_log SIGNAL con1_ready WAIT_FOR con2_go";
SET GLOBAL debug_dbug="d,Notify_binlog_EOF";
--send_eval $query2_to_crash
--connection server_$server_to_crash
SET DEBUG_SYNC= "now WAIT_FOR eof_reached";
--source include/kill_mysqld.inc
}
--connection server_$server_to_crash
SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
--source include/kill_mysqld.inc
# complicate recovery with an extra binlog file
if ($case == 3)
{
SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL con1_ready WAIT_FOR con1_go";
--send_eval $query_to_crash
--connect (conn_client_3,127.0.0.1,root,,test,$SERVER_MYPORT_1,)
# use the same signal names as $query_to_crash
SET DEBUG_SYNC= "commit_before_update_binlog_end_pos SIGNAL con3_ready WAIT_FOR con1_go";
--send_eval $query2_to_crash
--connection server_$server_to_crash
SET DEBUG_SYNC= "now WAIT_FOR con3_ready";
--source include/kill_mysqld.inc
}
--connection server_$server_to_promote
--let $slave_param= Slave_SQL_Running_State
--let $slave_param_value= Slave has read all relay log; waiting for more updates
source include/wait_for_slave_param.inc;
--error 2003
--source include/stop_slave.inc
--let $assert_cond= COUNT(*) = $expected_rows_on_slave FROM t1
--let $assert_text= Table t1 should have $expected_rows_on_slave rows.
--source include/assert.inc
SELECT @@GLOBAL.gtid_current_pos;
--let $restart_parameters=--rpl-semi-sync-slave-enabled=1
--let $restart_parameters=--skip-slave-start=1 --rpl-semi-sync-slave-enabled=1
--let $allow_rpl_inited=1
--source include/start_mysqld.inc
--connection server_$server_to_crash
--enable_reconnect
--source include/wait_until_connected_again.inc
--let $assert_cond= COUNT(*) = $expected_rows_on_master FROM t1
--let $assert_text= Table t1 should have $expected_rows_on_master rows.
--source include/assert.inc
# Check error log for correct messages.
let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.$server_to_crash.err;
--let SEARCH_FILE=$log_error_

View File

@@ -5,7 +5,9 @@
[mysqld.1]
log-slave-updates
gtid-strict-mode=1
sync-binlog=1
[mysqld.2]
log-slave-updates
gtid-strict-mode=1
sync-binlog=1

View File

@@ -1,27 +1,10 @@
# ==== Purpose ====
#
# Test verifies replication failover scenario.
#
# ==== Implementation ====
#
# Steps:
# 0 - Having two servers 1 and 2 enable semi-sync replication with
# with the master wait 'after_sync'.
# 1 - Insert a row. While inserting second row simulate
# a server crash at once the transaction is written to binlog, flushed
# and synced but the binlog position is not updated.
# 2 - Post crash-recovery on the old master execute there CHANGE MASTER
# TO command to connect to server id 2.
# 3 - The old master new slave server 1 must connect to the new
# master server 2.
# 4 - repeat the above to crash the new master and restore in role the old one
#
# ==== References ====
#
# MDEV-21117: recovery for --rpl-semi-sync-slave-enabled server
#
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
@@ -43,36 +26,53 @@ set @@global.gtid_slave_pos = "";
CHANGE MASTER TO master_use_gtid= slave_pos;
--source include/start_slave.inc
--connection server_1
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
set @@global.rpl_semi_sync_master_enabled = 1;
set @@global.rpl_semi_sync_master_wait_point=AFTER_SYNC;
call mtr.add_suppression("Can.t init tc log");
call mtr.add_suppression("Aborting");
call mtr.add_suppression("1 client is using or hasn.t closed the table properly");
call mtr.add_suppression("Table './mtr/test_suppressions' is marked as crashed and should be repaired");
CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
INSERT INTO t1 VALUES (1, 'dummy1');
--save_master_pos
#
# CRASH the original master, and FAILOVER to the new
#
--connection server_2
--sync_with_master
--connection server_1
--let $case = 1
--echo #
--echo # Case:$case
--echo #
--echo # CRASH the original master, and FAILOVER to the new
# value 1 for server id 1 -> 2 failover
--let $failover_to_slave=1
--let $query_to_crash= INSERT INTO t1 VALUES (2, REPEAT("x", 4100))
--echo # $query_to_crash
--echo # Row 2 will be in the master's binlog but not committed; it gets
--echo # replicated to the slave and applied. After the crash the master
--echo # should have 1 row and the slave should have 2 rows.
--echo #
--echo # Expected State post crash:
--echo #=================================================================
--echo # Master | Slave |
--echo # 0-1-4 (Not committed) | 0-1-4 (Received through semi-sync |
--echo # | replication and applied) |
--echo #=================================================================
--let $log_search_pattern=truncated binlog file:.*master.*000001
--let $expected_rows_on_master= 1
--let $expected_rows_on_slave= 2
--source rpl_semi_sync_crash.inc
--echo #
--echo # Server_2 promoted as master will send 0-1-4 to new slave Server_1
--echo #
--connection server_2
--let $rows_so_far=3
--eval INSERT INTO t1 VALUES ($rows_so_far, 'dummy3')
--save_master_pos
--echo # The gtid state on current master must be equal to ...
SHOW VARIABLES LIKE 'gtid_binlog_pos';
SHOW VARIABLES LIKE 'gtid_slave_pos';
--connection server_1
--sync_with_master
@@ -82,23 +82,44 @@ SHOW VARIABLES LIKE 'gtid_slave_pos';
SHOW VARIABLES LIKE 'gtid_binlog_pos';
--connection server_2
#
# CRASH the new master and FAILOVER back to the original
#
--let $case = 2
--echo #
--echo # Case:$case
--echo #
--echo # CRASH the new master, and FAILOVER back to the original
# value 0 for the reverse server id 2 -> 1 failover
--let $failover_to_slave=0
--let $query_to_crash = INSERT INTO t1 VALUES (4, REPEAT("x", 4100))
--let $query2_to_crash= INSERT INTO t1 VALUES (5, REPEAT("x", 4100))
--let $log_search_pattern=truncated binlog file:.*slave.*000001
--echo # $query_to_crash
--echo # $query2_to_crash
--echo # Rows 4 and 5 will be in the master's binlog but not committed; they
--echo # get replicated to the slave and applied. After the crash the master
--echo # should have 3 rows and the slave should have 5 rows.
--echo #
--echo # Expected State post crash:
--echo #=================================================================
--echo # Master | Slave |
--echo # 0-2-6 (Not committed) | 0-2-6 (Received through semi-sync |
--echo # | replication and applied) |
--echo # 0-2-7 (Not committed) | 0-2-7 (Received through semi-sync |
--echo # | replication and applied) |
--echo #=================================================================
--let $log_search_pattern=truncated binlog file:.*slave.*000002
--let $expected_rows_on_master= 3
--let $expected_rows_on_slave= 5
--source rpl_semi_sync_crash.inc
--echo #
--echo # Server_1 promoted as master will send 0-2-6 and 0-2-7 to slave Server_2
--echo #
--connection server_1
--let $rows_so_far=6
--eval INSERT INTO t1 VALUES ($rows_so_far, 'Done')
--eval INSERT INTO t1 VALUES ($rows_so_far, 'dummy6')
--save_master_pos
--echo # The gtid state on current master must be equal to ...
SHOW VARIABLES LIKE 'gtid_binlog_pos';
SHOW VARIABLES LIKE 'gtid_slave_pos';
--connection server_2
--sync_with_master
@@ -107,35 +128,82 @@ SHOW VARIABLES LIKE 'gtid_binlog_pos';
SHOW VARIABLES LIKE 'gtid_slave_pos';
SHOW VARIABLES LIKE 'gtid_binlog_pos';
--let $diff_tables=server_1:t1, server_2:t1
--source include/diff_tables.inc
#
--connection server_1
--let $case = 3
--echo #
--echo # Case:$case
--echo #
--echo # CRASH the master and FAILOVER to slave
--let $failover_to_slave=1
--let $query_to_crash = INSERT INTO t1 VALUES (7, REPEAT("x", 4100))
--let $query2_to_crash= INSERT INTO t1 VALUES (8, REPEAT("x", 4100))
--echo # $query_to_crash
--echo # $query2_to_crash
--echo # Rows 7 and 8 will be in the master's binlog but not committed; only 7
--echo # gets replicated to the slave and applied. After the crash the master
--echo # should have 6 rows and the slave should have 7 rows.
--echo #
--echo # Expected State post crash:
--echo #=================================================================
--echo # Master | Slave |
--echo # 0-1-9 (Not committed) | 0-1-9 (Received through semi-sync |
--echo # | replication and applied) |
--echo # 0-1-10 (Not committed - | |
--echo # never sent to slave) | |
--echo #=================================================================
--let $log_search_pattern=truncated binlog file:.*master.*000003
--let $expected_rows_on_master= 6
--let $expected_rows_on_slave= 7
--source rpl_semi_sync_crash.inc
--echo #
--echo # Server_2 promoted as master will send 0-1-9 to slave Server_1
--echo #
--connection server_2
--let $rows_so_far=8
--eval INSERT INTO t1 VALUES ($rows_so_far, 'Done')
--source include/save_master_gtid.inc
--echo # The gtid state on current master must be equal to ...
SHOW VARIABLES LIKE 'gtid_binlog_pos';
SHOW VARIABLES LIKE 'gtid_slave_pos';
--connection server_1
--source include/sync_with_master_gtid.inc
--eval SELECT COUNT(*) = $rows_so_far as 'true' FROM t1
--echo # ... the gtid states on the slave:
SHOW VARIABLES LIKE 'gtid_slave_pos';
SHOW VARIABLES LIKE 'gtid_binlog_pos';
--echo #
--echo # Cleanup
#
--echo #
--source include/stop_slave.inc
set global rpl_semi_sync_slave_enabled = 0;
set global rpl_semi_sync_master_enabled = 0;
set global rpl_semi_sync_master_wait_point=default;
RESET MASTER;
RESET SLAVE;
--connection server_2
RESET MASTER;
RESET SLAVE;
set @@global.rpl_semi_sync_master_enabled = 0;
set @@global.rpl_semi_sync_slave_enabled = 0;
set @@global.rpl_semi_sync_master_wait_point=default;
evalp CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1, master_user='root', master_use_gtid=SLAVE_POS;
set @@global.gtid_slave_pos=@@global.gtid_binlog_pos;
--source include/start_slave.inc
--connection server_1
DROP TABLE t1;
--save_master_pos
--connection server_2
--sync_with_master
--source include/stop_slave.inc
--connection server_1
set @@global.rpl_semi_sync_master_enabled = 0;
set @@global.rpl_semi_sync_slave_enabled = 0;
set @@global.rpl_semi_sync_master_wait_point=default;
RESET SLAVE;
RESET MASTER;
--connection server_2
set @@global.rpl_semi_sync_master_enabled = 0;
set @@global.rpl_semi_sync_slave_enabled = 0;
set @@global.rpl_semi_sync_master_wait_point=default;
evalp CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1, master_user='root', master_use_gtid=no;
--source include/start_slave.inc
connection default;
--enable_reconnect

View File

@@ -28,3 +28,76 @@ set statement sql_mode= '' for update t1 set i= 1, v= 2;
Warnings:
Warning 1906 The value specified for generated column 'v' in table 't1' has been ignored
drop table t1;
#
# MDEV-18734 ASAN heap-use-after-free in my_strnxfrm_simple_internal upon update on versioned partitioned table
#
# Cover queue_fix() in ha_partition::handle_ordered_index_scan()
create or replace table t1 (
x int auto_increment primary key,
b text, v mediumtext as (b) virtual,
index (v(10))
) partition by range columns (x) (
partition p1 values less than (3),
partition p2 values less than (6),
partition p3 values less than (9),
partition p4 values less than (12),
partition p5 values less than (15),
partition p6 values less than (17),
partition p7 values less than (19),
partition p8 values less than (21),
partition p9 values less than (23),
partition p10 values less than (25),
partition p11 values less than (27),
partition p12 values less than (29),
partition p13 values less than (31),
partition p14 values less than (33),
partition p15 values less than (35),
partition pn values less than (maxvalue));
insert into t1 (b) values
(repeat('q', 8192)), (repeat('z', 8192)), (repeat('a', 8192)), (repeat('b', 8192)),
(repeat('x', 8192)), (repeat('y', 8192));
insert t1 (b) select b from t1;
insert t1 (b) select b from t1;
insert t1 (b) select b from t1;
insert t1 (b) select b from t1;
select x, left(b, 10), left(v, 10) from t1 where x > 30 and x < 60 order by v;
x left(b, 10) left(v, 10)
33 aaaaaaaaaa aaaaaaaaaa
39 aaaaaaaaaa aaaaaaaaaa
45 aaaaaaaaaa aaaaaaaaaa
51 aaaaaaaaaa aaaaaaaaaa
57 aaaaaaaaaa aaaaaaaaaa
34 bbbbbbbbbb bbbbbbbbbb
40 bbbbbbbbbb bbbbbbbbbb
46 bbbbbbbbbb bbbbbbbbbb
52 bbbbbbbbbb bbbbbbbbbb
58 bbbbbbbbbb bbbbbbbbbb
31 qqqqqqqqqq qqqqqqqqqq
37 qqqqqqqqqq qqqqqqqqqq
43 qqqqqqqqqq qqqqqqqqqq
49 qqqqqqqqqq qqqqqqqqqq
55 qqqqqqqqqq qqqqqqqqqq
35 xxxxxxxxxx xxxxxxxxxx
41 xxxxxxxxxx xxxxxxxxxx
47 xxxxxxxxxx xxxxxxxxxx
53 xxxxxxxxxx xxxxxxxxxx
59 xxxxxxxxxx xxxxxxxxxx
36 yyyyyyyyyy yyyyyyyyyy
42 yyyyyyyyyy yyyyyyyyyy
48 yyyyyyyyyy yyyyyyyyyy
54 yyyyyyyyyy yyyyyyyyyy
32 zzzzzzzzzz zzzzzzzzzz
38 zzzzzzzzzz zzzzzzzzzz
44 zzzzzzzzzz zzzzzzzzzz
50 zzzzzzzzzz zzzzzzzzzz
56 zzzzzzzzzz zzzzzzzzzz
update t1 set b= 'bar' where v > 'a' limit 20;
drop table t1;
# Cover return_top_record() in ha_partition::handle_ordered_index_scan()
create table t1 (x int primary key, b tinytext, v text as (b) virtual)
partition by range columns (x) (
partition p1 values less than (4),
partition pn values less than (maxvalue));
insert into t1 (x, b) values (1, ''), (2, ''), (3, 'a'), (4, 'b');
update t1 set b= 'bar' where x > 0 order by v limit 2;
drop table t1;

View File

@@ -30,3 +30,51 @@ subpartition by hash(v) subpartitions 3 (
insert t1 set i= 0;
set statement sql_mode= '' for update t1 set i= 1, v= 2;
drop table t1;
--echo #
--echo # MDEV-18734 ASAN heap-use-after-free in my_strnxfrm_simple_internal upon update on versioned partitioned table
--echo #
--echo # Cover queue_fix() in ha_partition::handle_ordered_index_scan()
create or replace table t1 (
x int auto_increment primary key,
b text, v mediumtext as (b) virtual,
index (v(10))
) partition by range columns (x) (
partition p1 values less than (3),
partition p2 values less than (6),
partition p3 values less than (9),
partition p4 values less than (12),
partition p5 values less than (15),
partition p6 values less than (17),
partition p7 values less than (19),
partition p8 values less than (21),
partition p9 values less than (23),
partition p10 values less than (25),
partition p11 values less than (27),
partition p12 values less than (29),
partition p13 values less than (31),
partition p14 values less than (33),
partition p15 values less than (35),
partition pn values less than (maxvalue));
insert into t1 (b) values
(repeat('q', 8192)), (repeat('z', 8192)), (repeat('a', 8192)), (repeat('b', 8192)),
(repeat('x', 8192)), (repeat('y', 8192));
insert t1 (b) select b from t1;
insert t1 (b) select b from t1;
insert t1 (b) select b from t1;
insert t1 (b) select b from t1;
select x, left(b, 10), left(v, 10) from t1 where x > 30 and x < 60 order by v;
update t1 set b= 'bar' where v > 'a' limit 20;
drop table t1;
--echo # Cover return_top_record() in ha_partition::handle_ordered_index_scan()
create table t1 (x int primary key, b tinytext, v text as (b) virtual)
partition by range columns (x) (
partition p1 values less than (4),
partition pn values less than (maxvalue));
insert into t1 (x, b) values (1, ''), (2, ''), (3, 'a'), (4, 'b');
update t1 set b= 'bar' where x > 0 order by v limit 2;
drop table t1;

View File

@@ -446,6 +446,19 @@ pk f1 f2 left(f3, 4) check_row_ts(row_start, row_end)
2 8 8 LONG HISTORICAL ROW
drop table t1;
#
# MDEV-21555 Assertion secondary index is out of sync on delete from versioned table
#
create table t1 (a int, b int as (a + 1) virtual, key(a)) engine=innodb with system versioning;
set foreign_key_checks= off;
insert into t1 (a) values (1), (2);
alter table t1 add foreign key (b) references t1 (a), algorithm=copy;
update t1 set a= null where a = 1;
delete from t1 where a is null;
set foreign_key_checks= on;
delete history from t1;
delete from t1;
drop table t1;
#
# MDEV-20729 Fix REFERENCES constraint in column definition
#
create or replace table t1(

View File

@@ -478,6 +478,24 @@ select pk, f1, f2, left(f3, 4), check_row_ts(row_start, row_end) from t1 for sys
# cleanup
drop table t1;
--echo #
--echo # MDEV-21555 Assertion secondary index is out of sync on delete from versioned table
--echo #
create table t1 (a int, b int as (a + 1) virtual, key(a)) engine=innodb with system versioning;
set foreign_key_checks= off;
insert into t1 (a) values (1), (2);
alter table t1 add foreign key (b) references t1 (a), algorithm=copy;
update t1 set a= null where a = 1;
delete from t1 where a is null;
set foreign_key_checks= on;
delete history from t1;
delete from t1;
# cleanup
drop table t1;
--echo #
--echo # MDEV-20729 Fix REFERENCES constraint in column definition
--echo #

View File

@@ -1010,7 +1010,13 @@ check_port()
lsof -Pnl -i ":$port" 2>/dev/null | \
grep -q -E "^($utils)[^[:space:]]*[[:space:]]+$pid[[:space:]].*\\(LISTEN\\)" && rc=0
elif [ $sockstat_available -ne 0 ]; then
sockstat -p "$port" 2>/dev/null | \
local opts='-p'
if [ "$OS" = 'FreeBSD' ]; then
# sockstat on FreeBSD requires the "-s" option
# to display the connection state:
opts='-sp'
fi
sockstat "$opts" "$port" 2>/dev/null | \
grep -q -E "[[:space:]]+($utils)[^[:space:]]*[[:space:]]+$pid[[:space:]].*[[:space:]]LISTEN" && rc=0
elif [ $ss_available -ne 0 ]; then
ss -nlpH "( sport = :$port )" 2>/dev/null | \

View File

@@ -166,7 +166,8 @@ get_keys()
fi
if [ -z "$ekey" -a ! -r "$ekeyfile" ]; then
wsrep_log_error "FATAL: Either key or keyfile must be readable"
wsrep_log_error "FATAL: Either key must be specified " \
"or keyfile must be readable"
exit 3
fi
@@ -448,9 +449,30 @@ encgroups='--mysqld|sst|xtrabackup'
check_server_ssl_config()
{
tcert=$(parse_cnf "$encgroups" 'ssl-ca')
tpem=$(parse_cnf "$encgroups" 'ssl-cert')
tkey=$(parse_cnf "$encgroups" 'ssl-key')
# backward-compatible behavior:
tcert=$(parse_cnf 'sst' 'tca')
tpem=$(parse_cnf 'sst' 'tcert')
tkey=$(parse_cnf 'sst' 'tkey')
# reading new ssl configuration options:
local tcert2=$(parse_cnf "$encgroups" 'ssl-ca')
local tpem2=$(parse_cnf "$encgroups" 'ssl-cert')
local tkey2=$(parse_cnf "$encgroups" 'ssl-key')
# if there are no old options, then we take new ones:
if [ -z "$tcert" -a -z "$tpem" -a -z "$tkey" ]; then
tcert="$tcert2"
tpem="$tpem2"
tkey="$tkey2"
# checking for presence of the new-style SSL configuration:
elif [ -n "$tcert2" -o -n "$tpem2" -o -n "$tkey2" ]; then
if [ "$tcert" != "$tcert2" -o \
"$tpem" != "$tpem2" -o \
"$tkey" != "$tkey2" ]
then
wsrep_log_info "new ssl configuration options (ssl-ca, ssl-cert " \
"and ssl-key) are ignored by SST due to presence " \
"of the tca, tcert and/or tkey in the [sst] section"
fi
fi
}
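# Example (illustrative configuration, hypothetical paths): with both
# styles present, the old [sst] options win and the message above is
# logged:
#
#   [sst]
#   tca=/etc/my.cnf.d/ca.pem
#   tcert=/etc/my.cnf.d/server-cert.pem
#   tkey=/etc/my.cnf.d/server-key.pem
#   [mysqld]
#   ssl-ca=/etc/ssl/ca.pem
#
# Only when tca, tcert and tkey are all absent do ssl-ca, ssl-cert and
# ssl-key take effect for SST.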
read_cnf()
@@ -463,18 +485,10 @@ read_cnf()
if [ $encrypt -eq 0 -o $encrypt -ge 2 ]
then
if [ "$tmode" != 'DISABLED' -o $encrypt -ge 2 ]
then
tcert=$(parse_cnf 'sst' 'tca')
tpem=$(parse_cnf 'sst' 'tcert')
tkey=$(parse_cnf 'sst' 'tkey')
fi
if [ "$tmode" != 'DISABLED' ]; then
# backward-incompatible behavior
if [ -z "$tpem" -a -z "$tkey" -a -z "$tcert" ]; then
# no old-style SSL config in [sst]
if [ "$tmode" != 'DISABLED' -o $encrypt -ge 2 ]; then
check_server_ssl_config
fi
if [ "$tmode" != 'DISABLED' ]; then
if [ 0 -eq $encrypt -a -n "$tpem" -a -n "$tkey" ]
then
encrypt=3 # enable cert/key SSL encryption
@@ -489,8 +503,12 @@ read_cnf()
ealgo=$(parse_cnf "$encgroups" 'encrypt-algo')
eformat=$(parse_cnf "$encgroups" 'encrypt-format' 'openssl')
ekey=$(parse_cnf "$encgroups" 'encrypt-key')
# The keyfile should be read only when the key
# is not specified or empty:
if [ -z "$ekey" ]; then
ekeyfile=$(parse_cnf "$encgroups" 'encrypt-key-file')
fi
fi
wsrep_log_info "SSL configuration: CA='$tcert', CERT='$tpem'," \
"KEY='$tkey', MODE='$tmode', encrypt='$encrypt'"

View File

@@ -93,7 +93,15 @@ check_pid_and_port()
else
local filter='([^[:space:]]+[[:space:]]+){4}[^[:space:]]+'
if [ $sockstat_available -eq 1 ]; then
port_info=$(sockstat -p "$port" 2>/dev/null | \
local opts='-p'
if [ "$OS" = 'FreeBSD' ]; then
# sockstat on FreeBSD requires the "-s" option
# to display the connection state:
opts='-sp'
# in addition, sockstat produces an additional column:
filter='([^[:space:]]+[[:space:]]+){5}[^[:space:]]+'
fi
port_info=$(sockstat "$opts" "$port" 2>/dev/null | \
grep -E '[[:space:]]LISTEN' | grep -o -E "$filter")
else
port_info=$(ss -nlpH "( sport = :$port )" 2>/dev/null | \
@@ -388,7 +396,7 @@ EOF
# Use deltaxfer only for WAN
inv=$(basename "$0")
WHOLE_FILE_OPT=""
if [ "${inv%wsrep_sst_rsync_wan*}" != "$inv" ]; then
if [ "${inv%wsrep_sst_rsync_wan*}" = "$inv" ]; then
WHOLE_FILE_OPT="--whole-file"
fi

View File

@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2017, Oracle and/or its affiliates.
Copyright (c) 2008, 2020, MariaDB
Copyright (c) 2008, 2021, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -8637,6 +8637,7 @@ int Field_blob::store(const char *from,size_t length,CHARSET_INFO *cs)
rc= well_formed_copy_with_check((char*) value.ptr(), (uint) new_length,
cs, from, length,
length, true, &copy_len);
value.length(copy_len);
Field_blob::store_length(copy_len);
bmove(ptr+packlength,(uchar*) &tmp,sizeof(char*));
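/*
  Editorial note: well_formed_copy_with_check() may copy fewer bytes than
  requested, so the cached String must be trimmed via value.length(copy_len)
  above; otherwise value.length() would not match the number of bytes
  actually stored, while Field_blob::store_length() already records
  copy_len.
*/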

View File

@@ -4561,7 +4561,13 @@ public:
uchar *new_ptr, uint32 length,
uchar *new_null_ptr, uint new_null_bit) override;
void sql_type(String &str) const override;
inline bool copy()
/**
Copy blob buffer into internal storage "value" and update record pointer.
@retval true Memory allocation error
@retval false Success
*/
bool copy()
{
uchar *tmp= get_ptr();
if (value.copy((char*) tmp, get_length(), charset()))
@@ -4573,6 +4579,33 @@ public:
memcpy(ptr+packlength, &tmp, sizeof(char*));
return 0;
}
void swap(String &inout, bool set_read_value)
{
if (set_read_value)
read_value.swap(inout);
else
value.swap(inout);
}
/**
Return pointer to blob cache or NULL if not cached.
*/
String * cached(bool *set_read_value)
{
char *tmp= (char *) get_ptr();
if (!value.is_empty() && tmp == value.ptr())
{
*set_read_value= false;
return &value;
}
if (!read_value.is_empty() && tmp == read_value.ptr())
{
*set_read_value= true;
return &read_value;
}
return NULL;
}
/* store value for the duration of the current read record */
inline void swap_value_and_read_value()
{

View File

@@ -720,6 +720,15 @@ const char* dbug_print_table_row(TABLE *table)
}
const char* dbug_print_row(TABLE *table, uchar *rec)
{
table->move_fields(table->field, rec, table->record[0]);
const char* ret= dbug_print_table_row(table);
table->move_fields(table->field, table->record[0], rec);
return ret;
}
/*
Print a text, SQL-like record representation into dbug trace.

View File

@@ -5433,8 +5433,6 @@ bool ha_partition::init_record_priority_queue()
/*
Initialize the ordered record buffer.
*/
if (!m_ordered_rec_buffer)
{
size_t alloc_len;
uint used_parts= bitmap_bits_set(&m_part_info->read_partitions);
@@ -5442,15 +5440,20 @@ bool ha_partition::init_record_priority_queue()
DBUG_RETURN(false);
/* Allocate record buffer for each used partition. */
m_priority_queue_rec_len= m_rec_length + PARTITION_BYTES_IN_POS;
m_priority_queue_rec_len= m_rec_length + ORDERED_REC_OFFSET;
if (!m_using_extended_keys)
m_priority_queue_rec_len += get_open_file_sample()->ref_length;
m_priority_queue_rec_len+= get_open_file_sample()->ref_length;
alloc_len= used_parts * m_priority_queue_rec_len;
/* Allocate a key for temporary use when setting up the scan. */
alloc_len+= table_share->max_key_length;
Ordered_blob_storage **blob_storage;
Ordered_blob_storage *objs;
const size_t n_all= used_parts * table->s->blob_fields;
if (!(m_ordered_rec_buffer= (uchar*)my_malloc(key_memory_partition_sort_buffer,
alloc_len, MYF(MY_WME))))
if (!my_multi_malloc(key_memory_partition_sort_buffer, MYF(MY_WME),
&m_ordered_rec_buffer, alloc_len,
&blob_storage, n_all * sizeof *blob_storage,
&objs, n_all * sizeof *objs, NULL))
DBUG_RETURN(true);
/*
@@ -5467,7 +5470,14 @@ bool ha_partition::init_record_priority_queue()
i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
DBUG_PRINT("info", ("init rec-buf for part %u", i));
int2store(ptr, i);
if (table->s->blob_fields)
{
for (uint j= 0; j < table->s->blob_fields; ++j, ++objs)
blob_storage[j]= new (objs) Ordered_blob_storage;
*((Ordered_blob_storage ***) ptr)= blob_storage;
blob_storage+= table->s->blob_fields;
}
int2store(ptr + sizeof(String **), i);
ptr+= m_priority_queue_rec_len;
}
m_start_key.key= (const uchar*)ptr;
@@ -5480,13 +5490,13 @@ bool ha_partition::init_record_priority_queue()
else
cmp_func= cmp_key_part_id;
DBUG_PRINT("info", ("partition queue_init(1) used_parts: %u", used_parts));
if (init_queue(&m_queue, used_parts, 0, 0, cmp_func, cmp_arg, 0, 0))
if (init_queue(&m_queue, used_parts, ORDERED_PART_NUM_OFFSET,
0, cmp_func, cmp_arg, 0, 0))
{
my_free(m_ordered_rec_buffer);
m_ordered_rec_buffer= NULL;
DBUG_RETURN(true);
}
}
DBUG_RETURN(false);
}
@@ -5500,6 +5510,20 @@ void ha_partition::destroy_record_priority_queue()
DBUG_ENTER("ha_partition::destroy_record_priority_queue");
if (m_ordered_rec_buffer)
{
if (table->s->blob_fields)
{
char *ptr= (char *) m_ordered_rec_buffer;
for (uint i= bitmap_get_first_set(&m_part_info->read_partitions);
i < m_tot_parts;
i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
Ordered_blob_storage **blob_storage= *((Ordered_blob_storage ***) ptr);
for (uint b= 0; b < table->s->blob_fields; ++b)
blob_storage[b]->blob.free();
ptr+= m_priority_queue_rec_len;
}
}
delete_queue(&m_queue);
my_free(m_ordered_rec_buffer);
m_ordered_rec_buffer= NULL;
@@ -5727,12 +5751,10 @@ static int cmp_part_ids(uchar *ref1, uchar *ref2)
extern "C" int cmp_key_part_id(void *ptr, uchar *ref1, uchar *ref2)
{
ha_partition *file= (ha_partition*)ptr;
int res;
if ((res= key_rec_cmp(file->m_curr_key_info, ref1 + PARTITION_BYTES_IN_POS,
ref2 + PARTITION_BYTES_IN_POS)))
{
if (int res= key_rec_cmp(file->m_curr_key_info,
ref1 + PARTITION_BYTES_IN_POS,
ref2 + PARTITION_BYTES_IN_POS))
return res;
}
return cmp_part_ids(ref1, ref2);
}
@@ -6980,6 +7002,48 @@ int ha_partition::pre_ft_end()
}
void ha_partition::swap_blobs(uchar * rec_buf, Ordered_blob_storage ** storage, bool restore)
{
uint *ptr, *end;
uint blob_n= 0;
table->move_fields(table->field, rec_buf, table->record[0]);
for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields;
ptr != end; ++ptr, ++blob_n)
{
DBUG_ASSERT(*ptr < table->s->fields);
Field_blob *blob= (Field_blob*) table->field[*ptr];
DBUG_ASSERT(blob->flags & BLOB_FLAG);
DBUG_ASSERT(blob->field_index == *ptr);
if (!bitmap_is_set(table->read_set, *ptr) || blob->is_null())
continue;
Ordered_blob_storage &s= *storage[blob_n];
if (restore)
{
/*
We only protect the blob cache (value or read_value). An empty
cache does not mean the blob was empty: blobs allocated by the
storage engine work just fine without it.
*/
if (!s.blob.is_empty())
blob->swap(s.blob, s.set_read_value);
}
else
{
bool set_read_value;
String *cached= blob->cached(&set_read_value);
if (cached)
{
cached->swap(s.blob);
s.set_read_value= set_read_value;
}
}
}
table->move_fields(table->field, table->record[0], rec_buf);
}
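/*
  Usage sketch (editorial): the protocol around swap_blobs() is
  park-and-restore. When a partition's row is placed into the priority
  queue, handle_ordered_index_scan()/handle_ordered_next() call
  swap_blobs(rec_buf, storage, false) to move the blob caches into
  Ordered_blob_storage, keeping them alive while the row waits in
  m_ordered_rec_buffer; when the row wins the merge, return_top_record()
  calls swap_blobs(buf, storage, true) to hand the caches back before
  the record is returned to the caller.
*/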
/**
Initialize a full text search using the extended API.
@@ -7687,8 +7751,8 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
{
DBUG_PRINT("info", ("reading from part %u (scan_type: %u)",
i, m_index_scan_type));
DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr));
uchar *rec_buf_ptr= part_rec_buf_ptr + PARTITION_BYTES_IN_POS;
DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr + ORDERED_PART_NUM_OFFSET));
uchar *rec_buf_ptr= part_rec_buf_ptr + ORDERED_REC_OFFSET;
handler *file= m_file[i];
switch (m_index_scan_type) {
@@ -7768,6 +7832,12 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
Initialize queue without order first, simply insert
*/
queue_element(&m_queue, j++)= part_rec_buf_ptr;
if (table->s->blob_fields)
{
Ordered_blob_storage **storage=
*((Ordered_blob_storage ***) part_rec_buf_ptr);
swap_blobs(rec_buf_ptr, storage, false);
}
}
else if (error == HA_ERR_KEY_NOT_FOUND)
{
@@ -7810,7 +7880,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
DBUG_PRINT("info", ("partition !bitmap_is_set(&m_mrr_used_partitions, i)"));
continue;
}
DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr));
DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr + ORDERED_PART_NUM_OFFSET));
if (smallest_range_seq == m_stock_range_seq[i])
{
m_stock_range_seq[i]= 0;
@@ -7857,12 +7927,17 @@ void ha_partition::return_top_record(uchar *buf)
{
uint part_id;
uchar *key_buffer= queue_top(&m_queue);
uchar *rec_buffer= key_buffer + PARTITION_BYTES_IN_POS;
uchar *rec_buffer= key_buffer + ORDERED_REC_OFFSET;
DBUG_ENTER("ha_partition::return_top_record");
DBUG_PRINT("enter", ("partition this: %p", this));
part_id= uint2korr(key_buffer);
part_id= uint2korr(key_buffer + ORDERED_PART_NUM_OFFSET);
memcpy(buf, rec_buffer, m_rec_length);
if (table->s->blob_fields)
{
Ordered_blob_storage **storage= *((Ordered_blob_storage ***) key_buffer);
swap_blobs(buf, storage, true);
}
m_last_part= part_id;
DBUG_PRINT("info", ("partition m_last_part: %u", m_last_part));
m_top_entry= part_id;
@@ -7914,7 +7989,7 @@ int ha_partition::handle_ordered_index_scan_key_not_found()
This partition is used and did return HA_ERR_KEY_NOT_FOUND
in index_read_map.
*/
curr_rec_buf= part_buf + PARTITION_BYTES_IN_POS;
curr_rec_buf= part_buf + ORDERED_REC_OFFSET;
error= m_file[i]->ha_index_next(curr_rec_buf);
/* HA_ERR_KEY_NOT_FOUND is not allowed from index_next! */
DBUG_ASSERT(error != HA_ERR_KEY_NOT_FOUND);
@@ -7965,7 +8040,8 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
DBUG_RETURN(HA_ERR_END_OF_FILE);
uint part_id= m_top_entry;
uchar *rec_buf= queue_top(&m_queue) + PARTITION_BYTES_IN_POS;
uchar *part_rec_buf_ptr= queue_top(&m_queue);
uchar *rec_buf= part_rec_buf_ptr + ORDERED_REC_OFFSET;
handler *file;
if (m_key_not_found)
@@ -8007,7 +8083,16 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
if (m_index_scan_type == partition_read_range)
{
error= file->read_range_next();
if (likely(!error))
{
memcpy(rec_buf, table->record[0], m_rec_length);
if (table->s->blob_fields)
{
Ordered_blob_storage **storage=
*((Ordered_blob_storage ***) part_rec_buf_ptr);
swap_blobs(rec_buf, storage, false);
}
}
}
else if (m_index_scan_type == partition_read_multi_range)
{
@@ -8044,6 +8129,11 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
DBUG_PRINT("info", ("m_mrr_range_current->id: %u",
m_mrr_range_current->id));
memcpy(rec_buf, table->record[0], m_rec_length);
if (table->s->blob_fields)
{
Ordered_blob_storage **storage= *((Ordered_blob_storage ***) part_rec_buf_ptr);
swap_blobs(rec_buf, storage, false);
}
if (((PARTITION_KEY_MULTI_RANGE *) m_range_info[part_id])->id !=
m_mrr_range_current->id)
{
@@ -8094,9 +8184,8 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
DBUG_PRINT("info",("partition !bitmap_is_set(&m_mrr_used_partitions, i)"));
continue;
}
DBUG_PRINT("info",("partition uint2korr: %u",
uint2korr(part_rec_buf_ptr)));
DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr));
DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr +
ORDERED_PART_NUM_OFFSET));
DBUG_PRINT("info", ("partition m_stock_range_seq[%u]: %u",
i, m_stock_range_seq[i]));
if (smallest_range_seq == m_stock_range_seq[i])
@@ -8185,7 +8274,7 @@ int ha_partition::handle_ordered_prev(uchar *buf)
DBUG_RETURN(HA_ERR_END_OF_FILE);
uint part_id= m_top_entry;
uchar *rec_buf= queue_top(&m_queue) + PARTITION_BYTES_IN_POS;
uchar *rec_buf= queue_top(&m_queue) + ORDERED_REC_OFFSET;
handler *file= m_file[part_id];
if (unlikely((error= file->ha_index_prev(rec_buf))))

View File

@@ -21,8 +21,19 @@
#include "sql_partition.h" /* part_id_range, partition_element */
#include "queues.h" /* QUEUE */
#define PARTITION_BYTES_IN_POS 2
struct Ordered_blob_storage
{
String blob;
bool set_read_value;
Ordered_blob_storage() : set_read_value(false)
{}
};
#define PAR_EXT ".par"
#define PARTITION_BYTES_IN_POS 2
#define ORDERED_PART_NUM_OFFSET sizeof(Ordered_blob_storage **)
#define ORDERED_REC_OFFSET (ORDERED_PART_NUM_OFFSET + PARTITION_BYTES_IN_POS)
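/*
  Layout sketch (editorial): each slot of m_ordered_rec_buffer now starts
  with the blob-storage pointer, then the 2-byte partition number, then
  the record itself:

    offset 0                        Ordered_blob_storage **blobs
    offset ORDERED_PART_NUM_OFFSET  uint16 partition number (int2store)
    offset ORDERED_REC_OFFSET       record of m_rec_length bytes

  Hypothetical helpers (not in the patch) showing the arithmetic:

    static inline uint ordered_slot_part_num(const uchar *slot)
    { return uint2korr(slot + ORDERED_PART_NUM_OFFSET); }
    static inline uchar *ordered_slot_rec(uchar *slot)
    { return slot + ORDERED_REC_OFFSET; }
*/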
/** Struct used for partition_name_hash */
typedef struct st_part_name_def
@@ -935,6 +946,7 @@ private:
int handle_ordered_next(uchar * buf, bool next_same);
int handle_ordered_prev(uchar * buf);
void return_top_record(uchar * buf);
void swap_blobs(uchar* rec_buf, Ordered_blob_storage ** storage, bool restore);
public:
/*
-------------------------------------------------------------------------

View File

@@ -560,9 +560,9 @@ public:
{
static LEX_CSTRING json_set= {STRING_WITH_LEN("json_set") };
static LEX_CSTRING json_insert= {STRING_WITH_LEN("json_insert") };
static LEX_CSTRING json_update= {STRING_WITH_LEN("json_update") };
static LEX_CSTRING json_replace= {STRING_WITH_LEN("json_replace") };
return (mode_insert ?
(mode_replace ? json_set : json_insert) : json_update);
(mode_replace ? json_set : json_insert) : json_replace);
}
Item *get_copy(THD *thd) override
{ return get_item_copy<Item_func_json_insert>(thd, this); }
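/*
  Editorial note: before this fix, func_name() reported "json_update"
  (no such SQL function exists) whenever the item was JSON_REPLACE
  (mode_insert == false); anything that re-prints the item via
  func_name(), such as view definitions, would have emitted
  json_update(...) instead of json_replace(...).
*/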

View File

@@ -3396,7 +3396,7 @@ MYSQL_BIN_LOG::MYSQL_BIN_LOG(uint *sync_period)
checksum_alg_reset(BINLOG_CHECKSUM_ALG_UNDEF),
relay_log_checksum_alg(BINLOG_CHECKSUM_ALG_UNDEF),
description_event_for_exec(0), description_event_for_queue(0),
current_binlog_id(0)
current_binlog_id(0), reset_master_count(0)
{
/*
We don't want to initialize locks here as such initialization depends on
@@ -4489,6 +4489,7 @@ err:
}
mysql_cond_broadcast(&COND_xid_list);
reset_master_pending--;
reset_master_count++;
mysql_mutex_unlock(&LOCK_xid_list);
}
@@ -8316,6 +8317,7 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
}
else
{
DEBUG_SYNC(leader->thd, "commit_before_update_binlog_end_pos");
bool any_error= false;
mysql_mutex_assert_not_owner(&LOCK_prepare_ordered);

View File

@@ -676,6 +676,11 @@ public:
my_off_t last_commit_pos_offset;
ulong current_binlog_id;
/*
Tracks the number of times that the master has been reset
*/
Atomic_counter<uint64> reset_master_count;
MYSQL_BIN_LOG(uint *sync_period);
/*
note that there's no destructor ~MYSQL_BIN_LOG() !
@@ -893,6 +898,7 @@ public:
inline mysql_mutex_t* get_log_lock() { return &LOCK_log; }
inline mysql_cond_t* get_bin_log_cond() { return &COND_bin_log_updated; }
inline IO_CACHE* get_log_file() { return &log_file; }
inline uint64 get_reset_master_count() { return reset_master_count; }
inline void lock_index() { mysql_mutex_lock(&LOCK_index);}
inline void unlock_index() { mysql_mutex_unlock(&LOCK_index);}

View File

@@ -155,7 +155,7 @@ void mysql_audit_general(THD *thd, uint event_subtype,
DBUG_ENTER("mysql_audit_general");
if (mysql_audit_general_enabled())
{
char user_buff[MAX_USER_HOST_SIZE];
char user_buff[MAX_USER_HOST_SIZE+1];
mysql_event_general event;
event.event_subclass= event_subtype;

View File

@@ -4786,6 +4786,19 @@ extern "C" const char *thd_priv_host(MYSQL_THD thd, size_t *length)
}
extern "C" const char *thd_priv_user(MYSQL_THD thd, size_t *length)
{
const Security_context *sctx= thd->security_ctx;
if (!sctx)
{
*length= 0;
return NULL;
}
*length= strlen(sctx->priv_user);
return sctx->priv_user;
}
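/*
  Example use from a plugin (sketch; mirrors thd_priv_host() above):

    size_t len;
    if (const char *user= thd_priv_user(thd, &len))
      fprintf(stderr, "priv user: %.*s\n", (int) len, user);
*/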
#ifdef INNODB_COMPATIBILITY_HOOKS
/** open a table and add it to thd->open_tables
@@ -5445,8 +5458,8 @@ extern "C" bool thd_is_strict_mode(const MYSQL_THD thd)
*/
void thd_get_query_start_data(THD *thd, char *buf)
{
LEX_CSTRING field_name;
Field_timestampf f((uchar *)buf, NULL, 0, Field::NONE, &field_name, NULL, 6);
Field_timestampf f((uchar *)buf, nullptr, 0, Field::NONE, &empty_clex_str,
nullptr, 6);
f.store_TIME(thd->query_start(), thd->query_start_sec_part());
}

View File

@@ -199,6 +199,7 @@ extern MYSQL_PLUGIN_IMPORT const char **errmesg;
extern "C" LEX_STRING * thd_query_string (MYSQL_THD thd);
extern "C" unsigned long long thd_query_id(const MYSQL_THD thd);
extern "C" size_t thd_query_safe(MYSQL_THD thd, char *buf, size_t buflen);
extern "C" const char *thd_priv_user(MYSQL_THD thd, size_t *length);
extern "C" const char *thd_priv_host(MYSQL_THD thd, size_t *length);
extern "C" const char *thd_user_name(MYSQL_THD thd);
extern "C" const char *thd_client_host(MYSQL_THD thd);

View File

@@ -35,7 +35,10 @@ public:
#ifdef _WIN32
HANDLE pipe;
CONNECT(HANDLE pipe_arg): pipe(pipe_arg), vio_type(VIO_TYPE_NAMEDPIPE),
scheduler(thread_scheduler), thread_id(0), prior_thr_create_utime(0) {}
scheduler(thread_scheduler), thread_id(0), prior_thr_create_utime(0)
{
count++;
}
#endif
enum enum_vio_type vio_type;
scheduler_functions *scheduler;

View File

@@ -3562,7 +3562,7 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num)
prepared statement
*/
Query_arena *arena= thd->stmt_arena;
const uint n_elems= (n_sum_items +
const size_t n_elems= (n_sum_items +
n_child_sum_items +
item_list.elements +
select_n_reserved +
@@ -3570,7 +3570,8 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num)
select_n_where_fields +
order_group_num +
hidden_bit_fields +
fields_in_window_functions) * 5;
fields_in_window_functions) * (size_t) 5;
DBUG_ASSERT(n_elems % 5 == 0);
if (!ref_pointer_array.is_null())
{
/*

View File

@@ -3542,6 +3542,11 @@ static void mysql_stmt_execute_common(THD *thd,
stmt_id == LAST_STMT_ID, read_types))
{
my_error(ER_MALFORMED_PACKET, MYF(0));
/*
Let's set the thd->query_string so the audit plugin
can report the executed query that failed.
*/
thd->set_query_inner(stmt->query_string);
DBUG_VOID_RETURN;
}

View File

@@ -2828,6 +2828,12 @@ static int send_one_binlog_file(binlog_send_info *info,
*/
if (send_events(info, log, linfo, end_pos))
return 1;
DBUG_EXECUTE_IF("Notify_binlog_EOF",
{
const char act[]= "now signal eof_reached";
DBUG_ASSERT(!debug_sync_set_action(current_thd,
STRING_WITH_LEN(act)));
};);
}
return 1;
@@ -4381,6 +4387,7 @@ bool show_binlogs(THD* thd)
Protocol *protocol= thd->protocol;
uint retry_count= 0;
size_t cur_dir_len;
uint64 expected_reset_masters;
DBUG_ENTER("show_binlogs");
if (!mysql_bin_log.is_open())
@@ -4405,6 +4412,7 @@ retry:
mysql_mutex_lock(mysql_bin_log.get_log_lock());
mysql_bin_log.lock_index();
mysql_bin_log.raw_get_current_log(&cur);
expected_reset_masters= mysql_bin_log.get_reset_master_count();
mysql_mutex_unlock(mysql_bin_log.get_log_lock());
/* The following call unlocks lock_index */
@@ -4425,6 +4433,16 @@ retry:
cur_link->name.str+= dir_len;
cur_link->name.length-= dir_len;
if (mysql_bin_log.get_reset_master_count() > expected_reset_masters)
{
/*
Reset master was called after we cached filenames.
Reinitialize the cache.
*/
free_root(&mem_root, MYF(MY_MARK_BLOCKS_FREE));
goto retry;
}
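/*
  Editorial note: this is optimistic revalidation. The counter is
  sampled under LOCK_log together with raw_get_current_log(); the index
  file is then scanned without that lock, and any RESET MASTER that
  slips in bumps reset_master_count, which invalidates the cached file
  names and restarts the scan from "retry".
*/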
if (!(strncmp(fname+dir_len, cur.log_file_name+cur_dir_len, length)))
cur_link->size= cur.pos; /* The active log, use the active position */
else

View File

@@ -887,7 +887,7 @@ int mariadb_fix_view(THD *thd, TABLE_LIST *view, bool wrong_checksum,
if ((view->md5.str= (char *)thd->alloc(32 + 1)) == NULL)
DBUG_RETURN(HA_ADMIN_FAILED);
}
view->calc_md5(view->md5.str);
view->calc_md5(const_cast<char*>(view->md5.str));
view->md5.length= 32;
}
view->mariadb_version= MYSQL_VERSION_ID;

View File

@@ -5627,12 +5627,12 @@ void TABLE::reset_item_list(List<Item> *item_list, uint skip) const
buffer buffer for md5 writing
*/
void TABLE_LIST::calc_md5(const char *buffer)
void TABLE_LIST::calc_md5(char *buffer)
{
uchar digest[16];
compute_md5_hash(digest, select_stmt.str,
select_stmt.length);
sprintf((char *) buffer,
sprintf(buffer,
"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
digest[0], digest[1], digest[2], digest[3],
digest[4], digest[5], digest[6], digest[7],

View File

@@ -2636,7 +2636,7 @@ struct TABLE_LIST
List<String> *partition_names;
#endif /* WITH_PARTITION_STORAGE_ENGINE */
void calc_md5(const char *buffer);
void calc_md5(char *buffer);
int view_check_option(THD *thd, bool ignore_failure);
bool create_field_translation(THD *thd);
bool setup_underlying(THD *thd);

View File

@@ -802,17 +802,20 @@ void wsrep_init_globals()
{
wsrep_gtid_server.domain_id= wsrep_gtid_domain_id;
wsrep_init_sidno(Wsrep_server_state::instance().connected_gtid().id());
wsrep_init_gtid();
/* Recover last written wsrep gtid */
wsrep_init_gtid();
if (wsrep_new_cluster)
{
wsrep_server_gtid_t gtid= {wsrep_gtid_server.domain_id,
wsrep_gtid_server.server_id, 0};
wsrep_get_binlog_gtid_seqno(gtid);
wsrep_gtid_server.seqno(gtid.seqno);
/* Start with provided domain_id & server_id found in configuration */
wsrep_server_gtid_t new_gtid;
new_gtid.domain_id= wsrep_gtid_domain_id;
new_gtid.server_id= global_system_variables.server_id;
new_gtid.seqno= 0;
/* Search the binlog for the domain_id and server_id combination; if found, continue from the last seqno */
wsrep_get_binlog_gtid_seqno(new_gtid);
wsrep_gtid_server.gtid(new_gtid);
}
wsrep_init_schema();
if (WSREP_ON)
{
Wsrep_server_state::instance().initialized();

View File

@@ -3119,8 +3119,8 @@ func_exit:
@param[in] block page to remove
@param[in] index index tree
@param[in,out] mtr mini-transaction */
void btr_level_list_remove(const buf_block_t& block, const dict_index_t& index,
mtr_t* mtr)
dberr_t btr_level_list_remove(const buf_block_t& block,
const dict_index_t& index, mtr_t* mtr)
{
ut_ad(mtr->memo_contains_flagged(&block, MTR_MEMO_PAGE_X_FIX));
ut_ad(block.zip_size() == index.table->space->zip_size());
@@ -3152,6 +3152,10 @@ void btr_level_list_remove(const buf_block_t& block, const dict_index_t& index,
buf_block_t* next_block = btr_block_get(
index, next_page_no, RW_X_LATCH, page_is_leaf(page),
mtr);
if (!next_block) {
return DB_ERROR;
}
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(next_block->frame) == page_is_comp(page));
static_assert(FIL_PAGE_PREV % 4 == 0, "alignment");
@@ -3162,6 +3166,8 @@ void btr_level_list_remove(const buf_block_t& block, const dict_index_t& index,
btr_page_set_prev(next_block, prev_page_no, mtr);
}
return DB_SUCCESS;
}
/*************************************************************//**
@@ -3537,7 +3543,9 @@ retry:
btr_search_drop_page_hash_index(block);
/* Remove the page from the level list */
btr_level_list_remove(*block, *index, mtr);
if (DB_SUCCESS != btr_level_list_remove(*block, *index, mtr)) {
goto err_exit;
}
const page_id_t id{block->page.id()};
@@ -3662,7 +3670,9 @@ retry:
#endif /* UNIV_BTR_DEBUG */
/* Remove the page from the level list */
btr_level_list_remove(*block, *index, mtr);
if (DB_SUCCESS != btr_level_list_remove(*block, *index, mtr)) {
goto err_exit;
}
ut_ad(btr_node_ptr_get_child_page_no(
btr_cur_get_rec(&father_cursor), offsets)
@@ -4040,7 +4050,7 @@ btr_discard_page(
}
/* Remove the page from the level list */
btr_level_list_remove(*block, *index, mtr);
ut_a(DB_SUCCESS == btr_level_list_remove(*block, *index, mtr));
#ifdef UNIV_ZIP_DEBUG
{

View File

@@ -331,10 +331,12 @@ btr_cur_latch_leaves(
true, mtr);
latch_leaves.blocks[2] = get_block;
#ifdef UNIV_BTR_DEBUG
if (get_block) {
ut_a(page_is_comp(get_block->frame)
== page_is_comp(block->frame));
ut_a(btr_page_get_prev(get_block->frame)
== block->page.id().page_no());
}
#endif /* UNIV_BTR_DEBUG */
if (spatial) {
cursor->rtr_info->tree_blocks[

View File

@@ -400,7 +400,8 @@ btr_defragment_merge_pages(
const page_id_t from{from_block->page.id()};
lock_update_merge_left(*to_block, orig_pred, from);
btr_search_drop_page_hash_index(from_block);
btr_level_list_remove(*from_block, *index, mtr);
ut_a(DB_SUCCESS == btr_level_list_remove(*from_block, *index,
mtr));
btr_page_get_father(index, from_block, mtr, &parent);
btr_cur_node_ptr_delete(&parent, mtr);
/* btr_blob_dbg_remove(from_page, index,

View File

@@ -2652,6 +2652,10 @@ lookup:
}
}
if (local_err == DB_IO_ERROR) {
return NULL;
}
ib::fatal() << "Unable to read page " << page_id
<< " into the buffer pool after "
<< BUF_PAGE_READ_MAX_RETRIES

View File

@@ -339,7 +339,8 @@ nothing_read:
*err= fio.err;
if (UNIV_UNLIKELY(fio.err != DB_SUCCESS)) {
if (!sync || fio.err == DB_TABLESPACE_DELETED) {
if (!sync || fio.err == DB_TABLESPACE_DELETED
|| fio.err == DB_IO_ERROR) {
buf_pool.corrupted_evict(bpage);
return false;
}

View File

@@ -2684,17 +2684,20 @@ func_exit:
/*============================ FILE I/O ================================*/
/** Report information about an invalid page access. */
ATTRIBUTE_COLD __attribute__((noreturn))
static void
fil_report_invalid_page_access(const char *name,
os_offset_t offset, ulint len, bool is_read)
ATTRIBUTE_COLD
static void fil_invalid_page_access_msg(bool fatal, const char *name,
os_offset_t offset, ulint len,
bool is_read)
{
ib::fatal() << "Trying to " << (is_read ? "read " : "write ") << len
<< " bytes at " << offset
<< " outside the bounds of the file: " << name;
sql_print_error("%s%s %zu bytes at " UINT64PF
" outside the bounds of the file: %s",
fatal ? "[FATAL] InnoDB: " : "InnoDB: ",
is_read ? "Trying to read" : "Trying to write",
len, offset, name);
if (fatal)
abort();
}
/** Update the data structures on write completion */
inline void fil_node_t::complete_write()
{
@@ -2747,6 +2750,7 @@ fil_io_t fil_space_t::io(const IORequest &type, os_offset_t offset, size_t len,
}
ulint p = static_cast<ulint>(offset >> srv_page_size_shift);
bool fatal;
if (UNIV_LIKELY_NULL(UT_LIST_GET_NEXT(chain, node))) {
ut_ad(this == fil_system.sys_space
@@ -2757,30 +2761,34 @@ fil_io_t fil_space_t::io(const IORequest &type, os_offset_t offset, size_t len,
p -= node->size;
node = UT_LIST_GET_NEXT(chain, node);
if (!node) {
if (type.type == IORequest::READ_ASYNC) {
release();
return {DB_ERROR, nullptr};
}
fil_report_invalid_page_access(node->name,
if (type.type != IORequest::READ_ASYNC) {
fatal = true;
fail:
fil_invalid_page_access_msg(
fatal, node->name,
offset, len,
type.is_read());
}
return {DB_IO_ERROR, nullptr};
}
}
offset = os_offset_t{p} << srv_page_size_shift;
}
if (UNIV_UNLIKELY(node->size <= p)) {
if (type.type == IORequest::READ_ASYNC) {
release();
if (type.type == IORequest::READ_ASYNC) {
/* If we can tolerate the non-existent pages, we
should return DB_ERROR and let the caller decide
what to do. */
return {DB_ERROR, nullptr};
}
fil_report_invalid_page_access(
node->name, offset, len, type.is_read());
fatal = node->space->purpose != FIL_TYPE_IMPORT;
goto fail;
}
dberr_t err;

View File

@@ -595,6 +595,7 @@ static size_t fil_page_decompress_for_non_full_crc32(byte *tmp_buf, byte *buf)
/** Decompress a page that may be subject to page_compressed compression.
@param[in,out] tmp_buf temporary buffer (of innodb_page_size)
@param[in,out] buf possibly compressed page buffer
@param[in] flags tablespace flags
@return size of the compressed data
@retval 0 if decompression failed
@retval srv_page_size if the page was not compressed */

View File

@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2020, MariaDB Corporation.
Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -97,7 +97,7 @@ fts_config_get_value(
fts_table->suffix = "CONFIG";
fts_get_table_name(fts_table, table_name);
pars_info_bind_id(info, true, "table_name", table_name);
pars_info_bind_id(info, "table_name", table_name);
graph = fts_parse_sql(
fts_table,
@@ -213,7 +213,7 @@ fts_config_set_value(
fts_table->suffix = "CONFIG";
fts_get_table_name(fts_table, table_name, dict_locked);
pars_info_bind_id(info, true, "table_name", table_name);
pars_info_bind_id(info, "table_name", table_name);
graph = fts_parse_sql(
fts_table, info,
@@ -241,7 +241,7 @@ fts_config_set_value(
info, "value", value->f_str, value->f_len);
fts_get_table_name(fts_table, table_name, dict_locked);
pars_info_bind_id(info, true, "table_name", table_name);
pars_info_bind_id(info, "table_name", table_name);
graph = fts_parse_sql(
fts_table, info,

View File

@@ -484,7 +484,7 @@ cleanup:
pars_info_t* info = pars_info_create();
pars_info_bind_id(info, TRUE, "table_stopword", stopword_table_name);
pars_info_bind_id(info, "table_stopword", stopword_table_name);
pars_info_bind_function(info, "my_func", fts_read_stopword,
stopword_info);
@@ -1903,7 +1903,7 @@ fts_create_common_tables(
fts_table.suffix = "CONFIG";
fts_get_table_name(&fts_table, fts_name, true);
pars_info_bind_id(info, true, "config_table", fts_name);
pars_info_bind_id(info, "config_table", fts_name);
graph = fts_parse_sql_no_dict_lock(
info, fts_config_table_insert_values_sql);
@@ -2583,7 +2583,7 @@ retry:
info, "my_func", fts_fetch_store_doc_id, doc_id);
fts_get_table_name(&fts_table, table_name);
pars_info_bind_id(info, true, "config_table", table_name);
pars_info_bind_id(info, "config_table", table_name);
graph = fts_parse_sql(
&fts_table, info,
@@ -2711,7 +2711,7 @@ fts_update_sync_doc_id(
fts_get_table_name(&fts_table, fts_name,
table->fts->dict_locked);
pars_info_bind_id(info, true, "table_name", fts_name);
pars_info_bind_id(info, "table_name", fts_name);
graph = fts_parse_sql(
&fts_table, info,
@@ -2854,7 +2854,7 @@ fts_delete(
fts_table.suffix = "DELETED";
fts_get_table_name(&fts_table, table_name);
pars_info_bind_id(info, true, "deleted", table_name);
pars_info_bind_id(info, "deleted", table_name);
graph = fts_parse_sql(
&fts_table,
@@ -3665,7 +3665,7 @@ fts_doc_fetch_by_doc_id(
pars_info_bind_function(info, "my_func", callback, arg);
select_str = fts_get_select_columns_str(index, info, info->heap);
pars_info_bind_id(info, TRUE, "table_name", index->table->name.m_name);
pars_info_bind_id(info, "table_name", index->table->name.m_name);
if (!get_doc || !get_doc->get_document_graph) {
if (option == FTS_FETCH_DOC_BY_ID_EQUAL) {
@@ -3772,7 +3772,7 @@ fts_write_node(
info = pars_info_create();
fts_get_table_name(fts_table, table_name);
pars_info_bind_id(info, true, "index_table_name", table_name);
pars_info_bind_id(info, "index_table_name", table_name);
}
pars_info_bind_varchar_literal(info, "token", word->f_str, word->f_len);
@@ -3847,7 +3847,7 @@ fts_sync_add_deleted_cache(
&fts_table, "DELETED_CACHE", FTS_COMMON_TABLE, sync->table);
fts_get_table_name(&fts_table, table_name);
pars_info_bind_id(info, true, "table_name", table_name);
pars_info_bind_id(info, "table_name", table_name);
graph = fts_parse_sql(
&fts_table,
@@ -4844,7 +4844,7 @@ fts_get_rows_count(
pars_info_bind_function(info, "my_func", fts_read_ulint, &count);
fts_get_table_name(fts_table, table_name);
pars_info_bind_id(info, true, "table_name", table_name);
pars_info_bind_id(info, "table_name", table_name);
graph = fts_parse_sql(
fts_table,

View File

@@ -499,7 +499,7 @@ fts_index_fetch_nodes(
fts_get_table_name(fts_table, table_name);
pars_info_bind_id(info, true, "table_name", table_name);
pars_info_bind_id(info, "table_name", table_name);
}
pars_info_bind_function(info, "my_func", fetch->read_record, fetch);
@@ -828,7 +828,7 @@ fts_index_fetch_words(
info, "word", word->f_str, word->f_len);
fts_get_table_name(&optim->fts_index_table, table_name);
pars_info_bind_id(info, true, "table_name", table_name);
pars_info_bind_id(info, "table_name", table_name);
graph = fts_parse_sql(
&optim->fts_index_table,
@@ -984,7 +984,7 @@ fts_table_fetch_doc_ids(
pars_info_bind_function(info, "my_func", fts_fetch_doc_ids, doc_ids);
fts_get_table_name(fts_table, table_name);
pars_info_bind_id(info, true, "table_name", table_name);
pars_info_bind_id(info, "table_name", table_name);
graph = fts_parse_sql(
fts_table,
@@ -1445,7 +1445,7 @@ fts_optimize_write_word(
fts_table->suffix = fts_get_suffix(selected);
fts_get_table_name(fts_table, table_name);
pars_info_bind_id(info, true, "table_name", table_name);
pars_info_bind_id(info, "table_name", table_name);
graph = fts_parse_sql(
fts_table,
@@ -2037,11 +2037,11 @@ fts_optimize_purge_deleted_doc_ids(
used in the fts_delete_doc_ids_sql */
optim->fts_common_table.suffix = fts_common_tables[3];
fts_get_table_name(&optim->fts_common_table, deleted);
pars_info_bind_id(info, true, fts_common_tables[3], deleted);
pars_info_bind_id(info, fts_common_tables[3], deleted);
optim->fts_common_table.suffix = fts_common_tables[4];
fts_get_table_name(&optim->fts_common_table, deleted_cache);
pars_info_bind_id(info, true, fts_common_tables[4], deleted_cache);
pars_info_bind_id(info, fts_common_tables[4], deleted_cache);
graph = fts_parse_sql(NULL, info, fts_delete_doc_ids_sql);
@@ -2094,12 +2094,11 @@ fts_optimize_purge_deleted_doc_id_snapshot(
used in the fts_end_delete_sql */
optim->fts_common_table.suffix = fts_common_tables[0];
fts_get_table_name(&optim->fts_common_table, being_deleted);
pars_info_bind_id(info, true, fts_common_tables[0], being_deleted);
pars_info_bind_id(info, fts_common_tables[0], being_deleted);
optim->fts_common_table.suffix = fts_common_tables[1];
fts_get_table_name(&optim->fts_common_table, being_deleted_cache);
pars_info_bind_id(info, true, fts_common_tables[1],
being_deleted_cache);
pars_info_bind_id(info, fts_common_tables[1], being_deleted_cache);
/* Delete the doc ids that were copied to delete pending state at
the start of optimize. */
@@ -2155,20 +2154,19 @@ fts_optimize_create_deleted_doc_id_snapshot(
used in the fts_init_delete_sql */
optim->fts_common_table.suffix = fts_common_tables[0];
fts_get_table_name(&optim->fts_common_table, being_deleted);
pars_info_bind_id(info, true, fts_common_tables[0], being_deleted);
pars_info_bind_id(info, fts_common_tables[0], being_deleted);
optim->fts_common_table.suffix = fts_common_tables[3];
fts_get_table_name(&optim->fts_common_table, deleted);
pars_info_bind_id(info, true, fts_common_tables[3], deleted);
pars_info_bind_id(info, fts_common_tables[3], deleted);
optim->fts_common_table.suffix = fts_common_tables[1];
fts_get_table_name(&optim->fts_common_table, being_deleted_cache);
pars_info_bind_id(info, true, fts_common_tables[1],
being_deleted_cache);
pars_info_bind_id(info, fts_common_tables[1], being_deleted_cache);
optim->fts_common_table.suffix = fts_common_tables[4];
fts_get_table_name(&optim->fts_common_table, deleted_cache);
pars_info_bind_id(info, true, fts_common_tables[4], deleted_cache);
pars_info_bind_id(info, fts_common_tables[4], deleted_cache);
/* Move doc_ids that are to be deleted to state being deleted. */
graph = fts_parse_sql(NULL, info, fts_init_delete_sql);
@@ -3005,6 +3003,8 @@ fts_optimize_shutdown()
@param[in] table table to be synced */
void fts_sync_during_ddl(dict_table_t* table)
{
if (!fts_optimize_wq)
return;
mysql_mutex_lock(&fts_optimize_wq->mutex);
const auto sync_message= table->fts->sync_message;
mysql_mutex_unlock(&fts_optimize_wq->mutex);

View File

@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2007, 2020, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2020, MariaDB Corporation.
Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2147,7 +2147,7 @@ fts_query_find_term(
query->fts_index_table.suffix = fts_get_suffix(selected);
fts_get_table_name(&query->fts_index_table, table_name);
pars_info_bind_id(info, true, "index_table_name", table_name);
pars_info_bind_id(info, "index_table_name", table_name);
}
select.found = FALSE;
@@ -2287,7 +2287,7 @@ fts_query_total_docs_containing_term(
fts_get_table_name(&query->fts_index_table, table_name);
pars_info_bind_id(info, true, "index_table_name", table_name);
pars_info_bind_id(info, "index_table_name", table_name);
graph = fts_parse_sql(
&query->fts_index_table,
@@ -2370,7 +2370,7 @@ fts_query_terms_in_document(
fts_get_table_name(&query->fts_index_table, table_name);
pars_info_bind_id(info, true, "index_table_name", table_name);
pars_info_bind_id(info, "index_table_name", table_name);
graph = fts_parse_sql(
&query->fts_index_table,

View File

@@ -245,7 +245,7 @@ fts_get_select_columns_str(
sel_str = mem_heap_printf(heap, "sel%lu", (ulong) i);
/* Set copy_name to TRUE since it's dynamic. */
pars_info_bind_id(info, TRUE, sel_str, field->name);
pars_info_bind_id(info, sel_str, field->name);
str = mem_heap_printf(
heap, "%s%s$%s", str, (*str) ? ", " : "", sel_str);

View File

@@ -12034,7 +12034,7 @@ foreign_push_index_error(trx_t* trx, const char* operation,
trx, DB_CANNOT_ADD_CONSTRAINT, create_name,
"%s table %s with foreign key %s constraint"
" failed. Field type or character set for column '%s' "
"does not mach referenced column '%s'.",
"does not match referenced column '%s'.",
operation, create_name, fk_text, columns[err_col],
col_name);
return;

View File

@@ -2921,7 +2921,7 @@ i_s_fts_index_table_fill_selected(
FTS_INIT_INDEX_TABLE(&fts_table, fts_get_suffix(selected),
FTS_INDEX_TABLE, index);
fts_get_table_name(&fts_table, table_name);
pars_info_bind_id(info, true, "table_name", table_name);
pars_info_bind_id(info, "table_name", table_name);
graph = fts_parse_sql(
&fts_table, info,

View File

@@ -689,8 +689,9 @@ btr_validate_index(
@param[in] block page to remove
@param[in] index index tree
@param[in,out] mtr mini-transaction */
void btr_level_list_remove(const buf_block_t& block, const dict_index_t& index,
mtr_t* mtr);
dberr_t btr_level_list_remove(const buf_block_t& block,
const dict_index_t& index, mtr_t* mtr)
MY_ATTRIBUTE((warn_unused_result));
/*************************************************************//**
If page is the only on its level, this function moves its records to the

View File

@@ -48,7 +48,7 @@ ulint fil_page_compress(
/** Decompress a page that may be subject to page_compressed compression.
@param[in,out] tmp_buf temporary buffer (of innodb_page_size)
@param[in,out] buf compressed page buffer
@param[in] flags talespace flags
@param[in] flags tablespace flags
@return size of the compressed data
@retval 0 if decompression failed
@retval srv_page_size if the page was not compressed */

View File

@@ -480,7 +480,6 @@ void
pars_info_bind_id(
/*=============*/
pars_info_t* info, /*!< in: info struct */
ibool copy_name,/* in: make a copy of name if TRUE */
const char* name, /*!< in: name */
const char* id); /*!< in: id */
/****************************************************************//**
@@ -526,15 +525,6 @@ pars_info_bind_ull_literal(
const ib_uint64_t* val) /*!< in: value */
MY_ATTRIBUTE((nonnull));
/****************************************************************//**
Add bound id. */
void
pars_info_add_id(
/*=============*/
pars_info_t* info, /*!< in: info struct */
const char* name, /*!< in: name */
const char* id); /*!< in: id */
/****************************************************************//**
Get bound literal with the given name.
@return bound literal, or NULL if not found */

View File

@@ -5205,16 +5205,18 @@ lock_sec_rec_read_check_and_lock(
if the max trx id for the page >= min trx id for the trx list or a
database recovery is running. */
if (!page_rec_is_supremum(rec)
trx_t *trx = thr_get_trx(thr);
if (!lock_table_has(trx, index->table, LOCK_X)
&& !page_rec_is_supremum(rec)
&& page_get_max_trx_id(block->frame) >= trx_sys.get_min_trx_id()
&& lock_rec_convert_impl_to_expl(thr_get_trx(thr), id, rec,
index, offsets)) {
index, offsets)
&& gap_mode == LOCK_REC_NOT_GAP) {
/* We already hold an implicit exclusive lock. */
return DB_SUCCESS;
}
#ifdef WITH_WSREP
trx_t *trx= thr_get_trx(thr);
/* If transaction scanning an unique secondary key is wsrep
high priority thread (brute force) this scanning may involve
GAP-locking in the index. As this locking happens also when
@@ -5267,9 +5269,6 @@ lock_clust_rec_read_check_and_lock(
LOCK_REC_NOT_GAP */
que_thr_t* thr) /*!< in: query thread */
{
dberr_t err;
ulint heap_no;
ut_ad(dict_index_is_clust(index));
ut_ad(block->frame == page_align(rec));
ut_ad(page_rec_is_user_rec(rec) || page_rec_is_supremum(rec));
@@ -5288,16 +5287,18 @@ lock_clust_rec_read_check_and_lock(
const page_id_t id{block->page.id()};
heap_no = page_rec_get_heap_no(rec);
ulint heap_no = page_rec_get_heap_no(rec);
if (heap_no != PAGE_HEAP_NO_SUPREMUM
&& lock_rec_convert_impl_to_expl(thr_get_trx(thr), id, rec,
index, offsets)) {
trx_t *trx = thr_get_trx(thr);
if (!lock_table_has(trx, index->table, LOCK_X)
&& heap_no != PAGE_HEAP_NO_SUPREMUM
&& lock_rec_convert_impl_to_expl(trx, id, rec, index, offsets)
&& gap_mode == LOCK_REC_NOT_GAP) {
/* We already hold an implicit exclusive lock. */
return DB_SUCCESS;
}
err = lock_rec_lock(false, gap_mode | mode,
dberr_t err = lock_rec_lock(false, gap_mode | mode,
block, heap_no, index, thr);
ut_ad(lock_rec_queue_validate(false, id, rec, index, offsets));

View File

@@ -2341,7 +2341,6 @@ void
pars_info_bind_id(
/*==============*/
pars_info_t* info, /*!< in: info struct */
ibool copy_name, /* in: copy name if TRUE */
const char* name, /*!< in: name */
const char* id) /*!< in: id */
{
@@ -2364,8 +2363,7 @@ pars_info_bind_id(
bid = static_cast<pars_bound_id_t*>(
ib_vector_push(info->bound_ids, NULL));
bid->name = (copy_name)
? mem_heap_strdup(info->heap, name) : name;
bid->name = name;
}
bid->id = id;

View File

@@ -225,6 +225,19 @@ struct row_import {
found and was readable */
};
struct fil_iterator_t {
pfs_os_file_t file; /*!< File handle */
const char* filepath; /*!< File path name */
os_offset_t start; /*!< From where to start */
os_offset_t end; /*!< Where to stop */
os_offset_t file_size; /*!< File size in bytes */
ulint n_io_buffers; /*!< Number of pages to use
for IO */
byte* io_buffer; /*!< Buffer to use for IO */
fil_space_crypt_t *crypt_data; /*!< Crypt data (if encrypted) */
byte* crypt_io_buffer; /*!< IO buffer when encrypted */
};
/** Use the page cursor to iterate over records in a block. */
class RecIterator {
public:
@@ -465,6 +478,10 @@ public:
? block->page.zip.data : block->frame;
}
/** Invoke the functionality for the callback */
virtual dberr_t run(const fil_iterator_t& iter,
buf_block_t* block) UNIV_NOTHROW = 0;
protected:
/** Get the physical offset of the extent descriptor within the page.
@param page_no page number of the extent descriptor
@@ -620,6 +637,24 @@ AbstractCallback::init(
return set_current_xdes(0, page);
}
/**
TODO: This can be made parallel trivially by chunking up the file
and creating a callback per thread. The main benefit would be to use
multiple CPUs for checksums and compressed tables. We have to process
compressed tables block by block right now. Secondly, we need to
decompress/compress and copy too much data. These operations are
CPU intensive.
Iterate over all the pages in the tablespace.
@param iter - Tablespace iterator
@param block - block to use for IO
@param callback - Callback to inspect and update page contents
@retval DB_SUCCESS or error code */
static dberr_t fil_iterate(
const fil_iterator_t& iter,
buf_block_t* block,
AbstractCallback& callback);
/**
Try to determine the index root pages by checking if the next/prev
pointers are both FIL_NULL. We need to ensure that we skip deleted pages. */
@@ -637,19 +672,24 @@ struct FetchIndexRootPages : public AbstractCallback {
uint32_t m_page_no; /*!< Root page number */
};
typedef std::vector<Index, ut_allocator<Index> > Indexes;
/** Constructor
@param trx covering (user) transaction
@param table table definition in server. */
FetchIndexRootPages(const dict_table_t* table, trx_t* trx)
:
AbstractCallback(trx, UINT32_MAX),
m_table(table) UNIV_NOTHROW { }
m_table(table), m_index(0, 0) UNIV_NOTHROW { }
/** Destructor */
~FetchIndexRootPages() UNIV_NOTHROW override { }
/** Fetch the clustered index root page in the tablespace
@param iter Tablespace iterator
@param block Block to use for IO
@retval DB_SUCCESS or error code */
dberr_t run(const fil_iterator_t& iter,
buf_block_t* block) UNIV_NOTHROW override;
/** Called for each block as it is read from the file.
@param block block to convert, it is not from the buffer pool.
@retval DB_SUCCESS or error code. */
@@ -663,7 +703,7 @@ struct FetchIndexRootPages : public AbstractCallback {
const dict_table_t* m_table;
/** Index information */
Indexes m_indexes;
Index m_index;
};
/** Called for each block as it is read from the file. Check index pages to
@@ -678,23 +718,11 @@ dberr_t FetchIndexRootPages::operator()(buf_block_t* block) UNIV_NOTHROW
const page_t* page = get_frame(block);
ulint page_type = fil_page_get_type(page);
m_index.m_id = btr_page_get_index_id(page);
m_index.m_page_no = block->page.id().page_no();
if (page_type == FIL_PAGE_TYPE_XDES) {
return set_current_xdes(block->page.id().page_no(), page);
} else if (fil_page_index_page_check(page)
&& !is_free(block->page.id().page_no())
&& !page_has_siblings(page)) {
index_id_t id = btr_page_get_index_id(page);
m_indexes.push_back(Index(id, block->page.id().page_no()));
if (m_indexes.size() == 1) {
/* Check that the tablespace flags match the
table flags. */
const uint32_t expected
= dict_tf_to_fsp_flags(m_table->flags);
/* Check that the tablespace flags match the table flags. */
const uint32_t expected = dict_tf_to_fsp_flags(m_table->flags);
if (!fsp_flags_match(expected, m_space_flags)) {
ib_errf(m_trx->mysql_thd, IB_LOG_LEVEL_ERROR,
ER_TABLE_SCHEMA_MISMATCH,
@@ -704,7 +732,6 @@ dberr_t FetchIndexRootPages::operator()(buf_block_t* block) UNIV_NOTHROW
unsigned(m_space_flags));
return(DB_CORRUPTION);
}
}
if (!page_is_comp(block->frame) !=
!dict_table_is_comp(m_table)) {
@@ -713,7 +740,6 @@ dberr_t FetchIndexRootPages::operator()(buf_block_t* block) UNIV_NOTHROW
"ROW_FORMAT mismatch");
return DB_CORRUPTION;
}
}
return DB_SUCCESS;
}
@@ -724,11 +750,9 @@ Update the import configuration that will be used to import the tablespace.
dberr_t
FetchIndexRootPages::build_row_import(row_import* cfg) const UNIV_NOTHROW
{
Indexes::const_iterator end = m_indexes.end();
ut_a(cfg->m_table == m_table);
cfg->m_zip_size = m_zip_size;
cfg->m_n_indexes = m_indexes.size();
cfg->m_n_indexes = 1;
if (cfg->m_n_indexes == 0) {
@@ -754,13 +778,9 @@ FetchIndexRootPages::build_row_import(row_import* cfg) const UNIV_NOTHROW
row_index_t* cfg_index = cfg->m_indexes;
for (Indexes::const_iterator it = m_indexes.begin();
it != end;
++it, ++cfg_index) {
char name[BUFSIZ];
snprintf(name, sizeof(name), "index" IB_ID_FMT, it->m_id);
snprintf(name, sizeof(name), "index" IB_ID_FMT, m_index.m_id);
ulint len = strlen(name) + 1;
@@ -779,12 +799,11 @@ FetchIndexRootPages::build_row_import(row_import* cfg) const UNIV_NOTHROW
memcpy(cfg_index->m_name, name, len);
cfg_index->m_id = it->m_id;
cfg_index->m_id = m_index.m_id;
cfg_index->m_space = m_space;
cfg_index->m_page_no = it->m_page_no;
}
cfg_index->m_page_no = m_index.m_page_no;
return(DB_SUCCESS);
}
@@ -840,6 +859,12 @@ public:
}
}
dberr_t run(const fil_iterator_t& iter,
buf_block_t* block) UNIV_NOTHROW override
{
return fil_iterate(iter, block, *this);
}
/** Called for each block as it is read from the file.
@param block block to convert, it is not from the buffer pool.
@retval DB_SUCCESS or error code. */
@@ -1896,8 +1921,10 @@ PageConverter::update_index_page(
row_index_t* index = find_index(id);
if (UNIV_UNLIKELY(!index)) {
if (!m_cfg->m_missing) {
ib::warn() << "Unknown index id " << id
<< " on page " << page_id.page_no();
}
return DB_SUCCESS;
}
@@ -3320,20 +3347,6 @@ dberr_t row_import_update_discarded_flag(trx_t* trx, table_id_t table_id,
return(err);
}
struct fil_iterator_t {
pfs_os_file_t file; /*!< File handle */
const char* filepath; /*!< File path name */
os_offset_t start; /*!< From where to start */
os_offset_t end; /*!< Where to stop */
os_offset_t file_size; /*!< File size in bytes */
ulint n_io_buffers; /*!< Number of pages to use
for IO */
byte* io_buffer; /*!< Buffer to use for IO */
fil_space_crypt_t *crypt_data; /*!< Crypt data (if encrypted) */
byte* crypt_io_buffer; /*!< IO buffer when encrypted */
};
/** InnoDB writes page by page when a page compressed
tablespace is involved. This helps to save disk space when
punch hole is enabled
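Punch hole deallocates the unused tail of a block-aligned write, so a shorter compressed page occupies fewer file-system blocks. A minimal Linux-only sketch of the idea, using plain POSIX/Linux calls rather than the real fil_import_compress_fwrite(), and assuming a 16 KiB logical page over 4 KiB file-system blocks:

/* Linux-only sketch: write one compressed page, then punch a
   hole over the unused tail so it stops occupying disk blocks. */
#include <fcntl.h>
#include <unistd.h>
#include <linux/falloc.h>
#include <cstdint>

static const uint64_t page_size = 16384;  /* assumed logical page size */
static const uint64_t fs_block  = 4096;   /* assumed file-system block */

static int write_compressed(int fd, uint64_t page_no,
                            const char* buf, uint64_t comp_len)
{
    uint64_t off = page_no * page_size;
    if (pwrite(fd, buf, comp_len, (off_t) off) < 0)
        return -1;
    /* round the used length up to a whole block, free the rest */
    uint64_t used = (comp_len + fs_block - 1) & ~(fs_block - 1);
    if (used < page_size)
        return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                         (off_t) (off + used), (off_t) (page_size - used));
    return 0;
}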
@@ -3391,22 +3404,102 @@ dberr_t fil_import_compress_fwrite(const fil_iterator_t &iter,
return DB_SUCCESS;
}
/********************************************************************//**
TODO: This can be made parallel trivially by chunking up the file and creating
a callback per thread. . Main benefit will be to use multiple CPUs for
checksums and compressed tables. We have to do compressed tables block by
block right now. Secondly we need to decompress/compress and copy too much
of data. These are CPU intensive.
dberr_t FetchIndexRootPages::run(const fil_iterator_t& iter,
buf_block_t* block) UNIV_NOTHROW
{
const unsigned zip_size= fil_space_t::zip_size(m_space_flags);
const unsigned size= zip_size ? zip_size : unsigned(srv_page_size);
const ulint buf_size=
#ifdef HAVE_LZO
LZO1X_1_15_MEM_COMPRESS+
#elif defined HAVE_SNAPPY
snappy_max_compressed_length(srv_page_size) +
#endif
srv_page_size;
byte* page_compress_buf = static_cast<byte*>(malloc(buf_size));
const bool full_crc32 = fil_space_t::full_crc32(m_space_flags);
bool skip_checksum_check = false;
ut_ad(!srv_read_only_mode);
Iterate over all the pages in the tablespace.
@param iter - Tablespace iterator
@param block - block to use for IO
@param callback - Callback to inspect and update page contents
@retval DB_SUCCESS or error code */
static
dberr_t
fil_iterate(
/*========*/
if (!page_compress_buf)
return DB_OUT_OF_MEMORY;
const bool encrypted= iter.crypt_data != NULL &&
iter.crypt_data->should_encrypt();
byte* const readptr= iter.io_buffer;
block->frame= readptr;
if (block->page.zip.data)
block->page.zip.data= readptr;
bool page_compressed= false;
dberr_t err= os_file_read_no_error_handling(
IORequestReadPartial, iter.file, readptr, 3 * size, size, 0);
if (err != DB_SUCCESS)
{
ib::error() << iter.filepath << ": os_file_read() failed";
goto func_exit;
}
if (page_get_page_no(readptr) != 3)
{
page_corrupted:
ib::warn() << filename() << ": Page 3 at offset "
<< 3 * size << " looks corrupted.";
err= DB_CORRUPTION;
goto func_exit;
}
block->page.id_.set_page_no(3);
if (full_crc32 && fil_space_t::is_compressed(m_space_flags))
page_compressed= buf_page_is_compressed(readptr, m_space_flags);
else
{
switch (fil_page_get_type(readptr)) {
case FIL_PAGE_PAGE_COMPRESSED:
case FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED:
if (block->page.zip.data)
goto page_corrupted;
page_compressed= true;
}
}
if (encrypted)
{
if (!buf_page_verify_crypt_checksum(readptr, m_space_flags))
goto page_corrupted;
if (!fil_space_decrypt(get_space_id(), m_space_flags, iter.crypt_data,
readptr, size, readptr, &err) ||
err != DB_SUCCESS)
goto func_exit;
}
/* For full_crc32 format, skip checksum check
after decryption. */
skip_checksum_check= full_crc32 && encrypted;
if (page_compressed)
{
ulint compress_length= fil_page_decompress(page_compress_buf,
readptr,
m_space_flags);
ut_ad(compress_length != srv_page_size);
if (compress_length == 0)
goto page_corrupted;
}
else if (!skip_checksum_check
&& buf_page_is_corrupted(false, readptr, m_space_flags))
goto page_corrupted;
err= this->operator()(block);
func_exit:
free(page_compress_buf);
return err;
}
static dberr_t fil_iterate(
const fil_iterator_t& iter,
buf_block_t* block,
AbstractCallback& callback)
@@ -3868,7 +3961,7 @@ fil_tablespace_iterate(
block->page.zip.data = block->frame + srv_page_size;
}
err = fil_iterate(iter, block, callback);
err = callback.run(iter, block);
if (iter.crypt_data) {
fil_space_destroy_crypt_data(&iter.crypt_data);
@@ -3997,6 +4090,16 @@ row_import_for_mysql(
cfg.m_zip_size = 0;
if (UT_LIST_GET_LEN(table->indexes) > 1) {
ib_errf(trx->mysql_thd, IB_LOG_LEVEL_ERROR,
ER_INTERNAL_ERROR,
"Drop all secondary indexes before importing "
"table %s when .cfg file is missing.",
table->name.m_name);
err = DB_ERROR;
return row_import_error(prebuilt, trx, err);
}
FetchIndexRootPages fetchIndexRootPages(table, trx);
err = fil_tablespace_iterate(
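With the .cfg file missing, the import path now reconstructs index metadata from exactly one page: page 3, the first index root of a file-per-table tablespace, which is why secondary indexes must be dropped first. A stand-alone sketch of that probe under the stated layout assumptions (raw pread, FIL_PAGE_OFFSET = 4 as in the InnoDB page header; the checksum and decryption steps of FetchIndexRootPages::run() are omitted):

/* Stand-alone sketch: read page 3 of an .ibd file and check that
   its stored page number matches, roughly as FetchIndexRootPages::run()
   does before treating it as the clustered index root. */
#include <fcntl.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>
#include <vector>

static const uint32_t FIL_PAGE_OFFSET = 4;  /* page-number field offset */

static bool probe_root_page(const char* path, uint32_t page_size = 16384)
{
    int fd = open(path, O_RDONLY);
    if (fd < 0) return false;
    std::vector<unsigned char> page(page_size);
    bool ok = pread(fd, page.data(), page_size,
                    (off_t) (3 * (uint64_t) page_size)) == (ssize_t) page_size;
    close(fd);
    if (!ok) return false;
    const unsigned char* p = page.data() + FIL_PAGE_OFFSET;
    uint32_t page_no = (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16)
                     | (uint32_t(p[2]) << 8) | uint32_t(p[3]);
    if (page_no != 3) {
        std::fprintf(stderr, "%s: page 3 looks corrupted\n", path);
        return false;
    }
    return true;  /* checksum/decryption checks omitted in this model */
}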

View File

@@ -1657,23 +1657,6 @@ row_ins_check_foreign_constraint(
cmp = cmp_dtuple_rec(entry, rec, offsets);
if (cmp == 0) {
if (check_table->versioned()) {
bool history_row = false;
if (check_index->is_primary()) {
history_row = check_index->
vers_history_row(rec, offsets);
} else if (check_index->
vers_history_row(rec, history_row))
{
break;
}
if (history_row) {
continue;
}
}
if (rec_get_deleted_flag(rec,
rec_offs_comp(offsets))) {
/* In delete-marked records, DB_TRX_ID must
@@ -1695,6 +1678,23 @@ row_ins_check_foreign_constraint(
goto end_scan;
}
} else {
if (check_table->versioned()) {
bool history_row = false;
if (check_index->is_primary()) {
history_row = check_index->
vers_history_row(rec,
offsets);
} else if (check_index->
vers_history_row(rec,
history_row)) {
break;
}
if (history_row) {
continue;
}
}
/* Found a matching record. Lock only
a record because we can allow inserts
into gaps */
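The moved block decides, for system-versioned tables, whether the matched record is a history row and therefore irrelevant to the foreign key check. A condensed model of that decision, with stand-in predicates rather than the real dict_index_t API:

/* Condensed model of the history-row filtering above. The real
   code stops the scan when a secondary index cannot decide. */
enum class scan_action { use_record, skip_record, stop_scan };

static scan_action filter_versioned(bool table_versioned,
                                    bool index_is_primary,
                                    bool rec_is_history,      /* vers_history_row() */
                                    bool secondary_undecided) /* secondary-index failure */
{
    if (!table_versioned)
        return scan_action::use_record;
    if (!index_is_primary && secondary_undecided)
        return scan_action::stop_scan;   /* the "break" in the original */
    if (rec_is_history)
        return scan_action::skip_record; /* the "continue" in the original */
    return scan_action::use_record;
}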

View File

@@ -72,25 +72,6 @@ Created 9/17/2000 Heikki Tuuri
#include "wsrep_mysqld.h"
#endif
/*******************************************************************//**
Determine if the given name is a name reserved for MySQL system tables.
@return TRUE if name is a MySQL system table name */
static
ibool
row_mysql_is_system_table(
/*======================*/
const char* name)
{
if (strncmp(name, "mysql/", 6) != 0) {
return(FALSE);
}
return(0 == strcmp(name + 6, "host")
|| 0 == strcmp(name + 6, "user")
|| 0 == strcmp(name + 6, "db"));
}
/*******************************************************************//**
Delays an INSERT, DELETE or UPDATE operation if the purge is lagging. */
static
@@ -2207,30 +2188,17 @@ row_create_table_for_mysql(
DBUG_EXECUTE_IF(
"ib_create_table_fail_at_start_of_row_create_table_for_mysql",
goto err_exit;
dict_mem_table_free(table); return DB_ERROR;
);
trx->op_info = "creating table";
if (row_mysql_is_system_table(table->name.m_name)) {
ib::error() << "Trying to create a MariaDB system table "
<< table->name << " of type InnoDB. MariaDB system"
" tables must be of the MyISAM type!";
err_exit:
dict_mem_table_free(table);
trx->op_info = "";
return(DB_ERROR);
}
if (!dict_sys.sys_tables_exist()) {
ib::error() << "Some InnoDB system tables are missing";
goto err_exit;
sql_print_error("InnoDB: Some system tables are missing");
dict_mem_table_free(table);
return DB_ERROR;
}
trx->op_info = "creating table";
trx_start_if_not_started_xa(trx, true);
heap = mem_heap_create(512);
@@ -2770,14 +2738,6 @@ row_rename_table_for_mysql(
if (high_level_read_only) {
return(DB_READ_ONLY);
} else if (row_mysql_is_system_table(new_name)) {
ib::error() << "Trying to create a MariaDB system table "
<< new_name << " of type InnoDB. MariaDB system tables"
" must be of the MyISAM type!";
goto funct_exit;
}
trx->op_info = "renaming table";

View File

@@ -1798,7 +1798,7 @@ static std::mutex purge_thd_mutex;
extern void* thd_attach_thd(THD*);
extern void thd_detach_thd(void *);
THD* acquire_thd(void **ctx)
static THD *acquire_thd(void **ctx)
{
std::unique_lock<std::mutex> lk(purge_thd_mutex);
if (purge_thds.empty()) {
@@ -1816,7 +1816,7 @@ THD* acquire_thd(void **ctx)
return thd;
}
void release_thd(THD *thd, void *ctx)
static void release_thd(THD *thd, void *ctx)
{
thd_detach_thd(ctx);
std::unique_lock<std::mutex> lk(purge_thd_mutex);
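Marking acquire_thd() and release_thd() static confines the purge THD pool to this translation unit; the underlying pattern is a mutex-guarded free list of expensive-to-create objects. A self-contained model of that pattern, with Handle standing in for THD and none of the real attach/detach machinery:

/* Self-contained model of the mutex-guarded pool pattern above.
   Handle is a stand-in for THD; creation happens outside the lock. */
#include <mutex>
#include <vector>

struct Handle { int id; };

static std::mutex pool_mutex;
static std::vector<Handle*> pool;

static Handle* acquire_handle()
{
    std::unique_lock<std::mutex> lk(pool_mutex);
    if (pool.empty()) {
        lk.unlock();
        return new Handle{0};  /* create outside the critical section */
    }
    Handle* h = pool.back();
    pool.pop_back();
    return h;
}

static void release_handle(Handle* h)
{
    std::unique_lock<std::mutex> lk(pool_mutex);
    pool.push_back(h);         /* recycle instead of destroying */
}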

View File

@@ -341,9 +341,8 @@ FAIL_REGEX "warning: incompatible pointer to integer conversion"
CONFIGURE_FILE(pfs_config.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/pfs_config.h)
MYSQL_ADD_PLUGIN(perfschema ${PERFSCHEMA_SOURCES} STORAGE_ENGINE DEFAULT
STATIC_ONLY RECOMPILE_FOR_EMBEDDED)
STATIC_ONLY RECOMPILE_FOR_EMBEDDED DEPENDS GenServerSource)
IF (TARGET perfschema)
ADD_DEPENDENCIES(perfschema GenServerSource)
IF(WITH_UNIT_TESTS)
ADD_SUBDIRECTORY(unittest)
ENDIF(WITH_UNIT_TESTS)

View File

@@ -0,0 +1,9 @@
--let $MASTER_1_COMMENT_P_2_1= $MASTER_1_COMMENT_P_2_1_BACKUP
--let $CHILD2_1_CREATE_TABLES= $CHILD2_1_CREATE_TABLES_BACKUP
--disable_warnings
--disable_query_log
--disable_result_log
--source ../t/test_deinit.inc
--enable_result_log
--enable_query_log
--enable_warnings

View File

@@ -0,0 +1,31 @@
--disable_warnings
--disable_query_log
--disable_result_log
--source ../t/test_init.inc
--enable_result_log
--enable_query_log
--enable_warnings
--let $MASTER_1_COMMENT_P_2_1_BACKUP= $MASTER_1_COMMENT_P_2_1
let $MASTER_1_COMMENT_P_2_1=
PARTITION BY RANGE(i) (
PARTITION pt1 VALUES LESS THAN (5) COMMENT='srv "s_2_1", table "ta_r2"',
PARTITION pt2 VALUES LESS THAN (10) COMMENT='srv "s_2_1", table "ta_r3"',
PARTITION pt3 VALUES LESS THAN MAXVALUE COMMENT='srv "s_2_1", table "ta_r4"'
);
--let $CHILD2_1_CREATE_TABLES_BACKUP= $CHILD2_1_CREATE_TABLES
let $CHILD2_1_CREATE_TABLES=
CREATE TABLE ta_r2 (
i INT,
j JSON,
PRIMARY KEY(i)
) $CHILD2_1_ENGINE $CHILD2_1_CHARSET $STR_SEMICOLON
CREATE TABLE ta_r3 (
i INT,
j JSON,
PRIMARY KEY(i)
) $CHILD2_1_ENGINE $CHILD2_1_CHARSET $STR_SEMICOLON
CREATE TABLE ta_r4 (
i INT,
j JSON,
PRIMARY KEY(i)
) $CHILD2_1_ENGINE $CHILD2_1_CHARSET;

View File

@@ -0,0 +1,58 @@
for master_1
for child2
child2_1
child2_2
child2_3
for child3
this test is for MDEV-24523
drop and create databases
connection master_1;
CREATE DATABASE auto_test_local;
USE auto_test_local;
connection child2_1;
CREATE DATABASE auto_test_remote;
USE auto_test_remote;
create table and insert
connection child2_1;
CHILD2_1_CREATE_TABLES
connection master_1;
CREATE TABLE tbl_a (
i INT,
j JSON,
PRIMARY KEY(i)
) ENGINE=Spider PARTITION BY RANGE(i) (
PARTITION pt1 VALUES LESS THAN (5) COMMENT='srv "s_2_1", table "ta_r2"',
PARTITION pt2 VALUES LESS THAN (10) COMMENT='srv "s_2_1", table "ta_r3"',
PARTITION pt3 VALUES LESS THAN MAXVALUE COMMENT='srv "s_2_1", table "ta_r4"'
)
INSERT INTO tbl_a VALUES (1, '{ "a": 1, "b": [2, 3]}');
test 1
connection master_1;
UPDATE tbl_a SET j = JSON_REPLACE(j, '$.a', 10, '$.c', '[1, 2]');
SELECT * FROM tbl_a;
i j
1 {"a": 10, "b": [2, 3]}
TRUNCATE TABLE tbl_a;
INSERT INTO tbl_a VALUES (1, '{ "a": 1, "b": [2, 3]}');
UPDATE tbl_a SET j = JSON_REPLACE(j, '$.a', 10, '$.b', '[1, 2]');
SELECT * FROM tbl_a;
i j
1 {"a": 10, "b": "[1, 2]"}
deinit
connection master_1;
DROP DATABASE IF EXISTS auto_test_local;
connection child2_1;
DROP DATABASE IF EXISTS auto_test_remote;
for master_1
for child2
child2_1
child2_2
child2_3
for child3
end of test

View File

@@ -0,0 +1,3 @@
!include include/default_mysqld.cnf
!include ../my_1_1.cnf
!include ../my_2_1.cnf

View File

@@ -0,0 +1,66 @@
--source ../include/mdev_24523_init.inc
--echo
--echo this test is for MDEV-24523
--echo
--echo drop and create databases
--connection master_1
--disable_warnings
CREATE DATABASE auto_test_local;
USE auto_test_local;
--connection child2_1
CREATE DATABASE auto_test_remote;
USE auto_test_remote;
--enable_warnings
--echo
--echo create table and insert
--connection child2_1
--disable_query_log
--disable_ps_protocol
echo CHILD2_1_CREATE_TABLES;
eval $CHILD2_1_CREATE_TABLES;
--enable_ps_protocol
--enable_query_log
--connection master_1
--disable_query_log
echo CREATE TABLE tbl_a (
i INT,
j JSON,
PRIMARY KEY(i)
) $MASTER_1_ENGINE $MASTER_1_COMMENT_P_2_1;
eval CREATE TABLE tbl_a (
i INT,
j JSON,
PRIMARY KEY(i)
) $MASTER_1_ENGINE $MASTER_1_COMMENT_P_2_1;
--enable_query_log
INSERT INTO tbl_a VALUES (1, '{ "a": 1, "b": [2, 3]}');
--echo
--echo test 1
--connection master_1
UPDATE tbl_a SET j = JSON_REPLACE(j, '$.a', 10, '$.c', '[1, 2]');
SELECT * FROM tbl_a;
TRUNCATE TABLE tbl_a;
INSERT INTO tbl_a VALUES (1, '{ "a": 1, "b": [2, 3]}');
UPDATE tbl_a SET j = JSON_REPLACE(j, '$.a', 10, '$.b', '[1, 2]');
SELECT * FROM tbl_a;
--echo
--echo deinit
--disable_warnings
--connection master_1
DROP DATABASE IF EXISTS auto_test_local;
--connection child2_1
DROP DATABASE IF EXISTS auto_test_remote;
--enable_warnings
--source ../include/mdev_24523_deinit.inc
--echo
--echo end of test

View File

@@ -5256,10 +5256,10 @@ static void test_manual_sample()
{
unsigned int param_count;
MYSQL_STMT *stmt;
short small_data;
int int_data;
short small_data= 1;
int int_data= 2;
int rc;
char str_data[50];
char str_data[50]= "std_data";
ulonglong affected_rows;
MYSQL_BIND my_bind[3];
my_bool is_null;