Merge 10.5 into 10.6
@@ -198,8 +198,9 @@ OPTION(NOT_FOR_DISTRIBUTION "Allow linking with GPLv2-incompatible system librar
# Can be switched on only for debug build.
#
OPTION(WITH_PROTECT_STATEMENT_MEMROOT "Enable protection of statement's memory root after first SP/PS execution. Turned into account only for debug build" OFF)
IF (CMAKE_BUILD_TYPE MATCHES "Debug" AND WITH_PROTECT_STATEMENT_MEMROOT)
ADD_DEFINITIONS(-DPROTECT_STATEMENT_MEMROOT)
IF (WITH_PROTECT_STATEMENT_MEMROOT)
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DPROTECT_STATEMENT_MEMROOT")
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DPROTECT_STATEMENT_MEMROOT")
ENDIF()

INCLUDE(check_compiler_flag)
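The option above only takes effect in debug builds, since the define is now appended to the Debug compiler flags. A minimal configure sketch for trying it out (in-source build and the plain cmake invocation are assumptions, not part of this commit):

    cmake . -DCMAKE_BUILD_TYPE=Debug -DWITH_PROTECT_STATEMENT_MEMROOT=ON   # adds -DPROTECT_STATEMENT_MEMROOT to the debug flags
    cmake --build .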
README.md
@@ -3,7 +3,7 @@ Code status:

* [](https://ci.appveyor.com/project/rasmushoj/server) ci.appveyor.com

## MariaDB: The open source relational database
## MariaDB: The innovative open source database

MariaDB was designed as a drop-in replacement of MySQL(R) with more
features, new storage engines, fewer bugs, and better performance.
@@ -33,28 +33,23 @@ https://mariadb.com/kb/en/mariadb-versus-mysql-compatibility/

https://mariadb.com/kb/en/new-and-old-releases/

Getting the code, building it and testing it
---------------------------------------------------------------

Refer to the following guide: https://mariadb.org/get-involved/getting-started-for-developers/get-code-build-test/ which outlines how to correctly build the source code and run the MariaDB testing framework.

Help
-----

More help is available from the Maria Discuss mailing list
https://launchpad.net/~maria-discuss, MariaDB's Zulip
https://lists.mariadb.org/postorius/lists/discuss.lists.mariadb.org/ and MariaDB's Zulip
instance, https://mariadb.zulipchat.com/

Live QA for beginner contributors
----
MariaDB has a dedicated time each week when we answer new contributor questions live on Zulip.
From 8:00 to 10:00 UTC on Mondays, and 10:00 to 12:00 UTC on Thursdays,
anyone can ask any questions they'd like, and a live developer will be available to assist.

New contributors can ask questions any time, but we will provide immediate feedback during that interval.

Licensing
---------

***************************************************************************

NOTE:

MariaDB is specifically available only under version 2 of the GNU
General Public License (GPLv2). (I.e. Without the "any later version"
clause.) This is inherited from MySQL. Please see the README file in
@@ -1404,7 +1404,9 @@ static void usage(void)
refresh Flush all tables and close and open logfiles\n\
shutdown Take server down\n\
status Gives a short status message from the server\n\
start-all-slaves Start all slaves\n\
start-slave Start slave\n\
stop-all-slaves Stop all slaves\n\
stop-slave Stop slave\n\
variables Prints variables available\n\
version Get version info from server");
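The start-all-slaves and stop-all-slaves commands listed above are invoked the same way as the existing start-slave/stop-slave ones; a usage sketch (connection options are placeholders):

    mysqladmin --user=root --password stop-all-slaves    # stop replication on every configured connection
    mysqladmin --user=root --password start-all-slaves   # resume replication on all of them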
@@ -1,5 +1,5 @@
if "%MTR_PARALLEL%"=="" set MTR_PARALLEL=%NUMBER_OF_PROCESSORS%
perl mysql-test-run.pl --verbose-restart --force --suite-timeout=120 --max-test-fail=10 --retry=3 --suite=^
perl mysql-test-run.pl --force --suite-timeout=120 --max-test-fail=10 --retry=3 --suite=^
vcol,gcol,perfschema,^
main,^
innodb,^
@@ -613,13 +613,17 @@ call p_verify_status_increment(2, 0, 2, 0);
|
||||
drop table t2;
|
||||
set sql_mode=no_engine_substitution;
|
||||
create temporary table t2 (a int);
|
||||
call p_verify_status_increment(1, 0, 0, 0);
|
||||
# One commit for the create temporary table, and two for committing the
|
||||
# read of the stored procedure from Aria table (creating temporary table
|
||||
# clears the sp cache).
|
||||
call p_verify_status_increment(3, 0, 2, 0);
|
||||
set sql_mode=default;
|
||||
--echo # 19. A function changes temp-trans-table.
|
||||
--echo #
|
||||
select f1();
|
||||
--echo # Two commits because a binary log record is written
|
||||
call p_verify_status_increment(2, 0, 1, 0);
|
||||
--echo # Two commits because a binary log record is written, and another two
|
||||
--echo # as the function f1() is reloaded after creating temporary table.
|
||||
call p_verify_status_increment(4, 0, 3, 0);
|
||||
commit;
|
||||
call p_verify_status_increment(2, 0, 1, 0);
|
||||
|
||||
@@ -672,9 +676,11 @@ call p_verify_status_increment(2, 0, 1, 0);
|
||||
--echo # 25. DDL: DROP TEMPORARY TABLE, does not start a transaction
|
||||
--echo #
|
||||
drop temporary table t2;
|
||||
call p_verify_status_increment(1, 0, 1, 0);
|
||||
# Dropping temporary table clears SP caches, so get another two commit
|
||||
# increments from loading the p_verify_status_increment procedure.
|
||||
call p_verify_status_increment(3, 0, 2, 0);
|
||||
commit;
|
||||
call p_verify_status_increment(1, 0, 1, 0);
|
||||
call p_verify_status_increment(1, 0, 0, 0);
|
||||
|
||||
--echo # 26. Verify that SET AUTOCOMMIT issues an implicit commit
|
||||
--echo #
|
||||
@@ -721,7 +727,9 @@ call p_verify_status_increment(1, 0, 1, 0);
|
||||
create table t2 (a int);
|
||||
call p_verify_status_increment(0, 0, 0, 0);
|
||||
do (select f1() from t1 where a=2);
|
||||
call p_verify_status_increment(2, 2, 2, 2);
|
||||
# Again extra 2 commit increments from re-loading function f1 after
|
||||
# dropping temporary table.
|
||||
call p_verify_status_increment(4, 2, 4, 2);
|
||||
commit;
|
||||
call p_verify_status_increment(2, 2, 2, 2);
|
||||
|
||||
|
||||
@@ -634,7 +634,7 @@ SUCCESS
|
||||
drop table t2;
|
||||
set sql_mode=no_engine_substitution;
|
||||
create temporary table t2 (a int);
|
||||
call p_verify_status_increment(1, 0, 0, 0);
|
||||
call p_verify_status_increment(3, 0, 2, 0);
|
||||
SUCCESS
|
||||
|
||||
set sql_mode=default;
|
||||
@@ -643,8 +643,9 @@ set sql_mode=default;
|
||||
select f1();
|
||||
f1()
|
||||
2
|
||||
# Two commits because a binary log record is written
|
||||
call p_verify_status_increment(2, 0, 1, 0);
|
||||
# Two commits because a binary log record is written, and another two
|
||||
# as the function f1() is reloaded after creating temporary table.
|
||||
call p_verify_status_increment(4, 0, 3, 0);
|
||||
SUCCESS
|
||||
|
||||
commit;
|
||||
@@ -715,11 +716,11 @@ SUCCESS
|
||||
# 25. DDL: DROP TEMPORARY TABLE, does not start a transaction
|
||||
#
|
||||
drop temporary table t2;
|
||||
call p_verify_status_increment(1, 0, 1, 0);
|
||||
call p_verify_status_increment(3, 0, 2, 0);
|
||||
SUCCESS
|
||||
|
||||
commit;
|
||||
call p_verify_status_increment(1, 0, 1, 0);
|
||||
call p_verify_status_increment(1, 0, 0, 0);
|
||||
SUCCESS
|
||||
|
||||
# 26. Verify that SET AUTOCOMMIT issues an implicit commit
|
||||
@@ -801,7 +802,7 @@ call p_verify_status_increment(0, 0, 0, 0);
|
||||
SUCCESS
|
||||
|
||||
do (select f1() from t1 where a=2);
|
||||
call p_verify_status_increment(2, 2, 2, 2);
|
||||
call p_verify_status_increment(4, 2, 4, 2);
|
||||
SUCCESS
|
||||
|
||||
commit;
|
||||
|
||||
@@ -6396,5 +6396,27 @@ b b d c c
|
||||
10 NULL NULL NULL NULL
|
||||
DROP TABLE t1,t2,t3,t4;
|
||||
#
|
||||
# MDEV-21102: Server crashes in JOIN_CACHE::write_record_data upon EXPLAIN with subqueries and constant tables
|
||||
#
|
||||
CREATE TABLE t1 (a int, b int) ENGINE=MyISAM;
|
||||
CREATE TABLE t2 (c int, d int) ENGINE=MyISAM;
|
||||
INSERT INTO t2 VALUES (1,10);
|
||||
CREATE TABLE t3 (e int, key (e)) ENGINE=MyISAM;
|
||||
INSERT INTO t3 VALUES (2),(3);
|
||||
# Must not crash, must use join buffer in subquery
|
||||
EXPLAIN
|
||||
SELECT * FROM t1
|
||||
WHERE a > b OR a IN (
|
||||
SELECT c FROM t2 WHERE EXISTS (
|
||||
SELECT * FROM t3 t3a JOIN t3 t3b WHERE t3a.e < d
|
||||
)
|
||||
);
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
|
||||
2 DEPENDENT SUBQUERY t2 system NULL NULL NULL NULL 1
|
||||
3 SUBQUERY t3a range e e 5 NULL 2 Using where; Using index
|
||||
3 SUBQUERY t3b index NULL e 5 NULL 2 Using index; Using join buffer (flat, BNL join)
|
||||
DROP TABLE t1,t2,t3;
|
||||
#
|
||||
# End of 10.4 tests
|
||||
#
|
||||
|
||||
@@ -4304,6 +4304,27 @@ eval $q2;
|
||||
|
||||
DROP TABLE t1,t2,t3,t4;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-21102: Server crashes in JOIN_CACHE::write_record_data upon EXPLAIN with subqueries and constant tables
|
||||
--echo #
|
||||
CREATE TABLE t1 (a int, b int) ENGINE=MyISAM;
|
||||
|
||||
CREATE TABLE t2 (c int, d int) ENGINE=MyISAM;
|
||||
INSERT INTO t2 VALUES (1,10);
|
||||
|
||||
CREATE TABLE t3 (e int, key (e)) ENGINE=MyISAM;
|
||||
INSERT INTO t3 VALUES (2),(3);
|
||||
|
||||
--echo # Must not crash, must use join buffer in subquery
|
||||
EXPLAIN
|
||||
SELECT * FROM t1
|
||||
WHERE a > b OR a IN (
|
||||
SELECT c FROM t2 WHERE EXISTS (
|
||||
SELECT * FROM t3 t3a JOIN t3 t3b WHERE t3a.e < d
|
||||
)
|
||||
);
|
||||
DROP TABLE t1,t2,t3;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.4 tests
|
||||
--echo #
|
||||
|
||||
@@ -4,13 +4,16 @@
|
||||
--source include/not_embedded.inc
|
||||
--source include/have_debug_sync.inc
|
||||
|
||||
let $wait_condition=select count(*) = 1 from information_schema.processlist;
|
||||
source include/wait_condition.inc;
|
||||
--disable_ps_protocol
|
||||
# Ensure no lingering connections from an earlier test run, which can very
|
||||
# rarely still be visible in SHOW PROCESSLIST here.
|
||||
--let $wait_condition= SELECT COUNT(*) = 1 from information_schema.processlist
|
||||
--source include/wait_condition.inc
|
||||
|
||||
--connect (con1,localhost,root,,)
|
||||
--let $con_id = `SELECT CONNECTION_ID()`
|
||||
|
||||
let $wait_condition=select info is NULL from information_schema.processlist where id != $con_id;
|
||||
let $wait_condition=select command = 'sleep' from information_schema.processlist where id != $con_id;
|
||||
source include/wait_condition.inc;
|
||||
|
||||
--replace_result Execute Query
|
||||
@@ -32,9 +35,7 @@ reap;
|
||||
SET DEBUG_SYNC='reset';
|
||||
|
||||
# Wait until default connection has reset query string
|
||||
let $wait_condition=
|
||||
SELECT COUNT(*) = 1 from information_schema.processlist
|
||||
WHERE info is NULL;
|
||||
let $wait_condition=select command = 'sleep' from information_schema.processlist where id != $con_id;
|
||||
--source include/wait_condition.inc
|
||||
|
||||
--replace_result Execute Query
|
||||
|
||||
@@ -4,13 +4,16 @@ Test bad default storage engine.
|
||||
Test non-numeric value passed to number option.
|
||||
Test that bad value for plugin enum option is rejected correctly.
|
||||
Test to see if multiple unknown options will be displayed in the error output
|
||||
unknown option '--nonexistentoption'
|
||||
unknown option '--alsononexistent'
|
||||
unknown variable 'nonexistentvariable=1'
|
||||
FOUND 1 /unknown option '--nonexistentoption2'/ in mysqltest.log
|
||||
FOUND 1 /unknown option '--alsononexistent'/ in mysqltest.log
|
||||
FOUND 1 /unknown variable 'nonexistentvariable=1'/ in mysqltest.log
|
||||
Test to see if multiple ambiguous options and invalid arguments will be displayed in the error output
|
||||
Error while setting value 'invalid_value' to 'sql_mode'
|
||||
ambiguous option '--character' (character-set-client-handshake, character_sets_dir)
|
||||
option '--bootstrap' cannot take an argument
|
||||
FOUND 1 /Error while setting value 'invalid_value' to 'sql_mode'/ in mysqltest.log
|
||||
FOUND 1 /ambiguous option '--character'/ in mysqltest.log
|
||||
FOUND 1 /option '--bootstrap' cannot take an argument/ in mysqltest.log
|
||||
FOUND 1 /Integer value out of range for uint64: '18446744073709551616' for binlog_cache_size/ in mysqltest.log
|
||||
FOUND 1 /Unknown suffix 'y' used for variable 'bulk_insert_buffer_size' \(value '123y'\). Legal suffix characters are: K, M, G, T, P, E/ in mysqltest.log
|
||||
FOUND 1 /Error while setting value '123y' to 'bulk_insert_buffer_size'/ in mysqltest.log
|
||||
Test that --help --verbose works
|
||||
Test that --not-known-option --help --verbose gives error
|
||||
Done.
|
||||
|
||||
@@ -25,7 +25,7 @@ mkdir $MYSQLTEST_VARDIR/tmp/mysqld_option_err;
|
||||
|
||||
|
||||
--echo Test bad binlog format.
|
||||
--error 1
|
||||
--error 13
|
||||
--exec $MYSQLD_BOOTSTRAP_CMD --skip-networking --datadir=$MYSQLTEST_VARDIR/tmp/mysqld_option_err --skip-grant-tables --log-bin --binlog-format=badformat >>$MYSQLTEST_VARDIR/tmp/mysqld_option_err/mysqltest.log 2>&1
|
||||
|
||||
|
||||
@@ -35,7 +35,7 @@ mkdir $MYSQLTEST_VARDIR/tmp/mysqld_option_err;
|
||||
|
||||
|
||||
--echo Test non-numeric value passed to number option.
|
||||
--error 1
|
||||
--error 9
|
||||
--exec $MYSQLD_BOOTSTRAP_CMD --skip-networking --datadir=$MYSQLTEST_VARDIR/tmp/mysqld_option_err --skip-grant-tables --min-examined-row-limit=notanumber >>$MYSQLTEST_VARDIR/tmp/mysqld_option_err/mysqltest.log 2>&1
|
||||
|
||||
|
||||
@@ -46,17 +46,35 @@ mkdir $MYSQLTEST_VARDIR/tmp/mysqld_option_err;
|
||||
--error 7
|
||||
--exec $MYSQLD_BOOTSTRAP_CMD --skip-networking --datadir=$MYSQLTEST_VARDIR/tmp/mysqld_option_err --skip-grant-tables --plugin-dir=$MYSQLTEST_VARDIR/plugins --plugin-load=example=ha_example.so --plugin-example-enum-var=noexist >>$MYSQLTEST_VARDIR/tmp/mysqld_option_err/mysqltest.log 2>&1
|
||||
|
||||
--let SEARCH_FILE = $MYSQLTEST_VARDIR/tmp/mysqld_option_err/mysqltest.log
|
||||
|
||||
--echo Test to see if multiple unknown options will be displayed in the error output
|
||||
# Remove the noise to make the test robust
|
||||
--replace_regex /^((?!nonexistent).)*$// /.*unknown/unknown/
|
||||
--error 7
|
||||
--exec $MYSQLD_BOOTSTRAP_CMD --skip-networking --datadir=$MYSQLTEST_VARDIR/tmp/mysqld_option_err --skip-grant-tables --nonexistentoption --alsononexistent --nonexistentvariable=1 2>&1
|
||||
--exec $MYSQLD_BOOTSTRAP_CMD --skip-networking --datadir=$MYSQLTEST_VARDIR/tmp/mysqld_option_err --skip-grant-tables --nonexistentoption2 --alsononexistent --nonexistentvariable=1 >>$MYSQLTEST_VARDIR/tmp/mysqld_option_err/mysqltest.log 2>&1
|
||||
|
||||
--let SEARCH_PATTERN=unknown option '--nonexistentoption2'
|
||||
--source include/search_pattern_in_file.inc
|
||||
--let SEARCH_PATTERN=unknown option '--alsononexistent'
|
||||
--source include/search_pattern_in_file.inc
|
||||
--let SEARCH_PATTERN=unknown variable 'nonexistentvariable=1'
|
||||
--source include/search_pattern_in_file.inc
|
||||
|
||||
--echo Test to see if multiple ambiguous options and invalid arguments will be displayed in the error output
|
||||
# Remove the noise to make the test robust
|
||||
--replace_regex /^((?!('sql_mode'|'--character'|'--bootstrap')).)*$// /.*Error while setting value/Error while setting value/ /.*ambiguous option/ambiguous option/ /.*option '--bootstrap'/option '--bootstrap'/
|
||||
--error 1
|
||||
--exec $MYSQLD_BOOTSTRAP_CMD --skip-networking --datadir=$MYSQLTEST_VARDIR/tmp/mysqld_option_err --skip-grant-tables --getopt-prefix-matching --sql-mode=invalid_value --character --bootstrap=partstoob 2>&1
|
||||
--error 9
|
||||
--exec $MYSQLD_BOOTSTRAP_CMD --skip-networking --datadir=$MYSQLTEST_VARDIR/tmp/mysqld_option_err --skip-grant-tables --getopt-prefix-matching --sql-mode=invalid_value --character --bootstrap=partstoob --binlog_cache_size=18446744073709551616 --bulk_insert_buffer_size=123y >>$MYSQLTEST_VARDIR/tmp/mysqld_option_err/mysqltest.log 2>&1
|
||||
|
||||
--let SEARCH_PATTERN=Error while setting value 'invalid_value' to 'sql_mode'
|
||||
--source include/search_pattern_in_file.inc
|
||||
--let SEARCH_PATTERN=ambiguous option '--character'
|
||||
--source include/search_pattern_in_file.inc
|
||||
--let SEARCH_PATTERN=option '--bootstrap' cannot take an argument
|
||||
--source include/search_pattern_in_file.inc
|
||||
--let SEARCH_PATTERN=Integer value out of range for uint64: '18446744073709551616' for binlog_cache_size
|
||||
--source include/search_pattern_in_file.inc
|
||||
--let SEARCH_PATTERN=Unknown suffix 'y' used for variable 'bulk_insert_buffer_size' \(value '123y'\). Legal suffix characters are: K, M, G, T, P, E
|
||||
--source include/search_pattern_in_file.inc
|
||||
--let SEARCH_PATTERN=Error while setting value '123y' to 'bulk_insert_buffer_size'
|
||||
--source include/search_pattern_in_file.inc
|
||||
|
||||
#
|
||||
# Test that an wrong option with --help --verbose gives an error
|
||||
|
||||
@@ -5979,6 +5979,19 @@ EXECUTE stmt USING DEFAULT;
|
||||
# Clean up
|
||||
DEALLOCATE PREPARE stmt;
|
||||
DROP TABLE t1, t2;
|
||||
# MDEV-33218: Assertion `active_arena->is_stmt_prepare_or_first_stmt_execute() || active_arena->state == Query_arena::STMT_SP_QUERY_ARGUMENTS' failed. in st_select_lex::fix_prepare_information
|
||||
CREATE TABLE t1 AS SELECT 1 f;
|
||||
PREPARE stmt FROM 'SHOW CREATE TABLE t1';
|
||||
DROP TABLE t1;
|
||||
EXECUTE stmt;
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist
|
||||
CREATE VIEW t1 AS SELECT 1;
|
||||
EXECUTE stmt;
|
||||
View Create View character_set_client collation_connection
|
||||
t1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `t1` AS select 1 AS `1` latin1 latin1_swedish_ci
|
||||
# Clean up
|
||||
DEALLOCATE PREPARE stmt;
|
||||
DROP VIEW t1;
|
||||
#
|
||||
# End of 10.4 tests
|
||||
#
|
||||
|
||||
@@ -5421,6 +5421,18 @@ EXECUTE stmt USING DEFAULT;
|
||||
DEALLOCATE PREPARE stmt;
|
||||
DROP TABLE t1, t2;
|
||||
|
||||
--echo # MDEV-33218: Assertion `active_arena->is_stmt_prepare_or_first_stmt_execute() || active_arena->state == Query_arena::STMT_SP_QUERY_ARGUMENTS' failed. in st_select_lex::fix_prepare_information
|
||||
CREATE TABLE t1 AS SELECT 1 f;
|
||||
PREPARE stmt FROM 'SHOW CREATE TABLE t1';
|
||||
DROP TABLE t1;
|
||||
--error ER_NO_SUCH_TABLE
|
||||
EXECUTE stmt;
|
||||
CREATE VIEW t1 AS SELECT 1;
|
||||
EXECUTE stmt;
|
||||
--echo # Clean up
|
||||
DEALLOCATE PREPARE stmt;
|
||||
DROP VIEW t1;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.4 tests
|
||||
--echo #
|
||||
|
||||
@@ -2208,12 +2208,42 @@ Qcache_queries_in_cache 0
|
||||
DROP FUNCTION foo;
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-33861: main.query_cache fails with embedded after
|
||||
# enabling WITH_PROTECT_STATEMENT_MEMROOT
|
||||
#
|
||||
create table t1 (s1 int);
|
||||
create procedure f3 () begin
|
||||
select * from t1;
|
||||
end;
|
||||
//
|
||||
create procedure f4 () begin
|
||||
select * from t1;
|
||||
end;
|
||||
//
|
||||
Call f4();
|
||||
s1
|
||||
cAll f3();
|
||||
s1
|
||||
insert into t1 values (2);
|
||||
caLl f3();
|
||||
s1
|
||||
2
|
||||
drop procedure f3;
|
||||
drop procedure f4;
|
||||
drop table t1;
|
||||
#
|
||||
# End of 10.4 tests
|
||||
#
|
||||
#
|
||||
# MDEV-24858 SIGABRT in DbugExit from my_malloc in Query_cache::init_cache Regression
|
||||
#
|
||||
SET @qc= @@query_cache_size;
|
||||
set global Query_cache_size=18446744073709547520;
|
||||
SET GLOBAL query_cache_size= @qc;
|
||||
#
|
||||
# End of 10.5 tests
|
||||
#
|
||||
#
|
||||
# MDEV-22301 JSON_TABLE: Queries are not inserted into query cache.
|
||||
#
|
||||
create table t1 (a text);
|
||||
@@ -2239,3 +2269,4 @@ DROP TABLE t;
|
||||
restore defaults
|
||||
SET GLOBAL query_cache_type= default;
|
||||
SET GLOBAL query_cache_size=@save_query_cache_size;
|
||||
# End of 10.6 tests
|
||||
|
||||
@@ -1807,6 +1807,40 @@ show status like "Qcache_queries_in_cache";
|
||||
DROP FUNCTION foo;
|
||||
drop table t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-33861: main.query_cache fails with embedded after
|
||||
--echo # enabling WITH_PROTECT_STATEMENT_MEMROOT
|
||||
--echo #
|
||||
|
||||
create table t1 (s1 int);
|
||||
--delimiter //
|
||||
create procedure f3 () begin
|
||||
select * from t1;
|
||||
end;
|
||||
//
|
||||
create procedure f4 () begin
|
||||
select * from t1;
|
||||
end;
|
||||
//
|
||||
--delimiter ;
|
||||
|
||||
Call f4();
|
||||
|
||||
cAll f3();
|
||||
|
||||
insert into t1 values (2);
|
||||
|
||||
caLl f3();
|
||||
|
||||
drop procedure f3;
|
||||
drop procedure f4;
|
||||
drop table t1;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.4 tests
|
||||
--echo #
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-24858 SIGABRT in DbugExit from my_malloc in Query_cache::init_cache Regression
|
||||
--echo #
|
||||
@@ -1816,6 +1850,10 @@ set global Query_cache_size=18446744073709547520;
|
||||
SET GLOBAL query_cache_size= @qc;
|
||||
--enable_warnings
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.5 tests
|
||||
--echo #
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-22301 JSON_TABLE: Queries are not inserted into query cache.
|
||||
--echo #
|
||||
@@ -1838,3 +1876,5 @@ DROP TABLE t;
|
||||
SET GLOBAL query_cache_type= default;
|
||||
SET GLOBAL query_cache_size=@save_query_cache_size;
|
||||
--enable_ps2_protocol
|
||||
|
||||
--echo # End of 10.6 tests
|
||||
|
||||
@@ -70,7 +70,7 @@ UNLOCK TABLES;
|
||||
DROP TABLE t1;
|
||||
DROP USER test@localhost;
|
||||
disconnect con1;
|
||||
echo End of 5.1 tests
|
||||
# End of 5.1 tests
|
||||
#
|
||||
# Bug#33669: Transactional temporary tables do not work under --read-only
|
||||
#
|
||||
@@ -244,3 +244,26 @@ connection default;
|
||||
SET GLOBAL READ_ONLY = OFF;
|
||||
DROP USER bug33669@localhost;
|
||||
DROP DATABASE db1;
|
||||
# End of 5.5 tests
|
||||
#
|
||||
# MDEV-33889 Read only server throws error when running a create temporary table as select statement
|
||||
#
|
||||
create table t1(a int) engine=innodb;
|
||||
create user u1@localhost;
|
||||
grant insert, select, update, delete, create temporary tables on test.* to u1@localhost;
|
||||
insert into t1 values (1);
|
||||
set global read_only=1;
|
||||
connect u1,localhost,u1;
|
||||
set default_tmp_storage_engine=innodb;
|
||||
create temporary table tt1 (a int);
|
||||
create temporary table tt2 like t1;
|
||||
create temporary table tt3 as select * from t1;
|
||||
select * from tt3;
|
||||
a
|
||||
1
|
||||
disconnect u1;
|
||||
connection default;
|
||||
drop table t1;
|
||||
drop user u1@localhost;
|
||||
set global read_only=0;
|
||||
# End of 10.5 tests
|
||||
|
||||
@@ -103,7 +103,7 @@ DROP USER test@localhost;
|
||||
|
||||
disconnect con1;
|
||||
|
||||
--echo echo End of 5.1 tests
|
||||
--echo # End of 5.1 tests
|
||||
|
||||
--echo #
|
||||
--echo # Bug#33669: Transactional temporary tables do not work under --read-only
|
||||
@@ -250,3 +250,29 @@ SET GLOBAL READ_ONLY = OFF;
|
||||
DROP USER bug33669@localhost;
|
||||
DROP DATABASE db1;
|
||||
|
||||
--echo # End of 5.5 tests
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-33889 Read only server throws error when running a create temporary table as select statement
|
||||
--echo #
|
||||
create table t1(a int) engine=innodb;
|
||||
create user u1@localhost;
|
||||
grant insert, select, update, delete, create temporary tables on test.* to u1@localhost;
|
||||
insert into t1 values (1);
|
||||
set global read_only=1;
|
||||
|
||||
connect u1,localhost,u1;
|
||||
set default_tmp_storage_engine=innodb;
|
||||
|
||||
create temporary table tt1 (a int);
|
||||
create temporary table tt2 like t1;
|
||||
create temporary table tt3 as select * from t1;
|
||||
select * from tt3;
|
||||
disconnect u1;
|
||||
|
||||
connection default;
|
||||
drop table t1;
|
||||
drop user u1@localhost;
|
||||
set global read_only=0;
|
||||
|
||||
--echo # End of 10.5 tests
|
||||
|
||||
@@ -3900,5 +3900,105 @@ Warnings:
|
||||
Note 1276 Field or reference 'test.t1.pk' of SELECT #2 was resolved in SELECT #1
|
||||
Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1` AS `c1` from `test`.`t1` where !<expr_cache><`test`.`t1`.`c1`,`test`.`t1`.`pk`>(<in_optimizer>(`test`.`t1`.`c1`,<exists>(/* select#2 */ select `test`.`t2`.`c1` from `test`.`t2` join `test`.`t1` `a1` where `test`.`t2`.`i1` = `test`.`t1`.`pk` and `test`.`t2`.`i1` between 3 and 5 and trigcond(<cache>(`test`.`t1`.`c1`) = `test`.`t2`.`c1`))))
|
||||
DROP TABLE t1,t2;
|
||||
set global innodb_stats_persistent= @stats.save;
|
||||
#
|
||||
# MDEV-31154: Fatal InnoDB error or assertion `!is_v' failure upon multi-update with indexed virtual column
|
||||
#
|
||||
# Test with auto generated Primary Key
|
||||
#
|
||||
SET @save_optimizer_switch= @@optimizer_switch;
|
||||
SET optimizer_switch='rowid_filter=on';
|
||||
CREATE TABLE t0(a int);
|
||||
INSERT INTO t0 SELECT seq FROM seq_1_to_20;
|
||||
ANALYZE TABLE t0 PERSISTENT FOR ALL;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t0 analyze status Engine-independent statistics collected
|
||||
test.t0 analyze status OK
|
||||
CREATE TABLE t1 (
|
||||
a int,
|
||||
b int as (a * 2) VIRTUAL,
|
||||
f char(200), /* Filler */
|
||||
key (b),
|
||||
key (a)
|
||||
) engine=innodb;
|
||||
INSERT INTO t1 (a, f) SELECT seq, seq FROM seq_1_to_1000;
|
||||
ANALYZE TABLE t1 PERSISTENT FOR ALL;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
# Test for type 'ref|filter'
|
||||
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t0 ALL NULL NULL NULL NULL 20 Using where
|
||||
1 SIMPLE t1 ref|filter b,a b|a 5|5 test.t0.a 1 (2%) Using where; Using rowid filter
|
||||
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
|
||||
count(*)
|
||||
10
|
||||
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t0 ALL NULL NULL NULL NULL 20 Using where
|
||||
1 SIMPLE t1 ref|filter b,a b|a 5|5 test.t0.a 1 (2%) Using where; Using rowid filter
|
||||
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
|
||||
count(*)
|
||||
10
|
||||
# Test for type 'range|filter'
|
||||
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range|filter b,a b|a 5|5 NULL 49 (10%) Using where; Using rowid filter
|
||||
SELECT count(*) FROM t1 WHERE a<100 and b <100;
|
||||
count(*)
|
||||
49
|
||||
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range|filter b,a b|a 5|5 NULL 49 (10%) Using where; Using rowid filter
|
||||
SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
|
||||
count(*)
|
||||
49
|
||||
# Test with Primary Key
|
||||
#
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
p int PRIMARY KEY AUTO_INCREMENT,
|
||||
a int,
|
||||
b int as (a * 2) VIRTUAL,
|
||||
f char(200), /* Filler */
|
||||
key (b),
|
||||
key (a)
|
||||
) engine=innodb;
|
||||
INSERT INTO t1 (a, f) SELECT seq, seq FROM seq_1_to_1000;
|
||||
ANALYZE TABLE t1 PERSISTENT FOR ALL;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
# Test for type 'ref|filter'
|
||||
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t0 ALL NULL NULL NULL NULL 20 Using where
|
||||
1 SIMPLE t1 ref|filter b,a b|a 5|5 test.t0.a 1 (2%) Using where; Using rowid filter
|
||||
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
|
||||
count(*)
|
||||
10
|
||||
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t0 ALL NULL NULL NULL NULL 20 Using where
|
||||
1 SIMPLE t1 ref|filter b,a b|a 5|5 test.t0.a 1 (2%) Using where; Using rowid filter
|
||||
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
|
||||
count(*)
|
||||
10
|
||||
# Test for type 'range|filter'
|
||||
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range|filter b,a b|a 5|5 NULL 49 (10%) Using where; Using rowid filter
|
||||
SELECT count(*) FROM t1 WHERE a<100 and b <100;
|
||||
count(*)
|
||||
49
|
||||
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range|filter b,a b|a 5|5 NULL 49 (10%) Using where; Using rowid filter
|
||||
SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
|
||||
count(*)
|
||||
49
|
||||
SET optimizer_switch=@save_optimizer_switch;
|
||||
DROP TABLE t0, t1;
|
||||
# End of 10.4 tests
|
||||
# End of 10.6 tests
|
||||
set global innodb_stats_persistent= @stats.save;
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
--source include/no_valgrind_without_big.inc
|
||||
--source include/have_innodb.inc
|
||||
--source include/have_debug.inc
|
||||
--source include/have_sequence.inc
|
||||
--source include/innodb_stable_estimates.inc
|
||||
|
||||
SET SESSION DEFAULT_STORAGE_ENGINE='InnoDB';
|
||||
|
||||
@@ -683,6 +685,82 @@ eval EXPLAIN EXTENDED $q;
|
||||
|
||||
DROP TABLE t1,t2;
|
||||
|
||||
set global innodb_stats_persistent= @stats.save;
|
||||
--echo #
|
||||
--echo # MDEV-31154: Fatal InnoDB error or assertion `!is_v' failure upon multi-update with indexed virtual column
|
||||
--echo #
|
||||
|
||||
--echo # Test with auto generated Primary Key
|
||||
--echo #
|
||||
|
||||
SET @save_optimizer_switch= @@optimizer_switch;
|
||||
SET optimizer_switch='rowid_filter=on';
|
||||
|
||||
CREATE TABLE t0(a int);
|
||||
INSERT INTO t0 SELECT seq FROM seq_1_to_20;
|
||||
ANALYZE TABLE t0 PERSISTENT FOR ALL;
|
||||
|
||||
CREATE TABLE t1 (
|
||||
a int,
|
||||
b int as (a * 2) VIRTUAL,
|
||||
f char(200), /* Filler */
|
||||
key (b),
|
||||
key (a)
|
||||
) engine=innodb;
|
||||
|
||||
INSERT INTO t1 (a, f) SELECT seq, seq FROM seq_1_to_1000;
|
||||
ANALYZE TABLE t1 PERSISTENT FOR ALL;
|
||||
|
||||
--echo # Test for type 'ref|filter'
|
||||
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
|
||||
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
|
||||
|
||||
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
|
||||
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
|
||||
|
||||
--echo # Test for type 'range|filter'
|
||||
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100;
|
||||
SELECT count(*) FROM t1 WHERE a<100 and b <100;
|
||||
|
||||
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
|
||||
SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
|
||||
|
||||
--echo # Test with Primary Key
|
||||
--echo #
|
||||
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
p int PRIMARY KEY AUTO_INCREMENT,
|
||||
a int,
|
||||
b int as (a * 2) VIRTUAL,
|
||||
f char(200), /* Filler */
|
||||
key (b),
|
||||
key (a)
|
||||
) engine=innodb;
|
||||
|
||||
INSERT INTO t1 (a, f) SELECT seq, seq FROM seq_1_to_1000;
|
||||
ANALYZE TABLE t1 PERSISTENT FOR ALL;
|
||||
|
||||
--echo # Test for type 'ref|filter'
|
||||
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
|
||||
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
|
||||
|
||||
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
|
||||
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
|
||||
|
||||
--echo # Test for type 'range|filter'
|
||||
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100;
|
||||
SELECT count(*) FROM t1 WHERE a<100 and b <100;
|
||||
|
||||
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
|
||||
SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
|
||||
|
||||
SET optimizer_switch=@save_optimizer_switch;
|
||||
|
||||
DROP TABLE t0, t1;
|
||||
|
||||
--echo # End of 10.4 tests
|
||||
|
||||
--echo # End of 10.6 tests
|
||||
|
||||
set global innodb_stats_persistent= @stats.save;
|
||||
|
||||
|
||||
@@ -7179,15 +7179,14 @@ CREATE VIEW t1 AS SELECT 10 AS f1;
|
||||
CALL p1(1);
|
||||
ERROR HY000: The target table t1 of the INSERT is not insertable-into
|
||||
CREATE TEMPORARY TABLE t1 (f1 INT);
|
||||
# t1 still refers to the view since it was inlined
|
||||
CALL p1(2);
|
||||
ERROR HY000: The target table t1 of the INSERT is not insertable-into
|
||||
DROP VIEW t1;
|
||||
# t1 now refers to the temporary table
|
||||
CALL p1(3);
|
||||
# Check which values were inserted into the temp table.
|
||||
SELECT * FROM t1;
|
||||
f1
|
||||
2
|
||||
3
|
||||
DROP TEMPORARY TABLE t1;
|
||||
DROP PROCEDURE p1;
|
||||
|
||||
@@ -8632,8 +8632,6 @@ CALL p1(1);
|
||||
|
||||
CREATE TEMPORARY TABLE t1 (f1 INT);
|
||||
|
||||
--echo # t1 still refers to the view since it was inlined
|
||||
--error ER_NON_INSERTABLE_TABLE
|
||||
CALL p1(2);
|
||||
|
||||
DROP VIEW t1;
|
||||
|
||||
@@ -3303,4 +3303,33 @@ a
|
||||
2
|
||||
DEALLOCATE PREPARE stmt;
|
||||
DROP TABLE t1,t2,t3;
|
||||
#
|
||||
# MDEV-33747: Optimization of (SELECT) IN (SELECT ...) executes subquery at prepare stage
|
||||
#
|
||||
create table t1 (a int, b int);
|
||||
insert into t1 select seq, seq from seq_1_to_200;
|
||||
create table t2 as select * from t1;
|
||||
create table t3 as select * from t1;
|
||||
analyze table t1,t2,t3;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
test.t2 analyze status Engine-independent statistics collected
|
||||
test.t2 analyze status OK
|
||||
test.t3 analyze status Engine-independent statistics collected
|
||||
test.t3 analyze status OK
|
||||
select @@expensive_subquery_limit < 200 as DEFAULTS_ARE_SUITABLE;
|
||||
DEFAULTS_ARE_SUITABLE
|
||||
1
|
||||
flush status;
|
||||
explain select * from t1 where a<3 or (select max(a) from t2) in (select b from t3);
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 PRIMARY t1 ALL NULL NULL NULL NULL 200 Using where
|
||||
3 SUBQUERY t3 ALL NULL NULL NULL NULL 200 Using where
|
||||
2 SUBQUERY t2 ALL NULL NULL NULL NULL 200
|
||||
# Must show 0. If this shows 200, this means subquery was executed and you have a bug:
|
||||
show status like 'Handler_read_rnd_next%';
|
||||
Variable_name Value
|
||||
Handler_read_rnd_next 0
|
||||
drop table t1,t2,t3;
|
||||
# End of 10.4 tests
|
||||
|
||||
@@ -2670,4 +2670,19 @@ DEALLOCATE PREPARE stmt;
|
||||
|
||||
DROP TABLE t1,t2,t3;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-33747: Optimization of (SELECT) IN (SELECT ...) executes subquery at prepare stage
|
||||
--echo #
|
||||
create table t1 (a int, b int);
|
||||
insert into t1 select seq, seq from seq_1_to_200;
|
||||
create table t2 as select * from t1;
|
||||
create table t3 as select * from t1;
|
||||
analyze table t1,t2,t3;
|
||||
select @@expensive_subquery_limit < 200 as DEFAULTS_ARE_SUITABLE;
|
||||
flush status;
|
||||
explain select * from t1 where a<3 or (select max(a) from t2) in (select b from t3);
|
||||
--echo # Must show 0. If this shows 200, this means subquery was executed and you have a bug:
|
||||
show status like 'Handler_read_rnd_next%';
|
||||
drop table t1,t2,t3;
|
||||
|
||||
--echo # End of 10.4 tests
|
||||
|
||||
@@ -614,6 +614,55 @@ Tables_in_test
|
||||
# in 11.2 and above here should be listed above used temporary tables
|
||||
DROP TEMPORARY TABLE t1, t2;
|
||||
#
|
||||
# MDEV-33218: Assertion `active_arena->is_stmt_prepare_or_first_stmt_execute() || active_arena->state == Query_arena::STMT_SP_QUERY_ARGUMENTS' failed. in st_select_lex::fix_prepare_information
|
||||
#
|
||||
CREATE VIEW v1 AS SELECT 5;
|
||||
CREATE PROCEDURE sp() SELECT * FROM v1;
|
||||
CREATE TEMPORARY TABLE v1 as SELECT 7;
|
||||
# sp() accesses the temporary table v1 that hides the view with the same name
|
||||
# Therefore expected output is the row (7)
|
||||
CALL sp();
|
||||
7
|
||||
7
|
||||
DROP TEMPORARY TABLE v1;
|
||||
# After the temporary table v1 has been dropped the next invocation of sp()
|
||||
# accesses the view v1. So, expected output is the row (5)
|
||||
CALL sp();
|
||||
5
|
||||
5
|
||||
# Clean up
|
||||
DROP VIEW v1;
|
||||
DROP PROCEDURE sp;
|
||||
# Another use case is when a temporary table hides a view is dropped
|
||||
# inside a stored routine being called.
|
||||
CREATE VIEW t1 AS SELECT 1;
|
||||
CREATE PROCEDURE p1()
|
||||
BEGIN
|
||||
DROP TEMPORARY TABLE t1;
|
||||
END
|
||||
|
|
||||
CREATE FUNCTION f1() RETURNS INT
|
||||
BEGIN
|
||||
CALL p1();
|
||||
RETURN 1;
|
||||
END
|
||||
|
|
||||
CREATE TEMPORARY TABLE t1 AS SELECT 1 AS a;
|
||||
PREPARE stmt FROM 'SELECT f1()';
|
||||
EXECUTE stmt;
|
||||
f1()
|
||||
1
|
||||
# The temporary table t1 has been dropped on first
|
||||
# execution of the prepared statement 'stmt',
|
||||
# next time this statement is run it results in issuing
|
||||
# the error ER_BAD_TABLE_ERROR
|
||||
EXECUTE stmt;
|
||||
ERROR 42S02: Unknown table 'test.t1'
|
||||
# Clean up
|
||||
DROP VIEW t1;
|
||||
DROP FUNCTION f1;
|
||||
DROP PROCEDURE p1;
|
||||
#
|
||||
# End of 10.4 tests
|
||||
#
|
||||
create function f1() returns int
|
||||
|
||||
@@ -669,6 +669,60 @@ SHOW TABLES;
|
||||
|
||||
DROP TEMPORARY TABLE t1, t2;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-33218: Assertion `active_arena->is_stmt_prepare_or_first_stmt_execute() || active_arena->state == Query_arena::STMT_SP_QUERY_ARGUMENTS' failed. in st_select_lex::fix_prepare_information
|
||||
--echo #
|
||||
CREATE VIEW v1 AS SELECT 5;
|
||||
CREATE PROCEDURE sp() SELECT * FROM v1;
|
||||
CREATE TEMPORARY TABLE v1 as SELECT 7;
|
||||
--echo # sp() accesses the temporary table v1 that hides the view with the same name
|
||||
--echo # Therefore expected output is the row (7)
|
||||
CALL sp();
|
||||
DROP TEMPORARY TABLE v1;
|
||||
--echo # After the temporary table v1 has been dropped the next invocation of sp()
|
||||
--echo # accesses the view v1. So, expected output is the row (5)
|
||||
CALL sp();
|
||||
|
||||
--echo # Clean up
|
||||
DROP VIEW v1;
|
||||
DROP PROCEDURE sp;
|
||||
|
||||
--echo # Another use case is when a temporary table hides a view is dropped
|
||||
--echo # inside a stored routine being called.
|
||||
|
||||
CREATE VIEW t1 AS SELECT 1;
|
||||
|
||||
--delimiter |
|
||||
CREATE PROCEDURE p1()
|
||||
BEGIN
|
||||
DROP TEMPORARY TABLE t1;
|
||||
END
|
||||
|
|
||||
|
||||
CREATE FUNCTION f1() RETURNS INT
|
||||
BEGIN
|
||||
CALL p1();
|
||||
RETURN 1;
|
||||
END
|
||||
|
|
||||
|
||||
--delimiter ;
|
||||
|
||||
CREATE TEMPORARY TABLE t1 AS SELECT 1 AS a;
|
||||
PREPARE stmt FROM 'SELECT f1()';
|
||||
EXECUTE stmt;
|
||||
--echo # The temporary table t1 has been dropped on first
|
||||
--echo # execution of the prepared statement 'stmt',
|
||||
--echo # next time this statement is run it results in issuing
|
||||
--echo # the error ER_BAD_TABLE_ERROR
|
||||
--error ER_BAD_TABLE_ERROR
|
||||
EXECUTE stmt;
|
||||
|
||||
--echo # Clean up
|
||||
DROP VIEW t1;
|
||||
DROP FUNCTION f1;
|
||||
DROP PROCEDURE p1;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.4 tests
|
||||
--echo #
|
||||
|
||||
@@ -4550,8 +4550,6 @@ sub extract_warning_lines ($$) {
|
||||
qr/WSREP: Guessing address for incoming client/,
|
||||
|
||||
qr/InnoDB: Difficult to find free blocks in the buffer pool*/,
|
||||
# for UBSAN
|
||||
qr/decimal\.c.*: runtime error: signed integer overflow/,
|
||||
# Disable test for UBSAN on dynamically loaded objects
|
||||
qr/runtime error: member call.*object.*'Handler_share'/,
|
||||
qr/sql_type\.cc.* runtime error: member call.*object.* 'Type_collection'/,
|
||||
|
||||
@@ -1,18 +0,0 @@
# This is the first half of include/restart_mysqld.inc.
if ($rpl_inited)
{
if (!$allow_rpl_inited)
{
--die ERROR IN TEST: When using the replication test framework (master-slave.inc, rpl_init.inc etc), use rpl_restart_server.inc instead of restart_mysqld.inc. If you know what you are doing and you really have to use restart_mysqld.inc, set allow_rpl_inited=1 before you source restart_mysqld.inc
}
}

# Write file to make mysql-test-run.pl expect the "crash", but don't start it
--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')`
--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect
--exec echo "wait" > $_expect_file_name

# Send shutdown to the connected server
--shutdown_server
--source include/wait_until_disconnected.inc
mysql-test/suite/galera/r/MDEV-26499.result (new file)
@@ -0,0 +1,6 @@
|
||||
connection node_2;
|
||||
connection node_1;
|
||||
connection node_1;
|
||||
connection node_2;
|
||||
connection node_2;
|
||||
SET GLOBAL debug_dbug="+d,simulate_slow_client_at_shutdown";
|
||||
@@ -37,7 +37,7 @@ mysqld-bin.000002 # Gtid # # BEGIN GTID #-#-#
|
||||
mysqld-bin.000002 # Query # # use `test`; CREATE TABLE `ts1` (
|
||||
`f1` int(11) NOT NULL
|
||||
)
|
||||
mysqld-bin.000002 # Xid # # COMMIT /* XID */
|
||||
mysqld-bin.000002 # Query # # COMMIT
|
||||
connection node_2;
|
||||
include/show_binlog_events.inc
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
|
||||
@@ -1,5 +1,6 @@
package My::Suite::Galera;

use warnings;
use lib 'suite';
use wsrep::common;
@@ -63,7 +64,7 @@ push @::global_suppressions,
qr(WSREP: Failed to remove page file .*),
qr(WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to .*),
qr|WSREP: Sending JOIN failed: -107 \(Transport endpoint is not connected\). Will retry in new primary component.|,
qr|WSREP: Send action {.* STATE_REQUEST} returned -107 \(Transport endpoint is not connected\)|,
qr|WSREP: Send action \{.* STATE_REQUEST} returned -107 \(Transport endpoint is not connected\)|,
qr|WSREP: Trying to continue unpaused monitor|,
qr|WSREP: Wait for gtid returned error 3 while waiting for prior transactions to commit before setting position|,
qr|WSREP: Failed to report last committed|,
mysql-test/suite/galera/t/MDEV-26499.test (new file)
@@ -0,0 +1,20 @@
|
||||
#
|
||||
# MDEV-26499
|
||||
#
|
||||
# This test reproduces some failure on mysql_shutdown() call
|
||||
# which manifests sporadically in some galera MTR tests during
|
||||
# restart of a node.
|
||||
#
|
||||
|
||||
--source include/galera_cluster.inc
|
||||
--source include/have_debug_sync.inc
|
||||
|
||||
--let $node_1=node_1
|
||||
--let $node_2=node_2
|
||||
--source include/auto_increment_offset_save.inc
|
||||
|
||||
--connection node_2
|
||||
SET GLOBAL debug_dbug="+d,simulate_slow_client_at_shutdown";
|
||||
--source include/restart_mysqld.inc
|
||||
|
||||
--source include/auto_increment_offset_restore.inc
|
||||
mysql-test/suite/galera_sr/r/galera_sr_bf_abort_idle.result (new file)
@@ -0,0 +1,33 @@
|
||||
connection node_2;
|
||||
connection node_1;
|
||||
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
|
||||
connection node_1;
|
||||
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER);
|
||||
INSERT INTO t1 VALUES (1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1);
|
||||
SET SESSION wsrep_trx_fragment_size=10;
|
||||
SET SESSION wsrep_trx_fragment_unit='rows';
|
||||
START TRANSACTION;
|
||||
UPDATE t1 SET f2 = f2 + 10;
|
||||
connection node_2;
|
||||
INSERT INTO t1 VALUES (10,2);
|
||||
connection node_1a;
|
||||
connection node_1;
|
||||
INSERT INTO t1 VALUES (9,1);
|
||||
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
|
||||
ROLLBACK;
|
||||
DROP TABLE t1;
|
||||
connection node_1;
|
||||
CREATE TABLE t1(f1 INTEGER PRIMARY KEY, f2 INTEGER);
|
||||
INSERT INTO t1 VALUES (1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1);
|
||||
SET SESSION wsrep_trx_fragment_size=5;
|
||||
SET SESSION wsrep_trx_fragment_unit='rows';
|
||||
START TRANSACTION;
|
||||
UPDATE t1 SET f2 = f2 + 10;
|
||||
connection node_2;
|
||||
INSERT INTO t1 VALUES (10,2);
|
||||
connection node_1a;
|
||||
connection node_1;
|
||||
INSERT INTO t1 VALUES (9,1);
|
||||
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
|
||||
ROLLBACK;
|
||||
DROP TABLE t1;
|
||||
@@ -43,8 +43,9 @@ SET SESSION wsrep_sync_wait = 0;
|
||||
SET debug_sync = "now SIGNAL write_row_continue";
|
||||
|
||||
# Let's give the INSERT some time, to make sure it does rollback
|
||||
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO = "INSERT INTO t1 VALUES (1)" AND STATE = "Freeing items";
|
||||
--source include/wait_condition.inc
|
||||
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO = "INSERT INTO t1 VALUES (1)" AND (STATE = "Freeing items" OR STATE = 'Rollback');
|
||||
--let $wait_condition_on_error_output = SELECT INFO, STATE FROM INFORMATION_SCHEMA.PROCESSLIST
|
||||
--source include/wait_condition_with_debug.inc
|
||||
|
||||
# Resume the DDL in streaming_rollback
|
||||
SET SESSION debug_sync = "now SIGNAL wsrep_streaming_rollback_continue";
|
||||
|
||||
mysql-test/suite/galera_sr/t/galera_sr_bf_abort_idle.test (new file)
@@ -0,0 +1,68 @@
|
||||
#
|
||||
# Test BF abort for idle SR transactions
|
||||
#
|
||||
|
||||
--source include/galera_cluster.inc
|
||||
|
||||
--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
|
||||
|
||||
#
|
||||
# Case 1: BF abort idle SR transaction that has not yet replicated any fragments
|
||||
#
|
||||
--connection node_1
|
||||
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER);
|
||||
INSERT INTO t1 VALUES (1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1);
|
||||
|
||||
--let $bf_count = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.global_status WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'`
|
||||
|
||||
SET SESSION wsrep_trx_fragment_size=10;
|
||||
SET SESSION wsrep_trx_fragment_unit='rows';
|
||||
START TRANSACTION;
|
||||
UPDATE t1 SET f2 = f2 + 10;
|
||||
|
||||
--connection node_2
|
||||
INSERT INTO t1 VALUES (10,2);
|
||||
|
||||
# Wait for SR transaction to be BF aborted
|
||||
--connection node_1a
|
||||
--let $wait_condition = SELECT VARIABLE_VALUE = $bf_count + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'
|
||||
--source include/wait_condition.inc
|
||||
|
||||
|
||||
--connection node_1
|
||||
--error ER_LOCK_DEADLOCK
|
||||
INSERT INTO t1 VALUES (9,1);
|
||||
ROLLBACK;
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
|
||||
#
|
||||
# Case 2: BF abort idle SR transaction that has already replicated a fragment
|
||||
#
|
||||
--connection node_1
|
||||
CREATE TABLE t1(f1 INTEGER PRIMARY KEY, f2 INTEGER);
|
||||
INSERT INTO t1 VALUES (1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1);
|
||||
|
||||
--let $bf_count = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.global_status WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'`
|
||||
|
||||
|
||||
SET SESSION wsrep_trx_fragment_size=5;
|
||||
SET SESSION wsrep_trx_fragment_unit='rows';
|
||||
START TRANSACTION;
|
||||
UPDATE t1 SET f2 = f2 + 10;
|
||||
|
||||
--connection node_2
|
||||
INSERT INTO t1 VALUES (10,2);
|
||||
|
||||
# Wait for SR transaction to be BF aborted
|
||||
--connection node_1a
|
||||
--let $wait_condition = SELECT VARIABLE_VALUE = $bf_count + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'
|
||||
--source include/wait_condition.inc
|
||||
|
||||
--connection node_1
|
||||
--error ER_LOCK_DEADLOCK
|
||||
INSERT INTO t1 VALUES (9,1);
|
||||
ROLLBACK;
|
||||
|
||||
DROP TABLE t1;
|
||||
@@ -1,5 +1,5 @@
|
||||
--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
|
||||
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:29:25.129637040 +0530
|
||||
--- mysql-test/suite/innodb/r/check_ibd_filesize.result
|
||||
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject
|
||||
@@ -3,18 +3,12 @@
|
||||
# SPACE IN 5.7 THAN IN 5.6
|
||||
#
|
||||
@@ -14,7 +14,7 @@
|
||||
-# bytes: 65536
|
||||
+# bytes: 131072
|
||||
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
|
||||
-# bytes: 4194304
|
||||
-# bytes: 2097152
|
||||
-DROP TABLE t1;
|
||||
-CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB)
|
||||
-ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
|
||||
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:31:39.288769153 +0530
|
||||
--- mysql-test/suite/innodb/r/check_ibd_filesize.result
|
||||
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject
|
||||
@@ -3,18 +3,18 @@
|
||||
# SPACE IN 5.7 THAN IN 5.6
|
||||
#
|
||||
@@ -13,7 +13,7 @@
|
||||
-# bytes: 65536
|
||||
+# bytes: 16384
|
||||
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
|
||||
# bytes: 4194304
|
||||
# bytes: 2097152
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB)
|
||||
ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
|
||||
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:30:28.957174270 +0530
|
||||
--- mysql-test/suite/innodb/r/check_ibd_filesize.result
|
||||
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject
|
||||
@@ -3,18 +3,12 @@
|
||||
# SPACE IN 5.7 THAN IN 5.6
|
||||
#
|
||||
@@ -14,7 +14,7 @@
|
||||
-# bytes: 65536
|
||||
+# bytes: 262144
|
||||
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
|
||||
-# bytes: 4194304
|
||||
-# bytes: 2097152
|
||||
-DROP TABLE t1;
|
||||
-CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB)
|
||||
-ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
|
||||
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:31:03.516962339 +0530
|
||||
--- mysql-test/suite/innodb/r/check_ibd_filesize.result
|
||||
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject
|
||||
@@ -3,18 +3,18 @@
|
||||
# SPACE IN 5.7 THAN IN 5.6
|
||||
#
|
||||
@@ -13,7 +13,7 @@
|
||||
-# bytes: 65536
|
||||
+# bytes: 32768
|
||||
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
|
||||
# bytes: 4194304
|
||||
# bytes: 2097152
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB)
|
||||
ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1;
|
||||
|
||||
@@ -10,7 +10,7 @@ DROP TABLE t1;
|
||||
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
|
||||
# bytes: 65536
|
||||
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
|
||||
# bytes: 4194304
|
||||
# bytes: 2097152
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB)
|
||||
ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1;
|
||||
|
||||
mysql-test/suite/innodb/r/cursor-restore-unique-null.result (new file)
@@ -0,0 +1,24 @@
|
||||
CREATE TABLE t(a INT PRIMARY KEY, b INT, c INT, UNIQUE KEY `b_c` (`b`,`c`))
|
||||
ENGINE=InnoDB, STATS_PERSISTENT=0;
|
||||
INSERT INTO t SET a = 1, c = 2;
|
||||
connect con1,localhost,root;
|
||||
BEGIN;
|
||||
INSERT INTO t SET a=2, c=2;
|
||||
connection default;
|
||||
BEGIN;
|
||||
SET DEBUG_SYNC="lock_wait_start SIGNAL select_locked";
|
||||
SELECT * FROM t FORCE INDEX(b) FOR UPDATE;
|
||||
connection con1;
|
||||
SET DEBUG_SYNC="now WAIT_FOR select_locked";
|
||||
ROLLBACK;
|
||||
connection default;
|
||||
# If the bug is not fixed, and the both unique index key fields are
|
||||
# NULL, there will be two (1, NULL, 2) rows in the result,
|
||||
# because cursor will be restored to (NULL, 2, 1) position for
|
||||
# secondary key instead of "supremum".
|
||||
a b c
|
||||
1 NULL 2
|
||||
COMMIT;
|
||||
SET DEBUG_SYNC="RESET";
|
||||
disconnect con1;
|
||||
DROP TABLE t;
|
||||
@@ -64,6 +64,7 @@ alter table t1 discard tablespace;
flush tables t2 for export;
unlock tables;
alter table t1 import tablespace;
# restart
select * from t1;
z
42
@@ -123,7 +123,6 @@ ROLLBACK;
|
||||
connection con_weird;
|
||||
a b
|
||||
1 NULL
|
||||
1 NULL
|
||||
SELECT * FROM t FORCE INDEX (b) FOR UPDATE;
|
||||
a b
|
||||
1 NULL
|
||||
|
||||
mysql-test/suite/innodb/t/cursor-restore-unique-null.test (new file)
@@ -0,0 +1,36 @@
|
||||
--source include/have_innodb.inc
|
||||
--source include/have_debug.inc
|
||||
--source include/have_debug_sync.inc
|
||||
--source include/count_sessions.inc
|
||||
|
||||
|
||||
CREATE TABLE t(a INT PRIMARY KEY, b INT, c INT, UNIQUE KEY `b_c` (`b`,`c`))
|
||||
ENGINE=InnoDB, STATS_PERSISTENT=0;
|
||||
INSERT INTO t SET a = 1, c = 2;
|
||||
|
||||
--connect con1,localhost,root
|
||||
BEGIN;
|
||||
INSERT INTO t SET a=2, c=2;
|
||||
|
||||
--connection default
|
||||
BEGIN;
|
||||
SET DEBUG_SYNC="lock_wait_start SIGNAL select_locked";
|
||||
--send SELECT * FROM t FORCE INDEX(b) FOR UPDATE
|
||||
|
||||
--connection con1
|
||||
SET DEBUG_SYNC="now WAIT_FOR select_locked";
|
||||
ROLLBACK;
|
||||
|
||||
--connection default
|
||||
--echo # If the bug is not fixed, and the both unique index key fields are
|
||||
--echo # NULL, there will be two (1, NULL, 2) rows in the result,
|
||||
--echo # because cursor will be restored to (NULL, 2, 1) position for
|
||||
--echo # secondary key instead of "supremum".
|
||||
--reap
|
||||
COMMIT;
|
||||
|
||||
SET DEBUG_SYNC="RESET";
|
||||
|
||||
--disconnect con1
|
||||
DROP TABLE t;
|
||||
--source include/wait_until_count_sessions.inc
|
||||
@@ -256,3 +256,16 @@ select * from t1;
|
||||
check table t1;
|
||||
|
||||
drop database best;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-33214 Table is getting rebuild with
|
||||
--echo # ALTER TABLE ADD COLUMN
|
||||
--echo #
|
||||
use test;
|
||||
CREATE TABLE t1(f1 INT, f2 VARCHAR(10)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;
|
||||
INSERT INTO t1 VALUES(1,'abc'),(2,'def');
|
||||
ALTER TABLE t1 ADD (f3 VARCHAR(5000), f4 VARCHAR(20)), ALGORITHM=instant;
|
||||
ALTER TABLE t1 ADD f5 TEXT, ALGORITHM=INSTANT;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo # End of 10.4 tests
|
||||
|
||||
@@ -83,6 +83,7 @@ flush tables t2 for export;
|
||||
unlock tables;
|
||||
|
||||
alter table t1 import tablespace;
|
||||
--source include/restart_mysqld.inc
|
||||
select * from t1;
|
||||
--remove_file $MYSQLD_DATADIR/test/t1.ibd
|
||||
|
||||
|
||||
@@ -174,6 +174,105 @@ a
|
||||
10
|
||||
11
|
||||
12
|
||||
*** MDEV-33475: --gtid-ignore-duplicate can double-apply event in case of parallel replication retry
|
||||
connection server_2;
|
||||
STOP SLAVE "c2b";
|
||||
SET default_master_connection = "c2b";
|
||||
include/wait_for_slave_to_stop.inc
|
||||
STOP SLAVE "a2b";
|
||||
SET default_master_connection = "a2b";
|
||||
include/wait_for_slave_to_stop.inc
|
||||
connection server_1;
|
||||
CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
|
||||
BEGIN;
|
||||
INSERT INTO t2 VALUES (0, 0);
|
||||
INSERT INTO t2 VALUES (1, 0);
|
||||
INSERT INTO t2 VALUES (2, 0);
|
||||
INSERT INTO t2 VALUES (3, 0);
|
||||
INSERT INTO t2 VALUES (4, 0);
|
||||
INSERT INTO t2 VALUES (5, 0);
|
||||
INSERT INTO t2 VALUES (6, 0);
|
||||
INSERT INTO t2 VALUES (7, 0);
|
||||
INSERT INTO t2 VALUES (8, 0);
|
||||
INSERT INTO t2 VALUES (9, 0);
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
INSERT INTO t2 VALUES (0+10, 100);
|
||||
UPDATE t2 SET b=0 WHERE a<10;
|
||||
INSERT INTO t2 VALUES (0+20, 200);
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
INSERT INTO t2 VALUES (1+10, 100);
|
||||
UPDATE t2 SET b=1 WHERE a<10;
|
||||
INSERT INTO t2 VALUES (1+20, 200);
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
INSERT INTO t2 VALUES (2+10, 100);
|
||||
UPDATE t2 SET b=2 WHERE a<10;
|
||||
INSERT INTO t2 VALUES (2+20, 200);
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
INSERT INTO t2 VALUES (3+10, 100);
|
||||
UPDATE t2 SET b=3 WHERE a<10;
|
||||
INSERT INTO t2 VALUES (3+20, 200);
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
INSERT INTO t2 VALUES (4+10, 100);
|
||||
UPDATE t2 SET b=4 WHERE a<10;
|
||||
INSERT INTO t2 VALUES (4+20, 200);
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
INSERT INTO t2 VALUES (5+10, 100);
|
||||
UPDATE t2 SET b=5 WHERE a<10;
|
||||
INSERT INTO t2 VALUES (5+20, 200);
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
INSERT INTO t2 VALUES (6+10, 100);
|
||||
UPDATE t2 SET b=6 WHERE a<10;
|
||||
INSERT INTO t2 VALUES (6+20, 200);
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
INSERT INTO t2 VALUES (7+10, 100);
|
||||
UPDATE t2 SET b=7 WHERE a<10;
|
||||
INSERT INTO t2 VALUES (7+20, 200);
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
INSERT INTO t2 VALUES (8+10, 100);
|
||||
UPDATE t2 SET b=8 WHERE a<10;
|
||||
INSERT INTO t2 VALUES (8+20, 200);
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
INSERT INTO t2 VALUES (9+10, 100);
|
||||
UPDATE t2 SET b=9 WHERE a<10;
|
||||
INSERT INTO t2 VALUES (9+20, 200);
|
||||
COMMIT;
|
||||
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;
|
||||
COUNT(*) SUM(a) SUM(b)
|
||||
30 435 3090
|
||||
include/save_master_gtid.inc
|
||||
connection server_2;
|
||||
SET @old_mode= @@GLOBAL.slave_parallel_mode;
|
||||
SET GLOBAL slave_parallel_mode=aggressive;
|
||||
SET default_master_connection = "a2b";
|
||||
START SLAVE;
|
||||
include/wait_for_slave_to_start.inc
|
||||
SET default_master_connection = "c2b";
|
||||
START SLAVE;
|
||||
include/wait_for_slave_to_start.inc
|
||||
include/sync_with_master_gtid.inc
|
||||
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;
|
||||
COUNT(*) SUM(a) SUM(b)
|
||||
30 435 3090
|
||||
connection server_3;
|
||||
include/sync_with_master_gtid.inc
|
||||
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;
|
||||
COUNT(*) SUM(a) SUM(b)
|
||||
30 435 3090
|
||||
connection server_4;
|
||||
include/sync_with_master_gtid.inc
|
||||
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;
|
||||
COUNT(*) SUM(a) SUM(b)
|
||||
30 435 3090
|
||||
*** Test also with not using parallel replication.
|
||||
connection server_1;
|
||||
SET default_master_connection = "b2a";
|
||||
@@ -474,6 +573,7 @@ Warnings:
|
||||
Note 1938 SLAVE 'a2b' stopped
|
||||
Note 1938 SLAVE 'c2b' stopped
|
||||
SET GLOBAL slave_parallel_threads= @old_parallel;
|
||||
SET GLOBAL slave_parallel_mode= @old_mode;
|
||||
SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates;
|
||||
connection server_3;
|
||||
SET GLOBAL gtid_domain_id=0;
|
||||
@@ -491,22 +591,22 @@ Note 1938 SLAVE 'a2d' stopped
|
||||
SET GLOBAL slave_parallel_threads= @old_parallel;
|
||||
SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates;
|
||||
connection server_1;
|
||||
DROP TABLE t1;
|
||||
DROP TABLE t1, t2;
|
||||
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
|
||||
include/reset_master_slave.inc
|
||||
disconnect server_1;
|
||||
connection server_2;
|
||||
DROP TABLE t1;
|
||||
DROP TABLE t1, t2;
|
||||
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
|
||||
include/reset_master_slave.inc
|
||||
disconnect server_2;
|
||||
connection server_3;
|
||||
DROP TABLE t1;
|
||||
DROP TABLE t1, t2;
|
||||
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
|
||||
include/reset_master_slave.inc
|
||||
disconnect server_3;
|
||||
connection server_4;
|
||||
DROP TABLE t1;
|
||||
DROP TABLE t1, t2;
|
||||
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
|
||||
include/reset_master_slave.inc
|
||||
disconnect server_4;
|
||||
|
||||
@@ -173,6 +173,65 @@ SET default_master_connection = "a2b";
SELECT * FROM t1 WHERE a >= 10 ORDER BY a;

--echo *** MDEV-33475: --gtid-ignore-duplicate can double-apply event in case of parallel replication retry

# Create a bunch of transactions that will cause conflicts and retries.
# The bug was that the retry code was not handling the --gtid-ignore-duplicates
# option, so events could be doubly-applied.

--connection server_2
STOP SLAVE "c2b";
SET default_master_connection = "c2b";
--source include/wait_for_slave_to_stop.inc
STOP SLAVE "a2b";
SET default_master_connection = "a2b";
--source include/wait_for_slave_to_stop.inc

--connection server_1
CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
BEGIN;
--let $i= 0
while ($i < 10) {
eval INSERT INTO t2 VALUES ($i, 0);
inc $i;
}
COMMIT;

--let $i= 0
while ($i < 10) {
BEGIN;
eval INSERT INTO t2 VALUES ($i+10, 100);
eval UPDATE t2 SET b=$i WHERE a<10;
eval INSERT INTO t2 VALUES ($i+20, 200);
COMMIT;
inc $i;
}

SELECT COUNT(*), SUM(a), SUM(b) FROM t2;
--source include/save_master_gtid.inc

--connection server_2
SET @old_mode= @@GLOBAL.slave_parallel_mode;
SET GLOBAL slave_parallel_mode=aggressive;
SET default_master_connection = "a2b";
START SLAVE;
--source include/wait_for_slave_to_start.inc
SET default_master_connection = "c2b";
START SLAVE;
--source include/wait_for_slave_to_start.inc

--source include/sync_with_master_gtid.inc
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;

--connection server_3
--source include/sync_with_master_gtid.inc
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;

--connection server_4
--source include/sync_with_master_gtid.inc
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;

--echo *** Test also with not using parallel replication.

--connection server_1
@@ -414,6 +473,7 @@ SET GLOBAL gtid_domain_id=0;
--sorted_result
STOP ALL SLAVES;
SET GLOBAL slave_parallel_threads= @old_parallel;
SET GLOBAL slave_parallel_mode= @old_mode;
SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates;

--connection server_3
@@ -431,25 +491,25 @@ SET GLOBAL slave_parallel_threads= @old_parallel;
SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates;

--connection server_1
DROP TABLE t1;
DROP TABLE t1, t2;
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
--source include/reset_master_slave.inc
--disconnect server_1

--connection server_2
DROP TABLE t1;
DROP TABLE t1, t2;
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
--source include/reset_master_slave.inc
--disconnect server_2

--connection server_3
DROP TABLE t1;
DROP TABLE t1, t2;
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
--source include/reset_master_slave.inc
--disconnect server_3

--connection server_4
DROP TABLE t1;
DROP TABLE t1, t2;
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
--source include/reset_master_slave.inc
--disconnect server_4
5
mysql-test/suite/parts/r/mdev_21007.result
Normal file
@@ -0,0 +1,5 @@
CREATE TABLE t1 (a INT) PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (1), PARTITION p1 VALUES LESS THAN (MAXVALUE));
INSERT INTO t1 VALUES (1),(2);
ALTER TABLE t1 MODIFY a INT AUTO_INCREMENT PRIMARY KEY;
UPDATE t1 PARTITION (p1) SET a=9 ORDER BY a LIMIT 1;
DROP TABLE t1;
9
mysql-test/suite/parts/t/mdev_21007.test
Normal file
@@ -0,0 +1,9 @@
--source include/have_partition.inc

CREATE TABLE t1 (a INT) PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (1), PARTITION p1 VALUES LESS THAN (MAXVALUE));
INSERT INTO t1 VALUES (1),(2);
ALTER TABLE t1 MODIFY a INT AUTO_INCREMENT PRIMARY KEY;
UPDATE t1 PARTITION (p1) SET a=9 ORDER BY a LIMIT 1;

# Cleanup
DROP TABLE t1;
@@ -125,7 +125,7 @@ include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
|
||||
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
|
||||
SET @@global.innodb_lock_wait_timeout =1;
|
||||
SET @@global.innodb_lock_wait_timeout =5;
|
||||
SET @@global.slave_transaction_retries=0;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
@@ -168,7 +168,7 @@ include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
|
||||
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
|
||||
SET @@global.innodb_lock_wait_timeout =1;
|
||||
SET @@global.innodb_lock_wait_timeout =5;
|
||||
SET @@global.slave_transaction_retries=0;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
|
||||
@@ -128,7 +128,7 @@ include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
|
||||
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
|
||||
SET @@global.innodb_lock_wait_timeout =1;
|
||||
SET @@global.innodb_lock_wait_timeout =5;
|
||||
SET @@global.slave_transaction_retries=0;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
@@ -171,7 +171,7 @@ include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
|
||||
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
|
||||
SET @@global.innodb_lock_wait_timeout =1;
|
||||
SET @@global.innodb_lock_wait_timeout =5;
|
||||
SET @@global.slave_transaction_retries=0;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
|
||||
@@ -128,7 +128,7 @@ include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
|
||||
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
|
||||
SET @@global.innodb_lock_wait_timeout =1;
|
||||
SET @@global.innodb_lock_wait_timeout =5;
|
||||
SET @@global.slave_transaction_retries=0;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
@@ -171,7 +171,7 @@ include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
|
||||
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
|
||||
SET @@global.innodb_lock_wait_timeout =1;
|
||||
SET @@global.innodb_lock_wait_timeout =5;
|
||||
SET @@global.slave_transaction_retries=0;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
|
||||
@@ -21,8 +21,9 @@ INSERT INTO t2 VALUES(1);
SELECT * FROM t2;
i
1
include/save_master_gtid.inc
connection slave;
connection slave;
include/sync_with_master_gtid.inc
SELECT * FROM t1;
i
1
@@ -41,7 +41,7 @@ if ($slave_ooo_error)
{
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
SET @@global.innodb_lock_wait_timeout =1;
SET @@global.innodb_lock_wait_timeout =5;
SET @@global.slave_transaction_retries=0;
}
--source include/start_slave.inc

@@ -35,9 +35,10 @@ SET @@session.gtid_domain_id= 1;
INSERT INTO t2 VALUES(1);
SELECT * FROM t2;

sync_slave_with_master;
source include/save_master_gtid.inc;

connection slave;
source include/sync_with_master_gtid.inc;
SELECT * FROM t1;
SELECT * FROM t2;
@@ -133,7 +133,7 @@ double getopt_ulonglong2double(ulonglong v)
|
||||
return u.dbl;
|
||||
}
|
||||
|
||||
#define SET_HO_ERROR_AND_CONTINUE(e) { ho_error= (e); continue; }
|
||||
#define SET_HO_ERROR_AND_CONTINUE(e) { ho_error= (e); (*argc)--; continue; }
|
||||
|
||||
/**
|
||||
Handle command line options.
|
||||
@@ -864,7 +864,7 @@ static int setval(const struct my_option *opts, void *value, char *argument,
|
||||
}
|
||||
if (err)
|
||||
{
|
||||
res= EXIT_UNKNOWN_SUFFIX;
|
||||
res= err;
|
||||
goto ret;
|
||||
};
|
||||
}
|
||||
@@ -998,7 +998,7 @@ static inline ulonglong eval_num_suffix(char *suffix, int *error)
|
||||
case 'E':
|
||||
return 1ULL << 60;
|
||||
default:
|
||||
*error= 1;
|
||||
*error= EXIT_UNKNOWN_SUFFIX;
|
||||
return 0ULL;
|
||||
}
|
||||
}
|
||||
@@ -1024,15 +1024,18 @@ static longlong eval_num_suffix_ll(char *argument,
|
||||
if (errno == ERANGE)
|
||||
{
|
||||
my_getopt_error_reporter(ERROR_LEVEL,
|
||||
"Incorrect integer value: '%s'", argument);
|
||||
*error= 1;
|
||||
"Integer value out of range for int64:"
|
||||
" '%s' for %s",
|
||||
argument, option_name);
|
||||
*error= EXIT_ARGUMENT_INVALID;
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
num*= eval_num_suffix(endchar, error);
|
||||
if (*error)
|
||||
fprintf(stderr,
|
||||
"Unknown suffix '%c' used for variable '%s' (value '%s')\n",
|
||||
*endchar, option_name, argument);
|
||||
my_getopt_error_reporter(ERROR_LEVEL,
|
||||
"Unknown suffix '%c' used for variable '%s' (value '%s'). "
|
||||
"Legal suffix characters are: K, M, G, T, P, E",
|
||||
*endchar, option_name, argument);
|
||||
DBUG_RETURN(num);
|
||||
}
|
||||
|
||||
@@ -1055,7 +1058,7 @@ static ulonglong eval_num_suffix_ull(char *argument,
|
||||
my_getopt_error_reporter(ERROR_LEVEL,
|
||||
"Incorrect unsigned value: '%s' for %s",
|
||||
argument, option_name);
|
||||
*error= 1;
|
||||
*error= EXIT_ARGUMENT_INVALID;
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
*error= 0;
|
||||
@@ -1064,15 +1067,18 @@ static ulonglong eval_num_suffix_ull(char *argument,
|
||||
if (errno == ERANGE)
|
||||
{
|
||||
my_getopt_error_reporter(ERROR_LEVEL,
|
||||
"Incorrect integer value: '%s' for %s",
|
||||
"Integer value out of range for uint64:"
|
||||
" '%s' for %s",
|
||||
argument, option_name);
|
||||
*error= 1;
|
||||
*error= EXIT_ARGUMENT_INVALID;
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
num*= eval_num_suffix(endchar, error);
|
||||
if (*error)
|
||||
my_getopt_error_reporter(ERROR_LEVEL,
|
||||
"Unknown suffix '%c' used for variable '%s' (value '%s')",
|
||||
"Unknown suffix '%c' used for variable '%s'"
|
||||
" (value '%s')."
|
||||
" Legal suffix characters are: K, M, G, T, P, E",
|
||||
*endchar, option_name, argument);
|
||||
DBUG_RETURN(num);
|
||||
}
|
||||
@@ -1092,6 +1098,8 @@ static ulonglong eval_num_suffix_ull(char *argument,
|
||||
static longlong getopt_ll(char *arg, const struct my_option *optp, int *err)
|
||||
{
|
||||
longlong num=eval_num_suffix_ll(arg, err, (char*) optp->name);
|
||||
if (*err)
|
||||
return(0);
|
||||
return getopt_ll_limit_value(num, optp, NULL);
|
||||
}
|
||||
|
||||
@@ -1169,6 +1177,8 @@ longlong getopt_ll_limit_value(longlong num, const struct my_option *optp,
|
||||
static ulonglong getopt_ull(char *arg, const struct my_option *optp, int *err)
|
||||
{
|
||||
ulonglong num= eval_num_suffix_ull(arg, err, (char*) optp->name);
|
||||
if (*err)
|
||||
return(0);
|
||||
return getopt_ull_limit_value(num, optp, NULL);
|
||||
}
|
||||
|
||||
|
||||
@@ -339,6 +339,10 @@ static int free(void *p)
|
||||
shutdown_plugin= true;
|
||||
mysql_cond_signal(&sleep_condition);
|
||||
mysql_mutex_unlock(&sleep_mutex);
|
||||
|
||||
for (uint i= 0; i < url_count; i++)
|
||||
urls[i]->abort();
|
||||
|
||||
pthread_join(sender_thread, NULL);
|
||||
|
||||
mysql_mutex_destroy(&sleep_mutex);
|
||||
|
||||
@@ -52,6 +52,7 @@ class Url {
|
||||
|
||||
const char *url() { return full_url.str; }
|
||||
size_t url_length() { return full_url.length; }
|
||||
virtual void abort() = 0;
|
||||
virtual int send(const char* data, size_t data_length) = 0;
|
||||
virtual int set_proxy(const char *proxy, size_t proxy_len)
|
||||
{
|
||||
|
||||
@@ -37,8 +37,9 @@ static const uint FOR_WRITING= 1;
|
||||
class Url_http: public Url {
|
||||
protected:
|
||||
const LEX_STRING host, port, path;
|
||||
bool ssl;
|
||||
LEX_STRING proxy_host, proxy_port;
|
||||
my_socket fd;
|
||||
bool ssl;
|
||||
|
||||
bool use_proxy()
|
||||
{
|
||||
@@ -47,7 +48,8 @@ class Url_http: public Url {
|
||||
|
||||
Url_http(LEX_STRING &url_arg, LEX_STRING &host_arg,
|
||||
LEX_STRING &port_arg, LEX_STRING &path_arg, bool ssl_arg) :
|
||||
Url(url_arg), host(host_arg), port(port_arg), path(path_arg), ssl(ssl_arg)
|
||||
Url(url_arg), host(host_arg), port(port_arg), path(path_arg),
|
||||
fd(INVALID_SOCKET), ssl(ssl_arg)
|
||||
{
|
||||
proxy_host.length= 0;
|
||||
}
|
||||
@@ -60,6 +62,7 @@ class Url_http: public Url {
|
||||
}
|
||||
|
||||
public:
|
||||
void abort();
|
||||
int send(const char* data, size_t data_length);
|
||||
int set_proxy(const char *proxy, size_t proxy_len)
|
||||
{
|
||||
@@ -158,13 +161,18 @@ Url* http_create(const char *url, size_t url_length)
|
||||
return new Url_http(full_url, host, port, path, ssl);
|
||||
}
|
||||
|
||||
void Url_http::abort()
|
||||
{
|
||||
if (fd != INVALID_SOCKET)
|
||||
closesocket(fd); // interrupt I/O waits
|
||||
}
|
||||
|
||||
/* do the vio_write and check that all data were sent ok */
|
||||
#define write_check(VIO, DATA, LEN) \
|
||||
(vio_write((VIO), (uchar*)(DATA), (LEN)) != (LEN))
|
||||
|
||||
int Url_http::send(const char* data, size_t data_length)
|
||||
{
|
||||
my_socket fd= INVALID_SOCKET;
|
||||
char buf[1024];
|
||||
size_t len= 0;
|
||||
|
||||
@@ -180,6 +188,7 @@ int Url_http::send(const char* data, size_t data_length)
|
||||
return 1;
|
||||
}
|
||||
|
||||
DBUG_ASSERT(fd == INVALID_SOCKET);
|
||||
for (addr= addrs; addr != NULL; addr= addr->ai_next)
|
||||
{
|
||||
fd= socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol);
|
||||
@@ -208,6 +217,7 @@ int Url_http::send(const char* data, size_t data_length)
|
||||
sql_print_error("feedback plugin: vio_new failed for url '%s'",
|
||||
full_url.str);
|
||||
closesocket(fd);
|
||||
fd= INVALID_SOCKET;
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -236,6 +246,7 @@ int Url_http::send(const char* data, size_t data_length)
|
||||
free_vio_ssl_acceptor_fd(ssl_fd);
|
||||
closesocket(fd);
|
||||
vio_delete(vio);
|
||||
fd= INVALID_SOCKET;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
@@ -334,6 +345,7 @@ int Url_http::send(const char* data, size_t data_length)
|
||||
}
|
||||
#endif
|
||||
|
||||
fd= INVALID_SOCKET;
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
@@ -2358,3 +2358,14 @@ Warning 1292 Incorrect inet6 value: ''
Warning 1292 Incorrect inet6 value: ''
Warning 1292 Incorrect inet6 value: ''
DROP TABLE t1;
#
# MDEV-32458 ASAN unknown-crash in Inet6::ascii_to_fbt when casting character string to inet6
#
CREATE TABLE t1 (c CHAR(3));
INSERT INTO t1 VALUES ('1:0'),('00:');
SELECT * FROM t1 WHERE c>CAST('::1' AS INET6);
c
Warnings:
Warning 1292 Incorrect inet6 value: '1:0'
Warning 1292 Incorrect inet6 value: '00:'
DROP TABLE t1;

@@ -1701,3 +1701,13 @@ SELECT 1.00 + (b = a) AS f FROM t1 ORDER BY f;
SELECT 1.00 + (b BETWEEN a AND '') AS f FROM t1 ORDER BY f;
SELECT 1.00 + (b IN (a,'')) AS f FROM t1 ORDER BY f;
DROP TABLE t1;

--echo #
--echo # MDEV-32458 ASAN unknown-crash in Inet6::ascii_to_fbt when casting character string to inet6
--echo #

CREATE TABLE t1 (c CHAR(3));
INSERT INTO t1 VALUES ('1:0'),('00:');
SELECT * FROM t1 WHERE c>CAST('::1' AS INET6);
DROP TABLE t1;
@@ -229,7 +229,7 @@ bool Inet6::ascii_to_fbt(const char *str, size_t str_length)
continue;
}

if (!*p || p >= str_end)
if (p >= str_end || !*p)
{
DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: "
"ending at ':'.", (int) str_length, str));
@@ -36,8 +36,8 @@ public:
static Create_func_trt<TRT_FIELD> s_singleton;

protected:
Create_func_trt<TRT_FIELD>() = default;
virtual ~Create_func_trt<TRT_FIELD>() = default;
Create_func_trt() = default;
virtual ~Create_func_trt() = default;
};

template<TR_table::field_id_t TRT_FIELD>
@@ -132,8 +132,8 @@ public:
static Create_func_trt_trx_sees<Item_func_trt_trx_seesX> s_singleton;

protected:
Create_func_trt_trx_sees<Item_func_trt_trx_seesX>() = default;
virtual ~Create_func_trt_trx_sees<Item_func_trt_trx_seesX>() = default;
Create_func_trt_trx_sees() = default;
virtual ~Create_func_trt_trx_sees() = default;
};

template<class X>
@@ -693,6 +693,7 @@ int ha_partition::create_partitioning_metadata(const char *path,
partition_element *part;
DBUG_ENTER("ha_partition::create_partitioning_metadata");

mark_trx_read_write();
/*
We need to update total number of parts since we might write the handler
file as part of a partition management command
@@ -8636,7 +8637,7 @@ int ha_partition::info(uint flag)
file->stats.auto_increment_value);
} while (*(++file_array));

DBUG_ASSERT(auto_increment_value);
DBUG_ASSERT(!all_parts_opened || auto_increment_value);
stats.auto_increment_value= auto_increment_value;
if (all_parts_opened && auto_inc_is_first_in_idx)
{

@@ -5596,8 +5596,6 @@ handler::ha_create_partitioning_metadata(const char *name,
DBUG_ASSERT(m_lock_type == F_UNLCK ||
(!old_name && strcmp(name, table_share->path.str)));

mark_trx_read_write();
return create_partitioning_metadata(name, old_name, action_flag);
}
@@ -1411,7 +1411,13 @@ bool Item_in_optimizer::fix_left(THD *thd)
eval_not_null_tables(NULL);
with_flags|= (args[0]->with_flags |
(args[1]->with_flags & item_with_t::SP_VAR));
if ((const_item_cache= args[0]->const_item()))

/*
If left expression is a constant, cache its value.
But don't do that if that involves computing a subquery, as we are in a
prepare-phase rewrite.
*/
if ((const_item_cache= args[0]->const_item()) && !args[0]->with_subquery())
{
cache->store(args[0]);
cache->cache_value();

@@ -3529,13 +3529,11 @@ template <template<class> class LI, typename T> class Item_equal_iterator
{
protected:
Item_equal *item_equal;
Item *curr_item;
Item *curr_item= nullptr;
public:
Item_equal_iterator<LI,T>(Item_equal &item_eq)
:LI<T> (item_eq.equal_items)
Item_equal_iterator(Item_equal &item_eq)
:LI<T> (item_eq.equal_items), item_equal(&item_eq)
{
curr_item= NULL;
item_equal= &item_eq;
if (item_eq.with_const)
{
LI<T> *list_it= this;
@@ -1377,15 +1377,16 @@ String *Item_func_regexp_replace::val_str_internal(String *str,
|
||||
LEX_CSTRING src, rpl;
|
||||
size_t startoffset= 0;
|
||||
|
||||
if ((null_value=
|
||||
(!(source= args[0]->val_str(&tmp0)) ||
|
||||
!(replace= args[2]->val_str_null_to_empty(&tmp2, null_to_empty)) ||
|
||||
re.recompile(args[1]))))
|
||||
return (String *) 0;
|
||||
|
||||
source= args[0]->val_str(&tmp0);
|
||||
if (!source)
|
||||
goto err;
|
||||
replace= args[2]->val_str_null_to_empty(&tmp2, null_to_empty);
|
||||
if (!replace || re.recompile(args[1]))
|
||||
goto err;
|
||||
if (!(source= re.convert_if_needed(source, &re.subject_converter)) ||
|
||||
!(replace= re.convert_if_needed(replace, &re.replace_converter)))
|
||||
goto err;
|
||||
null_value= false;
|
||||
|
||||
source->get_value(&src);
|
||||
replace->get_value(&rpl);
|
||||
@@ -1431,7 +1432,7 @@ String *Item_func_regexp_replace::val_str_internal(String *str,
|
||||
|
||||
err:
|
||||
null_value= true;
|
||||
return (String *) 0;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
||||
@@ -1567,13 +1568,21 @@ bool Item_func_insert::fix_length_and_dec()
|
||||
String *Item_str_conv::val_str(String *str)
|
||||
{
|
||||
DBUG_ASSERT(fixed());
|
||||
String *res;
|
||||
size_t alloced_length, len;
|
||||
String *res= args[0]->val_str(&tmp_value);
|
||||
|
||||
if ((null_value= (!(res= args[0]->val_str(&tmp_value)) ||
|
||||
str->alloc((alloced_length= res->length() * multiply)))))
|
||||
return 0;
|
||||
if (!res)
|
||||
{
|
||||
err:
|
||||
null_value= true;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
size_t alloced_length= res->length() * multiply, len;
|
||||
|
||||
if (str->alloc((alloced_length)))
|
||||
goto err;
|
||||
|
||||
null_value= false;
|
||||
len= converter(collation.collation, (char*) res->ptr(), res->length(),
|
||||
(char*) str->ptr(), alloced_length);
|
||||
DBUG_ASSERT(len <= alloced_length);
|
||||
|
||||
@@ -568,6 +568,9 @@ void Item_subselect::recalc_used_tables(st_select_lex *new_parent,
This measure is used instead of JOIN::read_time, because it is considered
to be much more reliable than the cost estimate.

Note: the logic in this function must agree with
JOIN::init_join_cache_and_keyread().

@return true if the subquery is expensive
@return false otherwise
*/
@@ -4013,8 +4013,9 @@ static int init_common_variables()
SQLCOM_END + 10);
#endif

if (get_options(&remaining_argc, &remaining_argv))
exit(1);
int opt_err;
if ((opt_err= get_options(&remaining_argc, &remaining_argv)))
exit(opt_err);
if (IS_SYSVAR_AUTOSIZE(&server_version_ptr))
set_server_version(server_version, sizeof(server_version));
@@ -212,6 +212,13 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id,
signal_error_to_sql_driver_thread(thd, rgi, err);
thd->wait_for_commit_ptr= NULL;

/*
Calls to check_duplicate_gtid() must match up with
record_and_update_gtid() (or release_domain_owner() in error case). This
assertion tries to catch any missing release of the domain.
*/
DBUG_ASSERT(rgi->gtid_ignore_duplicate_state != rpl_group_info::GTID_DUPLICATE_OWNER);

mysql_mutex_lock(&entry->LOCK_parallel_entry);
/*
We need to mark that this event group started its commit phase, in case we
@@ -875,7 +882,13 @@ do_retry:
});
#endif

rgi->cleanup_context(thd, 1);
/*
We are still applying the event group, even though we will roll it back
and retry it. So for --gtid-ignore-duplicates, keep ownership of the
domain during the retry so another master connection will not try to take
over and duplicate apply the same event group (MDEV-33475).
*/
rgi->cleanup_context(thd, 1, 1 /* keep_domain_owner */);
wait_for_pending_deadlock_kill(thd, rgi);
thd->reset_killed();
thd->clear_error();

@@ -2253,7 +2253,7 @@ delete_or_keep_event_post_apply(rpl_group_info *rgi,
}

void rpl_group_info::cleanup_context(THD *thd, bool error)
void rpl_group_info::cleanup_context(THD *thd, bool error, bool keep_domain_owner)
{
DBUG_ENTER("rpl_group_info::cleanup_context");
DBUG_PRINT("enter", ("error: %d", (int) error));
@@ -2308,7 +2308,7 @@ void rpl_group_info::cleanup_context(THD *thd, bool error)
Ensure we always release the domain for others to process, when using
--gtid-ignore-duplicates.
*/
if (gtid_ignore_duplicate_state != GTID_DUPLICATE_NULL)
if (gtid_ignore_duplicate_state != GTID_DUPLICATE_NULL && !keep_domain_owner)
rpl_global_gtid_slave_state->release_domain_owner(this);
}

@@ -922,7 +922,7 @@ struct rpl_group_info
}

void clear_tables_to_lock();
void cleanup_context(THD *, bool);
void cleanup_context(THD *, bool, bool keep_domain_owner= false);
void slave_close_thread_tables(THD *);
void mark_start_commit_no_lock();
void mark_start_commit();
22
sql/sp.cc
22
sql/sp.cc
@@ -1946,7 +1946,7 @@ Sp_handler::sp_show_create_routine(THD *thd,
|
||||
|
||||
DBUG_EXECUTE_IF("cache_sp_in_show_create",
|
||||
/* Some tests need just need a way to cache SP without other side-effects.*/
|
||||
sp_cache_routine(thd, name, false, &sp);
|
||||
sp_cache_routine(thd, name, &sp);
|
||||
sp->show_create_routine(thd, this);
|
||||
DBUG_RETURN(false);
|
||||
);
|
||||
@@ -2370,7 +2370,7 @@ Sp_handler::sp_cache_routine_reentrant(THD *thd,
|
||||
int ret;
|
||||
Parser_state *oldps= thd->m_parser_state;
|
||||
thd->m_parser_state= NULL;
|
||||
ret= sp_cache_routine(thd, name, false, sp);
|
||||
ret= sp_cache_routine(thd, name, sp);
|
||||
thd->m_parser_state= oldps;
|
||||
return ret;
|
||||
}
|
||||
@@ -2777,7 +2777,6 @@ void sp_update_stmt_used_routines(THD *thd, Query_tables_list *prelocking_ctx,
|
||||
*/
|
||||
|
||||
int Sroutine_hash_entry::sp_cache_routine(THD *thd,
|
||||
bool lookup_only,
|
||||
sp_head **sp) const
|
||||
{
|
||||
char qname_buff[NAME_LEN*2+1+1];
|
||||
@@ -2790,7 +2789,7 @@ int Sroutine_hash_entry::sp_cache_routine(THD *thd,
|
||||
*/
|
||||
DBUG_ASSERT(mdl_request.ticket || this == thd->lex->sroutines_list.first);
|
||||
|
||||
return m_handler->sp_cache_routine(thd, &name, lookup_only, sp);
|
||||
return m_handler->sp_cache_routine(thd, &name, sp);
|
||||
}
|
||||
|
||||
|
||||
@@ -2802,9 +2801,6 @@ int Sroutine_hash_entry::sp_cache_routine(THD *thd,
|
||||
|
||||
@param[in] thd Thread context.
|
||||
@param[in] name Name of routine.
|
||||
@param[in] lookup_only Only check that the routine is in the cache.
|
||||
If it's not, don't try to load. If it is present,
|
||||
but old, don't try to reload.
|
||||
@param[out] sp Pointer to sp_head object for routine, NULL if routine was
|
||||
not found.
|
||||
|
||||
@@ -2815,7 +2811,6 @@ int Sroutine_hash_entry::sp_cache_routine(THD *thd,
|
||||
|
||||
int Sp_handler::sp_cache_routine(THD *thd,
|
||||
const Database_qualified_name *name,
|
||||
bool lookup_only,
|
||||
sp_head **sp) const
|
||||
{
|
||||
int ret= 0;
|
||||
@@ -2827,9 +2822,6 @@ int Sp_handler::sp_cache_routine(THD *thd,
|
||||
|
||||
*sp= sp_cache_lookup(spc, name);
|
||||
|
||||
if (lookup_only)
|
||||
DBUG_RETURN(SP_OK);
|
||||
|
||||
if (*sp)
|
||||
{
|
||||
sp_cache_flush_obsolete(spc, sp);
|
||||
@@ -2881,7 +2873,6 @@ int Sp_handler::sp_cache_routine(THD *thd,
|
||||
* name->m_db is a database name, e.g. "dbname"
|
||||
* name->m_name is a package-qualified name,
|
||||
e.g. "pkgname.spname"
|
||||
@param lookup_only - don't load mysql.proc if not cached
|
||||
@param [OUT] sp - the result is returned here.
|
||||
@retval false - loaded or does not exists
|
||||
@retval true - error while loading mysql.proc
|
||||
@@ -2891,14 +2882,13 @@ int
|
||||
Sp_handler::sp_cache_package_routine(THD *thd,
|
||||
const LEX_CSTRING &pkgname_cstr,
|
||||
const Database_qualified_name *name,
|
||||
bool lookup_only, sp_head **sp) const
|
||||
sp_head **sp) const
|
||||
{
|
||||
DBUG_ENTER("sp_cache_package_routine");
|
||||
DBUG_ASSERT(type() == SP_TYPE_FUNCTION || type() == SP_TYPE_PROCEDURE);
|
||||
sp_name pkgname(&name->m_db, &pkgname_cstr, false);
|
||||
sp_head *ph= NULL;
|
||||
int ret= sp_handler_package_body.sp_cache_routine(thd, &pkgname,
|
||||
lookup_only,
|
||||
&ph);
|
||||
if (!ret)
|
||||
{
|
||||
@@ -2933,12 +2923,12 @@ Sp_handler::sp_cache_package_routine(THD *thd,
|
||||
|
||||
int Sp_handler::sp_cache_package_routine(THD *thd,
|
||||
const Database_qualified_name *name,
|
||||
bool lookup_only, sp_head **sp) const
|
||||
sp_head **sp) const
|
||||
{
|
||||
DBUG_ENTER("Sp_handler::sp_cache_package_routine");
|
||||
Prefix_name_buf pkgname(thd, name->m_name);
|
||||
DBUG_ASSERT(pkgname.length);
|
||||
DBUG_RETURN(sp_cache_package_routine(thd, pkgname, name, lookup_only, sp));
|
||||
DBUG_RETURN(sp_cache_package_routine(thd, pkgname, name, sp));
|
||||
}
|
||||
|
||||
|
||||
|
||||
16
sql/sp.h
16
sql/sp.h
@@ -102,10 +102,10 @@ protected:
|
||||
int sp_cache_package_routine(THD *thd,
|
||||
const LEX_CSTRING &pkgname_cstr,
|
||||
const Database_qualified_name *name,
|
||||
bool lookup_only, sp_head **sp) const;
|
||||
sp_head **sp) const;
|
||||
int sp_cache_package_routine(THD *thd,
|
||||
const Database_qualified_name *name,
|
||||
bool lookup_only, sp_head **sp) const;
|
||||
sp_head **sp) const;
|
||||
sp_head *sp_find_package_routine(THD *thd,
|
||||
const LEX_CSTRING pkgname_str,
|
||||
const Database_qualified_name *name,
|
||||
@@ -202,7 +202,7 @@ public:
|
||||
const Database_qualified_name *name,
|
||||
bool cache_only) const;
|
||||
virtual int sp_cache_routine(THD *thd, const Database_qualified_name *name,
|
||||
bool lookup_only, sp_head **sp) const;
|
||||
sp_head **sp) const;
|
||||
|
||||
int sp_cache_routine_reentrant(THD *thd,
|
||||
const Database_qualified_name *nm,
|
||||
@@ -283,9 +283,9 @@ class Sp_handler_package_procedure: public Sp_handler_procedure
|
||||
{
|
||||
public:
|
||||
int sp_cache_routine(THD *thd, const Database_qualified_name *name,
|
||||
bool lookup_only, sp_head **sp) const
|
||||
sp_head **sp) const
|
||||
{
|
||||
return sp_cache_package_routine(thd, name, lookup_only, sp);
|
||||
return sp_cache_package_routine(thd, name, sp);
|
||||
}
|
||||
sp_head *sp_find_routine(THD *thd,
|
||||
const Database_qualified_name *name,
|
||||
@@ -332,9 +332,9 @@ class Sp_handler_package_function: public Sp_handler_function
|
||||
{
|
||||
public:
|
||||
int sp_cache_routine(THD *thd, const Database_qualified_name *name,
|
||||
bool lookup_only, sp_head **sp) const
|
||||
sp_head **sp) const
|
||||
{
|
||||
return sp_cache_package_routine(thd, name, lookup_only, sp);
|
||||
return sp_cache_package_routine(thd, name, sp);
|
||||
}
|
||||
sp_head *sp_find_routine(THD *thd,
|
||||
const Database_qualified_name *name,
|
||||
@@ -632,7 +632,7 @@ public:
|
||||
|
||||
const Sp_handler *m_handler;
|
||||
|
||||
int sp_cache_routine(THD *thd, bool lookup_only, sp_head **sp) const;
|
||||
int sp_cache_routine(THD *thd, sp_head **sp) const;
|
||||
};
|
||||
|
||||
|
||||
|
||||
@@ -78,6 +78,8 @@ private:
|
||||
|
||||
/* All routines in this cache */
|
||||
HASH m_hashtable;
|
||||
public:
|
||||
void clear();
|
||||
}; // class sp_cache
|
||||
|
||||
#ifdef HAVE_PSI_INTERFACE
|
||||
@@ -313,6 +315,10 @@ sp_cache::cleanup()
|
||||
my_hash_free(&m_hashtable);
|
||||
}
|
||||
|
||||
void sp_cache::clear()
|
||||
{
|
||||
my_hash_reset(&m_hashtable);
|
||||
}
|
||||
|
||||
void Sp_caches::sp_caches_clear()
|
||||
{
|
||||
@@ -321,3 +327,15 @@ void Sp_caches::sp_caches_clear()
|
||||
sp_cache_clear(&sp_package_spec_cache);
|
||||
sp_cache_clear(&sp_package_body_cache);
|
||||
}
|
||||
|
||||
void Sp_caches::sp_caches_empty()
|
||||
{
|
||||
if (sp_proc_cache)
|
||||
sp_proc_cache->clear();
|
||||
if (sp_func_cache)
|
||||
sp_func_cache->clear();
|
||||
if (sp_package_spec_cache)
|
||||
sp_package_spec_cache->clear();
|
||||
if (sp_package_body_cache)
|
||||
sp_package_body_cache->clear();
|
||||
}
|
||||
|
||||
@@ -3784,6 +3784,9 @@ sp_instr_stmt::execute(THD *thd, uint *nextp)
|
||||
thd->update_stats();
|
||||
thd->lex->sql_command= save_sql_command;
|
||||
*nextp= m_ip+1;
|
||||
#ifdef PROTECT_STATEMENT_MEMROOT
|
||||
mark_as_qc_used();
|
||||
#endif
|
||||
}
|
||||
thd->set_query(query_backup);
|
||||
thd->query_name_consts= 0;
|
||||
|
||||
@@ -1126,7 +1126,7 @@ public:
|
||||
sp_instr(uint ip, sp_pcontext *ctx)
|
||||
:Query_arena(0, STMT_INITIALIZED_FOR_SP), marked(0), m_ip(ip), m_ctx(ctx)
|
||||
#ifdef PROTECT_STATEMENT_MEMROOT
|
||||
, m_has_been_run(false)
|
||||
, m_has_been_run(NON_RUN)
|
||||
#endif
|
||||
{}
|
||||
|
||||
@@ -1221,21 +1221,29 @@ public:
|
||||
#ifdef PROTECT_STATEMENT_MEMROOT
|
||||
bool has_been_run() const
|
||||
{
|
||||
return m_has_been_run;
|
||||
return m_has_been_run == RUN;
|
||||
}
|
||||
|
||||
void mark_as_qc_used()
|
||||
{
|
||||
m_has_been_run= QC;
|
||||
}
|
||||
|
||||
void mark_as_run()
|
||||
{
|
||||
m_has_been_run= true;
|
||||
if (m_has_been_run == QC)
|
||||
m_has_been_run= NON_RUN; // answer was from WC => not really executed
|
||||
else
|
||||
m_has_been_run= RUN;
|
||||
}
|
||||
|
||||
void mark_as_not_run()
|
||||
{
|
||||
m_has_been_run= false;
|
||||
m_has_been_run= NON_RUN;
|
||||
}
|
||||
|
||||
private:
|
||||
bool m_has_been_run;
|
||||
enum {NON_RUN, QC, RUN} m_has_been_run;
|
||||
#endif
|
||||
}; // class sp_instr : public Sql_alloc
|
||||
|
||||
|
||||
@@ -3511,7 +3511,7 @@ open_and_process_routine(THD *thd, Query_tables_list *prelocking_ctx,
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
/* Ensures the routine is up-to-date and cached, if exists. */
|
||||
if (rt->sp_cache_routine(thd, has_prelocking_list, &sp))
|
||||
if (rt->sp_cache_routine(thd, &sp))
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
/* Remember the version of the routine in the parse tree. */
|
||||
@@ -3552,7 +3552,7 @@ open_and_process_routine(THD *thd, Query_tables_list *prelocking_ctx,
|
||||
Validating routine version is unnecessary, since CALL
|
||||
does not affect the prepared statement prelocked list.
|
||||
*/
|
||||
if (rt->sp_cache_routine(thd, false, &sp))
|
||||
if (rt->sp_cache_routine(thd, &sp))
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
}
|
||||
@@ -5578,13 +5578,23 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint count, uint flags)
|
||||
}
|
||||
}
|
||||
|
||||
DEBUG_SYNC(thd, "before_lock_tables_takes_lock");
|
||||
#ifdef ENABLED_DEBUG_SYNC
|
||||
if (!tables ||
|
||||
!(strcmp(tables->db.str, "mysql") == 0 &&
|
||||
strcmp(tables->table_name.str, "proc") == 0))
|
||||
DEBUG_SYNC(thd, "before_lock_tables_takes_lock");
|
||||
#endif
|
||||
|
||||
if (! (thd->lock= mysql_lock_tables(thd, start, (uint) (ptr - start),
|
||||
flags)))
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
DEBUG_SYNC(thd, "after_lock_tables_takes_lock");
|
||||
#ifdef ENABLED_DEBUG_SYNC
|
||||
if (!tables ||
|
||||
!(strcmp(tables->db.str, "mysql") == 0 &&
|
||||
strcmp(tables->table_name.str, "proc") == 0))
|
||||
DEBUG_SYNC(thd, "after_lock_tables_takes_lock");
|
||||
#endif
|
||||
|
||||
if (thd->lex->requires_prelocking() &&
|
||||
thd->lex->sql_command != SQLCOM_LOCK_TABLES &&
|
||||
|
||||
@@ -896,6 +896,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
|
||||
prepare_derived_at_open= FALSE;
|
||||
create_tmp_table_for_derived= FALSE;
|
||||
save_prep_leaf_list= FALSE;
|
||||
reset_sp_cache= false;
|
||||
org_charset= 0;
|
||||
/* Restore THR_THD */
|
||||
set_current_thd(old_THR_THD);
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
/*
|
||||
Copyright (c) 2000, 2016, Oracle and/or its affiliates.
|
||||
Copyright (c) 2009, 2022, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
@@ -2485,6 +2484,11 @@ public:
|
||||
swap_variables(sp_cache*, sp_package_body_cache, rhs.sp_package_body_cache);
|
||||
}
|
||||
void sp_caches_clear();
|
||||
/**
|
||||
Clear content of sp related caches.
|
||||
Don't delete cache objects itself.
|
||||
*/
|
||||
void sp_caches_empty();
|
||||
};
|
||||
|
||||
|
||||
@@ -2975,6 +2979,12 @@ public:
|
||||
|
||||
bool save_prep_leaf_list;
|
||||
|
||||
/**
|
||||
The data member reset_sp_cache is to signal that content of sp_cache
|
||||
must be reset (all items be removed from it).
|
||||
*/
|
||||
bool reset_sp_cache;
|
||||
|
||||
/* container for handler's private per-connection data */
|
||||
Ha_data ha_data[MAX_HA];
|
||||
|
||||
|
||||
@@ -1590,6 +1590,7 @@ bool JOIN_CACHE::put_record()
{
bool is_full;
uchar *link= 0;
DBUG_ASSERT(!for_explain_only);
if (prev_cache)
link= prev_cache->get_curr_rec_link();
write_record_data(link, &is_full);
@@ -26,7 +26,11 @@
#include "sql_manager.h"
#include "sql_base.h" // flush_tables

static bool volatile manager_thread_in_use = 0;
/*
Values for manager_thread_in_use: 0 means "not started". 1 means "started
and active". 2 means "stopped".
*/
static int volatile manager_thread_in_use = 0;
static bool abort_manager = false;

pthread_t manager_thread;
@@ -44,7 +48,7 @@ static struct handler_cb *cb_list; // protected by LOCK_manager
bool mysql_manager_submit(void (*action)(void *), void *data)
{
bool result= FALSE;
DBUG_ASSERT(manager_thread_in_use);
DBUG_ASSERT(manager_thread_in_use == 1);
struct handler_cb **cb;
mysql_mutex_lock(&LOCK_manager);
cb= &cb_list;
@@ -119,7 +123,7 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused)))
mysql_mutex_lock(&LOCK_manager);
}
DBUG_ASSERT(cb_list == NULL);
manager_thread_in_use = 0;
manager_thread_in_use = 2;
mysql_mutex_unlock(&LOCK_manager);
mysql_mutex_destroy(&LOCK_manager);
mysql_cond_destroy(&COND_manager);
@@ -148,6 +152,15 @@ void start_handle_manager()
}

mysql_mutex_lock(&LOCK_manager);
/*
Wait for manager thread to have started, otherwise in extreme cases the
server may start up and have initiated shutdown at the time the manager
thread even starts to run.

Allow both values 1 and 2 for manager_thread_in_use, so that we will not
get stuck here if the manager thread somehow manages to start up and
abort again before we have time to test it here.
*/
while (!manager_thread_in_use)
mysql_cond_wait(&COND_manager, &LOCK_manager);
mysql_mutex_unlock(&LOCK_manager);
@@ -2252,6 +2252,7 @@ dispatch_command_return dispatch_command(enum enum_server_command command, THD *
my_eof(thd);
kill_mysql(thd);
error=TRUE;
DBUG_EXECUTE_IF("simulate_slow_client_at_shutdown", my_sleep(2000000););
break;
}
#endif
@@ -2409,6 +2410,11 @@ resume:
}
#endif /* WITH_WSREP */

if (thd->reset_sp_cache)
{
thd->sp_caches_empty();
thd->reset_sp_cache= false;
}

if (do_end_of_statement)
{
@@ -2485,6 +2491,7 @@ resume:
MYSQL_COMMAND_DONE(res);
}
DEBUG_SYNC(thd,"dispatch_command_end");
DEBUG_SYNC(thd,"dispatch_command_end2");

/* Check that some variables are reset properly */
DBUG_ASSERT(thd->abort_on_warning == 0);
@@ -5883,7 +5890,7 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt)
if (sph->sp_resolve_package_routine(thd, thd->lex->sphead,
lex->spname, &sph, &pkgname))
return true;
if (sph->sp_cache_routine(thd, lex->spname, false, &sp))
if (sph->sp_cache_routine(thd, lex->spname, &sp))
goto error;
if (!sp || sp->show_routine_code(thd))
{
@@ -4263,7 +4263,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,

if (unlikely(error))
{
sql_print_error("Parsing options for plugin '%s' failed.",
sql_print_error("Parsing options for plugin '%s' failed. Disabling plugin",
tmp->name.str);
goto err;
}
@@ -221,7 +221,7 @@ static int fake_rotate_event(binlog_send_info *info, ulonglong position,
char* p = info->log_file_name+dirname_length(info->log_file_name);
uint ident_len = (uint) strlen(p);
String *packet= info->packet;
ha_checksum crc;
ha_checksum crc= 0;

/* reset transmit packet for the fake rotate event below */
if (reset_transmit_packet(info, info->flags, &ev_offset, &info->errmsg))
@@ -262,7 +262,7 @@ static int fake_gtid_list_event(binlog_send_info *info,
{
my_bool do_checksum;
int err;
ha_checksum crc;
ha_checksum crc= 0;
char buf[128];
String str(buf, sizeof(buf), system_charset_info);
String* packet= info->packet;
@@ -3568,7 +3568,7 @@ bool JOIN::make_aggr_tables_info()
distinct in the engine, so we do this for all queries, not only
GROUP BY queries.
*/
if (tables_list && top_join_tab_count && !procedure)
if (tables_list && top_join_tab_count && !only_const_tables() && !procedure)
{
/*
At the moment we only support push down for queries where
@@ -31424,7 +31424,26 @@ void JOIN::init_join_cache_and_keyread()
if (!(table->file->index_flags(table->file->keyread, 0, 1) & HA_CLUSTERED_INDEX))
table->mark_index_columns(table->file->keyread, table->read_set);
}
if (tab->cache && tab->cache->init(select_options & SELECT_DESCRIBE))
bool init_for_explain= false;

/*
Can we use lightweight initalization mode just for EXPLAINs? We can if
we're certain that the optimizer will not execute the subquery.
The optimzier will not execute the subquery if it's too expensive. For
the exact criteria, see Item_subselect::is_expensive().
Note that the subquery might be a UNION and we might not yet know if it
is expensive.
What we do know is that if this SELECT is too expensive, then the whole
subquery will be too expensive as well.
So, we can use lightweight initialization (init_for_explain=true) if this
SELECT examines more than @@expensive_subquery_limit rows.
*/
if ((select_options & SELECT_DESCRIBE) &&
get_examined_rows() >= thd->variables.expensive_subquery_limit)
{
init_for_explain= true;
}
if (tab->cache && tab->cache->init(init_for_explain))
revise_cache_usage(tab);
else
tab->remove_redundant_bnl_scan_conds();
@@ -1326,6 +1326,7 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables,
|
||||
temporary_table_was_dropped= 1;
|
||||
}
|
||||
is_temporary= 1;
|
||||
thd->reset_sp_cache= true;
|
||||
}
|
||||
|
||||
if ((drop_temporary && if_exists) || temporary_table_was_dropped)
|
||||
@@ -1699,8 +1700,11 @@ report_error:
|
||||
}
|
||||
DBUG_PRINT("table", ("table: %p s: %p", table->table,
|
||||
table->table ? table->table->s : NULL));
|
||||
if (is_temporary_table(table))
|
||||
thd->reset_sp_cache= true;
|
||||
}
|
||||
DEBUG_SYNC(thd, "rm_table_no_locks_before_binlog");
|
||||
|
||||
thd->thread_specific_used= TRUE;
|
||||
error= 0;
|
||||
|
||||
@@ -4492,6 +4496,7 @@ int create_table_impl(THD *thd,
|
||||
if (is_trans != NULL)
|
||||
*is_trans= table->file->has_transactions();
|
||||
|
||||
thd->reset_sp_cache= true;
|
||||
thd->thread_specific_used= TRUE;
|
||||
create_info->table= table; // Store pointer to table
|
||||
}
|
||||
|
||||
@@ -1108,10 +1108,8 @@ void wsrep_recover()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void wsrep_stop_replication(THD *thd)
|
||||
static void wsrep_stop_replication_common(THD *thd)
|
||||
{
|
||||
WSREP_INFO("Stop replication by %llu", (thd) ? thd->thread_id : 0);
|
||||
if (Wsrep_server_state::instance().state() !=
|
||||
Wsrep_server_state::s_disconnected)
|
||||
{
|
||||
@@ -1124,10 +1122,10 @@ void wsrep_stop_replication(THD *thd)
|
||||
}
|
||||
}
|
||||
|
||||
/* my connection, should not terminate with wsrep_close_client_connection(),
|
||||
make transaction to rollback
|
||||
*/
|
||||
if (thd && !thd->wsrep_applier) trans_rollback(thd);
|
||||
/* my connection, should not terminate with
|
||||
wsrep_close_client_connections(), make transaction to rollback */
|
||||
if (thd && !thd->wsrep_applier)
|
||||
trans_rollback(thd);
|
||||
wsrep_close_client_connections(TRUE, thd);
|
||||
|
||||
/* wait until appliers have stopped */
|
||||
@@ -1136,26 +1134,16 @@ void wsrep_stop_replication(THD *thd)
|
||||
node_uuid= WSREP_UUID_UNDEFINED;
|
||||
}
|
||||
|
||||
void wsrep_stop_replication(THD *thd)
|
||||
{
|
||||
WSREP_INFO("Stop replication by %llu", (thd) ? thd->thread_id : 0);
|
||||
wsrep_stop_replication_common(thd);
|
||||
}
|
||||
|
||||
void wsrep_shutdown_replication()
|
||||
{
|
||||
WSREP_INFO("Shutdown replication");
|
||||
if (Wsrep_server_state::instance().state() != wsrep::server_state::s_disconnected)
|
||||
{
|
||||
WSREP_DEBUG("Disconnect provider");
|
||||
Wsrep_server_state::instance().disconnect();
|
||||
if (Wsrep_server_state::instance().wait_until_state(
|
||||
Wsrep_server_state::s_disconnected))
|
||||
{
|
||||
WSREP_WARN("Wsrep interrupted while waiting for disconnected state");
|
||||
}
|
||||
}
|
||||
|
||||
wsrep_close_client_connections(TRUE);
|
||||
|
||||
/* wait until appliers have stopped */
|
||||
wsrep_wait_appliers_close(NULL);
|
||||
node_uuid= WSREP_UUID_UNDEFINED;
|
||||
|
||||
wsrep_stop_replication_common(nullptr);
|
||||
/* Undocking the thread specific data. */
|
||||
set_current_thd(nullptr);
|
||||
}
|
||||
@@ -3266,14 +3254,20 @@ static my_bool have_client_connections(THD *thd, void*)
|
||||
{
|
||||
DBUG_PRINT("quit",("Informing thread %lld that it's time to die",
|
||||
(longlong) thd->thread_id));
|
||||
if (is_client_connection(thd) &&
|
||||
(thd->killed == KILL_CONNECTION ||
|
||||
thd->killed == KILL_CONNECTION_HARD))
|
||||
if (is_client_connection(thd))
|
||||
{
|
||||
(void)abort_replicated(thd);
|
||||
return 1;
|
||||
if (thd->killed == KILL_CONNECTION ||
|
||||
thd->killed == KILL_CONNECTION_HARD)
|
||||
{
|
||||
(void)abort_replicated(thd);
|
||||
return true;
|
||||
}
|
||||
if (thd->get_stmt_da()->is_eof())
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
static void wsrep_close_thread(THD *thd)
|
||||
@@ -3311,14 +3305,24 @@ static my_bool kill_all_threads(THD *thd, THD *caller_thd)
|
||||
/* We skip slave threads & scheduler on this first loop through. */
|
||||
if (is_client_connection(thd) && thd != caller_thd)
|
||||
{
|
||||
if (thd->get_stmt_da()->is_eof())
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (is_replaying_connection(thd))
|
||||
{
|
||||
thd->set_killed(KILL_CONNECTION_HARD);
|
||||
else if (!abort_replicated(thd))
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!abort_replicated(thd))
|
||||
{
|
||||
/* replicated transactions must be skipped */
|
||||
WSREP_DEBUG("closing connection %lld", (longlong) thd->thread_id);
|
||||
/* instead of wsrep_close_thread() we do now soft kill by THD::awake */
|
||||
thd->awake(KILL_CONNECTION_HARD);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
@@ -3330,6 +3334,7 @@ static my_bool kill_remaining_threads(THD *thd, THD *caller_thd)
|
||||
if (is_client_connection(thd) &&
|
||||
!abort_replicated(thd) &&
|
||||
!is_replaying_connection(thd) &&
|
||||
!thd->get_stmt_da()->is_eof() &&
|
||||
thd_is_connection_alive(thd) &&
|
||||
thd != caller_thd)
|
||||
{
|
||||
|
||||
@@ -264,25 +264,17 @@ static void finish_stmt(THD* thd) {
|
||||
close_thread_tables(thd);
|
||||
}
|
||||
|
||||
static int open_table(THD* thd,
|
||||
const LEX_CSTRING *schema_name,
|
||||
const LEX_CSTRING *table_name,
|
||||
enum thr_lock_type const lock_type,
|
||||
TABLE** table) {
|
||||
assert(table);
|
||||
*table= NULL;
|
||||
|
||||
static int open_table(THD *thd, const LEX_CSTRING *schema_name,
|
||||
const LEX_CSTRING *table_name,
|
||||
enum thr_lock_type const lock_type,
|
||||
TABLE_LIST *table_list)
|
||||
{
|
||||
assert(table_list);
|
||||
DBUG_ENTER("Wsrep_schema::open_table()");
|
||||
|
||||
TABLE_LIST tables;
|
||||
uint flags= (MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK |
|
||||
MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY |
|
||||
MYSQL_OPEN_IGNORE_FLUSH |
|
||||
MYSQL_LOCK_IGNORE_TIMEOUT);
|
||||
|
||||
tables.init_one_table(schema_name,
|
||||
table_name,
|
||||
NULL, lock_type);
|
||||
const uint flags= (MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK |
|
||||
MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY |
|
||||
MYSQL_OPEN_IGNORE_FLUSH | MYSQL_LOCK_IGNORE_TIMEOUT);
|
||||
table_list->init_one_table(schema_name, table_name, NULL, lock_type);
|
||||
thd->lex->query_tables_own_last= 0;
|
||||
|
||||
// No need to open table if the query was bf aborted,
|
||||
@@ -292,37 +284,39 @@ static int open_table(THD* thd,
|
||||
(thd->get_stmt_da()->sql_errno() == ER_QUERY_INTERRUPTED));
|
||||
|
||||
if (interrupted ||
|
||||
!open_n_lock_single_table(thd, &tables, tables.lock_type, flags)) {
|
||||
!open_n_lock_single_table(thd, table_list, table_list->lock_type, flags))
|
||||
{
|
||||
close_thread_tables(thd);
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
|
||||
*table= tables.table;
|
||||
(*table)->use_all_columns();
|
||||
table_list->table->use_all_columns();
|
||||
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
|
||||
static int open_for_write(THD* thd, const char* table_name, TABLE** table) {
|
||||
static int open_for_write(THD* thd, const char* table_name, TABLE_LIST* table_list)
|
||||
{
|
||||
LEX_CSTRING schema_str= { wsrep_schema_str.c_str(), wsrep_schema_str.length() };
|
||||
LEX_CSTRING table_str= { table_name, strlen(table_name) };
|
||||
if (Wsrep_schema_impl::open_table(thd, &schema_str, &table_str, TL_WRITE,
|
||||
table)) {
|
||||
table_list))
|
||||
{
|
||||
// No need to log an error if the query was bf aborted,
|
||||
// thd client will get ER_LOCK_DEADLOCK in the end.
|
||||
const bool interrupted= thd->killed ||
|
||||
(thd->is_error() &&
|
||||
(thd->get_stmt_da()->sql_errno() == ER_QUERY_INTERRUPTED));
|
||||
if (!interrupted) {
|
||||
if (!interrupted)
|
||||
{
|
||||
WSREP_ERROR("Failed to open table %s.%s for writing",
|
||||
schema_str.str, table_name);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
empty_record(*table);
|
||||
(*table)->use_all_columns();
restore_record(*table, s->default_values);
empty_record(table_list->table);
table_list->table->use_all_columns();
restore_record(table_list->table, s->default_values);
return 0;
}

@@ -464,19 +458,21 @@ static int delete_row(TABLE* table) {
return 0;
}

static int open_for_read(THD* thd, const char* table_name, TABLE** table) {

static int open_for_read(THD *thd, const char *table_name,
TABLE_LIST *table_list)
{
LEX_CSTRING schema_str= { wsrep_schema_str.c_str(), wsrep_schema_str.length() };
LEX_CSTRING table_str= { table_name, strlen(table_name) };
if (Wsrep_schema_impl::open_table(thd, &schema_str, &table_str, TL_READ,
table)) {
table_list))
{
WSREP_ERROR("Failed to open table %s.%s for reading",
schema_str.str, table_name);
return 1;
}
empty_record(*table);
(*table)->use_all_columns();
restore_record(*table, s->default_values);
empty_record(table_list->table);
table_list->table->use_all_columns();
restore_record(table_list->table, s->default_values);
return 0;
}

@@ -731,8 +727,10 @@ int Wsrep_schema::store_view(THD* thd, const Wsrep_view& view)
assert(view.status() == Wsrep_view::primary);
int ret= 1;
int error;
TABLE_LIST cluster_table_l;
TABLE* cluster_table= 0;
TABLE* members_table= 0;
TABLE_LIST members_table_l;
TABLE* members_table = 0;
#ifdef WSREP_SCHEMA_MEMBERS_HISTORY
TABLE* members_history_table= 0;
#endif /* WSREP_SCHEMA_MEMBERS_HISTORY */
@@ -757,11 +755,13 @@ int Wsrep_schema::store_view(THD* thd, const Wsrep_view& view)
Store cluster view info
*/
Wsrep_schema_impl::init_stmt(thd);
if (Wsrep_schema_impl::open_for_write(thd, cluster_table_str.c_str(), &cluster_table))
if (Wsrep_schema_impl::open_for_write(thd, cluster_table_str.c_str(), &cluster_table_l))
{
goto out;
}

cluster_table= cluster_table_l.table;

Wsrep_schema_impl::store(cluster_table, 0, view.state_id().id());
Wsrep_schema_impl::store(cluster_table, 1, view.view_seqno().get());
Wsrep_schema_impl::store(cluster_table, 2, view.state_id().seqno().get());
@@ -781,12 +781,14 @@ int Wsrep_schema::store_view(THD* thd, const Wsrep_view& view)
*/
Wsrep_schema_impl::init_stmt(thd);
if (Wsrep_schema_impl::open_for_write(thd, members_table_str.c_str(),
&members_table))
&members_table_l))
{
WSREP_ERROR("failed to open wsrep.members table");
goto out;
}

members_table= members_table_l.table;

for (size_t i= 0; i < view.members().size(); ++i)
{
Wsrep_schema_impl::store(members_table, 0, view.members()[i].id());
@@ -840,8 +842,10 @@ Wsrep_view Wsrep_schema::restore_view(THD* thd, const Wsrep_id& own_id) const {
int ret= 1;
int error;

TABLE_LIST cluster_table_l;
TABLE* cluster_table= 0;
bool end_cluster_scan= false;
TABLE_LIST members_table_l;
TABLE* members_table= 0;
bool end_members_scan= false;

@@ -867,8 +871,12 @@ Wsrep_view Wsrep_schema::restore_view(THD* thd, const Wsrep_id& own_id) const {
Read cluster info from cluster table
*/
Wsrep_schema_impl::init_stmt(thd);
if (Wsrep_schema_impl::open_for_read(thd, cluster_table_str.c_str(), &cluster_table) ||
Wsrep_schema_impl::init_for_scan(cluster_table)) {
if (Wsrep_schema_impl::open_for_read(thd, cluster_table_str.c_str(), &cluster_table_l)) {
goto out;
}
cluster_table = cluster_table_l.table;

if (Wsrep_schema_impl::init_for_scan(cluster_table)) {
goto out;
}

@@ -892,8 +900,14 @@ Wsrep_view Wsrep_schema::restore_view(THD* thd, const Wsrep_id& own_id) const {
Read members from members table
*/
Wsrep_schema_impl::init_stmt(thd);
if (Wsrep_schema_impl::open_for_read(thd, members_table_str.c_str(), &members_table) ||
Wsrep_schema_impl::init_for_scan(members_table)) {
if (Wsrep_schema_impl::open_for_read(thd, members_table_str.c_str(),
&members_table_l))
{
goto out;
}

members_table= members_table_l.table;
if (Wsrep_schema_impl::init_for_scan(members_table)) {
goto out;
}
end_members_scan= true;
@@ -997,14 +1011,15 @@ int Wsrep_schema::append_fragment(THD* thd,
Wsrep_schema_impl::sql_safe_updates sql_safe_updates(thd);
Wsrep_schema_impl::init_stmt(thd);

TABLE* frag_table= 0;
if (Wsrep_schema_impl::open_for_write(thd, sr_table_str.c_str(), &frag_table))
TABLE_LIST frag_table_l;
if (Wsrep_schema_impl::open_for_write(thd, sr_table_str.c_str(), &frag_table_l))
{
trans_rollback_stmt(thd);
thd->lex->restore_backup_query_tables_list(&query_tables_list_backup);
DBUG_RETURN(1);
}

TABLE* frag_table= frag_table_l.table;
Wsrep_schema_impl::store(frag_table, 0, server_id);
Wsrep_schema_impl::store(frag_table, 1, transaction_id.get());
Wsrep_schema_impl::store(frag_table, 2, seqno.get());
@@ -1048,13 +1063,15 @@ int Wsrep_schema::update_fragment_meta(THD* thd,
uchar *key=NULL;
key_part_map key_map= 0;
TABLE* frag_table= 0;
TABLE_LIST frag_table_l;

Wsrep_schema_impl::init_stmt(thd);
if (Wsrep_schema_impl::open_for_write(thd, sr_table_str.c_str(), &frag_table))
if (Wsrep_schema_impl::open_for_write(thd, sr_table_str.c_str(), &frag_table_l))
{
thd->lex->restore_backup_query_tables_list(&query_tables_list_backup);
DBUG_RETURN(1);
}
frag_table= frag_table_l.table;

/* Find record with the given uuid, trx id, and seqno -1 */
Wsrep_schema_impl::store(frag_table, 0, ws_meta.server_id());
@@ -1142,7 +1159,10 @@ static int remove_fragment(THD* thd,
seqno.get(),
error);
}
ret= error;
else
{
ret= error;
}
}
else if (Wsrep_schema_impl::delete_row(frag_table))
{
@@ -1174,12 +1194,14 @@ int Wsrep_schema::remove_fragments(THD* thd,
thd->reset_n_backup_open_tables_state(&open_tables_backup);

TABLE* frag_table= 0;
if (Wsrep_schema_impl::open_for_write(thd, sr_table_str.c_str(), &frag_table))
TABLE_LIST frag_table_l;
if (Wsrep_schema_impl::open_for_write(thd, sr_table_str.c_str(), &frag_table_l))
{
ret= 1;
}
else
{
frag_table= frag_table_l.table;
for (std::vector<wsrep::seqno>::const_iterator i= fragments.begin();
i != fragments.end(); ++i)
{
@@ -1243,6 +1265,7 @@ int Wsrep_schema::replay_transaction(THD* orig_thd,
int ret= 1;
int error;
TABLE* frag_table= 0;
TABLE_LIST frag_table_l;
uchar *key=NULL;
key_part_map key_map= 0;

@@ -1250,12 +1273,13 @@ int Wsrep_schema::replay_transaction(THD* orig_thd,
i != fragments.end(); ++i)
{
Wsrep_schema_impl::init_stmt(&thd);
if ((error= Wsrep_schema_impl::open_for_read(&thd, sr_table_str.c_str(), &frag_table)))
if ((error= Wsrep_schema_impl::open_for_read(&thd, sr_table_str.c_str(), &frag_table_l)))
{
WSREP_WARN("Could not open SR table for read: %d", error);
Wsrep_schema_impl::finish_stmt(&thd);
DBUG_RETURN(1);
}
frag_table= frag_table_l.table;

Wsrep_schema_impl::store(frag_table, 0, ws_meta.server_id());
Wsrep_schema_impl::store(frag_table, 1, ws_meta.transaction_id().get());
@@ -1301,12 +1325,13 @@ int Wsrep_schema::replay_transaction(THD* orig_thd,

if ((error= Wsrep_schema_impl::open_for_write(&thd,
sr_table_str.c_str(),
&frag_table)))
&frag_table_l)))
{
WSREP_WARN("Could not open SR table for write: %d", error);
Wsrep_schema_impl::finish_stmt(&thd);
DBUG_RETURN(1);
}
frag_table= frag_table_l.table;

error= Wsrep_schema_impl::init_for_index_scan(frag_table,
key,
@@ -1348,7 +1373,9 @@ int Wsrep_schema::recover_sr_transactions(THD *orig_thd)
(char*) &storage_thd);
wsrep_assign_from_threadvars(&storage_thd);
TABLE* frag_table= 0;
TABLE_LIST frag_table_l;
TABLE* cluster_table= 0;
TABLE_LIST cluster_table_l;
Wsrep_storage_service storage_service(&storage_thd);
Wsrep_schema_impl::binlog_off binlog_off(&storage_thd);
Wsrep_schema_impl::wsrep_off wsrep_off(&storage_thd);
@@ -1363,10 +1390,15 @@ int Wsrep_schema::recover_sr_transactions(THD *orig_thd)

Wsrep_schema_impl::init_stmt(&storage_thd);
storage_thd.wsrep_skip_locking= FALSE;
if (Wsrep_schema_impl::open_for_read(&storage_thd,
cluster_table_str.c_str(),
&cluster_table) ||
Wsrep_schema_impl::init_for_scan(cluster_table))
if (Wsrep_schema_impl::open_for_read(&storage_thd, cluster_table_str.c_str(),
&cluster_table_l))
{
Wsrep_schema_impl::finish_stmt(&storage_thd);
DBUG_RETURN(1);
}
cluster_table= cluster_table_l.table;

if (Wsrep_schema_impl::init_for_scan(cluster_table))
{
Wsrep_schema_impl::finish_stmt(&storage_thd);
DBUG_RETURN(1);
@@ -1404,12 +1436,19 @@ int Wsrep_schema::recover_sr_transactions(THD *orig_thd)
Open the table for reading and writing so that fragments without
valid seqno can be deleted.
*/
if (Wsrep_schema_impl::open_for_write(&storage_thd, sr_table_str.c_str(), &frag_table) ||
Wsrep_schema_impl::init_for_scan(frag_table))
if (Wsrep_schema_impl::open_for_write(&storage_thd, sr_table_str.c_str(),
&frag_table_l))
{
WSREP_ERROR("Failed to open SR table for write");
goto out;
}
frag_table= frag_table_l.table;

if (Wsrep_schema_impl::init_for_scan(frag_table))
{
WSREP_ERROR("Failed to init for index scan");
goto out;
}

while (0 == error)
{

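All of the wsrep_schema.cc hunks above apply the same refactoring: the Wsrep_schema_impl open helpers now take a TABLE_LIST instead of a TABLE**, and the caller picks the opened TABLE out of it. A minimal sketch of the new calling pattern, assuming only the helper names and error handling visible in the hunks above (illustration, not part of the commit):

TABLE_LIST frag_table_l;
TABLE *frag_table= 0;

Wsrep_schema_impl::init_stmt(thd);
/* The helper fills in the TABLE_LIST; on success the opened TABLE is
   reachable through frag_table_l.table. */
if (Wsrep_schema_impl::open_for_write(thd, sr_table_str.c_str(), &frag_table_l))
{
  Wsrep_schema_impl::finish_stmt(thd);
  return 1;
}
frag_table= frag_table_l.table;
/* ... store or scan rows through frag_table exactly as before ... */
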
@@ -6127,7 +6127,6 @@ btr_store_big_rec_extern_fields(
for (ulint blob_npages = 0;; ++blob_npages) {
buf_block_t* block;
const ulint commit_freq = 4;
uint32_t r_extents;

ut_ad(page_align(field_ref) == page_align(rec));

@@ -6162,24 +6161,18 @@ btr_store_big_rec_extern_fields(
hint_prev = rec_block->page.id().page_no();
}

error = fsp_reserve_free_extents(
&r_extents, index->table->space, 1,
FSP_BLOB, &mtr, 1);
if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
alloc_fail:
mtr.commit();
goto func_exit;
}

block = btr_page_alloc(index, hint_prev + 1,
FSP_NO_DIR, 0, &mtr, &mtr,
&error);

index->table->space->release_free_extents(r_extents);
if (!block) {
goto alloc_fail;
alloc_fail:
mtr.commit();
goto func_exit;
}

ut_a(block != NULL);

const uint32_t page_no = block->page.id().page_no();

if (prev_page_no == FIL_NULL) {

@@ -468,8 +468,15 @@ btr_pcur_t::restore_position(btr_latch_mode restore_latch_mode, mtr_t *mtr)

return restore_status::SAME_ALL;
}
if (n_matched_fields >= index->n_uniq)
ret_val= restore_status::SAME_UNIQ;
if (n_matched_fields >= index->n_uniq
/* Unique indexes can contain "NULL" keys, and if all
unique fields are NULL and not all tuple
fields match to record fields, then treat it as if
restored cursor position points to the record with
not the same unique key. */
&& !(index->n_nullable
&& dtuple_contains_null(tuple, index->n_uniq)))
ret_val= restore_status::SAME_UNIQ;
}

mem_heap_free(heap);

@@ -1987,7 +1987,6 @@ dict_index_add_to_cache(
new_index->n_fields = new_index->n_def;
new_index->trx_id = index->trx_id;
new_index->set_committed(index->is_committed());
new_index->nulls_equal = index->nulls_equal;

n_ord = new_index->n_uniq;
/* Flag the ordering columns and also set column max_prefix */

@@ -2289,7 +2289,7 @@ void fil_space_crypt_close_tablespace(const fil_space_t *space)
<< space->chain.start->name << " ("
<< space->id << ") active threads "
<< crypt_data->rotate_state.active_threads
<< "flushing="
<< " flushing="
<< crypt_data->rotate_state.flushing << ".";
last = now;
}

@@ -507,7 +507,7 @@ rtr_pcur_move_to_next(
mysql_mutex_unlock(&rtr_info->matches->rtr_match_mutex);

cursor->btr_cur.page_cur.rec = rec.r_rec;
cursor->btr_cur.page_cur.block = &rtr_info->matches->block;
cursor->btr_cur.page_cur.block = rtr_info->matches->block;

DEBUG_SYNC_C("rtr_pcur_move_to_next_return");
return(true);
@@ -1318,21 +1318,15 @@ rtr_create_rtr_info(
rtr_info->index = index;

if (init_matches) {
rtr_info->heap = mem_heap_create(sizeof(*(rtr_info->matches)));
rtr_info->matches = static_cast<matched_rec_t*>(
mem_heap_zalloc(
rtr_info->heap,
sizeof(*rtr_info->matches)));
ut_zalloc_nokey(sizeof *rtr_info->matches));

rtr_info->matches->matched_recs
= UT_NEW_NOKEY(rtr_rec_vector());

rtr_info->matches->bufp = page_align(rtr_info->matches->rec_buf
+ UNIV_PAGE_SIZE_MAX + 1);
mysql_mutex_init(rtr_match_mutex_key,
&rtr_info->matches->rtr_match_mutex,
nullptr);
rtr_info->matches->block.page.lock.init();
}

rtr_info->path = UT_NEW_NOKEY(rtr_node_path_t());
@@ -1451,18 +1445,16 @@ rtr_clean_rtr_info(

if (free_all) {
if (rtr_info->matches) {
if (rtr_info->matches->matched_recs != NULL) {
UT_DELETE(rtr_info->matches->matched_recs);
if (rtr_info->matches->block) {
buf_block_free(rtr_info->matches->block);
rtr_info->matches->block = nullptr;
}

rtr_info->matches->block.page.lock.free();
UT_DELETE(rtr_info->matches->matched_recs);

mysql_mutex_destroy(
&rtr_info->matches->rtr_match_mutex);
}

if (rtr_info->heap) {
mem_heap_free(rtr_info->heap);
ut_free(rtr_info->matches);
}

if (initialized) {
@@ -1572,7 +1564,7 @@ rtr_check_discard_page(
if (auto matches = rtr_info->matches) {
mysql_mutex_lock(&matches->rtr_match_mutex);

if (matches->block.page.id() == id) {
if (matches->block->page.id() == id) {
matches->matched_recs->clear();
matches->valid = false;
}
@@ -1766,7 +1758,7 @@ rtr_leaf_push_match_rec(
ulint data_len;
rtr_rec_t rtr_rec;

buf = match_rec->block.page.frame + match_rec->used;
buf = match_rec->block->page.frame + match_rec->used;
ut_ad(page_rec_is_leaf(rec));

copy = rec_copy(buf, rec, offsets);
@@ -1863,43 +1855,6 @@ rtr_non_leaf_insert_stack_push(
new_seq, level, child_no, my_cursor, mbr_inc);
}

/** Copy a buf_block_t, except "block->page.lock".
@param[in,out] matches copy to match->block
@param[in] block block to copy */
static
void
rtr_copy_buf(
matched_rec_t* matches,
const buf_block_t* block)
{
/* Copy all members of "block" to "matches->block" except "lock".
We skip "lock" because it is not used
from the dummy buf_block_t we create here and because memcpy()ing
it generates (valid) compiler warnings that the vtable pointer
will be copied. */
matches->block.page.lock.free();
new (&matches->block.page) buf_page_t(block->page);
matches->block.page.frame = block->page.frame;
matches->block.unzip_LRU = block->unzip_LRU;

ut_d(matches->block.in_unzip_LRU_list = block->in_unzip_LRU_list);
ut_d(matches->block.in_withdraw_list = block->in_withdraw_list);

/* Skip buf_block_t::lock */
matches->block.modify_clock = block->modify_clock;
#ifdef BTR_CUR_HASH_ADAPT
matches->block.n_hash_helps = block->n_hash_helps;
matches->block.n_fields = block->n_fields;
matches->block.left_side = block->left_side;
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
matches->block.n_pointers = 0;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
matches->block.curr_n_fields = block->curr_n_fields;
matches->block.curr_left_side = block->curr_left_side;
matches->block.index = block->index;
#endif /* BTR_CUR_HASH_ADAPT */
}

/****************************************************************//**
Generate a shadow copy of the page block header to save the
matched records */
@@ -1913,16 +1868,18 @@ rtr_init_match(
{
ut_ad(matches->matched_recs->empty());
matches->locked = false;
rtr_copy_buf(matches, block);
matches->block.page.frame = matches->bufp;
matches->valid = false;
if (!matches->block) {
matches->block = buf_block_alloc();
}

matches->block->page.init(buf_page_t::MEMORY, block->page.id());
/* We have to copy PAGE_*_SUPREMUM_END bytes so that we can
use infimum/supremum of this page as normal btr page for search. */
memcpy(matches->block.page.frame, page, page_is_comp(page)
? PAGE_NEW_SUPREMUM_END : PAGE_OLD_SUPREMUM_END);
matches->used = page_is_comp(page)
? PAGE_NEW_SUPREMUM_END
: PAGE_OLD_SUPREMUM_END;
memcpy(matches->block->page.frame, page, matches->used);
#ifdef RTR_SEARCH_DIAGNOSTIC
ulint pageno = page_get_page_no(page);
fprintf(stderr, "INNODB_RTR: Searching leaf page %d\n",
@@ -2349,7 +2306,7 @@ rtr_cur_search_with_match(
#endif /* UNIV_DEBUG */
/* Pop the last match record and position on it */
match_rec->matched_recs->pop_back();
page_cur_position(test_rec.r_rec, &match_rec->block,
page_cur_position(test_rec.r_rec, match_rec->block,
cursor);
}
} else {

@@ -7408,26 +7408,55 @@ ha_innobase::build_template(

ulint num_v = 0;

if (active_index != MAX_KEY
&& active_index == pushed_idx_cond_keyno) {
m_prebuilt->idx_cond = this;
goto icp;
} else if (pushed_rowid_filter && rowid_filter_is_active) {
icp:
/* Push down an index condition or an end_range check. */
/* MDEV-31154: For pushed down index condition we don't support virtual
column and idx_cond_push() does check for it. For row ID filtering we
don't need such restrictions but we get into trouble trying to use the
ICP path.

1. It should be fine to follow no_icp path if primary key is generated.
However, with user specified primary key(PK), the row is identified by
the PK and those columns need to be converted to mysql format in
row_search_idx_cond_check before doing the comparison. Since secondary
indexes always have PK appended in innodb, it works with current ICP
handling code when fetch_primary_key_cols is set to TRUE.

2. Although ICP comparison and Row ID comparison works on different
columns the current ICP code can be shared by both.

3. In most cases, it works today by jumping to goto no_icp when we
encounter a virtual column. This is hackish and already have some
issues as it cannot handle PK and all states are not reset properly,
for example, idx_cond_n_cols is not reset.

4. We already encountered MDEV-28747 m_prebuilt->idx_cond was being set.

Neither ICP nor row ID comparison needs virtual columns and the code is
simplified to handle both. It should handle the issues. */

const bool pushed_down = active_index != MAX_KEY
&& active_index == pushed_idx_cond_keyno;

m_prebuilt->idx_cond = pushed_down ? this : nullptr;

if (m_prebuilt->idx_cond || m_prebuilt->pk_filter) {
/* Push down an index condition, end_range check or row ID
filter */
for (ulint i = 0; i < n_fields; i++) {
const Field* field = table->field[i];
const bool is_v = !field->stored_in_db();
if (is_v && skip_virtual) {
num_v++;
continue;
}

bool index_contains = index->contains_col_or_prefix(
is_v ? num_v : i - num_v, is_v);
if (is_v && index_contains) {
m_prebuilt->n_template = 0;
num_v = 0;
goto no_icp;

if (is_v) {
if (index_contains) {
/* We want to ensure that ICP is not
used with virtual columns. */
ut_ad(!pushed_down);
m_prebuilt->idx_cond = nullptr;
}
num_v++;
continue;
}

/* Test if an end_range or an index condition
@@ -7447,7 +7476,7 @@ icp:
which would be acceptable if end_range==NULL. */
if (build_template_needs_field_in_icp(
index, m_prebuilt, index_contains,
is_v ? num_v : i - num_v, is_v)) {
i - num_v, false)) {
if (!whole_row) {
field = build_template_needs_field(
index_contains,
@@ -7456,15 +7485,10 @@ icp:
fetch_primary_key_cols,
index, table, i, num_v);
if (!field) {
if (is_v) {
num_v++;
}
continue;
}
}

ut_ad(!is_v);

mysql_row_templ_t* templ= build_template_field(
m_prebuilt, clust_index, index,
table, field, i - num_v, 0);
@@ -7541,15 +7565,16 @@ icp:
*/
}

if (is_v) {
num_v++;
}
}

ut_ad(m_prebuilt->idx_cond_n_cols > 0);
ut_ad(m_prebuilt->idx_cond_n_cols == m_prebuilt->n_template);

num_v = 0;
ut_ad(m_prebuilt->idx_cond_n_cols == m_prebuilt->n_template);
if (m_prebuilt->idx_cond_n_cols == 0) {
/* No columns to push down. It is safe to jump to np ICP
path. */
m_prebuilt->idx_cond = nullptr;
goto no_icp;
}

/* Include the fields that are not needed in index condition
pushdown. */
@@ -7564,7 +7589,7 @@ icp:
bool index_contains = index->contains_col_or_prefix(
is_v ? num_v : i - num_v, is_v);

if (!build_template_needs_field_in_icp(
if (is_v || !build_template_needs_field_in_icp(
index, m_prebuilt, index_contains,
is_v ? num_v : i - num_v, is_v)) {
/* Not needed in ICP */
@@ -7597,7 +7622,7 @@ icp:
} else {
no_icp:
/* No index condition pushdown */
m_prebuilt->idx_cond = NULL;
ut_ad(!m_prebuilt->idx_cond);
ut_ad(num_v == 0);

for (ulint i = 0; i < n_fields; i++) {

@@ -1728,11 +1728,9 @@ instant_alter_column_possible(
ut_ad(!is_null || nullable);
n_nullable += nullable;
n_add++;
uint l;
uint l = (*af)->pack_length();
switch ((*af)->type()) {
case MYSQL_TYPE_VARCHAR:
l = reinterpret_cast<const Field_varstring*>
(*af)->get_length();
variable_length:
if (l >= min_local_len) {
max_size += blob_prefix
@@ -1746,7 +1744,6 @@ instant_alter_column_possible(
if (!is_null) {
min_size += l;
}
l = (*af)->pack_length();
max_size += l;
lenlen += l > 255 ? 2 : 1;
}
@@ -1760,7 +1757,6 @@ instant_alter_column_possible(
((*af))->get_length();
goto variable_length;
default:
l = (*af)->pack_length();
if (l > 255 && ib_table.not_redundant()) {
goto variable_length;
}

@@ -349,15 +349,12 @@ dtuple_set_types_binary(
dtuple_t* tuple, /*!< in: data tuple */
ulint n) /*!< in: number of fields to set */
MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Checks if a dtuple contains an SQL null value.
@return TRUE if some field is SQL null */
/** Checks if a dtuple contains an SQL null value.
@param tuple tuple
@param fields_number number of fields in the tuple to check
@return true if some field is SQL null */
UNIV_INLINE
ibool
dtuple_contains_null(
/*=================*/
const dtuple_t* tuple) /*!< in: dtuple */
MY_ATTRIBUTE((nonnull, warn_unused_result));
bool dtuple_contains_null(const dtuple_t *tuple, ulint fields_number = 0);
/**********************************************************//**
Checks that a data field is typed. Asserts an error if not.
@return TRUE if ok */

@@ -596,28 +596,18 @@ data_write_sql_null(
memset(data, 0, len);
}

/**********************************************************************//**
Checks if a dtuple contains an SQL null value.
@return TRUE if some field is SQL null */
/** Checks if a dtuple contains an SQL null value.
@param tuple tuple
@param fields_number number of fields in the tuple to check
@return true if some field is SQL null */
UNIV_INLINE
ibool
dtuple_contains_null(
/*=================*/
const dtuple_t* tuple) /*!< in: dtuple */
bool dtuple_contains_null(const dtuple_t *tuple, ulint fields_number)
{
ulint n;
ulint i;

n = dtuple_get_n_fields(tuple);

for (i = 0; i < n; i++) {
if (dfield_is_null(dtuple_get_nth_field(tuple, i))) {

return(TRUE);
}
}

return(FALSE);
ulint n= fields_number ? fields_number : dtuple_get_n_fields(tuple);
for (ulint i= 0; i < n; i++)
if (dfield_is_null(dtuple_get_nth_field(tuple, i)))
return true;
return false;
}

/**************************************************************//**

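The reworked dtuple_contains_null() above takes an optional field count, so callers can restrict the SQL NULL check to a prefix of the tuple instead of all fields. A hedged usage sketch mirroring the btr_pcur_t::restore_position() hunk earlier in this diff (illustration only, not part of the commit):

/* Check only the first index->n_uniq fields of the search tuple;
   passing 0 (the default) keeps the old behaviour of checking every field. */
if (index->n_nullable && dtuple_contains_null(tuple, index->n_uniq))
{
  /* some field in the unique prefix is SQL NULL, so do not treat the
     restored cursor position as matching on the unique key */
}
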
Some files were not shown because too many files have changed in this diff.