
Merge 10.11 -> 11.4

Signed-off-by: Kristian Nielsen <knielsen@knielsen-hq.org>
Kristian Nielsen
2024-12-05 11:01:42 +01:00
78 changed files with 1333 additions and 273 deletions

View File

@@ -9817,7 +9817,7 @@ void init_re(void)
//"[[:space:]]*CALL[[:space:]]|" // XXX run_query_stmt doesn't read multiple result sets //"[[:space:]]*CALL[[:space:]]|" // XXX run_query_stmt doesn't read multiple result sets
"[[:space:]]*CHANGE[[:space:]]|" "[[:space:]]*CHANGE[[:space:]]|"
"[[:space:]]*CHECKSUM[[:space:]]|" "[[:space:]]*CHECKSUM[[:space:]]|"
"[[:space:]]*COMMIT[[:space:]]|" "[[:space:]]*COMMIT[[:space:]]*|"
"[[:space:]]*COMPOUND[[:space:]]|" "[[:space:]]*COMPOUND[[:space:]]|"
"[[:space:]]*CREATE[[:space:]]+DATABASE[[:space:]]|" "[[:space:]]*CREATE[[:space:]]+DATABASE[[:space:]]|"
"[[:space:]]*CREATE[[:space:]]+INDEX[[:space:]]|" "[[:space:]]*CREATE[[:space:]]+INDEX[[:space:]]|"

View File

@@ -125,16 +125,20 @@ FUNCTION(DTRACE_INSTRUMENT target)
     WORKING_DIRECTORY ${objdir}
   )
 ELSEIF(CMAKE_SYSTEM_NAME MATCHES "Linux")
+  IF (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
     # dtrace on Linux runs gcc and uses flags from environment
     SET(CFLAGS_SAVED $ENV{CFLAGS})
     SET(ENV{CFLAGS} ${CMAKE_C_FLAGS})
+  ENDIF()
   SET(outfile "${CMAKE_BINARY_DIR}/probes_mysql.o")
   # Systemtap object
   EXECUTE_PROCESS(
     COMMAND ${DTRACE} -G -s ${CMAKE_SOURCE_DIR}/include/probes_mysql.d.base
             -o ${outfile}
   )
+  IF (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
     SET(ENV{CFLAGS} ${CFLAGS_SAVED})
+  ENDIF()
 ENDIF()
 # Do not try to extend the library if we have not built the .o file

View File

@@ -70,6 +70,17 @@
# endif /* GNUC >= 3.1 */
#endif
/* gcc 7.5.0 does not support __attribute__((no_sanitize("undefined")) */
#ifndef ATTRIBUTE_NO_UBSAN
# if (GCC_VERSION >= 8000) || defined(__clang__)
# define ATTRIBUTE_NO_UBSAN __attribute__((no_sanitize("undefined")))
# elif (GCC_VERSION >= 6001)
# define ATTRIBUTE_NO_UBSAN __attribute__((no_sanitize_undefined))
# else
# define ATTRIBUTE_NO_UBSAN
# endif
#endif
/* Define pragmas to disable warnings for stack frame checking */
#if defined(__clang__)
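Note on the new ATTRIBUTE_NO_UBSAN macro: it is meant to be attached to individual functions whose intentional wrap-around should not abort a -fsanitize=undefined build. A minimal standalone sketch of that usage (the helper name and the simplified compiler check below are illustrative, not MariaDB code):

#include <stdio.h>

/* Simplified stand-in for the macro added above: clang and gcc >= 8 accept
   no_sanitize("undefined"); older compilers get an empty definition. */
#if defined(__clang__) || (defined(__GNUC__) && __GNUC__ >= 8)
# define ATTRIBUTE_NO_UBSAN __attribute__((no_sanitize("undefined")))
#else
# define ATTRIBUTE_NO_UBSAN
#endif

/* Hypothetical hash helper whose signed multiply is allowed to wrap;
   the attribute keeps UBSan from reporting the intentional overflow. */
static ATTRIBUTE_NO_UBSAN int toy_hash(const char *s)
{
  int h= 0;
  while (*s)
    h= h * 131 + *s++;
  return h;
}

int main(void)
{
  printf("%d\n", toy_hash("mariadb"));
  return 0;
}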

View File

@@ -97,7 +97,7 @@ C_MODE_START
 /**
   A cycle timer.
-  On clang we use __builtin_readcyclecounter(), except for AARCH64.
+  On clang we use __builtin_readcyclecounter(), except for AARCH64 and RISC-V.
   On other compilers:
   On IA-32 and AMD64, we use the RDTSC instruction.
@@ -152,7 +152,7 @@ C_MODE_START
 */
 static inline ulonglong my_timer_cycles(void)
 {
-# if __has_builtin(__builtin_readcyclecounter) && !defined (__aarch64__)
+# if __has_builtin(__builtin_readcyclecounter) && !defined (__aarch64__) && !(defined(__linux__) && defined(__riscv))
 #define MY_TIMER_ROUTINE_CYCLES MY_TIMER_ROUTINE_AARCH64
   return __builtin_readcyclecounter();
 # elif defined _M_IX86 || defined _M_X64 || defined __i386__ || defined __x86_64__
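For orientation, a rough standalone sketch of the selection logic this hunk adjusts (the names and fallbacks below are illustrative, not the actual my_rdtsc.h code): use the clang builtin only where it is known to be usable, otherwise fall back to RDTSC on x86, otherwise a stub.

#include <stdint.h>
#include <stdio.h>

#ifndef __has_builtin
# define __has_builtin(x) 0   /* older GCC has no __has_builtin */
#endif

/* Illustrative cycle counter: clang builtin except on AArch64 and
   Linux/RISC-V (where it is not reliably usable), RDTSC on x86,
   otherwise 0 as a stub. */
static inline uint64_t cycles(void)
{
#if __has_builtin(__builtin_readcyclecounter) && !defined(__aarch64__) && \
    !(defined(__linux__) && defined(__riscv))
  return __builtin_readcyclecounter();
#elif defined(__x86_64__) || defined(__i386__)
  uint32_t lo, hi;
  __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
  return ((uint64_t) hi << 32) | lo;
#else
  return 0;                      /* no cheap cycle counter in this sketch */
#endif
}

int main(void)
{
  uint64_t t0= cycles();
  volatile unsigned long sink= 0;
  for (unsigned long i= 0; i < 1000000; i++)
    sink+= i;
  printf("elapsed cycles: %llu (sink=%lu)\n",
         (unsigned long long) (cycles() - t0), (unsigned long) sink);
  return 0;
}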

View File

@@ -91,12 +91,12 @@ while ($_rpl_server)
 --echo **** SHOW BINLOG EVENTS on $CURRENT_CONNECTION ****
 let $binlog_name= query_get_value("SHOW MASTER STATUS", File, 1);
 --echo binlog_name = '$binlog_name'
-eval SHOW BINLOG EVENTS IN '$binlog_name';
+eval SHOW BINLOG EVENTS IN '$binlog_name' LIMIT 200;
 --echo
 --echo **** SHOW RELAYLOG EVENTS on $CURRENT_CONNECTION ****
 let $relaylog_name= query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1);
 --echo relaylog_name = '$relaylog_name'
-eval SHOW RELAYLOG EVENTS IN '$relaylog_name';
+eval SHOW RELAYLOG EVENTS IN '$relaylog_name' LIMIT 200;
 --let $_rpl_is_first_server= 0

View File

@@ -23588,6 +23588,60 @@ f a c
0 0 -1
DROP VIEW v;
DROP TABLE t1;
#
# MDEV-32395: update_depend_map_for_order: SEGV at
# /mariadb-11.3.0/sql/sql_select.cc:16583
#
create table t1 (c1 int);
insert into t1 values (1), (2);
create table t2 (c2 int);
insert into t2 values (1), (2);
create table t3 (c3 int);
insert into t3 values (1), (2);
set statement optimizer_switch='condition_pushdown_for_derived=off,condition_pushdown_for_subquery=off,condition_pushdown_from_having=off' for
select t1.c1 as a from t2, t1 where t1.c1=t2.c2
order by (select c3 from t3 group by a having a=2);
a
1
2
drop table t1, t2, t3;
# Test case 2
CREATE TABLE t1 ( x BOOLEAN NOT NULL );
INSERT INTO t1 ( x ) VALUES ( 1 ) ;
UPDATE t1 SET x = 1 WHERE x = 1 ;
INSERT INTO t1 ( x ) VALUES ( 1 ) , ( x IN ( SELECT x FROM ( SELECT ( SELECT EXISTS ( SELECT * FROM ( SELECT DISTINCT ( - CASE WHEN x = 1 THEN 1 ELSE x + 1 END >= x IS NOT NULL = 1 AND x = 1 ) OR x = x OR x = 'x' FROM t1 AS x GROUP BY x ) AS x WHERE 1 / x GROUP BY x HAVING ( 1 = 1 AND x = 1 ) ) FROM t1 GROUP BY EXISTS ( SELECT 1 ) ) FROM t1 UNION SELECT x FROM t1 ) AS x ) ) ;
DROP TABLE t1;
# Test case 3
CREATE TABLE t0 ( c6 INT , c21 INT ) ;
INSERT INTO t0 VALUES ( 55 , -95 ) , ( 9 , 90 ) ;
ALTER TABLE t0 ADD COLUMN c37 INT AFTER c6 ;
INSERT INTO t0 VALUES ( ) , ( ) ;
SELECT t0 . c6 AS c42 FROM ( SELECT t0 . c6 = TRIM( TRAILING FROM 96 ) SOUNDS LIKE CONVERT ( t0 . c6 , UNSIGNED ) >> PI ( ) AS c49 FROM t0 ) AS t1 JOIN t0 ON RTRIM ( - RAND ( -66 ) BETWEEN FIND_IN_SET ( 20 , UNHEX ( -80 ) IS NULL OR IF ( 85 , -83 , -113 ) ) AND -125 ) / EXP ( c21 ) = t1 . c49 ORDER BY c42 , ( c42 + ( SELECT c21 AS c61 FROM t0 WHERE t0 . c37 >= -19.601384 = RAND ( ) / TRIM( t0 . c21 FROM 'C@rG3D(#9*17(a.,rV' ) = -106 GROUP BY c21 , c42 HAVING c42 = -73 LIMIT 1 ) ) ;
c42
9
9
55
55
drop table t0;
#
# MDEV-32329 pushdown from having into where: Server crashes at sub_select
#
WITH RECURSIVE cte AS ( SELECT 1 as x UNION SELECT x FROM cte)
SELECT ( SELECT 1 FROM ( SELECT 1 FROM cte) dt GROUP BY x HAVING x= 1 )
FROM cte
GROUP BY 1 ;
( SELECT 1 FROM ( SELECT 1 FROM cte) dt GROUP BY x HAVING x= 1 )
1
# Test case 2
WITH
cte1 AS ( SELECT 1 as x UNION SELECT 1),
cte2 AS ( SELECT 1 as x UNION SELECT 1)
SELECT
( SELECT 1 FROM ( SELECT 1 FROM cte1) dt GROUP BY x HAVING x= 1 )
FROM cte2
GROUP BY 1 ;
( SELECT 1 FROM ( SELECT 1 FROM cte1) dt GROUP BY x HAVING x= 1 )
1
# End of 10.5 tests
#
# MDEV-28958: condition pushable into view after simplification

View File

@@ -4418,6 +4418,58 @@ SELECT * FROM v,t1 WHERE f = DEFAULT(c);
DROP VIEW v;
DROP TABLE t1;
--echo #
--echo # MDEV-32395: update_depend_map_for_order: SEGV at
--echo # /mariadb-11.3.0/sql/sql_select.cc:16583
--echo #
create table t1 (c1 int);
insert into t1 values (1), (2);
create table t2 (c2 int);
insert into t2 values (1), (2);
create table t3 (c3 int);
insert into t3 values (1), (2);
set statement optimizer_switch='condition_pushdown_for_derived=off,condition_pushdown_for_subquery=off,condition_pushdown_from_having=off' for
select t1.c1 as a from t2, t1 where t1.c1=t2.c2
order by (select c3 from t3 group by a having a=2);
drop table t1, t2, t3;
--echo # Test case 2
CREATE TABLE t1 ( x BOOLEAN NOT NULL );
INSERT INTO t1 ( x ) VALUES ( 1 ) ;
UPDATE t1 SET x = 1 WHERE x = 1 ;
INSERT INTO t1 ( x ) VALUES ( 1 ) , ( x IN ( SELECT x FROM ( SELECT ( SELECT EXISTS ( SELECT * FROM ( SELECT DISTINCT ( - CASE WHEN x = 1 THEN 1 ELSE x + 1 END >= x IS NOT NULL = 1 AND x = 1 ) OR x = x OR x = 'x' FROM t1 AS x GROUP BY x ) AS x WHERE 1 / x GROUP BY x HAVING ( 1 = 1 AND x = 1 ) ) FROM t1 GROUP BY EXISTS ( SELECT 1 ) ) FROM t1 UNION SELECT x FROM t1 ) AS x ) ) ;
DROP TABLE t1;
--echo # Test case 3
CREATE TABLE t0 ( c6 INT , c21 INT ) ;
INSERT INTO t0 VALUES ( 55 , -95 ) , ( 9 , 90 ) ;
ALTER TABLE t0 ADD COLUMN c37 INT AFTER c6 ;
INSERT INTO t0 VALUES ( ) , ( ) ;
SELECT t0 . c6 AS c42 FROM ( SELECT t0 . c6 = TRIM( TRAILING FROM 96 ) SOUNDS LIKE CONVERT ( t0 . c6 , UNSIGNED ) >> PI ( ) AS c49 FROM t0 ) AS t1 JOIN t0 ON RTRIM ( - RAND ( -66 ) BETWEEN FIND_IN_SET ( 20 , UNHEX ( -80 ) IS NULL OR IF ( 85 , -83 , -113 ) ) AND -125 ) / EXP ( c21 ) = t1 . c49 ORDER BY c42 , ( c42 + ( SELECT c21 AS c61 FROM t0 WHERE t0 . c37 >= -19.601384 = RAND ( ) / TRIM( t0 . c21 FROM 'C@rG3D(#9*17(a.,rV' ) = -106 GROUP BY c21 , c42 HAVING c42 = -73 LIMIT 1 ) ) ;
drop table t0;
--echo #
--echo # MDEV-32329 pushdown from having into where: Server crashes at sub_select
--echo #
WITH RECURSIVE cte AS ( SELECT 1 as x UNION SELECT x FROM cte)
SELECT ( SELECT 1 FROM ( SELECT 1 FROM cte) dt GROUP BY x HAVING x= 1 )
FROM cte
GROUP BY 1 ;
--echo # Test case 2
WITH
cte1 AS ( SELECT 1 as x UNION SELECT 1),
cte2 AS ( SELECT 1 as x UNION SELECT 1)
SELECT
( SELECT 1 FROM ( SELECT 1 FROM cte1) dt GROUP BY x HAVING x= 1 )
FROM cte2
GROUP BY 1 ;
--echo # End of 10.5 tests

View File

@@ -1299,3 +1299,49 @@ ERROR: Bad syntax in rewrite-db. Expected syntax is FROM->TO.
ERROR: Bad syntax in rewrite-db. Expected syntax is FROM->TO.
ERROR: Bad syntax in rewrite-db. Expected syntax is FROM->TO.
ERROR: Bad syntax in rewrite-db. Expected syntax is FROM->TO.
#
# MDEV-31761: mariadb-binlog prints fractional timestamp part incorrectly
#
SET SESSION binlog_format= MIXED;
RESET MASTER;
SET time_zone= '+02:00';
CREATE TABLE t (a INT,
b TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6));
set SESSION timestamp= 1689978980.012345;
INSERT INTO t (a) VALUES (1);
SELECT * from t;
a b
1 2023-07-22 00:36:20.012345
FLUSH BINARY LOGS;
SET SESSION timestamp= 1689978980.567890;
SET SESSION binlog_format= ROW;
UPDATE t SET a = 2;
FLUSH BINARY LOGS;
SET SESSION binlog_format= STATEMENT;
DROP TABLE t;
SELECT * FROM t;
a b
1 2023-07-22 00:36:20.012345
SELECT * FROM t;
a b
2 2023-07-22 00:36:20.567890
DROP TABLE t;
SET time_zone= default;
#
# MDEV-24959: ER_BINLOG_ROW_LOGGING_FAILED (1534: Writing one row to the row-based binary log failed)
#
SET SESSION binlog_format= ROW;
SET SESSION binlog_row_image= MINIMAL;
RESET MASTER;
CREATE TABLE t1 (a INT NOT NULL DEFAULT 0 PRIMARY KEY);
REPLACE INTO t1 () VALUES (),();
DROP TABLE t1;
FLUSH BINARY LOGS;
SET SESSION binlog_format= STATEMENT;
SET SESSION binlog_row_image= default;
FOUND 1 /Number of rows: 2/ in mdev24959_1.txt
FOUND 1 /DROP TABLE/ in mdev24959_1.txt
FOUND 1 /Number of rows: 2/ in mdev24959_2.txt
FOUND 1 /DROP TABLE/ in mdev24959_2.txt
FOUND 1 /INSERT INTO .* VALUES/ in mdev24959_2.txt
FOUND 1 /SET /[*] no columns [*]// in mdev24959_2.txt

View File

@@ -637,3 +637,82 @@ FLUSH LOGS;
--exec $MYSQL_BINLOG --rewrite-db=" ->" --short-form $MYSQLD_DATADIR/master-bin.000001 2>&1
--exec $MYSQL_BINLOG --rewrite-db=" test -> foo " --short-form $MYSQLD_DATADIR/master-bin.000001 > /dev/null 2> $MYSQLTEST_VARDIR/tmp/mysqlbinlog.warn
--echo #
--echo # MDEV-31761: mariadb-binlog prints fractional timestamp part incorrectly
--echo #
SET SESSION binlog_format= MIXED;
RESET MASTER;
SET time_zone= '+02:00';
CREATE TABLE t (a INT,
b TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6));
set SESSION timestamp= 1689978980.012345;
INSERT INTO t (a) VALUES (1);
SELECT * from t;
FLUSH BINARY LOGS;
SET SESSION timestamp= 1689978980.567890;
SET SESSION binlog_format= ROW;
UPDATE t SET a = 2;
FLUSH BINARY LOGS;
SET SESSION binlog_format= STATEMENT;
# Replay to see that timestamps are applied correctly.
# The bug was that leading zeros on the fractional part were not included in
# the mysqlbinlog output, so 1689978980.012345 was applied as 1689978980.12345.
DROP TABLE t;
--let $datadir= `select @@datadir`
--exec $MYSQL_BINLOG $datadir/master-bin.000001 | $MYSQL test
SELECT * FROM t;
--exec $MYSQL_BINLOG $datadir/master-bin.000002 | $MYSQL test
SELECT * FROM t;
DROP TABLE t;
SET time_zone= default;
--echo #
--echo # MDEV-24959: ER_BINLOG_ROW_LOGGING_FAILED (1534: Writing one row to the row-based binary log failed)
--echo #
SET SESSION binlog_format= ROW;
SET SESSION binlog_row_image= MINIMAL;
RESET MASTER;
CREATE TABLE t1 (a INT NOT NULL DEFAULT 0 PRIMARY KEY);
REPLACE INTO t1 () VALUES (),();
DROP TABLE t1;
FLUSH BINARY LOGS;
SET SESSION binlog_format= STATEMENT;
SET SESSION binlog_row_image= default;
--exec $MYSQL_BINLOG --base64-output=decode-rows $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mdev24959_1.txt
--exec $MYSQL_BINLOG --base64-output=decode-rows --verbose $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mdev24959_2.txt
--let SEARCH_FILE= $MYSQLTEST_VARDIR/tmp/mdev24959_1.txt
--let SEARCH_ABORT= NOT FOUND
--let SEARCH_PATTERN= Number of rows: 2
--source include/search_pattern_in_file.inc
# There was a bug that mysqlbinlog would get an error while decoding the
# update rows event with no after image and abort the dump; test that now
# the dump is complete and includes the final DROP TABLE.
--let SEARCH_PATTERN= DROP TABLE
--source include/search_pattern_in_file.inc
--let SEARCH_FILE= $MYSQLTEST_VARDIR/tmp/mdev24959_2.txt
--let SEARCH_PATTERN= Number of rows: 2
--source include/search_pattern_in_file.inc
--let SEARCH_PATTERN= DROP TABLE
--source include/search_pattern_in_file.inc
--let SEARCH_PATTERN= INSERT INTO .* VALUES
--source include/search_pattern_in_file.inc
--let SEARCH_PATTERN= SET /[*] no columns [*]/
--source include/search_pattern_in_file.inc
--remove_file $MYSQLTEST_VARDIR/tmp/mdev24959_1.txt
--remove_file $MYSQLTEST_VARDIR/tmp/mdev24959_2.txt
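The MDEV-31761 part of this test depends on zero-padding of the microsecond part when the binlog is replayed through the client. A small standalone C sketch of the difference (the SET TIMESTAMP format below only approximates the mariadb-binlog output):

#include <stdio.h>

int main(void)
{
  unsigned long sec=  1689978980UL;
  unsigned long usec= 12345UL;          /* i.e. .012345 seconds */

  /* Without a fixed width the leading zeros are dropped and the value shifts. */
  printf("buggy:   SET TIMESTAMP=%lu.%lu;\n",   sec, usec);
  /* %06lu keeps the fractional part at six digits, as the fix requires. */
  printf("correct: SET TIMESTAMP=%lu.%06lu;\n", sec, usec);
  return 0;
}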

View File

@@ -2858,6 +2858,26 @@ id select_type table type possible_keys key key_len ref rows Extra
set optimizer_switch=@tmp_os;
drop table t1,t2,t3;
#
# MDEV-31030 Assertion `!error' failed in ha_partition::update_row on UPDATE
#
create table t (c int)
partition by list (1 div c) (
partition p0 values in (null),
partition p values in (1));
insert ignore into t values (0), (1), (0);
Warnings:
Warning 1365 Division by 0
Warning 1365 Division by 0
update t set c= 2;
ERROR HY000: Table has no partition for value 0
update ignore t set c= 3;
select * from t;
c
0
0
1
drop table t;
#
# MDEV-32388 MSAN / Valgrind errors in
# Item_func_like::get_mm_leaf upon query from partitioned table
#

View File

@@ -3070,6 +3070,22 @@ set optimizer_switch=@tmp_os;
drop table t1,t2,t3;
--echo #
--echo # MDEV-31030 Assertion `!error' failed in ha_partition::update_row on UPDATE
--echo #
create table t (c int)
partition by list (1 div c) (
partition p0 values in (null),
partition p values in (1));
insert ignore into t values (0), (1), (0);
--error ER_NO_PARTITION_FOR_GIVEN_VALUE
update t set c= 2;
--disable_warnings
update ignore t set c= 3;
--enable_warnings
select * from t;
drop table t;
--echo #
--echo # MDEV-32388 MSAN / Valgrind errors in

View File

@@ -191,3 +191,58 @@ Warnings:
Warning 1292 Truncated incorrect INTEGER value: '+ -><()~*:""&|'
SET @@sort_buffer_size=DEFAULT;
End of 5.0 tests.
# Start of 10.5 tests
#
# MDEV-25593 Assertion `0' failed in Type_handler_temporal_result::Item_get_date on double EXECUTE
#
SET time_zone='+00:00';
SET timestamp=UNIX_TIMESTAMP('2001-01-01 10:20:30');
PREPARE stmt FROM 'SELECT ? AS c1';
EXECUTE stmt USING current_timestamp;
c1
2001-01-01 10:20:30
EXECUTE stmt USING @unknown;
c1
NULL
DEALLOCATE PREPARE stmt;
PREPARE stmt FROM 'SELECT ? AS c1 FROM DUAL';
EXECUTE stmt USING current_time;
c1
10:20:30
EXECUTE stmt USING DEFAULT;
c1
NULL
DEALLOCATE PREPARE stmt;
PREPARE stmt FROM 'SELECT ? AS c1 FROM DUAL';
EXECUTE stmt USING current_time;
c1
10:20:30
EXECUTE stmt USING IGNORE;
c1
NULL
DEALLOCATE PREPARE stmt;
PREPARE stmt FROM "SELECT DATE('') between''AND ? AS c1";
EXECUTE stmt USING current_time;
c1
1
Warnings:
Warning 1292 Truncated incorrect datetime value: ''
Warning 1292 Truncated incorrect datetime value: ''
EXECUTE stmt USING @unknown;
c1
NULL
Warnings:
Warning 1292 Truncated incorrect datetime value: ''
Warning 1292 Truncated incorrect datetime value: ''
DEALLOCATE PREPARE stmt;
PREPARE stmt FROM 'SELECT CONCAT(UNIX_TIMESTAMP(?)) AS c1';
EXECUTE stmt USING CURRENT_TIMESTAMP;
c1
978344430
EXECUTE stmt USING @unknown;
c1
NULL
DEALLOCATE PREPARE stmt;
SET timestamp=DEFAULT;
SET time_zone=DEFAULT;
# End of 10.5 tests

View File

@@ -217,3 +217,41 @@ SELECT LEFT("12345", @@ft_boolean_syntax);
SET @@sort_buffer_size=DEFAULT;
--echo End of 5.0 tests.
--echo # Start of 10.5 tests
--echo #
--echo # MDEV-25593 Assertion `0' failed in Type_handler_temporal_result::Item_get_date on double EXECUTE
--echo #
SET time_zone='+00:00';
SET timestamp=UNIX_TIMESTAMP('2001-01-01 10:20:30');
PREPARE stmt FROM 'SELECT ? AS c1';
EXECUTE stmt USING current_timestamp;
EXECUTE stmt USING @unknown;
DEALLOCATE PREPARE stmt;
PREPARE stmt FROM 'SELECT ? AS c1 FROM DUAL';
EXECUTE stmt USING current_time;
EXECUTE stmt USING DEFAULT;
DEALLOCATE PREPARE stmt;
PREPARE stmt FROM 'SELECT ? AS c1 FROM DUAL';
EXECUTE stmt USING current_time;
EXECUTE stmt USING IGNORE;
DEALLOCATE PREPARE stmt;
PREPARE stmt FROM "SELECT DATE('') between''AND ? AS c1";
EXECUTE stmt USING current_time;
EXECUTE stmt USING @unknown;
DEALLOCATE PREPARE stmt;
PREPARE stmt FROM 'SELECT CONCAT(UNIX_TIMESTAMP(?)) AS c1';
EXECUTE stmt USING CURRENT_TIMESTAMP;
EXECUTE stmt USING @unknown;
DEALLOCATE PREPARE stmt;
SET timestamp=DEFAULT;
SET time_zone=DEFAULT;
--echo # End of 10.5 tests

View File

@@ -24,3 +24,17 @@ SELECT * FROM mysql.servers;
Server_name Host Db Username Password Port Socket Wrapper Owner
s1    3306    bar mysql
DROP SERVER s1;
#
# MDEV-33783 CREATE SERVER segfaults on wrong mysql.servers
#
create server s1 foreign data wrapper foo options(user 'a');
alter server s1 options(host 'server.example.org');
rename table mysql.servers to mysql.servers_save;
create table mysql.servers (x int);
alter server s1 options(host 'server.example.org');
ERROR HY000: The foreign server name you are trying to reference does not exist. Data source error: s1
create server s2 foreign data wrapper foo options(user 'a');
ERROR HY000: Can't read record in system table
drop table mysql.servers;
rename table mysql.servers_save to mysql.servers;
drop server s1;

View File

@@ -22,3 +22,18 @@ DROP SERVER s1;
CREATE SERVER s1 FOREIGN DATA WRAPPER mysql OPTIONS(SOCKET 'bar');
SELECT * FROM mysql.servers;
DROP SERVER s1;
--echo #
--echo # MDEV-33783 CREATE SERVER segfaults on wrong mysql.servers
--echo #
create server s1 foreign data wrapper foo options(user 'a');
alter server s1 options(host 'server.example.org');
rename table mysql.servers to mysql.servers_save;
create table mysql.servers (x int);
--error ER_FOREIGN_SERVER_DOESNT_EXIST
alter server s1 options(host 'server.example.org');
--error ER_CANT_FIND_SYSTEM_REC
create server s2 foreign data wrapper foo options(user 'a');
drop table mysql.servers;
rename table mysql.servers_save to mysql.servers;
drop server s1;

View File

@@ -10,8 +10,6 @@
 #
 ##############################################################################
-galera_as_slave_ctas : MDEV-28378 timeout
 galera_sequences : MDEV-32561 WSREP FSM failure: no such a transition REPLICATING -> COMMITTED
-galera_concurrent_ctas : MDEV-32779 galera_concurrent_ctas: assertion in the galera::ReplicatorSMM::finish_cert()
 galera_as_slave_replay : MDEV-32780 galera_as_slave_replay: assertion in the wsrep::transaction::before_rollback()
 galera_slave_replay : MDEV-32780 galera_as_slave_replay: assertion in the wsrep::transaction::before_rollback()

View File

@@ -0,0 +1,22 @@
connection node_2;
connection node_1;
connect bf_trx, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connect victim_trx, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connect node_2_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_2;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
connection victim_trx;
START TRANSACTION;
INSERT INTO t1 VALUES (2), (1);
connection node_2_ctrl;
SET GLOBAL debug_dbug = '+d,sync.wsrep_apply_cb';
connection bf_trx;
INSERT INTO t1 VALUES (1), (2);
connection node_2_ctrl;
SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
SET GLOBAL debug_dbug = '';
connection victim_trx;
SET DEBUG_SYNC = "wsrep_at_dispatch_end_before_result SIGNAL signal.wsrep_apply_cb WAIT_FOR bf_abort";
COMMIT;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
SET DEBUG_SYNC = 'RESET';
DROP TABLE t1;

View File

@@ -0,0 +1,22 @@
connection node_2;
connection node_1;
connect master, 127.0.0.1, root, , test, $NODE_MYPORT_3;
connect node_1_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1;
START SLAVE;
connection master;
CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
connection node_1;
SET GLOBAL debug_dbug = '+d,rpl_parallel_simulate_temp_err_xid,sync.wsrep_retry_event_group';
connection master;
INSERT INTO t1 VALUES (1);
connection node_1_ctrl;
SET debug_sync = 'now WAIT_FOR sync.wsrep_retry_event_group_reached';
SET GLOBAL debug_dbug = '';
SET debug_sync = 'now SIGNAL signal.wsrep_retry_event_group';
connection node_1;
SET debug_sync = 'RESET';
connection master;
DROP TABLE t1;
connection node_1;
STOP SLAVE;

View File

@@ -3,6 +3,7 @@ connection node_1;
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_1;
SET DEBUG_SYNC = 'wsrep_create_table_as_select WAIT_FOR continue';
CREATE table t1 as SELECT SLEEP(0.1);;
@@ -17,14 +18,30 @@ connection node_1b;
SET SESSION debug_sync = 'now WAIT_FOR sync.wsrep_apply_cb_reached';
# Signal first CTAS to continue and wait until CTAS has executed
SET DEBUG_SYNC= 'now SIGNAL continue';
connection node_2b;
# Wait first CTAS to replicate
SELECT * FROM t1;
SLEEP(0.2)
0
connection node_1b;
SET GLOBAL debug_dbug= '';
SET DEBUG_SYNC = 'now SIGNAL signal.wsrep_apply_cb';
connection node_2a;
connection node_1b;
SET DEBUG_SYNC= 'RESET';
connection node_2;
SELECT * FROM t1;
SLEEP(0.2)
0
connection node_1;
SELECT * FROM t1;
SLEEP(0.2)
0
DROP TABLE t1;
disconnect node_1a;
disconnect node_1b;
disconnect node_2a;
disconnect node_2b;
disconnect node_2;
disconnect node_1;
# End of test

View File

@@ -35,3 +35,10 @@ SELECT VARIABLE_VALUE LIKE '%gmcast.segment = 3%' FROM INFORMATION_SCHEMA.GLOBAL
VARIABLE_VALUE LIKE '%gmcast.segment = 3%'
1
DROP TABLE t1;
connection node_1;
call mtr.add_suppression("WSREP: read_completion_condition.*");
call mtr.add_suppression("WSREP: read_handler.*");
disconnect node_3;
disconnect node_4;
disconnect node_2;
disconnect node_1;

View File

@@ -0,0 +1,4 @@
!include ../galera_2nodes.cnf
[mysqltest]
ps-protocol

View File

@@ -0,0 +1,57 @@
#
# MDEV-35446
#
# Test BF abort of a transaction under PS protocol, after
# a statement is prepared (and the diagnostics area is
# disabled).
#
--source include/have_debug_sync.inc
--source include/galera_cluster.inc
#
# Setup: bf_trx executes in node_1 and will BF abort victim_trx.
#
--connect bf_trx, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connect victim_trx, 127.0.0.1, root, , test, $NODE_MYPORT_2
--connect node_2_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_2
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
--connection victim_trx
START TRANSACTION;
INSERT INTO t1 VALUES (2), (1);
--connection node_2_ctrl
SET GLOBAL debug_dbug = '+d,sync.wsrep_apply_cb';
--connection bf_trx
INSERT INTO t1 VALUES (1), (2);
--connection node_2_ctrl
SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
SET GLOBAL debug_dbug = '';
#
# COMMIT the victim_trx and expect a deadlock error.
# Here we park the client in a sync point at the end of prepare
# command (COM_STMT_PREPARE), where the diagnostics area of the
# client has already been disabled. The client signals the
# applier to continue and will be BF aborted.
# If bug is present, the transaction is aborted, but the COMMIT
# statement succeeds (instead of returning deadlock error).
#
--connection victim_trx
--disable_ps_protocol
SET DEBUG_SYNC = "wsrep_at_dispatch_end_before_result SIGNAL signal.wsrep_apply_cb WAIT_FOR bf_abort";
--enable_ps_protocol
--error ER_LOCK_DEADLOCK
COMMIT;
#
# Cleanup
#
SET DEBUG_SYNC = 'RESET';
DROP TABLE t1;

View File

@@ -0,0 +1,10 @@
!include ../galera_2nodes_as_slave.cnf
[mysqld]
log-bin=mysqld-bin
log-slave-updates
binlog-format=ROW
[mysqld.1]
slave-parallel-threads=2
slave-parallel-mode=optimistic

View File

@@ -0,0 +1,52 @@
# MDEV-35465 Async replication stops working on Galera async replica node
# when parallel replication is enabled
--source include/have_innodb.inc
--source include/have_log_bin.inc
--source include/galera_cluster.inc
--source include/have_debug_sync.inc
# Node 3 is not a Galera node, use it as a master
--connect master, 127.0.0.1, root, , test, $NODE_MYPORT_3
--connect node_1_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connection node_1
--disable_query_log
--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_3;
--enable_query_log
START SLAVE;
--connection master
CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
--connection node_1
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'
--source include/wait_condition.inc
#
--let debug_dbug_orig = `SELECT @@GLOBAL.debug_dbug`
SET GLOBAL debug_dbug = '+d,rpl_parallel_simulate_temp_err_xid,sync.wsrep_retry_event_group';
--connection master
INSERT INTO t1 VALUES (1);
--connection node_1_ctrl
SET debug_sync = 'now WAIT_FOR sync.wsrep_retry_event_group_reached';
--eval SET GLOBAL debug_dbug = '$debug_dbug_orig'
SET debug_sync = 'now SIGNAL signal.wsrep_retry_event_group';
--let $wait_condition = SELECT COUNT(*) = 1 FROM t1;
--source include/wait_condition.inc
--connection node_1
SET debug_sync = 'RESET';
--connection master
DROP TABLE t1;
--connection node_1
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'
--source include/wait_condition.inc
STOP SLAVE;

View File

@@ -0,0 +1,4 @@
[binlogoff]
[binlogon]
log-bin

View File

@@ -9,6 +9,7 @@
--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2
--connection node_1
#
@@ -48,6 +49,14 @@ SET SESSION debug_sync = 'now WAIT_FOR sync.wsrep_apply_cb_reached';
SET DEBUG_SYNC= 'now SIGNAL continue';
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'Creating table CREATE table t1 as SELECT SLEEP(0.1)'
--source include/wait_condition.inc
--connection node_2b
--echo # Wait first CTAS to replicate
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'
--source include/wait_condition.inc
SELECT * FROM t1;
--connection node_1b
#
# Release second CTAS and cleanup
#
@@ -74,12 +83,18 @@ SET DEBUG_SYNC= 'RESET';
 #
 --connection node_2
 --reap
+SELECT * FROM t1;
 --connection node_1
--error 0,ER_TABLE_EXISTS_ERROR,ER_QUERY_INTERRUPTED
+--error 0,ER_QUERY_INTERRUPTED,ER_LOCK_DEADLOCK
 --reap
+SELECT * FROM t1;
 DROP TABLE t1;
--disconnect node_1a
--disconnect node_1b
--disconnect node_2a
--disconnect node_2b
--source include/galera_end.inc
--echo # End of test

View File

@@ -40,3 +40,12 @@ SELECT COUNT(*) AS EXPECT_1 FROM t1;
SELECT VARIABLE_VALUE LIKE '%gmcast.segment = 3%' FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME = 'wsrep_provider_options';
DROP TABLE t1;
--connection node_1
call mtr.add_suppression("WSREP: read_completion_condition.*");
call mtr.add_suppression("WSREP: read_handler.*");
--disconnect node_3
--disconnect node_4
--source include/galera_end.inc

View File

@@ -1,3 +1,6 @@
#
# MDEV-35507 and MDEV-35522
#
install plugin ed25519 soname 'auth_ed25519';
install plugin server_audit soname 'server_audit';
set global server_audit_file_path='server_audit.log';
@@ -6,17 +9,25 @@ set global server_audit_logging=on;
# unsafe to log passwords (pwd-123)
CREATE USER u1 IDENTIFIED BY 'pwd_123';
create user u2 IDENTIFIED VIA ed25519 USING PASSWORD('pwd_123');
CREATE OR REPLACE USER u1 IDENTIFIED BY 'pwd_123';
SET PASSWORD FOR u1 = PASSWORD('pwd_123');
ALTER USER u1 IDENTIFIED BY 'pwd_123';
ALTER USER if exists u1 IDENTIFIED BY 'pwd_123';
SET STATEMENT max_statement_time=10 FOR ALTER USER u1 IDENTIFIED BY 'pwd_123';
alter user u2 identified VIA ed25519 USING password('pwd_123');
GRANT ALL ON test TO u1 IDENTIFIED BY "pwd_123";
GRANT ALL ON test TO u1 identified VIA ed25519 as password('pwd_123') or ed25519 using password('pwd_123');
CREATE SERVER s1 FOREIGN DATA WRAPPER mariadb OPTIONS ( PASSWORD "pwd_123");
CREATE OR REPLACE SERVER s1 FOREIGN DATA WRAPPER mariadb OPTIONS ( PASSWORD "pwd_123");
CREATE OR REPLACE SERVER s1 FOREIGN DATA WRAPPER mariadb OPTIONS ( PASSWORD "pwd_123");
# pattern should not be found
NOT FOUND /pwd_123/ in server_audit.log
# pattern should not be found
# cleaunup
DROP SERVER s1;
DROP USER u1;
DROP USER u2;
set global server_audit_logging=off;
UNINSTALL PLUGIN ed25519;
UNINSTALL PLUGIN server_audit;
# end of 10.5 tests

View File

@@ -13,6 +13,10 @@ if (!$AUTH_ED25519_SO) {
let $MYSQLD_DATADIR= `SELECT @@datadir`;
let SEARCH_FILE= $MYSQLD_DATADIR/server_audit.log;
--echo #
--echo # MDEV-35507 and MDEV-35522
--echo #
install plugin ed25519 soname 'auth_ed25519';
install plugin server_audit soname 'server_audit';
@@ -25,17 +29,24 @@ set global server_audit_logging=on;
CREATE USER u1 IDENTIFIED BY 'pwd_123';
create user u2 IDENTIFIED VIA ed25519 USING PASSWORD('pwd_123');
CREATE OR REPLACE USER u1 IDENTIFIED BY 'pwd_123';
SET PASSWORD FOR u1 = PASSWORD('pwd_123');
ALTER USER u1 IDENTIFIED BY 'pwd_123';
ALTER USER if exists u1 IDENTIFIED BY 'pwd_123';
SET STATEMENT max_statement_time=10 FOR ALTER USER u1 IDENTIFIED BY 'pwd_123';
alter user u2 identified VIA ed25519 USING password('pwd_123');
GRANT ALL ON test TO u1 IDENTIFIED BY "pwd_123";
GRANT ALL ON test TO u1 identified VIA ed25519 as password('pwd_123') or ed25519 using password('pwd_123');
CREATE SERVER s1 FOREIGN DATA WRAPPER mariadb OPTIONS ( PASSWORD "pwd_123");
CREATE OR REPLACE SERVER s1 FOREIGN DATA WRAPPER mariadb OPTIONS ( PASSWORD "pwd_123");
CREATE OR REPLACE SERVER s1 FOREIGN DATA WRAPPER mariadb OPTIONS ( PASSWORD "pwd_123");
--let SEARCH_PATTERN=pwd_123
--echo # pattern should not be found
--source include/search_pattern_in_file.inc
--echo # pattern should not be found
--echo # cleaunup
DROP SERVER s1;
DROP USER u1;
DROP USER u2;
set global server_audit_logging=off;
@@ -44,3 +55,5 @@ set global server_audit_logging=off;
UNINSTALL PLUGIN ed25519;
UNINSTALL PLUGIN server_audit;
--enable_warnings
--echo # end of 10.5 tests

View File

@@ -95,6 +95,33 @@ SELECT * FROM test.regular_tbl ORDER BY fkid LIMIT 2;
--replace_column 2 date-time 3 USER 4 UUID
SELECT * FROM test.regular_tbl ORDER BY fkid DESC LIMIT 2;
--echo *** MDEV-5798: Wrong errorcode for missing partition after TRUNCATE PARTITION
--connection master
eval CREATE TABLE t1 (a INT)
ENGINE=$engine_type
PARTITION BY LIST(a) (
PARTITION p0 VALUES IN (9, NULL),
PARTITION p1 VALUES IN (8, 2, 7),
PARTITION p2 VALUES IN (6, 4, 5),
PARTITION p3 VALUES IN (3, 1, 0)
);
ALTER TABLE t1 DROP PARTITION p0;
# This failed statement leaves ALTER_PARTITION_TRUNCATE set in
# thd->lex->alter_info.partition_flags
--error ER_NO_SUCH_TABLE
ALTER TABLE non_existent TRUNCATE PARTITION p1,p2;
# The bug was that the code would wrongly look at the (now stale) value of
# thd->lex->alter_info.partition_flags and give the wrong error code
# ER_WRONG_PARTITION_NAME.
--error ER_NO_PARTITION_FOR_GIVEN_VALUE
INSERT INTO t1 PARTITION (p1,p2,p3) VALUES (0),(9);
--sync_slave_with_master
###### CLEAN UP SECTION ##############
connection master;
@@ -102,3 +129,4 @@ DROP PROCEDURE test.proc_norm;
DROP PROCEDURE test.proc_byrange;
DROP TABLE test.regular_tbl;
DROP TABLE test.byrange_tbl;
DROP TABLE test.t1;

View File

@@ -61,6 +61,34 @@ ROLLBACK;
SELECT * FROM t1 ORDER BY a;
a
1
SET @old_mode= @@SESSION.binlog_format;
SET SESSION binlog_format= row;
SET SESSION gtid_domain_id= 200;
CREATE TEMPORARY TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB;
SET SESSION gtid_domain_id= 0;
BEGIN;
INSERT INTO t2 VALUES (200);
INSERT INTO t1 SELECT * FROM t2;
COMMIT;
SET SESSION gtid_domain_id= 201;
SET SESSION gtid_domain_id= 0;
DELETE FROM t1 WHERE a=200;
SET SESSION gtid_domain_id= 202;
DROP TEMPORARY TABLE t2;
SET SESSION binlog_format= mixed;
SET SESSION gtid_domain_id= 0;
CREATE TEMPORARY TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t2 VALUES (1);
SET SESSION gtid_domain_id= 0;
SET SESSION gtid_domain_id= 204;
ERROR HY000: Cannot modify @@session.gtid_domain_id or @@session.gtid_seq_no inside a transaction
SET SESSION binlog_format=statement;
INSERT INTO t2 VALUES (2);
SET SESSION gtid_domain_id= 205;
ERROR HY000: Cannot modify @@session.gtid_domain_id or @@session.gtid_seq_no inside a transaction
DROP TEMPORARY TABLE t2;
SET SESSION gtid_domain_id= @old_domain;
SET SESSION binlog_format= @old_mode;
*** Test requesting an explicit GTID position that conflicts with newer GTIDs of our own in the binlog. ***
connection slave;
include/stop_slave.inc
@@ -84,16 +112,16 @@ ERROR 25000: You are not allowed to execute this command in a transaction
 ROLLBACK;
 SET GLOBAL gtid_strict_mode= 1;
 SET GLOBAL gtid_slave_pos = "0-1-1";
-ERROR HY000: Specified GTID 0-1-1 conflicts with the binary log which contains a more recent GTID 0-2-12. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos
+ERROR HY000: Specified GTID 0-1-1 conflicts with the binary log which contains a more recent GTID 0-2-18. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos
 SET GLOBAL gtid_slave_pos = "";
-ERROR HY000: Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-12. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos
+ERROR HY000: Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-18. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos
 SET GLOBAL gtid_strict_mode= 0;
 SET GLOBAL gtid_slave_pos = "0-1-1";
 Warnings:
-Warning 1947 Specified GTID 0-1-1 conflicts with the binary log which contains a more recent GTID 0-2-12. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos
+Warning 1947 Specified GTID 0-1-1 conflicts with the binary log which contains a more recent GTID 0-2-18. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos
 SET GLOBAL gtid_slave_pos = "";
 Warnings:
-Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-12. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos
+Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-18. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos
 RESET MASTER;
 SET GLOBAL gtid_slave_pos = "0-1-1";
 START SLAVE;

View File

@@ -140,8 +140,26 @@ SELECT * FROM test.regular_tbl ORDER BY fkid DESC LIMIT 2;
id dt user uuidf fkid filler
1 date-time USER UUID 300 Partitioned table! Going to test replication for MySQL
2 date-time USER UUID 299 Partitioned table! Going to test replication for MySQL
*** MDEV-5798: Wrong errorcode for missing partition after TRUNCATE PARTITION
connection master;
CREATE TABLE t1 (a INT)
ENGINE='Archive'
PARTITION BY LIST(a) (
PARTITION p0 VALUES IN (9, NULL),
PARTITION p1 VALUES IN (8, 2, 7),
PARTITION p2 VALUES IN (6, 4, 5),
PARTITION p3 VALUES IN (3, 1, 0)
);
ALTER TABLE t1 DROP PARTITION p0;
ALTER TABLE non_existent TRUNCATE PARTITION p1,p2;
ERROR 42S02: Table 'test.non_existent' doesn't exist
INSERT INTO t1 PARTITION (p1,p2,p3) VALUES (0),(9);
ERROR HY000: Table has no partition for value 9
connection slave;
connection master;
DROP PROCEDURE test.proc_norm;
DROP PROCEDURE test.proc_byrange;
DROP TABLE test.regular_tbl;
DROP TABLE test.byrange_tbl;
DROP TABLE test.t1;
include/rpl_end.inc

View File

@@ -142,9 +142,26 @@ SELECT * FROM test.regular_tbl ORDER BY fkid DESC LIMIT 2;
id dt user uuidf fkid filler
1 date-time USER UUID 300 Partitioned table! Going to test replication for MySQL
2 date-time USER UUID 299 Partitioned table! Going to test replication for MySQL
*** MDEV-5798: Wrong errorcode for missing partition after TRUNCATE PARTITION
connection master;
CREATE TABLE t1 (a INT)
ENGINE='InnoDB'
PARTITION BY LIST(a) (
PARTITION p0 VALUES IN (9, NULL),
PARTITION p1 VALUES IN (8, 2, 7),
PARTITION p2 VALUES IN (6, 4, 5),
PARTITION p3 VALUES IN (3, 1, 0)
);
ALTER TABLE t1 DROP PARTITION p0;
ALTER TABLE non_existent TRUNCATE PARTITION p1,p2;
ERROR 42S02: Table 'test.non_existent' doesn't exist
INSERT INTO t1 PARTITION (p1,p2,p3) VALUES (0),(9);
ERROR HY000: Table has no partition for value 9
connection slave;
connection master;
DROP PROCEDURE test.proc_norm;
DROP PROCEDURE test.proc_byrange;
DROP TABLE test.regular_tbl;
DROP TABLE test.byrange_tbl;
DROP TABLE test.t1;
include/rpl_end.inc

View File

@@ -142,9 +142,26 @@ SELECT * FROM test.regular_tbl ORDER BY fkid DESC LIMIT 2;
id dt user uuidf fkid filler
1 date-time USER UUID 300 Partitioned table! Going to test replication for MySQL
2 date-time USER UUID 299 Partitioned table! Going to test replication for MySQL
*** MDEV-5798: Wrong errorcode for missing partition after TRUNCATE PARTITION
connection master;
CREATE TABLE t1 (a INT)
ENGINE='Memory'
PARTITION BY LIST(a) (
PARTITION p0 VALUES IN (9, NULL),
PARTITION p1 VALUES IN (8, 2, 7),
PARTITION p2 VALUES IN (6, 4, 5),
PARTITION p3 VALUES IN (3, 1, 0)
);
ALTER TABLE t1 DROP PARTITION p0;
ALTER TABLE non_existent TRUNCATE PARTITION p1,p2;
ERROR 42S02: Table 'test.non_existent' doesn't exist
INSERT INTO t1 PARTITION (p1,p2,p3) VALUES (0),(9);
ERROR HY000: Table has no partition for value 9
connection slave;
connection master;
DROP PROCEDURE test.proc_norm;
DROP PROCEDURE test.proc_byrange;
DROP TABLE test.regular_tbl;
DROP TABLE test.byrange_tbl;
DROP TABLE test.t1;
include/rpl_end.inc

View File

@@ -142,9 +142,26 @@ SELECT * FROM test.regular_tbl ORDER BY fkid DESC LIMIT 2;
id dt user uuidf fkid filler
1 date-time USER UUID 300 Partitioned table! Going to test replication for MySQL
2 date-time USER UUID 299 Partitioned table! Going to test replication for MySQL
*** MDEV-5798: Wrong errorcode for missing partition after TRUNCATE PARTITION
connection master;
CREATE TABLE t1 (a INT)
ENGINE='MyISAM'
PARTITION BY LIST(a) (
PARTITION p0 VALUES IN (9, NULL),
PARTITION p1 VALUES IN (8, 2, 7),
PARTITION p2 VALUES IN (6, 4, 5),
PARTITION p3 VALUES IN (3, 1, 0)
);
ALTER TABLE t1 DROP PARTITION p0;
ALTER TABLE non_existent TRUNCATE PARTITION p1,p2;
ERROR 42S02: Table 'test.non_existent' doesn't exist
INSERT INTO t1 PARTITION (p1,p2,p3) VALUES (0),(9);
ERROR HY000: Table has no partition for value 9
connection slave;
connection master;
DROP PROCEDURE test.proc_norm;
DROP PROCEDURE test.proc_byrange;
DROP TABLE test.regular_tbl;
DROP TABLE test.byrange_tbl;
DROP TABLE test.t1;
include/rpl_end.inc

View File

@@ -69,6 +69,46 @@ SELECT * FROM t1 ORDER BY a;
ROLLBACK;
SELECT * FROM t1 ORDER BY a;
# MDEV-34049: Parallel access to temptable in different domain_id in parallel replication
#
# Temporary tables must be prevented from being accessed from multiple threads
# at the same time in parallel replication. Withon one domain_id, this is done
# by running wait_for_prior_commit() before accessing a temporary table. To
# prevent the same temporary table from being accessed in parallel from two
# domains in out-of-order parallel replication, an error must be raised on
# attempt to change the gtid_domain_id while temporary tables are in use in
# a session and binlogged. In row-based binlogging, temporary tables are not
# binlogged, so gtid_domain_id can be freely changed.
SET @old_mode= @@SESSION.binlog_format;
SET SESSION binlog_format= row;
SET SESSION gtid_domain_id= 200;
CREATE TEMPORARY TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB;
SET SESSION gtid_domain_id= 0;
BEGIN;
INSERT INTO t2 VALUES (200);
INSERT INTO t1 SELECT * FROM t2;
COMMIT;
SET SESSION gtid_domain_id= 201;
SET SESSION gtid_domain_id= 0;
DELETE FROM t1 WHERE a=200;
SET SESSION gtid_domain_id= 202;
DROP TEMPORARY TABLE t2;
SET SESSION binlog_format= mixed;
SET SESSION gtid_domain_id= 0;
CREATE TEMPORARY TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t2 VALUES (1);
SET SESSION gtid_domain_id= 0;
--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO
SET SESSION gtid_domain_id= 204;
SET SESSION binlog_format=statement;
INSERT INTO t2 VALUES (2);
--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO
SET SESSION gtid_domain_id= 205;
DROP TEMPORARY TABLE t2;
SET SESSION gtid_domain_id= @old_domain;
SET SESSION binlog_format= @old_mode;
--echo *** Test requesting an explicit GTID position that conflicts with newer GTIDs of our own in the binlog. ***
--connection slave

View File

@@ -1,8 +0,0 @@
CREATE TABLE t1 (a INT, b INT, c INT, vc INT AS (c), UNIQUE(a), UNIQUE(b)) WITH SYSTEM VERSIONING;
INSERT IGNORE INTO t1 (a,b,c) VALUES (1,2,3);
SELECT a, b, c FROM t1 INTO OUTFILE '15330.data';
LOAD DATA INFILE '15330.data' IGNORE INTO TABLE t1 (a,b,c);
Warnings:
Warning 1062 Duplicate entry '1' for key 'a'
LOAD DATA INFILE '15330.data' REPLACE INTO TABLE t1 (a,b,c);
DROP TABLE t1;

View File

@@ -61,3 +61,14 @@ connection con1;
replace into t1 values (1),(2);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
drop table t1;
#
# MDEV-15330 Server crash or assertion `table->insert_values' failure in write_record upon LOAD DATA
#
create table t1 (a int, b int, c int, vc int as (c), unique(a), unique(b)) with system versioning;
insert ignore into t1 (a,b,c) values (1,2,3);
select a, b, c into outfile '15330.data' from t1;
load data infile '15330.data' ignore into table t1 (a,b,c);
Warnings:
Warning 1062 Duplicate entry '1' for key 'a'
load data infile '15330.data' replace into table t1 (a,b,c);
drop table t1;

View File

@@ -1,20 +0,0 @@
#
# MDEV-15330 Server crash or assertion `table->insert_values' failure in write_record upon LOAD DATA
#
--let $datadir= `select @@datadir`
CREATE TABLE t1 (a INT, b INT, c INT, vc INT AS (c), UNIQUE(a), UNIQUE(b)) WITH SYSTEM VERSIONING;
INSERT IGNORE INTO t1 (a,b,c) VALUES (1,2,3);
--disable_cursor_protocol
--enable_prepare_warnings
--disable_ps2_protocol
SELECT a, b, c FROM t1 INTO OUTFILE '15330.data';
--disable_prepare_warnings
--enable_ps2_protocol
--enable_cursor_protocol
LOAD DATA INFILE '15330.data' IGNORE INTO TABLE t1 (a,b,c);
LOAD DATA INFILE '15330.data' REPLACE INTO TABLE t1 (a,b,c);
# Cleanup
DROP TABLE t1;
--remove_file $datadir/test/15330.data

View File

@@ -77,4 +77,32 @@ replace into t1 values (1),(2);
drop table t1;
--echo #
--echo # MDEV-15330 Server crash or assertion `table->insert_values' failure in write_record upon LOAD DATA
--echo #
if ($default_engine == MEMORY)
{
--disable_query_log
set default_storage_engine= myisam;
--enable_query_log
}
create table t1 (a int, b int, c int, vc int as (c), unique(a), unique(b)) with system versioning;
insert ignore into t1 (a,b,c) values (1,2,3);
--disable_cursor_protocol
--disable_ps2_protocol
select a, b, c into outfile '15330.data' from t1;
--enable_ps2_protocol
--enable_cursor_protocol
load data infile '15330.data' ignore into table t1 (a,b,c);
load data infile '15330.data' replace into table t1 (a,b,c);
--let $datadir=`select @@datadir`
--remove_file $datadir/test/15330.data
# cleanup
drop table t1;
--disable_query_log
eval set default_storage_engine= $default_engine;
--enable_query_log
--source suite/versioning/common_finish.inc

View File

@@ -826,6 +826,7 @@ enum sa_keywords
SQLCOM_TRUNCATE,
SQLCOM_QUERY_ADMIN,
SQLCOM_DCL,
SQLCOM_FOUND=-1,
};
struct sa_keyword
@@ -837,30 +838,87 @@ struct sa_keyword
 };
 
-struct sa_keyword xml_word= {3, "XML", 0, SQLCOM_NOTHING};
-struct sa_keyword user_word= {4, "USER", 0, SQLCOM_NOTHING};
-struct sa_keyword data_word= {4, "DATA", 0, SQLCOM_NOTHING};
-struct sa_keyword server_word= {6, "SERVER", 0, SQLCOM_NOTHING};
-struct sa_keyword master_word= {6, "MASTER", 0, SQLCOM_NOTHING};
-struct sa_keyword password_word= {8, "PASSWORD", 0, SQLCOM_NOTHING};
-struct sa_keyword function_word= {8, "FUNCTION", 0, SQLCOM_NOTHING};
-struct sa_keyword statement_word= {9, "STATEMENT", 0, SQLCOM_NOTHING};
-struct sa_keyword procedure_word= {9, "PROCEDURE", 0, SQLCOM_NOTHING};
+struct sa_keyword xml_word[]=
+{
+  {3, "XML", 0, SQLCOM_FOUND},
+  {0, NULL, 0, SQLCOM_NOTHING}
+};
+struct sa_keyword user_word[]=
+{
+  {4, "USER", 0, SQLCOM_FOUND},
+  {0, NULL, 0, SQLCOM_NOTHING}
+};
+struct sa_keyword data_word[]=
+{
+  {4, "DATA", 0, SQLCOM_FOUND},
+  {0, NULL, 0, SQLCOM_NOTHING}
+};
+struct sa_keyword server_word[]=
+{
+  {6, "SERVER", 0, SQLCOM_FOUND},
+  {0, NULL, 0, SQLCOM_NOTHING}
+};
+struct sa_keyword master_word[]=
+{
+  {6, "MASTER", 0, SQLCOM_FOUND},
+  {0, NULL, 0, SQLCOM_NOTHING}
+};
+struct sa_keyword password_word[]=
+{
+  {8, "PASSWORD", 0, SQLCOM_FOUND},
+  {0, NULL, 0, SQLCOM_NOTHING}
+};
+struct sa_keyword function_word[]=
+{
+  {8, "FUNCTION", 0, SQLCOM_FOUND},
+  {0, NULL, 0, SQLCOM_NOTHING}
+};
+struct sa_keyword statement_word[]=
+{
+  {9, "STATEMENT", 0, SQLCOM_FOUND},
+  {0, NULL, 0, SQLCOM_NOTHING}
+};
+struct sa_keyword procedure_word[]=
+{
+  {9, "PROCEDURE", 0, SQLCOM_FOUND},
+  {0, NULL, 0, SQLCOM_NOTHING}
+};
+struct sa_keyword replace_user_word[]=
+{
+  {7, "REPLACE", user_word, SQLCOM_FOUND},
+  {0, NULL, 0, SQLCOM_NOTHING}
+};
+struct sa_keyword or_replace_user_word[]=
+{
+  {2, "OR", replace_user_word, SQLCOM_FOUND},
+  {0, NULL, 0, SQLCOM_NOTHING}
+};
+struct sa_keyword replace_server_word[]=
+{
+  {7, "REPLACE", server_word, SQLCOM_FOUND},
+  {0, NULL, 0, SQLCOM_NOTHING}
+};
+struct sa_keyword or_replace_server_word[]=
+{
+  {2, "OR", replace_server_word, SQLCOM_FOUND},
+  {0, NULL, 0, SQLCOM_NOTHING}
+};
 
 struct sa_keyword keywords_to_skip[]=
 {
-  {3, "SET", &statement_word, SQLCOM_QUERY_ADMIN},
-  {0, NULL, 0, SQLCOM_DDL}
+  {3, "SET", statement_word, SQLCOM_QUERY_ADMIN},
+  {0, NULL, 0, SQLCOM_NOTHING}
 };
 
 struct sa_keyword not_ddl_keywords[]=
 {
-  {4, "DROP", &user_word, SQLCOM_DCL},
-  {6, "CREATE", &user_word, SQLCOM_DCL},
-  {6, "RENAME", &user_word, SQLCOM_DCL},
-  {0, NULL, 0, SQLCOM_DDL}
+  {4, "DROP", user_word, SQLCOM_DCL},
+  {6, "CREATE", user_word, SQLCOM_DCL},
+  {6, "CREATE", or_replace_user_word, SQLCOM_DCL},
+  {6, "RENAME", user_word, SQLCOM_DCL},
+  {0, NULL, 0, SQLCOM_NOTHING}
 };
@@ -871,7 +929,7 @@ struct sa_keyword ddl_keywords[]=
{6, "CREATE", 0, SQLCOM_DDL}, {6, "CREATE", 0, SQLCOM_DDL},
{6, "RENAME", 0, SQLCOM_DDL}, {6, "RENAME", 0, SQLCOM_DDL},
{8, "TRUNCATE", 0, SQLCOM_DDL}, {8, "TRUNCATE", 0, SQLCOM_DDL},
{0, NULL, 0, SQLCOM_DDL} {0, NULL, 0, SQLCOM_NOTHING}
}; };
@@ -879,15 +937,15 @@ struct sa_keyword dml_keywords[]=
 {
   {2, "DO", 0, SQLCOM_DML},
   {4, "CALL", 0, SQLCOM_DML},
-  {4, "LOAD", &data_word, SQLCOM_DML},
-  {4, "LOAD", &xml_word, SQLCOM_DML},
+  {4, "LOAD", data_word, SQLCOM_DML},
+  {4, "LOAD", xml_word, SQLCOM_DML},
   {6, "DELETE", 0, SQLCOM_DML},
   {6, "INSERT", 0, SQLCOM_DML},
   {6, "SELECT", 0, SQLCOM_DML},
   {6, "UPDATE", 0, SQLCOM_DML},
   {7, "HANDLER", 0, SQLCOM_DML},
   {7, "REPLACE", 0, SQLCOM_DML},
-  {0, NULL, 0, SQLCOM_DML}
+  {0, NULL, 0, SQLCOM_NOTHING}
 };
@@ -895,38 +953,41 @@ struct sa_keyword dml_no_select_keywords[]=
 {
   {2, "DO", 0, SQLCOM_DML},
   {4, "CALL", 0, SQLCOM_DML},
-  {4, "LOAD", &data_word, SQLCOM_DML},
-  {4, "LOAD", &xml_word, SQLCOM_DML},
+  {4, "LOAD", data_word, SQLCOM_DML},
+  {4, "LOAD", xml_word, SQLCOM_DML},
   {6, "DELETE", 0, SQLCOM_DML},
   {6, "INSERT", 0, SQLCOM_DML},
   {6, "UPDATE", 0, SQLCOM_DML},
   {7, "HANDLER", 0, SQLCOM_DML},
   {7, "REPLACE", 0, SQLCOM_DML},
-  {0, NULL, 0, SQLCOM_DML}
+  {0, NULL, 0, SQLCOM_NOTHING}
 };
 
 struct sa_keyword dcl_keywords[]=
 {
-  {6, "CREATE", &user_word, SQLCOM_DCL},
-  {4, "DROP", &user_word, SQLCOM_DCL},
-  {6, "RENAME", &user_word, SQLCOM_DCL},
+  {6, "CREATE", user_word, SQLCOM_DCL},
+  {6, "CREATE", or_replace_user_word, SQLCOM_DCL},
+  {4, "DROP", user_word, SQLCOM_DCL},
+  {6, "RENAME", user_word, SQLCOM_DCL},
   {5, "GRANT", 0, SQLCOM_DCL},
   {6, "REVOKE", 0, SQLCOM_DCL},
-  {3, "SET", &password_word, SQLCOM_DCL},
-  {0, NULL, 0, SQLCOM_DDL}
+  {3, "SET", password_word, SQLCOM_DCL},
+  {0, NULL, 0, SQLCOM_NOTHING}
 };
 
 struct sa_keyword passwd_keywords[]=
 {
-  {3, "SET", &password_word, SQLCOM_SET_OPTION},
-  {5, "ALTER", &server_word, SQLCOM_ALTER_SERVER},
-  {5, "ALTER", &user_word, SQLCOM_ALTER_USER},
+  {3, "SET", password_word, SQLCOM_SET_OPTION},
+  {5, "ALTER", server_word, SQLCOM_ALTER_SERVER},
+  {5, "ALTER", user_word, SQLCOM_ALTER_USER},
   {5, "GRANT", 0, SQLCOM_GRANT},
-  {6, "CREATE", &user_word, SQLCOM_CREATE_USER},
-  {6, "CREATE", &server_word, SQLCOM_CREATE_SERVER},
-  {6, "CHANGE", &master_word, SQLCOM_CHANGE_MASTER},
+  {6, "CREATE", user_word, SQLCOM_CREATE_USER},
+  {6, "CREATE", or_replace_user_word, SQLCOM_CREATE_USER},
+  {6, "CREATE", server_word, SQLCOM_CREATE_SERVER},
+  {6, "CREATE", or_replace_server_word, SQLCOM_CREATE_SERVER},
+  {6, "CHANGE", master_word, SQLCOM_CHANGE_MASTER},
   {0, NULL, 0, SQLCOM_NOTHING}
 };
@@ -1749,7 +1810,7 @@ static int filter_query_type(const char *query, struct sa_keyword *kwd)
query++; query++;
} }
qwe_in_list= 0; qwe_in_list= SQLCOM_NOTHING;
if (!(len= get_next_word(query, fword))) if (!(len= get_next_word(query, fword)))
goto not_in_list; goto not_in_list;
query+= len+1; query+= len+1;
@@ -1767,8 +1828,7 @@ static int filter_query_type(const char *query, struct sa_keyword *kwd)
query++; query++;
nlen= get_next_word(query, nword); nlen= get_next_word(query, nword);
} }
if (l_keywords->next->length != nlen || if (filter_query_type(query, l_keywords->next) == SQLCOM_NOTHING)
strncmp(l_keywords->next->wd, nword, nlen) != 0)
goto do_loop; goto do_loop;
} }
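The third member of each table entry above chains to a follow-up keyword list (user_word, or_replace_user_word, and so on), and the rewritten filter_query_type() now follows that chain recursively instead of comparing only a single extra word. The sketch below is a purely illustrative, self-contained restatement of that matching idea; the struct, the table names and the classify() helper are simplified stand-ins, not the plugin's sa_keyword tables or filter_query_type():

#include <cstring>
#include <cstdio>

struct kw { const char *wd; const kw *next; int result; };

/* chained tables, terminated by a zero entry, as in the plugin */
static const kw user_w[]=       {{"USER",    nullptr,      1 /* SQLCOM_FOUND */}, {}};
static const kw replace_user[]= {{"REPLACE", user_w,       1}, {}};
static const kw or_replace[]=   {{"OR",      replace_user, 1}, {}};
static const kw dcl[]=          {{"CREATE",  or_replace,   2 /* SQLCOM_DCL */}, {}};

static int classify(const char *q, const kw *list)
{
  for (; list->wd; list++)
  {
    size_t len= strlen(list->wd);
    if (strncmp(q, list->wd, len) == 0 && q[len] == ' ')
    {
      if (list->next && classify(q + len + 1, list->next) == 0)
        continue;                     /* follow-up word did not match */
      return list->result;
    }
  }
  return 0;                           /* nothing matched (SQLCOM_NOTHING) */
}

int main()
{
  printf("%d\n", classify("CREATE OR REPLACE USER u1 ", dcl)); /* prints 2 */
  printf("%d\n", classify("CREATE TABLE t1 (a INT) ", dcl));   /* prints 0 */
}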
@@ -1783,6 +1843,25 @@ not_in_list:
return qwe_in_list; return qwe_in_list;
} }
/*
  If the query starts with SET STATEMENT, skip that prefix and return a
  pointer just past the FOR keyword, so that the statement which follows
  is the one that gets classified.  Returns 0 when no statement follows
  the prefix; returns the query unchanged when there is no such prefix.
*/
static const char *skip_set_statement(const char *query)
{
if (filter_query_type(query, keywords_to_skip))
{
char fword[MAX_KEYWORD + 1];
int len;
do
{
len= get_next_word(query, fword);
query+= len ? len : 1;
if (len == 3 && strncmp(fword, "FOR", 3) == 0)
break;
} while (*query);
if (*query == 0)
return 0;
}
return query;
}
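A minimal illustration of that behaviour (not the plugin code): a query wrapped in SET STATEMENT ... FOR should be classified by the statement that follows FOR. The word-by-word scanner is replaced by a crude strstr() here, and skip_prefix_for() is a made-up helper name:

#include <cstring>
#include <cstdio>

static const char *skip_prefix_for(const char *query)
{
  const char *p= strstr(query, " FOR ");
  return p ? p + 5 : nullptr;          /* nullptr when nothing follows FOR */
}

int main()
{
  const char *q= "SET STATEMENT max_statement_time=1 FOR CREATE USER u1";
  printf("%s\n", skip_prefix_for(q));  /* prints: CREATE USER u1 */
}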
static int log_statement_ex(const struct connection_info *cn, static int log_statement_ex(const struct connection_info *cn,
time_t ev_time, unsigned long thd_id, time_t ev_time, unsigned long thd_id,
@@ -1826,21 +1905,8 @@ static int log_statement_ex(const struct connection_info *cn,
{ {
const char *orig_query= query; const char *orig_query= query;
if (filter_query_type(query, keywords_to_skip)) if ((query= skip_set_statement(query)) == SQLCOM_NOTHING)
{ return 0;
char fword[MAX_KEYWORD + 1];
int len;
do
{
len= get_next_word(query, fword);
query+= len ? len : 1;
if (len == 3 && strncmp(fword, "FOR", 3) == 0)
break;
} while (*query);
if (*query == 0)
return 0;
}
if (events & EVENT_QUERY_DDL) if (events & EVENT_QUERY_DDL)
{ {
@@ -1896,7 +1962,7 @@ do_log_query:
if (query_log_limit > 0 && uh_buffer_size > query_log_limit+2) if (query_log_limit > 0 && uh_buffer_size > query_log_limit+2)
uh_buffer_size= query_log_limit+2; uh_buffer_size= query_log_limit+2;
switch (filter_query_type(query, passwd_keywords)) switch (filter_query_type(skip_set_statement(query), passwd_keywords))
{ {
case SQLCOM_GRANT: case SQLCOM_GRANT:
case SQLCOM_CREATE_USER: case SQLCOM_CREATE_USER:

View File

@@ -0,0 +1,17 @@
# Start of 10.5 tests
#
# MDEV-35427 Assertion `is_null() >= item->null_value' failed in Timestamp_or_zero_datetime_native_null::Timestamp_or_zero_datetime_native_null on EXECUTE
#
SET time_zone='+00:00';
SET timestamp=1000000000;
PREPARE s FROM 'SELECT CONCAT (UNIX_TIMESTAMP(?))';
EXECUTE s USING CAST('::' AS INET6);
CONCAT (UNIX_TIMESTAMP(?))
999993600
EXECUTE s USING NULL;
CONCAT (UNIX_TIMESTAMP(?))
NULL
DEALLOCATE PREPARE s;
SET timestamp=DEFAULT;
SET time_zone=DEFAULT;
# End of 10.5 tests

View File

@@ -0,0 +1,16 @@
--echo # Start of 10.5 tests
--echo #
--echo # MDEV-35427 Assertion `is_null() >= item->null_value' failed in Timestamp_or_zero_datetime_native_null::Timestamp_or_zero_datetime_native_null on EXECUTE
--echo #
SET time_zone='+00:00';
SET timestamp=1000000000;
PREPARE s FROM 'SELECT CONCAT (UNIX_TIMESTAMP(?))';
EXECUTE s USING CAST('::' AS INET6);
EXECUTE s USING NULL;
DEALLOCATE PREPARE s;
SET timestamp=DEFAULT;
SET time_zone=DEFAULT;
--echo # End of 10.5 tests

View File

@@ -4720,7 +4720,10 @@ int ha_partition::update_row(const uchar *old_data, const uchar *new_data)
Notice that HA_READ_BEFORE_WRITE_REMOVAL does not require this protocol, Notice that HA_READ_BEFORE_WRITE_REMOVAL does not require this protocol,
so this is not supported for this engine. so this is not supported for this engine.
*/ */
error= get_part_for_buf(old_data, m_rec0, m_part_info, &old_part_id); {
Abort_on_warning_instant_set old_abort_on_warning(thd, 0);
error= get_part_for_buf(old_data, m_rec0, m_part_info, &old_part_id);
}
DBUG_ASSERT(!error); DBUG_ASSERT(!error);
DBUG_ASSERT(old_part_id == m_last_part); DBUG_ASSERT(old_part_id == m_last_part);
DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), old_part_id)); DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), old_part_id));
@@ -10346,7 +10349,8 @@ void ha_partition::print_error(int error, myf errflag)
/* Should probably look for my own errors first */ /* Should probably look for my own errors first */
if ((error == HA_ERR_NO_PARTITION_FOUND) && if ((error == HA_ERR_NO_PARTITION_FOUND) &&
! (thd->lex->alter_info.partition_flags & ALTER_PARTITION_TRUNCATE)) ! (thd->lex->sql_command == SQLCOM_ALTER_TABLE &&
(thd->lex->alter_info.partition_flags & ALTER_PARTITION_TRUNCATE)))
{ {
m_part_info->print_no_partition_found(table, errflag); m_part_info->print_no_partition_found(table, errflag);
DBUG_VOID_RETURN; DBUG_VOID_RETURN;

View File

@@ -3095,12 +3095,19 @@ uint calculate_key_len(TABLE *, uint, const uchar *, key_part_map);
bitmap with first N+1 bits set bitmap with first N+1 bits set
(keypart_map for a key prefix of [0..N] keyparts) (keypart_map for a key prefix of [0..N] keyparts)
*/ */
#define make_keypart_map(N) (((key_part_map)2 << (N)) - 1) inline key_part_map make_keypart_map(uint N)
{
return ((key_part_map)2 << (N)) - 1;
}
/* /*
bitmap with first N bits set bitmap with first N bits set
(keypart_map for a key prefix of [0..N-1] keyparts) (keypart_map for a key prefix of [0..N-1] keyparts)
*/ */
#define make_prev_keypart_map(N) (((key_part_map)1 << (N)) - 1) inline key_part_map make_prev_keypart_map(uint N)
{
return ((key_part_map)1 << (N)) - 1;
}
/** Base class to be used by handlers different shares */ /** Base class to be used by handlers different shares */
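As a quick sanity check of the bit patterns the two inline helpers above produce (shown for N = 2, with a plain integer literal standing in for the key_part_map typedef; illustrative only):

static_assert(((2ULL << 2) - 1) == 0b111, "make_keypart_map(2): keyparts 0..2 set");
static_assert(((1ULL << 2) - 1) == 0b011, "make_prev_keypart_map(2): keyparts 0..1 set");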

View File

@@ -4494,6 +4494,7 @@ bool Item_param::set_from_item(THD *thd, Item *item)
if (item->null_value) if (item->null_value)
{ {
set_null(DTCollation_numeric()); set_null(DTCollation_numeric());
set_handler(&type_handler_null);
DBUG_RETURN(false); DBUG_RETURN(false);
} }
else else
@@ -4511,7 +4512,10 @@ bool Item_param::set_from_item(THD *thd, Item *item)
DBUG_RETURN(set_value(thd, item, &tmp, h)); DBUG_RETURN(set_value(thd, item, &tmp, h));
} }
else else
{
set_null_string(item->collation); set_null_string(item->collation);
set_handler(&type_handler_null);
}
DBUG_RETURN(0); DBUG_RETURN(0);
} }
@@ -5047,7 +5051,7 @@ Item_param::set_param_type_and_swap_value(Item_param *src)
} }
void Item_param::set_default() void Item_param::set_default(bool set_type_handler_null)
{ {
m_is_settable_routine_parameter= false; m_is_settable_routine_parameter= false;
state= DEFAULT_VALUE; state= DEFAULT_VALUE;
@@ -5060,13 +5064,17 @@ void Item_param::set_default()
can misbehave (e.g. crash on asserts). can misbehave (e.g. crash on asserts).
*/ */
null_value= true; null_value= true;
if (set_type_handler_null)
set_handler(&type_handler_null);
} }
void Item_param::set_ignore() void Item_param::set_ignore(bool set_type_handler_null)
{ {
m_is_settable_routine_parameter= false; m_is_settable_routine_parameter= false;
state= IGNORE_VALUE; state= IGNORE_VALUE;
null_value= true; null_value= true;
if (set_type_handler_null)
set_handler(&type_handler_null);
} }
/** /**

View File

@@ -2733,12 +2733,6 @@ public:
*/ */
virtual void under_not(Item_func_not * upper virtual void under_not(Item_func_not * upper
__attribute__((unused))) {}; __attribute__((unused))) {};
/*
If Item_field is wrapped in Item_direct_wrep remove this Item_direct_ref
wrapper.
*/
virtual Item *remove_item_direct_ref() { return this; }
void register_in(THD *thd); void register_in(THD *thd);
@@ -4322,8 +4316,8 @@ public:
int save_in_field(Field *field, bool no_conversions) override; int save_in_field(Field *field, bool no_conversions) override;
void set_default(); void set_default(bool set_type_handler_null);
void set_ignore(); void set_ignore(bool set_type_handler_null);
void set_null(const DTCollation &c); void set_null(const DTCollation &c);
void set_null_string(const DTCollation &c) void set_null_string(const DTCollation &c)
{ {
@@ -6078,11 +6072,6 @@ public:
} }
Item *field_transformer_for_having_pushdown(THD *thd, uchar *arg) override Item *field_transformer_for_having_pushdown(THD *thd, uchar *arg) override
{ return (*ref)->field_transformer_for_having_pushdown(thd, arg); } { return (*ref)->field_transformer_for_having_pushdown(thd, arg); }
Item *remove_item_direct_ref() override
{
*ref= (*ref)->remove_item_direct_ref();
return this;
}
}; };
@@ -6130,8 +6119,6 @@ public:
Ref_Type ref_type() override { return DIRECT_REF; } Ref_Type ref_type() override { return DIRECT_REF; }
Item *do_get_copy(THD *thd) const override Item *do_get_copy(THD *thd) const override
{ return get_item_copy<Item_direct_ref>(thd, this); } { return get_item_copy<Item_direct_ref>(thd, this); }
Item *remove_item_direct_ref() override
{ return (*ref)->remove_item_direct_ref(); }
/* Should be called if ref is changed */ /* Should be called if ref is changed */
inline void ref_changed() inline void ref_changed()
@@ -6515,7 +6502,6 @@ public:
{ return get_item_copy<Item_direct_view_ref>(thd, this); } { return get_item_copy<Item_direct_view_ref>(thd, this); }
Item *field_transformer_for_having_pushdown(THD *, uchar *) override Item *field_transformer_for_having_pushdown(THD *, uchar *) override
{ return this; } { return this; }
Item *remove_item_direct_ref() override { return this; }
void print(String *str, enum_query_type query_type) override; void print(String *str, enum_query_type query_type) override;
}; };
@@ -7018,7 +7004,7 @@ public:
{ {
// It should not be possible to have "EXECUTE .. USING DEFAULT(a)" // It should not be possible to have "EXECUTE .. USING DEFAULT(a)"
DBUG_ASSERT(0); DBUG_ASSERT(0);
param->set_default(); param->set_default(true);
return false; return false;
} }
table_map used_tables() const override; table_map used_tables() const override;
@@ -7151,7 +7137,7 @@ public:
} }
bool save_in_param(THD *, Item_param *param) override bool save_in_param(THD *, Item_param *param) override
{ {
param->set_default(); param->set_default(true);
return false; return false;
} }
Item *do_get_copy(THD *thd) const override Item *do_get_copy(THD *thd) const override
@@ -7189,7 +7175,7 @@ public:
} }
bool save_in_param(THD *, Item_param *param) override bool save_in_param(THD *, Item_param *param) override
{ {
param->set_ignore(); param->set_ignore(true);
return false; return false;
} }

View File

@@ -7926,10 +7926,11 @@ bool Item_equal::create_pushable_equalities(THD *thd,
while ((item=it++)) while ((item=it++))
{ {
left_item= item; if (!checker || ((item->*checker)(arg)))
if (checker && !((item->*checker) (arg))) {
continue; left_item= item;
break; break;
}
} }
if (!left_item) if (!left_item)

View File

@@ -304,7 +304,7 @@ class Native_functions_hash: public HASH
public: public:
Native_functions_hash() Native_functions_hash()
{ {
bzero(this, sizeof(*this)); bzero((void*) this, sizeof(*this));
} }
~Native_functions_hash() ~Native_functions_hash()
{ {

View File

@@ -4755,7 +4755,7 @@ public:
*/ */
bool is_valid() const override bool is_valid() const override
{ {
return m_rows_buf && m_cols.bitmap; return m_cols.bitmap;
} }
uint m_row_count; /* The number of rows added to the event */ uint m_row_count; /* The number of rows added to the event */

View File

@@ -1462,6 +1462,13 @@ void Rows_log_event::count_row_events(PRINT_EVENT_INFO *print_event_info)
switch (general_type_code) { switch (general_type_code) {
case WRITE_ROWS_EVENT: case WRITE_ROWS_EVENT:
/*
A write rows event may contain no after image (this can happen for REPLACE
INTO t() VALUES ()); count it correctly as 1 row, not 0.
*/
if (unlikely(m_rows_buf == m_rows_end))
print_event_info->row_events++;
/* Fall through. */
case DELETE_ROWS_EVENT: case DELETE_ROWS_EVENT:
row_events= 1; row_events= 1;
break; break;
@@ -1586,6 +1593,7 @@ bool Rows_log_event::print_verbose(IO_CACHE *file,
/* If the write rows event contained no values for the AI */ /* If the write rows event contained no values for the AI */
if (((general_type_code == WRITE_ROWS_EVENT) && (m_rows_buf==m_rows_end))) if (((general_type_code == WRITE_ROWS_EVENT) && (m_rows_buf==m_rows_end)))
{ {
print_event_info->row_events++;
if (my_b_printf(file, "### INSERT INTO %`s.%`s VALUES ()\n", if (my_b_printf(file, "### INSERT INTO %`s.%`s VALUES ()\n",
map->get_db_name(), map->get_table_name())) map->get_db_name(), map->get_table_name()))
goto err; goto err;
@@ -1619,9 +1627,16 @@ bool Rows_log_event::print_verbose(IO_CACHE *file,
/* Print the second image (for UPDATE only) */ /* Print the second image (for UPDATE only) */
if (sql_clause2) if (sql_clause2)
{ {
if (!(length= print_verbose_one_row(file, td, print_event_info, /* If the update rows event contained no values for the AI */
&m_cols_ai, value, if (unlikely(bitmap_is_clear_all(&m_cols_ai)))
(const uchar*) sql_clause2))) {
length= (bitmap_bits_set(&m_cols_ai) + 7) / 8;
if (my_b_printf(file, "### SET /* no columns */\n"))
goto err;
}
else if (!(length= print_verbose_one_row(file, td, print_event_info,
&m_cols_ai, value,
(const uchar*) sql_clause2)))
goto err; goto err;
value+= length; value+= length;
} }
@@ -1929,8 +1944,11 @@ bool Query_log_event::print_query_header(IO_CACHE* file,
end=int10_to_str((long) when, strmov(buff,"SET TIMESTAMP="),10); end=int10_to_str((long) when, strmov(buff,"SET TIMESTAMP="),10);
if (when_sec_part && when_sec_part <= TIME_MAX_SECOND_PART) if (when_sec_part && when_sec_part <= TIME_MAX_SECOND_PART)
{ {
*end++= '.'; char buff2[1 + 6 + 1];
end=int10_to_str(when_sec_part, end, 10); /* Ensure values < 100000 are printed with leading zeros, MDEV-31761. */
snprintf(buff2, sizeof(buff2), ".%06lu", when_sec_part);
DBUG_ASSERT(strlen(buff2) == 1 + 6);
end= strmov(end, buff2);
} }
end= strmov(end, print_event_info->delimiter); end= strmov(end, print_event_info->delimiter);
*end++='\n'; *end++='\n';
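The point of the padding can be seen with a small standalone sketch (illustrative only, not the event-printing code): a sub-second part of 1234 microseconds must come out as .001234, while printing the raw number drops the leading zeros and would be re-read as .1234 seconds.

#include <cstdio>

int main()
{
  unsigned long when_sec_part= 1234;
  char old_style[16], new_style[16];
  snprintf(old_style, sizeof old_style, ".%lu", when_sec_part);    /* ".1234"   -- wrong   */
  snprintf(new_style, sizeof new_style, ".%06lu", when_sec_part);  /* ".001234" -- correct */
  printf("%s %s\n", old_style, new_style);
  return 0;
}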

View File

@@ -4881,7 +4881,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
if (unlikely(open_and_lock_tables(thd, rgi->tables_to_lock, FALSE, 0))) if (unlikely(open_and_lock_tables(thd, rgi->tables_to_lock, FALSE, 0)))
{ {
#ifdef WITH_WSREP #ifdef WITH_WSREP
if (WSREP(thd)) if (WSREP(thd) && !thd->slave_thread)
{ {
WSREP_WARN("BF applier thread=%lu failed to open_and_lock_tables for " WSREP_WARN("BF applier thread=%lu failed to open_and_lock_tables for "
"%s, fatal: %d " "%s, fatal: %d "

View File

@@ -505,7 +505,8 @@ do_ftwrl_wait(rpl_group_info *rgi,
{ {
thd->set_time_for_next_stage(); thd->set_time_for_next_stage();
thd->ENTER_COND(&entry->COND_parallel_entry, &entry->LOCK_parallel_entry, thd->ENTER_COND(&entry->COND_parallel_entry, &entry->LOCK_parallel_entry,
&stage_waiting_for_ftwrl, old_stage); &stage_waiting_for_ftwrl,
(*did_enter_cond ? nullptr : old_stage));
*did_enter_cond= true; *did_enter_cond= true;
do do
{ {
@@ -865,8 +866,12 @@ do_retry:
err= 0; err= 0;
errmsg= NULL; errmsg= NULL;
#ifdef WITH_WSREP #ifdef WITH_WSREP
thd->wsrep_cs().reset_error(); DBUG_EXECUTE_IF("sync.wsrep_retry_event_group", {
WSREP_DEBUG("retrying async replication event"); const char act[]= "now "
"SIGNAL sync.wsrep_retry_event_group_reached "
"WAIT_FOR signal.wsrep_retry_event_group";
debug_sync_set_action(thd, STRING_WITH_LEN(act));
};);
#endif /* WITH_WSREP */ #endif /* WITH_WSREP */
/* /*
@@ -1013,15 +1018,20 @@ do_retry:
*/ */
thd->reset_killed(); thd->reset_killed();
#ifdef WITH_WSREP #ifdef WITH_WSREP
if (wsrep_before_command(thd)) if (WSREP(thd))
{ {
WSREP_WARN("Parallel slave worker failed at wsrep_before_command() hook"); /* Exec after statement hook to make sure that the failed transaction
err= 1; * gets cleared and reset error state. */
goto err; if (wsrep_after_statement(thd))
{
WSREP_WARN("Parallel slave worker failed at wsrep_after_statement() hook");
err= 1;
goto err;
}
thd->wsrep_cs().reset_error();
wsrep_start_trx_if_not_started(thd);
WSREP_DEBUG("parallel slave retry, after trx start");
} }
wsrep_start_trx_if_not_started(thd);
WSREP_DEBUG("parallel slave retry, after trx start");
#endif /* WITH_WSREP */ #endif /* WITH_WSREP */
strmake_buf(log_name, ir->name); strmake_buf(log_name, ir->name);
if ((fd= open_binlog(&rlog, log_name, &errmsg)) <0) if ((fd= open_binlog(&rlog, log_name, &errmsg)) <0)

View File

@@ -187,39 +187,29 @@ extern "C" my_bool wsrep_thd_is_SR(const THD *thd)
thd->wsrep_cs().transaction().state() == wsrep::transaction::s_executing; thd->wsrep_cs().transaction().state() == wsrep::transaction::s_executing;
} }
extern "C" void wsrep_handle_SR_rollback(THD *bf_thd, extern "C" void wsrep_handle_SR_rollback(THD *bf_thd __attribute__((unused)),
THD *victim_thd) THD *victim_thd)
{ {
/*
We should always be in victim_thd context: either the client session is
rolling back, or the rollbacker thread is in control.
*/
DBUG_ASSERT(victim_thd); DBUG_ASSERT(victim_thd);
DBUG_ASSERT(current_thd == victim_thd);
DBUG_ASSERT(wsrep_thd_is_SR(victim_thd)); DBUG_ASSERT(wsrep_thd_is_SR(victim_thd));
if (!victim_thd || !wsrep_on(bf_thd)) return;
WSREP_DEBUG("handle rollback, for deadlock: thd %llu trx_id %" PRIu64 " frags %zu conf %s", /* Defensive measure to avoid crash in production. */
if (!victim_thd) return;
WSREP_DEBUG("Handle SR rollback, for deadlock: thd %llu trx_id %" PRIu64 " frags %zu conf %s",
victim_thd->thread_id, victim_thd->thread_id,
victim_thd->wsrep_trx_id(), victim_thd->wsrep_trx_id(),
victim_thd->wsrep_sr().fragments_certified(), victim_thd->wsrep_sr().fragments_certified(),
wsrep_thd_transaction_state_str(victim_thd)); wsrep_thd_transaction_state_str(victim_thd));
/* Note: do not store/reset globals before wsrep_bf_abort() call DEBUG_SYNC(victim_thd, "wsrep_before_SR_rollback");
to avoid losing BF thd context. */
if (!(bf_thd && bf_thd != victim_thd)) wsrep_thd_self_abort(victim_thd);
{
DEBUG_SYNC(victim_thd, "wsrep_before_SR_rollback");
}
mysql_mutex_lock(&victim_thd->LOCK_thd_data);
if (bf_thd)
{
wsrep_bf_abort(bf_thd, victim_thd);
}
else
{
wsrep_thd_self_abort(victim_thd);
}
mysql_mutex_unlock(&victim_thd->LOCK_thd_data);
if (bf_thd)
{
wsrep_store_threadvars(bf_thd);
}
} }
extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd, extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd,

View File

@@ -26,6 +26,7 @@
#endif #endif
#include <my_getopt.h> #include <my_getopt.h>
#include <my_attribute.h>
class sys_var; class sys_var;
class set_var; class set_var;
@@ -249,9 +250,11 @@ protected:
Typically it's the same as session_value_ptr(), but it's different, Typically it's the same as session_value_ptr(), but it's different,
for example, for ENUM, that is printed as a string, but stored as a number. for example, for ENUM, that is printed as a string, but stored as a number.
*/ */
ATTRIBUTE_NO_UBSAN
uchar *session_var_ptr(THD *thd) const uchar *session_var_ptr(THD *thd) const
{ return ((uchar*)&(thd->variables)) + offset; } { return ((uchar*)&(thd->variables)) + offset; }
ATTRIBUTE_NO_UBSAN
uchar *global_var_ptr() const uchar *global_var_ptr() const
{ return ((uchar*)&global_system_variables) + offset; } { return ((uchar*)&global_system_variables) + offset; }

View File

@@ -78,7 +78,7 @@ const char *safe_vio_type_name(Vio *vio)
return vio_type_name(vio_type(vio), &unused); return vio_type_name(vio_type(vio), &unused);
} }
#include "sql_acl_getsort.ic" #include "sql_acl_getsort.inl"
static LEX_CSTRING native_password_plugin_name= { static LEX_CSTRING native_password_plugin_name= {
STRING_WITH_LEN("mysql_native_password") STRING_WITH_LEN("mysql_native_password")

View File

@@ -1154,7 +1154,8 @@ static bool wsrep_command_no_result(char command)
{ {
return (command == COM_STMT_FETCH || return (command == COM_STMT_FETCH ||
command == COM_STMT_SEND_LONG_DATA || command == COM_STMT_SEND_LONG_DATA ||
command == COM_STMT_CLOSE); command == COM_STMT_CLOSE ||
command == COM_STMT_PREPARE);
} }
#endif /* WITH_WSREP */ #endif /* WITH_WSREP */
#ifndef EMBEDDED_LIBRARY #ifndef EMBEDDED_LIBRARY
@@ -2394,6 +2395,10 @@ resume:
{ {
WSREP_DEBUG("THD is killed at dispatch_end"); WSREP_DEBUG("THD is killed at dispatch_end");
} }
if (thd->lex->sql_command != SQLCOM_SET_OPTION)
{
DEBUG_SYNC(thd, "wsrep_at_dispatch_end_before_result");
}
wsrep_after_command_before_result(thd); wsrep_after_command_before_result(thd);
if (wsrep_current_error(thd) && !wsrep_command_no_result(command)) if (wsrep_current_error(thd) && !wsrep_command_no_result(command))
{ {

View File

@@ -921,10 +921,10 @@ static bool insert_bulk_params(Prepared_statement *stmt,
param->set_null(); param->set_null();
break; break;
case STMT_INDICATOR_DEFAULT: case STMT_INDICATOR_DEFAULT:
param->set_default(); param->set_default(false);
break; break;
case STMT_INDICATOR_IGNORE: case STMT_INDICATOR_IGNORE:
param->set_ignore(); param->set_ignore(false);
break; break;
default: default:
DBUG_ASSERT(0); DBUG_ASSERT(0);

View File

@@ -18900,9 +18900,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
*/ */
Item *head_item= (!item_const && current_sjm && Item *head_item= (!item_const && current_sjm &&
current_sjm_head != field_item) ? current_sjm_head: head; current_sjm_head != field_item) ? current_sjm_head: head;
eq_item= new (thd->mem_root) Item_func_eq(thd, eq_item= new (thd->mem_root) Item_func_eq(thd, field_item, head_item);
field_item->remove_item_direct_ref(),
head_item->remove_item_direct_ref());
if (!eq_item || eq_item->set_cmp_func(thd)) if (!eq_item || eq_item->set_cmp_func(thd))
return 0; return 0;

View File

@@ -464,7 +464,7 @@ get_server_from_table_to_cache(TABLE *table)
RETURN VALUES RETURN VALUES
0 - no error 0 - no error
other - error code other - ER_ error code
*/ */
static int static int
@@ -548,15 +548,19 @@ insert_server_record_into_cache(FOREIGN_SERVER *server)
advance of insertion into the mysql.servers table advance of insertion into the mysql.servers table
RETURN VALUE RETURN VALUE
VOID 0 - no errors
>0 - ER_ error code
*/ */
static void static int
store_server_fields(TABLE *table, FOREIGN_SERVER *server) store_server_fields(TABLE *table, FOREIGN_SERVER *server)
{ {
table->use_all_columns(); table->use_all_columns();
if (table->s->fields < 9)
return ER_CANT_FIND_SYSTEM_REC;
/* /*
"server" has already been prepped by prepare_server_struct_for_<> "server" has already been prepped by prepare_server_struct_for_<>
so, all we need to do is check if the value is set (> -1 for port) so, all we need to do is check if the value is set (> -1 for port)
@@ -565,30 +569,43 @@ store_server_fields(TABLE *table, FOREIGN_SERVER *server)
have changed will be set. If an insert, then all will be set, have changed will be set. If an insert, then all will be set,
even if with empty strings even if with empty strings
*/ */
if (server->host) if (server->host &&
table->field[1]->store(server->host, table->field[1]->store(server->host,
(uint) strlen(server->host), system_charset_info); (uint) strlen(server->host), system_charset_info))
if (server->db) goto err;
if (server->db &&
table->field[2]->store(server->db, table->field[2]->store(server->db,
(uint) strlen(server->db), system_charset_info); (uint) strlen(server->db), system_charset_info))
if (server->username) goto err;
if (server->username &&
table->field[3]->store(server->username, table->field[3]->store(server->username,
(uint) strlen(server->username), system_charset_info); (uint) strlen(server->username), system_charset_info))
if (server->password) goto err;
if (server->password &&
table->field[4]->store(server->password, table->field[4]->store(server->password,
(uint) strlen(server->password), system_charset_info); (uint) strlen(server->password), system_charset_info))
if (server->port > -1) goto err;
table->field[5]->store(server->port); if (server->port > -1 &&
table->field[5]->store(server->port))
if (server->socket) goto err;
if (server->socket &&
table->field[6]->store(server->socket, table->field[6]->store(server->socket,
(uint) strlen(server->socket), system_charset_info); (uint) strlen(server->socket), system_charset_info))
if (server->scheme) goto err;
if (server->scheme &&
table->field[7]->store(server->scheme, table->field[7]->store(server->scheme,
(uint) strlen(server->scheme), system_charset_info); (uint) strlen(server->scheme), system_charset_info))
if (server->owner) goto err;
if (server->owner &&
table->field[8]->store(server->owner, table->field[8]->store(server->owner,
(uint) strlen(server->owner), system_charset_info); (uint) strlen(server->owner), system_charset_info))
goto err;
return 0;
err:
THD *thd= table->in_use;
DBUG_ASSERT(thd->is_error());
return thd->get_stmt_da()->get_sql_errno();
} }
/* /*
@@ -610,7 +627,7 @@ store_server_fields(TABLE *table, FOREIGN_SERVER *server)
RETURN VALUE RETURN VALUE
0 - no errors 0 - no errors
>0 - error code >0 - ER_ error code
*/ */
@@ -644,7 +661,8 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server)
error= 1; error= 1;
} }
/* store each field to be inserted */ /* store each field to be inserted */
store_server_fields(table, server); if ((error= store_server_fields(table, server)))
DBUG_RETURN(error);
DBUG_PRINT("info",("record for server '%s' not found!", DBUG_PRINT("info",("record for server '%s' not found!",
server->server_name)); server->server_name));
@@ -974,9 +992,15 @@ update_server_record(TABLE *table, FOREIGN_SERVER *server)
table->use_all_columns(); table->use_all_columns();
/* set the field that's the PK to the value we're looking for */ /* set the field that's the PK to the value we're looking for */
table->field[0]->store(server->server_name, if (table->field[0]->store(server->server_name,
server->server_name_length, server->server_name_length,
system_charset_info); system_charset_info))
{
DBUG_ASSERT(0); /* Protected by servers_cache */
THD *thd= table->in_use;
DBUG_ASSERT(thd->is_error());
return thd->get_stmt_da()->get_sql_errno();
}
if (unlikely((error= if (unlikely((error=
table->file->ha_index_read_idx_map(table->record[0], 0, table->file->ha_index_read_idx_map(table->record[0], 0,
@@ -994,7 +1018,8 @@ update_server_record(TABLE *table, FOREIGN_SERVER *server)
{ {
/* ok, so we can update since the record exists in the table */ /* ok, so we can update since the record exists in the table */
store_record(table,record[1]); store_record(table,record[1]);
store_server_fields(table, server); if ((error= store_server_fields(table, server)))
goto end;
if (unlikely((error=table->file->ha_update_row(table->record[1], if (unlikely((error=table->file->ha_update_row(table->record[1],
table->record[0])) && table->record[0])) &&
error != HA_ERR_RECORD_IS_THE_SAME)) error != HA_ERR_RECORD_IS_THE_SAME))

View File

@@ -1932,12 +1932,33 @@ Sys_pseudo_thread_id(
static bool static bool
check_gtid_domain_id(sys_var *self, THD *thd, set_var *var) check_gtid_domain_id(sys_var *self, THD *thd, set_var *var)
{ {
if (var->type != OPT_GLOBAL && if (var->type != OPT_GLOBAL)
error_if_in_trans_or_substatement(thd, {
if (error_if_in_trans_or_substatement(thd,
ER_STORED_FUNCTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO, ER_STORED_FUNCTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO,
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO)) ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO))
return true; return true;
/*
All binlogged statements on a temporary table must be binlogged in the
same domain_id; it is not safe to run them in parallel in different
domains, as a temporary table must be exclusive to a single thread.
In row-based binlogging, temporary tables do not end up in the binlog,
so there is no such issue.
ToDo: When merging to next (non-GA) release, introduce a more specific
error that describes that the problem is changing gtid_domain_id with
open temporary tables in statement/mixed binlogging mode; it is not
really due to doing it inside a "transaction".
*/
if (thd->has_thd_temporary_tables() &&
!thd->is_current_stmt_binlog_format_row() &&
var->save_result.ulonglong_value != thd->variables.gtid_domain_id)
{
my_error(ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO,
MYF(0));
return true;
}
}
return false; return false;
} }
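Restating the new session-level restriction as a standalone predicate may make it easier to read; the function and parameter names below are invented for illustration and simplify the real THD and set_var interfaces:

/* Illustrative sketch only: a session may not switch gtid_domain_id while it
   still owns temporary tables under statement/mixed binlogging, because all
   binlogged statements on such a table must stay in a single domain. */
bool gtid_domain_change_allowed(bool has_tmp_tables,
                                bool binlog_format_is_row,
                                unsigned long current_domain,
                                unsigned long requested_domain)
{
  if (has_tmp_tables && !binlog_format_is_row &&
      requested_domain != current_domain)
    return false;  /* reported as ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_... */
  return true;
}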

View File

@@ -3911,11 +3911,16 @@ enum wsrep::streaming_context::fragment_unit wsrep_fragment_unit(ulong unit)
bool THD::wsrep_parallel_slave_wait_for_prior_commit() bool THD::wsrep_parallel_slave_wait_for_prior_commit()
{ {
if (rgi_slave && rgi_slave->is_parallel_exec && wait_for_prior_commit()) if (rgi_slave && rgi_slave->is_parallel_exec)
{ {
return 1; wait_for_pending_deadlock_kill(this, rgi_slave);
if (rgi_slave->killed_for_retry) {
my_error(ER_LOCK_DEADLOCK, MYF(0));
return true;
}
return wait_for_prior_commit();
} }
return 0; return false;
} }
/***** callbacks for wsrep service ************/ /***** callbacks for wsrep service ************/

View File

@@ -2,14 +2,14 @@ Table Create Table
t1 CREATE TABLE `t1` ( t1 CREATE TABLE `t1` (
`Description` char(128) NOT NULL, `Description` char(128) NOT NULL,
`Attributes` varchar(256) DEFAULT NULL `Attributes` varchar(256) DEFAULT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='ODBC' `CATFUNC`='Drivers' ) ENGINE=CONNECT DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci `TABLE_TYPE`='ODBC' `CATFUNC`='Drivers'
SET NAMES utf8; SET NAMES utf8;
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='Driver=SQLite3 ODBC Driver;Database=MTR_SUITE_DIR/std_data/test.sqlite3;NoWCHAR=yes' CHARSET=utf8 DATA_CHARSET=utf8;; CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='Driver=SQLite3 ODBC Driver;Database=MTR_SUITE_DIR/std_data/test.sqlite3;NoWCHAR=yes' CHARSET=utf8 DATA_CHARSET=utf8;;
SHOW CREATE TABLE t1; SHOW CREATE TABLE t1;
Table Create Table Table Create Table
t1 CREATE TABLE `t1` ( t1 CREATE TABLE `t1` (
`a` varchar(64) DEFAULT NULL `a` varchar(64) DEFAULT NULL
) ENGINE=CONNECT DEFAULT CHARSET=utf8 CONNECTION='Driver=SQLite3 ODBC Driver;Database=MTR_SUITE_DIR/std_data/test.sqlite3;NoWCHAR=yes' `TABLE_TYPE`='ODBC' `DATA_CHARSET`='utf8' ) ENGINE=CONNECT DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci CONNECTION='Driver=SQLite3 ODBC Driver;Database=MTR_SUITE_DIR/std_data/test.sqlite3;NoWCHAR=yes' `TABLE_TYPE`='ODBC' `DATA_CHARSET`='utf8'
SELECT * FROM t1; SELECT * FROM t1;
a a
test1 test1
@@ -21,8 +21,8 @@ CREATE TABLE t2 AS SELECT * FROM t1;
SHOW CREATE TABLE t2; SHOW CREATE TABLE t2;
Table Create Table Table Create Table
t2 CREATE TABLE `t2` ( t2 CREATE TABLE `t2` (
`a` varchar(64) CHARACTER SET utf8 DEFAULT NULL `a` varchar(64) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
SELECT * FROM t2; SELECT * FROM t2;
a a
test1 test1

View File

@@ -1549,6 +1549,7 @@ release_tree:
ut_ad(block_savepoint + 2 == mtr->get_savepoint()); ut_ad(block_savepoint + 2 == mtr->get_savepoint());
if (ret < 0) if (ret < 0)
{ {
up_match= 0, low_match= 0, up_bytes= 0, low_bytes= 0;
/* While our latch on the level-2 page prevents splits or /* While our latch on the level-2 page prevents splits or
merges of this level-1 block, other threads may have merges of this level-1 block, other threads may have
modified it due to splitting or merging some level-0 (leaf) modified it due to splitting or merging some level-0 (leaf)

View File

@@ -27,6 +27,7 @@ Created 2/17/1996 Heikki Tuuri
#include "btr0sea.h" #include "btr0sea.h"
#ifdef BTR_CUR_HASH_ADAPT #ifdef BTR_CUR_HASH_ADAPT
#include "buf0buf.h" #include "buf0buf.h"
#include "buf0lru.h"
#include "page0page.h" #include "page0page.h"
#include "page0cur.h" #include "page0cur.h"
#include "btr0cur.h" #include "btr0cur.h"

View File

@@ -31,6 +31,7 @@ Created 11/11/1995 Heikki Tuuri
#include <sql_class.h> #include <sql_class.h>
#include "buf0flu.h" #include "buf0flu.h"
#include "buf0lru.h"
#include "buf0buf.h" #include "buf0buf.h"
#include "buf0checksum.h" #include "buf0checksum.h"
#include "buf0dblwr.h" #include "buf0dblwr.h"

View File

@@ -140,17 +140,7 @@ operator<<(
#ifndef UNIV_INNOCHECKSUM #ifndef UNIV_INNOCHECKSUM
# define buf_pool_get_curr_size() srv_buf_pool_curr_size # define buf_pool_get_curr_size() srv_buf_pool_curr_size
# define buf_block_free(block) buf_pool.free_block(block)
/** Allocate a buffer block.
@return own: the allocated block, state()==MEMORY */
inline buf_block_t *buf_block_alloc();
/********************************************************************//**
Frees a buffer block which does not contain a file page. */
UNIV_INLINE
void
buf_block_free(
/*===========*/
buf_block_t* block); /*!< in, own: block to be freed */
#define buf_page_get(ID, SIZE, LA, MTR) \ #define buf_page_get(ID, SIZE, LA, MTR) \
buf_page_get_gen(ID, SIZE, LA, NULL, BUF_GET, MTR) buf_page_get_gen(ID, SIZE, LA, NULL, BUF_GET, MTR)
@@ -478,8 +468,19 @@ public: // FIXME: fix fil_iterate()
/** Page id. Protected by buf_pool.page_hash.lock_get() when /** Page id. Protected by buf_pool.page_hash.lock_get() when
the page is in buf_pool.page_hash. */ the page is in buf_pool.page_hash. */
page_id_t id_; page_id_t id_;
/** buf_pool.page_hash link; protected by buf_pool.page_hash.lock_get() */ union {
buf_page_t *hash; /** for in_file(): buf_pool.page_hash link;
protected by buf_pool.page_hash.lock_get() */
buf_page_t *hash;
/** for state()==MEMORY blocks that are part of recv_sys.pages and
protected by recv_sys.mutex */
struct {
/** number of recv_sys.pages entries stored in the block */
uint16_t used_records;
/** the offset of the next free record */
uint16_t free_offset;
};
};
private: private:
/** log sequence number of the START of the log entry written of the /** log sequence number of the START of the log entry written of the
oldest modification to this block which has not yet been written oldest modification to this block which has not yet been written
@@ -567,16 +568,7 @@ public:
/* @} */ /* @} */
Atomic_counter<unsigned> access_time; /*!< time of first access, or Atomic_counter<unsigned> access_time; /*!< time of first access, or
0 if the block was never accessed 0 if the block was never accessed
in the buffer pool. in the buffer pool. */
For state() == MEMORY
blocks, this field can be repurposed
for something else.
When this field counts log records
and bytes allocated for recv_sys.pages,
the field is protected by
recv_sys_t::mutex. */
buf_page_t() : id_{0} buf_page_t() : id_{0}
{ {
static_assert(NOT_USED == 0, "compatibility"); static_assert(NOT_USED == 0, "compatibility");

View File

@@ -75,26 +75,6 @@ inline bool buf_page_peek_if_too_old(const buf_page_t *bpage)
} }
} }
/** Allocate a buffer block.
@return own: the allocated block, in state BUF_BLOCK_MEMORY */
inline buf_block_t *buf_block_alloc()
{
return buf_LRU_get_free_block(have_no_mutex);
}
/********************************************************************//**
Frees a buffer block which does not contain a file page. */
UNIV_INLINE
void
buf_block_free(
/*===========*/
buf_block_t* block) /*!< in, own: block to be freed */
{
mysql_mutex_lock(&buf_pool.mutex);
buf_LRU_block_free_non_file_page(block);
mysql_mutex_unlock(&buf_pool.mutex);
}
/********************************************************************//** /********************************************************************//**
Increments the modify clock of a frame by 1. The caller must (1) own the Increments the modify clock of a frame by 1. The caller must (1) own the
buf_pool mutex and block bufferfix count has to be zero, (2) or own an x-lock buf_pool mutex and block bufferfix count has to be zero, (2) or own an x-lock

View File

@@ -93,14 +93,17 @@ we put it to free list to be used.
buf_block_t* buf_LRU_get_free_block(buf_LRU_get get) buf_block_t* buf_LRU_get_free_block(buf_LRU_get get)
MY_ATTRIBUTE((malloc,warn_unused_result)); MY_ATTRIBUTE((malloc,warn_unused_result));
#define buf_block_alloc() buf_LRU_get_free_block(have_no_mutex)
/** @return whether the unzip_LRU list should be used for evicting a victim /** @return whether the unzip_LRU list should be used for evicting a victim
instead of the general LRU list */ instead of the general LRU list */
bool buf_LRU_evict_from_unzip_LRU(); bool buf_LRU_evict_from_unzip_LRU();
/** Puts a block back to the free list. /** Free a buffer block which does not contain a file page,
@param[in] block block; not containing a file page */ while holding buf_pool.mutex.
void @param block block to be put to buf_pool.free */
buf_LRU_block_free_non_file_page(buf_block_t* block); void buf_LRU_block_free_non_file_page(buf_block_t *block);
/******************************************************************//** /******************************************************************//**
Adds a block to the LRU list. Please make sure that the page_size is Adds a block to the LRU list. Please make sure that the page_size is
already set when invoking the function, so that we can get correct already set when invoking the function, so that we can get correct

View File

@@ -1359,6 +1359,7 @@ void recv_sys_t::clear()
{ {
buf_block_t *prev_block= UT_LIST_GET_PREV(unzip_LRU, block); buf_block_t *prev_block= UT_LIST_GET_PREV(unzip_LRU, block);
ut_ad(block->page.state() == buf_page_t::MEMORY); ut_ad(block->page.state() == buf_page_t::MEMORY);
block->page.hash= nullptr;
UT_LIST_REMOVE(blocks, block); UT_LIST_REMOVE(blocks, block);
MEM_MAKE_ADDRESSABLE(block->page.frame, srv_page_size); MEM_MAKE_ADDRESSABLE(block->page.frame, srv_page_size);
buf_block_free(block); buf_block_free(block);
@@ -1405,14 +1406,11 @@ inline void recv_sys_t::free(const void *data)
buf_block_t *block= &chunk->blocks[offs]; buf_block_t *block= &chunk->blocks[offs];
ut_ad(block->page.frame == page_align(data)); ut_ad(block->page.frame == page_align(data));
ut_ad(block->page.state() == buf_page_t::MEMORY); ut_ad(block->page.state() == buf_page_t::MEMORY);
ut_ad(static_cast<uint16_t>(block->page.access_time - 1) < ut_ad(uint16_t(block->page.free_offset - 1) < srv_page_size);
srv_page_size); ut_ad(block->page.used_records);
unsigned a= block->page.access_time; if (!--block->page.used_records)
ut_ad(a >= 1U << 16);
a-= 1U << 16;
block->page.access_time= a;
if (!(a >> 16))
{ {
block->page.hash= nullptr;
UT_LIST_REMOVE(blocks, block); UT_LIST_REMOVE(blocks, block);
MEM_MAKE_ADDRESSABLE(block->page.frame, srv_page_size); MEM_MAKE_ADDRESSABLE(block->page.frame, srv_page_size);
buf_block_free(block); buf_block_free(block);
@@ -2006,7 +2004,7 @@ bool recv_sys_t::add(map::iterator it, lsn_t start_lsn, lsn_t lsn,
ut_ad(tail->lsn == lsn); ut_ad(tail->lsn == lsn);
block= UT_LIST_GET_LAST(blocks); block= UT_LIST_GET_LAST(blocks);
ut_ad(block); ut_ad(block);
const size_t used= static_cast<uint16_t>(block->page.access_time - 1) + 1; const size_t used= uint16_t(block->page.free_offset - 1) + 1;
ut_ad(used >= ALIGNMENT); ut_ad(used >= ALIGNMENT);
const byte *end= const_cast<const log_phys_t*>(tail)->end(); const byte *end= const_cast<const log_phys_t*>(tail)->end();
if (!((reinterpret_cast<size_t>(end + len) ^ if (!((reinterpret_cast<size_t>(end + len) ^
@@ -2027,7 +2025,7 @@ append:
ut_ad(new_used > used); ut_ad(new_used > used);
if (new_used > srv_page_size) if (new_used > srv_page_size)
break; break;
block->page.access_time= (block->page.access_time & ~0U << 16) | block->page.free_offset=
ut_calc_align<uint16_t>(static_cast<uint16_t>(new_used), ALIGNMENT); ut_calc_align<uint16_t>(static_cast<uint16_t>(new_used), ALIGNMENT);
goto append; goto append;
} }
@@ -2042,7 +2040,8 @@ append:
block= add_block(); block= add_block();
if (UNIV_UNLIKELY(!block)) if (UNIV_UNLIKELY(!block))
return true; return true;
block->page.access_time= 1U << 16 | block->page.used_records= 1;
block->page.free_offset=
ut_calc_align<uint16_t>(static_cast<uint16_t>(size), ALIGNMENT); ut_calc_align<uint16_t>(static_cast<uint16_t>(size), ALIGNMENT);
static_assert(ut_is_2pow(ALIGNMENT), "ALIGNMENT must be a power of 2"); static_assert(ut_is_2pow(ALIGNMENT), "ALIGNMENT must be a power of 2");
UT_LIST_ADD_FIRST(blocks, block); UT_LIST_ADD_FIRST(blocks, block);
@@ -2052,7 +2051,7 @@ append:
} }
else else
{ {
size_t free_offset= static_cast<uint16_t>(block->page.access_time); size_t free_offset= block->page.free_offset;
ut_ad(!ut_2pow_remainder(free_offset, ALIGNMENT)); ut_ad(!ut_2pow_remainder(free_offset, ALIGNMENT));
if (UNIV_UNLIKELY(!free_offset)) if (UNIV_UNLIKELY(!free_offset))
{ {
@@ -2065,7 +2064,8 @@ append:
if (free_offset > srv_page_size) if (free_offset > srv_page_size)
goto create_block; goto create_block;
block->page.access_time= ((block->page.access_time >> 16) + 1) << 16 | block->page.used_records++;
block->page.free_offset=
ut_calc_align<uint16_t>(static_cast<uint16_t>(free_offset), ALIGNMENT); ut_calc_align<uint16_t>(static_cast<uint16_t>(free_offset), ALIGNMENT);
MEM_MAKE_ADDRESSABLE(block->page.frame + free_offset - size, size); MEM_MAKE_ADDRESSABLE(block->page.frame + free_offset - size, size);
buf= block->page.frame + free_offset - size; buf= block->page.frame + free_offset - size;
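Before this change the same two quantities were packed into the 32-bit access_time field (record count in the high half, next free byte offset in the low half); now they are named 16-bit members sharing storage with the page_hash pointer. A simplified, purely illustrative comparison of the two encodings (not the real buf_page_t; the server header uses an anonymous struct inside the union):

#include <cstdint>
#include <cassert>

struct old_style { uint32_t access_time; };             /* count << 16 | free_offset */

struct new_style {
  union {
    void *hash;                                          /* pages that are in_file()  */
    struct { uint16_t used_records, free_offset; } mem;  /* MEMORY pages in recv_sys  */
  };
};

int main()
{
  old_style o{ (3u << 16) | 512u };                      /* 3 records, next free byte 512 */
  assert((o.access_time >> 16) == 3 && uint16_t(o.access_time) == 512);

  new_style n{};
  n.mem.used_records= 3;                                 /* same information, named fields */
  n.mem.free_offset= 512;
  assert(n.mem.used_records == 3 && n.mem.free_offset == 512);
  return 0;
}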

View File

@@ -26,6 +26,7 @@ Created 6/9/1994 Heikki Tuuri
#include "mem0mem.h" #include "mem0mem.h"
#include "buf0buf.h" #include "buf0buf.h"
#include "buf0lru.h"
#include "srv0srv.h" #include "srv0srv.h"
#include <stdarg.h> #include <stdarg.h>

View File

@@ -18,6 +18,13 @@ SELECT * FROM t2 ORDER BY CAST(c AS INET6);
c c
456 456
123 123
SELECT * FROM t2 GROUP BY CAST(c AS char(60));
c
123
456
SELECT * FROM t2 GROUP BY CAST(c AS INET6);
c
456
DROP TABLE t1,t2; DROP TABLE t1,t2;
drop server srv; drop server srv;
for master_1 for master_1

View File

@@ -0,0 +1,24 @@
for master_1
for child2
for child3
set spider_same_server_link= 1;
CREATE SERVER srv FOREIGN DATA WRAPPER mysql
OPTIONS (SOCKET "$MASTER_1_MYSOCK", DATABASE 'test',user 'root');
create table t2 (c varchar(10));
create table t1 (c varchar(10)) ENGINE=Spider
COMMENT='WRAPPER "mysql", srv "srv",TABLE "t2"';
insert into t1 values ('abc'), ('abd'), ('abcd'), ('abc');
SELECT DISTINCT c FROM t1;
c
abc
abd
abcd
SELECT DISTINCT c FROM t1 WHERE (c LIKE 'abc%');
c
abc
abcd
drop table t1, t2;
drop server srv;
for master_1
for child2
for child3

View File

@@ -13,6 +13,8 @@ CREATE TABLE t2 (c INT) ENGINE=Spider COMMENT='WRAPPER "mysql",SRV "srv",TABLE "
insert into t2 values (456), (123); insert into t2 values (456), (123);
SELECT * FROM t2 ORDER BY CAST(c AS char(60)); SELECT * FROM t2 ORDER BY CAST(c AS char(60));
SELECT * FROM t2 ORDER BY CAST(c AS INET6); SELECT * FROM t2 ORDER BY CAST(c AS INET6);
SELECT * FROM t2 GROUP BY CAST(c AS char(60));
SELECT * FROM t2 GROUP BY CAST(c AS INET6);
# Cleanup # Cleanup
DROP TABLE t1,t2; DROP TABLE t1,t2;
drop server srv; drop server srv;

View File

@@ -0,0 +1,21 @@
--disable_query_log
--disable_result_log
--source ../../t/test_init.inc
--enable_result_log
--enable_query_log
set spider_same_server_link= 1;
evalp CREATE SERVER srv FOREIGN DATA WRAPPER mysql
OPTIONS (SOCKET "$MASTER_1_MYSOCK", DATABASE 'test',user 'root');
create table t2 (c varchar(10));
create table t1 (c varchar(10)) ENGINE=Spider
COMMENT='WRAPPER "mysql", srv "srv",TABLE "t2"';
insert into t1 values ('abc'), ('abd'), ('abcd'), ('abc');
SELECT DISTINCT c FROM t1;
SELECT DISTINCT c FROM t1 WHERE (c LIKE 'abc%');
drop table t1, t2;
drop server srv;
--disable_query_log
--disable_result_log
--source ../../t/test_deinit.inc
--enable_result_log
--enable_query_log

View File

@@ -13754,6 +13754,11 @@ int spider_mbase_handler::append_group_by_part(
DBUG_RETURN(error_num); DBUG_RETURN(error_num);
} }
/*
Append the GROUP BY part.
Only used by the group by handler for query construction.
*/
int spider_mbase_handler::append_group_by( int spider_mbase_handler::append_group_by(
ORDER *order, ORDER *order,
spider_string *str, spider_string *str,
@@ -13772,6 +13777,13 @@ int spider_mbase_handler::append_group_by(
str->q_append(SPIDER_SQL_GROUP_STR, SPIDER_SQL_GROUP_LEN); str->q_append(SPIDER_SQL_GROUP_STR, SPIDER_SQL_GROUP_LEN);
for (; order; order = order->next) for (; order; order = order->next)
{ {
/*
This is not expected to happen, as a NULL check was performed
at the creation of the group by handler, and any NULL item_ptr
would have resulted in the gbh not being created.
*/
if (!order->item_ptr)
DBUG_RETURN(ER_INTERNAL_ERROR);
if ((error_num = spider_db_print_item_type(order->item_ptr, NULL, spider, if ((error_num = spider_db_print_item_type(order->item_ptr, NULL, spider,
str, alias, alias_length, dbton_id, use_fields, fields))) str, alias, alias_length, dbton_id, use_fields, fields)))
{ {
@@ -13811,6 +13823,11 @@ int spider_mbase_handler::append_order_by_part(
DBUG_RETURN(error_num); DBUG_RETURN(error_num);
} }
/*
Append the ORDER BY part.
Only used by the group by handler for query construction.
*/
int spider_mbase_handler::append_order_by( int spider_mbase_handler::append_order_by(
ORDER *order, ORDER *order,
spider_string *str, spider_string *str,
@@ -13829,6 +13846,13 @@ int spider_mbase_handler::append_order_by(
str->q_append(SPIDER_SQL_ORDER_STR, SPIDER_SQL_ORDER_LEN); str->q_append(SPIDER_SQL_ORDER_STR, SPIDER_SQL_ORDER_LEN);
for (; order; order = order->next) for (; order; order = order->next)
{ {
/*
This is not expected to happen, as a NULL check was performed
at the creation of the group by handler, and any NULL item_ptr
would have resulted in the gbh not being created.
*/
if (!order->item_ptr)
DBUG_RETURN(ER_INTERNAL_ERROR);
if ((error_num = spider_db_print_item_type(order->item_ptr, NULL, spider, if ((error_num = spider_db_print_item_type(order->item_ptr, NULL, spider,
str, alias, alias_length, dbton_id, use_fields, fields))) str, alias, alias_length, dbton_id, use_fields, fields)))
{ {

View File

@@ -1603,8 +1603,10 @@ group_by_handler *spider_create_group_by_handler(
{ {
for (order = query->group_by; order; order = order->next) for (order = query->group_by; order; order = order->next)
{ {
if (spider_db_print_item_type((*order->item), NULL, spider, NULL, NULL, 0, if (order->item_ptr == NULL ||
roop_count, TRUE, fields_arg)) spider_db_print_item_type(order->item_ptr, NULL, spider,
NULL, NULL, 0, roop_count, TRUE,
fields_arg))
{ {
DBUG_PRINT("info",("spider dbton_id=%d can't create group by", roop_count)); DBUG_PRINT("info",("spider dbton_id=%d can't create group by", roop_count));
spider_clear_bit(dbton_bitmap, roop_count); spider_clear_bit(dbton_bitmap, roop_count);
@@ -1621,10 +1623,10 @@ group_by_handler *spider_create_group_by_handler(
{ {
for (order = query->order_by; order; order = order->next) for (order = query->order_by; order; order = order->next)
{ {
if ((*order->item)->type() == Item::SUM_FUNC_ITEM) if (order->item_ptr == NULL ||
continue; spider_db_print_item_type(order->item_ptr, NULL, spider,
if (spider_db_print_item_type((*order->item), NULL, spider, NULL, NULL, 0, NULL, NULL, 0, roop_count, TRUE,
roop_count, TRUE, fields_arg)) fields_arg))
{ {
DBUG_PRINT("info",("spider dbton_id=%d can't create order by", roop_count)); DBUG_PRINT("info",("spider dbton_id=%d can't create order by", roop_count));
spider_clear_bit(dbton_bitmap, roop_count); spider_clear_bit(dbton_bitmap, roop_count);