diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
index 9c1c2e5731e..585ad8308fd 100644
--- a/extra/mariabackup/xtrabackup.cc
+++ b/extra/mariabackup/xtrabackup.cc
@@ -4562,7 +4562,7 @@ xb_register_filter_entry(
databases_hash->cell_get(my_crc32c(0, name, p - name))
->search(&xb_filter_entry_t::name_hash,
[dbname](xb_filter_entry_t* f)
- { return f && !strcmp(f->name, dbname); });
+ { return !f || !strcmp(f->name, dbname); });
if (!*prev) {
(*prev = xb_new_filter_entry(dbname))
->has_tables = TRUE;
@@ -4696,7 +4696,7 @@ xb_load_list_file(
FILE* fp;
/* read and store the filenames */
- fp = fopen(filename, "r");
+ fp = fopen(filename, "rt");
if (!fp) {
die("Can't open %s",
filename);
diff --git a/extra/wolfssl/CMakeLists.txt b/extra/wolfssl/CMakeLists.txt
index 38203a07911..f5b5c8bb5e8 100644
--- a/extra/wolfssl/CMakeLists.txt
+++ b/extra/wolfssl/CMakeLists.txt
@@ -134,6 +134,8 @@ if(MSVC)
remove_definitions(-DHAVE_CONFIG_H)
target_compile_definitions(wolfssl PRIVATE
WOLFSSL_HAVE_MIN WOLFSSL_HAVE_MAX)
+ # Workaround https://github.com/wolfSSL/wolfssl/issues/9004
+ target_compile_definitions(wolfssl PRIVATE WOLFSSL_NO_SOCK SOCKET_INVALID=-1)
endif()
CONFIGURE_FILE(user_settings.h.in user_settings.h)
diff --git a/extra/wolfssl/wolfssl b/extra/wolfssl/wolfssl
index 239b85c8043..b077c81eb63 160000
--- a/extra/wolfssl/wolfssl
+++ b/extra/wolfssl/wolfssl
@@ -1 +1 @@
-Subproject commit 239b85c80438bf60d9a5b9e0ebe9ff097a760d0d
+Subproject commit b077c81eb635392e694ccedbab8b644297ec0285
diff --git a/libmariadb b/libmariadb
index 55abb320382..77bdf5a5725 160000
--- a/libmariadb
+++ b/libmariadb
@@ -1 +1 @@
-Subproject commit 55abb3203826a7b3593f0728d6d077d4e0f19259
+Subproject commit 77bdf5a5725ec13c9067723ee2d3e1c5787e8c71
diff --git a/mysql-test/include/galera_variables_ok.inc b/mysql-test/include/galera_variables_ok.inc
index d10bf1fd36e..9bd31bb32b2 100644
--- a/mysql-test/include/galera_variables_ok.inc
+++ b/mysql-test/include/galera_variables_ok.inc
@@ -5,7 +5,7 @@ if (!$_galera_variables_delta) {
--let $galera_variables_delta=0
}
---let $galera_variables_expected=`SELECT 50 + $galera_variables_delta`
+--let $galera_variables_expected=`SELECT 51 + $galera_variables_delta`
--let $galera_variables_count=`SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep%'`
diff --git a/mysql-test/main/column_compression.result b/mysql-test/main/column_compression.result
index 15976939f70..7f176290637 100644
--- a/mysql-test/main/column_compression.result
+++ b/mysql-test/main/column_compression.result
@@ -2978,4 +2978,60 @@ SELECT GROUP_CONCAT( b, a ORDER BY 2 ) AS f FROM t1;
f
nc,mmmmmmmmmmd
DROP TABLE t1;
+#
+# MDEV-24726 Assertion `0' failed in Field_varstring_compressed::key_cmp
+#
+# VARCHAR
+create table t1 (a varchar(8) compressed) character set utf8mb4;
+create algorithm=temptable view v1 as select * from t1;
+insert into t1 values ('foo'),('bar'),('foo');
+select * from v1 where a in (select a from t1);
+a
+foo
+foo
+bar
+drop view v1;
+drop table t1;
+create table t1 (f1 varchar(8)) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t1 values ('');
+create table t2 (f2 varchar(8) compressed) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t2 values ('a'),('b');
+select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1;
+f1
+
+drop table t1, t2;
+# BLOB
+create table t1 (a text compressed) character set utf8mb4;
+create algorithm=temptable view v1 as select * from t1;
+insert into t1 values ('foo'),('bar'),('foo');
+select * from v1 where a in (select a from t1);
+a
+foo
+foo
+bar
+drop view v1;
+drop table t1;
+create table t1 (f1 text) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t1 values ('');
+create table t2 (f2 text compressed) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t2 values ('a'),('b');
+select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1;
+f1
+
+drop table t1, t2;
+#
+# MDEV-16808 Assertion on compressed blob as key field
+#
+set join_cache_level= 3;
+create table t1 (col_blob text) engine=innodb;
+create table t2 (col_blob text compressed) engine=innodb;
+select * from t1 join t2 using ( col_blob );
+col_blob
+drop tables t1, t2;
+create table t (a text compressed,b text) engine=innodb;
+create table t4 like t;
+set session join_cache_level=3;
+select * from (select * from t) as t natural join (select * from t) as t1;
+a b
+drop tables t, t4;
# End of 10.5 tests
diff --git a/mysql-test/main/column_compression.test b/mysql-test/main/column_compression.test
index f9b7cd31355..874f3c3580b 100644
--- a/mysql-test/main/column_compression.test
+++ b/mysql-test/main/column_compression.test
@@ -519,4 +519,57 @@ INSERT INTO t1 VALUES ('c','n'),('d','mmmmmmmmmm');
SELECT GROUP_CONCAT( b, a ORDER BY 2 ) AS f FROM t1;
DROP TABLE t1;
+--echo #
+--echo # MDEV-24726 Assertion `0' failed in Field_varstring_compressed::key_cmp
+--echo #
+
+--echo # VARCHAR
+create table t1 (a varchar(8) compressed) character set utf8mb4;
+create algorithm=temptable view v1 as select * from t1;
+insert into t1 values ('foo'),('bar'),('foo');
+select * from v1 where a in (select a from t1);
+# cleanup
+drop view v1;
+drop table t1;
+
+create table t1 (f1 varchar(8)) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t1 values ('');
+create table t2 (f2 varchar(8) compressed) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t2 values ('a'),('b');
+select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1;
+# cleanup
+drop table t1, t2;
+
+--echo # BLOB
+create table t1 (a text compressed) character set utf8mb4;
+create algorithm=temptable view v1 as select * from t1;
+insert into t1 values ('foo'),('bar'),('foo');
+select * from v1 where a in (select a from t1);
+# cleanup
+drop view v1;
+drop table t1;
+
+create table t1 (f1 text) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t1 values ('');
+create table t2 (f2 text compressed) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t2 values ('a'),('b');
+select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1;
+# cleanup
+drop table t1, t2;
+
+--echo #
+--echo # MDEV-16808 Assertion on compressed blob as key field
+--echo #
+set join_cache_level= 3;
+create table t1 (col_blob text) engine=innodb;
+create table t2 (col_blob text compressed) engine=innodb;
+select * from t1 join t2 using ( col_blob );
+drop tables t1, t2;
+
+create table t (a text compressed,b text) engine=innodb;
+create table t4 like t;
+set session join_cache_level=3;
+select * from (select * from t) as t natural join (select * from t) as t1;
+drop tables t, t4;
+
--echo # End of 10.5 tests
diff --git a/mysql-test/main/default.result b/mysql-test/main/default.result
index cf1d31e3bc7..03b02d37ce5 100644
--- a/mysql-test/main/default.result
+++ b/mysql-test/main/default.result
@@ -3432,10 +3432,8 @@ DEFAULT(a) CASE a WHEN 0 THEN 1 ELSE 2 END
NULL 2
DROP TABLE t;
DROP VIEW v;
-#
# End of 10.2 test
#
-#
# MDEV-22703 DEFAULT() on a BLOB column can overwrite the default
# record, which can cause crashes when accessing already released
# memory.
@@ -3450,10 +3448,8 @@ length(DEFAULT(h))
25
INSERT INTO t1 () VALUES ();
drop table t1;
-#
# End of 10.3 test
#
-#
# MDEV-26423: MariaDB server crash in Create_tmp_table::finalize
#
CREATE TABLE t1 (pk text DEFAULT length(uuid()));
@@ -3483,6 +3479,14 @@ column_name column_default has_default is_nullable
a NULL 1 YES
drop view v1;
drop table t1;
-#
# End of 10.4 test
#
+# MDEV-37320 ASAN errors in Field::is_null / Item_param::assign_default
+#
+create table t1 (f01 timestamp, f03 timestamp);
+insert into t1 () values ();
+create trigger tr before insert on t1 for each row set @a=1;
+prepare stmt from "update t1 set f03 = ?";
+execute stmt using default;
+drop table t1;
+# End of 10.6 test
diff --git a/mysql-test/main/default.test b/mysql-test/main/default.test
index 2ebe9ee9c5e..2e67d31d06b 100644
--- a/mysql-test/main/default.test
+++ b/mysql-test/main/default.test
@@ -2137,9 +2137,8 @@ CREATE ALGORITHM=TEMPTABLE VIEW v AS SELECT * FROM t;
SELECT DISTINCT DEFAULT(a), CASE a WHEN 0 THEN 1 ELSE 2 END FROM v GROUP BY a WITH ROLLUP;
DROP TABLE t;
DROP VIEW v;
---echo #
+
--echo # End of 10.2 test
---echo #
--echo #
--echo # MDEV-22703 DEFAULT() on a BLOB column can overwrite the default
@@ -2157,9 +2156,7 @@ SELECT length(DEFAULT(h)) FROM t1;
INSERT INTO t1 () VALUES ();
drop table t1;
---echo #
--echo # End of 10.3 test
---echo #
--echo #
--echo # MDEV-26423: MariaDB server crash in Create_tmp_table::finalize
@@ -2183,6 +2180,16 @@ select column_name, column_default, column_default is not null as 'has_default',
drop view v1;
drop table t1;
---echo #
--echo # End of 10.4 test
+
--echo #
+--echo # MDEV-37320 ASAN errors in Field::is_null / Item_param::assign_default
+--echo #
+create table t1 (f01 timestamp, f03 timestamp);
+insert into t1 () values ();
+create trigger tr before insert on t1 for each row set @a=1;
+prepare stmt from "update t1 set f03 = ?";
+execute stmt using default;
+drop table t1;
+
+--echo # End of 10.6 test
diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result
index df4685ba9ee..e86515e41e7 100644
--- a/mysql-test/main/func_json.result
+++ b/mysql-test/main/func_json.result
@@ -957,10 +957,8 @@ FROM (SELECT * FROM json_test) AS json_test_values;
json_object("a", json_compact(a), "b", json_compact(b))
{"a": [1,2,3], "b": {"a":"foo"}}
DROP TABLE json_test;
-#
# End of 10.2 tests
#
-#
# MDEV-12854 Synchronize CREATE..SELECT data type and result set metadata data type for INT functions
#
SELECT
@@ -1492,10 +1490,8 @@ JSON_VALID(' {"number": 01E-4}')
select JSON_VALID(' {"number": 0E-4.0}');
JSON_VALID(' {"number": 0E-4.0}')
0
-#
# End of 10.4 tests
#
-#
# MDEV-16620 JSON_ARRAYAGG
#
CREATE TABLE t1 (a INT);
@@ -1727,10 +1723,8 @@ NULL
Warnings:
Warning 4036 Character disallowed in JSON in argument 1 to function 'json_extract' at position 2
SET @@collation_connection= @save_collation_connection;
-#
# End of 10.5 tests
#
-#
# MDEV-26054 Server crashes in Item_func_json_arrayagg::get_str_from_field
#
CREATE TABLE t (a VARCHAR(8));
@@ -1766,6 +1760,15 @@ FROM JSON_TABLE (@data, '$[*]' COLUMNS (data text PATH '$.Data')) AS t;
data
#
+# MDEV-21530: json_extract STILL crashes in Item_func_json_extract::read_json
+#
+select null<=>json_extract('1',json_object(null,'{ }',null,null),'{}');
+null<=>json_extract('1',json_object(null,'{ }',null,null),'{}')
+1
+Warnings:
+Warning 4042 Syntax error in JSON path in argument 2 to function 'json_extract' at position 1
+# End of 10.6 tests
+#
# MDEV-35614 JSON_UNQUOTE doesn't work with emojis
#
SELECT HEX(JSON_UNQUOTE('"\\ud83d\\ude0a"')) as hex_smiley;
@@ -1803,9 +1806,6 @@ show warnings;
Level Code Message
Warning 4035 Broken JSON string in argument 1 to function 'json_unquote' at position 13
#
-# End of 10.6 tests
-#
-#
# MDEV-31147 json_normalize does not work correctly with MSAN build
#
CREATE TABLE t1 (val JSON);
@@ -1815,10 +1815,8 @@ SELECT * FROM t1;
val normalized_json
15 1.5E1
DROP TABLE t1;
-#
# End of 10.8 tests
#
-#
# MDEV-27677: Implement JSON_OVERLAPS()
#
# Testing scalar json datatypes
@@ -2670,6 +2668,4 @@ SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1');
JSON_VALUE(@json,'$.A[last-1][last-1].key1')
NULL
SET @@collation_connection= @save_collation_connection;
-#
# End of 10.9 Test
-#
diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test
index bdb53be509f..f4183b475ee 100644
--- a/mysql-test/main/func_json.test
+++ b/mysql-test/main/func_json.test
@@ -607,9 +607,7 @@ SELECT json_object("a", json_compact(a), "b", json_compact(b))
FROM (SELECT * FROM json_test) AS json_test_values;
DROP TABLE json_test;
---echo #
--echo # End of 10.2 tests
---echo #
--echo #
--echo # MDEV-12854 Synchronize CREATE..SELECT data type and result set metadata data type for INT functions
@@ -971,9 +969,7 @@ select JSON_VALID(' {"number": 00E-4}');
select JSON_VALID(' {"number": 01E-4}');
select JSON_VALID(' {"number": 0E-4.0}');
---echo #
--echo # End of 10.4 tests
---echo #
-- echo #
-- echo # MDEV-16620 JSON_ARRAYAGG
@@ -1195,9 +1191,7 @@ SELECT JSON_EXTRACT('{"a": 1,"b": 2}','$.a');
SET @@collation_connection= @save_collation_connection;
---echo #
--echo # End of 10.5 tests
---echo #
--echo #
--echo # MDEV-26054 Server crashes in Item_func_json_arrayagg::get_str_from_field
@@ -1233,6 +1227,14 @@ SELECT
FROM JSON_TABLE (@data, '$[*]' COLUMNS (data text PATH '$.Data')) AS t;
+--echo #
+--echo # MDEV-21530: json_extract STILL crashes in Item_func_json_extract::read_json
+--echo #
+
+select null<=>json_extract('1',json_object(null,'{ }',null,null),'{}');
+
+--echo # End of 10.6 tests
+
--echo #
--echo # MDEV-35614 JSON_UNQUOTE doesn't work with emojis
--echo #
@@ -1253,10 +1255,6 @@ select json_unquote(json_extract(@v,'$.color')) as unquoted, collation(json_unqu
SELECT JSON_UNQUOTE('"\\uc080\\ude0a"') as invalid_utf8mb4;
show warnings;
---echo #
---echo # End of 10.6 tests
---echo #
-
--echo #
--echo # MDEV-31147 json_normalize does not work correctly with MSAN build
--echo #
@@ -1266,9 +1264,7 @@ INSERT INTO t1 (val) VALUES ('15');
SELECT * FROM t1;
DROP TABLE t1;
---echo #
--echo # End of 10.8 tests
---echo #
--echo #
--echo # MDEV-27677: Implement JSON_OVERLAPS()
@@ -1942,6 +1938,4 @@ SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1');
SET @@collation_connection= @save_collation_connection;
---echo #
--echo # End of 10.9 Test
---echo #
diff --git a/mysql-test/main/long_unique.result b/mysql-test/main/long_unique.result
index ec68afa8f00..b1b537a29b0 100644
--- a/mysql-test/main/long_unique.result
+++ b/mysql-test/main/long_unique.result
@@ -1242,6 +1242,7 @@ t1 CREATE TABLE `t1` (
insert into t1 value(concat(repeat('s',3000),'1'));
insert into t1 value(concat(repeat('s',3000),'2'));
ERROR 23000: Duplicate entry 'sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss...' for key 'a'
+update t1 set a= concat(repeat('s',3000),'2');
insert into t1 value(concat(repeat('a',3000),'2'));
drop table t1;
create table t1(a varchar(4000), b longblob , c varchar(5000), d longblob,
diff --git a/mysql-test/main/long_unique.test b/mysql-test/main/long_unique.test
index b3c1e8e9efc..e759fa6d4b7 100644
--- a/mysql-test/main/long_unique.test
+++ b/mysql-test/main/long_unique.test
@@ -404,6 +404,7 @@ show create table t1;
insert into t1 value(concat(repeat('s',3000),'1'));
--error ER_DUP_ENTRY
insert into t1 value(concat(repeat('s',3000),'2'));
+update t1 set a= concat(repeat('s',3000),'2');
insert into t1 value(concat(repeat('a',3000),'2'));
drop table t1;
diff --git a/mysql-test/main/long_unique_bugs.result b/mysql-test/main/long_unique_bugs.result
index 6bf3a7dcc44..55cc07ce067 100644
--- a/mysql-test/main/long_unique_bugs.result
+++ b/mysql-test/main/long_unique_bugs.result
@@ -356,6 +356,7 @@ ERROR 42000: Specified key was too long; max key length is 2300 bytes
#
create table t1(a int, unique(a) using hash);
#BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES)
+insert into t1 values(200),(150),(149),(148),(147),(146),(145),(144),(143),(142),(141),(140),(139),(138),(137),(136),(135),(134),(133),(132),(131),(130),(129),(128),(127),(126),(125),(124),(123),(122),(121),(120),(119),(118),(117),(116),(115),(114),(113),(112),(111),(110),(109),(108),(107),(106),(105),(104),(103),(102),(101),(100),(99),(98),(97),(96),(95),(94),(93),(92),(91),(90),(89),(88),(87),(86),(85),(84),(83),(82),(81),(80),(79),(78),(77),(76),(75),(74),(73),(72),(71),(70),(69),(68),(67),(66),(65),(64),(63),(62),(61),(60),(59),(58),(57),(56),(55),(54),(53),(52),(51),(50),(49),(48),(47),(46),(45),(44),(43),(42),(41),(40),(39),(38),(37),(36),(35),(34),(33),(32),(31),(30),(29),(28),(27),(26),(25),(24),(23),(22),(21),(20),(19),(18),(17),(16),(15),(14),(13),(12),(11),(10),(9),(8),(7),(6),(5),(4),(3),(2),(1);
drop table t1;
#
# MDEV-21804 Assertion `marked_for_read()' failed upon INSERT into table with long unique blob under binlog_row_image=NOBLOB
@@ -809,3 +810,36 @@ hex(c1) hex(c2) c3 hex(c4)
NULL NULL NULL NULL
drop table t1;
# End of 10.5 tests
+#
+# MDEV-36852 Table definition gets corrupt after adding unique hash key
+#
+create table t1 (a text, b int, foreign key(a) references x(x)) engine=myisam;
+Warnings:
+Note 1071 Specified key was too long; max key length is 1000 bytes
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` text DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ KEY `a` (`a`(1000))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+alter table t1 add unique(a), add key(a);
+Warnings:
+Note 1071 Specified key was too long; max key length is 1000 bytes
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` text DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ KEY `a_2` (`a`(1000))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+drop table t1;
+#
+# MDEV-37203 UBSAN: applying zero offset to null pointer in strings/ctype-uca.inl | my_uca_strnncollsp_onelevel_utf8mb4 | handler::check_duplicate_long_entries_update
+#
+create table t (a int,b text unique key);
+insert into t (a) values (1);
+update t set a=2;
+drop table t;
+# End of 10.6 tests
diff --git a/mysql-test/main/long_unique_bugs.test b/mysql-test/main/long_unique_bugs.test
index b82d22d0792..256cfcf286f 100644
--- a/mysql-test/main/long_unique_bugs.test
+++ b/mysql-test/main/long_unique_bugs.test
@@ -332,17 +332,8 @@ CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria;
--echo # MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes
--echo #
create table t1(a int, unique(a) using hash);
---let $count=150
---let insert_stmt= insert into t1 values(200)
-while ($count)
-{
- --let $insert_stmt=$insert_stmt,($count)
- --dec $count
-}
---disable_query_log
--echo #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES)
---eval $insert_stmt
---enable_query_log
+insert into t1 values(200),(150),(149),(148),(147),(146),(145),(144),(143),(142),(141),(140),(139),(138),(137),(136),(135),(134),(133),(132),(131),(130),(129),(128),(127),(126),(125),(124),(123),(122),(121),(120),(119),(118),(117),(116),(115),(114),(113),(112),(111),(110),(109),(108),(107),(106),(105),(104),(103),(102),(101),(100),(99),(98),(97),(96),(95),(94),(93),(92),(91),(90),(89),(88),(87),(86),(85),(84),(83),(82),(81),(80),(79),(78),(77),(76),(75),(74),(73),(72),(71),(70),(69),(68),(67),(66),(65),(64),(63),(62),(61),(60),(59),(58),(57),(56),(55),(54),(53),(52),(51),(50),(49),(48),(47),(46),(45),(44),(43),(42),(41),(40),(39),(38),(37),(36),(35),(34),(33),(32),(31),(30),(29),(28),(27),(26),(25),(24),(23),(22),(21),(20),(19),(18),(17),(16),(15),(14),(13),(12),(11),(10),(9),(8),(7),(6),(5),(4),(3),(2),(1);
drop table t1;
--echo #
@@ -756,3 +747,23 @@ select hex(c1), hex(c2), c3, hex(c4) from t1;
drop table t1;
--echo # End of 10.5 tests
+
+--echo #
+--echo # MDEV-36852 Table definition gets corrupt after adding unique hash key
+--echo #
+
+create table t1 (a text, b int, foreign key(a) references x(x)) engine=myisam;
+show create table t1;
+alter table t1 add unique(a), add key(a);
+show create table t1;
+drop table t1;
+
+--echo #
+--echo # MDEV-37203 UBSAN: applying zero offset to null pointer in strings/ctype-uca.inl | my_uca_strnncollsp_onelevel_utf8mb4 | handler::check_duplicate_long_entries_update
+--echo #
+create table t (a int,b text unique key);
+insert into t (a) values (1);
+update t set a=2;
+drop table t;
+
+--echo # End of 10.6 tests
diff --git a/mysql-test/main/long_unique_innodb.result b/mysql-test/main/long_unique_innodb.result
index 8ed45ce6b10..fd2b3daf8e5 100644
--- a/mysql-test/main/long_unique_innodb.result
+++ b/mysql-test/main/long_unique_innodb.result
@@ -134,3 +134,39 @@ disconnect con2;
# MDEV-20131 Assertion `!pk->has_virtual()' failed
create table t1 (a text, primary key(a(1871))) engine=innodb;
ERROR 42000: Specified key was too long; max key length is 1536 bytes
+# End of 10.4 tests
+#
+# MDEV-37268 ER_NOT_KEYFILE or assertion failure upon REPLACE into table with unique hash under READ-COMMITTED
+#
+create table t1 (id int not null primary key, f varchar(100), unique(f) using hash) engine=innodb;
+insert t1 values (1,'x');
+set transaction isolation level read committed;
+replace t1 values (2,'x');
+select * from t1;
+id f
+2 x
+drop table t1;
+create table t1 (id int, f longtext, primary key (id), unique(f)) engine=innodb partition by key (id) partitions 9;
+insert t1 (id) values (1),(2);
+set transaction isolation level read committed;
+update ignore t1 set f = 'x';
+ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported
+select * from t1;
+id f
+1 NULL
+2 NULL
+drop table t1;
+#
+# MDEV-37310 Non-debug failing assertion node->pcur->rel_pos == BTR_PCUR_ON upon violating long unique under READ-COMMITTED
+#
+create table t1 (id int, f blob, unique(id,f)) engine=innodb partition by key(id) partitions 2;
+insert t1 values (1,'foo'),(2,'foo');
+set transaction isolation level read committed;
+update ignore t1 set id = 2;
+ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported
+select * from t1;
+id f
+1 foo
+2 foo
+drop table t1;
+# End of 10.6 tests
diff --git a/mysql-test/main/long_unique_innodb.test b/mysql-test/main/long_unique_innodb.test
index dd2d9f94de3..cbe2d7431fe 100644
--- a/mysql-test/main/long_unique_innodb.test
+++ b/mysql-test/main/long_unique_innodb.test
@@ -1,4 +1,5 @@
--source include/have_innodb.inc
+--source include/have_partition.inc
#
# MDEV-371 Unique indexes for blobs
@@ -143,3 +144,36 @@ disconnect con2;
--error ER_TOO_LONG_KEY
create table t1 (a text, primary key(a(1871))) engine=innodb;
+
+--echo # End of 10.4 tests
+
+--echo #
+--echo # MDEV-37268 ER_NOT_KEYFILE or assertion failure upon REPLACE into table with unique hash under READ-COMMITTED
+--echo #
+create table t1 (id int not null primary key, f varchar(100), unique(f) using hash) engine=innodb;
+insert t1 values (1,'x');
+set transaction isolation level read committed;
+replace t1 values (2,'x');
+select * from t1;
+drop table t1;
+
+create table t1 (id int, f longtext, primary key (id), unique(f)) engine=innodb partition by key (id) partitions 9;
+insert t1 (id) values (1),(2);
+set transaction isolation level read committed;
+--error ER_NOT_SUPPORTED_YET
+update ignore t1 set f = 'x';
+select * from t1;
+drop table t1;
+
+--echo #
+--echo # MDEV-37310 Non-debug failing assertion node->pcur->rel_pos == BTR_PCUR_ON upon violating long unique under READ-COMMITTED
+--echo #
+create table t1 (id int, f blob, unique(id,f)) engine=innodb partition by key(id) partitions 2;
+insert t1 values (1,'foo'),(2,'foo');
+set transaction isolation level read committed;
+--error ER_NOT_SUPPORTED_YET
+update ignore t1 set id = 2;
+select * from t1;
+drop table t1;
+
+--echo # End of 10.6 tests
diff --git a/mysql-test/main/long_unique_innodb_debug.result b/mysql-test/main/long_unique_innodb_debug.result
new file mode 100644
index 00000000000..497f2af39bf
--- /dev/null
+++ b/mysql-test/main/long_unique_innodb_debug.result
@@ -0,0 +1,255 @@
+#
+# MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records
+#
+## INSERT
+create table t1 (
+col1 int primary key,
+col2 varchar(3000),
+unique (col2) using hash) engine=innodb;
+# Keep a Read View open to prevent purge
+start transaction;
+select * from t1;
+col1 col2
+connect con1,localhost,root;
+# Create a delete-marked secondary index record ('a', 10)
+insert t1 values(10, 'a');
+delete from t1;
+# Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum
+set transaction isolation level read committed;
+set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
+insert t1 values(15, 'a');
+connect con2,localhost,root;
+set debug_sync="now WAIT_FOR checked_duplicate";
+# Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10)
+set transaction isolation level read committed;
+insert t1 values(5, 'a');
+set debug_sync="now SIGNAL do_insert";
+connection con1;
+ERROR 23000: Duplicate entry 'a' for key 'col2'
+connection default;
+select * from t1;
+col1 col2
+commit;
+select * from t1;
+col1 col2
+5 a
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+drop table t1;
+## INSERT, row-level locking proof
+create table t1 (
+col1 int primary key,
+col2 varchar(3000),
+unique (col2) using hash) engine=innodb;
+# Keep a Read View open to prevent purge
+start transaction;
+select * from t1;
+col1 col2
+connect con1,localhost,root;
+# Create a delete-marked secondary index record ('a', 10)
+insert t1 values(10, 'a');
+delete from t1;
+# Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum
+set transaction isolation level read committed;
+set debug_sync="ha_write_row_end SIGNAL checked_duplicate WAIT_FOR do_insert";
+insert t1 values(15, 'a');
+connect con2,localhost,root;
+set debug_sync="now WAIT_FOR checked_duplicate";
+# Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10)
+set session innodb_lock_wait_timeout= 1;
+set transaction isolation level read committed;
+insert t1 values(5, 'a');
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+set debug_sync="now SIGNAL do_insert";
+connection con1;
+connection default;
+select * from t1;
+col1 col2
+commit;
+select * from t1;
+col1 col2
+15 a
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+drop table t1;
+## UPDATE
+create table t1 (
+col1 int primary key,
+col2 varchar(3000),
+unique (col2) using hash) engine=innodb;
+start transaction;
+select * from t1;
+col1 col2
+connect con1,localhost,root;
+insert into t1 values(10, 'a');
+delete from t1;
+insert into t1 values( 5, 'b');
+insert into t1 values(15, 'c');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+update t1 set col2='a' where col1=5;
+connect con2,localhost,root;
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set col2='a' where col1=15;
+set debug_sync="now SIGNAL do_insert";
+connection con1;
+ERROR 23000: Duplicate entry 'a' for key 'col2'
+connection default;
+select * from t1;
+col1 col2
+commit;
+select * from t1;
+col1 col2
+5 b
+15 a
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+drop table t1;
+## INSERT IGNORE
+create table t1 (
+col1 int primary key,
+col2 varchar(3000),
+unique (col2) using hash) engine=innodb;
+start transaction;
+select * from t1;
+col1 col2
+connect con1,localhost,root;
+insert t1 values(10, 'a');
+delete from t1;
+set transaction isolation level read committed;
+set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
+insert ignore t1 values(15, 'a'), (16, 'b');
+connect con2,localhost,root;
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+insert t1 values(5, 'a');
+set debug_sync="now SIGNAL do_insert";
+connection con1;
+Warnings:
+Warning 1062 Duplicate entry 'a' for key 'col2'
+connection default;
+select * from t1;
+col1 col2
+commit;
+select * from t1;
+col1 col2
+5 a
+16 b
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+drop table t1;
+## UPDATE IGNORE
+create table t1 (
+col1 int primary key,
+col2 varchar(3000),
+unique (col2) using hash) engine=innodb;
+start transaction;
+select * from t1;
+col1 col2
+connect con1,localhost,root;
+insert into t1 values(10, 'a');
+delete from t1;
+insert into t1 values( 5, 'b');
+insert into t1 values(15, 'c');
+insert into t1 values( 9, 'd');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+update ignore t1 set col2=chr(92+col1) where col1<=9;
+connect con2,localhost,root;
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set col2='a' where col1=15;
+set debug_sync="now SIGNAL do_insert";
+connection con1;
+ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported
+connection default;
+select * from t1;
+col1 col2
+commit;
+select * from t1;
+col1 col2
+5 b
+9 d
+15 a
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+drop table t1;
+## UPDATE modifying PK
+create table t1 (
+col1 int primary key,
+col2 varchar(3000),
+unique (col2) using hash) engine=innodb;
+start transaction;
+select * from t1;
+col1 col2
+connect con1,localhost,root;
+insert into t1 values(10, 'a');
+delete from t1;
+insert into t1 values( 5, 'b');
+insert into t1 values(15, 'c');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+update t1 set col2='a', col1=4 where col1=5;
+connect con2,localhost,root;
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set col2='a' where col1=15;
+set debug_sync="now SIGNAL do_insert";
+connection con1;
+ERROR 23000: Duplicate entry 'a' for key 'col2'
+connection default;
+select * from t1;
+col1 col2
+commit;
+select * from t1;
+col1 col2
+5 b
+15 a
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+drop table t1;
+## UPDATE IGNORE modifying PK
+create table t1 (
+col1 int primary key,
+col2 varchar(3000),
+unique (col2) using hash) engine=innodb;
+start transaction;
+select * from t1;
+col1 col2
+connect con1,localhost,root;
+insert into t1 values(10, 'a');
+delete from t1;
+insert into t1 values( 5, 'b');
+insert into t1 values(15, 'c');
+insert into t1 values( 9, 'd');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+update ignore t1 set col2=chr(92+col1), col1=col1-1 where col1<=9;
+connect con2,localhost,root;
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set col2='a' where col1=15;
+set debug_sync="now SIGNAL do_insert";
+connection con1;
+ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported
+connection default;
+select * from t1;
+col1 col2
+commit;
+select * from t1;
+col1 col2
+5 b
+9 d
+15 a
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+drop table t1;
+# End of 10.6 tests
diff --git a/mysql-test/main/long_unique_innodb_debug.test b/mysql-test/main/long_unique_innodb_debug.test
new file mode 100644
index 00000000000..d1a0673b54a
--- /dev/null
+++ b/mysql-test/main/long_unique_innodb_debug.test
@@ -0,0 +1,242 @@
+--source include/have_innodb.inc
+--source include/have_debug_sync.inc
+
+--echo #
+--echo # MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records
+--echo #
+
+--disable_view_protocol
+--echo ## INSERT
+create table t1 (
+ col1 int primary key,
+ col2 varchar(3000),
+ unique (col2) using hash) engine=innodb;
+--echo # Keep a Read View open to prevent purge
+start transaction;
+select * from t1;
+--connect con1,localhost,root
+--echo # Create a delete-marked secondary index record ('a', 10)
+insert t1 values(10, 'a');
+delete from t1;
+--echo # Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum
+set transaction isolation level read committed;
+set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
+--send insert t1 values(15, 'a')
+--connect con2,localhost,root
+set debug_sync="now WAIT_FOR checked_duplicate";
+--echo # Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10)
+set transaction isolation level read committed;
+insert t1 values(5, 'a');
+set debug_sync="now SIGNAL do_insert";
+--connection con1
+--error ER_DUP_ENTRY
+--reap
+--connection default
+select * from t1;
+commit;
+select * from t1;
+--disconnect con1
+--disconnect con2
+set debug_sync='RESET';
+drop table t1;
+
+--echo ## INSERT, row-level locking proof
+create table t1 (
+ col1 int primary key,
+ col2 varchar(3000),
+ unique (col2) using hash) engine=innodb;
+--echo # Keep a Read View open to prevent purge
+start transaction;
+select * from t1;
+
+--connect con1,localhost,root
+--echo # Create a delete-marked secondary index record ('a', 10)
+insert t1 values(10, 'a');
+delete from t1;
+--echo # Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum
+set transaction isolation level read committed;
+set debug_sync="ha_write_row_end SIGNAL checked_duplicate WAIT_FOR do_insert";
+--send insert t1 values(15, 'a')
+
+--connect con2,localhost,root
+set debug_sync="now WAIT_FOR checked_duplicate";
+--echo # Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10)
+set session innodb_lock_wait_timeout= 1;
+set transaction isolation level read committed;
+--error ER_LOCK_WAIT_TIMEOUT
+insert t1 values(5, 'a');
+set debug_sync="now SIGNAL do_insert";
+
+--connection con1
+--reap
+
+--connection default
+select * from t1;
+commit;
+select * from t1;
+--disconnect con1
+--disconnect con2
+set debug_sync='RESET';
+drop table t1;
+
+--echo ## UPDATE
+create table t1 (
+ col1 int primary key,
+ col2 varchar(3000),
+ unique (col2) using hash) engine=innodb;
+start transaction;
+select * from t1;
+--connect con1,localhost,root
+insert into t1 values(10, 'a');
+delete from t1;
+insert into t1 values( 5, 'b');
+insert into t1 values(15, 'c');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+--send update t1 set col2='a' where col1=5
+--connect con2,localhost,root
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set col2='a' where col1=15;
+set debug_sync="now SIGNAL do_insert";
+--connection con1
+--error ER_DUP_ENTRY
+--reap
+--connection default
+select * from t1;
+commit;
+select * from t1;
+--disconnect con1
+--disconnect con2
+set debug_sync='RESET';
+drop table t1;
+
+--echo ## INSERT IGNORE
+create table t1 (
+ col1 int primary key,
+ col2 varchar(3000),
+ unique (col2) using hash) engine=innodb;
+start transaction;
+select * from t1;
+--connect con1,localhost,root
+insert t1 values(10, 'a');
+delete from t1;
+set transaction isolation level read committed;
+set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
+--send insert ignore t1 values(15, 'a'), (16, 'b')
+--connect con2,localhost,root
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+insert t1 values(5, 'a');
+set debug_sync="now SIGNAL do_insert";
+--connection con1
+--reap
+--connection default
+select * from t1;
+commit;
+select * from t1;
+--disconnect con1
+--disconnect con2
+set debug_sync='RESET';
+drop table t1;
+
+--echo ## UPDATE IGNORE
+create table t1 (
+ col1 int primary key,
+ col2 varchar(3000),
+ unique (col2) using hash) engine=innodb;
+start transaction;
+select * from t1;
+--connect con1,localhost,root
+insert into t1 values(10, 'a');
+delete from t1;
+insert into t1 values( 5, 'b');
+insert into t1 values(15, 'c');
+insert into t1 values( 9, 'd');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+--send update ignore t1 set col2=chr(92+col1) where col1<=9
+--connect con2,localhost,root
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set col2='a' where col1=15;
+set debug_sync="now SIGNAL do_insert";
+--connection con1
+--error ER_NOT_SUPPORTED_YET
+--reap
+--connection default
+select * from t1;
+commit;
+select * from t1;
+--disconnect con1
+--disconnect con2
+set debug_sync='RESET';
+drop table t1;
+
+--echo ## UPDATE modifying PK
+create table t1 (
+ col1 int primary key,
+ col2 varchar(3000),
+ unique (col2) using hash) engine=innodb;
+start transaction;
+select * from t1;
+--connect con1,localhost,root
+insert into t1 values(10, 'a');
+delete from t1;
+insert into t1 values( 5, 'b');
+insert into t1 values(15, 'c');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+--send update t1 set col2='a', col1=4 where col1=5
+--connect con2,localhost,root
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set col2='a' where col1=15;
+set debug_sync="now SIGNAL do_insert";
+--connection con1
+--error ER_DUP_ENTRY
+--reap
+--connection default
+select * from t1;
+commit;
+select * from t1;
+--disconnect con1
+--disconnect con2
+set debug_sync='RESET';
+drop table t1;
+
+--echo ## UPDATE IGNORE modifying PK
+create table t1 (
+ col1 int primary key,
+ col2 varchar(3000),
+ unique (col2) using hash) engine=innodb;
+start transaction;
+select * from t1;
+--connect con1,localhost,root
+insert into t1 values(10, 'a');
+delete from t1;
+insert into t1 values( 5, 'b');
+insert into t1 values(15, 'c');
+insert into t1 values( 9, 'd');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+--send update ignore t1 set col2=chr(92+col1), col1=col1-1 where col1<=9
+--connect con2,localhost,root
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set col2='a' where col1=15;
+set debug_sync="now SIGNAL do_insert";
+--connection con1
+--error ER_NOT_SUPPORTED_YET
+--reap
+--connection default
+select * from t1;
+commit;
+select * from t1;
+--disconnect con1
+--disconnect con2
+set debug_sync='RESET';
+drop table t1;
+--enable_view_protocol
+
+--echo # End of 10.6 tests
diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result
index c36c8307f86..0231974fed9 100644
--- a/mysql-test/main/ps.result
+++ b/mysql-test/main/ps.result
@@ -5995,3 +5995,55 @@ DROP VIEW t1;
#
# End of 10.4 tests
#
+#
+# MDEV-35330: Assertion `marked_for_read()` failed in VSec9::VSec9 | Item_func_from_unixtime::get_date
+#
+CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10');
+SELECT * FROM t;
+a b
+1 2025-07-18 18:37:10
+EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT;
+SELECT * FROM t;
+a b
+1 1970-01-01 09:00:01
+DROP TABLE t;
+CREATE TABLE t (a INT, b INT DEFAULT (a+5));
+INSERT INTO t values (1,2), (2,DEFAULT);
+EXECUTE IMMEDIATE 'INSERT INTO t VALUES (3,4), (4,?)' USING DEFAULT;
+SELECT * FROM t;
+a b
+1 2
+2 7
+3 4
+4 9
+EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT;
+SELECT * FROM t;
+a b
+1 6
+2 7
+3 8
+4 9
+DROP TABLE t;
+CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10');
+SELECT * FROM t;
+a b
+1 2025-07-18 18:37:10
+PREPARE s FROM 'UPDATE t SET b=?';
+EXECUTE s USING DEFAULT;
+SELECT * FROM t;
+a b
+1 1970-01-01 09:00:01
+DROP TABLE t;
+CREATE TABLE t (a INT, b TIMESTAMP DEFAULT FROM_UNIXTIME(a), c INT DEFAULT (a+5));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10',3);
+SELECT * FROM t;
+a b c
+1 2025-07-18 18:37:10 3
+EXECUTE IMMEDIATE 'UPDATE t SET b=?, c=?' USING DEFAULT, DEFAULT;
+SELECT * FROM t;
+a b c
+1 1970-01-01 09:00:01 6
+DROP TABLE t;
+# End of 10.6 tests
diff --git a/mysql-test/main/ps.test b/mysql-test/main/ps.test
index ff1e9e9f13e..54666d64012 100644
--- a/mysql-test/main/ps.test
+++ b/mysql-test/main/ps.test
@@ -5447,3 +5447,54 @@ DROP VIEW t1;
--echo #
--echo # End of 10.4 tests
--echo #
+
+--echo #
+--echo # MDEV-35330: Assertion `marked_for_read()` failed in VSec9::VSec9 | Item_func_from_unixtime::get_date
+--echo #
+
+CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10');
+
+SELECT * FROM t;
+
+EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT;
+
+SELECT * FROM t;
+
+DROP TABLE t;
+
+CREATE TABLE t (a INT, b INT DEFAULT (a+5));
+INSERT INTO t values (1,2), (2,DEFAULT);
+EXECUTE IMMEDIATE 'INSERT INTO t VALUES (3,4), (4,?)' USING DEFAULT;
+
+SELECT * FROM t;
+
+EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT;
+
+SELECT * FROM t;
+
+DROP TABLE t;
+
+CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10');
+
+SELECT * FROM t;
+
+PREPARE s FROM 'UPDATE t SET b=?';
+EXECUTE s USING DEFAULT;
+
+SELECT * FROM t;
+
+DROP TABLE t;
+
+CREATE TABLE t (a INT, b TIMESTAMP DEFAULT FROM_UNIXTIME(a), c INT DEFAULT (a+5));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10',3);
+
+SELECT * FROM t;
+
+EXECUTE IMMEDIATE 'UPDATE t SET b=?, c=?' USING DEFAULT, DEFAULT;
+
+SELECT * FROM t;
+
+DROP TABLE t;
+--echo # End of 10.6 tests
diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def
index 23bf40c409a..438b1e63cfd 100644
--- a/mysql-test/suite/galera/disabled.def
+++ b/mysql-test/suite/galera/disabled.def
@@ -14,3 +14,5 @@ galera_wan : MDEV-35940 Unallowed state transition: donor -> synced in galera_wa
galera_vote_rejoin_ddl : MDEV-35940 Unallowed state transition: donor -> synced in galera_wan
MW-329 : MDEV-35951 Complete freeze during MW-329 test
galera_vote_rejoin_dml : MDEV-35964 Assertion `ist_seqno >= cc_seqno' failed in galera_vote_rejoin_dml
+galera_var_notify_cmd : MDEV-37257 WSREP: Notification command failed: 1 (Operation not permitted)
+galera_var_notify_ssl_ipv6 : MDEV-37257 WSREP: Notification command failed: 1 (Operation not permitted)
diff --git a/mysql-test/suite/galera/r/enforce_storage_engine2.result b/mysql-test/suite/galera/r/enforce_storage_engine2.result
index b652eacd4f4..9239a4fdb93 100644
--- a/mysql-test/suite/galera/r/enforce_storage_engine2.result
+++ b/mysql-test/suite/galera/r/enforce_storage_engine2.result
@@ -7,23 +7,15 @@ connection node_1;
connection node_1;
CREATE TABLE t1(i INT) ENGINE=INNODB;
CREATE TABLE t2(i INT) ENGINE=MYISAM;
-Warnings:
-Note 1266 Using storage engine InnoDB for table 't2'
-Note 1266 Using storage engine InnoDB for table 't2'
+ERROR HY000: The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
connection node_2;
SHOW TABLES;
Tables_in_test
t1
-t2
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`i` int(11) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
-SHOW CREATE TABLE t2;
-Table Create Table
-t2 CREATE TABLE `t2` (
- `i` int(11) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
-DROP TABLE t1, t2;
+DROP TABLE t1;
# End of tests
diff --git a/mysql-test/suite/galera/r/galera_aria.result b/mysql-test/suite/galera/r/galera_aria.result
new file mode 100644
index 00000000000..435a0525a0f
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_aria.result
@@ -0,0 +1,25 @@
+connection node_2;
+connection node_1;
+set session sql_mode='';
+SET @@enforce_storage_engine=INNODB;
+CREATE TABLE t1 (c INT ) ENGINE=ARIA;
+ERROR HY000: The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
+SHOW WARNINGS;
+Level Code Message
+Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
+Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set
+Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
+Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set
+CREATE TABLE t1 (c INT );
+DROP TABLE t1;
+CREATE TABLE t1 (c INT ) ENGINE=INNODB;
+DROP TABLE t1;
+SET @@enforce_storage_engine=ARIA;
+CREATE TABLE t1 (c INT ) ENGINE=INNODB;
+ERROR HY000: The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
+SHOW WARNINGS;
+Level Code Message
+Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
+Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set
+Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
+Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set
diff --git a/mysql-test/suite/galera/r/mysql-wsrep#198.result b/mysql-test/suite/galera/r/mysql-wsrep#198.result
index 7759c4f1982..95fb3e67fd1 100644
--- a/mysql-test/suite/galera/r/mysql-wsrep#198.result
+++ b/mysql-test/suite/galera/r/mysql-wsrep#198.result
@@ -7,14 +7,18 @@ SELECT 1 FROM DUAL;
1
1
LOCK TABLE t2 WRITE;
+connect node_2_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2_ctrl;
+SET SESSION wsrep_sync_wait=0;
connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2a;
OPTIMIZE TABLE t1,t2;;
+connection node_2_ctrl;
+SET SESSION wsrep_sync_wait = 0;
connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2b;
REPAIR TABLE t1,t2;;
-connection node_2;
-SET SESSION wsrep_sync_wait = 0;
+connection node_2_ctrl;
connection node_1;
INSERT INTO t2 VALUES (1);
connection node_2;
@@ -34,3 +38,4 @@ DROP TABLE t2;
connection node_1;
disconnect node_2a;
disconnect node_2b;
+disconnect node_2_ctrl;
diff --git a/mysql-test/suite/galera/t/enforce_storage_engine2.test b/mysql-test/suite/galera/t/enforce_storage_engine2.test
index 7a822bced59..dd52ea9e239 100644
--- a/mysql-test/suite/galera/t/enforce_storage_engine2.test
+++ b/mysql-test/suite/galera/t/enforce_storage_engine2.test
@@ -1,5 +1,6 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
+--source include/have_aria.inc
--echo #
--echo # MDEV-9312: storage engine not enforced during galera cluster
@@ -7,14 +8,21 @@
--echo #
--connection node_1
CREATE TABLE t1(i INT) ENGINE=INNODB;
+#
+# This is no longer supported because enforce_storage_engine is a
+# local setting, and the storage engine ultimately used on the
+# other members of the cluster depends on their configuration.
+# Currently, there is no way to query a remote node's
+# configuration.
+#
+--error ER_OPTION_PREVENTS_STATEMENT
CREATE TABLE t2(i INT) ENGINE=MYISAM;
--connection node_2
SHOW TABLES;
SHOW CREATE TABLE t1;
-SHOW CREATE TABLE t2;
# Cleanup
-DROP TABLE t1, t2;
+DROP TABLE t1;
--echo # End of tests
diff --git a/mysql-test/suite/galera/t/galera_aria.test b/mysql-test/suite/galera/t/galera_aria.test
new file mode 100644
index 00000000000..24dd2e5048b
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_aria.test
@@ -0,0 +1,19 @@
+--source include/galera_cluster.inc
+--source include/have_aria.inc
+--source include/log_bin.inc
+
+set session sql_mode='';
+SET @@enforce_storage_engine=INNODB;
+--error ER_OPTION_PREVENTS_STATEMENT
+CREATE TABLE t1 (c INT ) ENGINE=ARIA;
+SHOW WARNINGS;
+
+CREATE TABLE t1 (c INT );
+DROP TABLE t1;
+CREATE TABLE t1 (c INT ) ENGINE=INNODB;
+DROP TABLE t1;
+
+SET @@enforce_storage_engine=ARIA;
+--error ER_OPTION_PREVENTS_STATEMENT
+CREATE TABLE t1 (c INT ) ENGINE=INNODB;
+SHOW WARNINGS;
diff --git a/mysql-test/suite/galera/t/mysql-wsrep#198.test b/mysql-test/suite/galera/t/mysql-wsrep#198.test
index 98dea684f0d..78facd64356 100644
--- a/mysql-test/suite/galera/t/mysql-wsrep#198.test
+++ b/mysql-test/suite/galera/t/mysql-wsrep#198.test
@@ -10,21 +10,33 @@ SELECT 1 FROM DUAL;
LOCK TABLE t2 WRITE;
+--connect node_2_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connection node_2_ctrl
+SET SESSION wsrep_sync_wait=0;
+
--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--connection node_2a
--send OPTIMIZE TABLE t1,t2;
+--connection node_2_ctrl
+SET SESSION wsrep_sync_wait = 0;
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'acquiring total order isolation%';
+--let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST
+--source include/wait_condition_with_debug_and_kill.inc
+
--connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2
--connection node_2b
--send REPAIR TABLE t1,t2;
---connection node_2
-SET SESSION wsrep_sync_wait = 0;
---let $wait_condition = SELECT COUNT(*) BETWEEN 1 AND 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'Waiting to execute in isolation%';
+--connection node_2_ctrl
+--let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'acquiring total order isolation%';
--let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST
--source include/wait_condition_with_debug_and_kill.inc
--connection node_1
+# We have LOCK TABLE in node_2, so this could fail with a lock wait
+# timeout, or the next statement is fast enough and succeeds
+--error 0,ER_LOCK_WAIT_TIMEOUT
INSERT INTO t2 VALUES (1);
--connection node_2
@@ -43,3 +55,4 @@ DROP TABLE t2;
--disconnect node_2a
--disconnect node_2b
+--disconnect node_2_ctrl
diff --git a/mysql-test/suite/galera_3nodes/r/inconsistency_shutdown.result b/mysql-test/suite/galera_3nodes/r/inconsistency_shutdown.result
index cd4087e01ca..bbcad5ee4db 100644
--- a/mysql-test/suite/galera_3nodes/r/inconsistency_shutdown.result
+++ b/mysql-test/suite/galera_3nodes/r/inconsistency_shutdown.result
@@ -32,8 +32,8 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 6;
UPDATE t1 SET f2 = 1 WHERE f1 = 7;
UPDATE t1 SET f2 = 1 WHERE f1 = 8;
connection node_2;
-SET wsrep_on=OFF;
-SET wsrep_on=ON;
+# make sure all events have landed in the slave queue
+set wsrep_sync_wait=0;
UNLOCK TABLES;
SET SESSION wsrep_on = ON;
SET SESSION wsrep_sync_wait = 15;
@@ -56,7 +56,8 @@ f1 f2
7 1
8 1
connection node_2;
-SET GLOBAL wsrep_on=OFF;
+# Gracefully restart the node
+set wsrep_on=OFF;
# restart
DROP TABLE t1;
connection node_1;
@@ -73,11 +74,15 @@ INSERT INTO t1 VALUES (8, 0);
COMMIT;
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 INT);
connection node_2;
+# Allow 1K slave queue without flow control
SET GLOBAL wsrep_provider_options='gcs.fc_limit=1K';
+# Introduce inconsistency
SET wsrep_on=OFF;
DROP TABLE t2;
SET wsrep_on=ON;
+# set up sync point to ensure DROP TABLE replication order below
SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+# Build up slave queue:
LOCK TABLES t1 READ;
connection node_1;
UPDATE t1 SET f2 = 1 WHERE f1 = 1;
@@ -86,18 +91,19 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 3;
UPDATE t1 SET f2 = 1 WHERE f1 = 4;
UPDATE t1 SET f2 = 2 WHERE f1 = 4;
/* dependent applier */;
+# interleave a failing statement
connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2a;
DROP TABLE t2;;
+# make sure DROP TABLE from above has replicated
connection node_2;
-SET wsrep_on=OFF;
+set wsrep_sync_wait=0;
"Wait for DROP TABLE to replicate"
SET SESSION wsrep_on = 0;
-SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
"DROP TABLE replicated"
-SET wsrep_on=ON;
connection node_1;
UPDATE t1 SET f2 = 3 WHERE f1 = 4;
/* dependent applier */
@@ -106,8 +112,7 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 6;
UPDATE t1 SET f2 = 1 WHERE f1 = 7;
UPDATE t1 SET f2 = 1 WHERE f1 = 8;
connection node_2;
-SET wsrep_on=OFF;
-SET wsrep_on=ON;
+# make sure all events have landed in the slave queue
UNLOCK TABLES;
connection node_2a;
ERROR 42S02: Unknown table 'test.t2'
@@ -128,11 +133,11 @@ f1 f2
7 1
8 1
connection node_2;
-SET SESSION wsrep_on = ON;
+set wsrep_on=OFF;
SET SESSION wsrep_sync_wait = 15;
-SET SESSION wsrep_on = ON;
+# Wait for the node to shut down replication
SET SESSION wsrep_sync_wait = 15;
-SET GLOBAL wsrep_on=OFF;
+# Gracefully restart the node
# restart
DROP TABLE t1;
CALL mtr.add_suppression("Can't find record in 't1'");
diff --git a/mysql-test/suite/galera_3nodes/t/inconsistency_shutdown.test b/mysql-test/suite/galera_3nodes/t/inconsistency_shutdown.test
index 347433a6f14..dcd8a7b15ca 100644
--- a/mysql-test/suite/galera_3nodes/t/inconsistency_shutdown.test
+++ b/mysql-test/suite/galera_3nodes/t/inconsistency_shutdown.test
@@ -33,6 +33,7 @@ SET wsrep_on=OFF;
DELETE FROM t1 WHERE f1 = 2;
DELETE FROM t1 WHERE f1 = 4;
SET wsrep_on=ON;
+--source include/galera_wait_ready.inc
# Build up slave queue:
# - first 8 events will be picked by slave threads
@@ -51,11 +52,11 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 7;
UPDATE t1 SET f2 = 1 WHERE f1 = 8;
--connection node_2
-# make sure all events landed to slave queue
-SET wsrep_on=OFF;
+--echo # make sure all events have landed in the slave queue
+set wsrep_sync_wait=0;
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_recv_queue';
--source include/wait_condition.inc
-SET wsrep_on=ON;
+
UNLOCK TABLES;
--source include/wsrep_wait_disconnect.inc
# Wait for the node to shutdown replication
@@ -70,8 +71,8 @@ SHOW STATUS LIKE 'wsrep_cluster_size';
SELECT * FROM t1;
--connection node_2
-#Gracefully restart the node
-SET GLOBAL wsrep_on=OFF;
+--echo # Gracefully restart the node
+set wsrep_on=OFF;
--source include/shutdown_mysqld.inc
--source include/start_mysqld.inc
--source include/galera_wait_ready.inc
@@ -98,20 +99,21 @@ COMMIT;
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 INT);
--connection node_2
-# Allow 1K slave queue without flow control
+--echo # Allow 1K slave queue without flow control
SET GLOBAL wsrep_provider_options='gcs.fc_limit=1K';
-# Introduce inconsistency
-SET wsrep_on=OFF;
--let $wait_condition = SELECT COUNT(*)=1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't2';
--source include/wait_condition.inc
+--echo # Introduce inconsistency
+SET wsrep_on=OFF;
DROP TABLE t2;
SET wsrep_on=ON;
+--source include/galera_wait_ready.inc
-# set up sync point to ensure DROP TABLE replication order below
+--echo # set up sync point to ensure DROP TABLE replication order below
--let galera_sync_point = after_replicate_sync
--source include/galera_set_sync_point.inc
-# Build up slave queue:
+--echo # Build up slave queue:
# - first 8 events will be picked by slave threads
# - one more event will be waiting in slave queue
LOCK TABLES t1 READ;
@@ -123,20 +125,19 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 3;
UPDATE t1 SET f2 = 1 WHERE f1 = 4;
UPDATE t1 SET f2 = 2 WHERE f1 = 4; /* dependent applier */;
-# interleave a failing statement
+--echo # interleave a failing statement
--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--connection node_2a
--send DROP TABLE t2;
-# make sure DROP TABLE from above has replicated
+--echo # make sure DROP TABLE from above has replicated
--connection node_2
-SET wsrep_on=OFF;
+set wsrep_sync_wait=0;
--echo "Wait for DROP TABLE to replicate"
--source include/galera_wait_sync_point.inc
--source include/galera_signal_sync_point.inc
--source include/galera_clear_sync_point.inc
--echo "DROP TABLE replicated"
-SET wsrep_on=ON;
--connection node_1
UPDATE t1 SET f2 = 3 WHERE f1 = 4; /* dependent applier */
@@ -146,11 +147,10 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 7;
UPDATE t1 SET f2 = 1 WHERE f1 = 8;
--connection node_2
-# make sure all events landed to slave queue
-SET wsrep_on=OFF;
+--echo # make sure all events have landed in the slave queue
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_recv_queue';
--source include/wait_condition.inc
-SET wsrep_on=ON;
+
UNLOCK TABLES;
--connection node_2a
@@ -165,12 +165,13 @@ SHOW STATUS LIKE 'wsrep_cluster_size';
SELECT * FROM t1;
--connection node_2
+set wsrep_on=OFF;
--source include/wsrep_wait_disconnect.inc
-# Wait for the node to shutdown replication
+--echo # Wait for the node to shut down replication
--let $members=0
--source include/wsrep_wait_membership.inc
-# Gracefully restart the node
-SET GLOBAL wsrep_on=OFF;
+--echo # Gracefully restart the node
+
--source include/shutdown_mysqld.inc
--source include/start_mysqld.inc
--source include/galera_wait_ready.inc
diff --git a/mysql-test/suite/innodb/r/doublewrite_debug.result b/mysql-test/suite/innodb/r/doublewrite_debug.result
index a743217f34e..e1d2b0137e1 100644
--- a/mysql-test/suite/innodb/r/doublewrite_debug.result
+++ b/mysql-test/suite/innodb/r/doublewrite_debug.result
@@ -26,13 +26,13 @@ SET GLOBAL innodb_fast_shutdown = 0;
# restart: --debug_dbug=+d,ib_log_checkpoint_avoid_hard --innodb_flush_sync=0
begin;
insert into t1 values (6, repeat('%', 400));
+SET GLOBAL innodb_max_dirty_pages_pct_lwm=0, innodb_max_dirty_pages_pct=0;
# Make the first page dirty for system tablespace
set global innodb_saved_page_number_debug = 0;
set global innodb_fil_make_page_dirty_debug = 0;
# Make the second page dirty for system tablespace
set global innodb_saved_page_number_debug = 1;
set global innodb_fil_make_page_dirty_debug = 0;
-set global innodb_buf_flush_list_now = 1;
# Kill the server
# Make the 1st page (page_no=0) and 2nd page (page_no=1)
# of the system tablespace all zeroes.
diff --git a/mysql-test/suite/innodb/r/innodb_defrag_stats.result b/mysql-test/suite/innodb/r/innodb_defrag_stats.result
index c6fd7006f9e..934405c33cd 100644
--- a/mysql-test/suite/innodb/r/innodb_defrag_stats.result
+++ b/mysql-test/suite/innodb/r/innodb_defrag_stats.result
@@ -131,3 +131,11 @@ ALTER TABLE t2 STATS_PERSISTENT=1;
DROP TABLE t2;
SELECT * FROM mysql.innodb_index_stats;
database_name table_name index_name last_update stat_name stat_value sample_size stat_description
+#
+# MDEV-37123 dict_table_open_on_id() fails to release dict_sys.latch
+#
+SET GLOBAL innodb_defragment_stats_accuracy=1;
+CREATE TABLE t (f INT,f2 CHAR(1),KEY k1 (f2),FULLTEXT KEY(f2),
+FOREIGN KEY(f2) REFERENCES t (f3)) ENGINE=InnoDB;
+ERROR HY000: Can't create table `test`.`t` (errno: 150 "Foreign key constraint is incorrectly formed")
+SET GLOBAL innodb_defragment_stats_accuracy=default;
diff --git a/mysql-test/suite/innodb/r/lock_isolation.result b/mysql-test/suite/innodb/r/lock_isolation.result
index 1e1625ae458..2044f001ad8 100644
--- a/mysql-test/suite/innodb/r/lock_isolation.result
+++ b/mysql-test/suite/innodb/r/lock_isolation.result
@@ -166,7 +166,6 @@ SELECT * FROM t FORCE INDEX (b) FOR UPDATE;
a b
1 NULL
COMMIT;
-disconnect con_weird;
connection consistent;
SELECT * FROM t FORCE INDEX (b) FOR UPDATE;
a b
@@ -230,9 +229,67 @@ UPDATE t SET b=4 WHERE a=1;
connection consistent;
SELECT * FROM t WHERE a=1 FOR UPDATE;
ERROR HY000: Record has changed since last read in table 't'
-disconnect consistent;
disconnect disable_purging;
connection default;
SET DEBUG_SYNC="RESET";
DROP TABLE t;
+CREATE TABLE t1(a INT) ENGINE=InnoDB STATS_PERSISTENT=0;
+CREATE TABLE t2(a INT) ENGINE=InnoDB STATS_PERSISTENT=0;
+BEGIN;
+INSERT INTO t1 SET a=1;
+connection con_weird;
+SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+BEGIN;
+INSERT INTO t2 SET a=1;
+connection consistent;
+SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+BEGIN;
+INSERT INTO t2 SET a=2;
+connection default;
+COMMIT;
+connection con_weird;
+SELECT * FROM t1;
+a
+1
+COMMIT;
+connection consistent;
+SELECT * FROM t1;
+ERROR HY000: Record has changed since last read in table 't1'
+COMMIT;
+connection default;
+BEGIN;
+INSERT INTO t1 SET a=2;
+connection con_weird;
+SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+INSERT INTO t2 SET a=3;
+connection consistent;
+SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+INSERT INTO t2 SET a=2;
+connection default;
+COMMIT;
+connection con_weird;
+SELECT * FROM t1;
+a
+1
+2
+COMMIT;
+disconnect con_weird;
+connection consistent;
+SELECT * FROM t1;
+ERROR HY000: Record has changed since last read in table 't1'
+COMMIT;
+disconnect consistent;
+connection default;
+DROP TABLE t1,t2;
+#
+# MDEV-37215 SELECT...FOR UPDATE crash under SERIALIZABLE
+#
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB;
+SELECT * FROM t1 FOR UPDATE;
+a
+DROP TABLE t1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
# End of 10.6 tests
diff --git a/mysql-test/suite/innodb/r/scrub_debug.result b/mysql-test/suite/innodb/r/scrub_debug.result
index 7b0a9fd501c..7da2e0c51b0 100644
--- a/mysql-test/suite/innodb/r/scrub_debug.result
+++ b/mysql-test/suite/innodb/r/scrub_debug.result
@@ -15,5 +15,21 @@ FLUSH TABLE t1 FOR EXPORT;
NOT FOUND /repairman/ in t1.ibd
UNLOCK TABLES;
DROP TABLE t1;
+#
+# MDEV-37183 innodb_immediate_scrub_data_uncompressed=ON may break
+# crash recovery
+#
+SET GLOBAL innodb_limit_optimistic_insert_debug=0;
+CREATE TABLE t(a VARCHAR(1) PRIMARY KEY,INDEX(a DESC)) ENGINE=InnoDB;
+INSERT INTO t VALUES('2'),('1'),(''),('6'),('4'),('3');
+SET GLOBAL innodb_limit_optimistic_insert_debug=3;
+INSERT INTO t VALUES('8');
+CHECK TABLE t;
+Table Op Msg_type Msg_text
+test.t check status OK
+SELECT COUNT(*) FROM t;
+COUNT(*)
+7
+DROP TABLE t;
SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=@save_debug;
SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=@save_scrub;
diff --git a/mysql-test/suite/innodb/t/doublewrite_debug.test b/mysql-test/suite/innodb/t/doublewrite_debug.test
index b207823e3d1..e31cf34dbc1 100644
--- a/mysql-test/suite/innodb/t/doublewrite_debug.test
+++ b/mysql-test/suite/innodb/t/doublewrite_debug.test
@@ -51,6 +51,8 @@ let $restart_parameters=--debug_dbug=+d,ib_log_checkpoint_avoid_hard --innodb_fl
begin;
insert into t1 values (6, repeat('%', 400));
+SET GLOBAL innodb_max_dirty_pages_pct_lwm=0, innodb_max_dirty_pages_pct=0;
+
--echo # Make the first page dirty for system tablespace
set global innodb_saved_page_number_debug = 0;
set global innodb_fil_make_page_dirty_debug = 0;
@@ -59,7 +61,11 @@ set global innodb_fil_make_page_dirty_debug = 0;
set global innodb_saved_page_number_debug = 1;
set global innodb_fil_make_page_dirty_debug = 0;
-set global innodb_buf_flush_list_now = 1;
+let $wait_condition =
+SELECT variable_value = 0
+FROM information_schema.global_status
+WHERE variable_name = 'INNODB_BUFFER_POOL_PAGES_DIRTY';
+--source include/wait_condition.inc
--let CLEANUP_IF_CHECKPOINT=drop table t1, unexpected_checkpoint;
--source ../include/no_checkpoint_end.inc
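
One remark on the change above: instead of the removed synchronous knob innodb_buf_flush_list_now, the test now relies on the page cleaner, which flushes everything in the background once both dirty-page thresholds are zero, so the test must poll until the buffer pool is clean before the kill. A minimal sketch of that polling idiom with an explicit timeout ($wait_timeout is honored by wait_condition.inc):

    --let $wait_timeout= 60
    let $wait_condition =
      SELECT variable_value = 0
      FROM information_schema.global_status
      WHERE variable_name = 'INNODB_BUFFER_POOL_PAGES_DIRTY';
    --source include/wait_condition.inc
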
diff --git a/mysql-test/suite/innodb/t/innodb_defrag_stats.test b/mysql-test/suite/innodb/t/innodb_defrag_stats.test
index 3730eb657af..56477e8f17e 100644
--- a/mysql-test/suite/innodb/t/innodb_defrag_stats.test
+++ b/mysql-test/suite/innodb/t/innodb_defrag_stats.test
@@ -86,3 +86,12 @@ ALTER TABLE t2 STATS_PERSISTENT=1;
DROP TABLE t2;
SELECT * FROM mysql.innodb_index_stats;
+
+--echo #
+--echo # MDEV-37123 dict_table_open_on_id() fails to release dict_sys.latch
+--echo #
+SET GLOBAL innodb_defragment_stats_accuracy=1;
+--error ER_CANT_CREATE_TABLE
+CREATE TABLE t (f INT,f2 CHAR(1),KEY k1 (f2),FULLTEXT KEY(f2),
+ FOREIGN KEY(f2) REFERENCES t (f3)) ENGINE=InnoDB;
+SET GLOBAL innodb_defragment_stats_accuracy=default;
diff --git a/mysql-test/suite/innodb/t/lock_isolation.test b/mysql-test/suite/innodb/t/lock_isolation.test
index b332f2e867a..7506754cf8a 100644
--- a/mysql-test/suite/innodb/t/lock_isolation.test
+++ b/mysql-test/suite/innodb/t/lock_isolation.test
@@ -174,7 +174,6 @@ ROLLBACK;
--reap
SELECT * FROM t FORCE INDEX (b) FOR UPDATE;
COMMIT;
---disconnect con_weird
--connection consistent
SELECT * FROM t FORCE INDEX (b) FOR UPDATE;
@@ -246,12 +245,65 @@ UPDATE t SET b=4 WHERE a=1;
--connection consistent
--error ER_CHECKREAD
SELECT * FROM t WHERE a=1 FOR UPDATE;
---disconnect consistent
--disconnect disable_purging
--connection default
SET DEBUG_SYNC="RESET";
DROP TABLE t;
+CREATE TABLE t1(a INT) ENGINE=InnoDB STATS_PERSISTENT=0;
+CREATE TABLE t2(a INT) ENGINE=InnoDB STATS_PERSISTENT=0;
+BEGIN; INSERT INTO t1 SET a=1;
+--connection con_weird
+SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+BEGIN; INSERT INTO t2 SET a=1;
+--connection consistent
+SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+BEGIN; INSERT INTO t2 SET a=2;
+--connection default
+COMMIT;
+--connection con_weird
+SELECT * FROM t1;
+COMMIT;
+--connection consistent
+--disable_ps2_protocol
+--error ER_CHECKREAD
+SELECT * FROM t1;
+--enable_ps2_protocol
+COMMIT;
+--connection default
+BEGIN; INSERT INTO t1 SET a=2;
+--connection con_weird
+SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+START TRANSACTION WITH CONSISTENT SNAPSHOT; INSERT INTO t2 SET a=3;
+--connection consistent
+SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+START TRANSACTION WITH CONSISTENT SNAPSHOT; INSERT INTO t2 SET a=2;
+--connection default
+COMMIT;
+--connection con_weird
+SELECT * FROM t1;
+COMMIT;
+--disconnect con_weird
+--connection consistent
+--disable_ps2_protocol
+--error ER_CHECKREAD
+SELECT * FROM t1;
+--enable_ps2_protocol
+COMMIT;
+--disconnect consistent
+--connection default
+DROP TABLE t1,t2;
+
+--echo #
+--echo # MDEV-37215 SELECT...FOR UPDATE crash under SERIALIZABLE
+--echo #
+
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB;
+SELECT * FROM t1 FOR UPDATE;
+DROP TABLE t1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+
--source include/wait_until_count_sessions.inc
--echo # End of 10.6 tests
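
Why connection 'consistent' keeps getting ER_CHECKREAD while 'con_weird' succeeds is not visible in this hunk; the sessions are configured earlier in the test. A plausible reading, stated here as an assumption: 'consistent' runs with innodb_snapshot_isolation=ON, under which a SERIALIZABLE locking read that meets a row version newer than its pinned read view fails with "Record has changed since last read" rather than returning it. Reduced to two sessions:

    # session 1 (assumed: SET innodb_snapshot_isolation=ON)
    SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
    START TRANSACTION WITH CONSISTENT SNAPSHOT;  # pins the read view
    # session 2 meanwhile: INSERT INTO t1 SET a=2; COMMIT;
    SELECT * FROM t1;  # locking read meets a post-snapshot row: ER_CHECKREAD
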
diff --git a/mysql-test/suite/innodb/t/scrub_debug.test b/mysql-test/suite/innodb/t/scrub_debug.test
index 8cebfca6106..b1603e961fd 100644
--- a/mysql-test/suite/innodb/t/scrub_debug.test
+++ b/mysql-test/suite/innodb/t/scrub_debug.test
@@ -24,5 +24,20 @@ FLUSH TABLE t1 FOR EXPORT;
-- source include/search_pattern_in_file.inc
UNLOCK TABLES;
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-37183 innodb_immediate_scrub_data_uncompressed=ON may break
+--echo # crash recovery
+--echo #
+SET GLOBAL innodb_limit_optimistic_insert_debug=0;
+# Note: MariaDB 10.6 fails to reproduce the crash; it maps DESC to ASC.
+CREATE TABLE t(a VARCHAR(1) PRIMARY KEY,INDEX(a DESC)) ENGINE=InnoDB;
+INSERT INTO t VALUES('2'),('1'),(''),('6'),('4'),('3');
+SET GLOBAL innodb_limit_optimistic_insert_debug=3;
+INSERT INTO t VALUES('8');
+CHECK TABLE t;
+SELECT COUNT(*) FROM t;
+DROP TABLE t;
+
SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=@save_debug;
SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=@save_scrub;
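
For context on the debug knob driving the new test: innodb_limit_optimistic_insert_debug caps how many records an optimistic insert may leave on a page, so a few one-character rows suffice to force page splits; with immediate scrubbing enabled the freed pages are wiped at once, which is the path MDEV-37183 broke in recovery. Sketch of the effect:

    SET GLOBAL innodb_limit_optimistic_insert_debug=3;  # at most ~3 records per page
    INSERT INTO t VALUES('8');  # now tends to split a page; freed pages get scrubbed
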
diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
index 52cbede7314..52bd819286f 100644
--- a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
+++ b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
@@ -993,3 +993,15 @@ FTS_DOC_ID f1 f2
4294967298 txt bbb
100000000000 aaa bbb
DROP TABLE t1;
+#
+# MDEV-30363 Failing assertion: trx->error_state == DB_SUCCESS
+# in que_run_threads
+#
+CREATE TABLE server_stopword (value VARCHAR(1))engine=innodb;
+SET GLOBAL innodb_ft_server_stopword_table='test/server_stopword';
+CREATE TABLE t (t VARCHAR(1) COLLATE utf8_unicode_ci,
+FULLTEXT (t))engine=innodb;
+TRUNCATE TABLE t;
+DROP TABLE t;
+DROP TABLE server_stopword;
+SET GLOBAL innodb_ft_server_stopword_table= default;
diff --git a/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test b/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test
index 4eaf5b2e0bd..7f2c21ee404 100644
--- a/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test
+++ b/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test
@@ -967,3 +967,16 @@ CREATE FULLTEXT INDEX i ON t1 (f2);
SELECT * FROM t1 WHERE match(f2) against("bbb");
# Cleanup
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-30363 Failing assertion: trx->error_state == DB_SUCCESS
+--echo # in que_run_threads
+--echo #
+CREATE TABLE server_stopword (value VARCHAR(1))engine=innodb;
+SET GLOBAL innodb_ft_server_stopword_table='test/server_stopword';
+CREATE TABLE t (t VARCHAR(1) COLLATE utf8_unicode_ci,
+ FULLTEXT (t))engine=innodb;
+TRUNCATE TABLE t;
+DROP TABLE t;
+DROP TABLE server_stopword;
+SET GLOBAL innodb_ft_server_stopword_table= default;
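
Setup note: a user stopword table must consist of a single VARCHAR column named value; once innodb_ft_server_stopword_table points at it, later FULLTEXT index builds read their stopwords from that table, and TRUNCATE rebuilds the FTS auxiliary tables, which is where the MDEV-30363 assertion fired. A minimal sketch (my_stopwords is an illustrative name):

    CREATE TABLE my_stopwords (value VARCHAR(30)) ENGINE=InnoDB;
    INSERT INTO my_stopwords VALUES ('the'),('a');
    SET GLOBAL innodb_ft_server_stopword_table='test/my_stopwords';
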
diff --git a/mysql-test/suite/mariabackup/partial.result b/mysql-test/suite/mariabackup/partial.result
index cefda922868..27d515dfb8a 100644
--- a/mysql-test/suite/mariabackup/partial.result
+++ b/mysql-test/suite/mariabackup/partial.result
@@ -25,3 +25,15 @@ i
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t21;
+#
+# MDEV-36287 mariabackup ignores tables-file option
+#
+CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB;
+CREATE TABLE t2(f1 INT NOT NULL)ENGINE=InnoDB;
+INSERT INTO t1 values(1);
+# Only backup table t1 by creating tables_file
+# Backup with --tables-file option
+# table t2 is skipped. Shows only t1
+t1.frm
+t1.ibd
+DROP TABLE t2, t1;
diff --git a/mysql-test/suite/mariabackup/partial.test b/mysql-test/suite/mariabackup/partial.test
index af6da274102..e9f4d90640b 100644
--- a/mysql-test/suite/mariabackup/partial.test
+++ b/mysql-test/suite/mariabackup/partial.test
@@ -78,3 +78,25 @@ DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t21;
rmdir $targetdir;
+
+--echo #
+--echo # MDEV-36287 mariabackup ignores tables-file option
+--echo #
+CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB;
+CREATE TABLE t2(f1 INT NOT NULL)ENGINE=InnoDB;
+INSERT INTO t1 values(1);
+let targetdir=$MYSQLTEST_VARDIR/tmp/backup;
+let tables_list=$MYSQLTEST_VARDIR/tmp/tables_list.out;
+--echo # Only backup table t1 by creating tables_file
+--exec echo "test.t1" > $tables_list
+
+--echo # Backup with --tables-file option
+--disable_result_log
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --tables-file=$tables_list --target-dir=$targetdir;
+--enable_result_log
+
+--echo # table t2 is skipped. Shows only t1
+list_files $targetdir/test;
+DROP TABLE t2, t1;
+rmdir $targetdir;
+remove_file $tables_list;
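
For reference, --tables-file takes a plain-text file with one fully qualified table name per line; anything not listed is skipped from the backup. A sketch with two tables (test.t3 is an illustrative name):

    --exec echo "test.t1" > $tables_list
    --exec echo "test.t3" >> $tables_list
    exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --tables-file=$tables_list --target-dir=$targetdir;
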
diff --git a/mysql-test/suite/period/r/innodb_debug.result b/mysql-test/suite/period/r/innodb_debug.result
new file mode 100644
index 00000000000..eafc2230fdb
--- /dev/null
+++ b/mysql-test/suite/period/r/innodb_debug.result
@@ -0,0 +1,216 @@
+#
+# MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records
+#
+## INSERT
+create table t1 (
+id int, s date, e date,
+period for p(s,e),
+unique(id, p without overlaps)
+) engine=innodb;
+start transaction;
+select * from t1;
+id s e
+connect con1,localhost,root;
+insert t1 values(10, date'2010-10-10', date'2010-11-11');
+delete from t1;
+set transaction isolation level read committed;
+set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
+insert t1 values(10, date'2010-09-09', date'2010-11-10');
+connect con2,localhost,root;
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+insert t1 values(10, date'2010-10-10', date'2010-11-12');
+set debug_sync="now SIGNAL do_insert";
+connection con1;
+ERROR 23000: Duplicate entry '10-2010-11-10-2010-09-09' for key 'id'
+connection default;
+select * from t1;
+id s e
+commit;
+select * from t1;
+id s e
+10 2010-10-10 2010-11-12
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+drop table t1;
+## UPDATE
+create table t1 (
+id int, s date, e date,
+period for p(s,e),
+unique(id, p without overlaps)
+) engine=innodb;
+start transaction;
+select * from t1;
+id s e
+connect con1,localhost,root;
+insert t1 values(10, date'2010-10-10', date'2010-11-11');
+delete from t1;
+insert t1 values(10, date'2010-09-09', date'2010-09-10');
+insert t1 values(10, date'2010-12-10', date'2010-12-12');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+update t1 set e=e + interval 2 month where s=date'2010-09-09';
+connect con2,localhost,root;
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set s=date'2010-10-10' where e=date'2010-12-12';
+set debug_sync="now SIGNAL do_insert";
+connection con1;
+ERROR 23000: Duplicate entry '10-2010-11-10-2010-09-09' for key 'id'
+connection default;
+select * from t1;
+id s e
+commit;
+select * from t1;
+id s e
+10 2010-09-09 2010-09-10
+10 2010-10-10 2010-12-12
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+drop table t1;
+## INSERT IGNORE
+create table t1 (
+id int, s date, e date,
+period for p(s,e),
+unique(id, p without overlaps)
+) engine=innodb;
+start transaction;
+select * from t1;
+id s e
+connect con1,localhost,root;
+insert t1 values(10, date'2010-10-10', date'2010-11-11');
+delete from t1;
+set transaction isolation level read committed;
+set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
+insert ignore t1 values(10, date'2010-09-09', date'2010-11-10'),(11, date'2010-09-09', date'2010-11-10');
+connect con2,localhost,root;
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+insert t1 values(10, date'2010-10-10', date'2010-11-12');
+set debug_sync="now SIGNAL do_insert";
+connection con1;
+Warnings:
+Warning 1062 Duplicate entry '10-2010-11-10-2010-09-09' for key 'id'
+connection default;
+select * from t1;
+id s e
+commit;
+select * from t1;
+id s e
+10 2010-10-10 2010-11-12
+11 2010-09-09 2010-11-10
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+drop table t1;
+## UPDATE IGNORE
+create table t1 (
+id int, s date, e date,
+period for p(s,e),
+unique(id, p without overlaps)
+) engine=innodb;
+start transaction;
+select * from t1;
+id s e
+connect con1,localhost,root;
+insert t1 values(10, date'2010-10-10', date'2010-11-11');
+delete from t1;
+insert t1 values(10, date'2010-09-09', date'2010-09-10');
+insert t1 values(10, date'2010-12-10', date'2010-12-12');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+update ignore t1 set e=e + interval 2 month where s=date'2010-09-09';
+connect con2,localhost,root;
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set s=date'2010-10-10' where e=date'2010-12-12';
+set debug_sync="now SIGNAL do_insert";
+connection con1;
+ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint WITHOUT OVERLAPS is not currently supported
+connection default;
+select * from t1;
+id s e
+commit;
+select * from t1;
+id s e
+10 2010-09-09 2010-09-10
+10 2010-10-10 2010-12-12
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+drop table t1;
+## UPDATE modifying PK
+create table t1 (
+id int, s date, e date,
+period for p(s,e),
+primary key (id, p without overlaps)
+) engine=innodb;
+start transaction;
+select * from t1;
+id s e
+connect con1,localhost,root;
+insert t1 values(10, date'2010-10-10', date'2010-11-11');
+delete from t1;
+insert t1 values(10, date'2010-09-09', date'2010-09-10');
+insert t1 values(10, date'2010-12-10', date'2010-12-12');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+update t1 set e=e + interval 2 month where s=date'2010-09-09';
+connect con2,localhost,root;
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set s=date'2010-10-10' where e=date'2010-12-12';
+set debug_sync="now SIGNAL do_insert";
+connection con1;
+ERROR 23000: Duplicate entry '10-2010-11-10-2010-09-09' for key 'PRIMARY'
+connection default;
+select * from t1;
+id s e
+commit;
+select * from t1;
+id s e
+10 2010-09-09 2010-09-10
+10 2010-10-10 2010-12-12
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+drop table t1;
+## UPDATE IGNORE modifying PK
+create table t1 (
+id int, s date, e date,
+period for p(s,e),
+primary key (id, p without overlaps)
+) engine=innodb;
+start transaction;
+select * from t1;
+id s e
+connect con1,localhost,root;
+insert t1 values(10, date'2010-10-10', date'2010-11-11');
+delete from t1;
+insert t1 values(10, date'2010-09-09', date'2010-09-10');
+insert t1 values(10, date'2010-12-10', date'2010-12-12');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+update ignore t1 set e=e + interval 2 month where s=date'2010-09-09';
+connect con2,localhost,root;
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set s=date'2010-10-10' where e=date'2010-12-12';
+set debug_sync="now SIGNAL do_insert";
+connection con1;
+ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint WITHOUT OVERLAPS is not currently supported
+connection default;
+select * from t1;
+id s e
+commit;
+select * from t1;
+id s e
+10 2010-09-09 2010-09-10
+10 2010-10-10 2010-12-12
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+drop table t1;
+# End of 10.6 tests
diff --git a/mysql-test/suite/period/r/long_unique.result b/mysql-test/suite/period/r/long_unique.result
index 5c5f4297fb9..fa7817fb562 100644
--- a/mysql-test/suite/period/r/long_unique.result
+++ b/mysql-test/suite/period/r/long_unique.result
@@ -15,3 +15,20 @@ INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01');
DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01';
ERROR 23000: Duplicate entry 'foo' for key 'b'
DROP TABLE t1;
+# End of 10.5 tests
+#
+# MDEV-37312 ASAN errors or assertion failure upon attempt to UPDATE FOR PORTION violating long unique under READ COMMITTED
+#
+create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb;
+insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01');
+set transaction isolation level read committed;
+update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1;
+ERROR 23000: Duplicate entry 'foo' for key 'f'
+drop table t1;
+create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb partition by hash (a);
+insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01');
+set transaction isolation level read committed;
+update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1;
+ERROR 23000: Duplicate entry 'foo' for key 'f'
+drop table t1;
+# End of 10.6 tests
diff --git a/mysql-test/suite/period/t/innodb_debug.test b/mysql-test/suite/period/t/innodb_debug.test
new file mode 100644
index 00000000000..46ff13cb5fb
--- /dev/null
+++ b/mysql-test/suite/period/t/innodb_debug.test
@@ -0,0 +1,209 @@
+--source include/have_innodb.inc
+--source include/have_debug_sync.inc
+
+--echo #
+--echo # MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records
+--echo #
+
+--echo ## INSERT
+create table t1 (
+ id int, s date, e date,
+ period for p(s,e),
+ unique(id, p without overlaps)
+) engine=innodb;
+
+start transaction;
+select * from t1;
+--connect con1,localhost,root
+insert t1 values(10, date'2010-10-10', date'2010-11-11');
+delete from t1;
+set transaction isolation level read committed;
+set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
+--send insert t1 values(10, date'2010-09-09', date'2010-11-10')
+--connect con2,localhost,root
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+insert t1 values(10, date'2010-10-10', date'2010-11-12');
+set debug_sync="now SIGNAL do_insert";
+--connection con1
+--error ER_DUP_ENTRY
+--reap
+--connection default
+select * from t1;
+commit;
+select * from t1;
+--disconnect con1
+--disconnect con2
+set debug_sync='RESET';
+drop table t1;
+
+--echo ## UPDATE
+create table t1 (
+ id int, s date, e date,
+ period for p(s,e),
+ unique(id, p without overlaps)
+) engine=innodb;
+
+start transaction;
+select * from t1;
+--connect con1,localhost,root
+insert t1 values(10, date'2010-10-10', date'2010-11-11');
+delete from t1;
+insert t1 values(10, date'2010-09-09', date'2010-09-10');
+insert t1 values(10, date'2010-12-10', date'2010-12-12');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+--send update t1 set e=e + interval 2 month where s=date'2010-09-09'
+--connect con2,localhost,root
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set s=date'2010-10-10' where e=date'2010-12-12';
+set debug_sync="now SIGNAL do_insert";
+--connection con1
+--error ER_DUP_ENTRY
+--reap
+--connection default
+select * from t1;
+commit;
+select * from t1;
+--disconnect con1
+--disconnect con2
+set debug_sync='RESET';
+drop table t1;
+
+--echo ## INSERT IGNORE
+create table t1 (
+ id int, s date, e date,
+ period for p(s,e),
+ unique(id, p without overlaps)
+) engine=innodb;
+
+start transaction;
+select * from t1;
+--connect con1,localhost,root
+insert t1 values(10, date'2010-10-10', date'2010-11-11');
+delete from t1;
+set transaction isolation level read committed;
+set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
+--send insert ignore t1 values(10, date'2010-09-09', date'2010-11-10'),(11, date'2010-09-09', date'2010-11-10')
+--connect con2,localhost,root
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+insert t1 values(10, date'2010-10-10', date'2010-11-12');
+set debug_sync="now SIGNAL do_insert";
+--connection con1
+--reap
+--connection default
+select * from t1;
+commit;
+select * from t1;
+--disconnect con1
+--disconnect con2
+set debug_sync='RESET';
+drop table t1;
+
+--echo ## UPDATE IGNORE
+create table t1 (
+ id int, s date, e date,
+ period for p(s,e),
+ unique(id, p without overlaps)
+) engine=innodb;
+
+start transaction;
+select * from t1;
+--connect con1,localhost,root
+insert t1 values(10, date'2010-10-10', date'2010-11-11');
+delete from t1;
+insert t1 values(10, date'2010-09-09', date'2010-09-10');
+insert t1 values(10, date'2010-12-10', date'2010-12-12');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+--send update ignore t1 set e=e + interval 2 month where s=date'2010-09-09'
+--connect con2,localhost,root
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set s=date'2010-10-10' where e=date'2010-12-12';
+set debug_sync="now SIGNAL do_insert";
+--connection con1
+--error ER_NOT_SUPPORTED_YET
+--reap
+--connection default
+select * from t1;
+commit;
+select * from t1;
+--disconnect con1
+--disconnect con2
+set debug_sync='RESET';
+drop table t1;
+
+--echo ## UPDATE modifying PK
+create table t1 (
+ id int, s date, e date,
+ period for p(s,e),
+ primary key (id, p without overlaps)
+) engine=innodb;
+
+start transaction;
+select * from t1;
+--connect con1,localhost,root
+insert t1 values(10, date'2010-10-10', date'2010-11-11');
+delete from t1;
+insert t1 values(10, date'2010-09-09', date'2010-09-10');
+insert t1 values(10, date'2010-12-10', date'2010-12-12');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+--send update t1 set e=e + interval 2 month where s=date'2010-09-09'
+--connect con2,localhost,root
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set s=date'2010-10-10' where e=date'2010-12-12';
+set debug_sync="now SIGNAL do_insert";
+--connection con1
+--error ER_DUP_ENTRY
+--reap
+--connection default
+select * from t1;
+commit;
+select * from t1;
+--disconnect con1
+--disconnect con2
+set debug_sync='RESET';
+drop table t1;
+
+--echo ## UPDATE IGNORE modifying PK
+create table t1 (
+ id int, s date, e date,
+ period for p(s,e),
+ primary key (id, p without overlaps)
+) engine=innodb;
+
+start transaction;
+select * from t1;
+--connect con1,localhost,root
+insert t1 values(10, date'2010-10-10', date'2010-11-11');
+delete from t1;
+insert t1 values(10, date'2010-09-09', date'2010-09-10');
+insert t1 values(10, date'2010-12-10', date'2010-12-12');
+set transaction isolation level read committed;
+set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
+--send update ignore t1 set e=e + interval 2 month where s=date'2010-09-09'
+--connect con2,localhost,root
+set debug_sync="now WAIT_FOR checked_duplicate";
+set transaction isolation level read committed;
+update t1 set s=date'2010-10-10' where e=date'2010-12-12';
+set debug_sync="now SIGNAL do_insert";
+--connection con1
+--error ER_NOT_SUPPORTED_YET
+--reap
+--connection default
+select * from t1;
+commit;
+select * from t1;
+--disconnect con1
+--disconnect con2
+set debug_sync='RESET';
+drop table t1;
+
+
+--echo # End of 10.6 tests
+
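All six scenarios above run the same three-connection choreography; stripped to its skeleton (sync point names as in the test, debug build required) it is:

    # con1: pause between the duplicate check and the row operation
    set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
    --send insert t1 values(10, date'2010-09-09', date'2010-11-10')
    # con2: wait for con1 to reach the pause, then slip in an overlapping row
    set debug_sync="now WAIT_FOR checked_duplicate";
    insert t1 values(10, date'2010-10-10', date'2010-11-12');
    set debug_sync="now SIGNAL do_insert";
    # con1 resumes and must now see the conflict: ER_DUP_ENTRY, not a duplicate row
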
diff --git a/mysql-test/suite/period/t/long_unique.test b/mysql-test/suite/period/t/long_unique.test
index c2dcd3f6c3f..bca2f15ebae 100644
--- a/mysql-test/suite/period/t/long_unique.test
+++ b/mysql-test/suite/period/t/long_unique.test
@@ -1,3 +1,4 @@
+--source include/have_innodb.inc
--source include/have_partition.inc
--echo #
@@ -21,3 +22,23 @@ INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01');
DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01';
DROP TABLE t1;
+--echo # End of 10.5 tests
+
+--echo #
+--echo # MDEV-37312 ASAN errors or assertion failure upon attempt to UPDATE FOR PORTION violating long unique under READ COMMITTED
+--echo #
+create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb;
+insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01');
+set transaction isolation level read committed;
+--error ER_DUP_ENTRY
+update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1;
+drop table t1;
+
+create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb partition by hash (a);
+insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01');
+set transaction isolation level read committed;
+--error ER_DUP_ENTRY
+update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1;
+drop table t1;
+
+--echo # End of 10.6 tests
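
A reasoning step the listing leaves implicit: UPDATE ... FOR PORTION OF rewrites an affected row as up to three rows, the updated portion plus the unchanged remainders, and the remainders go through the insert path, so long-unique hash constraints are re-checked there; under READ COMMITTED that re-check used to trigger the ASAN errors instead of cleanly raising the duplicate. Schematically, for the 'foo' row above:

    # (1,'foo','1900-01-01','2000-01-01') split at [1980-01-01,1980-01-02) becomes
    #   (1,'foo','1900-01-01','1980-01-01')   (remainder, re-inserted)
    #   (1,'foo','1980-01-01','1980-01-02')   (updated portion)
    #   (1,'foo','1980-01-02','2000-01-01')   (remainder, re-inserted)
    # three rows now share f='foo', so the re-insert must fail with ER_DUP_ENTRY
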
diff --git a/mysql-test/suite/rpl/r/rpl_conditional_comments.result b/mysql-test/suite/rpl/r/rpl_conditional_comments.result
index 036824d60aa..8bfccb9c6a8 100644
--- a/mysql-test/suite/rpl/r/rpl_conditional_comments.result
+++ b/mysql-test/suite/rpl/r/rpl_conditional_comments.result
@@ -88,5 +88,19 @@ c1
3
20
connection master;
+insert t1 values /*! (100);insert t1 values */ (200) //
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'insert t1 values */ (200)' at line 1
+select * from t1;
+c1
+62
+3
+20
+connection slave;
+select * from t1;
+c1
+62
+3
+20
+connection master;
DROP TABLE t1;
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result b/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result
index 18ad5d3d2cc..96e7aa43fbf 100644
--- a/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result
+++ b/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result
@@ -7,6 +7,8 @@ call mtr.add_suppression("Could not read packet");
call mtr.add_suppression("Could not write packet");
set @save_bgc_count= @@global.binlog_commit_wait_count;
set @save_bgc_usec= @@global.binlog_commit_wait_usec;
+set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point;
+set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave;
set @save_debug_dbug= @@global.debug_dbug;
set @@global.binlog_commit_wait_count=3;
set @@global.binlog_commit_wait_usec=10000000;
@@ -46,8 +48,6 @@ drop table t1, t2, t3;
# the binlogging to semi-sync, and starting the wait for ACK; and during
# this pause, semi-sync is manually switched off and on.
connection master;
-set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point;
-set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave;
set @@global.rpl_semi_sync_master_wait_point= AFTER_SYNC;
set @@global.rpl_semi_sync_master_wait_no_slave= 1;
set @@global.debug_dbug="+d,semisync_log_skip_trx_wait";
@@ -100,7 +100,66 @@ commit;
# Cleanup
connection master;
drop table tn;
+set @@global.debug_dbug=@save_debug_dbug;
+#
+# MDEV-36934
+# The server could indefinitely hang due to a memory leak which tried to
+# pthread signal on a destroyed condition variable. In effect, no
+# connections could commit transactions because there would be a thread
+# stuck on a never-returning call to pthread_cond_signal() while
+# holding Repl_semi_sync_master::LOCK_log.
+connection master;
+set @@global.rpl_semi_sync_master_wait_point= AFTER_COMMIT;
+set @@global.rpl_semi_sync_master_wait_no_slave= 0;
+# Ensure servers are in proper state
+connection master;
+connection slave;
+# Test case initial set-up
+connection master;
+create table t_36934 (a int) engine=innodb;
+include/save_master_gtid.inc
+connection slave;
+include/sync_with_master_gtid.inc
+# Pause the user transaction before inserting into Active_tranx
+connect user_con,localhost,root,,;
+SET debug_sync= 'semisync_at_write_tranx_in_binlog SIGNAL at_write_tranx_in_binlog WAIT_FOR resume_write_tranx_in_binlog';
+insert into t_36934 values (1);
+connection server_1;
+set debug_sync="now wait_for at_write_tranx_in_binlog";
+# Disconnect the slave (note that the binlog dump thread won't yet be
+# notified of a binlog update from the last transaction, so the slave
+# should neither receive nor ACK the transaction).
+connection slave;
+include/stop_slave.inc
+# Waiting for master to realize the slave has disconnected..
+connection server_1;
+# ..done
+# Resuming transaction (it will exit commit_trx early without waiting)
+set debug_sync="now signal resume_write_tranx_in_binlog";
+connection user_con;
+disconnect user_con;
+# Force delete the user thread (FLUSH THREADS ensures the thread won't
+# stay in the thread cache)
+connection master;
+FLUSH THREADS;
+# BUG: Re-connect slave. MDEV-36934 reports that the master would hang
+# when the slave would re-connect and try to ACK the last transaction
+# whose thread has been deleted
+connection slave;
+include/start_slave.inc
+# Try to commit another transaction (prior to MDEV-36934 fixes, this
+# would hang indefinitely)
+connection master;
+set debug_sync="RESET";
+insert into t_36934 values (2);
+connection server_1;
+# Waiting 30s for last query to complete..
+connection master;
+# ..done
+# Cleanup
+connection master;
set @@global.rpl_semi_sync_master_wait_point= @old_master_wait_point;
set @@global.rpl_semi_sync_master_wait_no_slave= @old_master_wait_no_slave;
set @@global.debug_dbug=@save_debug_dbug;
+drop table t_36934;
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_conditional_comments.test b/mysql-test/suite/rpl/t/rpl_conditional_comments.test
index 6e4ec8745f4..343ea0d3d13 100644
--- a/mysql-test/suite/rpl/t/rpl_conditional_comments.test
+++ b/mysql-test/suite/rpl/t/rpl_conditional_comments.test
@@ -80,5 +80,17 @@ sync_slave_with_master;
select * from t1;
connection master;
+#
+# Bug#37117875 Binlog record error when delimiter is set to other symbols
+#
+delimiter //;
+--error ER_PARSE_ERROR
+insert t1 values /*! (100);insert t1 values */ (200) //
+delimiter ;//
+select * from t1;
+sync_slave_with_master;
+select * from t1;
+connection master;
+
DROP TABLE t1;
--source include/rpl_end.inc
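
Background on the construct: /*! ... */ is an executable comment, parsed by MariaDB and skipped by other servers, so the failing line is one statement whose comment body merely contains a ';'. The regression was that with a non-default DELIMITER the statement could be recorded incorrectly in the binlog. For contrast, a well-formed executable comment (a generic illustration, not from the test):

    CREATE TABLE tc (a INT) /*! ENGINE=InnoDB */;  # the comment body is executed by MariaDB
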
diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test b/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test
index 537a4bf7fa8..5d6702f8c42 100644
--- a/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test
+++ b/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test
@@ -34,6 +34,8 @@ call mtr.add_suppression("Could not read packet");
call mtr.add_suppression("Could not write packet");
set @save_bgc_count= @@global.binlog_commit_wait_count;
set @save_bgc_usec= @@global.binlog_commit_wait_usec;
+set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point;
+set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave;
set @save_debug_dbug= @@global.debug_dbug;
set @@global.binlog_commit_wait_count=3;
set @@global.binlog_commit_wait_usec=10000000;
@@ -98,8 +100,6 @@ drop table t1, t2, t3;
--connection master
-set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point;
-set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave;
set @@global.rpl_semi_sync_master_wait_point= AFTER_SYNC;
set @@global.rpl_semi_sync_master_wait_no_slave= 1;
--eval set @@global.debug_dbug="+d,semisync_log_skip_trx_wait"
@@ -190,8 +190,108 @@ commit;
--echo # Cleanup
--connection master
drop table tn;
+set @@global.debug_dbug=@save_debug_dbug;
+
+
+--echo #
+--echo # MDEV-36934
+--echo # The server could indefinitely hang due to a memory leak which tried to
+--echo # pthread signal on a destroyed condition variable. In effect, no
+--echo # connections could commit transactions because there would be a thread
+--echo # stuck on a never-returning call to pthread_cond_signal() while
+--echo # holding Repl_semi_sync_master::LOCK_log.
+
+--connection master
+set @@global.rpl_semi_sync_master_wait_point= AFTER_COMMIT;
+set @@global.rpl_semi_sync_master_wait_no_slave= 0;
+
+--echo # Ensure servers are in proper state
+--connection master
+let $status_var= rpl_semi_sync_master_status;
+let $status_var_value= ON;
+source include/wait_for_status_var.inc;
+--connection slave
+let $status_var= rpl_semi_sync_slave_status;
+let $status_var_value= ON;
+source include/wait_for_status_var.inc;
+
+--echo # Test case initial set-up
+--connection master
+create table t_36934 (a int) engine=innodb;
+--source include/save_master_gtid.inc
+--connection slave
+--source include/sync_with_master_gtid.inc
+
+--echo # Pause the user transaction before inserting into Active_tranx
+--connect(user_con,localhost,root,,)
+SET debug_sync= 'semisync_at_write_tranx_in_binlog SIGNAL at_write_tranx_in_binlog WAIT_FOR resume_write_tranx_in_binlog';
+--send insert into t_36934 values (1)
+
+--connection server_1
+set debug_sync="now wait_for at_write_tranx_in_binlog";
+
+--echo # Disconnect the slave (note that the binlog dump thread won't yet be
+--echo # notified of a binlog update from the last transaction, so the slave
+--echo # should neither receive nor ACK the transaction).
+--connection slave
+--source include/stop_slave.inc
+
+--echo # Waiting for master to realize the slave has disconnected..
+--connection server_1
+let $status_var= rpl_semi_sync_master_clients;
+let $status_var_value= 0;
+source include/wait_for_status_var.inc;
+--echo # ..done
+
+--echo # Resuming transaction (it will exit commit_trx early without waiting)
+set debug_sync="now signal resume_write_tranx_in_binlog";
+
+--connection user_con
+--reap
+--let $user_con_tid= `SELECT connection_id()`
+--disconnect user_con
+--source include/wait_until_disconnected.inc
+
+--echo # Force delete the user thread (FLUSH THREADS ensures the thread won't
+--echo # stay in the thread cache)
+--connection master
+FLUSH THREADS;
+
+--echo # BUG: Re-connect slave. MDEV-36934 reports that the master would hang
+--echo # when the slave would re-connect and try to ACK the last transaction
+--echo # whose thread has been deleted
+--connection slave
+--source include/start_slave.inc
+
+--echo # Try to commit another transaction (prior to MDEV-36934 fixes, this
+--echo # would hang indefinitely)
+--connection master
+set debug_sync="RESET";
+--send insert into t_36934 values (2)
+
+--connection server_1
+--echo # Waiting 30s for last query to complete..
+--let $wait_timeout= 30
+--let $wait_condition= SELECT count(*)=0 FROM information_schema.processlist WHERE info LIKE 'insert into t_36934%';
+--source include/wait_condition.inc
+
+# Variable `success` is set by wait_condition.inc
+if (!$success)
+{
+ --echo # ..error
+ --die Query is hung
+}
+
+--connection master
+--reap
+--echo # ..done
+
+
+--echo # Cleanup
+--connection master
set @@global.rpl_semi_sync_master_wait_point= @old_master_wait_point;
set @@global.rpl_semi_sync_master_wait_no_slave= @old_master_wait_no_slave;
set @@global.debug_dbug=@save_debug_dbug;
+drop table t_36934;
--source include/rpl_end.inc
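
A note on the two globals the scenario toggles, since the hang depends on both: AFTER_COMMIT makes the session commit in the engine before waiting for a slave ACK, so the waiting thread can be torn down while an ACK is still in flight, and wait_no_slave=0 lets commit_trx return early once no semi-sync clients remain, which stopping the slave provokes:

    SET @@global.rpl_semi_sync_master_wait_point= AFTER_COMMIT;  # commit first, wait after
    SET @@global.rpl_semi_sync_master_wait_no_slave= 0;          # skip waiting with no slaves
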
diff --git a/mysql-test/suite/sql_sequence/default.result b/mysql-test/suite/sql_sequence/default.result
index eecef1d3527..fe9c6af1795 100644
--- a/mysql-test/suite/sql_sequence/default.result
+++ b/mysql-test/suite/sql_sequence/default.result
@@ -292,6 +292,25 @@ a b
10 j
DROP TABLE t1;
DROP SEQUENCE s1;
-#
# End of 10.3 tests
+# in UPDATE
+create sequence s1 cache 0;
+create table t1 (id int unsigned default nextval(s1));
+insert t1 values ();
+update t1 set id=default;
+prepare stmt from "update t1 set id=?";
+execute stmt using default;
+deallocate prepare stmt;
+drop table t1;
+drop sequence s1;
#
+# MDEV-37302 Assertion failure in Table_triggers_list::add_tables_and_routines_for_triggers upon attempt to insert DEFAULT into non-insertable view
+#
+create table t1 (f int);
+create algorithm=temptable view v1 as select * from t1;
+create trigger tr before update on t1 for each row set @a=1;
+insert v1 values (default);
+ERROR HY000: The target table v1 of the INSERT is not insertable-into
+drop view v1;
+drop table t1;
+# End of 10.6 tests
diff --git a/mysql-test/suite/sql_sequence/default.test b/mysql-test/suite/sql_sequence/default.test
index f965089d992..5cbfe237cd3 100644
--- a/mysql-test/suite/sql_sequence/default.test
+++ b/mysql-test/suite/sql_sequence/default.test
@@ -216,6 +216,28 @@ SELECT a, b FROM t1;
DROP TABLE t1;
DROP SEQUENCE s1;
---echo #
--echo # End of 10.3 tests
+
+--echo # in UPDATE
+create sequence s1 cache 0;
+create table t1 (id int unsigned default nextval(s1));
+insert t1 values ();
+update t1 set id=default;
+prepare stmt from "update t1 set id=?";
+execute stmt using default;
+deallocate prepare stmt;
+drop table t1;
+drop sequence s1;
+
--echo #
+--echo # MDEV-37302 Assertion failure in Table_triggers_list::add_tables_and_routines_for_triggers upon attempt to insert DEFAULT into non-insertable view
+--echo #
+create table t1 (f int);
+create algorithm=temptable view v1 as select * from t1;
+create trigger tr before update on t1 for each row set @a=1;
+--error ER_NON_INSERTABLE_TABLE
+insert v1 values (default);
+drop view v1;
+drop table t1;
+
+--echo # End of 10.6 tests
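
What the new 'in UPDATE' block asserts, spelled out: SET id=DEFAULT and EXECUTE ... USING DEFAULT must both evaluate the column default, that is call nextval(s1), not bind a stale literal. CACHE 0 keeps this observable because every nextval() then reads and updates the sequence table. A tiny illustration (s9 is an illustrative name):

    create sequence s9 cache 0;
    select nextval(s9), nextval(s9);  # 1, 2: no cached block, each call is persisted
    drop sequence s9;
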
diff --git a/mysql-test/suite/sql_sequence/grant.result b/mysql-test/suite/sql_sequence/grant.result
index fc3421efcb6..0ea9f9d0667 100644
--- a/mysql-test/suite/sql_sequence/grant.result
+++ b/mysql-test/suite/sql_sequence/grant.result
@@ -97,7 +97,92 @@ ERROR 42000: SELECT, INSERT command denied to user 'u'@'localhost' for table `my
disconnect con1;
connection default;
drop user u;
+create user u_alter;
+create table t1 (id int);
+grant alter on t1 to u_alter;
+connect con_alter,localhost,u_alter,,mysqltest_1;
+alter table t1 modify id int default nextval(s1);
+ERROR 42000: SELECT, INSERT command denied to user 'u_alter'@'localhost' for table `mysqltest_1`.`s1`
+connection default;
+grant insert, select on s1 to u_alter;
+connection con_alter;
+alter table t1 modify id int default nextval(s1);
+disconnect con_alter;
+connection default;
+drop user u_alter;
drop database mysqltest_1;
#
-# End of 10.11 tests
+# MDEV-36870 Spurious unrelated permission error when selecting from table with default that uses nextval(sequence)
#
+create database db1;
+use db1;
+create sequence s1 cache 0;
+create table t1 (id int unsigned default (10+nextval(s1)));
+insert t1 values ();
+create table t2 (id int unsigned default nextval(s1), b int default(default(id)));
+insert t2 values ();
+create function f1(x int) returns int sql security invoker
+begin
+select id+x into x from t1;
+return x;
+insert t1 values ();
+end|
+create user u1@localhost;
+grant select on db1.* to u1@localhost;
+grant execute on db1.* to u1@localhost;
+grant all privileges on test.* to u1@localhost;
+use test;
+create table t3 (id int unsigned default (20+nextval(db1.s1)), b int);
+insert t3 values ();
+create sequence s2 cache 0;
+create table t4 (id int unsigned default (10+nextval(s2)), b int);
+insert t4 values ();
+connect u1,localhost,u1,,db1;
+select * from t1;
+id
+11
+connection default;
+flush tables;
+connection u1;
+select * from t1;
+id
+11
+select default(id) from t1;
+ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1`
+select * from t2;
+id b
+2 3
+select f1(100);
+f1(100)
+111
+select column_name, data_type, column_default from information_schema.columns where table_schema='db1' and table_name='t1';
+column_name data_type column_default
+id int (10 + nextval(`db1`.`s1`))
+use test;
+insert t3 values ();
+ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1`
+insert t4 values ();
+insert t3 (b) select 5;
+ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1`
+insert t4 (b) select 5;
+update t3 set id=default;
+ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1`
+update t4 set id=default;
+prepare stmt from "update t3 set id=?";
+execute stmt using default;
+ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1`
+prepare stmt from "update t4 set id=?";
+execute stmt using default;
+deallocate prepare stmt;
+insert t4 (b) values ((select * from db1.t1));
+insert t4 (b) values ((select default(id) from db1.t1));
+ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1`
+connection default;
+disconnect u1;
+select nextval(db1.s1) as 'must be 5';
+must be 5
+5
+drop user u1@localhost;
+drop database db1;
+drop table t3, t4, s2;
+# End of 10.6 tests
diff --git a/mysql-test/suite/sql_sequence/grant.test b/mysql-test/suite/sql_sequence/grant.test
index c205bd34223..dfde1275246 100644
--- a/mysql-test/suite/sql_sequence/grant.test
+++ b/mysql-test/suite/sql_sequence/grant.test
@@ -106,12 +106,121 @@ create table t1 (a int not null default(nextval(s1)),
--connection default
drop user u;
-#
-# Cleanup
-#
-
+# ALTER for table with DEFAULT NEXTVAL(seq) column needs INSERT/SELECT on seq
+# just like CREATE does in the example above
+create user u_alter;
+create table t1 (id int);
+grant alter on t1 to u_alter;
+--connect(con_alter,localhost,u_alter,,mysqltest_1)
+--error ER_TABLEACCESS_DENIED_ERROR
+alter table t1 modify id int default nextval(s1);
+--connection default
+grant insert, select on s1 to u_alter;
+--connection con_alter
+alter table t1 modify id int default nextval(s1);
+--disconnect con_alter
+--connection default
+drop user u_alter;
drop database mysqltest_1;
--echo #
---echo # End of 10.11 tests
+--echo # MDEV-36870 Spurious unrelated permission error when selecting from table with default that uses nextval(sequence)
--echo #
+
+# various tests for permission checking on sequences
+create database db1;
+use db1;
+create sequence s1 cache 0;
+create table t1 (id int unsigned default (10+nextval(s1)));
+insert t1 values ();
+
+create table t2 (id int unsigned default nextval(s1), b int default(default(id)));
+insert t2 values ();
+
+# INSERT affects prelocking, but is never actually executed
+delimiter |;
+create function f1(x int) returns int sql security invoker
+begin
+ select id+x into x from t1;
+ return x;
+ insert t1 values ();
+end|
+delimiter ;|
+
+create user u1@localhost;
+grant select on db1.* to u1@localhost;
+grant execute on db1.* to u1@localhost;
+grant all privileges on test.* to u1@localhost;
+
+use test;
+create table t3 (id int unsigned default (20+nextval(db1.s1)), b int);
+insert t3 values ();
+
+create sequence s2 cache 0;
+create table t4 (id int unsigned default (10+nextval(s2)), b int);
+insert t4 values ();
+
+connect u1,localhost,u1,,db1;
+
+# table already in the cache. must be re-fixed
+# SELECT * - no error
+select * from t1;
+
+# not in cache
+connection default;
+flush tables;
+connection u1;
+# SELECT * - no error
+select * from t1;
+
+# SELECT DEFAULT() - error
+--error ER_TABLEACCESS_DENIED_ERROR
+select default(id) from t1;
+
+# default(default(nextval))
+select * from t2;
+
+# SELECT but table has TL_WRITE because of prelocking
+select f1(100);
+
+# opening the table for I_S
+select column_name, data_type, column_default from information_schema.columns where table_schema='db1' and table_name='t1';
+
+use test;
+# insert
+--error ER_TABLEACCESS_DENIED_ERROR
+insert t3 values ();
+insert t4 values ();
+#insert select
+--error ER_TABLEACCESS_DENIED_ERROR
+insert t3 (b) select 5;
+insert t4 (b) select 5;
+#update
+--error ER_TABLEACCESS_DENIED_ERROR
+update t3 set id=default;
+update t4 set id=default;
+
+# PS UPDATE with ? = DEFAULT
+prepare stmt from "update t3 set id=?";
+--error ER_TABLEACCESS_DENIED_ERROR
+execute stmt using default;
+prepare stmt from "update t4 set id=?";
+execute stmt using default;
+deallocate prepare stmt;
+
+# SELECT * in a subquery, like INSERT t3 VALUES ((SELECT * FROM t1));
+# with sequences both on t3 and t1
+insert t4 (b) values ((select * from db1.t1));
+--error ER_TABLEACCESS_DENIED_ERROR
+insert t4 (b) values ((select default(id) from db1.t1));
+
+connection default;
+disconnect u1;
+--disable_ps2_protocol
+select nextval(db1.s1) as 'must be 5';
+--enable_ps2_protocol
+drop user u1@localhost;
+drop database db1;
+drop table t3, t4, s2;
+
+--echo # End of 10.6 tests
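
The privilege rule this whole block encodes, in one sentence: merely reading a table whose column default calls nextval() needs no sequence privileges, while anything that evaluates the default (INSERT omitting the column, UPDATE ... SET col=DEFAULT, SELECT DEFAULT(col), EXECUTE ... USING DEFAULT) needs INSERT and SELECT on the sequence itself. The minimal grant that would unblock the failing statements above:

    grant select, insert on db1.s1 to u1@localhost;
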
diff --git a/mysql-test/suite/wsrep/r/MDEV-20625.result b/mysql-test/suite/wsrep/r/MDEV-20625.result
index 3e2b621c8f9..d5e9df07374 100644
--- a/mysql-test/suite/wsrep/r/MDEV-20625.result
+++ b/mysql-test/suite/wsrep/r/MDEV-20625.result
@@ -1,4 +1,5 @@
SET GLOBAL wsrep_on=ON;
+ERROR HY000: Galera replication not supported
SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size';
Variable_name Value
wsrep_cluster_size 0
diff --git a/mysql-test/suite/wsrep/r/variables_debug.result b/mysql-test/suite/wsrep/r/variables_debug.result
index 2e7e92c4395..64ce515ae47 100644
--- a/mysql-test/suite/wsrep/r/variables_debug.result
+++ b/mysql-test/suite/wsrep/r/variables_debug.result
@@ -140,7 +140,8 @@ WSREP_SST_DONOR_REJECTS_QUERIES
WSREP_SST_METHOD
WSREP_SST_RECEIVE_ADDRESS
WSREP_START_POSITION
WSREP_STATUS_FILE
+WSREP_STRICT_DDL
WSREP_SYNC_WAIT
WSREP_TRX_FRAGMENT_SIZE
WSREP_TRX_FRAGMENT_UNIT
diff --git a/mysql-test/suite/wsrep/r/wsrep_off.result b/mysql-test/suite/wsrep/r/wsrep_off.result
new file mode 100644
index 00000000000..95cd804e35c
--- /dev/null
+++ b/mysql-test/suite/wsrep/r/wsrep_off.result
@@ -0,0 +1,6 @@
+SET GLOBAL wsrep_on=ON;
+ERROR HY000: Galera replication not supported
+REPAIR TABLE performance_schema.setup_objects;
+Table Op Msg_type Msg_text
+performance_schema.setup_objects repair note The storage engine for the table doesn't support repair
+SET GLOBAL wsrep_on=OFF;
diff --git a/mysql-test/suite/wsrep/t/MDEV-20625.test b/mysql-test/suite/wsrep/t/MDEV-20625.test
index 2a537fe432e..7dcb622fde0 100644
--- a/mysql-test/suite/wsrep/t/MDEV-20625.test
+++ b/mysql-test/suite/wsrep/t/MDEV-20625.test
@@ -5,6 +5,7 @@
--source include/have_wsrep_provider.inc
--source include/have_binlog_format_row.inc
+--error ER_GALERA_REPLICATION_NOT_SUPPORTED
SET GLOBAL wsrep_on=ON;
SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size';
SET GLOBAL wsrep_on=OFF;
diff --git a/mysql-test/suite/wsrep/t/variables.test b/mysql-test/suite/wsrep/t/variables.test
index 762d783a09c..c82d0ae02c2 100644
--- a/mysql-test/suite/wsrep/t/variables.test
+++ b/mysql-test/suite/wsrep/t/variables.test
@@ -3,7 +3,7 @@
--source include/have_innodb.inc
--source include/galera_no_debug_sync.inc
---let $galera_version=26.4.21
+--let $galera_version=26.4.23
source include/check_galera_version.inc;
source include/galera_variables_ok.inc;
diff --git a/mysql-test/suite/wsrep/t/variables_debug.test b/mysql-test/suite/wsrep/t/variables_debug.test
index e55dbd4fa1f..e50cee28a15 100644
--- a/mysql-test/suite/wsrep/t/variables_debug.test
+++ b/mysql-test/suite/wsrep/t/variables_debug.test
@@ -5,7 +5,7 @@
--source include/have_debug_sync.inc
--source include/galera_have_debug_sync.inc
---let $galera_version=26.4.21
+--let $galera_version=26.4.23
source include/check_galera_version.inc;
source include/galera_variables_ok_debug.inc;
diff --git a/mysql-test/suite/wsrep/t/wsrep_off.cnf b/mysql-test/suite/wsrep/t/wsrep_off.cnf
new file mode 100644
index 00000000000..77eae0c4acd
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_off.cnf
@@ -0,0 +1,17 @@
+# Use default setting for mysqld processes
+!include include/default_mysqld.cnf
+
+[mysqld]
+wsrep-on=OFF
+wsrep-provider=@ENV.WSREP_PROVIDER
+log-bin
+binlog-format=row
+loose-wsrep_cluster_address=gcomm://
+loose-wsrep_node_address='127.0.0.1:@mysqld.1.#galera_port'
+loose-wsrep-incoming-address=127.0.0.1:@mysqld.1.port
+
+[mysqld.1]
+wsrep-on=OFF
+#galera_port=@OPT.port
+#ist_port=@OPT.port
+#sst_port=@OPT.port
diff --git a/mysql-test/suite/wsrep/t/wsrep_off.test b/mysql-test/suite/wsrep/t/wsrep_off.test
new file mode 100644
index 00000000000..27e64c92e93
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_off.test
@@ -0,0 +1,8 @@
+--source include/have_innodb.inc
+--source include/have_wsrep_provider.inc
+--source include/have_binlog_format_row.inc
+
+--error ER_GALERA_REPLICATION_NOT_SUPPORTED
+SET GLOBAL wsrep_on=ON;
+REPAIR TABLE performance_schema.setup_objects;
+SET GLOBAL wsrep_on=OFF;
diff --git a/mysys/crc32/crc32c_x86.cc b/mysys/crc32/crc32c_x86.cc
index fb5dc19f7a5..a66093e54cc 100644
--- a/mysys/crc32/crc32c_x86.cc
+++ b/mysys/crc32/crc32c_x86.cc
@@ -25,6 +25,9 @@
#else
# include <cpuid.h>
# ifdef __APPLE__ /* AVX512 states are not enabled in XCR0 */
+# elif __GNUC__ >= 15
+# define TARGET "pclmul,avx10.1,vpclmulqdq"
+# define USE_VPCLMULQDQ __attribute__((target(TARGET)))
# elif __GNUC__ >= 14 || (defined __clang_major__ && __clang_major__ >= 18)
# define TARGET "pclmul,evex512,avx512f,avx512dq,avx512bw,avx512vl,vpclmulqdq"
# define USE_VPCLMULQDQ __attribute__((target(TARGET)))
diff --git a/mysys/mf_qsort.c b/mysys/mf_qsort.c
index 4dee20750c0..fbd75451d9f 100644
--- a/mysys/mf_qsort.c
+++ b/mysys/mf_qsort.c
@@ -38,7 +38,7 @@ do { \
if (swap_ptrs) \
{ \
reg1 char **a = (char**) (A), **b = (char**) (B); \
- char *tmp = *a; *a++ = *b; *b++ = tmp; \
+ char *tmp = *a; *a = *b; *b = tmp; \
} \
else \
{ \
@@ -190,16 +190,16 @@ qsort_t my_qsort(void *base_ptr, size_t count, size_t size, qsort_cmp cmp)
This ensures that the stack is kept small.
*/
- if ((int) (high_ptr - low) <= 0)
+ if ((longlong) (high_ptr - low) <= 0)
{
- if ((int) (high - low_ptr) <= 0)
+ if ((longlong) (high - low_ptr) <= 0)
{
POP(low, high); /* Nothing more to sort */
}
else
low = low_ptr; /* Ignore small left part. */
}
- else if ((int) (high - low_ptr) <= 0)
+ else if ((longlong) (high - low_ptr) <= 0)
high = high_ptr; /* Ignore small right part. */
else if ((high_ptr - low) > (high - low_ptr))
{
diff --git a/sql/field.cc b/sql/field.cc
index d7d85d4c302..d6412417bfe 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -8426,6 +8426,59 @@ Field *Field_varstring::make_new_field(MEM_ROOT *root, TABLE *new_table,
}
+Field *Field_varstring_compressed::make_new_field(MEM_ROOT *root, TABLE *new_table,
+ bool keep_type)
+{
+ Field_varstring *res;
+ if (new_table->s->is_optimizer_tmp_table())
+ {
+ /*
+ Compressed field cannot be part of a key. For optimizer temporary
+ table we create uncompressed substitute.
+ */
+ res= new (root) Field_varstring(ptr, field_length, length_bytes, null_ptr,
+ null_bit, Field::NONE, &field_name,
+ new_table->s, charset());
+ if (res)
+ {
+ res->init_for_make_new_field(new_table, orig_table);
+ /* See Column_definition::create_length_to_internal_length_string() */
+ res->field_length--;
+ }
+ }
+ else
+ res= (Field_varstring*) Field::make_new_field(root, new_table, keep_type);
+ if (res)
+ res->length_bytes= length_bytes;
+ return res;
+}
+
+Field *Field_blob_compressed::make_new_field(MEM_ROOT *root, TABLE *new_table,
+ bool keep_type)
+{
+ Field_blob *res;
+ if (new_table->s->is_optimizer_tmp_table())
+ {
+ /*
+ Compressed field cannot be part of a key. For optimizer temporary
+ table we create uncompressed substitute.
+ */
+ res= new (root) Field_blob(ptr, null_ptr, null_bit, Field::NONE, &field_name,
+ new_table->s, packlength, charset());
+ if (res)
+ {
+ res->init_for_make_new_field(new_table, orig_table);
+ /* See Column_definition::create_length_to_internal_length_string() */
+ res->field_length--;
+ }
+ }
+ else
+ res= (Field_blob *) Field::make_new_field(root, new_table, keep_type);
+ return res;
+}
+
+
+
Field *Field_varstring::new_key_field(MEM_ROOT *root, TABLE *new_table,
uchar *new_ptr, uint32 length,
uchar *new_null_ptr, uint new_null_bit)
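
A SQL-level illustration of why these two overrides exist may help: compressed columns cannot be key parts, yet the optimizer's internal temporary tables (DISTINCT, materialized derived tables) may need a key over such a column, so the copy made for those tables is deliberately uncompressed. The shape that used to assert, sketched along the lines of the MDEV-24726 tests:

    create table t2 (f2 varchar(8) compressed);
    insert into t2 values ('a'),('b'),('a');
    select sq.f2 from (select distinct f2 from t2) sq;  # temp table gets a key on f2
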
diff --git a/sql/field.h b/sql/field.h
index d651179f988..31d08e2e544 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -658,6 +658,7 @@ public:
bool fix_session_expr(THD *thd);
bool cleanup_session_expr();
bool fix_and_check_expr(THD *thd, TABLE *table);
+ bool check_access(THD *thd);
inline bool is_equal(const Virtual_column_info* vcol) const;
/* Same as is_equal() but for comparing with different table */
bool is_equivalent(THD *thd, TABLE_SHARE *share, TABLE_SHARE *vcol_share,
@@ -1542,7 +1543,14 @@ public:
{
ptr=ADD_TO_PTR(ptr,ptr_diff, uchar*);
if (null_ptr)
+ {
null_ptr=ADD_TO_PTR(null_ptr,ptr_diff,uchar*);
+ if (table)
+ {
+ DBUG_ASSERT(null_ptr < ptr);
+ DBUG_ASSERT(ptr - null_ptr <= (int)table->s->rec_buff_length);
+ }
+ }
}
/*
@@ -4327,6 +4335,7 @@ private:
{ DBUG_ASSERT(0); return 0; }
using Field_varstring::key_cmp;
Binlog_type_info binlog_type_info() const override;
+ Field *make_new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type) override;
};
@@ -4768,6 +4777,7 @@ private:
override
{ DBUG_ASSERT(0); return 0; }
Binlog_type_info binlog_type_info() const override;
+ Field *make_new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type) override;
};
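
The new assertions in move_field_offset() encode the record-buffer layout relied on here: the NULL-indicator bytes sit at the front of the row buffer, before the field data, so after any relocation null_ptr must still precede ptr and both must land inside one record buffer. A toy model of that layout (offsets invented):

#include <cassert>

int main()
{
  /* Hypothetical row buffer: 2 NULL-flag bytes, then field data. */
  unsigned char record[64] = {0};
  unsigned char *null_ptr = record;     /* NULL bit of the field          */
  unsigned char *ptr = record + 2;      /* field data after the NULL bytes */

  long ptr_diff = 16;                   /* e.g. moving to a copy of the row */
  null_ptr += ptr_diff;
  ptr += ptr_diff;

  /* The invariants asserted after the move in the hunk above: */
  assert(null_ptr < ptr);
  assert(ptr - null_ptr <= (long) sizeof(record));
  return 0;
}
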
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 70f4b03868e..0c5d147d493 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -409,7 +409,6 @@ void ha_partition::init_handler_variables()
m_top_entry= NO_CURRENT_PART_ID;
m_rec_length= 0;
m_last_part= 0;
- m_rec0= 0;
m_err_rec= NULL;
m_curr_key_info[0]= NULL;
m_curr_key_info[1]= NULL;
@@ -2209,7 +2208,7 @@ int ha_partition::copy_partitions(ulonglong * const copied,
goto init_error;
while (TRUE)
{
- if ((result= file->ha_rnd_next(m_rec0)))
+ if ((result= file->ha_rnd_next(table->record[0])))
{
if (result != HA_ERR_END_OF_FILE)
goto error;
@@ -2235,7 +2234,7 @@ int ha_partition::copy_partitions(ulonglong * const copied,
/* Copy record to new handler */
(*copied)++;
DBUG_ASSERT(!m_new_file[new_part]->row_logging);
- result= m_new_file[new_part]->ha_write_row(m_rec0);
+ result= m_new_file[new_part]->ha_write_row(table->record[0]);
if (result)
goto error;
}
@@ -3818,7 +3817,6 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
DBUG_RETURN(HA_ERR_INITIALIZATION);
}
m_start_key.length= 0;
- m_rec0= table->record[0];
m_rec_length= table_share->reclength;
if (!m_part_ids_sorted_by_num_of_records)
{
@@ -4731,15 +4729,15 @@ int ha_partition::update_row(const uchar *old_data, const uchar *new_data)
*/
{
Abort_on_warning_instant_set old_abort_on_warning(thd, 0);
- error= get_part_for_buf(old_data, m_rec0, m_part_info, &old_part_id);
+ error= get_part_for_buf(old_data, table->record[0], m_part_info, &old_part_id);
}
DBUG_ASSERT(!error);
DBUG_ASSERT(old_part_id == m_last_part);
DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), old_part_id));
#endif
- if (unlikely((error= get_part_for_buf(new_data, m_rec0, m_part_info,
- &new_part_id))))
+ if (unlikely((error= get_part_for_buf(new_data, table->record[0],
+ m_part_info, &new_part_id))))
goto exit;
if (unlikely(!bitmap_is_set(&(m_part_info->lock_partitions), new_part_id)))
{
@@ -5567,7 +5565,7 @@ int ha_partition::rnd_pos_by_record(uchar *record)
{
DBUG_ENTER("ha_partition::rnd_pos_by_record");
- if (unlikely(get_part_for_buf(record, m_rec0, m_part_info, &m_last_part)))
+ if (unlikely(get_part_for_buf(record, table->record[0], m_part_info, &m_last_part)))
DBUG_RETURN(1);
int err= m_file[m_last_part]->rnd_pos_by_record(record);
@@ -6351,7 +6349,7 @@ int ha_partition::read_range_first(const key_range *start_key,
m_start_key.key= NULL;
m_index_scan_type= partition_read_range;
- error= common_index_read(m_rec0, MY_TEST(start_key));
+ error= common_index_read(table->record[0], MY_TEST(start_key));
DBUG_RETURN(error);
}
@@ -10351,7 +10349,7 @@ void ha_partition::print_error(int error, myf errflag)
str.append('(');
str.append_ulonglong(m_last_part);
str.append(STRING_WITH_LEN(" != "));
- if (get_part_for_buf(m_err_rec, m_rec0, m_part_info, &part_id))
+ if (get_part_for_buf(m_err_rec, table->record[0], m_part_info, &part_id))
str.append('?');
else
str.append_ulonglong(part_id);
@@ -11336,7 +11334,7 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair)
while (true)
{
- if ((result= m_file[read_part_id]->ha_rnd_next(m_rec0)))
+ if ((result= m_file[read_part_id]->ha_rnd_next(table->record[0])))
{
if (result != HA_ERR_END_OF_FILE)
break;
@@ -11382,7 +11380,7 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair)
Insert row into correct partition. Notice that there are no commit
for every N row, so the repair will be one large transaction!
*/
- if ((result= m_file[correct_part_id]->ha_write_row(m_rec0)))
+ if ((result= m_file[correct_part_id]->ha_write_row(table->record[0])))
{
/*
We have failed to insert a row, it might have been a duplicate!
@@ -11426,7 +11424,7 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair)
}
/* Delete row from wrong partition. */
- if ((result= m_file[read_part_id]->ha_delete_row(m_rec0)))
+ if ((result= m_file[read_part_id]->ha_delete_row(table->record[0])))
{
if (m_file[correct_part_id]->has_transactions_and_rollback())
break;
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index d926bd03137..3e6d0b31baf 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -322,7 +322,6 @@ private:
and if clustered pk, [0]= current index, [1]= pk, [2]= NULL
*/
KEY *m_curr_key_info[3]; // Current index
- uchar *m_rec0; // table->record[0]
const uchar *m_err_rec; // record which gave error
QUEUE m_queue; // Prio queue used by sorted read
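
Removing m_rec0 trades a cached pointer for a lookup through table on every use; a cached copy can silently go stale if the owning TABLE ever rebinds its buffers. A generic sketch of the hazard (all names invented, not the real handler API):

#include <cstdio>
#include <vector>

struct Table { std::vector<unsigned char> record0; };

struct Handler
{
  Table *table;
  /* Anti-pattern: caching table->record0.data() in a member at open()
     time; it dangles if the buffer is reallocated or rebound later. */
  unsigned char *rec0() const { return table->record0.data(); }
};

int main()
{
  Table t;
  t.record0.resize(16);
  Handler h{&t};
  unsigned char *cached = t.record0.data();   /* what m_rec0 used to be */
  t.record0.resize(4096);                     /* buffer may move        */
  printf("fresh pointer: %p\n", (void *) h.rec0());
  (void) cached;  /* dereferencing 'cached' here would be undefined */
  return 0;
}
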
diff --git a/sql/handler.cc b/sql/handler.cc
index fade28431fe..f770296f8df 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -503,6 +503,8 @@ int ha_init_errors(void)
SETMSG(HA_ERR_FTS_TOO_MANY_WORDS_IN_PHRASE, "Too many words in a FTS phrase or proximity search");
SETMSG(HA_ERR_FK_DEPTH_EXCEEDED, "Foreign key cascade delete/update exceeds");
SETMSG(HA_ERR_TABLESPACE_MISSING, ER_DEFAULT(ER_TABLESPACE_MISSING));
+ SETMSG(HA_ERR_INCOMPATIBLE_DEFINITION,
+ "Mismatch between table definitions in sql and storage layer");
/* Register the error messages for use with my_error(). */
return my_error_register(get_handler_errmsgs, HA_ERR_FIRST, HA_ERR_LAST);
@@ -3368,7 +3370,7 @@ int handler::create_lookup_handler()
if (!(tmp= clone(table->s->normalized_path.str, table->in_use->mem_root)))
return 1;
lookup_handler= tmp;
- return lookup_handler->ha_external_lock(table->in_use, F_RDLCK);
+ return lookup_handler->ha_external_lock(table->in_use, F_WRLCK);
}
LEX_CSTRING *handler::engine_name()
@@ -6115,7 +6117,8 @@ int ha_create_table(THD *thd, const char *path, const char *db,
name= get_canonical_filename(table.file, share.path.str, name_buff);
- error= table.file->ha_create(name, &table, create_info);
+ error= table.check_sequence_privileges(thd) ? 1 :
+ table.file->ha_create(name, &table, create_info);
if (unlikely(error))
{
@@ -7353,10 +7356,10 @@ int handler::ha_reset()
DBUG_RETURN(reset());
}
-#ifdef WITH_WSREP
static int wsrep_after_row(THD *thd)
{
DBUG_ENTER("wsrep_after_row");
+#ifdef WITH_WSREP
if (thd->internal_transaction())
DBUG_RETURN(0);
@@ -7380,9 +7383,32 @@ static int wsrep_after_row(THD *thd)
{
DBUG_RETURN(ER_LOCK_DEADLOCK);
}
+#endif /* WITH_WSREP */
DBUG_RETURN(0);
}
-#endif /* WITH_WSREP */
+
+
+static bool long_unique_fields_differ(KEY *keyinfo, const uchar *other)
+{
+ uint key_parts= fields_in_hash_keyinfo(keyinfo);
+ KEY_PART_INFO *keypart= keyinfo->key_part - key_parts;
+ my_ptrdiff_t off= other - keypart->field->table->record[0];
+ DBUG_ASSERT(off);
+ do
+ {
+ Field *field= keypart->field;
+ if (field->is_null() || field->is_null(off))
+ return true;
+ else if (f_is_blob(keypart->key_type) && keypart->length)
+ {
+ if (field->cmp_prefix(field->ptr, field->ptr + off, keypart->length))
+ return true;
+ }
+ else if (field->cmp_offset(off))
+ return true;
+ } while (keypart++ < keyinfo->key_part);
+ return false;
+}
/**
@@ -7391,91 +7417,70 @@ static int wsrep_after_row(THD *thd)
int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no)
{
- int result, error= 0;
+ int result;
+ /* Skip the just-written row in the case of HA_CHECK_UNIQUE_AFTER_WRITE */
+ bool skip_self= ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE;
KEY *key_info= table->key_info + key_no;
- Field *hash_field= key_info->key_part->field;
uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL];
- String *blob_storage;
DBUG_ENTER("handler::check_duplicate_long_entry_key");
DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY &&
key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL) ||
key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL);
- if (hash_field->is_real_null())
+ if (key_info->key_part->field->is_real_null())
DBUG_RETURN(0);
+ if (skip_self)
+ position(table->record[0]);
+
key_copy(ptr, new_rec, key_info, key_info->key_length, false);
result= lookup_handler->ha_index_init(key_no, 0);
if (result)
DBUG_RETURN(result);
- blob_storage= (String*)alloca(sizeof(String)*table->s->virtual_not_stored_blob_fields);
+ auto blob_storage= (String*)alloca(sizeof(String)*table->s->virtual_not_stored_blob_fields);
table->remember_blob_values(blob_storage);
store_record(table, file->lookup_buffer);
result= lookup_handler->ha_index_read_map(table->record[0], ptr,
HA_WHOLE_KEY, HA_READ_KEY_EXACT);
- if (!result)
+ if (result)
+ goto end;
+
+ // restore pointers after swap_values in TABLE::update_virtual_fields()
+ for (Field **vf= table->vfield; *vf; vf++)
{
- bool is_same;
- Field * t_field;
- Item_func_hash * temp= (Item_func_hash *)hash_field->vcol_info->expr;
- Item ** arguments= temp->arguments();
- uint arg_count= temp->argument_count();
- // restore pointers after swap_values in TABLE::update_virtual_fields()
- for (Field **vf= table->vfield; *vf; vf++)
+ if (!(*vf)->stored_in_db() && (*vf)->flags & BLOB_FLAG &&
+ bitmap_is_set(table->read_set, (*vf)->field_index))
+ ((Field_blob*)*vf)->swap_value_and_read_value();
+ }
+ do
+ {
+ if (!long_unique_fields_differ(key_info, lookup_buffer))
{
- if (!(*vf)->stored_in_db() && (*vf)->flags & BLOB_FLAG &&
- bitmap_is_set(table->read_set, (*vf)->field_index))
- ((Field_blob*)*vf)->swap_value_and_read_value();
- }
- do
- {
- my_ptrdiff_t diff= table->file->lookup_buffer - new_rec;
- is_same= true;
- for (uint j=0; is_same && j < arg_count; j++)
+ lookup_handler->position(table->record[0]);
+ if (skip_self && !memcmp(ref, lookup_handler->ref, ref_length))
{
- DBUG_ASSERT(arguments[j]->type() == Item::FIELD_ITEM ||
- // this one for left(fld_name,length)
- arguments[j]->type() == Item::FUNC_ITEM);
- if (arguments[j]->type() == Item::FIELD_ITEM)
- {
- t_field= static_cast<Item_field *>(arguments[j])->field;
- if (t_field->cmp_offset(diff))
- is_same= false;
- }
- else
- {
- Item_func_left *fnc= static_cast<Item_func_left *>(arguments[j]);
- DBUG_ASSERT(!my_strcasecmp(system_charset_info, "left", fnc->func_name()));
- DBUG_ASSERT(fnc->arguments()[0]->type() == Item::FIELD_ITEM);
- t_field= static_cast<Item_field *>(fnc->arguments()[0])->field;
- uint length= (uint)fnc->arguments()[1]->val_int();
- if (t_field->cmp_prefix(t_field->ptr, t_field->ptr + diff, length))
- is_same= false;
- }
+ skip_self= false; // cannot happen twice, so let's save a memcpy
+ continue;
}
+ result= HA_ERR_FOUND_DUPP_KEY;
+ table->file->lookup_errkey= key_no;
+ memcpy(table->file->dup_ref, lookup_handler->ref, ref_length);
+ goto end;
}
- while (!is_same &&
- !(result= lookup_handler->ha_index_next_same(table->record[0],
- ptr, key_info->key_length)));
- if (is_same)
- error= HA_ERR_FOUND_DUPP_KEY;
- goto exit;
- }
- if (result != HA_ERR_KEY_NOT_FOUND)
- error= result;
-exit:
- if (error == HA_ERR_FOUND_DUPP_KEY)
- {
- table->file->lookup_errkey= key_no;
- lookup_handler->position(table->record[0]);
- memcpy(table->file->dup_ref, lookup_handler->ref, ref_length);
}
+ while (!(result= lookup_handler->ha_index_next_same(table->record[0], ptr,
+ key_info->key_length)));
+
+end:
+ if (result == HA_ERR_END_OF_FILE || result == HA_ERR_KEY_NOT_FOUND)
+ result= 0;
+
restore_record(table, file->lookup_buffer);
table->restore_blob_values(blob_storage);
lookup_handler->ha_index_end();
- DBUG_RETURN(error);
+ DBUG_RETURN(result);
}
void handler::alloc_lookup_buffer()
@@ -7487,77 +7492,57 @@ void handler::alloc_lookup_buffer()
+ table_share->reclength);
}
-/** @brief
- check whether inserted records breaks the
- unique constraint on long columns.
- @returns 0 if no duplicate else returns error
- */
-int handler::check_duplicate_long_entries(const uchar *new_rec)
+
+int handler::ha_check_inserver_constraints(const uchar *old_data,
+ const uchar* new_data)
{
- lookup_errkey= (uint)-1;
- for (uint i= 0; i < table->s->keys; i++)
+ int error= 0;
+ /*
+ this != table->file is true in 3 cases:
+ 1. under copy_partitions() (REORGANIZE PARTITION): that does not
+ require long unique check as it does not introduce new rows or new index.
+ 2. under partition's ha_write_row() or ha_update_row(). Constraints
+ were already checked by ha_partition::ha_write_row(), no need to re-check
+ for each partition.
+ 3. under ha_mroonga::wrapper_write_row(). Same as 2.
+ */
+ if (this == table->file)
{
- int result;
- if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH &&
- (result= check_duplicate_long_entry_key(new_rec, i)))
- return result;
+ uint saved_status= table->status;
+ if (!(error= ha_check_overlaps(old_data, new_data)))
+ error= ha_check_long_uniques(old_data, new_data);
+ table->status= saved_status;
}
- return 0;
+ return error;
}
/** @brief
- check whether updated records breaks the
- unique constraint on long columns.
- In the case of update we just need to check the specic key
- reason for that is consider case
- create table t1(a blob , b blob , x blob , y blob ,unique(a,b)
- ,unique(x,y))
- and update statement like this
- update t1 set a=23+a; in this case if we try to scan for
- whole keys in table then index scan on x_y will return 0
- because data is same so in the case of update we take
- key as a parameter in normal insert key should be -1
+ check whether inserted records break the unique constraint on long columns.
@returns 0 if no duplicate else returns error
*/
-int handler::check_duplicate_long_entries_update(const uchar *new_rec)
+int handler::ha_check_long_uniques(const uchar *old_rec, const uchar *new_rec)
{
- Field *field;
- uint key_parts;
- KEY *keyinfo;
- KEY_PART_INFO *keypart;
- /*
- Here we are comparing whether new record and old record are same
- with respect to fields in hash_str
- */
- uint reclength= (uint) (table->record[1] - table->record[0]);
-
+ if (!table->s->long_unique_table)
+ return 0;
+ DBUG_ASSERT(inited == NONE || lookup_handler != this);
+ DBUG_ASSERT(new_rec == table->record[0]);
+ DBUG_ASSERT(!old_rec || old_rec == table->record[1]);
+ lookup_errkey= (uint)-1;
for (uint i= 0; i < table->s->keys; i++)
{
- keyinfo= table->key_info + i;
+ KEY *keyinfo= table->key_info + i;
if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
{
- key_parts= fields_in_hash_keyinfo(keyinfo);
- keypart= keyinfo->key_part - key_parts;
- for (uint j= 0; j < key_parts; j++, keypart++)
+ if (!old_rec || long_unique_fields_differ(keyinfo, old_rec))
{
- int error;
- field= keypart->field;
- /*
- Compare fields if they are different then check for duplicates
- cmp_binary_offset cannot differentiate between null and empty string
- So also check for that too
- */
- if((field->is_null(0) != field->is_null(reclength)) ||
- field->cmp_offset(reclength))
+ if (int res= check_duplicate_long_entry_key(new_rec, i))
{
- if((error= check_duplicate_long_entry_key(new_rec, i)))
- return error;
- /*
- break because check_duplicate_long_entries_key will
- take care of remaining fields
- */
- break;
+ if (!old_rec && table->next_number_field &&
+ !(ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE))
+ if (int err= update_auto_increment())
+ return err;
+ return res;
}
}
}
@@ -7569,14 +7554,14 @@ int handler::check_duplicate_long_entries_update(const uchar *new_rec)
int handler::ha_check_overlaps(const uchar *old_data, const uchar* new_data)
{
DBUG_ASSERT(new_data);
- if (this != table->file)
- return 0;
+ DBUG_ASSERT(this == table->file);
if (!table_share->period.unique_keys)
return 0;
if (table->versioned() && !table->vers_end_field()->is_max())
return 0;
- const bool is_update= old_data != NULL;
+ const bool after_write= ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE;
+ const bool is_update= !after_write && old_data;
uchar *record_buffer= lookup_buffer + table_share->max_unique_length
+ table_share->null_fields;
@@ -7631,17 +7616,22 @@ int handler::ha_check_overlaps(const uchar *old_data, const uchar* new_data)
key_part_map((1 << (key_parts - 1)) - 1),
HA_READ_AFTER_KEY);
- if (!error && is_update)
+ if (!error)
{
- /* In case of update it could happen that the nearest neighbour is
- a record we are updating. It means, that there are no overlaps
- from this side.
- */
- DBUG_ASSERT(lookup_handler != this);
- DBUG_ASSERT(ref_length == lookup_handler->ref_length);
+ if (is_update)
+ {
+ /* In case of update it could happen that the nearest neighbour is
+ a record we are updating. It means, that there are no overlaps
+ from this side.
+ */
+ DBUG_ASSERT(lookup_handler != this);
+ DBUG_ASSERT(ref_length == lookup_handler->ref_length);
- lookup_handler->position(record_buffer);
- if (memcmp(ref, lookup_handler->ref, ref_length) == 0)
+ lookup_handler->position(record_buffer);
+ if (memcmp(ref, lookup_handler->ref, ref_length) == 0)
+ error= lookup_handler->ha_index_next(record_buffer);
+ }
+ else if (after_write)
error= lookup_handler->ha_index_next(record_buffer);
}
@@ -7754,11 +7744,8 @@ int handler::prepare_for_insert(bool do_create)
int handler::ha_write_row(const uchar *buf)
{
int error;
- DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
- m_lock_type == F_WRLCK);
DBUG_ENTER("handler::ha_write_row");
DEBUG_SYNC_C("ha_write_row_start");
-#ifdef WITH_WSREP
DBUG_EXECUTE_IF("wsrep_ha_write_row",
{
const char act[]=
@@ -7767,36 +7754,11 @@ int handler::ha_write_row(const uchar *buf)
"WAIT_FOR wsrep_ha_write_row_continue";
DBUG_ASSERT(!debug_sync_set_action(ha_thd(), STRING_WITH_LEN(act)));
});
-#endif /* WITH_WSREP */
- if ((error= ha_check_overlaps(NULL, buf)))
- {
- DEBUG_SYNC_C("ha_write_row_end");
- DBUG_RETURN(error);
- }
+ DBUG_ASSERT(table_share->tmp_table || m_lock_type == F_WRLCK);
- /*
- NOTE: this != table->file is true in 3 cases:
-
- 1. under copy_partitions() (REORGANIZE PARTITION): that does not
- require long unique check as it does not introduce new rows or new index.
- 2. under partition's ha_write_row() (INSERT): check_duplicate_long_entries()
- was already done by ha_partition::ha_write_row(), no need to check it
- again for each single partition.
- 3. under ha_mroonga::wrapper_write_row()
- */
-
- if (table->s->long_unique_table && this == table->file)
- {
- DBUG_ASSERT(inited == NONE || lookup_handler != this);
- if ((error= check_duplicate_long_entries(buf)))
- {
- if (table->next_number_field && buf == table->record[0])
- if (int err= update_auto_increment())
- error= err;
- DEBUG_SYNC_C("ha_write_row_end");
- DBUG_RETURN(error);
- }
- }
+ if (!(ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) &&
+ (error= ha_check_inserver_constraints(NULL, buf)))
+ goto err;
MYSQL_INSERT_ROW_START(table_share->db.str, table_share->table_name.str);
mark_trx_read_write();
@@ -7808,23 +7770,43 @@ int handler::ha_write_row(const uchar *buf)
dbug_format_row(table, buf, false).c_ptr_safe(), error));
MYSQL_INSERT_ROW_DONE(error);
- if (likely(!error))
- {
- rows_changed++;
- if (row_logging)
- {
- Log_func *log_func= Write_rows_log_event::binlog_row_logging_function;
- error= binlog_log_row(table, 0, buf, log_func);
- }
+ if (error)
+ goto err;
-#ifdef WITH_WSREP
- THD *thd= ha_thd();
- if (WSREP_NNULL(thd) && table_share->tmp_table == NO_TMP_TABLE &&
- ht->flags & HTON_WSREP_REPLICATION && !error)
- error= wsrep_after_row(thd);
-#endif /* WITH_WSREP */
+ if ((ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) &&
+ (error= ha_check_inserver_constraints(NULL, buf)))
+ {
+ if (lookup_handler != this) // INSERT IGNORE or REPLACE or ODKU
+ {
+ int olderror= error;
+ if ((error= lookup_handler->rnd_init(0)))
+ goto err;
+ position(buf);
+ if ((error= lookup_handler->rnd_pos(lookup_buffer, ref)))
+ goto err;
+
+ increment_statistics(&SSV::ha_delete_count);
+ TABLE_IO_WAIT(tracker, PSI_TABLE_DELETE_ROW, MAX_KEY, error,
+ { error= lookup_handler->delete_row(buf);})
+ lookup_handler->rnd_end();
+ if (!error)
+ error= olderror;
+ }
+ goto err;
}
+ rows_changed++;
+ if (row_logging)
+ {
+ Log_func *log_func= Write_rows_log_event::binlog_row_logging_function;
+ error= binlog_log_row(table, 0, buf, log_func);
+ }
+
+ if (WSREP_NNULL(ha_thd()) && table_share->tmp_table == NO_TMP_TABLE &&
+ ht->flags & HTON_WSREP_REPLICATION && !error)
+ error= wsrep_after_row(ha_thd());
+
+err:
DEBUG_SYNC_C("ha_write_row_end");
DBUG_RETURN(error);
}
@@ -7833,30 +7815,16 @@ int handler::ha_write_row(const uchar *buf)
int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
{
int error;
- DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
- m_lock_type == F_WRLCK);
+ DBUG_ASSERT(table_share->tmp_table || m_lock_type == F_WRLCK);
/*
Some storage engines require that the new record is in record[0]
(and the old record is in record[1]).
- */
+ */
DBUG_ASSERT(new_data == table->record[0]);
DBUG_ASSERT(old_data == table->record[1]);
- uint saved_status= table->status;
- error= ha_check_overlaps(old_data, new_data);
-
- /*
- NOTE: this != table->file is true under partition's ha_update_row():
- check_duplicate_long_entries_update() was already done by
- ha_partition::ha_update_row(), no need to check it again for each single
- partition. Same applies to ha_mroonga wrapper.
- */
-
- if (!error && table->s->long_unique_table && this == table->file)
- error= check_duplicate_long_entries_update(new_data);
- table->status= saved_status;
-
- if (error)
+ if (!(ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) &&
+ (error= ha_check_inserver_constraints(old_data, new_data)))
return error;
MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str);
@@ -7871,35 +7839,50 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
error));
MYSQL_UPDATE_ROW_DONE(error);
- if (likely(!error))
- {
- rows_changed++;
- if (row_logging)
- {
- Log_func *log_func= Update_rows_log_event::binlog_row_logging_function;
- error= binlog_log_row(table, old_data, new_data, log_func);
- }
-#ifdef WITH_WSREP
- THD *thd= ha_thd();
- if (WSREP_NNULL(thd))
- {
- /* for streaming replication, the following wsrep_after_row()
- may replicate a fragment, so we have to declare potential PA
- unsafe before that */
- if (table->s->primary_key == MAX_KEY && wsrep_thd_is_local(thd))
- {
- WSREP_DEBUG("marking trx as PA unsafe pk %d", table->s->primary_key);
- if (thd->wsrep_cs().mark_transaction_pa_unsafe())
- WSREP_DEBUG("session does not have active transaction,"
- " can not mark as PA unsafe");
- }
+ if (error)
+ return error;
- if (!error && table_share->tmp_table == NO_TMP_TABLE &&
- ht->flags & HTON_WSREP_REPLICATION)
- error= wsrep_after_row(thd);
+ if ((ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) &&
+ (error= ha_check_inserver_constraints(old_data, new_data)))
+ {
+ int e= 0;
+ if (ha_thd()->lex->ignore)
+ {
+ my_printf_error(ER_NOT_SUPPORTED_YET, "UPDATE IGNORE in READ "
+ "COMMITTED isolation mode of a table with a UNIQUE constraint "
+ "%s is not currently supported", MYF(0),
+ table->s->long_unique_table ? "USING HASH" : "WITHOUT OVERLAPS");
+ return HA_ERR_UNSUPPORTED;
}
-#endif /* WITH_WSREP */
+ return e ? e : error;
}
+
+ rows_changed++;
+ if (row_logging)
+ {
+ Log_func *log_func= Update_rows_log_event::binlog_row_logging_function;
+ error= binlog_log_row(table, old_data, new_data, log_func);
+ }
+#ifdef WITH_WSREP
+ THD *thd= ha_thd();
+ if (WSREP_NNULL(thd))
+ {
+ /* for streaming replication, the following wsrep_after_row()
+ may replicate a fragment, so we have to declare potential PA
+ unsafe before that */
+ if (table->s->primary_key == MAX_KEY && wsrep_thd_is_local(thd))
+ {
+ WSREP_DEBUG("marking trx as PA unsafe pk %d", table->s->primary_key);
+ if (thd->wsrep_cs().mark_transaction_pa_unsafe())
+ WSREP_DEBUG("session does not have active transaction,"
+ " can not mark as PA unsafe");
+ }
+
+ if (!error && table_share->tmp_table == NO_TMP_TABLE &&
+ ht->flags & HTON_WSREP_REPLICATION)
+ error= wsrep_after_row(thd);
+ }
+#endif /* WITH_WSREP */
return error;
}
@@ -7934,13 +7917,11 @@ int handler::update_first_row(const uchar *new_data)
int handler::ha_delete_row(const uchar *buf)
{
int error;
- DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
- m_lock_type == F_WRLCK);
+ DBUG_ASSERT(table_share->tmp_table || m_lock_type == F_WRLCK);
/*
Normally table->record[0] is used, but sometimes table->record[1] is used.
*/
- DBUG_ASSERT(buf == table->record[0] ||
- buf == table->record[1]);
+ DBUG_ASSERT(buf == table->record[0] || buf == table->record[1]);
MYSQL_DELETE_ROW_START(table_share->db.str, table_share->table_name.str);
mark_trx_read_write();
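
Most of the churn above comes from one idea: with HA_CHECK_UNIQUE_AFTER_WRITE the constraint check moves from before the engine write to after it, with an undo path on violation. A compressed model of the two flows (hooks are stand-ins, not the real handler API):

#include <cstdio>

enum { OK = 0, DUP_KEY = 121 };

static int engine_write()      { return OK; }  /* write_row() stand-in  */
static int engine_unwrite()    { return OK; }  /* delete just-added row */
static int check_constraints() { return OK; }  /* uniques + overlaps    */

static int write_row(bool check_after_write)
{
  if (!check_after_write)
  {
    /* Classic order: validate first, write only a clean row. */
    if (int err = check_constraints())
      return err;
    return engine_write();
  }
  /* READ COMMITTED path: write first so the index lookup can see the
     just-written row, then check, then undo the write on violation. */
  if (int err = engine_write())
    return err;
  if (int err = check_constraints())
  {
    engine_unwrite();   /* best effort; the duplicate error wins */
    return err;
  }
  return OK;
}

int main()
{
  printf("%d %d\n", write_row(false), write_row(true));
  return 0;
}
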
diff --git a/sql/handler.h b/sql/handler.h
index 53c65866d27..eeeee42c2ca 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -368,7 +368,9 @@ enum chf_create_flags {
/* Implements SELECT ... FOR UPDATE SKIP LOCKED */
#define HA_CAN_SKIP_LOCKED (1ULL << 61)
-#define HA_LAST_TABLE_FLAG HA_CAN_SKIP_LOCKED
+#define HA_CHECK_UNIQUE_AFTER_WRITE (1ULL << 62)
+
+#define HA_LAST_TABLE_FLAG HA_CHECK_UNIQUE_AFTER_WRITE
/* bits in index_flags(index_number) for what you can do with index */
@@ -4825,11 +4827,11 @@ private:
int create_lookup_handler();
void alloc_lookup_buffer();
- int check_duplicate_long_entries(const uchar *new_rec);
- int check_duplicate_long_entries_update(const uchar *new_rec);
int check_duplicate_long_entry_key(const uchar *new_rec, uint key_no);
/** PRIMARY KEY/UNIQUE WITHOUT OVERLAPS check */
int ha_check_overlaps(const uchar *old_data, const uchar* new_data);
+ int ha_check_long_uniques(const uchar *old_rec, const uchar *new_rec);
+ int ha_check_inserver_constraints(const uchar *old_data, const uchar* new_data);
protected:
/*
diff --git a/sql/item.cc b/sql/item.cc
index 5d36f37e6d9..1472b909fbd 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -5098,6 +5098,7 @@ Item_param::set_param_type_and_swap_value(Item_param *src)
void Item_param::set_default(bool set_type_handler_null)
{
m_is_settable_routine_parameter= false;
+ current_thd->lex->default_used= true;
state= DEFAULT_VALUE;
/*
When Item_param is set to DEFAULT_VALUE:
@@ -5255,14 +5256,26 @@ static Field *make_default_field(THD *thd, Field *field_arg)
if (!newptr)
return nullptr;
+ /* Don't check privileges if it's parse_vcol_defs() */
+ if (def_field->table->pos_in_table_list &&
+ def_field->default_value->check_access(thd))
+ return nullptr;
+
if (should_mark_column(thd->column_usage))
def_field->default_value->expr->update_used_tables();
def_field->move_field(newptr + 1, def_field->maybe_null() ? newptr : 0, 1);
}
- else
+ else if (field_arg->table && field_arg->table->s->field)
+ {
+ Field *def_val= field_arg->table->s->field[field_arg->field_index];
+ def_field->move_field(def_val->ptr, def_val->null_ptr, def_val->null_bit);
+ }
+ else /* e.g. non-updatable view */
+ {
def_field->move_field_offset((my_ptrdiff_t)
(def_field->table->s->default_values -
def_field->table->record[0]));
+ }
return def_field;
}
diff --git a/sql/item.h b/sql/item.h
index 0938c5dda78..23e4ed60dcd 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -2455,6 +2455,7 @@ public:
If there is some, sets a bit for this key in the proper key map.
*/
virtual bool check_index_dependence(void *arg) { return 0; }
+ virtual bool check_sequence_privileges(void *arg) { return 0; }
/*============== End of Item processor list ======================*/
/*
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index b4c660cb4f8..4c548e72db5 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -602,7 +602,7 @@ bool Arg_comparator::set_cmp_func_string(THD *thd)
else if ((*b)->type() == Item::FUNC_ITEM &&
((Item_func *) (*b))->functype() == Item_func::JSON_EXTRACT_FUNC)
{
- func= is_owner_equal_func() ? &Arg_comparator::compare_e_json_str:
+ func= is_owner_equal_func() ? &Arg_comparator::compare_e_str_json:
&Arg_comparator::compare_str_json;
return 0;
}
diff --git a/sql/item_func.cc b/sql/item_func.cc
index f9fd8a72a45..b4efd3143cc 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -7100,15 +7100,14 @@ longlong Item_func_cursor_rowcount::val_int()
/*****************************************************************************
SEQUENCE functions
*****************************************************************************/
-bool Item_func_nextval::check_access_and_fix_fields(THD *thd, Item **ref,
- privilege_t want_access)
+bool Item_func_nextval::check_access(THD *thd, privilege_t want_access)
{
table_list->sequence= false;
bool error= check_single_table_access(thd, want_access, table_list, false);
table_list->sequence= true;
if (error && table_list->belong_to_view)
table_list->replace_view_error_with_generic(thd);
- return error || Item_longlong_func::fix_fields(thd, ref);
+ return error;
}
longlong Item_func_nextval::val_int()
@@ -7123,7 +7122,8 @@ longlong Item_func_nextval::val_int()
String key_buff(buff,sizeof(buff), &my_charset_bin);
DBUG_ENTER("Item_func_nextval::val_int");
update_table();
- DBUG_ASSERT(table && table->s->sequence);
+ DBUG_ASSERT(table);
+ DBUG_ASSERT(table->s->sequence);
thd= table->in_use;
if (thd->count_cuted_fields == CHECK_FIELD_EXPRESSION)
diff --git a/sql/item_func.h b/sql/item_func.h
index ad7819cf8df..6f470e3a340 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -4236,7 +4236,7 @@ class Item_func_nextval :public Item_longlong_func
protected:
TABLE_LIST *table_list;
TABLE *table;
- bool check_access_and_fix_fields(THD *, Item **ref, privilege_t);
+ bool check_access(THD *, privilege_t);
public:
Item_func_nextval(THD *thd, TABLE_LIST *table_list_arg):
Item_longlong_func(thd), table_list(table_list_arg) {}
@@ -4247,7 +4247,13 @@ public:
return name;
}
bool fix_fields(THD *thd, Item **ref) override
- { return check_access_and_fix_fields(thd, ref, INSERT_ACL | SELECT_ACL); }
+ {
+ /* Don't check privileges if it's parse_vcol_defs() */
+ return (table_list->table && check_sequence_privileges(thd)) ||
+ Item_longlong_func::fix_fields(thd, ref);
+ }
+ bool check_sequence_privileges(void *thd) override
+ { return check_access((THD*)thd, INSERT_ACL | SELECT_ACL); }
bool fix_length_and_dec(THD *thd) override
{
unsigned_flag= 0;
@@ -4289,8 +4295,8 @@ class Item_func_lastval :public Item_func_nextval
public:
Item_func_lastval(THD *thd, TABLE_LIST *table_list_arg):
Item_func_nextval(thd, table_list_arg) {}
- bool fix_fields(THD *thd, Item **ref) override
- { return check_access_and_fix_fields(thd, ref, SELECT_ACL); }
+ bool check_sequence_privileges(void *thd) override
+ { return check_access((THD*)thd, SELECT_ACL); }
longlong val_int() override;
LEX_CSTRING func_name_cstring() const override
{
@@ -4315,8 +4321,8 @@ public:
: Item_func_nextval(thd, table_list_arg),
nextval(nextval_arg), round(round_arg), is_used(is_used_arg)
{}
- bool fix_fields(THD *thd, Item **ref) override
- { return check_access_and_fix_fields(thd, ref, INSERT_ACL); }
+ bool check_sequence_privileges(void *thd) override
+ { return check_access((THD*)thd, INSERT_ACL); }
longlong val_int() override;
LEX_CSTRING func_name_cstring() const override
{
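
The sequence functions now split fixing from privilege checking: fix_fields() stays cheap, and check_sequence_privileges() is a processor that TABLE::check_sequence_privileges() can walk over a DEFAULT expression later, once the access context is known. A minimal model of such a tree walk (toy classes, not the server's Item hierarchy):

#include <cstdio>
#include <vector>

struct Ctx { bool may_use_sequences; };

struct Node
{
  std::vector<Node *> args;
  virtual bool check_privileges(Ctx *) { return false; }  /* false = OK */
  bool walk(Ctx *ctx)   /* depth-first; any failure aborts the walk */
  {
    for (Node *a : args)
      if (a->walk(ctx))
        return true;
    return check_privileges(ctx);
  }
  virtual ~Node() = default;
};

struct NextvalNode : Node    /* stands in for Item_func_nextval */
{
  bool check_privileges(Ctx *ctx) override
  { return !ctx->may_use_sequences; }
};

int main()
{
  Node root;
  NextvalNode seq;
  root.args.push_back(&seq);
  Ctx deny{false}, allow{true};
  printf("deny: %d  allow: %d\n", root.walk(&deny), root.walk(&allow));
  return 0;
}
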
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 5f647aeaad9..b015bc445e9 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -7138,21 +7138,24 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
order R by (E(#records_matched) * key_record_length).
S= first(R); -- set of scans that will be used for ROR-intersection
- R= R-first(S);
+ R= R - S;
min_cost= cost(S);
min_scan= make_scan(S);
while (R is not empty)
{
- firstR= R - first(R);
- if (!selectivity(S + firstR < selectivity(S)))
+ firstR= first(R);
+ if (!(selectivity(S + firstR) < selectivity(S)))
+ {
+ R= R - firstR;
continue;
-
+ }
S= S + first(R);
if (cost(S) < min_cost)
{
min_cost= cost(S);
min_scan= make_scan(S);
}
+ R= R - firstR; -- Remove the processed scan from R
}
return min_scan;
}
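
With the fix, firstR is taken from R and removed on every pass, whether or not it improves selectivity. A runnable rendering of the corrected loop with a toy cost model (all numbers invented):

#include <cstdio>
#include <vector>

struct Scan { double selectivity; double index_cost; };

int main()
{
  /* R: candidate ROR scans, already ordered as the comment prescribes. */
  std::vector<Scan> R = {{0.5, 10}, {0.9, 4}, {0.3, 25}};
  const double fetch_cost_per_row = 1000;   /* toy row-retrieval cost */
  double sel = 1.0, index_cost = 0, min_cost = 1e100;
  size_t in_S = 0, best = 0;

  for (const Scan &firstR : R)              /* R = R - firstR each pass */
  {
    if (!(sel * firstR.selectivity < sel))  /* no selectivity gain */
      continue;
    sel *= firstR.selectivity;              /* S = S + firstR */
    index_cost += firstR.index_cost;
    ++in_S;
    double cost = index_cost + sel * fetch_cost_per_row;
    if (cost < min_cost)
    {
      min_cost = cost;
      best = in_S;
    }
  }
  printf("use first %zu scan(s), cost %.1f, selectivity %.3f\n",
         best, min_cost, sel);
  return 0;
}
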
diff --git a/sql/semisync_master.cc b/sql/semisync_master.cc
index 43afb8f996e..854b6a61eeb 100644
--- a/sql/semisync_master.cc
+++ b/sql/semisync_master.cc
@@ -68,15 +68,20 @@ static ulonglong timespec_to_usec(const struct timespec *ts)
return (ulonglong) ts->tv_sec * TIME_MILLION + ts->tv_nsec / TIME_THOUSAND;
}
-int signal_waiting_transaction(THD *waiting_thd, const char *binlog_file,
- my_off_t binlog_pos)
+static int
+signal_waiting_transaction(THD *waiting_thd, bool thd_valid,
+ const char *binlog_file, my_off_t binlog_pos)
{
/*
It is possible that the connection thd waiting for an ACK was killed. In
such circumstance, the connection thread will nullify the thd member of its
Active_tranx node. So before we try to signal, ensure the THD exists.
+
+ The thd_valid is only set while the THD is waiting in commit_trx(); this
+ is defensive coding to not signal an invalid THD if we accidentally
+ failed to remove the transaction from the list somewhere.
*/
- if (waiting_thd)
+ if (waiting_thd && thd_valid)
mysql_cond_signal(&waiting_thd->COND_wakeup_ready);
return 0;
}
@@ -182,6 +187,7 @@ int Active_tranx::insert_tranx_node(THD *thd_to_wait,
ins_node->log_name[FN_REFLEN-1] = 0; /* make sure it ends properly */
ins_node->log_pos = log_file_pos;
ins_node->thd= thd_to_wait;
+ ins_node->thd_valid= false;
if (!m_trx_front)
{
@@ -263,7 +269,8 @@ void Active_tranx::clear_active_tranx_nodes(
if ((log_file_name != NULL) &&
compare(new_front, log_file_name, log_file_pos) > 0)
break;
- pre_delete_hook(new_front->thd, new_front->log_name, new_front->log_pos);
+ pre_delete_hook(new_front->thd, new_front->thd_valid,
+ new_front->log_name, new_front->log_pos);
new_front = new_front->next;
}
@@ -355,13 +362,17 @@ void Active_tranx::unlink_thd_as_waiter(const char *log_file_name,
}
if (entry)
+ {
entry->thd= NULL;
+ entry->thd_valid= false;
+ }
DBUG_VOID_RETURN;
}
-bool Active_tranx::is_thd_waiter(THD *thd_to_check, const char *log_file_name,
- my_off_t log_file_pos)
+Tranx_node *
+Active_tranx::is_thd_waiter(THD *thd_to_check, const char *log_file_name,
+ my_off_t log_file_pos)
{
DBUG_ENTER("Active_tranx::assert_thd_is_waiter");
mysql_mutex_assert_owner(m_lock);
@@ -377,7 +388,7 @@ bool Active_tranx::is_thd_waiter(THD *thd_to_check, const char *log_file_name,
entry = entry->hash_next;
}
- DBUG_RETURN(static_cast<bool>(entry));
+ DBUG_RETURN(entry);
}
/*******************************************************************************
@@ -863,6 +874,10 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name,
if (!rpl_semi_sync_master_clients && !rpl_semi_sync_master_wait_no_slave)
{
+ lock();
+ m_active_tranxs->unlink_thd_as_waiter(trx_wait_binlog_name,
+ trx_wait_binlog_pos);
+ unlock();
rpl_semi_sync_master_no_transactions++;
DBUG_RETURN(0);
}
@@ -922,6 +937,9 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name,
}
}
+ Tranx_node *tranx_entry=
+ m_active_tranxs->is_thd_waiter(thd, trx_wait_binlog_name,
+ trx_wait_binlog_pos);
/* In between the binlogging of this transaction and this wait, it is
* possible that our entry in Active_tranx was removed (i.e. if
* semi-sync was switched off and on). It is also possible that the
@@ -932,8 +950,7 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name,
* rpl_semi_sync_master_yes/no_tx consistent with it, we check for a
* semi-sync restart _after_ checking the reply state.
*/
- if (unlikely(!m_active_tranxs->is_thd_waiter(thd, trx_wait_binlog_name,
- trx_wait_binlog_pos)))
+ if (unlikely(!tranx_entry))
{
DBUG_EXECUTE_IF(
"semisync_log_skip_trx_wait",
@@ -952,6 +969,16 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name,
break;
}
+ /*
+ Mark that our THD is now valid for the ack thread to signal.
+ It is important to ensure that we can never leave a no-longer-valid
+ THD in the transaction list and signal it, e.g. MDEV-36934. This way,
+ we ensure the THD will only be signalled while this function is
+ running, even in case of some incorrect error handling or similar
+ that might leave a dangling THD in the list.
+ */
+ tranx_entry->thd_valid= true;
+
/* Let us update the info about the minimum binlog position of waiting
* threads.
*/
@@ -1284,6 +1311,8 @@ int Repl_semi_sync_master::write_tranx_in_binlog(THD *thd,
DBUG_ENTER("Repl_semi_sync_master::write_tranx_in_binlog");
+ DEBUG_SYNC(current_thd, "semisync_at_write_tranx_in_binlog");
+
lock();
/* This is the real check inside the mutex. */
@@ -1317,7 +1346,8 @@ int Repl_semi_sync_master::write_tranx_in_binlog(THD *thd,
m_commit_file_name_inited = true;
}
- if (is_on())
+ if (is_on() &&
+ (rpl_semi_sync_master_clients || rpl_semi_sync_master_wait_no_slave))
{
DBUG_ASSERT(m_active_tranxs != NULL);
if(m_active_tranxs->insert_tranx_node(thd, log_file_name, log_file_pos))
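
The thd_valid flag shrinks the signalling window to exactly the span of commit_trx(): the ACK side signals only a waiter whose pointer is set and whose flag is true. A condensed model with standard primitives (the server uses mysql_cond_*; names invented):

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

struct Waiter
{
  std::condition_variable wakeup;
  bool acked = false;
};

struct TranxNode
{
  Waiter *thd = nullptr;    /* cleared when the waiter is unlinked      */
  bool thd_valid = false;   /* true only while thd waits in commit_trx  */
};

static std::mutex lock_;
static TranxNode node;

static void ack_thread()
{
  std::lock_guard<std::mutex> g(lock_);
  if (node.thd && node.thd_valid)   /* signal only a provable waiter */
  {
    node.thd->acked = true;
    node.thd->wakeup.notify_one();
  }
}

int main()
{
  Waiter w;
  std::unique_lock<std::mutex> g(lock_);
  node.thd = &w;
  node.thd_valid = true;                     /* set just before waiting */
  std::thread acker(ack_thread);             /* blocks on lock_ for now */
  w.wakeup.wait(g, [&] { return w.acked; }); /* unlocks while sleeping  */
  node.thd = nullptr;
  node.thd_valid = false;                    /* window closed */
  g.unlock();
  acker.join();
  printf("acked\n");
  return 0;
}
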
diff --git a/sql/semisync_master.h b/sql/semisync_master.h
index a1c57959165..28de3ecf480 100644
--- a/sql/semisync_master.h
+++ b/sql/semisync_master.h
@@ -30,6 +30,7 @@ extern PSI_cond_key key_COND_binlog_send;
struct Tranx_node {
char log_name[FN_REFLEN];
+ bool thd_valid; /* thd is valid for signalling */
my_off_t log_pos;
THD *thd; /* The thread awaiting an ACK */
struct Tranx_node *next; /* the next node in the sorted list */
@@ -126,7 +127,9 @@ public:
trx_node= &(current_block->nodes[++last_node]);
trx_node->log_name[0] = '\0';
+ trx_node->thd_valid= false;
trx_node->log_pos= 0;
+ trx_node->thd= nullptr;
trx_node->next= 0;
trx_node->hash_next= 0;
return trx_node;
@@ -298,7 +301,8 @@ private:
its invocation. See the context in which it is called to know.
*/
-typedef int (*active_tranx_action)(THD *trx_thd, const char *log_file_name,
+typedef int (*active_tranx_action)(THD *trx_thd, bool thd_valid,
+ const char *log_file_name,
my_off_t trx_log_file_pos);
/**
@@ -381,8 +385,8 @@ public:
* matches the thread of the respective Tranx_node::thd of the passed in
* log_file_name and log_file_pos.
*/
- bool is_thd_waiter(THD *thd_to_check, const char *log_file_name,
- my_off_t log_file_pos);
+ Tranx_node * is_thd_waiter(THD *thd_to_check, const char *log_file_name,
+ my_off_t log_file_pos);
/* Given a position, check to see whether the position is an active
* transaction's ending position by probing the hash table.
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index e8d89bec6b4..9f1c4452ba9 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -8453,9 +8453,17 @@ bool check_grant(THD *thd, privilege_t want_access, TABLE_LIST *tables,
Direct SELECT of a sequence table doesn't set t_ref->sequence, so
privileges will be checked normally, as for any table.
*/
- if (t_ref->sequence &&
- !(want_access & ~(SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL)))
- continue;
+ if (t_ref->sequence)
+ {
+ if (!(want_access & ~(SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL)))
+ continue;
+ /*
+ If it is ALTER..SET DEFAULT= nextval(sequence), also defer checks
+ until ::fix_fields().
+ */
+ if (tl != tables && want_access == ALTER_ACL)
+ continue;
+ }
const ACL_internal_table_access *access=
get_cached_table_access(&t_ref->grant.m_internal,
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index ad514b73742..db616c9f034 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -5171,7 +5171,7 @@ bool DML_prelocking_strategy::handle_table(THD *thd,
DBUG_ASSERT(table_list->lock_type >= TL_FIRST_WRITE ||
thd->lex->default_used);
- if (table_list->trg_event_map)
+ if (table_list->trg_event_map && table_list->lock_type >= TL_FIRST_WRITE)
{
if (table->triggers)
{
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index b34ee9e76bb..6d0cbf90022 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1670,6 +1670,9 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(insert_view_fields(thd, &fields, table_list));
}
+ if (table_list->table->check_sequence_privileges(thd))
+ DBUG_RETURN(TRUE);
+
DBUG_RETURN(FALSE);
}
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index dcb63703e97..1bdf810ab3b 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2000, 2019, Oracle and/or its affiliates.
- Copyright (c) 2009, 2022, MariaDB Corporation.
+/* Copyright (c) 2000, 2025, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2025, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -2538,6 +2538,8 @@ int Lex_input_stream::lex_one_token(YYSTYPE *yylval, THD *thd)
state=MY_LEX_CHAR;
break;
case MY_LEX_END:
+ /* Unclosed special comments result in a syntax error */
+ if (in_comment == DISCARD_COMMENT) return (ABORT_SYM);
next_state= MY_LEX_END;
return(0); // We found end of input last time
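
The MY_LEX_END change turns an input that ends inside a versioned /*! ... comment into a parse error rather than a silent success. A toy scanner showing the same end-of-input rule (states named after the patch, logic heavily simplified):

#include <cstdio>

enum CommentState { NO_COMMENT, DISCARD_COMMENT };

static bool lex_accepts(const char *query)
{
  CommentState in_comment = NO_COMMENT;
  for (const char *p = query; *p; p++)
  {
    if (p[0] == '/' && p[1] == '*' && p[2] == '!')
    {
      in_comment = DISCARD_COMMENT;
      p += 2;
    }
    else if (in_comment == DISCARD_COMMENT && p[0] == '*' && p[1] == '/')
    {
      in_comment = NO_COMMENT;
      p++;
    }
  }
  /* The new rule: reaching end of input inside a special comment fails. */
  return in_comment != DISCARD_COMMENT;
}

int main()
{
  printf("%d\n", lex_accepts("SELECT 1 /*!99999 x */"));  /* 1: ok    */
  printf("%d\n", lex_accepts("SELECT 1 /*!99999 x"));     /* 0: error */
  return 0;
}
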
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index f9a9fb45ae7..e722a0d701d 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1510,6 +1510,11 @@ static int mysql_test_update(Prepared_statement *stmt,
0, NULL, 0, THD_WHERE::SET_LIST) ||
check_unique_table(thd, table_list))
goto error;
+ {
+ List_iterator_fast<Item> fs(select->item_list), vs(stmt->lex->value_list);
+ while (Item *f= fs++)
+ vs++->associate_with_target_field(thd, static_cast<Item_field *>(f));
+ }
/* TODO: here we should send types of placeholders to the client. */
DBUG_RETURN(0);
error:
@@ -4602,6 +4607,8 @@ Prepared_statement::set_parameters(String *expanded_query,
res= set_params_data(this, expanded_query);
#endif
}
+ lex->default_used= thd->lex->default_used;
+ thd->lex->default_used= false;
if (res)
{
my_error(ER_WRONG_ARGUMENTS, MYF(0),
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index e66a98b71d2..d3e1ead482d 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -7154,7 +7154,13 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array, KEY_FIELD *key_field)
}
}
}
- if (field->hash_join_is_possible() &&
+ /*
+ Compressed field cannot be part of a key. For optimizer temporary table
+ compressed fields are replaced by uncompressed ones, see
+ is_optimizer_tmp_table() and Field_*_compressed::make_new_field().
+ */
+ if (!field->compression_method() &&
+ field->hash_join_is_possible() &&
(key_field->optimize & KEY_OPTIMIZE_EQ) &&
key_field->val->used_tables())
{
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index f60463a5cf8..ff6adb49363 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -3540,8 +3540,6 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
Create_field *auto_increment_key= 0;
Key_part_spec *column;
- bool is_hash_field_needed= key->key_create_info.algorithm
- == HA_KEY_ALG_LONG_HASH;
if (key->type == Key::IGNORE_KEY)
{
/* ignore redundant keys */
@@ -3552,6 +3550,9 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
break;
}
+ bool is_hash_field_needed= key->key_create_info.algorithm
+ == HA_KEY_ALG_LONG_HASH;
+
if (key_check_without_overlaps(thd, create_info, alter_info, *key))
DBUG_RETURN(true);
@@ -11343,7 +11344,8 @@ do_continue:;
thd->count_cuted_fields= CHECK_FIELD_EXPRESSION;
altered_table.reset_default_fields();
if (altered_table.default_field &&
- altered_table.update_default_fields(true))
+ (altered_table.check_sequence_privileges(thd) ||
+ altered_table.update_default_fields(true)))
{
cleanup_table_after_inplace_alter(&altered_table);
goto err_new_table_cleanup;
@@ -12761,6 +12763,23 @@ bool check_engine(THD *thd, const char *db_name,
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "NO_ENGINE_SUBSTITUTION");
DBUG_RETURN(TRUE);
}
+#ifdef WITH_WSREP
+ /* @@enforce_storage_engine is local; if the user has used
+ ENGINE=XXX we can't allow it in the cluster in this
+ case, as enf_engine != new_engine. This is because the
+ original stmt is replicated including ENGINE=XXX, and
+ here it would be changed to the enforced engine. */
+ if ((create_info->used_fields & HA_CREATE_USED_ENGINE) &&
+ WSREP(thd))
+ {
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "ENFORCE_STORAGE_ENGINE");
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_OPTION_PREVENTS_STATEMENT,
+ "Do not use ENGINE=x when @@enforce_storage_engine is set");
+
+ DBUG_RETURN(TRUE);
+ }
+#endif
*new_engine= enf_engine;
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index ed1942ff70b..e8d15b1497d 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -13252,6 +13252,7 @@ expr_or_ignore_or_default:
| DEFAULT
{
$$= new (thd->mem_root) Item_default_specification(thd);
+ Lex->default_used= TRUE;
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
@@ -13335,6 +13336,7 @@ update_elem:
{
Item *def= new (thd->mem_root) Item_default_value(thd,
Lex->current_context(), $1, 1);
+ Lex->default_used= TRUE;
if (!def || add_item_to_list(thd, $1) || add_value_to_list(thd, def))
MYSQL_YYABORT;
}
diff --git a/sql/table.cc b/sql/table.cc
index 279ae29bfe7..688e89ae018 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -2838,6 +2838,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
hash_keypart->fieldnr= hash_field_used_no + 1;
hash_field= share->field[hash_field_used_no];
hash_field->flags|= LONG_UNIQUE_HASH_FIELD;//Used in parse_vcol_defs
+ DBUG_ASSERT(hash_field->invisible == INVISIBLE_FULL);
keyinfo->flags|= HA_NOSAME;
share->virtual_fields++;
share->stored_fields--;
@@ -3760,6 +3761,19 @@ Vcol_expr_context::~Vcol_expr_context()
}
+bool TABLE::check_sequence_privileges(THD *thd)
+{
+ if (internal_tables)
+ for (Field **fp= field; *fp; fp++)
+ {
+ Virtual_column_info *vcol= (*fp)->default_value;
+ if (vcol && vcol->check_access(thd))
+ return 1;
+ }
+ return 0;
+}
+
+
bool TABLE::vcol_fix_expr(THD *thd)
{
if (pos_in_table_list->placeholder() || vcol_refix_list.is_empty())
@@ -3896,6 +3910,13 @@ bool Virtual_column_info::fix_and_check_expr(THD *thd, TABLE *table)
}
+bool Virtual_column_info::check_access(THD *thd)
+{
+ return flags & VCOL_NEXTVAL &&
+ expr->walk(&Item::check_sequence_privileges, 0, thd);
+}
+
+
/*
@brief
Unpack the definition of a virtual column from its linear representation
diff --git a/sql/table.h b/sql/table.h
index 8068a69f946..86502589ec0 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1109,6 +1109,11 @@ struct TABLE_SHARE
return (tmp_table == SYSTEM_TMP_TABLE) ? 0 : table_map_id;
}
+ bool is_optimizer_tmp_table()
+ {
+ return tmp_table == INTERNAL_TMP_TABLE && !db.length && table_name.length;
+ }
+
bool visit_subgraph(Wait_for_flush *waiting_ticket,
MDL_wait_for_graph_visitor *gvisitor);
@@ -1752,6 +1757,7 @@ public:
TABLE *tmp_table,
TMP_TABLE_PARAM *tmp_table_param,
bool with_cleanup);
+ bool check_sequence_privileges(THD *thd);
bool vcol_fix_expr(THD *thd);
bool vcol_cleanup_expr(THD *thd);
Field *find_field_by_name(LEX_CSTRING *str) const;
diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc
index fe0cedae330..7fd9a987cc5 100644
--- a/sql/wsrep_mysqld.cc
+++ b/sql/wsrep_mysqld.cc
@@ -2829,9 +2829,17 @@ static int wsrep_TOI_begin(THD *thd, const char *db, const char *table,
WSREP_DEBUG("TOI Begin: %s", wsrep_thd_query(thd));
DEBUG_SYNC(thd, "wsrep_before_toi_begin");
- if (wsrep_can_run_in_toi(thd, db, table, table_list, create_info) == false)
+ if (!wsrep_ready ||
+ wsrep_can_run_in_toi(thd, db, table, table_list, create_info) == false)
{
WSREP_DEBUG("No TOI for %s", wsrep_thd_query(thd));
+ if (!wsrep_ready)
+ {
+ my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0));
+ push_warning_printf(thd, Sql_state_errno_level::WARN_LEVEL_WARN,
+ ER_GALERA_REPLICATION_NOT_SUPPORTED,
+ "Galera cluster is not ready to execute replication");
+ }
return 1;
}
diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc
index 95e7cf7d4ad..c21fdf882f7 100644
--- a/sql/wsrep_var.cc
+++ b/sql/wsrep_var.cc
@@ -27,6 +27,7 @@
#include
#include "wsrep_trans_observer.h"
#include "wsrep_server_state.h"
+#include "wsrep_mysqld.h"
ulong wsrep_reject_queries;
@@ -123,6 +124,14 @@ bool wsrep_on_update (sys_var *self, THD* thd, enum_var_type var_type)
saved_wsrep_on= false;
}
+ if (!wsrep_ready_get())
+ {
+ my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0));
+ WSREP_INFO("Failed to start Galera replication. Please check your "
+ "configuration.");
+ saved_wsrep_on= false;
+ }
+
free(tmp);
mysql_mutex_lock(&LOCK_global_system_variables);
}
diff --git a/storage/columnstore/columnstore b/storage/columnstore/columnstore
index 9bc1ebc2631..5ba808d542e 160000
--- a/storage/columnstore/columnstore
+++ b/storage/columnstore/columnstore
@@ -1 +1 @@
-Subproject commit 9bc1ebc2631c361a079c892ab56b2429160126dd
+Subproject commit 5ba808d542e8552a4b3a868fce9e119623f366d7
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 101fe6b72c3..a5148713b09 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -950,12 +950,15 @@ MY_ATTRIBUTE((warn_unused_result))
@return number of pages written or hole-punched */
uint32_t fil_space_t::flush_freed(bool writable) noexcept
{
+ mysql_mutex_assert_not_owner(&buf_pool.flush_list_mutex);
+ mysql_mutex_assert_not_owner(&buf_pool.mutex);
+
const bool punch_hole= chain.start->punch_hole == 1;
if (!punch_hole && !srv_immediate_scrub_data_uncompressed)
return 0;
-
- mysql_mutex_assert_not_owner(&buf_pool.flush_list_mutex);
- mysql_mutex_assert_not_owner(&buf_pool.mutex);
+ if (srv_is_undo_tablespace(id))
+ /* innodb_undo_log_truncate=ON can take care of these better */
+ return 0;
for (;;)
{
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 7d6b704f086..4312623fae2 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -870,6 +870,8 @@ retry:
else if (table)
table->acquire();
}
+ else if (!dict_locked)
+ dict_sys.unfreeze();
return table;
}
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index 39d6096e9cd..25e99e178d7 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -2705,23 +2705,30 @@ fil_io_t fil_space_t::io(const IORequest &type, os_offset_t offset, size_t len,
while (node->size <= p) {
p -= node->size;
- node = UT_LIST_GET_NEXT(chain, node);
- if (!node) {
+ if (!UT_LIST_GET_NEXT(chain, node)) {
fail:
- if (type.type != IORequest::READ_ASYNC) {
+ switch (type.type) {
+ case IORequest::READ_ASYNC:
+ /* Read-ahead may be requested for
+ non-existing pages. Ignore such
+ requests. */
+ break;
+ default:
fil_invalid_page_access_msg(
node->name,
offset, len,
type.is_read());
- }
#ifndef DBUG_OFF
io_error:
#endif
- set_corrupted();
+ set_corrupted();
+ }
+
err = DB_CORRUPTION;
node = nullptr;
goto release;
}
+ node = UT_LIST_GET_NEXT(chain, node);
}
offset = os_offset_t{p} << srv_page_size_shift;
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 4449ebd3853..4ae491b88af 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -122,8 +122,6 @@ simple_thread_local ha_handler_stats *mariadb_stats;
#include
#include // TT_FOR_UPGRADE
-#define thd_get_trx_isolation(X) ((enum_tx_isolation)thd_tx_isolation(X))
-
extern "C" void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all);
unsigned long long thd_get_query_id(const MYSQL_THD thd);
void thd_clear_error(MYSQL_THD thd);
@@ -839,14 +837,16 @@ innodb_tmpdir_validate(
return(0);
}
-/******************************************************************//**
-Maps a MySQL trx isolation level code to the InnoDB isolation level code
-@return InnoDB isolation level */
-static inline
-uint
-innobase_map_isolation_level(
-/*=========================*/
- enum_tx_isolation iso); /*!< in: MySQL isolation level code */
+/** @return the current transaction isolation level */
+static inline uint innodb_isolation_level(const THD *thd) noexcept
+{
+ static_assert(ISO_REPEATABLE_READ == TRX_ISO_REPEATABLE_READ, "");
+ static_assert(ISO_SERIALIZABLE == TRX_ISO_SERIALIZABLE, "");
+ static_assert(ISO_READ_COMMITTED == TRX_ISO_READ_COMMITTED, "");
+ static_assert(ISO_READ_UNCOMMITTED == TRX_ISO_READ_UNCOMMITTED, "");
+ return high_level_read_only
+ ? ISO_READ_UNCOMMITTED : (thd_tx_isolation(thd) & 3);
+}
/** Gets field offset for a field in a table.
@param[in] table MySQL table object
@@ -4363,21 +4363,18 @@ innobase_start_trx_and_assign_read_view(
trx_start_if_not_started_xa(trx, false);
- /* Assign a read view if the transaction does not have it yet.
- Do this only if transaction is using REPEATABLE READ isolation
- level. */
- trx->isolation_level = innobase_map_isolation_level(
- thd_get_trx_isolation(thd)) & 3;
+ /* Assign a read view if the transaction does not have one yet.
+ Skip this for the READ UNCOMMITTED isolation level. */
+ trx->isolation_level = innodb_isolation_level(thd) & 3;
- if (trx->isolation_level == TRX_ISO_REPEATABLE_READ) {
+ if (trx->isolation_level != TRX_ISO_READ_UNCOMMITTED) {
trx->read_view.open(trx);
} else {
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
HA_ERR_UNSUPPORTED,
"InnoDB: WITH CONSISTENT SNAPSHOT"
- " was ignored because this phrase"
- " can only be used with"
- " REPEATABLE READ isolation level.");
+ " is ignored at READ UNCOMMITTED"
+ " isolation level.");
}
/* Set the MySQL flag to mark that there is an active transaction */
@@ -4987,7 +4984,7 @@ ha_innobase::table_flags() const
called before prebuilt is inited. */
if (thd_tx_isolation(thd) <= ISO_READ_COMMITTED) {
- return(flags);
+ return(flags | HA_CHECK_UNIQUE_AFTER_WRITE);
}
return(flags | HA_BINLOG_STMT_CAPABLE);
@@ -14059,10 +14056,10 @@ int ha_innobase::truncate()
trx);
if (!err)
{
+ trx->commit(deleted);
m_prebuilt->table->acquire();
create_table_info_t::create_table_update_dict(m_prebuilt->table,
m_user_thd, info, *table);
- trx->commit(deleted);
}
else
{
@@ -16142,31 +16139,6 @@ ha_innobase::start_stmt(
DBUG_RETURN(0);
}
-/******************************************************************//**
-Maps a MySQL trx isolation level code to the InnoDB isolation level code
-@return InnoDB isolation level */
-static inline
-uint
-innobase_map_isolation_level(
-/*=========================*/
- enum_tx_isolation iso) /*!< in: MySQL isolation level code */
-{
- if (UNIV_UNLIKELY(srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN)
- || UNIV_UNLIKELY(srv_read_only_mode)) {
- return TRX_ISO_READ_UNCOMMITTED;
- }
- switch (iso) {
- case ISO_REPEATABLE_READ: return(TRX_ISO_REPEATABLE_READ);
- case ISO_READ_COMMITTED: return(TRX_ISO_READ_COMMITTED);
- case ISO_SERIALIZABLE: return(TRX_ISO_SERIALIZABLE);
- case ISO_READ_UNCOMMITTED: return(TRX_ISO_READ_UNCOMMITTED);
- }
-
- ut_error;
-
- return(0);
-}
-
/******************************************************************//**
As MySQL will execute an external lock for every new table it uses when it
starts to process an SQL statement (an exception is when MySQL calls
@@ -16633,19 +16605,30 @@ ha_innobase::store_lock(
Be careful to ignore TL_IGNORE if we are going to do something with
only 'real' locks! */
- /* If no MySQL table is in use, we need to set the isolation level
+ /* If no table handle is open, we need to set the isolation level
of the transaction. */
if (lock_type != TL_IGNORE
&& trx->n_mysql_tables_in_use == 0) {
- trx->isolation_level = innobase_map_isolation_level(
- (enum_tx_isolation) thd_tx_isolation(thd)) & 3;
-
- if (trx->isolation_level <= TRX_ISO_READ_COMMITTED) {
-
+ switch ((trx->isolation_level
+ = innodb_isolation_level(thd) & 3)) {
+ case ISO_REPEATABLE_READ:
+ break;
+ case ISO_READ_COMMITTED:
+ case ISO_READ_UNCOMMITTED:
/* At low transaction isolation levels we let
each consistent read set its own snapshot */
trx->read_view.close();
+ break;
+ case ISO_SERIALIZABLE:
+ auto trx_state = trx->state;
+ if (trx_state != TRX_STATE_NOT_STARTED) {
+ ut_ad(trx_state == TRX_STATE_ACTIVE);
+ } else if (trx->snapshot_isolation) {
+ trx->will_lock = true;
+ trx_start_if_not_started(trx, false);
+ trx->read_view.open(trx);
+ }
}
}
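
innodb_isolation_level() can drop the old switch because the server's enum_tx_isolation and InnoDB's TRX_ISO_* codes are numerically identical, which the static_asserts pin down; masking with 3 then just clamps the value to the enum range. The same trick in isolation (enumerators copied by value for illustration):

#include <cstdio>

enum enum_tx_isolation { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED,
                         ISO_REPEATABLE_READ, ISO_SERIALIZABLE };
enum trx_iso_t { TRX_ISO_READ_UNCOMMITTED, TRX_ISO_READ_COMMITTED,
                 TRX_ISO_REPEATABLE_READ, TRX_ISO_SERIALIZABLE };

static_assert(ISO_REPEATABLE_READ == TRX_ISO_REPEATABLE_READ, "");
static_assert(ISO_SERIALIZABLE == TRX_ISO_SERIALIZABLE, "");

/* With equality proven at compile time, the mapping is a masked identity;
   read-only mode degrades everything to READ UNCOMMITTED. */
static unsigned isolation_level(unsigned thd_iso, bool read_only)
{
  return read_only ? TRX_ISO_READ_UNCOMMITTED : (thd_iso & 3);
}

int main()
{
  printf("%u %u\n", isolation_level(ISO_SERIALIZABLE, false),
                    isolation_level(ISO_SERIALIZABLE, true));
  return 0;
}
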
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index 07c8a13a72c..8d28952bd17 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -1850,12 +1850,17 @@ corrupted:
return true;
}
-/*********************************************************************//**
-Removes a page from the free list and frees it to the fsp system. */
-static void ibuf_remove_free_page()
+/** Removes a page from the free list and frees it to the fsp system.
+@param all Free all freed pages. This is useful only during slow
+shutdown.
+@return error code when InnoDB fails to free the page
+@retval DB_SUCCESS_LOCKED_REC if all free pages are freed
+@retval DB_SUCCESS if page is freed */
+static dberr_t ibuf_remove_free_page(bool all = false)
{
mtr_t mtr;
page_t* header_page;
+ dberr_t err = DB_SUCCESS;
log_free_check();
@@ -1871,17 +1876,17 @@ static void ibuf_remove_free_page()
mysql_mutex_lock(&ibuf_pessimistic_insert_mutex);
mysql_mutex_lock(&ibuf_mutex);
- if (!header_page || !ibuf_data_too_much_free()) {
+ if (!header_page || (!all && !ibuf_data_too_much_free())) {
early_exit:
mysql_mutex_unlock(&ibuf_mutex);
mysql_mutex_unlock(&ibuf_pessimistic_insert_mutex);
-
+exit:
ibuf_mtr_commit(&mtr);
- return;
+ return err;
}
- buf_block_t* root = ibuf_tree_root_get(&mtr);
+ buf_block_t* root = ibuf_tree_root_get(&mtr, &err);
if (UNIV_UNLIKELY(!root)) {
goto early_exit;
@@ -1892,7 +1897,10 @@ early_exit:
+ PAGE_BTR_IBUF_FREE_LIST
+ root->page.frame).page;
+ /* If all the freed pages are removed during slow shutdown
+ then exit early with DB_SUCCESS_LOCKED_REC */
if (page_no >= fil_system.sys_space->free_limit) {
+ err = DB_SUCCESS_LOCKED_REC;
goto early_exit;
}
@@ -1914,7 +1922,7 @@ early_exit:
compile_time_assert(IBUF_SPACE_ID == 0);
const page_id_t page_id{IBUF_SPACE_ID, page_no};
buf_block_t* bitmap_page = nullptr;
- dberr_t err = fseg_free_page(
+ err = fseg_free_page(
header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
fil_system.sys_space, page_no, &mtr);
@@ -1959,7 +1967,7 @@ func_exit:
buf_page_free(fil_system.sys_space, page_no, &mtr);
}
- ibuf_mtr_commit(&mtr);
+ goto exit;
}
/***********************************************************************//**
@@ -2427,7 +2435,9 @@ ATTRIBUTE_COLD ulint ibuf_contract()
== page_id_t(IBUF_SPACE_ID, FSP_IBUF_TREE_ROOT_PAGE_NO));
ibuf_mtr_commit(&mtr);
-
+ /* Remove all pages from the free list and free them to the
+ system tablespace. The loop ends as soon as
+ ibuf_remove_free_page() no longer returns DB_SUCCESS */
+ while (ibuf_remove_free_page(true) == DB_SUCCESS);
return(0);
}
diff --git a/storage/innobase/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff b/storage/innobase/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff
index 2860d5cb0b8..b2251a7222a 100644
--- a/storage/innobase/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff
@@ -1,14 +1,6 @@
--- suite/storage_engine/trx/cons_snapshot_serializable.result
+++ suite/storage_engine/trx/cons_snapshot_serializable.reject
-@@ -5,12 +5,15 @@
- CREATE TABLE t1 (a ) ENGINE= ;
- SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
- START TRANSACTION WITH CONSISTENT SNAPSHOT;
-+Warnings:
-+Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level.
- connection con2;
- INSERT INTO t1 (a) VALUES (1);
- connection con1;
+@@ -11,6 +11,7 @@
# If consistent read works on this isolation level (SERIALIZABLE), the following SELECT should not return the value we inserted (1)
SELECT a FROM t1;
a
diff --git a/storage/innobase/mysql-test/storage_engine/trx/level_read_committed.rdiff b/storage/innobase/mysql-test/storage_engine/trx/level_read_committed.rdiff
deleted file mode 100644
index d0a846ee1f7..00000000000
--- a/storage/innobase/mysql-test/storage_engine/trx/level_read_committed.rdiff
+++ /dev/null
@@ -1,11 +0,0 @@
---- suite/storage_engine/trx/level_read_committed.result
-+++ suite/storage_engine/trx/level_read_committed.reject
-@@ -77,6 +77,8 @@
- CREATE TABLE t1 (a ) ENGINE= ;
- SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
- START TRANSACTION WITH CONSISTENT SNAPSHOT;
-+Warnings:
-+Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level.
- connection con2;
- INSERT INTO t1 (a) VALUES (1);
- connection con1;
diff --git a/storage/innobase/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff b/storage/innobase/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
index ee483dd64bb..756b8626f76 100644
--- a/storage/innobase/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
@@ -5,7 +5,7 @@
SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
+Warnings:
-+Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level.
++Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT is ignored at READ UNCOMMITTED isolation level.
connection con2;
INSERT INTO t1 (a) VALUES (1);
connection con1;
diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc
index 04910745c78..c4ed623a044 100644
--- a/storage/innobase/page/page0page.cc
+++ b/storage/innobase/page/page0page.cc
@@ -977,8 +977,9 @@ page_delete_rec_list_end(
size+= s;
n_recs++;
- if (scrub)
- mtr->memset(block, rec2 - page, rec_offs_data_size(offsets), 0);
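+ /* Skip the scrub memset when the deleted record carries no
+ payload data to overwrite. */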
+ if (UNIV_LIKELY(!scrub));
+ else if (size_t size= rec_offs_data_size(offsets))
+ mtr->memset(block, rec2 - page, size, 0);
rec2= page_rec_get_next(rec2);
}
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index abf5996615b..c06b88226f8 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -975,12 +975,19 @@ srv_open_tmp_tablespace(bool create_new_db)
return(err);
}
-/** Shutdown background threads, except the page cleaner. */
-static void srv_shutdown_threads()
+/** Shutdown background threads, except the page cleaner.
+@param init_abort true if InnoDB startup was aborted */
+static void srv_shutdown_threads(bool init_abort= false)
{
ut_ad(!srv_undo_sources);
srv_master_timer.reset();
- srv_shutdown_state = SRV_SHUTDOWN_EXIT_THREADS;
+ /* If InnoDB startup was aborted, do not change
+ srv_shutdown_state: innodb_shutdown() will call
+ innodb_preshutdown(), which would move srv_shutdown_state
+ back to SRV_SHUTDOWN_INITIATED */
+ if (!init_abort) {
+ srv_shutdown_state = SRV_SHUTDOWN_EXIT_THREADS;
+ }
if (purge_sys.enabled()) {
srv_purge_shutdown();
@@ -1050,7 +1057,7 @@ srv_init_abort_low(
}
srv_shutdown_bg_undo_sources();
- srv_shutdown_threads();
+ srv_shutdown_threads(true);
return(err);
}
diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c
index 3e312269b7f..1c52f89021e 100644
--- a/storage/myisam/myisamchk.c
+++ b/storage/myisam/myisamchk.c
@@ -129,6 +129,17 @@ int main(int argc, char **argv)
#endif
} /* main */
+
+/* Free memory and exit */
+
+void __attribute__ ((noreturn)) my_exit(int exit_state)
+{
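+ /* default_argv was allocated by load_defaults(); my_end()
+ releases my_sys resources before terminating */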
+ free_defaults(default_argv);
+ my_end(MY_CHECK_ERROR);
+ exit(exit_state);
+}
+
+
enum options_mc {
OPT_CHARSETS_DIR=256, OPT_SET_COLLATION,OPT_START_CHECK_POS,
OPT_CORRECT_CHECKSUM, OPT_CREATE_MISSING_KEYS, OPT_KEY_BUFFER_SIZE,
@@ -660,7 +671,7 @@ get_one_option(const struct my_option *opt,
fprintf(stderr,
"The value of the sort key is bigger than max key: %d.\n",
MI_MAX_KEY);
- exit(1);
+ my_exit(1);
}
}
break;
@@ -694,7 +705,9 @@ get_one_option(const struct my_option *opt,
break;
case 'V':
print_version();
- exit(0);
+ my_exit(0);
case OPT_CORRECT_CHECKSUM:
if (argument == disabled_my_option)
check_param.testflag&= ~T_CALC_CHECKSUM;
@@ -711,7 +724,7 @@ get_one_option(const struct my_option *opt,
FIND_TYPE_BASIC)) <= 0)
{
fprintf(stderr, "Invalid value of stats_method: %s.\n", argument);
- exit(1);
+ my_exit(1);
}
switch (method-1) {
case 0:
@@ -735,10 +748,10 @@ get_one_option(const struct my_option *opt,
#endif
case 'H':
my_print_help(my_long_options);
- exit(0);
+ my_exit(0);
case '?':
usage();
- exit(0);
+ my_exit(0);
}
return 0;
}
@@ -754,7 +767,7 @@ static void get_options(register int *argc,register char ***argv)
check_param.testflag|=T_WRITE_LOOP;
if ((ho_error=handle_options(argc, argv, my_long_options, get_one_option)))
- exit(ho_error);
+ my_exit(ho_error);
/* If using repair, then update checksum if one uses --update-state */
if ((check_param.testflag & T_UPDATE_STATE) &&
@@ -764,7 +777,7 @@ static void get_options(register int *argc,register char ***argv)
if (*argc == 0)
{
usage();
- exit(-1);
+ my_exit(-1);
}
if ((check_param.testflag & T_UNPACK) &&
@@ -773,7 +786,7 @@ static void get_options(register int *argc,register char ***argv)
(void) fprintf(stderr,
"%s: --unpack can't be used with --quick or --sort-records\n",
my_progname_short);
- exit(1);
+ my_exit(1);
}
if ((check_param.testflag & T_READONLY) &&
(check_param.testflag &
@@ -783,11 +796,11 @@ static void get_options(register int *argc,register char ***argv)
(void) fprintf(stderr,
"%s: Can't use --readonly when repairing or sorting\n",
my_progname_short);
- exit(1);
+ my_exit(1);
}
if (init_tmpdir(&myisamchk_tmpdir, opt_tmpdir))
- exit(1);
+ my_exit(1);
check_param.tmpdir=&myisamchk_tmpdir;
check_param.key_cache_block_size= opt_key_cache_block_size;
@@ -795,7 +808,7 @@ static void get_options(register int *argc,register char ***argv)
if (set_collation_name)
if (!(set_collation= get_charset_by_name(set_collation_name,
MYF(MY_UTF8_IS_UTF8MB3 | MY_WME))))
- exit(1);
+ my_exit(1);
myisam_block_size=(uint) 1 << my_bit_log2_uint64(opt_myisam_block_size);
return;
diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_32907.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_32907.result
new file mode 100644
index 00000000000..9e8dbebc86c
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_32907.result
@@ -0,0 +1,21 @@
+for master_1
+for child2
+for child3
+set spider_same_server_link= 1;
+CREATE SERVER srv FOREIGN DATA WRAPPER mysql
+OPTIONS (SOCKET "$MASTER_1_MYSOCK", DATABASE 'test',user 'root');
+create table t2 (c int);
+create table t1 (c int) ENGINE=Spider
+COMMENT='WRAPPER "mysql", srv "srv",TABLE "t2"';
+insert into t1 values (3), (NULL);
+explain select nvl(sum(c), 0) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2
+select nvl(sum(c), 0) from t1;
+nvl(sum(c), 0)
+3
+drop table t1, t2;
+drop server srv;
+for master_1
+for child2
+for child3
diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_32907.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_32907.test
new file mode 100644
index 00000000000..50835f4e47d
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_32907.test
@@ -0,0 +1,26 @@
+--disable_query_log
+--disable_result_log
+--source ../../t/test_init.inc
+--enable_result_log
+--enable_query_log
+set spider_same_server_link= 1;
+evalp CREATE SERVER srv FOREIGN DATA WRAPPER mysql
+OPTIONS (SOCKET "$MASTER_1_MYSOCK", DATABASE 'test',user 'root');
+
+create table t2 (c int);
+create table t1 (c int) ENGINE=Spider
+COMMENT='WRAPPER "mysql", srv "srv",TABLE "t2"';
+
+insert into t1 values (3), (NULL);
+
+explain select nvl(sum(c), 0) from t1;
+select nvl(sum(c), 0) from t1;
+drop table t1, t2;
+
+drop server srv;
+
+--disable_query_log
+--disable_result_log
+--source ../../t/test_deinit.inc
+--enable_result_log
+--enable_query_log
diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc
index 7c670450e58..c17ddbc7eb7 100644
--- a/storage/spider/spd_db_conn.cc
+++ b/storage/spider/spd_db_conn.cc
@@ -6913,11 +6913,21 @@ int spider_db_print_item_type(
DBUG_ENTER("spider_db_print_item_type");
DBUG_PRINT("info",("spider COND type=%d", item->type()));
- if (item->type() == Item::REF_ITEM &&
- ((Item_ref*)item)->ref_type() == Item_ref::DIRECT_REF)
+ if (item->type() == Item::REF_ITEM)
{
- item= item->real_item();
- DBUG_PRINT("info",("spider new COND type=%d", item->type()));
+ const auto rtype= ((Item_ref*)item)->ref_type();
+ /*
+ The presence of an Item_aggregate_ref tends to lead to the query
+ being broken at the execution stage.
+ */
+ if (rtype == Item_ref::AGGREGATE_REF && !str)
+ DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM);
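+ /* The check pass (str == NULL) is expected to have already
+ skipped any condition containing an Item_aggregate_ref. */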
+ DBUG_ASSERT(rtype != Item_ref::AGGREGATE_REF);
+ if (rtype == Item_ref::DIRECT_REF)
+ {
+ item= item->real_item();
+ DBUG_PRINT("info", ("spider new COND type=%d", item->type()));
+ }
}
switch (item->type())
{
@@ -7345,6 +7355,10 @@ int spider_db_open_item_ref(
}
DBUG_RETURN(0);
}
+ /*
+ TODO: MDEV-25116 is the same case as MDEV-32907 (having an
+ Item_aggregate_ref). Perhaps the following is redundant.
+ */
DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM); // MDEV-25116
}
DBUG_RETURN(spider_db_open_item_ident((Item_ident *) item_ref, spider, str,
diff --git a/storage/spider/spd_db_include.h b/storage/spider/spd_db_include.h
index d9cebeed7c8..fe014468f5c 100644
--- a/storage/spider/spd_db_include.h
+++ b/storage/spider/spd_db_include.h
@@ -840,9 +840,6 @@ public:
virtual void free_result() = 0;
virtual SPIDER_DB_ROW *current_row() = 0;
virtual SPIDER_DB_ROW *fetch_row(MY_BITMAP *skips = NULL) = 0;
- virtual SPIDER_DB_ROW *fetch_row_from_result_buffer(
- spider_db_result_buffer *spider_res_buf
- ) = 0;
virtual SPIDER_DB_ROW *fetch_row_from_tmp_table(
TABLE *tmp_table
) = 0;
diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc
index 45d2c966fb7..4ea3c8fa5c1 100644
--- a/storage/spider/spd_db_mysql.cc
+++ b/storage/spider/spd_db_mysql.cc
@@ -759,30 +759,6 @@ SPIDER_DB_ROW *spider_db_mbase_result::fetch_row(MY_BITMAP *skips)
DBUG_RETURN((SPIDER_DB_ROW *) &row);
}
-SPIDER_DB_ROW *spider_db_mbase_result::fetch_row_from_result_buffer(
- spider_db_result_buffer *spider_res_buf
-) {
- DBUG_ENTER("spider_db_mbase_result::fetch_row_from_result_buffer");
- DBUG_PRINT("info",("spider this=%p", this));
- if (!(row.row = mysql_fetch_row(db_result)))
- {
- if (mysql_errno(((spider_db_mbase *) db_conn)->db_conn))
- {
- store_error_num = mysql_errno(((spider_db_mbase *) db_conn)->db_conn);
- my_message(store_error_num,
- mysql_error(((spider_db_mbase *) db_conn)->db_conn), MYF(0));
- } else
- store_error_num = HA_ERR_END_OF_FILE;
- DBUG_RETURN(NULL);
- }
- row.lengths = mysql_fetch_lengths(db_result);
- row.field_count = mysql_num_fields(db_result);
- row.row_first = row.row;
- row.lengths_first = row.lengths;
- row.record_size = 0;
- DBUG_RETURN((SPIDER_DB_ROW *) &row);
-}
-
SPIDER_DB_ROW *spider_db_mbase_result::fetch_row_from_tmp_table(
TABLE *tmp_table
) {
@@ -4836,13 +4812,9 @@ int spider_db_mbase_util::open_item_func(
) {
DBUG_ENTER("spider_db_mbase_util::open_item_func");
- int error = check_item_func(item_func, spider, alias,
- alias_length, use_fields, fields);
- if (error)
- DBUG_RETURN(error);
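+ /*
+ When str is NULL this is the check pass that only decides
+ whether the item can be pushed down; when str is given, the
+ item has already been checked and is only printed.
+ */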
if (!str)
- DBUG_RETURN(0);
-
+ DBUG_RETURN(check_item_func(item_func, spider, alias,
+ alias_length, use_fields, fields));
DBUG_RETURN(print_item_func(item_func, spider, str, alias,
alias_length, use_fields, fields));
}
@@ -5002,8 +4974,6 @@ int spider_db_mbase_util::print_item_func(
int use_pushdown_udf, case_when_start, case_when_count;
bool merge_func = FALSE, case_with_else;
DBUG_ENTER("spider_db_mbase_util::print_item_func");
- DBUG_ASSERT(!check_item_func(item_func, spider, alias, alias_length,
- use_fields, fields));
DBUG_ASSERT(str);
if (str->reserve(SPIDER_SQL_OPEN_PAREN_LEN))
diff --git a/storage/spider/spd_db_mysql.h b/storage/spider/spd_db_mysql.h
index 5dc5218e6f1..3279bc99d5e 100644
--- a/storage/spider/spd_db_mysql.h
+++ b/storage/spider/spd_db_mysql.h
@@ -301,9 +301,6 @@ public:
void free_result() override;
SPIDER_DB_ROW *current_row() override;
SPIDER_DB_ROW *fetch_row(MY_BITMAP *) override;
- SPIDER_DB_ROW *fetch_row_from_result_buffer(
- spider_db_result_buffer *spider_res_buf
- ) override;
SPIDER_DB_ROW *fetch_row_from_tmp_table(
TABLE *tmp_table
) override;
diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc
index db63f739cec..806cee41c2c 100644
--- a/storage/spider/spd_table.cc
+++ b/storage/spider/spd_table.cc
@@ -6033,12 +6033,12 @@ int spider_db_done(
for (roop_count = spider_param_table_crd_thread_count() - 1;
roop_count >= 0; roop_count--)
{
- spider_free_crd_threads(&spider_table_crd_threads[roop_count]);
+ spider_free_sts_crd_threads(&spider_table_crd_threads[roop_count]);
}
for (roop_count = spider_param_table_sts_thread_count() - 1;
roop_count >= 0; roop_count--)
{
- spider_free_sts_threads(&spider_table_sts_threads[roop_count]);
+ spider_free_sts_crd_threads(&spider_table_sts_threads[roop_count]);
}
spider_free(NULL, spider_table_sts_threads, MYF(0));
@@ -6506,7 +6506,8 @@ int spider_db_init(
roop_count < (int) spider_param_table_sts_thread_count();
roop_count++)
{
- if ((error_num = spider_create_sts_threads(&spider_table_sts_threads[roop_count])))
+ if ((error_num = spider_create_sts_crd_threads(&spider_table_sts_threads[roop_count],
+ true)))
{
goto error_init_table_sts_threads;
}
@@ -6515,7 +6516,8 @@ int spider_db_init(
roop_count < (int) spider_param_table_crd_thread_count();
roop_count++)
{
- if ((error_num = spider_create_crd_threads(&spider_table_crd_threads[roop_count])))
+ if ((error_num = spider_create_sts_crd_threads(&spider_table_crd_threads[roop_count],
+ false)))
{
goto error_init_table_crd_threads;
}
@@ -6553,13 +6555,13 @@ error_init_dbton:
error_init_table_crd_threads:
for (; roop_count >= 0; roop_count--)
{
- spider_free_crd_threads(&spider_table_crd_threads[roop_count]);
+ spider_free_sts_crd_threads(&spider_table_crd_threads[roop_count]);
}
roop_count = spider_param_table_sts_thread_count() - 1;
error_init_table_sts_threads:
for (; roop_count >= 0; roop_count--)
{
- spider_free_sts_threads(&spider_table_sts_threads[roop_count]);
+ spider_free_sts_crd_threads(&spider_table_sts_threads[roop_count]);
}
error_alloc_table_sts_crd_threads:
spider_free(NULL, spider_table_sts_threads, MYF(0));
@@ -8563,32 +8565,46 @@ void spider_free_spider_object_for_share(
DBUG_VOID_RETURN;
}
-int spider_create_sts_threads(
- SPIDER_THREAD *spider_thread
+int spider_create_sts_crd_threads(
+ SPIDER_THREAD *spider_thread,
+ bool is_sts
) {
int error_num;
- DBUG_ENTER("spider_create_sts_threads");
- if (mysql_mutex_init(spd_key_mutex_bg_stss,
+ DBUG_ENTER("spider_create_sts_crd_threads");
+#ifdef HAVE_PSI_INTERFACE
+ PSI_mutex_key mutex_bg= is_sts ? spd_key_mutex_bg_stss :
+ spd_key_mutex_bg_crds;
+ PSI_cond_key cond_bg= is_sts ? spd_key_cond_bg_stss :
+ spd_key_cond_bg_crds;
+ PSI_cond_key cond_bg_syncs= is_sts ? spd_key_cond_bg_sts_syncs :
+ spd_key_cond_bg_crd_syncs;
+#endif
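+ /* The PSI keys are consumed only when HAVE_PSI_INTERFACE is
+ defined; otherwise mysql_mutex_init()/mysql_cond_init()
+ discard the key argument. */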
+ if (mysql_mutex_init(mutex_bg,
&spider_thread->mutex, MY_MUTEX_INIT_FAST))
{
error_num = HA_ERR_OUT_OF_MEM;
goto error_mutex_init;
}
- if (mysql_cond_init(spd_key_cond_bg_stss,
+ if (mysql_cond_init(cond_bg,
&spider_thread->cond, NULL))
{
error_num = HA_ERR_OUT_OF_MEM;
goto error_cond_init;
}
- if (mysql_cond_init(spd_key_cond_bg_sts_syncs,
+ if (mysql_cond_init(cond_bg_syncs,
&spider_thread->sync_cond, NULL))
{
error_num = HA_ERR_OUT_OF_MEM;
goto error_sync_cond_init;
}
- if (mysql_thread_create(spd_key_thd_bg_stss, &spider_thread->thread,
- &spider_pt_attr, spider_table_bg_sts_action, (void *) spider_thread)
- )
+ error_num = is_sts ?
+ mysql_thread_create(spd_key_thd_bg_stss, &spider_thread->thread,
+ &spider_pt_attr, spider_table_bg_sts_action,
+ (void *) spider_thread) :
+ mysql_thread_create(spd_key_thd_bg_crds, &spider_thread->thread,
+ &spider_pt_attr, spider_table_bg_crd_action,
+ (void *) spider_thread);
+ if (error_num)
{
error_num = HA_ERR_OUT_OF_MEM;
goto error_thread_create;
@@ -8605,11 +8621,11 @@ error_mutex_init:
DBUG_RETURN(error_num);
}
-void spider_free_sts_threads(
+void spider_free_sts_crd_threads(
SPIDER_THREAD *spider_thread
) {
bool thread_killed;
- DBUG_ENTER("spider_free_sts_threads");
+ DBUG_ENTER("spider_free_sts_crd_threads");
pthread_mutex_lock(&spider_thread->mutex);
thread_killed = spider_thread->killed;
spider_thread->killed = TRUE;
@@ -8631,86 +8647,20 @@ void spider_free_sts_threads(
DBUG_VOID_RETURN;
}
-int spider_create_crd_threads(
- SPIDER_THREAD *spider_thread
-) {
- int error_num;
- DBUG_ENTER("spider_create_crd_threads");
- if (mysql_mutex_init(spd_key_mutex_bg_crds,
- &spider_thread->mutex, MY_MUTEX_INIT_FAST))
- {
- error_num = HA_ERR_OUT_OF_MEM;
- goto error_mutex_init;
- }
- if (mysql_cond_init(spd_key_cond_bg_crds,
- &spider_thread->cond, NULL))
- {
- error_num = HA_ERR_OUT_OF_MEM;
- goto error_cond_init;
- }
- if (mysql_cond_init(spd_key_cond_bg_crd_syncs,
- &spider_thread->sync_cond, NULL))
- {
- error_num = HA_ERR_OUT_OF_MEM;
- goto error_sync_cond_init;
- }
- if (mysql_thread_create(spd_key_thd_bg_crds, &spider_thread->thread,
- &spider_pt_attr, spider_table_bg_crd_action, (void *) spider_thread)
- )
- {
- error_num = HA_ERR_OUT_OF_MEM;
- goto error_thread_create;
- }
- DBUG_RETURN(0);
-
-error_thread_create:
- pthread_cond_destroy(&spider_thread->sync_cond);
-error_sync_cond_init:
- pthread_cond_destroy(&spider_thread->cond);
-error_cond_init:
- pthread_mutex_destroy(&spider_thread->mutex);
-error_mutex_init:
- DBUG_RETURN(error_num);
-}
-
-void spider_free_crd_threads(
- SPIDER_THREAD *spider_thread
-) {
- bool thread_killed;
- DBUG_ENTER("spider_free_crd_threads");
- pthread_mutex_lock(&spider_thread->mutex);
- thread_killed = spider_thread->killed;
- spider_thread->killed = TRUE;
- if (!thread_killed)
- {
- if (spider_thread->thd_wait)
- {
- pthread_cond_signal(&spider_thread->cond);
- }
- pthread_cond_wait(&spider_thread->sync_cond, &spider_thread->mutex);
- }
- pthread_mutex_unlock(&spider_thread->mutex);
- pthread_join(spider_thread->thread, NULL);
- pthread_cond_destroy(&spider_thread->sync_cond);
- pthread_cond_destroy(&spider_thread->cond);
- pthread_mutex_destroy(&spider_thread->mutex);
- spider_thread->thd_wait = FALSE;
- spider_thread->killed = FALSE;
- DBUG_VOID_RETURN;
-}
-
-void *spider_table_bg_sts_action(
- void *arg
+static void *spider_table_bg_sts_crd_action(
+ void *arg,
+ bool is_sts
) {
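+ /* Shared body of the background statistics (sts) and
+ cardinality (crd) threads; is_sts selects which of the two
+ jobs to execute. */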
SPIDER_THREAD *thread = (SPIDER_THREAD *) arg;
SPIDER_SHARE *share;
SPIDER_TRX *trx;
int error_num;
ha_spider *spider;
+ TABLE *table; /* only needed for crd */
SPIDER_CONN **conns;
THD *thd;
my_thread_init();
- DBUG_ENTER("spider_table_bg_sts_action");
+ DBUG_ENTER("spider_table_bg_sts_crd_action");
/* init start */
pthread_mutex_lock(&thread->mutex);
if (!(thd = spider_create_sys_thd(thread)))
@@ -8725,7 +8675,8 @@ void *spider_table_bg_sts_action(
#ifdef HAVE_PSI_INTERFACE
mysql_thread_set_psi_id(thd->thread_id);
#endif
- thd_proc_info(thd, "Spider table background statistics action handler");
+ thd_proc_info(thd, "Spider table background statistics/cardinality"
+ " action handler");
if (!(trx = spider_get_trx(NULL, FALSE, &error_num)))
{
spider_destroy_sys_thd(thd);
@@ -8741,10 +8692,6 @@ void *spider_table_bg_sts_action(
trx->thd = thd;
/* init end */
- if (thd->killed)
- {
- thread->killed = TRUE;
- }
if (thd->killed)
{
thread->killed = TRUE;
@@ -8752,10 +8699,10 @@ void *spider_table_bg_sts_action(
while (TRUE)
{
- DBUG_PRINT("info",("spider bg sts loop start"));
+ DBUG_PRINT("info",("spider bg sts/crd loop start"));
if (thread->killed)
{
- DBUG_PRINT("info",("spider bg sts kill start"));
+ DBUG_PRINT("info",("spider bg sts/crd kill start"));
trx->thd = NULL;
spider_free_trx(trx, TRUE);
spider_destroy_sys_thd(thd);
@@ -8769,7 +8716,7 @@ void *spider_table_bg_sts_action(
}
if (!thread->queue_first)
{
- DBUG_PRINT("info",("spider bg sts has no job"));
+ DBUG_PRINT("info",("spider bg sts/crd has no job"));
thread->thd_wait = TRUE;
pthread_cond_wait(&thread->cond, &thread->mutex);
thread->thd_wait = FALSE;
@@ -8778,155 +8725,16 @@ void *spider_table_bg_sts_action(
continue;
}
share = (SPIDER_SHARE *) thread->queue_first;
- share->sts_working = TRUE;
+ if (is_sts)
+ share->sts_working = TRUE;
+ else
+ share->crd_working = TRUE;
pthread_mutex_unlock(&thread->mutex);
-
- spider = share->sts_spider;
- conns = spider->conns;
- if (spider->search_link_idx < 0)
- {
- spider->wide_handler->trx = trx;
- spider_trx_set_link_idx_for_all(spider);
- spider->search_link_idx = spider_conn_first_link_idx(thd,
- share->link_statuses, share->access_balances, spider->conn_link_idx,
- share->link_count, SPIDER_LINK_STATUS_OK);
- }
- if (spider->search_link_idx >= 0)
- {
- DBUG_PRINT("info",
- ("spider difftime=%f",
- difftime(share->bg_sts_try_time, share->sts_get_time)));
- DBUG_PRINT("info",
- ("spider bg_sts_interval=%f", share->bg_sts_interval));
- if (difftime(share->bg_sts_try_time, share->sts_get_time) >=
- share->bg_sts_interval)
- {
- if (!conns[spider->search_link_idx])
- {
- spider_get_conn(share, spider->search_link_idx,
- share->conn_keys[spider->search_link_idx], trx,
- spider, FALSE, FALSE, &error_num);
- if (conns[spider->search_link_idx])
- {
- conns[spider->search_link_idx]->error_mode = 0;
- } else {
- spider->search_link_idx = -1;
- }
- }
- DBUG_PRINT("info",
- ("spider search_link_idx=%d", spider->search_link_idx));
- if (spider->search_link_idx >= 0 && conns[spider->search_link_idx])
- {
- if (spider_get_sts(share, spider->search_link_idx,
- share->bg_sts_try_time, spider,
- share->bg_sts_interval, share->bg_sts_mode,
- share->bg_sts_sync,
- 2, HA_STATUS_CONST | HA_STATUS_VARIABLE))
- {
- spider->search_link_idx = -1;
- }
- }
- }
- }
- memset(spider->need_mons, 0, sizeof(int) * share->link_count);
- pthread_mutex_lock(&thread->mutex);
- if (thread->queue_first == thread->queue_last)
- {
- thread->queue_first = NULL;
- thread->queue_last = NULL;
- } else {
- thread->queue_first = share->sts_next;
- share->sts_next->sts_prev = NULL;
- share->sts_next = NULL;
- }
- share->sts_working = FALSE;
- share->sts_wait = FALSE;
- if (thread->first_free_wait)
- {
- pthread_cond_signal(&thread->sync_cond);
- pthread_cond_wait(&thread->cond, &thread->mutex);
- if (thd->killed)
- thread->killed = TRUE;
- }
- }
-}
-
-void *spider_table_bg_crd_action(
- void *arg
-) {
- SPIDER_THREAD *thread = (SPIDER_THREAD *) arg;
- SPIDER_SHARE *share;
- SPIDER_TRX *trx;
- int error_num;
- ha_spider *spider;
- TABLE *table;
- SPIDER_CONN **conns;
- THD *thd;
- my_thread_init();
- DBUG_ENTER("spider_table_bg_crd_action");
- /* init start */
- pthread_mutex_lock(&thread->mutex);
- if (!(thd = spider_create_sys_thd(thread)))
- {
- thread->thd_wait = FALSE;
- thread->killed = FALSE;
- pthread_mutex_unlock(&thread->mutex);
- my_thread_end();
- DBUG_RETURN(NULL);
- }
- SPIDER_set_next_thread_id(thd);
-#ifdef HAVE_PSI_INTERFACE
- mysql_thread_set_psi_id(thd->thread_id);
-#endif
- thd_proc_info(thd, "Spider table background cardinality action handler");
- if (!(trx = spider_get_trx(NULL, FALSE, &error_num)))
- {
- spider_destroy_sys_thd(thd);
- thread->thd_wait = FALSE;
- thread->killed = FALSE;
- pthread_mutex_unlock(&thread->mutex);
-#if !defined(MYSQL_DYNAMIC_PLUGIN) || !defined(_WIN32)
- set_current_thd(nullptr);
-#endif
- my_thread_end();
- DBUG_RETURN(NULL);
- }
- trx->thd = thd;
- /* init end */
-
- while (TRUE)
- {
- DBUG_PRINT("info",("spider bg crd loop start"));
- if (thread->killed)
- {
- DBUG_PRINT("info",("spider bg crd kill start"));
- trx->thd = NULL;
- spider_free_trx(trx, TRUE);
- spider_destroy_sys_thd(thd);
- pthread_cond_signal(&thread->sync_cond);
- pthread_mutex_unlock(&thread->mutex);
-#if !defined(MYSQL_DYNAMIC_PLUGIN) || !defined(_WIN32)
- set_current_thd(nullptr);
-#endif
- my_thread_end();
- DBUG_RETURN(NULL);
- }
- if (!thread->queue_first)
- {
- DBUG_PRINT("info",("spider bg crd has no job"));
- thread->thd_wait = TRUE;
- pthread_cond_wait(&thread->cond, &thread->mutex);
- thread->thd_wait = FALSE;
- if (thd->killed)
- thread->killed = TRUE;
- continue;
- }
- share = (SPIDER_SHARE *) thread->queue_first;
- share->crd_working = TRUE;
- pthread_mutex_unlock(&thread->mutex);
-
table = &share->table;
- spider = share->crd_spider;
+ if (is_sts)
+ spider = share->sts_spider;
+ else
+ spider = share->crd_spider;
conns = spider->conns;
if (spider->search_link_idx < 0)
{
@@ -8938,13 +8746,13 @@ void *spider_table_bg_crd_action(
}
if (spider->search_link_idx >= 0)
{
- DBUG_PRINT("info",
- ("spider difftime=%f",
- difftime(share->bg_crd_try_time, share->crd_get_time)));
- DBUG_PRINT("info",
- ("spider bg_crd_interval=%f", share->bg_crd_interval));
- if (difftime(share->bg_crd_try_time, share->crd_get_time) >=
- share->bg_crd_interval)
+ double diff_time= is_sts ?
+ difftime(share->bg_sts_try_time, share->sts_get_time) :
+ difftime(share->bg_crd_try_time, share->crd_get_time);
+ double interval= is_sts? share->bg_sts_interval : share->bg_crd_interval;
+ DBUG_PRINT("info", ("spider difftime=%f", diff_time));
+ DBUG_PRINT("info", ("spider bg_sts_interval=%f", interval));
+ if (diff_time >= interval)
{
if (!conns[spider->search_link_idx])
{
@@ -8962,11 +8770,17 @@ void *spider_table_bg_crd_action(
("spider search_link_idx=%d", spider->search_link_idx));
if (spider->search_link_idx >= 0 && conns[spider->search_link_idx])
{
- if (spider_get_crd(share, spider->search_link_idx,
- share->bg_crd_try_time, spider, table,
- share->bg_crd_interval, share->bg_crd_mode,
- share->bg_crd_sync,
- 2))
+ int result = is_sts ?
+ spider_get_sts(share, spider->search_link_idx,
+ share->bg_sts_try_time, spider,
+ share->bg_sts_interval, share->bg_sts_mode,
+ share->bg_sts_sync,
+ 2, HA_STATUS_CONST | HA_STATUS_VARIABLE) :
+ spider_get_crd(share, spider->search_link_idx,
+ share->bg_crd_try_time, spider, table,
+ share->bg_crd_interval, share->bg_crd_mode,
+ share->bg_crd_sync, 2);
+ if (result)
{
spider->search_link_idx = -1;
}
@@ -8980,12 +8794,29 @@ void *spider_table_bg_crd_action(
thread->queue_first = NULL;
thread->queue_last = NULL;
} else {
- thread->queue_first = share->crd_next;
- share->crd_next->crd_prev = NULL;
- share->crd_next = NULL;
+ if (is_sts)
+ {
+ thread->queue_first = share->sts_next;
+ share->sts_next->sts_prev = NULL;
+ share->sts_next = NULL;
+ }
+ else
+ {
+ thread->queue_first = share->crd_next;
+ share->crd_next->crd_prev = NULL;
+ share->crd_next = NULL;
+ }
+ }
+ if (is_sts)
+ {
+ share->sts_working= FALSE;
+ share->sts_wait= FALSE;
+ }
+ else
+ {
+ share->crd_working= FALSE;
+ share->crd_wait= FALSE;
}
- share->crd_working = FALSE;
- share->crd_wait = FALSE;
if (thread->first_free_wait)
{
pthread_cond_signal(&thread->sync_cond);
@@ -8996,6 +8827,18 @@ void *spider_table_bg_crd_action(
}
}
+void *spider_table_bg_sts_action(void *arg)
+{
+ DBUG_ENTER("spider_table_bg_sts_action");
+ DBUG_RETURN(spider_table_bg_sts_crd_action(arg, true));
+}
+
+void *spider_table_bg_crd_action(void *arg)
+{
+ DBUG_ENTER("spider_table_bg_crd_action");
+ DBUG_RETURN(spider_table_bg_sts_crd_action(arg, false));
+}
+
void spider_table_add_share_to_sts_thread(
SPIDER_SHARE *share
) {
diff --git a/storage/spider/spd_table.h b/storage/spider/spd_table.h
index e6d14600389..18c26822187 100644
--- a/storage/spider/spd_table.h
+++ b/storage/spider/spd_table.h
@@ -468,19 +468,12 @@ void spider_free_spider_object_for_share(
ha_spider **spider
);
-int spider_create_sts_threads(
- SPIDER_THREAD *spider_thread
+int spider_create_sts_crd_threads(
+ SPIDER_THREAD *spider_thread,
+ bool is_sts
);
-void spider_free_sts_threads(
- SPIDER_THREAD *spider_thread
-);
-
-int spider_create_crd_threads(
- SPIDER_THREAD *spider_thread
-);
-
-void spider_free_crd_threads(
+void spider_free_sts_crd_threads(
SPIDER_THREAD *spider_thread
);
diff --git a/strings/my_vsnprintf.c b/strings/my_vsnprintf.c
index f7cbb507e35..a23e9904109 100644
--- a/strings/my_vsnprintf.c
+++ b/strings/my_vsnprintf.c
@@ -739,13 +739,7 @@ size_t my_vsnprintf_ex(CHARSET_INFO *cs, char *to, size_t n,
else if (*fmt == 'f' || *fmt == 'g')
{
double d;
-#if __has_feature(memory_sanitizer) /* QQ: MSAN has double trouble? */
- __msan_check_mem_is_initialized(ap, sizeof(double));
-#endif
d= va_arg(ap, double);
-#if __has_feature(memory_sanitizer) /* QQ: MSAN has double trouble? */
- __msan_unpoison(&d, sizeof(double));
-#endif
to= process_dbl_arg(to, end, width, d, *fmt);
continue;
}
diff --git a/tpool/aio_libaio.cc b/tpool/aio_libaio.cc
index b78afbc6b66..06e93f179f5 100644
--- a/tpool/aio_libaio.cc
+++ b/tpool/aio_libaio.cc
@@ -17,6 +17,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111 - 1301 USA*/
#include
#include
#include
+#include "my_valgrind.h"
/**
Invoke the io_getevents() system call, without timeout parameter.
@@ -115,6 +116,9 @@ class aio_libaio final : public aio
abort();
goto end;
}
+#if __has_feature(memory_sanitizer)
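+ /* The events array was filled by the io_getevents() system
+ call, which MSAN does not intercept; mark it initialized. */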
+ MEM_MAKE_DEFINED(events, ret * sizeof *events);
+#endif
for (int i= 0; i < ret; i++)
{
const io_event &event= events[i];
@@ -126,6 +130,10 @@ class aio_libaio final : public aio
}
else
{
+#if __has_feature(memory_sanitizer)
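+ /* A completed read filled m_buffer from the kernel; tell MSAN
+ the data is initialized. */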
+ if (iocb->m_opcode == aio_opcode::AIO_PREAD)
+ MEM_MAKE_DEFINED(iocb->m_buffer, event.res);
+#endif
iocb->m_ret_len= event.res;
iocb->m_err= 0;
finish_synchronous(iocb);
diff --git a/win/packaging/heidisql.cmake b/win/packaging/heidisql.cmake
index 0deffe78178..33d3e84b875 100644
--- a/win/packaging/heidisql.cmake
+++ b/win/packaging/heidisql.cmake
@@ -1,4 +1,4 @@
-SET(HEIDISQL_BASE_NAME "HeidiSQL_12.10_64_Portable")
+SET(HEIDISQL_BASE_NAME "HeidiSQL_12.11_64_Portable")
SET(HEIDISQL_ZIP "${HEIDISQL_BASE_NAME}.zip")
SET(HEIDISQL_URL "https://www.heidisql.com/downloads/releases/${HEIDISQL_ZIP}")
SET(HEIDISQL_DOWNLOAD_DIR ${THIRD_PARTY_DOWNLOAD_LOCATION}/${HEIDISQL_BASE_NAME})