
Merge branch '10.6' into 10.11

Sergei Golubchik
2025-07-28 18:06:31 +02:00
113 changed files with 2937 additions and 832 deletions

View File

@@ -4562,7 +4562,7 @@ xb_register_filter_entry(
 	databases_hash->cell_get(my_crc32c(0, name, p - name))
 		->search(&xb_filter_entry_t::name_hash,
 			 [dbname](xb_filter_entry_t* f)
-			 { return f && !strcmp(f->name, dbname); });
+			 { return !f || !strcmp(f->name, dbname); });
 	if (!*prev) {
 		(*prev = xb_new_filter_entry(dbname))
 			->has_tables = TRUE;
@@ -4696,7 +4696,7 @@ xb_load_list_file(
 	FILE* fp;
 	/* read and store the filenames */
-	fp = fopen(filename, "r");
+	fp = fopen(filename, "rt");
 	if (!fp) {
 		die("Can't open %s",
 		    filename);
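
The lambda change in the first hunk is easy to misread: the search predicate also terminates the probe, so it must accept the empty slot. With the old `f && !strcmp(...)`, a lookup that misses keeps walking past the end of the chain; with `!f || !strcmp(...)` it stops at either a matching entry or the first free slot, which the following `if (!*prev)` then fills. A minimal sketch of that find-or-insert pattern, with illustrative names (`Entry`, `search`, `find_or_insert_slot`) that are not the server's actual types:

    #include <cstring>

    struct Entry { const char* name; Entry* next; };

    // Walk a bucket chain; return the address of the first slot the
    // predicate accepts -- either a matching entry or the terminating
    // empty slot.
    template <typename Pred>
    static Entry** search(Entry** slot, Pred pred)
    {
      while (!pred(*slot))
        slot = &(*slot)->next;
      return slot;
    }

    static Entry** find_or_insert_slot(Entry** bucket, const char* dbname)
    {
      // The predicate must accept the null slot, otherwise a miss
      // dereferences past the end of the chain -- the bug fixed above.
      return search(bucket, [dbname](Entry* f)
                            { return !f || !strcmp(f->name, dbname); });
    }

    int main()
    {
      Entry a{"mysql", nullptr};
      Entry* bucket = &a;
      Entry** hit = find_or_insert_slot(&bucket, "mysql");  // match: &bucket
      Entry** gap = find_or_insert_slot(&bucket, "test");   // miss: &a.next
      return (*hit == &a && *gap == nullptr) ? 0 : 1;
    }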

View File

@@ -134,6 +134,8 @@ if(MSVC)
   remove_definitions(-DHAVE_CONFIG_H)
   target_compile_definitions(wolfssl PRIVATE
     WOLFSSL_HAVE_MIN WOLFSSL_HAVE_MAX)
+  # Workaround https://github.com/wolfSSL/wolfssl/issues/9004
+  target_compile_definitions(wolfssl PRIVATE WOLFSSL_NO_SOCK SOCKET_INVALID=-1)
 endif()
 CONFIGURE_FILE(user_settings.h.in user_settings.h)
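
For reference, WOLFSSL_NO_SOCK is wolfSSL's switch for building without its built-in socket I/O layer, and SOCKET_INVALID is the invalid-socket sentinel its headers normally derive from the platform's socket headers; the two defines above presumably sidestep the MSVC build break tracked in the linked issue. A hypothetical user_settings.h fragment with the same effect, assuming the build reads it via WOLFSSL_USER_SETTINGS:

    /* Hypothetical user_settings.h fragment mirroring the CMake defines above. */
    #define WOLFSSL_NO_SOCK         /* build without wolfSSL's socket I/O layer */
    #define SOCKET_INVALID (-1)     /* sentinel otherwise taken from socket headers */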

View File

@@ -5,7 +5,7 @@ if (!$_galera_variables_delta) {
 --let $galera_variables_delta=0
 }
---let $galera_variables_expected=`SELECT 50 + $galera_variables_delta`
+--let $galera_variables_expected=`SELECT 51 + $galera_variables_delta`
 --let $galera_variables_count=`SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep%'`

View File

@@ -2978,4 +2978,60 @@ SELECT GROUP_CONCAT( b, a ORDER BY 2 ) AS f FROM t1;
 f
 nc,mmmmmmmmmmd
 DROP TABLE t1;
+#
+# MDEV-24726 Assertion `0' failed in Field_varstring_compressed::key_cmp
+#
+# VARCHAR
+create table t1 (a varchar(8) compressed) character set utf8mb4;
+create algorithm=temptable view v1 as select * from t1;
+insert into t1 values ('foo'),('bar'),('foo');
+select * from v1 where a in (select a from t1);
+a
+foo
+foo
+bar
+drop view v1;
+drop table t1;
+create table t1 (f1 varchar(8)) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t1 values ('');
+create table t2 (f2 varchar(8) compressed) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t2 values ('a'),('b');
+select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1;
+f1
+drop table t1, t2;
+# BLOB
+create table t1 (a text compressed) character set utf8mb4;
+create algorithm=temptable view v1 as select * from t1;
+insert into t1 values ('foo'),('bar'),('foo');
+select * from v1 where a in (select a from t1);
+a
+foo
+foo
+bar
+drop view v1;
+drop table t1;
+create table t1 (f1 text) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t1 values ('');
+create table t2 (f2 text compressed) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t2 values ('a'),('b');
+select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1;
+f1
+drop table t1, t2;
+#
+# MDEV-16808 Assertion on compressed blob as key field
+#
+set join_cache_level= 3;
+create table t1 (col_blob text) engine=innodb;
+create table t2 (col_blob text compressed) engine=innodb;
+select * from t1 join t2 using ( col_blob );
+col_blob
+drop tables t1, t2;
+create table t (a text compressed,b text) engine=innodb;
+create table t4 like t;
+set session join_cache_level=3;
+select * from (select * from t) as t natural join (select * from t) as t1;
+a	b
+drop tables t, t4;
 # End of 10.5 tests

View File

@@ -519,4 +519,57 @@ INSERT INTO t1 VALUES ('c','n'),('d','mmmmmmmmmm');
 SELECT GROUP_CONCAT( b, a ORDER BY 2 ) AS f FROM t1;
 DROP TABLE t1;
+--echo #
+--echo # MDEV-24726 Assertion `0' failed in Field_varstring_compressed::key_cmp
+--echo #
+--echo # VARCHAR
+create table t1 (a varchar(8) compressed) character set utf8mb4;
+create algorithm=temptable view v1 as select * from t1;
+insert into t1 values ('foo'),('bar'),('foo');
+select * from v1 where a in (select a from t1);
+# cleanup
+drop view v1;
+drop table t1;
+create table t1 (f1 varchar(8)) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t1 values ('');
+create table t2 (f2 varchar(8) compressed) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t2 values ('a'),('b');
+select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1;
+# cleanup
+drop table t1, t2;
+--echo # BLOB
+create table t1 (a text compressed) character set utf8mb4;
+create algorithm=temptable view v1 as select * from t1;
+insert into t1 values ('foo'),('bar'),('foo');
+select * from v1 where a in (select a from t1);
+# cleanup
+drop view v1;
+drop table t1;
+create table t1 (f1 text) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t1 values ('');
+create table t2 (f2 text compressed) charset=eucjpms collate=eucjpms_nopad_bin;
+insert into t2 values ('a'),('b');
+select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1;
+# cleanup
+drop table t1, t2;
+--echo #
+--echo # MDEV-16808 Assertion on compressed blob as key field
+--echo #
+set join_cache_level= 3;
+create table t1 (col_blob text) engine=innodb;
+create table t2 (col_blob text compressed) engine=innodb;
+select * from t1 join t2 using ( col_blob );
+drop tables t1, t2;
+create table t (a text compressed,b text) engine=innodb;
+create table t4 like t;
+set session join_cache_level=3;
+select * from (select * from t) as t natural join (select * from t) as t1;
+drop tables t, t4;
 --echo # End of 10.5 tests

View File

@@ -3432,10 +3432,8 @@ DEFAULT(a) CASE a WHEN 0 THEN 1 ELSE 2 END
 NULL	2
 DROP TABLE t;
 DROP VIEW v;
-#
 # End of 10.2 test
 #
-#
 # MDEV-22703 DEFAULT() on a BLOB column can overwrite the default
 # record, which can cause crashes when accessing already released
 # memory.
@@ -3450,10 +3448,8 @@ length(DEFAULT(h))
 25
 INSERT INTO t1 () VALUES ();
 drop table t1;
-#
 # End of 10.3 test
 #
-#
 # MDEV-26423: MariaDB server crash in Create_tmp_table::finalize
 #
 CREATE TABLE t1 (pk text DEFAULT length(uuid()));
@@ -3483,6 +3479,14 @@ column_name column_default has_default is_nullable
 a	NULL	1	YES
 drop view v1;
 drop table t1;
+#
 # End of 10.4 test
 #
+# MDEV-37320 ASAN errors in Field::is_null / Item_param::assign_default
+#
+create table t1 (f01 timestamp, f03 timestamp);
+insert into t1 () values ();
+create trigger tr before insert on t1 for each row set @a=1;
+prepare stmt from "update t1 set f03 = ?";
+execute stmt using default;
+drop table t1;
+# End of 10.6 test

View File

@@ -2137,9 +2137,8 @@ CREATE ALGORITHM=TEMPTABLE VIEW v AS SELECT * FROM t;
 SELECT DISTINCT DEFAULT(a), CASE a WHEN 0 THEN 1 ELSE 2 END FROM v GROUP BY a WITH ROLLUP;
 DROP TABLE t;
 DROP VIEW v;
---echo #
 --echo # End of 10.2 test
---echo #
 --echo #
 --echo # MDEV-22703 DEFAULT() on a BLOB column can overwrite the default
@@ -2157,9 +2156,7 @@ SELECT length(DEFAULT(h)) FROM t1;
 INSERT INTO t1 () VALUES ();
 drop table t1;
---echo #
 --echo # End of 10.3 test
---echo #
 --echo #
 --echo # MDEV-26423: MariaDB server crash in Create_tmp_table::finalize
@@ -2183,6 +2180,16 @@ select column_name, column_default, column_default is not null as 'has_default',
 drop view v1;
 drop table t1;
+--echo #
 --echo # End of 10.4 test
 --echo #
+--echo # MDEV-37320 ASAN errors in Field::is_null / Item_param::assign_default
+--echo #
+create table t1 (f01 timestamp, f03 timestamp);
+insert into t1 () values ();
+create trigger tr before insert on t1 for each row set @a=1;
+prepare stmt from "update t1 set f03 = ?";
+execute stmt using default;
+drop table t1;
+--echo # End of 10.6 test

View File

@@ -957,10 +957,8 @@ FROM (SELECT * FROM json_test) AS json_test_values;
 json_object("a", json_compact(a), "b", json_compact(b))
 {"a": [1,2,3], "b": {"a":"foo"}}
 DROP TABLE json_test;
-#
 # End of 10.2 tests
 #
-#
 # MDEV-12854 Synchronize CREATE..SELECT data type and result set metadata data type for INT functions
 #
 SELECT
@@ -1492,10 +1490,8 @@ JSON_VALID(' {"number": 01E-4}')
 select JSON_VALID(' {"number": 0E-4.0}');
 JSON_VALID(' {"number": 0E-4.0}')
 0
-#
 # End of 10.4 tests
 #
-#
 # MDEV-16620 JSON_ARRAYAGG
 #
 CREATE TABLE t1 (a INT);
@@ -1727,10 +1723,8 @@ NULL
 Warnings:
 Warning	4036	Character disallowed in JSON in argument 1 to function 'json_extract' at position 2
 SET @@collation_connection= @save_collation_connection;
-#
 # End of 10.5 tests
 #
-#
 # MDEV-26054 Server crashes in Item_func_json_arrayagg::get_str_from_field
 #
 CREATE TABLE t (a VARCHAR(8));
@@ -1766,6 +1760,15 @@ FROM JSON_TABLE (@data, '$[*]' COLUMNS (data text PATH '$.Data')) AS t;
 data
 <root language="de"></root>
 #
+# MDEV-21530: json_extract STILL crashes in Item_func_json_extract::read_json
+#
+select null<=>json_extract('1',json_object(null,'{ }',null,null),'{}');
+null<=>json_extract('1',json_object(null,'{ }',null,null),'{}')
+1
+Warnings:
+Warning	4042	Syntax error in JSON path in argument 2 to function 'json_extract' at position 1
+# End of 10.6 tests
+#
 # MDEV-35614 JSON_UNQUOTE doesn't work with emojis
 #
 SELECT HEX(JSON_UNQUOTE('"\\ud83d\\ude0a"')) as hex_smiley;
@@ -1803,9 +1806,6 @@ show warnings;
 Level	Code	Message
 Warning	4035	Broken JSON string in argument 1 to function 'json_unquote' at position 13
 #
-# End of 10.6 tests
-#
-#
 # MDEV-31147 json_normalize does not work correctly with MSAN build
 #
 CREATE TABLE t1 (val JSON);
@@ -1815,10 +1815,8 @@ SELECT * FROM t1;
 val	normalized_json
 15	1.5E1
 DROP TABLE t1;
-#
 # End of 10.8 tests
 #
-#
 # MDEV-27677: Implement JSON_OVERLAPS()
 #
 # Testing scalar json datatypes
@@ -2670,6 +2668,4 @@ SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1');
 JSON_VALUE(@json,'$.A[last-1][last-1].key1')
 NULL
 SET @@collation_connection= @save_collation_connection;
-#
 # End of 10.9 Test
-#

View File

@@ -607,9 +607,7 @@ SELECT json_object("a", json_compact(a), "b", json_compact(b))
 FROM (SELECT * FROM json_test) AS json_test_values;
 DROP TABLE json_test;
---echo #
 --echo # End of 10.2 tests
---echo #
 --echo #
 --echo # MDEV-12854 Synchronize CREATE..SELECT data type and result set metadata data type for INT functions
@@ -971,9 +969,7 @@ select JSON_VALID(' {"number": 00E-4}');
 select JSON_VALID(' {"number": 01E-4}');
 select JSON_VALID(' {"number": 0E-4.0}');
---echo #
 --echo # End of 10.4 tests
---echo #
 -- echo #
 -- echo # MDEV-16620 JSON_ARRAYAGG
@@ -1195,9 +1191,7 @@ SELECT JSON_EXTRACT('{"a": 1,"b": 2}','$.a');
 SET @@collation_connection= @save_collation_connection;
---echo #
 --echo # End of 10.5 tests
---echo #
 --echo #
 --echo # MDEV-26054 Server crashes in Item_func_json_arrayagg::get_str_from_field
@@ -1233,6 +1227,14 @@ SELECT
 FROM JSON_TABLE (@data, '$[*]' COLUMNS (data text PATH '$.Data')) AS t;
+--echo #
+--echo # MDEV-21530: json_extract STILL crashes in Item_func_json_extract::read_json
+--echo #
+select null<=>json_extract('1',json_object(null,'{ }',null,null),'{}');
+--echo # End of 10.6 tests
 --echo #
 --echo # MDEV-35614 JSON_UNQUOTE doesn't work with emojis
 --echo #
@@ -1253,10 +1255,6 @@ select json_unquote(json_extract(@v,'$.color')) as unquoted, collation(json_unqu
 SELECT JSON_UNQUOTE('"\\uc080\\ude0a"') as invalid_utf8mb4;
 show warnings;
---echo #
---echo # End of 10.6 tests
---echo #
 --echo #
 --echo # MDEV-31147 json_normalize does not work correctly with MSAN build
 --echo #
@@ -1266,9 +1264,7 @@ INSERT INTO t1 (val) VALUES ('15');
 SELECT * FROM t1;
 DROP TABLE t1;
---echo #
 --echo # End of 10.8 tests
---echo #
 --echo #
 --echo # MDEV-27677: Implement JSON_OVERLAPS()
@@ -1942,6 +1938,4 @@ SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1');
 SET @@collation_connection= @save_collation_connection;
---echo #
 --echo # End of 10.9 Test
---echo #

View File

@@ -1242,6 +1242,7 @@ t1 CREATE TABLE `t1` (
 insert into t1 value(concat(repeat('s',3000),'1'));
 insert into t1 value(concat(repeat('s',3000),'2'));
 ERROR 23000: Duplicate entry 'sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss...' for key 'a'
+update t1 set a= concat(repeat('s',3000),'2');
 insert into t1 value(concat(repeat('a',3000),'2'));
 drop table t1;
 create table t1(a varchar(4000), b longblob , c varchar(5000), d longblob,

View File

@@ -404,6 +404,7 @@ show create table t1;
 insert into t1 value(concat(repeat('s',3000),'1'));
 --error ER_DUP_ENTRY
 insert into t1 value(concat(repeat('s',3000),'2'));
+update t1 set a= concat(repeat('s',3000),'2');
 insert into t1 value(concat(repeat('a',3000),'2'));
 drop table t1;

View File

@@ -356,6 +356,7 @@ ERROR 42000: Specified key was too long; max key length is 2300 bytes
 #
 create table t1(a int, unique(a) using hash);
 #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES)
+insert into t1 values(200),(150),(149),(148),(147),(146),(145),(144),(143),(142),(141),(140),(139),(138),(137),(136),(135),(134),(133),(132),(131),(130),(129),(128),(127),(126),(125),(124),(123),(122),(121),(120),(119),(118),(117),(116),(115),(114),(113),(112),(111),(110),(109),(108),(107),(106),(105),(104),(103),(102),(101),(100),(99),(98),(97),(96),(95),(94),(93),(92),(91),(90),(89),(88),(87),(86),(85),(84),(83),(82),(81),(80),(79),(78),(77),(76),(75),(74),(73),(72),(71),(70),(69),(68),(67),(66),(65),(64),(63),(62),(61),(60),(59),(58),(57),(56),(55),(54),(53),(52),(51),(50),(49),(48),(47),(46),(45),(44),(43),(42),(41),(40),(39),(38),(37),(36),(35),(34),(33),(32),(31),(30),(29),(28),(27),(26),(25),(24),(23),(22),(21),(20),(19),(18),(17),(16),(15),(14),(13),(12),(11),(10),(9),(8),(7),(6),(5),(4),(3),(2),(1);
 drop table t1;
 #
 # MDEV-21804 Assertion `marked_for_read()' failed upon INSERT into table with long unique blob under binlog_row_image=NOBLOB
@@ -809,3 +810,36 @@ hex(c1) hex(c2) c3 hex(c4)
 NULL	NULL	NULL	NULL
 drop table t1;
 # End of 10.5 tests
+#
+# MDEV-36852 Table definition gets corrupt after adding unique hash key
+#
+create table t1 (a text, b int, foreign key(a) references x(x)) engine=myisam;
+Warnings:
+Note	1071	Specified key was too long; max key length is 1000 bytes
+show create table t1;
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `a` text DEFAULT NULL,
+  `b` int(11) DEFAULT NULL,
+  KEY `a` (`a`(1000))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+alter table t1 add unique(a), add key(a);
+Warnings:
+Note	1071	Specified key was too long; max key length is 1000 bytes
+show create table t1;
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `a` text DEFAULT NULL,
+  `b` int(11) DEFAULT NULL,
+  UNIQUE KEY `a` (`a`) USING HASH,
+  KEY `a_2` (`a`(1000))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
+drop table t1;
+#
+# MDEV-37203 UBSAN: applying zero offset to null pointer in strings/ctype-uca.inl | my_uca_strnncollsp_onelevel_utf8mb4 | handler::check_duplicate_long_entries_update
+#
+create table t (a int,b text unique key);
+insert into t (a) values (1);
+update t set a=2;
+drop table t;
+# End of 10.6 tests

View File

@@ -332,17 +332,8 @@ CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria;
 --echo # MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes
 --echo #
 create table t1(a int, unique(a) using hash);
---let $count=150
---let insert_stmt= insert into t1 values(200)
-while ($count)
-{
-  --let $insert_stmt=$insert_stmt,($count)
-  --dec $count
-}
---disable_query_log
 --echo #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES)
---eval $insert_stmt
---enable_query_log
+insert into t1 values(200),(150),(149),(148),(147),(146),(145),(144),(143),(142),(141),(140),(139),(138),(137),(136),(135),(134),(133),(132),(131),(130),(129),(128),(127),(126),(125),(124),(123),(122),(121),(120),(119),(118),(117),(116),(115),(114),(113),(112),(111),(110),(109),(108),(107),(106),(105),(104),(103),(102),(101),(100),(99),(98),(97),(96),(95),(94),(93),(92),(91),(90),(89),(88),(87),(86),(85),(84),(83),(82),(81),(80),(79),(78),(77),(76),(75),(74),(73),(72),(71),(70),(69),(68),(67),(66),(65),(64),(63),(62),(61),(60),(59),(58),(57),(56),(55),(54),(53),(52),(51),(50),(49),(48),(47),(46),(45),(44),(43),(42),(41),(40),(39),(38),(37),(36),(35),(34),(33),(32),(31),(30),(29),(28),(27),(26),(25),(24),(23),(22),(21),(20),(19),(18),(17),(16),(15),(14),(13),(12),(11),(10),(9),(8),(7),(6),(5),(4),(3),(2),(1);
 drop table t1;
 --echo #
@@ -756,3 +747,23 @@ select hex(c1), hex(c2), c3, hex(c4) from t1;
 drop table t1;
 --echo # End of 10.5 tests
+--echo #
+--echo # MDEV-36852 Table definition gets corrupt after adding unique hash key
+--echo #
+create table t1 (a text, b int, foreign key(a) references x(x)) engine=myisam;
+show create table t1;
+alter table t1 add unique(a), add key(a);
+show create table t1;
+drop table t1;
+--echo #
+--echo # MDEV-37203 UBSAN: applying zero offset to null pointer in strings/ctype-uca.inl | my_uca_strnncollsp_onelevel_utf8mb4 | handler::check_duplicate_long_entries_update
+--echo #
+create table t (a int,b text unique key);
+insert into t (a) values (1);
+update t set a=2;
+drop table t;
+--echo # End of 10.6 tests

View File

@@ -134,3 +134,39 @@ disconnect con2;
 # MDEV-20131 Assertion `!pk->has_virtual()' failed
 create table t1 (a text, primary key(a(1871))) engine=innodb;
 ERROR 42000: Specified key was too long; max key length is 1536 bytes
+# End of 10.4 tests
+#
+# MDEV-37268 ER_NOT_KEYFILE or assertion failure upon REPLACE into table with unique hash under READ-COMMITTED
+#
+create table t1 (id int not null primary key, f varchar(100), unique(f) using hash) engine=innodb;
+insert t1 values (1,'x');
+set transaction isolation level read committed;
+replace t1 values (2,'x');
+select * from t1;
+id	f
+2	x
+drop table t1;
+create table t1 (id int, f longtext, primary key (id), unique(f)) engine=innodb partition by key (id) partitions 9;
+insert t1 (id) values (1),(2);
+set transaction isolation level read committed;
+update ignore t1 set f = 'x';
+ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported
+select * from t1;
+id	f
+1	NULL
+2	NULL
+drop table t1;
+#
+# MDEV-37310 Non-debug failing assertion node->pcur->rel_pos == BTR_PCUR_ON upon violating long unique under READ-COMMITTED
+#
+create table t1 (id int, f blob, unique(id,f)) engine=innodb partition by key(id) partitions 2;
+insert t1 values (1,'foo'),(2,'foo');
+set transaction isolation level read committed;
+update ignore t1 set id = 2;
+ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported
+select * from t1;
+id	f
+1	foo
+2	foo
+drop table t1;
+# End of 10.6 tests

View File

@@ -1,4 +1,5 @@
 --source include/have_innodb.inc
+--source include/have_partition.inc
 #
 # MDEV-371 Unique indexes for blobs
@@ -143,3 +144,36 @@ disconnect con2;
 --error ER_TOO_LONG_KEY
 create table t1 (a text, primary key(a(1871))) engine=innodb;
+--echo # End of 10.4 tests
+--echo #
+--echo # MDEV-37268 ER_NOT_KEYFILE or assertion failure upon REPLACE into table with unique hash under READ-COMMITTED
+--echo #
+create table t1 (id int not null primary key, f varchar(100), unique(f) using hash) engine=innodb;
+insert t1 values (1,'x');
+set transaction isolation level read committed;
+replace t1 values (2,'x');
+select * from t1;
+drop table t1;
+create table t1 (id int, f longtext, primary key (id), unique(f)) engine=innodb partition by key (id) partitions 9;
+insert t1 (id) values (1),(2);
+set transaction isolation level read committed;
+--error ER_NOT_SUPPORTED_YET
+update ignore t1 set f = 'x';
+select * from t1;
+drop table t1;
+--echo #
+--echo # MDEV-37310 Non-debug failing assertion node->pcur->rel_pos == BTR_PCUR_ON upon violating long unique under READ-COMMITTED
+--echo #
+create table t1 (id int, f blob, unique(id,f)) engine=innodb partition by key(id) partitions 2;
+insert t1 values (1,'foo'),(2,'foo');
+set transaction isolation level read committed;
+--error ER_NOT_SUPPORTED_YET
+update ignore t1 set id = 2;
+select * from t1;
+drop table t1;
+--echo # End of 10.6 tests

View File

@@ -0,0 +1,255 @@
#
# MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records
#
## INSERT
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
# Keep a Read View open to prevent purge
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
# Create delete marked secondary index Record ('a', 10)
insert t1 values(10, 'a');
delete from t1;
# Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum
set transaction isolation level read committed;
set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
insert t1 values(15, 'a');
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
# Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10)
set transaction isolation level read committed;
insert t1 values(5, 'a');
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 23000: Duplicate entry 'a' for key 'col2'
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
5 a
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## INSERT, row-level locking proof
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
# Keep a Read View open to prevent purge
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
# Create delete marked secondary index Record ('a', 10)
insert t1 values(10, 'a');
delete from t1;
# Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum
set transaction isolation level read committed;
set debug_sync="ha_write_row_end SIGNAL checked_duplicate WAIT_FOR do_insert";
insert t1 values(15, 'a');
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
# Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10)
set session innodb_lock_wait_timeout= 1;
set transaction isolation level read committed;
insert t1 values(5, 'a');
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
set debug_sync="now SIGNAL do_insert";
connection con1;
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
15 a
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## UPDATE
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
update t1 set col2='a' where col1=5;
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 23000: Duplicate entry 'a' for key 'col2'
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
5 b
15 a
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## INSERT IGNORE
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
insert t1 values(10, 'a');
delete from t1;
set transaction isolation level read committed;
set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
insert ignore t1 values(15, 'a'), (16, 'b');
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
insert t1 values(5, 'a');
set debug_sync="now SIGNAL do_insert";
connection con1;
Warnings:
Warning 1062 Duplicate entry 'a' for key 'col2'
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
5 a
16 b
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## UPDATE IGNORE
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
insert into t1 values( 9, 'd');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
update ignore t1 set col2=chr(92+col1) where col1<=9;
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
5 b
9 d
15 a
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## UPDATE modifying PK
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
update t1 set col2='a', col1=4 where col1=5;
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 23000: Duplicate entry 'a' for key 'col2'
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
5 b
15 a
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## UPDATE IGNORE modifying PK
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
col1 col2
connect con1,localhost,root;
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
insert into t1 values( 9, 'd');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
update ignore t1 set col2=chr(92+col1), col1=col1-1 where col1<=9;
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported
connection default;
select * from t1;
col1 col2
commit;
select * from t1;
col1 col2
5 b
9 d
15 a
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
# End of 10.6 tests

View File

@@ -0,0 +1,242 @@
--source include/have_innodb.inc
--source include/have_debug_sync.inc
--echo #
--echo # MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records
--echo #
--disable_view_protocol
--echo ## INSERT
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
--echo # Keep a Read View open to prevent purge
start transaction;
select * from t1;
--connect con1,localhost,root
--echo # Create delete marked secondary index Record ('a', 10)
insert t1 values(10, 'a');
delete from t1;
--echo # Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum
set transaction isolation level read committed;
set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
--send insert t1 values(15, 'a')
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
--echo # Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10)
set transaction isolation level read committed;
insert t1 values(5, 'a');
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_DUP_ENTRY
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## INSERT, row-level locking proof
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
--echo # Keep a Read View open to prevent purge
start transaction;
select * from t1;
--connect con1,localhost,root
--echo # Create delete marked secondary index Record ('a', 10)
insert t1 values(10, 'a');
delete from t1;
--echo # Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum
set transaction isolation level read committed;
set debug_sync="ha_write_row_end SIGNAL checked_duplicate WAIT_FOR do_insert";
--send insert t1 values(15, 'a')
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
--echo # Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10)
set session innodb_lock_wait_timeout= 1;
set transaction isolation level read committed;
--error ER_LOCK_WAIT_TIMEOUT
insert t1 values(5, 'a');
set debug_sync="now SIGNAL do_insert";
--connection con1
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## UPDATE
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
--send update t1 set col2='a' where col1=5
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_DUP_ENTRY
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## INSERT IGNORE
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert t1 values(10, 'a');
delete from t1;
set transaction isolation level read committed;
set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
--send insert ignore t1 values(15, 'a'), (16, 'b')
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
insert t1 values(5, 'a');
set debug_sync="now SIGNAL do_insert";
--connection con1
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## UPDATE IGNORE
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
insert into t1 values( 9, 'd');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
--send update ignore t1 set col2=chr(92+col1) where col1<=9
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_NOT_SUPPORTED_YET
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## UPDATE modifying PK
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
--send update t1 set col2='a', col1=4 where col1=5
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_DUP_ENTRY
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## UPDATE IGNORE modifying PK
create table t1 (
col1 int primary key,
col2 varchar(3000),
unique (col2) using hash) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert into t1 values(10, 'a');
delete from t1;
insert into t1 values( 5, 'b');
insert into t1 values(15, 'c');
insert into t1 values( 9, 'd');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
--send update ignore t1 set col2=chr(92+col1), col1=col1-1 where col1<=9
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set col2='a' where col1=15;
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_NOT_SUPPORTED_YET
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--enable_view_protocol
--echo # End of 10.6 tests

View File

@@ -5995,3 +5995,55 @@ DROP VIEW t1;
 #
 # End of 10.4 tests
 #
+#
+# MDEV-35330: Assertion `marked_for_read()` failed in VSec9::VSec9 | Item_func_from_unixtime::get_date
+#
+CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10');
+SELECT * FROM t;
+a	b
+1	2025-07-18 18:37:10
+EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT;
+SELECT * FROM t;
+a	b
+1	1970-01-01 09:00:01
+DROP TABLE t;
+CREATE TABLE t (a INT, b INT DEFAULT (a+5));
+INSERT INTO t values (1,2), (2,DEFAULT);
+EXECUTE IMMEDIATE 'INSERT INTO t VALUES (3,4), (4,?)' USING DEFAULT;
+SELECT * FROM t;
+a	b
+1	2
+2	7
+3	4
+4	9
+EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT;
+SELECT * FROM t;
+a	b
+1	6
+2	7
+3	8
+4	9
+DROP TABLE t;
+CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10');
+SELECT * FROM t;
+a	b
+1	2025-07-18 18:37:10
+PREPARE s FROM 'UPDATE t SET b=?';
+EXECUTE s USING DEFAULT;
+SELECT * FROM t;
+a	b
+1	1970-01-01 09:00:01
+DROP TABLE t;
+CREATE TABLE t (a INT, b TIMESTAMP DEFAULT FROM_UNIXTIME(a), c INT DEFAULT (a+5));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10',3);
+SELECT * FROM t;
+a	b	c
+1	2025-07-18 18:37:10	3
+EXECUTE IMMEDIATE 'UPDATE t SET b=?, c=?' USING DEFAULT, DEFAULT;
+SELECT * FROM t;
+a	b	c
+1	1970-01-01 09:00:01	6
+DROP TABLE t;
+# End of 10.6 tests

View File

@@ -5447,3 +5447,54 @@ DROP VIEW t1;
 --echo #
 --echo # End of 10.4 tests
 --echo #
+--echo #
+--echo # MDEV-35330: Assertion `marked_for_read()` failed in VSec9::VSec9 | Item_func_from_unixtime::get_date
+--echo #
+CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10');
+SELECT * FROM t;
+EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT;
+SELECT * FROM t;
+DROP TABLE t;
+CREATE TABLE t (a INT, b INT DEFAULT (a+5));
+INSERT INTO t values (1,2), (2,DEFAULT);
+EXECUTE IMMEDIATE 'INSERT INTO t VALUES (3,4), (4,?)' USING DEFAULT;
+SELECT * FROM t;
+EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT;
+SELECT * FROM t;
+DROP TABLE t;
+CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10');
+SELECT * FROM t;
+PREPARE s FROM 'UPDATE t SET b=?';
+EXECUTE s USING DEFAULT;
+SELECT * FROM t;
+DROP TABLE t;
+CREATE TABLE t (a INT, b TIMESTAMP DEFAULT FROM_UNIXTIME(a), c INT DEFAULT (a+5));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10',3);
+SELECT * FROM t;
+EXECUTE IMMEDIATE 'UPDATE t SET b=?, c=?' USING DEFAULT, DEFAULT;
+SELECT * FROM t;
+DROP TABLE t;
+--echo # End of 10.6 tests

View File

@@ -14,3 +14,5 @@ galera_wan : MDEV-35940 Unallowed state transition: donor -> synced in galera_wa
 galera_vote_rejoin_ddl : MDEV-35940 Unallowed state transition: donor -> synced in galera_wan
 MW-329 : MDEV-35951 Complete freeze during MW-329 test
 galera_vote_rejoin_dml : MDEV-35964 Assertion `ist_seqno >= cc_seqno' failed in galera_vote_rejoin_dml
+galera_var_notify_cmd : MDEV-37257 WSREP: Notification command failed: 1 (Operation not permitted)
+galera_var_notify_ssl_ipv6 : MDEV-37257 WSREP: Notification command failed: 1 (Operation not permitted)

View File

@@ -7,23 +7,15 @@ connection node_1;
 connection node_1;
 CREATE TABLE t1(i INT) ENGINE=INNODB;
 CREATE TABLE t2(i INT) ENGINE=MYISAM;
-Warnings:
-Note 1266 Using storage engine InnoDB for table 't2'
+ERROR HY000: The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
 connection node_2;
 SHOW TABLES;
 Tables_in_test
 t1
-t2
 SHOW CREATE TABLE t1;
 Table	Create Table
 t1	CREATE TABLE `t1` (
   `i` int(11) DEFAULT NULL
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
-SHOW CREATE TABLE t2;
-Table	Create Table
-t2	CREATE TABLE `t2` (
-  `i` int(11) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
-DROP TABLE t1, t2;
+DROP TABLE t1;
 # End of tests

View File

@@ -0,0 +1,25 @@
connection node_2;
connection node_1;
set session sql_mode='';
SET @@enforce_storage_engine=INNODB;
CREATE TABLE t1 (c INT ) ENGINE=ARIA;
ERROR HY000: The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
SHOW WARNINGS;
Level Code Message
Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set
Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set
CREATE TABLE t1 (c INT );
DROP TABLE t1;
CREATE TABLE t1 (c INT ) ENGINE=INNODB;
DROP TABLE t1;
SET @@enforce_storage_engine=ARIA;
CREATE TABLE t1 (c INT ) ENGINE=INNODB;
ERROR HY000: The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
SHOW WARNINGS;
Level Code Message
Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set
Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement
Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set

View File

@@ -7,14 +7,18 @@ SELECT 1 FROM DUAL;
 1
 1
 LOCK TABLE t2 WRITE;
+connect node_2_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2_ctrl;
+SET SESSION wsrep_sync_wait=0;
 connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
 connection node_2a;
 OPTIMIZE TABLE t1,t2;;
+connection node_2_ctrl;
+SET SESSION wsrep_sync_wait = 0;
 connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2;
 connection node_2b;
 REPAIR TABLE t1,t2;;
-connection node_2;
-SET SESSION wsrep_sync_wait = 0;
+connection node_2_ctrl;
 connection node_1;
 INSERT INTO t2 VALUES (1);
 connection node_2;
@@ -34,3 +38,4 @@ DROP TABLE t2;
 connection node_1;
 disconnect node_2a;
 disconnect node_2b;
+disconnect node_2_ctrl;

View File

@@ -1,5 +1,6 @@
 --source include/galera_cluster.inc
 --source include/have_innodb.inc
+--source include/have_aria.inc
 --echo #
 --echo # MDEV-9312: storage engine not enforced during galera cluster
@@ -7,14 +8,21 @@
 --echo #
 --connection node_1
 CREATE TABLE t1(i INT) ENGINE=INNODB;
+#
+# This is no longer supported, because enforce_storage_engine is a local
+# setting and the storage engine finally used on other members of the
+# cluster depends on their configuration. Currently, there is no way to
+# query the configuration of a remote node.
+#
+--error ER_OPTION_PREVENTS_STATEMENT
 CREATE TABLE t2(i INT) ENGINE=MYISAM;
 --connection node_2
 SHOW TABLES;
 SHOW CREATE TABLE t1;
-SHOW CREATE TABLE t2;
 # Cleanup
-DROP TABLE t1, t2;
+DROP TABLE t1;
 --echo # End of tests

View File

@@ -0,0 +1,19 @@
--source include/galera_cluster.inc
--source include/have_aria.inc
--source include/log_bin.inc
set session sql_mode='';
SET @@enforce_storage_engine=INNODB;
--error ER_OPTION_PREVENTS_STATEMENT
CREATE TABLE t1 (c INT ) ENGINE=ARIA;
SHOW WARNINGS;
CREATE TABLE t1 (c INT );
DROP TABLE t1;
CREATE TABLE t1 (c INT ) ENGINE=INNODB;
DROP TABLE t1;
SET @@enforce_storage_engine=ARIA;
--error ER_OPTION_PREVENTS_STATEMENT
CREATE TABLE t1 (c INT ) ENGINE=INNODB;
SHOW WARNINGS;

View File

@@ -10,21 +10,33 @@ SELECT 1 FROM DUAL;
 LOCK TABLE t2 WRITE;
+--connect node_2_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connection node_2_ctrl
+SET SESSION wsrep_sync_wait=0;
 --connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
 --connection node_2a
 --send OPTIMIZE TABLE t1,t2;
+--connection node_2_ctrl
+SET SESSION wsrep_sync_wait = 0;
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'acquiring total order isolation%';
+--let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST
+--source include/wait_condition_with_debug_and_kill.inc
 --connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2
 --connection node_2b
 --send REPAIR TABLE t1,t2;
---connection node_2
-SET SESSION wsrep_sync_wait = 0;
---let $wait_condition = SELECT COUNT(*) BETWEEN 1 AND 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'Waiting to execute in isolation%';
+--connection node_2_ctrl
+--let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'acquiring total order isolation%';
 --let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST
 --source include/wait_condition_with_debug_and_kill.inc
 --connection node_1
+# We have LOCK TABLE in node_2, so this could fail on lock wait,
+# or the next statement is fast enough and succeeds
+--error 0,ER_LOCK_WAIT_TIMEOUT
 INSERT INTO t2 VALUES (1);
 --connection node_2
@@ -43,3 +55,4 @@ DROP TABLE t2;
 --disconnect node_2a
 --disconnect node_2b
+--disconnect node_2_ctrl

View File

@@ -32,8 +32,8 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 6;
 UPDATE t1 SET f2 = 1 WHERE f1 = 7;
 UPDATE t1 SET f2 = 1 WHERE f1 = 8;
 connection node_2;
-SET wsrep_on=OFF;
-SET wsrep_on=ON;
+# make sure all events landed to slave queue
+set wsrep_sync_wait=0;
 UNLOCK TABLES;
 SET SESSION wsrep_on = ON;
 SET SESSION wsrep_sync_wait = 15;
@@ -56,7 +56,8 @@ f1 f2
 7	1
 8	1
 connection node_2;
-SET GLOBAL wsrep_on=OFF;
+# Gracefully restart the node
+set wsrep_on=OFF;
 # restart
 DROP TABLE t1;
 connection node_1;
@@ -73,11 +74,15 @@ INSERT INTO t1 VALUES (8, 0);
 COMMIT;
 CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 INT);
 connection node_2;
+# Allow 1K slave queue without flow control
 SET GLOBAL wsrep_provider_options='gcs.fc_limit=1K';
+# Introduce inconsistency
 SET wsrep_on=OFF;
 DROP TABLE t2;
 SET wsrep_on=ON;
+# set up sync point to ensure DROP TABLE replication order below
 SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+# Build up slave queue:
 LOCK TABLES t1 READ;
 connection node_1;
 UPDATE t1 SET f2 = 1 WHERE f1 = 1;
@@ -86,18 +91,19 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 3;
 UPDATE t1 SET f2 = 1 WHERE f1 = 4;
 UPDATE t1 SET f2 = 2 WHERE f1 = 4;
 /* dependent applier */;
+# interleave a failing statement
 connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
 connection node_2a;
 DROP TABLE t2;;
+# make sure DROP TABLE from above has replicated
 connection node_2;
-SET wsrep_on=OFF;
+set wsrep_sync_wait=0;
 "Wait for DROP TABLE to replicate"
 SET SESSION wsrep_on = 0;
-SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
 SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
 SET GLOBAL wsrep_provider_options = 'dbug=';
 "DROP TABLE replicated"
-SET wsrep_on=ON;
 connection node_1;
 UPDATE t1 SET f2 = 3 WHERE f1 = 4;
 /* dependent applier */
@@ -106,8 +112,7 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 6;
 UPDATE t1 SET f2 = 1 WHERE f1 = 7;
 UPDATE t1 SET f2 = 1 WHERE f1 = 8;
 connection node_2;
-SET wsrep_on=OFF;
-SET wsrep_on=ON;
+# make sure all events landed to slave queue
 UNLOCK TABLES;
 connection node_2a;
 ERROR 42S02: Unknown table 'test.t2'
@@ -128,11 +133,11 @@ f1 f2
 7	1
 8	1
 connection node_2;
-SET SESSION wsrep_on = ON;
+set wsrep_on=OFF;
 SET SESSION wsrep_sync_wait = 15;
-SET SESSION wsrep_on = ON;
+# Wait for the node to shutdown replication
 SET SESSION wsrep_sync_wait = 15;
-SET GLOBAL wsrep_on=OFF;
+# Gracefully restart the node
 # restart
 DROP TABLE t1;
 CALL mtr.add_suppression("Can't find record in 't1'");

View File

@@ -33,6 +33,7 @@ SET wsrep_on=OFF;
DELETE FROM t1 WHERE f1 = 2; DELETE FROM t1 WHERE f1 = 2;
DELETE FROM t1 WHERE f1 = 4; DELETE FROM t1 WHERE f1 = 4;
SET wsrep_on=ON; SET wsrep_on=ON;
--source include/galera_wait_ready.inc
# Build up slave queue: # Build up slave queue:
# - first 8 events will be picked by slave threads # - first 8 events will be picked by slave threads
@@ -51,11 +52,11 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 7;
UPDATE t1 SET f2 = 1 WHERE f1 = 8; UPDATE t1 SET f2 = 1 WHERE f1 = 8;
--connection node_2 --connection node_2
# make sure all events landed to slave queue --echo # make sure all events landed to slave queue
SET wsrep_on=OFF; set wsrep_sync_wait=0;
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_recv_queue'; --let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_recv_queue';
--source include/wait_condition.inc --source include/wait_condition.inc
SET wsrep_on=ON;
UNLOCK TABLES; UNLOCK TABLES;
--source include/wsrep_wait_disconnect.inc --source include/wsrep_wait_disconnect.inc
# Wait for the node to shutdown replication # Wait for the node to shutdown replication
@@ -70,8 +71,8 @@ SHOW STATUS LIKE 'wsrep_cluster_size';
SELECT * FROM t1; SELECT * FROM t1;
--connection node_2 --connection node_2
#Gracefully restart the node --echo # Gracefully restart the node
SET GLOBAL wsrep_on=OFF; set wsrep_on=OFF;
--source include/shutdown_mysqld.inc --source include/shutdown_mysqld.inc
--source include/start_mysqld.inc --source include/start_mysqld.inc
--source include/galera_wait_ready.inc --source include/galera_wait_ready.inc
@@ -98,20 +99,21 @@ COMMIT;
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 INT); CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 INT);
--connection node_2 --connection node_2
# Allow 1K slave queue without flow control --echo # Allow 1K slave queue without flow control
SET GLOBAL wsrep_provider_options='gcs.fc_limit=1K'; SET GLOBAL wsrep_provider_options='gcs.fc_limit=1K';
# Introduce inconsistency
SET wsrep_on=OFF;
--let $wait_condition = SELECT COUNT(*)=1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't2'; --let $wait_condition = SELECT COUNT(*)=1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't2';
--source include/wait_condition.inc --source include/wait_condition.inc
--echo # Introduce inconsistency
SET wsrep_on=OFF;
DROP TABLE t2; DROP TABLE t2;
SET wsrep_on=ON; SET wsrep_on=ON;
--source include/galera_wait_ready.inc
# set up sync point to ensure DROP TABLE replication order below --echo # set up sync point to ensure DROP TABLE replication order below
--let galera_sync_point = after_replicate_sync --let galera_sync_point = after_replicate_sync
--source include/galera_set_sync_point.inc --source include/galera_set_sync_point.inc
# Build up slave queue: --echo # Build up slave queue:
# - first 8 events will be picked by slave threads # - first 8 events will be picked by slave threads
# - one more event will be waiting in slave queue # - one more event will be waiting in slave queue
LOCK TABLES t1 READ; LOCK TABLES t1 READ;
@@ -123,20 +125,19 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 3;
UPDATE t1 SET f2 = 1 WHERE f1 = 4; UPDATE t1 SET f2 = 1 WHERE f1 = 4;
UPDATE t1 SET f2 = 2 WHERE f1 = 4; /* dependent applier */; UPDATE t1 SET f2 = 2 WHERE f1 = 4; /* dependent applier */;
# interleave a failing statement --echo # interleave a failing statement
--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2 --connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--connection node_2a --connection node_2a
--send DROP TABLE t2; --send DROP TABLE t2;
# make sure DROP TABLE from above has replicated --echo # make sure DROP TABLE from above has replicated
--connection node_2 --connection node_2
SET wsrep_on=OFF; set wsrep_sync_wait=0;
--echo "Wait for DROP TABLE to replicate" --echo "Wait for DROP TABLE to replicate"
--source include/galera_wait_sync_point.inc --source include/galera_wait_sync_point.inc
--source include/galera_signal_sync_point.inc --source include/galera_signal_sync_point.inc
--source include/galera_clear_sync_point.inc --source include/galera_clear_sync_point.inc
--echo "DROP TABLE replicated" --echo "DROP TABLE replicated"
SET wsrep_on=ON;
--connection node_1 --connection node_1
UPDATE t1 SET f2 = 3 WHERE f1 = 4; /* dependent applier */ UPDATE t1 SET f2 = 3 WHERE f1 = 4; /* dependent applier */
@@ -146,11 +147,10 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 7;
UPDATE t1 SET f2 = 1 WHERE f1 = 8; UPDATE t1 SET f2 = 1 WHERE f1 = 8;
--connection node_2 --connection node_2
# make sure all events landed in the slave queue --echo # make sure all events landed in the slave queue
SET wsrep_on=OFF;
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_recv_queue'; --let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_recv_queue';
--source include/wait_condition.inc --source include/wait_condition.inc
SET wsrep_on=ON;
UNLOCK TABLES; UNLOCK TABLES;
--connection node_2a --connection node_2a
@@ -165,12 +165,13 @@ SHOW STATUS LIKE 'wsrep_cluster_size';
SELECT * FROM t1; SELECT * FROM t1;
--connection node_2 --connection node_2
set wsrep_on=OFF;
--source include/wsrep_wait_disconnect.inc --source include/wsrep_wait_disconnect.inc
# Wait for the node to shutdown replication --echo # Wait for the node to shutdown replication
--let $members=0 --let $members=0
--source include/wsrep_wait_membership.inc --source include/wsrep_wait_membership.inc
# Gracefully restart the node --echo # Gracefully restart the node
SET GLOBAL wsrep_on=OFF;
--source include/shutdown_mysqld.inc --source include/shutdown_mysqld.inc
--source include/start_mysqld.inc --source include/start_mysqld.inc
--source include/galera_wait_ready.inc --source include/galera_wait_ready.inc
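The hunks above swap the old SET wsrep_on=OFF/ON bracketing around monitoring queries for set wsrep_sync_wait=0, which only disables causality waits for the session instead of toggling replication. A minimal sketch of the resulting polling idiom, assuming a two-node mtr setup and the status variable used in this test:

--connection node_2
set wsrep_sync_wait=0;
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_recv_queue'
--source include/wait_condition.inc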

View File

@@ -26,13 +26,13 @@ SET GLOBAL innodb_fast_shutdown = 0;
# restart: --debug_dbug=+d,ib_log_checkpoint_avoid_hard --innodb_flush_sync=0 # restart: --debug_dbug=+d,ib_log_checkpoint_avoid_hard --innodb_flush_sync=0
begin; begin;
insert into t1 values (6, repeat('%', 400)); insert into t1 values (6, repeat('%', 400));
SET GLOBAL innodb_max_dirty_pages_pct_lwm=0, innodb_max_dirty_pages_pct=0;
# Make the first page dirty for system tablespace # Make the first page dirty for system tablespace
set global innodb_saved_page_number_debug = 0; set global innodb_saved_page_number_debug = 0;
set global innodb_fil_make_page_dirty_debug = 0; set global innodb_fil_make_page_dirty_debug = 0;
# Make the second page dirty for system tablespace # Make the second page dirty for system tablespace
set global innodb_saved_page_number_debug = 1; set global innodb_saved_page_number_debug = 1;
set global innodb_fil_make_page_dirty_debug = 0; set global innodb_fil_make_page_dirty_debug = 0;
set global innodb_buf_flush_list_now = 1;
# Kill the server # Kill the server
# Make the 1st page (page_no=0) and 2nd page (page_no=1) # Make the 1st page (page_no=0) and 2nd page (page_no=1)
# of the system tablespace all zeroes. # of the system tablespace all zeroes.

View File

@@ -131,3 +131,11 @@ ALTER TABLE t2 STATS_PERSISTENT=1;
DROP TABLE t2; DROP TABLE t2;
SELECT * FROM mysql.innodb_index_stats; SELECT * FROM mysql.innodb_index_stats;
database_name table_name index_name last_update stat_name stat_value sample_size stat_description database_name table_name index_name last_update stat_name stat_value sample_size stat_description
#
# MDEV-37123 dict_table_open_on_id() fails to release dict_sys.latch
#
SET GLOBAL innodb_defragment_stats_accuracy=1;
CREATE TABLE t (f INT,f2 CHAR(1),KEY k1 (f2),FULLTEXT KEY(f2),
FOREIGN KEY(f2) REFERENCES t (f3)) ENGINE=InnoDB;
ERROR HY000: Can't create table `test`.`t` (errno: 150 "Foreign key constraint is incorrectly formed")
SET GLOBAL innodb_defragment_stats_accuracy=default;
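The CREATE TABLE above fails on purpose: it references the nonexistent column f3 so that the error path has to release dict_sys.latch. For contrast, a sketch of a self-referencing foreign key that InnoDB does accept; the table name q is illustrative:

CREATE TABLE q (
  f INT PRIMARY KEY,
  f2 INT,
  KEY k1 (f2),
  FOREIGN KEY (f2) REFERENCES q (f)  -- referenced column exists and is indexed
) ENGINE=InnoDB;
DROP TABLE q;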

View File

@@ -166,7 +166,6 @@ SELECT * FROM t FORCE INDEX (b) FOR UPDATE;
a b a b
1 NULL 1 NULL
COMMIT; COMMIT;
disconnect con_weird;
connection consistent; connection consistent;
SELECT * FROM t FORCE INDEX (b) FOR UPDATE; SELECT * FROM t FORCE INDEX (b) FOR UPDATE;
a b a b
@@ -230,9 +229,67 @@ UPDATE t SET b=4 WHERE a=1;
connection consistent; connection consistent;
SELECT * FROM t WHERE a=1 FOR UPDATE; SELECT * FROM t WHERE a=1 FOR UPDATE;
ERROR HY000: Record has changed since last read in table 't' ERROR HY000: Record has changed since last read in table 't'
disconnect consistent;
disconnect disable_purging; disconnect disable_purging;
connection default; connection default;
SET DEBUG_SYNC="RESET"; SET DEBUG_SYNC="RESET";
DROP TABLE t; DROP TABLE t;
CREATE TABLE t1(a INT) ENGINE=InnoDB STATS_PERSISTENT=0;
CREATE TABLE t2(a INT) ENGINE=InnoDB STATS_PERSISTENT=0;
BEGIN;
INSERT INTO t1 SET a=1;
connection con_weird;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
BEGIN;
INSERT INTO t2 SET a=1;
connection consistent;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
BEGIN;
INSERT INTO t2 SET a=2;
connection default;
COMMIT;
connection con_weird;
SELECT * FROM t1;
a
1
COMMIT;
connection consistent;
SELECT * FROM t1;
ERROR HY000: Record has changed since last read in table 't1'
COMMIT;
connection default;
BEGIN;
INSERT INTO t1 SET a=2;
connection con_weird;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
INSERT INTO t2 SET a=3;
connection consistent;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
INSERT INTO t2 SET a=2;
connection default;
COMMIT;
connection con_weird;
SELECT * FROM t1;
a
1
2
COMMIT;
disconnect con_weird;
connection consistent;
SELECT * FROM t1;
ERROR HY000: Record has changed since last read in table 't1'
COMMIT;
disconnect consistent;
connection default;
DROP TABLE t1,t2;
#
# MDEV-37215 SELECT...FOR UPDATE crash under SERIALIZABLE
#
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB;
SELECT * FROM t1 FOR UPDATE;
a
DROP TABLE t1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
# End of 10.6 tests # End of 10.6 tests

View File

@@ -15,5 +15,21 @@ FLUSH TABLE t1 FOR EXPORT;
NOT FOUND /repairman/ in t1.ibd NOT FOUND /repairman/ in t1.ibd
UNLOCK TABLES; UNLOCK TABLES;
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-37183 innodb_immediate_scrub_data_uncompressed=ON may break
# crash recovery
#
SET GLOBAL innodb_limit_optimistic_insert_debug=0;
CREATE TABLE t(a VARCHAR(1) PRIMARY KEY,INDEX(a DESC)) ENGINE=InnoDB;
INSERT INTO t VALUES('2'),('1'),(''),('6'),('4'),('3');
SET GLOBAL innodb_limit_optimistic_insert_debug=3;
INSERT INTO t VALUES('8');
CHECK TABLE t;
Table Op Msg_type Msg_text
test.t check status OK
SELECT COUNT(*) FROM t;
COUNT(*)
7
DROP TABLE t;
SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=@save_debug; SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=@save_debug;
SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=@save_scrub; SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=@save_scrub;

View File

@@ -51,6 +51,8 @@ let $restart_parameters=--debug_dbug=+d,ib_log_checkpoint_avoid_hard --innodb_fl
begin; begin;
insert into t1 values (6, repeat('%', 400)); insert into t1 values (6, repeat('%', 400));
SET GLOBAL innodb_max_dirty_pages_pct_lwm=0, innodb_max_dirty_pages_pct=0;
--echo # Make the first page dirty for system tablespace --echo # Make the first page dirty for system tablespace
set global innodb_saved_page_number_debug = 0; set global innodb_saved_page_number_debug = 0;
set global innodb_fil_make_page_dirty_debug = 0; set global innodb_fil_make_page_dirty_debug = 0;
@@ -59,7 +61,11 @@ set global innodb_fil_make_page_dirty_debug = 0;
set global innodb_saved_page_number_debug = 1; set global innodb_saved_page_number_debug = 1;
set global innodb_fil_make_page_dirty_debug = 0; set global innodb_fil_make_page_dirty_debug = 0;
set global innodb_buf_flush_list_now = 1; let $wait_condition =
SELECT variable_value = 0
FROM information_schema.global_status
WHERE variable_name = 'INNODB_BUFFER_POOL_PAGES_DIRTY';
--source include/wait_condition.inc
--let CLEANUP_IF_CHECKPOINT=drop table t1, unexpected_checkpoint; --let CLEANUP_IF_CHECKPOINT=drop table t1, unexpected_checkpoint;
--source ../include/no_checkpoint_end.inc --source ../include/no_checkpoint_end.inc
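The explicit flush trigger (innodb_buf_flush_list_now) is replaced here by a poll: with both dirty-page thresholds forced to zero, page cleaning proceeds asynchronously, so the test must wait until the buffer pool is genuinely clean before the server is killed. The generic form of the idiom, using the status variable from this test:

let $wait_condition =
  SELECT variable_value = 0
  FROM information_schema.global_status
  WHERE variable_name = 'INNODB_BUFFER_POOL_PAGES_DIRTY';
--source include/wait_condition.inc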

View File

@@ -86,3 +86,12 @@ ALTER TABLE t2 STATS_PERSISTENT=1;
DROP TABLE t2; DROP TABLE t2;
SELECT * FROM mysql.innodb_index_stats; SELECT * FROM mysql.innodb_index_stats;
--echo #
--echo # MDEV-37123 dict_table_open_on_id() fails to release dict_sys.latch
--echo #
SET GLOBAL innodb_defragment_stats_accuracy=1;
--error ER_CANT_CREATE_TABLE
CREATE TABLE t (f INT,f2 CHAR(1),KEY k1 (f2),FULLTEXT KEY(f2),
FOREIGN KEY(f2) REFERENCES t (f3)) ENGINE=InnoDB;
SET GLOBAL innodb_defragment_stats_accuracy=default;

View File

@@ -174,7 +174,6 @@ ROLLBACK;
--reap --reap
SELECT * FROM t FORCE INDEX (b) FOR UPDATE; SELECT * FROM t FORCE INDEX (b) FOR UPDATE;
COMMIT; COMMIT;
--disconnect con_weird
--connection consistent --connection consistent
SELECT * FROM t FORCE INDEX (b) FOR UPDATE; SELECT * FROM t FORCE INDEX (b) FOR UPDATE;
@@ -246,12 +245,65 @@ UPDATE t SET b=4 WHERE a=1;
--connection consistent --connection consistent
--error ER_CHECKREAD --error ER_CHECKREAD
SELECT * FROM t WHERE a=1 FOR UPDATE; SELECT * FROM t WHERE a=1 FOR UPDATE;
--disconnect consistent
--disconnect disable_purging --disconnect disable_purging
--connection default --connection default
SET DEBUG_SYNC="RESET"; SET DEBUG_SYNC="RESET";
DROP TABLE t; DROP TABLE t;
CREATE TABLE t1(a INT) ENGINE=InnoDB STATS_PERSISTENT=0;
CREATE TABLE t2(a INT) ENGINE=InnoDB STATS_PERSISTENT=0;
BEGIN; INSERT INTO t1 SET a=1;
--connection con_weird
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
BEGIN; INSERT INTO t2 SET a=1;
--connection consistent
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
BEGIN; INSERT INTO t2 SET a=2;
--connection default
COMMIT;
--connection con_weird
SELECT * FROM t1;
COMMIT;
--connection consistent
--disable_ps2_protocol
--error ER_CHECKREAD
SELECT * FROM t1;
--enable_ps2_protocol
COMMIT;
--connection default
BEGIN; INSERT INTO t1 SET a=2;
--connection con_weird
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
START TRANSACTION WITH CONSISTENT SNAPSHOT; INSERT INTO t2 SET a=3;
--connection consistent
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
START TRANSACTION WITH CONSISTENT SNAPSHOT; INSERT INTO t2 SET a=2;
--connection default
COMMIT;
--connection con_weird
SELECT * FROM t1;
COMMIT;
--disconnect con_weird
--connection consistent
--disable_ps2_protocol
--error ER_CHECKREAD
SELECT * FROM t1;
--enable_ps2_protocol
COMMIT;
--disconnect consistent
--connection default
DROP TABLE t1,t2;
--echo #
--echo # MDEV-37215 SELECT...FOR UPDATE crash under SERIALIZABLE
--echo #
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB;
SELECT * FROM t1 FOR UPDATE;
DROP TABLE t1;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
--source include/wait_until_count_sessions.inc --source include/wait_until_count_sessions.inc
--echo # End of 10.6 tests --echo # End of 10.6 tests

View File

@@ -24,5 +24,20 @@ FLUSH TABLE t1 FOR EXPORT;
-- source include/search_pattern_in_file.inc -- source include/search_pattern_in_file.inc
UNLOCK TABLES; UNLOCK TABLES;
DROP TABLE t1; DROP TABLE t1;
--echo #
--echo # MDEV-37183 innodb_immediate_scrub_data_uncompressed=ON may break
--echo # crash recovery
--echo #
SET GLOBAL innodb_limit_optimistic_insert_debug=0;
# Note: MariaDB 10.6 fails to reproduce the crash; it maps DESC to ASC.
CREATE TABLE t(a VARCHAR(1) PRIMARY KEY,INDEX(a DESC)) ENGINE=InnoDB;
INSERT INTO t VALUES('2'),('1'),(''),('6'),('4'),('3');
SET GLOBAL innodb_limit_optimistic_insert_debug=3;
INSERT INTO t VALUES('8');
CHECK TABLE t;
SELECT COUNT(*) FROM t;
DROP TABLE t;
SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=@save_debug; SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=@save_debug;
SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=@save_scrub; SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=@save_scrub;
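innodb_limit_optimistic_insert_debug (debug builds only) caps how many records a page may hold before it is split, which is how the test above grows a multi-level tree out of eight rows. A minimal sketch of the knob in isolation; the table name s is illustrative:

SET GLOBAL innodb_limit_optimistic_insert_debug=2;
CREATE TABLE s (a INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO s VALUES (1),(2),(3),(4);  -- forces page splits despite only 4 rows
DROP TABLE s;
SET GLOBAL innodb_limit_optimistic_insert_debug=0;  -- 0 is the default (no limit)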

View File

@@ -993,3 +993,15 @@ FTS_DOC_ID f1 f2
4294967298 txt bbb 4294967298 txt bbb
100000000000 aaa bbb 100000000000 aaa bbb
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-30363 Failing assertion: trx->error_state == DB_SUCCESS
# in que_run_threads
#
CREATE TABLE server_stopword (value VARCHAR(1))engine=innodb;
SET GLOBAL innodb_ft_server_stopword_table='test/server_stopword';
CREATE TABLE t (t VARCHAR(1) COLLATE utf8_unicode_ci,
FULLTEXT (t))engine=innodb;
TRUNCATE TABLE t;
DROP TABLE t;
DROP TABLE server_stopword;
SET GLOBAL innodb_ft_server_stopword_table= default;

View File

@@ -967,3 +967,16 @@ CREATE FULLTEXT INDEX i ON t1 (f2);
SELECT * FROM t1 WHERE match(f2) against("bbb"); SELECT * FROM t1 WHERE match(f2) against("bbb");
# Cleanup # Cleanup
DROP TABLE t1; DROP TABLE t1;
--echo #
--echo # MDEV-30363 Failing assertion: trx->error_state == DB_SUCCESS
--echo # in que_run_threads
--echo #
CREATE TABLE server_stopword (value VARCHAR(1))engine=innodb;
SET GLOBAL innodb_ft_server_stopword_table='test/server_stopword';
CREATE TABLE t (t VARCHAR(1) COLLATE utf8_unicode_ci,
FULLTEXT (t))engine=innodb;
TRUNCATE TABLE t;
DROP TABLE t;
DROP TABLE server_stopword;
SET GLOBAL innodb_ft_server_stopword_table= default;

View File

@@ -25,3 +25,15 @@ i
DROP TABLE t1; DROP TABLE t1;
DROP TABLE t2; DROP TABLE t2;
DROP TABLE t21; DROP TABLE t21;
#
# MDEV-36287 mariabackup ignores tables-file option
#
CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB;
CREATE TABLE t2(f1 INT NOT NULL)ENGINE=InnoDB;
INSERT INTO t1 values(1);
# Only backup table t1 by creating tables_file
# Backup with --tables-file option
# table t2 is skipped. Shows only t1
t1.frm
t1.ibd
DROP TABLE t2, t1;

View File

@@ -78,3 +78,25 @@ DROP TABLE t1;
DROP TABLE t2; DROP TABLE t2;
DROP TABLE t21; DROP TABLE t21;
rmdir $targetdir; rmdir $targetdir;
--echo #
--echo # MDEV-36287 mariabackup ignores tables-file option
--echo #
CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB;
CREATE TABLE t2(f1 INT NOT NULL)ENGINE=InnoDB;
INSERT INTO t1 values(1);
let targetdir=$MYSQLTEST_VARDIR/tmp/backup;
let tables_list=$MYSQLTEST_VARDIR/tmp/tables_list.out;
--echo # Only backup table t1 by creating tables_file
--exec echo "test.t1" > $tables_list
--echo # Backup with --tables-file option
--disable_result_log
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --tables-file=$tables_list --target-dir=$targetdir;
--enable_result_log
--echo # table t2 is skipped. Shows only t1
list_files $targetdir/test;
DROP TABLE t2, t1;
rmdir $targetdir;
remove_file $tables_list;
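Condensed to its essentials, the selective-backup recipe above is two mtr lines; $XTRABACKUP is the test suite's handle for the mariadb-backup binary, and the paths reuse this test's variables:

--exec echo "test.t1" > $MYSQLTEST_VARDIR/tmp/tables_list.out
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --tables-file=$MYSQLTEST_VARDIR/tmp/tables_list.out --target-dir=$MYSQLTEST_VARDIR/tmp/backup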

View File

@@ -0,0 +1,216 @@
#
# MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records
#
## INSERT
create table t1 (
id int, s date, e date,
period for p(s,e),
unique(id, p without overlaps)
) engine=innodb;
start transaction;
select * from t1;
id s e
connect con1,localhost,root;
insert t1 values(10, date'2010-10-10', date'2010-11-11');
delete from t1;
set transaction isolation level read committed;
set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
insert t1 values(10, date'2010-09-09', date'2010-11-10');
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
insert t1 values(10, date'2010-10-10', date'2010-11-12');
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 23000: Duplicate entry '10-2010-11-10-2010-09-09' for key 'id'
connection default;
select * from t1;
id s e
commit;
select * from t1;
id s e
10 2010-10-10 2010-11-12
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## UPDATE
create table t1 (
id int, s date, e date,
period for p(s,e),
unique(id, p without overlaps)
) engine=innodb;
start transaction;
select * from t1;
id s e
connect con1,localhost,root;
insert t1 values(10, date'2010-10-10', date'2010-11-11');
delete from t1;
insert t1 values(10, date'2010-09-09', date'2010-09-10');
insert t1 values(10, date'2010-12-10', date'2010-12-12');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
update t1 set e=e + interval 2 month where s=date'2010-09-09';
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set s=date'2010-10-10' where e=date'2010-12-12';
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 23000: Duplicate entry '10-2010-11-10-2010-09-09' for key 'id'
connection default;
select * from t1;
id s e
commit;
select * from t1;
id s e
10 2010-09-09 2010-09-10
10 2010-10-10 2010-12-12
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## INSERT IGNORE
create table t1 (
id int, s date, e date,
period for p(s,e),
unique(id, p without overlaps)
) engine=innodb;
start transaction;
select * from t1;
id s e
connect con1,localhost,root;
insert t1 values(10, date'2010-10-10', date'2010-11-11');
delete from t1;
set transaction isolation level read committed;
set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
insert ignore t1 values(10, date'2010-09-09', date'2010-11-10'),(11, date'2010-09-09', date'2010-11-10');
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
insert t1 values(10, date'2010-10-10', date'2010-11-12');
set debug_sync="now SIGNAL do_insert";
connection con1;
Warnings:
Warning 1062 Duplicate entry '10-2010-11-10-2010-09-09' for key 'id'
connection default;
select * from t1;
id s e
commit;
select * from t1;
id s e
10 2010-10-10 2010-11-12
11 2010-09-09 2010-11-10
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## UPDATE IGNORE
create table t1 (
id int, s date, e date,
period for p(s,e),
unique(id, p without overlaps)
) engine=innodb;
start transaction;
select * from t1;
id s e
connect con1,localhost,root;
insert t1 values(10, date'2010-10-10', date'2010-11-11');
delete from t1;
insert t1 values(10, date'2010-09-09', date'2010-09-10');
insert t1 values(10, date'2010-12-10', date'2010-12-12');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
update ignore t1 set e=e + interval 2 month where s=date'2010-09-09';
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set s=date'2010-10-10' where e=date'2010-12-12';
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint WITHOUT OVERLAPS is not currently supported
connection default;
select * from t1;
id s e
commit;
select * from t1;
id s e
10 2010-09-09 2010-09-10
10 2010-10-10 2010-12-12
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## UPDATE modifying PK
create table t1 (
id int, s date, e date,
period for p(s,e),
primary key (id, p without overlaps)
) engine=innodb;
start transaction;
select * from t1;
id s e
connect con1,localhost,root;
insert t1 values(10, date'2010-10-10', date'2010-11-11');
delete from t1;
insert t1 values(10, date'2010-09-09', date'2010-09-10');
insert t1 values(10, date'2010-12-10', date'2010-12-12');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
update t1 set e=e + interval 2 month where s=date'2010-09-09';
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set s=date'2010-10-10' where e=date'2010-12-12';
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 23000: Duplicate entry '10-2010-11-10-2010-09-09' for key 'PRIMARY'
connection default;
select * from t1;
id s e
commit;
select * from t1;
id s e
10 2010-09-09 2010-09-10
10 2010-10-10 2010-12-12
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
## UPDATE IGNORE modifying PK
create table t1 (
id int, s date, e date,
period for p(s,e),
primary key (id, p without overlaps)
) engine=innodb;
start transaction;
select * from t1;
id s e
connect con1,localhost,root;
insert t1 values(10, date'2010-10-10', date'2010-11-11');
delete from t1;
insert t1 values(10, date'2010-09-09', date'2010-09-10');
insert t1 values(10, date'2010-12-10', date'2010-12-12');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
update ignore t1 set e=e + interval 2 month where s=date'2010-09-09';
connect con2,localhost,root;
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set s=date'2010-10-10' where e=date'2010-12-12';
set debug_sync="now SIGNAL do_insert";
connection con1;
ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint WITHOUT OVERLAPS is not currently supported
connection default;
select * from t1;
id s e
commit;
select * from t1;
id s e
10 2010-09-09 2010-09-10
10 2010-10-10 2010-12-12
disconnect con1;
disconnect con2;
set debug_sync='RESET';
drop table t1;
# End of 10.6 tests

View File

@@ -15,3 +15,20 @@ INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01');
DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01'; DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01';
ERROR 23000: Duplicate entry 'foo' for key 'b' ERROR 23000: Duplicate entry 'foo' for key 'b'
DROP TABLE t1; DROP TABLE t1;
# End of 10.5 tests
#
# MDEV-37312 ASAN errors or assertion failure upon attempt to UPDATE FOR PORTION violating long unique under READ COMMITTED
#
create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb;
insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01');
set transaction isolation level read committed;
update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1;
ERROR 23000: Duplicate entry 'foo' for key 'f'
drop table t1;
create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb partition by hash (a);
insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01');
set transaction isolation level read committed;
update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1;
ERROR 23000: Duplicate entry 'foo' for key 'f'
drop table t1;
# End of 10.6 tests

View File

@@ -0,0 +1,209 @@
--source include/have_innodb.inc
--source include/have_debug_sync.inc
--echo #
--echo # MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records
--echo #
--echo ## INSERT
create table t1 (
id int, s date, e date,
period for p(s,e),
unique(id, p without overlaps)
) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert t1 values(10, date'2010-10-10', date'2010-11-11');
delete from t1;
set transaction isolation level read committed;
set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
--send insert t1 values(10, date'2010-09-09', date'2010-11-10')
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
insert t1 values(10, date'2010-10-10', date'2010-11-12');
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_DUP_ENTRY
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## UPDATE
create table t1 (
id int, s date, e date,
period for p(s,e),
unique(id, p without overlaps)
) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert t1 values(10, date'2010-10-10', date'2010-11-11');
delete from t1;
insert t1 values(10, date'2010-09-09', date'2010-09-10');
insert t1 values(10, date'2010-12-10', date'2010-12-12');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
--send update t1 set e=e + interval 2 month where s=date'2010-09-09'
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set s=date'2010-10-10' where e=date'2010-12-12';
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_DUP_ENTRY
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## INSERT IGNORE
create table t1 (
id int, s date, e date,
period for p(s,e),
unique(id, p without overlaps)
) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert t1 values(10, date'2010-10-10', date'2010-11-11');
delete from t1;
set transaction isolation level read committed;
set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert";
--send insert ignore t1 values(10, date'2010-09-09', date'2010-11-10'),(11, date'2010-09-09', date'2010-11-10')
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
insert t1 values(10, date'2010-10-10', date'2010-11-12');
set debug_sync="now SIGNAL do_insert";
--connection con1
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## UPDATE IGNORE
create table t1 (
id int, s date, e date,
period for p(s,e),
unique(id, p without overlaps)
) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert t1 values(10, date'2010-10-10', date'2010-11-11');
delete from t1;
insert t1 values(10, date'2010-09-09', date'2010-09-10');
insert t1 values(10, date'2010-12-10', date'2010-12-12');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
--send update ignore t1 set e=e + interval 2 month where s=date'2010-09-09'
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set s=date'2010-10-10' where e=date'2010-12-12';
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_NOT_SUPPORTED_YET
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## UPDATE modifying PK
create table t1 (
id int, s date, e date,
period for p(s,e),
primary key (id, p without overlaps)
) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert t1 values(10, date'2010-10-10', date'2010-11-11');
delete from t1;
insert t1 values(10, date'2010-09-09', date'2010-09-10');
insert t1 values(10, date'2010-12-10', date'2010-12-12');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
--send update t1 set e=e + interval 2 month where s=date'2010-09-09'
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set s=date'2010-10-10' where e=date'2010-12-12';
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_DUP_ENTRY
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo ## UPDATE IGNORE modifying PK
create table t1 (
id int, s date, e date,
period for p(s,e),
primary key (id, p without overlaps)
) engine=innodb;
start transaction;
select * from t1;
--connect con1,localhost,root
insert t1 values(10, date'2010-10-10', date'2010-11-11');
delete from t1;
insert t1 values(10, date'2010-09-09', date'2010-09-10');
insert t1 values(10, date'2010-12-10', date'2010-12-12');
set transaction isolation level read committed;
set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert";
--send update ignore t1 set e=e + interval 2 month where s=date'2010-09-09'
--connect con2,localhost,root
set debug_sync="now WAIT_FOR checked_duplicate";
set transaction isolation level read committed;
update t1 set s=date'2010-10-10' where e=date'2010-12-12';
set debug_sync="now SIGNAL do_insert";
--connection con1
--error ER_NOT_SUPPORTED_YET
--reap
--connection default
select * from t1;
commit;
select * from t1;
--disconnect con1
--disconnect con2
set debug_sync='RESET';
drop table t1;
--echo # End of 10.6 tests
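All five scenarios above race two sessions through debug_sync; the invariant they try to break is easier to see single-threaded. A sketch of WITHOUT OVERLAPS semantics, with an illustrative table q (periods are half-open, so ranges that merely touch are allowed):

create table q (
  id int, s date, e date,
  period for p(s,e),
  unique(id, p without overlaps)
) engine=innodb;
insert q values (10, date'2010-01-01', date'2010-02-01');
insert q values (10, date'2010-02-01', date'2010-03-01');  -- ok: [s,e) ranges only touch
insert q values (10, date'2010-01-15', date'2010-02-15');  -- ER_DUP_ENTRY: overlaps both rows
drop table q;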

View File

@@ -1,3 +1,4 @@
--source include/have_innodb.inc
--source include/have_partition.inc --source include/have_partition.inc
--echo # --echo #
@@ -21,3 +22,23 @@ INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01');
DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01'; DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01';
DROP TABLE t1; DROP TABLE t1;
--echo # End of 10.5 tests
--echo #
--echo # MDEV-37312 ASAN errors or assertion failure upon attempt to UPDATE FOR PORTION violating long unique under READ COMMITTED
--echo #
create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb;
insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01');
set transaction isolation level read committed;
--error ER_DUP_ENTRY
update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1;
drop table t1;
create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb partition by hash (a);
insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01');
set transaction isolation level read committed;
--error ER_DUP_ENTRY
update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1;
drop table t1;
--echo # End of 10.6 tests

View File

@@ -88,5 +88,19 @@ c1
3 3
20 20
connection master; connection master;
insert t1 values /*! (100);insert t1 values */ (200) //
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'insert t1 values */ (200)' at line 1
select * from t1;
c1
62
3
20
connection slave;
select * from t1;
c1
62
3
20
connection master;
DROP TABLE t1; DROP TABLE t1;
include/rpl_end.inc include/rpl_end.inc

View File

@@ -7,6 +7,8 @@ call mtr.add_suppression("Could not read packet");
call mtr.add_suppression("Could not write packet"); call mtr.add_suppression("Could not write packet");
set @save_bgc_count= @@global.binlog_commit_wait_count; set @save_bgc_count= @@global.binlog_commit_wait_count;
set @save_bgc_usec= @@global.binlog_commit_wait_usec; set @save_bgc_usec= @@global.binlog_commit_wait_usec;
set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point;
set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave;
set @save_debug_dbug= @@global.debug_dbug; set @save_debug_dbug= @@global.debug_dbug;
set @@global.binlog_commit_wait_count=3; set @@global.binlog_commit_wait_count=3;
set @@global.binlog_commit_wait_usec=10000000; set @@global.binlog_commit_wait_usec=10000000;
@@ -46,8 +48,6 @@ drop table t1, t2, t3;
# the binlogging to semi-sync, and starting the wait for ACK; and during # the binlogging to semi-sync, and starting the wait for ACK; and during
# this pause, semi-sync is manually switched off and on. # this pause, semi-sync is manually switched off and on.
connection master; connection master;
set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point;
set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave;
set @@global.rpl_semi_sync_master_wait_point= AFTER_SYNC; set @@global.rpl_semi_sync_master_wait_point= AFTER_SYNC;
set @@global.rpl_semi_sync_master_wait_no_slave= 1; set @@global.rpl_semi_sync_master_wait_no_slave= 1;
set @@global.debug_dbug="+d,semisync_log_skip_trx_wait"; set @@global.debug_dbug="+d,semisync_log_skip_trx_wait";
@@ -100,7 +100,66 @@ commit;
# Cleanup # Cleanup
connection master; connection master;
drop table tn; drop table tn;
set @@global.debug_dbug=@save_debug_dbug;
#
# MDEV-36934
# The server could hang indefinitely due to a leaked transaction-tracking
# entry that led to a pthread_cond_signal() call on a destroyed condition
# variable. In effect, no connections could commit transactions because a
# thread would be stuck on a never-returning call to pthread_cond_signal()
# while holding Repl_semi_sync_master::LOCK_log.
connection master;
set @@global.rpl_semi_sync_master_wait_point= AFTER_COMMIT;
set @@global.rpl_semi_sync_master_wait_no_slave= 0;
# Ensure servers are in proper state
connection master;
connection slave;
# Test case initial set-up
connection master;
create table t_36934 (a int) engine=innodb;
include/save_master_gtid.inc
connection slave;
include/sync_with_master_gtid.inc
# Pause the user transaction before inserting into Active_tranx
connect user_con,localhost,root,,;
SET debug_sync= 'semisync_at_write_tranx_in_binlog SIGNAL at_write_tranx_in_binlog WAIT_FOR resume_write_tranx_in_binlog';
insert into t_36934 values (1);
connection server_1;
set debug_sync="now wait_for at_write_tranx_in_binlog";
# Disconnect the slave (note that the binlog dump thread won't yet be
# notified of a binlog update from the last transaction, so the slave
# should neither receive nor ACK the transaction).
connection slave;
include/stop_slave.inc
# Waiting for master to realize the slave has disconnected..
connection server_1;
# ..done
# Resuming transaction (it will exit commit_trx early without waiting)
set debug_sync="now signal resume_write_tranx_in_binlog";
connection user_con;
disconnect user_con;
# Force delete the user thread (FLUSH THREADS ensures the thread won't
# stay in the thread cache)
connection master;
FLUSH THREADS;
# BUG: Re-connect slave. MDEV-36934 reports that the master would hang
# when the slave would re-connect and try to ACK the last transaction
# whose thread has been deleted
connection slave;
include/start_slave.inc
# Try to commit another transaction (prior to MDEV-36934 fixes, this
# would hang indefinitely)
connection master;
set debug_sync="RESET";
insert into t_36934 values (2);
connection server_1;
# Waiting 30s for last query to complete..
connection master;
# ..done
# Cleanup
connection master;
set @@global.rpl_semi_sync_master_wait_point= @old_master_wait_point; set @@global.rpl_semi_sync_master_wait_point= @old_master_wait_point;
set @@global.rpl_semi_sync_master_wait_no_slave= @old_master_wait_no_slave; set @@global.rpl_semi_sync_master_wait_no_slave= @old_master_wait_no_slave;
set @@global.debug_dbug=@save_debug_dbug; set @@global.debug_dbug=@save_debug_dbug;
drop table t_36934;
include/rpl_end.inc include/rpl_end.inc

View File

@@ -80,5 +80,17 @@ sync_slave_with_master;
select * from t1; select * from t1;
connection master; connection master;
#
# Bug#37117875 Binlog record error when delimiter is set to other symbols
#
delimiter //;
--error ER_PARSE_ERROR
insert t1 values /*! (100);insert t1 values */ (200) //
delimiter ;//
select * from t1;
sync_slave_with_master;
select * from t1;
connection master;
DROP TABLE t1; DROP TABLE t1;
--source include/rpl_end.inc --source include/rpl_end.inc
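The new negative test leans on /*! ... */ being an executable comment: MariaDB parses its body as SQL, so a statement separator hidden inside it cannot end the statement, and the trailing text fails to parse, as the expected error above shows. A one-line sketch of executable-comment semantics:

select 1 /*! + 1 */ as x;  -- returns 2: the comment body is executed, not skipped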

View File

@@ -34,6 +34,8 @@ call mtr.add_suppression("Could not read packet");
call mtr.add_suppression("Could not write packet"); call mtr.add_suppression("Could not write packet");
set @save_bgc_count= @@global.binlog_commit_wait_count; set @save_bgc_count= @@global.binlog_commit_wait_count;
set @save_bgc_usec= @@global.binlog_commit_wait_usec; set @save_bgc_usec= @@global.binlog_commit_wait_usec;
set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point;
set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave;
set @save_debug_dbug= @@global.debug_dbug; set @save_debug_dbug= @@global.debug_dbug;
set @@global.binlog_commit_wait_count=3; set @@global.binlog_commit_wait_count=3;
set @@global.binlog_commit_wait_usec=10000000; set @@global.binlog_commit_wait_usec=10000000;
@@ -98,8 +100,6 @@ drop table t1, t2, t3;
--connection master --connection master
set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point;
set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave;
set @@global.rpl_semi_sync_master_wait_point= AFTER_SYNC; set @@global.rpl_semi_sync_master_wait_point= AFTER_SYNC;
set @@global.rpl_semi_sync_master_wait_no_slave= 1; set @@global.rpl_semi_sync_master_wait_no_slave= 1;
--eval set @@global.debug_dbug="+d,semisync_log_skip_trx_wait" --eval set @@global.debug_dbug="+d,semisync_log_skip_trx_wait"
@@ -190,8 +190,108 @@ commit;
--echo # Cleanup --echo # Cleanup
--connection master --connection master
drop table tn; drop table tn;
set @@global.debug_dbug=@save_debug_dbug;
--echo #
--echo # MDEV-36934
--echo # The server could hang indefinitely due to a leaked transaction-tracking
--echo # entry that led to a pthread_cond_signal() call on a destroyed condition
--echo # variable. In effect, no connections could commit transactions because a
--echo # thread would be stuck on a never-returning call to pthread_cond_signal()
--echo # while holding Repl_semi_sync_master::LOCK_log.
--connection master
set @@global.rpl_semi_sync_master_wait_point= AFTER_COMMIT;
set @@global.rpl_semi_sync_master_wait_no_slave= 0;
--echo # Ensure servers are in proper state
--connection master
let $status_var= rpl_semi_sync_master_status;
let $status_var_value= ON;
source include/wait_for_status_var.inc;
--connection slave
let $status_var= rpl_semi_sync_slave_status;
let $status_var_value= ON;
source include/wait_for_status_var.inc;
--echo # Test case initial set-up
--connection master
create table t_36934 (a int) engine=innodb;
--source include/save_master_gtid.inc
--connection slave
--source include/sync_with_master_gtid.inc
--echo # Pause the user transaction before inserting into Active_tranx
--connect(user_con,localhost,root,,)
SET debug_sync= 'semisync_at_write_tranx_in_binlog SIGNAL at_write_tranx_in_binlog WAIT_FOR resume_write_tranx_in_binlog';
--send insert into t_36934 values (1)
--connection server_1
set debug_sync="now wait_for at_write_tranx_in_binlog";
--echo # Disconnect the slave (note that the binlog dump thread won't yet be
--echo # notified of a binlog update from the last transaction, so the slave
--echo # should neither receive nor ACK the transaction).
--connection slave
--source include/stop_slave.inc
--echo # Waiting for master to realize the slave has disconnected..
--connection server_1
let $status_var= rpl_semi_sync_master_clients;
let $status_var_value= 0;
source include/wait_for_status_var.inc;
--echo # ..done
--echo # Resuming transaction (it will exit commit_trx early without waiting)
set debug_sync="now signal resume_write_tranx_in_binlog";
--connection user_con
--reap
--let $user_con_tid= `SELECT connection_id()`
--disconnect user_con
--source include/wait_until_disconnected.inc
--echo # Force delete the user thread (FLUSH THREADS ensures the thread won't
--echo # stay in the thread cache)
--connection master
FLUSH THREADS;
--echo # BUG: Re-connect slave. MDEV-36934 reports that the master would hang
--echo # when the slave would re-connect and try to ACK the last transaction
--echo # whose thread has been deleted
--connection slave
--source include/start_slave.inc
--echo # Try to commit another transaction (prior to MDEV-36934 fixes, this
--echo # would hang indefinitely)
--connection master
set debug_sync="RESET";
--send insert into t_36934 values (2)
--connection server_1
--echo # Waiting 30s for last query to complete..
--let $wait_timeout= 30
--let $wait_condition= SELECT count(*)=0 FROM information_schema.processlist WHERE info LIKE 'insert into t_36934%';
--source include/wait_condition.inc
# Variable `success` is set by wait_condition.inc
if (!$success)
{
--echo # ..error
--die Query is hung
}
--connection master
--reap
--echo # ..done
--echo # Cleanup
--connection master
set @@global.rpl_semi_sync_master_wait_point= @old_master_wait_point; set @@global.rpl_semi_sync_master_wait_point= @old_master_wait_point;
set @@global.rpl_semi_sync_master_wait_no_slave= @old_master_wait_no_slave; set @@global.rpl_semi_sync_master_wait_no_slave= @old_master_wait_no_slave;
set @@global.debug_dbug=@save_debug_dbug; set @@global.debug_dbug=@save_debug_dbug;
drop table t_36934;
--source include/rpl_end.inc --source include/rpl_end.inc

View File

@@ -292,6 +292,25 @@ a b
10 j 10 j
DROP TABLE t1; DROP TABLE t1;
DROP SEQUENCE s1; DROP SEQUENCE s1;
#
# End of 10.3 tests # End of 10.3 tests
# in UPDATE
create sequence s1 cache 0;
create table t1 (id int unsigned default nextval(s1));
insert t1 values ();
update t1 set id=default;
prepare stmt from "update t1 set id=?";
execute stmt using default;
deallocate prepare stmt;
drop table t1;
drop sequence s1;
# #
# MDEV-37302 Assertion failure in Table_triggers_list::add_tables_and_routines_for_triggers upon attempt to insert DEFAULT into non-insertable view
#
create table t1 (f int);
create algorithm=temptable view v1 as select * from t1;
create trigger tr before update on t1 for each row set @a=1;
insert v1 values (default);
ERROR HY000: The target table v1 of the INSERT is not insertable-into
drop view v1;
drop table t1;
# End of 10.6 tests

View File

@@ -216,6 +216,28 @@ SELECT a, b FROM t1;
DROP TABLE t1; DROP TABLE t1;
DROP SEQUENCE s1; DROP SEQUENCE s1;
--echo #
--echo # End of 10.3 tests --echo # End of 10.3 tests
--echo # in UPDATE
create sequence s1 cache 0;
create table t1 (id int unsigned default nextval(s1));
insert t1 values ();
update t1 set id=default;
prepare stmt from "update t1 set id=?";
execute stmt using default;
deallocate prepare stmt;
drop table t1;
drop sequence s1;
--echo # --echo #
--echo # MDEV-37302 Assertion failure in Table_triggers_list::add_tables_and_routines_for_triggers upon attempt to insert DEFAULT into non-insertable view
--echo #
create table t1 (f int);
create algorithm=temptable view v1 as select * from t1;
create trigger tr before update on t1 for each row set @a=1;
--error ER_NON_INSERTABLE_TABLE
insert v1 values (default);
drop view v1;
drop table t1;
--echo # End of 10.6 tests
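The "in UPDATE" block extends sequence-backed defaults to UPDATE ... SET col=DEFAULT and to a prepared-statement parameter bound to DEFAULT. The observable behavior, sketched in a single session with illustrative names t and s:

create sequence s cache 0;
create table t (id int unsigned default nextval(s), b int);
insert t (b) values (1),(2);        -- id = 1, 2: each defaulted row advances s
update t set id=default where b=1;  -- id becomes 3: DEFAULT evaluates nextval again
drop table t;
drop sequence s;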

View File

@@ -97,7 +97,92 @@ ERROR 42000: SELECT, INSERT command denied to user 'u'@'localhost' for table `my
disconnect con1; disconnect con1;
connection default; connection default;
drop user u; drop user u;
create user u_alter;
create table t1 (id int);
grant alter on t1 to u_alter;
connect con_alter,localhost,u_alter,,mysqltest_1;
alter table t1 modify id int default nextval(s1);
ERROR 42000: SELECT, INSERT command denied to user 'u_alter'@'localhost' for table `mysqltest_1`.`s1`
connection default;
grant insert, select on s1 to u_alter;
connection con_alter;
alter table t1 modify id int default nextval(s1);
disconnect con_alter;
connection default;
drop user u_alter;
drop database mysqltest_1; drop database mysqltest_1;
# #
# End of 10.11 tests # MDEV-36870 Spurious unrelated permission error when selecting from table with default that uses nextval(sequence)
# #
create database db1;
use db1;
create sequence s1 cache 0;
create table t1 (id int unsigned default (10+nextval(s1)));
insert t1 values ();
create table t2 (id int unsigned default nextval(s1), b int default(default(id)));
insert t2 values ();
create function f1(x int) returns int sql security invoker
begin
select id+x into x from t1;
return x;
insert t1 values ();
end|
create user u1@localhost;
grant select on db1.* to u1@localhost;
grant execute on db1.* to u1@localhost;
grant all privileges on test.* to u1@localhost;
use test;
create table t3 (id int unsigned default (20+nextval(db1.s1)), b int);
insert t3 values ();
create sequence s2 cache 0;
create table t4 (id int unsigned default (10+nextval(s2)), b int);
insert t4 values ();
connect u1,localhost,u1,,db1;
select * from t1;
id
11
connection default;
flush tables;
connection u1;
select * from t1;
id
11
select default(id) from t1;
ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1`
select * from t2;
id b
2 3
select f1(100);
f1(100)
111
select column_name, data_type, column_default from information_schema.columns where table_schema='db1' and table_name='t1';
column_name data_type column_default
id int (10 + nextval(`db1`.`s1`))
use test;
insert t3 values ();
ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1`
insert t4 values ();
insert t3 (b) select 5;
ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1`
insert t4 (b) select 5;
update t3 set id=default;
ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1`
update t4 set id=default;
prepare stmt from "update t3 set id=?";
execute stmt using default;
ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1`
prepare stmt from "update t4 set id=?";
execute stmt using default;
deallocate prepare stmt;
insert t4 (b) values ((select * from db1.t1));
insert t4 (b) values ((select default(id) from db1.t1));
ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1`
connection default;
disconnect u1;
select nextval(db1.s1) as 'must be 5';
must be 5
5
drop user u1@localhost;
drop database db1;
drop table t3, t4, s2;
# End of 10.6 tests

View File

@@ -106,12 +106,121 @@ create table t1 (a int not null default(nextval(s1)),
--connection default --connection default
drop user u; drop user u;
# # ALTER for table with DEFAULT NEXTVAL(seq) column needs INSERT/SELECT on seq
# Cleanup # just like CREATE does in the example above
# create user u_alter;
create table t1 (id int);
grant alter on t1 to u_alter;
--connect(con_alter,localhost,u_alter,,mysqltest_1)
--error ER_TABLEACCESS_DENIED_ERROR
alter table t1 modify id int default nextval(s1);
--connection default
grant insert, select on s1 to u_alter;
--connection con_alter
alter table t1 modify id int default nextval(s1);
--disconnect con_alter
--connection default
drop user u_alter;
drop database mysqltest_1; drop database mysqltest_1;
--echo # --echo #
--echo # End of 10.11 tests --echo # MDEV-36870 Spurious unrelated permission error when selecting from table with default that uses nextval(sequence)
--echo # --echo #
# various tests for permission checking on sequences
create database db1;
use db1;
create sequence s1 cache 0;
create table t1 (id int unsigned default (10+nextval(s1)));
insert t1 values ();
create table t2 (id int unsigned default nextval(s1), b int default(default(id)));
insert t2 values ();
# INSERT affects prelocking, but is never actually executed
delimiter |;
create function f1(x int) returns int sql security invoker
begin
select id+x into x from t1;
return x;
insert t1 values ();
end|
delimiter ;|
create user u1@localhost;
grant select on db1.* to u1@localhost;
grant execute on db1.* to u1@localhost;
grant all privileges on test.* to u1@localhost;
use test;
create table t3 (id int unsigned default (20+nextval(db1.s1)), b int);
insert t3 values ();
create sequence s2 cache 0;
create table t4 (id int unsigned default (10+nextval(s2)), b int);
insert t4 values ();
connect u1,localhost,u1,,db1;
# table already in the cache. must be re-fixed
# SELECT * - no error
select * from t1;
# not in cache
connection default;
flush tables;
connection u1;
# SELECT * - no error
select * from t1;
# SELECT DEFAULT() - error
--error ER_TABLEACCESS_DENIED_ERROR
select default(id) from t1;
# default(default(nextval))
select * from t2;
# SELECT but table has TL_WRITE because of prelocking
select f1(100);
# opening the table for I_S
select column_name, data_type, column_default from information_schema.columns where table_schema='db1' and table_name='t1';
use test;
# insert
--error ER_TABLEACCESS_DENIED_ERROR
insert t3 values ();
insert t4 values ();
#insert select
--error ER_TABLEACCESS_DENIED_ERROR
insert t3 (b) select 5;
insert t4 (b) select 5;
#update
--error ER_TABLEACCESS_DENIED_ERROR
update t3 set id=default;
update t4 set id=default;
# PS UPDATE with ? = DEFAULT
prepare stmt from "update t3 set id=?";
--error ER_TABLEACCESS_DENIED_ERROR
execute stmt using default;
prepare stmt from "update t4 set id=?";
execute stmt using default;
deallocate prepare stmt;
# SELECT * in a subquery, like INSERT t3 VALUES ((SELECT * FROM t1));
# with sequences both on t3 and t1
insert t4 (b) values ((select * from db1.t1));
--error ER_TABLEACCESS_DENIED_ERROR
insert t4 (b) values ((select default(id) from db1.t1));
connection default;
disconnect u1;
--disable_ps2_protocol
select nextval(db1.s1) as 'must be 5';
--enable_ps2_protocol
drop user u1@localhost;
drop database db1;
drop table t3, t4, s2;
--echo # End of 10.6 tests
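The privilege rule these cases pin down: NEXTVAL both reads and advances the sequence, so any path that actually evaluates such a DEFAULT needs SELECT and INSERT on the sequence, while merely reading stored rows needs neither. A sketch with a hypothetical user u2:

create user u2@localhost;
grant select on db1.s1 to u2@localhost;  -- u2 may read s1, but NEXTVAL is denied
grant insert on db1.s1 to u2@localhost;  -- with both grants NEXTVAL(db1.s1) works
drop user u2@localhost;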

View File

@@ -1,4 +1,5 @@
SET GLOBAL wsrep_on=ON; SET GLOBAL wsrep_on=ON;
ERROR HY000: Galera replication not supported
SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size'; SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size';
Variable_name Value Variable_name Value
wsrep_cluster_size 0 wsrep_cluster_size 0

View File

@@ -140,7 +140,11 @@ WSREP_SST_DONOR_REJECTS_QUERIES
WSREP_SST_METHOD WSREP_SST_METHOD
WSREP_SST_RECEIVE_ADDRESS WSREP_SST_RECEIVE_ADDRESS
WSREP_START_POSITION WSREP_START_POSITION
<<<<<<< HEAD
WSREP_STATUS_FILE WSREP_STATUS_FILE
=======
WSREP_STRICT_DDL
>>>>>>> bb-10.6-serg
WSREP_SYNC_WAIT WSREP_SYNC_WAIT
WSREP_TRX_FRAGMENT_SIZE WSREP_TRX_FRAGMENT_SIZE
WSREP_TRX_FRAGMENT_UNIT WSREP_TRX_FRAGMENT_UNIT

View File

@@ -0,0 +1,6 @@
SET GLOBAL wsrep_on=ON;
ERROR HY000: Galera replication not supported
REPAIR TABLE performance_schema.setup_objects;
Table Op Msg_type Msg_text
performance_schema.setup_objects repair note The storage engine for the table doesn't support repair
SET GLOBAL wsrep_on=OFF;

View File

@@ -5,6 +5,7 @@
--source include/have_wsrep_provider.inc --source include/have_wsrep_provider.inc
--source include/have_binlog_format_row.inc --source include/have_binlog_format_row.inc
--error ER_GALERA_REPLICATION_NOT_SUPPORTED
SET GLOBAL wsrep_on=ON; SET GLOBAL wsrep_on=ON;
SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size'; SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size';
SET GLOBAL wsrep_on=OFF; SET GLOBAL wsrep_on=OFF;

View File

@@ -3,7 +3,7 @@
--source include/have_innodb.inc --source include/have_innodb.inc
--source include/galera_no_debug_sync.inc --source include/galera_no_debug_sync.inc
--let $galera_version=26.4.21 --let $galera_version=26.4.23
source include/check_galera_version.inc; source include/check_galera_version.inc;
source include/galera_variables_ok.inc; source include/galera_variables_ok.inc;

View File

@@ -5,7 +5,7 @@
--source include/have_debug_sync.inc --source include/have_debug_sync.inc
--source include/galera_have_debug_sync.inc --source include/galera_have_debug_sync.inc
--let $galera_version=26.4.21 --let $galera_version=26.4.23
source include/check_galera_version.inc; source include/check_galera_version.inc;
source include/galera_variables_ok_debug.inc; source include/galera_variables_ok_debug.inc;

View File

@@ -0,0 +1,17 @@
# Use default setting for mysqld processes
!include include/default_mysqld.cnf
[mysqld]
wsrep-on=OFF
wsrep-provider=@ENV.WSREP_PROVIDER
log-bin
binlog-format=row
loose-wsrep_cluster_address=gcomm://
loose-wsrep_node_address='127.0.0.1:@mysqld.1.#galera_port'
loose-wsrep-incoming-address=127.0.0.1:@mysqld.1.port
[mysqld.1]
wsrep-on=OFF
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port

View File

@@ -0,0 +1,8 @@
--source include/have_innodb.inc
--source include/have_wsrep_provider.inc
--source include/have_binlog_format_row.inc
--error ER_GALERA_REPLICATION_NOT_SUPPORTED
SET GLOBAL wsrep_on=ON;
REPAIR TABLE performance_schema.setup_objects;
SET GLOBAL wsrep_on=OFF;
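The accompanying .cnf starts the node with wsrep-on=OFF, so the provider is never initialized; the test verifies that enabling replication at runtime is rejected cleanly rather than failing later. Sketched against such a server:

SET GLOBAL wsrep_on=ON;   -- ER_GALERA_REPLICATION_NOT_SUPPORTED: no provider loaded
REPAIR TABLE performance_schema.setup_objects;  -- still works; replication stays off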

View File

@@ -25,6 +25,9 @@
#else #else
# include <cpuid.h> # include <cpuid.h>
# ifdef __APPLE__ /* AVX512 states are not enabled in XCR0 */ # ifdef __APPLE__ /* AVX512 states are not enabled in XCR0 */
# elif __GNUC__ >= 15
# define TARGET "pclmul,avx10.1,vpclmulqdq"
# define USE_VPCLMULQDQ __attribute__((target(TARGET)))
# elif __GNUC__ >= 14 || (defined __clang_major__ && __clang_major__ >= 18) # elif __GNUC__ >= 14 || (defined __clang_major__ && __clang_major__ >= 18)
# define TARGET "pclmul,evex512,avx512f,avx512dq,avx512bw,avx512vl,vpclmulqdq" # define TARGET "pclmul,evex512,avx512f,avx512dq,avx512bw,avx512vl,vpclmulqdq"
# define USE_VPCLMULQDQ __attribute__((target(TARGET))) # define USE_VPCLMULQDQ __attribute__((target(TARGET)))

View File

@@ -38,7 +38,7 @@ do { \
if (swap_ptrs) \ if (swap_ptrs) \
{ \ { \
reg1 char **a = (char**) (A), **b = (char**) (B); \ reg1 char **a = (char**) (A), **b = (char**) (B); \
char *tmp = *a; *a++ = *b; *b++ = tmp; \ char *tmp = *a; *a = *b; *b = tmp; \
} \ } \
else \ else \
{ \ { \
@@ -190,16 +190,16 @@ qsort_t my_qsort(void *base_ptr, size_t count, size_t size, qsort_cmp cmp)
This ensures that the stack is kept small. This ensures that the stack is kept small.
*/ */
if ((int) (high_ptr - low) <= 0) if ((longlong) (high_ptr - low) <= 0)
{ {
if ((int) (high - low_ptr) <= 0) if ((longlong) (high - low_ptr) <= 0)
{ {
POP(low, high); /* Nothing more to sort */ POP(low, high); /* Nothing more to sort */
} }
else else
low = low_ptr; /* Ignore small left part. */ low = low_ptr; /* Ignore small left part. */
} }
else if ((int) (high - low_ptr) <= 0) else if ((longlong) (high - low_ptr) <= 0)
high = high_ptr; /* Ignore small right part. */ high = high_ptr; /* Ignore small right part. */
else if ((high_ptr - low) > (high - low_ptr)) else if ((high_ptr - low) > (high - low_ptr))
{ {

View File

@@ -8426,6 +8426,59 @@ Field *Field_varstring::make_new_field(MEM_ROOT *root, TABLE *new_table,
} }
Field *Field_varstring_compressed::make_new_field(MEM_ROOT *root, TABLE *new_table,
bool keep_type)
{
Field_varstring *res;
if (new_table->s->is_optimizer_tmp_table())
{
/*
Compressed field cannot be part of a key. For optimizer temporary
table we create an uncompressed substitute.
*/
res= new (root) Field_varstring(ptr, field_length, length_bytes, null_ptr,
null_bit, Field::NONE, &field_name,
new_table->s, charset());
if (res)
{
res->init_for_make_new_field(new_table, orig_table);
/* See Column_definition::create_length_to_internal_length_string() */
res->field_length--;
}
}
else
res= (Field_varstring*) Field::make_new_field(root, new_table, keep_type);
if (res)
res->length_bytes= length_bytes;
return res;
}
Field *Field_blob_compressed::make_new_field(MEM_ROOT *root, TABLE *new_table,
bool keep_type)
{
Field_blob *res;
if (new_table->s->is_optimizer_tmp_table())
{
/*
Compressed field cannot be part of a key. For optimizer temporary
table we create an uncompressed substitute.
*/
res= new (root) Field_blob(ptr, null_ptr, null_bit, Field::NONE, &field_name,
new_table->s, packlength, charset());
if (res)
{
res->init_for_make_new_field(new_table, orig_table);
/* See Column_definition::create_length_to_internal_length_string() */
res->field_length--;
}
}
else
res= (Field_blob *) Field::make_new_field(root, new_table, keep_type);
return res;
}
Field *Field_varstring::new_key_field(MEM_ROOT *root, TABLE *new_table, Field *Field_varstring::new_key_field(MEM_ROOT *root, TABLE *new_table,
uchar *new_ptr, uint32 length, uchar *new_ptr, uint32 length,
uchar *new_null_ptr, uint new_null_bit) uchar *new_null_ptr, uint new_null_bit)

View File

@@ -658,6 +658,7 @@ public:
bool fix_session_expr(THD *thd); bool fix_session_expr(THD *thd);
bool cleanup_session_expr(); bool cleanup_session_expr();
bool fix_and_check_expr(THD *thd, TABLE *table); bool fix_and_check_expr(THD *thd, TABLE *table);
bool check_access(THD *thd);
inline bool is_equal(const Virtual_column_info* vcol) const; inline bool is_equal(const Virtual_column_info* vcol) const;
/* Same as is_equal() but for comparing with different table */ /* Same as is_equal() but for comparing with different table */
bool is_equivalent(THD *thd, TABLE_SHARE *share, TABLE_SHARE *vcol_share, bool is_equivalent(THD *thd, TABLE_SHARE *share, TABLE_SHARE *vcol_share,
@@ -1542,7 +1543,14 @@ public:
{ {
ptr=ADD_TO_PTR(ptr,ptr_diff, uchar*); ptr=ADD_TO_PTR(ptr,ptr_diff, uchar*);
if (null_ptr) if (null_ptr)
{
null_ptr=ADD_TO_PTR(null_ptr,ptr_diff,uchar*); null_ptr=ADD_TO_PTR(null_ptr,ptr_diff,uchar*);
if (table)
{
DBUG_ASSERT(null_ptr < ptr);
DBUG_ASSERT(ptr - null_ptr <= (int)table->s->rec_buff_length);
}
}
} }
/* /*
@@ -4327,6 +4335,7 @@ private:
{ DBUG_ASSERT(0); return 0; } { DBUG_ASSERT(0); return 0; }
using Field_varstring::key_cmp; using Field_varstring::key_cmp;
Binlog_type_info binlog_type_info() const override; Binlog_type_info binlog_type_info() const override;
Field *make_new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type) override;
}; };
@@ -4768,6 +4777,7 @@ private:
override override
{ DBUG_ASSERT(0); return 0; } { DBUG_ASSERT(0); return 0; }
Binlog_type_info binlog_type_info() const override; Binlog_type_info binlog_type_info() const override;
Field *make_new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type) override;
}; };

View File

@@ -409,7 +409,6 @@ void ha_partition::init_handler_variables()
m_top_entry= NO_CURRENT_PART_ID; m_top_entry= NO_CURRENT_PART_ID;
m_rec_length= 0; m_rec_length= 0;
m_last_part= 0; m_last_part= 0;
m_rec0= 0;
m_err_rec= NULL; m_err_rec= NULL;
m_curr_key_info[0]= NULL; m_curr_key_info[0]= NULL;
m_curr_key_info[1]= NULL; m_curr_key_info[1]= NULL;
@@ -2209,7 +2208,7 @@ int ha_partition::copy_partitions(ulonglong * const copied,
goto init_error; goto init_error;
while (TRUE) while (TRUE)
{ {
if ((result= file->ha_rnd_next(m_rec0))) if ((result= file->ha_rnd_next(table->record[0])))
{ {
if (result != HA_ERR_END_OF_FILE) if (result != HA_ERR_END_OF_FILE)
goto error; goto error;
@@ -2235,7 +2234,7 @@ int ha_partition::copy_partitions(ulonglong * const copied,
/* Copy record to new handler */ /* Copy record to new handler */
(*copied)++; (*copied)++;
DBUG_ASSERT(!m_new_file[new_part]->row_logging); DBUG_ASSERT(!m_new_file[new_part]->row_logging);
result= m_new_file[new_part]->ha_write_row(m_rec0); result= m_new_file[new_part]->ha_write_row(table->record[0]);
if (result) if (result)
goto error; goto error;
} }
@@ -3818,7 +3817,6 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
DBUG_RETURN(HA_ERR_INITIALIZATION); DBUG_RETURN(HA_ERR_INITIALIZATION);
} }
m_start_key.length= 0; m_start_key.length= 0;
m_rec0= table->record[0];
m_rec_length= table_share->reclength; m_rec_length= table_share->reclength;
if (!m_part_ids_sorted_by_num_of_records) if (!m_part_ids_sorted_by_num_of_records)
{ {
@@ -4731,15 +4729,15 @@ int ha_partition::update_row(const uchar *old_data, const uchar *new_data)
*/ */
{ {
Abort_on_warning_instant_set old_abort_on_warning(thd, 0); Abort_on_warning_instant_set old_abort_on_warning(thd, 0);
error= get_part_for_buf(old_data, m_rec0, m_part_info, &old_part_id); error= get_part_for_buf(old_data, table->record[0], m_part_info, &old_part_id);
} }
DBUG_ASSERT(!error); DBUG_ASSERT(!error);
DBUG_ASSERT(old_part_id == m_last_part); DBUG_ASSERT(old_part_id == m_last_part);
DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), old_part_id)); DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), old_part_id));
#endif #endif
if (unlikely((error= get_part_for_buf(new_data, m_rec0, m_part_info, if (unlikely((error= get_part_for_buf(new_data, table->record[0],
&new_part_id)))) m_part_info, &new_part_id))))
goto exit; goto exit;
if (unlikely(!bitmap_is_set(&(m_part_info->lock_partitions), new_part_id))) if (unlikely(!bitmap_is_set(&(m_part_info->lock_partitions), new_part_id)))
{ {
@@ -5567,7 +5565,7 @@ int ha_partition::rnd_pos_by_record(uchar *record)
{ {
DBUG_ENTER("ha_partition::rnd_pos_by_record"); DBUG_ENTER("ha_partition::rnd_pos_by_record");
if (unlikely(get_part_for_buf(record, m_rec0, m_part_info, &m_last_part))) if (unlikely(get_part_for_buf(record, table->record[0], m_part_info, &m_last_part)))
DBUG_RETURN(1); DBUG_RETURN(1);
int err= m_file[m_last_part]->rnd_pos_by_record(record); int err= m_file[m_last_part]->rnd_pos_by_record(record);
@@ -6351,7 +6349,7 @@ int ha_partition::read_range_first(const key_range *start_key,
m_start_key.key= NULL; m_start_key.key= NULL;
m_index_scan_type= partition_read_range; m_index_scan_type= partition_read_range;
error= common_index_read(m_rec0, MY_TEST(start_key)); error= common_index_read(table->record[0], MY_TEST(start_key));
DBUG_RETURN(error); DBUG_RETURN(error);
} }
@@ -10351,7 +10349,7 @@ void ha_partition::print_error(int error, myf errflag)
str.append('('); str.append('(');
str.append_ulonglong(m_last_part); str.append_ulonglong(m_last_part);
str.append(STRING_WITH_LEN(" != ")); str.append(STRING_WITH_LEN(" != "));
if (get_part_for_buf(m_err_rec, m_rec0, m_part_info, &part_id)) if (get_part_for_buf(m_err_rec, table->record[0], m_part_info, &part_id))
str.append('?'); str.append('?');
else else
str.append_ulonglong(part_id); str.append_ulonglong(part_id);
@@ -11336,7 +11334,7 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair)
while (true) while (true)
{ {
if ((result= m_file[read_part_id]->ha_rnd_next(m_rec0))) if ((result= m_file[read_part_id]->ha_rnd_next(table->record[0])))
{ {
if (result != HA_ERR_END_OF_FILE) if (result != HA_ERR_END_OF_FILE)
break; break;
@@ -11382,7 +11380,7 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair)
Insert row into correct partition. Notice that there are no commit Insert row into correct partition. Notice that there are no commit
for every N row, so the repair will be one large transaction! for every N row, so the repair will be one large transaction!
*/ */
if ((result= m_file[correct_part_id]->ha_write_row(m_rec0))) if ((result= m_file[correct_part_id]->ha_write_row(table->record[0])))
{ {
/* /*
We have failed to insert a row, it might have been a duplicate! We have failed to insert a row, it might have been a duplicate!
@@ -11426,7 +11424,7 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair)
} }
/* Delete row from wrong partition. */ /* Delete row from wrong partition. */
if ((result= m_file[read_part_id]->ha_delete_row(m_rec0))) if ((result= m_file[read_part_id]->ha_delete_row(table->record[0])))
{ {
if (m_file[correct_part_id]->has_transactions_and_rollback()) if (m_file[correct_part_id]->has_transactions_and_rollback())
break; break;
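A recurring change across this file: the m_rec0 cache of table->record[0] is gone and the live pointer is read at each use. A toy model of why caching such a pointer is fragile (hypothetical struct, not the real partition handler):

#include <cstdio>

struct Table
{
  char bufA[8]= "old";
  char bufB[8]= "new";
  char *record0= bufA;
};

int main()
{
  Table t;
  char *m_rec0= t.record0;   // cached once, as the removed open() code did
  t.record0= t.bufB;         // record buffer later remapped
  std::printf("cached=%s live=%s\n", m_rec0, t.record0);
  return 0;
}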

View File

@@ -322,7 +322,6 @@ private:
and if clustered pk, [0]= current index, [1]= pk, [2]= NULL and if clustered pk, [0]= current index, [1]= pk, [2]= NULL
*/ */
KEY *m_curr_key_info[3]; // Current index KEY *m_curr_key_info[3]; // Current index
uchar *m_rec0; // table->record[0]
const uchar *m_err_rec; // record which gave error const uchar *m_err_rec; // record which gave error
QUEUE m_queue; // Prio queue used by sorted read QUEUE m_queue; // Prio queue used by sorted read

View File

@@ -503,6 +503,8 @@ int ha_init_errors(void)
SETMSG(HA_ERR_FTS_TOO_MANY_WORDS_IN_PHRASE, "Too many words in a FTS phrase or proximity search"); SETMSG(HA_ERR_FTS_TOO_MANY_WORDS_IN_PHRASE, "Too many words in a FTS phrase or proximity search");
SETMSG(HA_ERR_FK_DEPTH_EXCEEDED, "Foreign key cascade delete/update exceeds"); SETMSG(HA_ERR_FK_DEPTH_EXCEEDED, "Foreign key cascade delete/update exceeds");
SETMSG(HA_ERR_TABLESPACE_MISSING, ER_DEFAULT(ER_TABLESPACE_MISSING)); SETMSG(HA_ERR_TABLESPACE_MISSING, ER_DEFAULT(ER_TABLESPACE_MISSING));
SETMSG(HA_ERR_INCOMPATIBLE_DEFINITION,
"Mismatch between table definitions in sql and storage layer");
/* Register the error messages for use with my_error(). */ /* Register the error messages for use with my_error(). */
return my_error_register(get_handler_errmsgs, HA_ERR_FIRST, HA_ERR_LAST); return my_error_register(get_handler_errmsgs, HA_ERR_FIRST, HA_ERR_LAST);
@@ -3368,7 +3370,7 @@ int handler::create_lookup_handler()
if (!(tmp= clone(table->s->normalized_path.str, table->in_use->mem_root))) if (!(tmp= clone(table->s->normalized_path.str, table->in_use->mem_root)))
return 1; return 1;
lookup_handler= tmp; lookup_handler= tmp;
return lookup_handler->ha_external_lock(table->in_use, F_RDLCK); return lookup_handler->ha_external_lock(table->in_use, F_WRLCK);
} }
LEX_CSTRING *handler::engine_name() LEX_CSTRING *handler::engine_name()
@@ -6115,7 +6117,8 @@ int ha_create_table(THD *thd, const char *path, const char *db,
name= get_canonical_filename(table.file, share.path.str, name_buff); name= get_canonical_filename(table.file, share.path.str, name_buff);
error= table.file->ha_create(name, &table, create_info); error= table.check_sequence_privileges(thd) ? 1 :
table.file->ha_create(name, &table, create_info);
if (unlikely(error)) if (unlikely(error))
{ {
@@ -7353,10 +7356,10 @@ int handler::ha_reset()
DBUG_RETURN(reset()); DBUG_RETURN(reset());
} }
#ifdef WITH_WSREP
static int wsrep_after_row(THD *thd) static int wsrep_after_row(THD *thd)
{ {
DBUG_ENTER("wsrep_after_row"); DBUG_ENTER("wsrep_after_row");
#ifdef WITH_WSREP
if (thd->internal_transaction()) if (thd->internal_transaction())
DBUG_RETURN(0); DBUG_RETURN(0);
@@ -7380,9 +7383,32 @@ static int wsrep_after_row(THD *thd)
{ {
DBUG_RETURN(ER_LOCK_DEADLOCK); DBUG_RETURN(ER_LOCK_DEADLOCK);
} }
#endif /* WITH_WSREP */
DBUG_RETURN(0); DBUG_RETURN(0);
} }
#endif /* WITH_WSREP */
static bool long_unique_fields_differ(KEY *keyinfo, const uchar *other)
{
uint key_parts= fields_in_hash_keyinfo(keyinfo);
KEY_PART_INFO *keypart= keyinfo->key_part - key_parts;
my_ptrdiff_t off= other - keypart->field->table->record[0];
DBUG_ASSERT(off);
do
{
Field *field= keypart->field;
if (field->is_null() || field->is_null(off))
return true;
else if (f_is_blob(keypart->key_type) && keypart->length)
{
if (field->cmp_prefix(field->ptr, field->ptr + off, keypart->length))
return true;
}
else if (field->cmp_offset(off))
return true;
} while (keypart++ < keyinfo->key_part);
return false;
}
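long_unique_fields_differ() uses the server's record-offset idiom: a single Field object reads its value from record[0] via field->ptr, or from another record image at a fixed byte offset. A hedged standalone sketch of that comparison (simplified types, not the real Field API):

#include <cstddef>
#include <cstdio>
#include <cstring>

struct MiniField
{
  const unsigned char *ptr;  // value position inside record[0]
  std::size_t len;
  // compare record[0]'s value with the same column at +off bytes
  bool cmp_offset(std::ptrdiff_t off) const
  { return std::memcmp(ptr, ptr + off, len) != 0; }
};

int main()
{
  unsigned char recs[16]= "abc\0\0\0\0\0abd";  // two record images 8 bytes apart
  MiniField f{recs, 3};
  std::printf("differ=%d\n", (int) f.cmp_offset(8));  // 1: 'abc' vs 'abd'
  return 0;
}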
/** /**
@@ -7391,37 +7417,36 @@ static int wsrep_after_row(THD *thd)
int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no) int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no)
{ {
int result, error= 0; int result;
/* Skip just written row in the case of HA_CHECK_UNIQUE_AFTER_WRITE */
bool skip_self= ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE;
KEY *key_info= table->key_info + key_no; KEY *key_info= table->key_info + key_no;
Field *hash_field= key_info->key_part->field;
uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL]; uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL];
String *blob_storage;
DBUG_ENTER("handler::check_duplicate_long_entry_key"); DBUG_ENTER("handler::check_duplicate_long_entry_key");
DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY && DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY &&
key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL) || key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL) ||
key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL); key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL);
if (hash_field->is_real_null()) if (key_info->key_part->field->is_real_null())
DBUG_RETURN(0); DBUG_RETURN(0);
if (skip_self)
position(table->record[0]);
key_copy(ptr, new_rec, key_info, key_info->key_length, false); key_copy(ptr, new_rec, key_info, key_info->key_length, false);
result= lookup_handler->ha_index_init(key_no, 0); result= lookup_handler->ha_index_init(key_no, 0);
if (result) if (result)
DBUG_RETURN(result); DBUG_RETURN(result);
blob_storage= (String*)alloca(sizeof(String)*table->s->virtual_not_stored_blob_fields); auto blob_storage= (String*)alloca(sizeof(String)*table->s->virtual_not_stored_blob_fields);
table->remember_blob_values(blob_storage); table->remember_blob_values(blob_storage);
store_record(table, file->lookup_buffer); store_record(table, file->lookup_buffer);
result= lookup_handler->ha_index_read_map(table->record[0], ptr, result= lookup_handler->ha_index_read_map(table->record[0], ptr,
HA_WHOLE_KEY, HA_READ_KEY_EXACT); HA_WHOLE_KEY, HA_READ_KEY_EXACT);
if (!result) if (result)
{ goto end;
bool is_same;
Field * t_field;
Item_func_hash * temp= (Item_func_hash *)hash_field->vcol_info->expr;
Item ** arguments= temp->arguments();
uint arg_count= temp->argument_count();
// restore pointers after swap_values in TABLE::update_virtual_fields() // restore pointers after swap_values in TABLE::update_virtual_fields()
for (Field **vf= table->vfield; *vf; vf++) for (Field **vf= table->vfield; *vf; vf++)
{ {
@@ -7431,51 +7456,31 @@ int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no)
} }
do do
{ {
my_ptrdiff_t diff= table->file->lookup_buffer - new_rec; if (!long_unique_fields_differ(key_info, lookup_buffer))
is_same= true;
for (uint j=0; is_same && j < arg_count; j++)
{ {
DBUG_ASSERT(arguments[j]->type() == Item::FIELD_ITEM ||
// this one for left(fld_name,length)
arguments[j]->type() == Item::FUNC_ITEM);
if (arguments[j]->type() == Item::FIELD_ITEM)
{
t_field= static_cast<Item_field *>(arguments[j])->field;
if (t_field->cmp_offset(diff))
is_same= false;
}
else
{
Item_func_left *fnc= static_cast<Item_func_left *>(arguments[j]);
DBUG_ASSERT(!my_strcasecmp(system_charset_info, "left", fnc->func_name()));
DBUG_ASSERT(fnc->arguments()[0]->type() == Item::FIELD_ITEM);
t_field= static_cast<Item_field *>(fnc->arguments()[0])->field;
uint length= (uint)fnc->arguments()[1]->val_int();
if (t_field->cmp_prefix(t_field->ptr, t_field->ptr + diff, length))
is_same= false;
}
}
}
while (!is_same &&
!(result= lookup_handler->ha_index_next_same(table->record[0],
ptr, key_info->key_length)));
if (is_same)
error= HA_ERR_FOUND_DUPP_KEY;
goto exit;
}
if (result != HA_ERR_KEY_NOT_FOUND)
error= result;
exit:
if (error == HA_ERR_FOUND_DUPP_KEY)
{
table->file->lookup_errkey= key_no;
lookup_handler->position(table->record[0]); lookup_handler->position(table->record[0]);
memcpy(table->file->dup_ref, lookup_handler->ref, ref_length); if (skip_self && !memcmp(ref, lookup_handler->ref, ref_length))
{
skip_self= false; // cannot happen twice, so let's save a memcpy
continue;
} }
result= HA_ERR_FOUND_DUPP_KEY;
table->file->lookup_errkey= key_no;
memcpy(table->file->dup_ref, lookup_handler->ref, ref_length);
goto end;
}
}
while (!(result= lookup_handler->ha_index_next_same(table->record[0], ptr,
key_info->key_length)));
end:
if (result == HA_ERR_END_OF_FILE || result == HA_ERR_KEY_NOT_FOUND)
result= 0;
restore_record(table, file->lookup_buffer); restore_record(table, file->lookup_buffer);
table->restore_blob_values(blob_storage); table->restore_blob_values(blob_storage);
lookup_handler->ha_index_end(); lookup_handler->ha_index_end();
DBUG_RETURN(error); DBUG_RETURN(result);
} }
void handler::alloc_lookup_buffer() void handler::alloc_lookup_buffer()
@@ -7487,77 +7492,57 @@ void handler::alloc_lookup_buffer()
+ table_share->reclength); + table_share->reclength);
} }
int handler::ha_check_inserver_constraints(const uchar *old_data,
const uchar* new_data)
{
int error= 0;
/*
this != table->file is true in 3 cases:
1. under copy_partitions() (REORGANIZE PARTITION): that does not
require long unique check as it does not introduce new rows or new index.
2. under partition's ha_write_row() or ha_update_row(). Constraints
were already checked by ha_partition::ha_write_row(), no need to re-check
for each partition.
3. under ha_mroonga::wrapper_write_row(). Same as 2.
*/
if (this == table->file)
{
uint saved_status= table->status;
if (!(error= ha_check_overlaps(old_data, new_data)))
error= ha_check_long_uniques(old_data, new_data);
table->status= saved_status;
}
return error;
}
/** @brief /** @brief
check whether inserted records break the check whether inserted records break the unique constraint on long columns.
unique constraint on long columns.
@returns 0 if no duplicate else returns error @returns 0 if no duplicate else returns error
*/ */
int handler::check_duplicate_long_entries(const uchar *new_rec) int handler::ha_check_long_uniques(const uchar *old_rec, const uchar *new_rec)
{ {
if (!table->s->long_unique_table)
return 0;
DBUG_ASSERT(inited == NONE || lookup_handler != this);
DBUG_ASSERT(new_rec == table->record[0]);
DBUG_ASSERT(!old_rec || old_rec == table->record[1]);
lookup_errkey= (uint)-1; lookup_errkey= (uint)-1;
for (uint i= 0; i < table->s->keys; i++) for (uint i= 0; i < table->s->keys; i++)
{ {
int result; KEY *keyinfo= table->key_info + i;
if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH &&
(result= check_duplicate_long_entry_key(new_rec, i)))
return result;
}
return 0;
}
/** @brief
check whether updated records break the
unique constraint on long columns.
In the case of update we just need to check the specific key
reason for that is consider case
create table t1(a blob , b blob , x blob , y blob ,unique(a,b)
,unique(x,y))
and update statement like this
update t1 set a=23+a; in this case if we try to scan for
whole keys in table then index scan on x_y will return 0
because data is same so in the case of update we take
key as a parameter in normal insert key should be -1
@returns 0 if no duplicate else returns error
*/
int handler::check_duplicate_long_entries_update(const uchar *new_rec)
{
Field *field;
uint key_parts;
KEY *keyinfo;
KEY_PART_INFO *keypart;
/*
Here we are comparing whether new record and old record are same
with respect to fields in hash_str
*/
uint reclength= (uint) (table->record[1] - table->record[0]);
for (uint i= 0; i < table->s->keys; i++)
{
keyinfo= table->key_info + i;
if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH) if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
{ {
key_parts= fields_in_hash_keyinfo(keyinfo); if (!old_rec || long_unique_fields_differ(keyinfo, old_rec))
keypart= keyinfo->key_part - key_parts;
for (uint j= 0; j < key_parts; j++, keypart++)
{ {
int error; if (int res= check_duplicate_long_entry_key(new_rec, i))
field= keypart->field;
/*
Compare fields if they are different then check for duplicates
cmp_binary_offset cannot differentiate between null and empty string
So also check for that too
*/
if((field->is_null(0) != field->is_null(reclength)) ||
field->cmp_offset(reclength))
{ {
if((error= check_duplicate_long_entry_key(new_rec, i))) if (!old_rec && table->next_number_field &&
return error; !(ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE))
/* if (int err= update_auto_increment())
break because check_duplicate_long_entries_key will return err;
take care of remaining fields return res;
*/
break;
} }
} }
} }
@@ -7569,14 +7554,14 @@ int handler::check_duplicate_long_entries_update(const uchar *new_rec)
int handler::ha_check_overlaps(const uchar *old_data, const uchar* new_data) int handler::ha_check_overlaps(const uchar *old_data, const uchar* new_data)
{ {
DBUG_ASSERT(new_data); DBUG_ASSERT(new_data);
if (this != table->file) DBUG_ASSERT(this == table->file);
return 0;
if (!table_share->period.unique_keys) if (!table_share->period.unique_keys)
return 0; return 0;
if (table->versioned() && !table->vers_end_field()->is_max()) if (table->versioned() && !table->vers_end_field()->is_max())
return 0; return 0;
const bool is_update= old_data != NULL; const bool after_write= ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE;
const bool is_update= !after_write && old_data;
uchar *record_buffer= lookup_buffer + table_share->max_unique_length uchar *record_buffer= lookup_buffer + table_share->max_unique_length
+ table_share->null_fields; + table_share->null_fields;
@@ -7631,7 +7616,9 @@ int handler::ha_check_overlaps(const uchar *old_data, const uchar* new_data)
key_part_map((1 << (key_parts - 1)) - 1), key_part_map((1 << (key_parts - 1)) - 1),
HA_READ_AFTER_KEY); HA_READ_AFTER_KEY);
if (!error && is_update) if (!error)
{
if (is_update)
{ {
/* In case of update it could happen that the nearest neighbour is /* In case of update it could happen that the nearest neighbour is
a record we are updating. It means, that there are no overlaps a record we are updating. It means, that there are no overlaps
@@ -7644,6 +7631,9 @@ int handler::ha_check_overlaps(const uchar *old_data, const uchar* new_data)
if (memcmp(ref, lookup_handler->ref, ref_length) == 0) if (memcmp(ref, lookup_handler->ref, ref_length) == 0)
error= lookup_handler->ha_index_next(record_buffer); error= lookup_handler->ha_index_next(record_buffer);
} }
else if (after_write)
error= lookup_handler->ha_index_next(record_buffer);
}
if (!error && table->check_period_overlaps(key_info, new_data, record_buffer)) if (!error && table->check_period_overlaps(key_info, new_data, record_buffer))
error= HA_ERR_FOUND_DUPP_KEY; error= HA_ERR_FOUND_DUPP_KEY;
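The after_write branch serves engines that can only check constraints once the row is stored: the nearest index neighbour found here may be the freshly written row itself, recognised by its position (ref) and stepped over. A toy rendering of that skip (hypothetical data, not the real index code):

#include <cstdio>
#include <vector>

int main()
{
  std::vector<int> neighbours= {7, 9};  // index entries at/after the search key
  int own_ref= 7;                       // position of the row we just wrote
  for (int ref : neighbours)
  {
    if (ref == own_ref)                 // like memcmp(ref, lookup_handler->ref)
      continue;                         // self-match: read the next entry
    std::printf("first real neighbour: %d\n", ref);
    break;
  }
  return 0;
}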
@@ -7754,11 +7744,8 @@ int handler::prepare_for_insert(bool do_create)
int handler::ha_write_row(const uchar *buf) int handler::ha_write_row(const uchar *buf)
{ {
int error; int error;
DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
m_lock_type == F_WRLCK);
DBUG_ENTER("handler::ha_write_row"); DBUG_ENTER("handler::ha_write_row");
DEBUG_SYNC_C("ha_write_row_start"); DEBUG_SYNC_C("ha_write_row_start");
#ifdef WITH_WSREP
DBUG_EXECUTE_IF("wsrep_ha_write_row", DBUG_EXECUTE_IF("wsrep_ha_write_row",
{ {
const char act[]= const char act[]=
@@ -7767,36 +7754,11 @@ int handler::ha_write_row(const uchar *buf)
"WAIT_FOR wsrep_ha_write_row_continue"; "WAIT_FOR wsrep_ha_write_row_continue";
DBUG_ASSERT(!debug_sync_set_action(ha_thd(), STRING_WITH_LEN(act))); DBUG_ASSERT(!debug_sync_set_action(ha_thd(), STRING_WITH_LEN(act)));
}); });
#endif /* WITH_WSREP */ DBUG_ASSERT(table_share->tmp_table || m_lock_type == F_WRLCK);
if ((error= ha_check_overlaps(NULL, buf)))
{
DEBUG_SYNC_C("ha_write_row_end");
DBUG_RETURN(error);
}
/* if (!(ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) &&
NOTE: this != table->file is true in 3 cases: (error= ha_check_inserver_constraints(NULL, buf)))
goto err;
1. under copy_partitions() (REORGANIZE PARTITION): that does not
require long unique check as it does not introduce new rows or new index.
2. under partition's ha_write_row() (INSERT): check_duplicate_long_entries()
was already done by ha_partition::ha_write_row(), no need to check it
again for each single partition.
3. under ha_mroonga::wrapper_write_row()
*/
if (table->s->long_unique_table && this == table->file)
{
DBUG_ASSERT(inited == NONE || lookup_handler != this);
if ((error= check_duplicate_long_entries(buf)))
{
if (table->next_number_field && buf == table->record[0])
if (int err= update_auto_increment())
error= err;
DEBUG_SYNC_C("ha_write_row_end");
DBUG_RETURN(error);
}
}
MYSQL_INSERT_ROW_START(table_share->db.str, table_share->table_name.str); MYSQL_INSERT_ROW_START(table_share->db.str, table_share->table_name.str);
mark_trx_read_write(); mark_trx_read_write();
@@ -7808,8 +7770,31 @@ int handler::ha_write_row(const uchar *buf)
dbug_format_row(table, buf, false).c_ptr_safe(), error)); dbug_format_row(table, buf, false).c_ptr_safe(), error));
MYSQL_INSERT_ROW_DONE(error); MYSQL_INSERT_ROW_DONE(error);
if (likely(!error)) if (error)
goto err;
if ((ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) &&
(error= ha_check_inserver_constraints(NULL, buf)))
{ {
if (lookup_handler != this) // INSERT IGNORE or REPLACE or ODKU
{
int olderror= error;
if ((error= lookup_handler->rnd_init(0)))
goto err;
position(buf);
if ((error= lookup_handler->rnd_pos(lookup_buffer, ref)))
goto err;
increment_statistics(&SSV::ha_delete_count);
TABLE_IO_WAIT(tracker, PSI_TABLE_DELETE_ROW, MAX_KEY, error,
{ error= lookup_handler->delete_row(buf);})
lookup_handler->rnd_end();
if (!error)
error= olderror;
}
goto err;
}
rows_changed++; rows_changed++;
if (row_logging) if (row_logging)
{ {
@@ -7817,14 +7802,11 @@ int handler::ha_write_row(const uchar *buf)
error= binlog_log_row(table, 0, buf, log_func); error= binlog_log_row(table, 0, buf, log_func);
} }
#ifdef WITH_WSREP if (WSREP_NNULL(ha_thd()) && table_share->tmp_table == NO_TMP_TABLE &&
THD *thd= ha_thd();
if (WSREP_NNULL(thd) && table_share->tmp_table == NO_TMP_TABLE &&
ht->flags & HTON_WSREP_REPLICATION && !error) ht->flags & HTON_WSREP_REPLICATION && !error)
error= wsrep_after_row(thd); error= wsrep_after_row(ha_thd());
#endif /* WITH_WSREP */
}
err:
DEBUG_SYNC_C("ha_write_row_end"); DEBUG_SYNC_C("ha_write_row_end");
DBUG_RETURN(error); DBUG_RETURN(error);
} }
@@ -7833,8 +7815,7 @@ int handler::ha_write_row(const uchar *buf)
int handler::ha_update_row(const uchar *old_data, const uchar *new_data) int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
{ {
int error; int error;
DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || DBUG_ASSERT(table_share->tmp_table || m_lock_type == F_WRLCK);
m_lock_type == F_WRLCK);
/* /*
Some storage engines require that the new record is in record[0] Some storage engines require that the new record is in record[0]
(and the old record is in record[1]). (and the old record is in record[1]).
@@ -7842,21 +7823,8 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
DBUG_ASSERT(new_data == table->record[0]); DBUG_ASSERT(new_data == table->record[0]);
DBUG_ASSERT(old_data == table->record[1]); DBUG_ASSERT(old_data == table->record[1]);
uint saved_status= table->status; if (!(ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) &&
error= ha_check_overlaps(old_data, new_data); (error= ha_check_inserver_constraints(old_data, new_data)))
/*
NOTE: this != table->file is true under partition's ha_update_row():
check_duplicate_long_entries_update() was already done by
ha_partition::ha_update_row(), no need to check it again for each single
partition. Same applies to ha_mroonga wrapper.
*/
if (!error && table->s->long_unique_table && this == table->file)
error= check_duplicate_long_entries_update(new_data);
table->status= saved_status;
if (error)
return error; return error;
MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str); MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str);
@@ -7871,8 +7839,24 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
error)); error));
MYSQL_UPDATE_ROW_DONE(error); MYSQL_UPDATE_ROW_DONE(error);
if (likely(!error)) if (error)
return error;
if ((ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) &&
(error= ha_check_inserver_constraints(old_data, new_data)))
{ {
int e= 0;
if (ha_thd()->lex->ignore)
{
my_printf_error(ER_NOT_SUPPORTED_YET, "UPDATE IGNORE in READ "
"COMMITTED isolation mode of a table with a UNIQUE constraint "
"%s is not currently supported", MYF(0),
table->s->long_unique_table ? "USING HASH" : "WITHOUT OVERLAPS");
return HA_ERR_UNSUPPORTED;
}
return e ? e : error;
}
rows_changed++; rows_changed++;
if (row_logging) if (row_logging)
{ {
@@ -7899,7 +7883,6 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
error= wsrep_after_row(thd); error= wsrep_after_row(thd);
} }
#endif /* WITH_WSREP */ #endif /* WITH_WSREP */
}
return error; return error;
} }
@@ -7934,13 +7917,11 @@ int handler::update_first_row(const uchar *new_data)
int handler::ha_delete_row(const uchar *buf) int handler::ha_delete_row(const uchar *buf)
{ {
int error; int error;
DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || DBUG_ASSERT(table_share->tmp_table || m_lock_type == F_WRLCK);
m_lock_type == F_WRLCK);
/* /*
Normally table->record[0] is used, but sometimes table->record[1] is used. Normally table->record[0] is used, but sometimes table->record[1] is used.
*/ */
DBUG_ASSERT(buf == table->record[0] || DBUG_ASSERT(buf == table->record[0] || buf == table->record[1]);
buf == table->record[1]);
MYSQL_DELETE_ROW_START(table_share->db.str, table_share->table_name.str); MYSQL_DELETE_ROW_START(table_share->db.str, table_share->table_name.str);
mark_trx_read_write(); mark_trx_read_write();

View File

@@ -368,7 +368,9 @@ enum chf_create_flags {
/* Implements SELECT ... FOR UPDATE SKIP LOCKED */ /* Implements SELECT ... FOR UPDATE SKIP LOCKED */
#define HA_CAN_SKIP_LOCKED (1ULL << 61) #define HA_CAN_SKIP_LOCKED (1ULL << 61)
#define HA_LAST_TABLE_FLAG HA_CAN_SKIP_LOCKED #define HA_CHECK_UNIQUE_AFTER_WRITE (1ULL << 62)
#define HA_LAST_TABLE_FLAG HA_CHECK_UNIQUE_AFTER_WRITE
/* bits in index_flags(index_number) for what you can do with index */ /* bits in index_flags(index_number) for what you can do with index */
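HA_CHECK_UNIQUE_AFTER_WRITE is an ordinary table-flag bit: an engine opts in by OR-ing it into table_flags(), and the handler then runs ha_check_inserver_constraints() after write_row()/update_row() instead of before. A minimal standalone mock of the bit test (the flag value mirrors the patch, everything around it is invented):

#include <cstdint>
#include <cstdio>

static const uint64_t HA_CHECK_UNIQUE_AFTER_WRITE= 1ULL << 62;

int main()
{
  uint64_t flags= HA_CHECK_UNIQUE_AFTER_WRITE;  // engine opted in
  bool check_before= !(flags & HA_CHECK_UNIQUE_AFTER_WRITE);
  bool check_after= (flags & HA_CHECK_UNIQUE_AFTER_WRITE) != 0;
  std::printf("check before write=%d, after write=%d\n",
              (int) check_before, (int) check_after);
  return 0;
}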
@@ -4825,11 +4827,11 @@ private:
int create_lookup_handler(); int create_lookup_handler();
void alloc_lookup_buffer(); void alloc_lookup_buffer();
int check_duplicate_long_entries(const uchar *new_rec);
int check_duplicate_long_entries_update(const uchar *new_rec);
int check_duplicate_long_entry_key(const uchar *new_rec, uint key_no); int check_duplicate_long_entry_key(const uchar *new_rec, uint key_no);
/** PRIMARY KEY/UNIQUE WITHOUT OVERLAPS check */ /** PRIMARY KEY/UNIQUE WITHOUT OVERLAPS check */
int ha_check_overlaps(const uchar *old_data, const uchar* new_data); int ha_check_overlaps(const uchar *old_data, const uchar* new_data);
int ha_check_long_uniques(const uchar *old_rec, const uchar *new_rec);
int ha_check_inserver_constraints(const uchar *old_data, const uchar* new_data);
protected: protected:
/* /*

View File

@@ -5098,6 +5098,7 @@ Item_param::set_param_type_and_swap_value(Item_param *src)
void Item_param::set_default(bool set_type_handler_null) void Item_param::set_default(bool set_type_handler_null)
{ {
m_is_settable_routine_parameter= false; m_is_settable_routine_parameter= false;
current_thd->lex->default_used= true;
state= DEFAULT_VALUE; state= DEFAULT_VALUE;
/* /*
When Item_param is set to DEFAULT_VALUE: When Item_param is set to DEFAULT_VALUE:
@@ -5255,14 +5256,26 @@ static Field *make_default_field(THD *thd, Field *field_arg)
if (!newptr) if (!newptr)
return nullptr; return nullptr;
/* Don't check privileges, if it's parse_vcol_defs() */
if (def_field->table->pos_in_table_list &&
def_field->default_value->check_access(thd))
return nullptr;
if (should_mark_column(thd->column_usage)) if (should_mark_column(thd->column_usage))
def_field->default_value->expr->update_used_tables(); def_field->default_value->expr->update_used_tables();
def_field->move_field(newptr + 1, def_field->maybe_null() ? newptr : 0, 1); def_field->move_field(newptr + 1, def_field->maybe_null() ? newptr : 0, 1);
} }
else else if (field_arg->table && field_arg->table->s->field)
{
Field *def_val= field_arg->table->s->field[field_arg->field_index];
def_field->move_field(def_val->ptr, def_val->null_ptr, def_val->null_bit);
}
else /* e.g. non-updatable view */
{
def_field->move_field_offset((my_ptrdiff_t) def_field->move_field_offset((my_ptrdiff_t)
(def_field->table->s->default_values - (def_field->table->s->default_values -
def_field->table->record[0])); def_field->table->record[0]));
}
return def_field; return def_field;
} }

View File

@@ -2455,6 +2455,7 @@ public:
If there is some, sets a bit for this key in the proper key map. If there is some, sets a bit for this key in the proper key map.
*/ */
virtual bool check_index_dependence(void *arg) { return 0; } virtual bool check_index_dependence(void *arg) { return 0; }
virtual bool check_sequence_privileges(void *arg) { return 0; }
/*============== End of Item processor list ======================*/ /*============== End of Item processor list ======================*/
/* /*

View File

@@ -602,7 +602,7 @@ bool Arg_comparator::set_cmp_func_string(THD *thd)
else if ((*b)->type() == Item::FUNC_ITEM && else if ((*b)->type() == Item::FUNC_ITEM &&
((Item_func *) (*b))->functype() == Item_func::JSON_EXTRACT_FUNC) ((Item_func *) (*b))->functype() == Item_func::JSON_EXTRACT_FUNC)
{ {
func= is_owner_equal_func() ? &Arg_comparator::compare_e_json_str: func= is_owner_equal_func() ? &Arg_comparator::compare_e_str_json:
&Arg_comparator::compare_str_json; &Arg_comparator::compare_str_json;
return 0; return 0;
} }

View File

@@ -7100,15 +7100,14 @@ longlong Item_func_cursor_rowcount::val_int()
/***************************************************************************** /*****************************************************************************
SEQUENCE functions SEQUENCE functions
*****************************************************************************/ *****************************************************************************/
bool Item_func_nextval::check_access_and_fix_fields(THD *thd, Item **ref, bool Item_func_nextval::check_access(THD *thd, privilege_t want_access)
privilege_t want_access)
{ {
table_list->sequence= false; table_list->sequence= false;
bool error= check_single_table_access(thd, want_access, table_list, false); bool error= check_single_table_access(thd, want_access, table_list, false);
table_list->sequence= true; table_list->sequence= true;
if (error && table_list->belong_to_view) if (error && table_list->belong_to_view)
table_list->replace_view_error_with_generic(thd); table_list->replace_view_error_with_generic(thd);
return error || Item_longlong_func::fix_fields(thd, ref); return error;
} }
longlong Item_func_nextval::val_int() longlong Item_func_nextval::val_int()
@@ -7123,7 +7122,8 @@ longlong Item_func_nextval::val_int()
String key_buff(buff,sizeof(buff), &my_charset_bin); String key_buff(buff,sizeof(buff), &my_charset_bin);
DBUG_ENTER("Item_func_nextval::val_int"); DBUG_ENTER("Item_func_nextval::val_int");
update_table(); update_table();
DBUG_ASSERT(table && table->s->sequence); DBUG_ASSERT(table);
DBUG_ASSERT(table->s->sequence);
thd= table->in_use; thd= table->in_use;
if (thd->count_cuted_fields == CHECK_FIELD_EXPRESSION) if (thd->count_cuted_fields == CHECK_FIELD_EXPRESSION)

View File

@@ -4236,7 +4236,7 @@ class Item_func_nextval :public Item_longlong_func
protected: protected:
TABLE_LIST *table_list; TABLE_LIST *table_list;
TABLE *table; TABLE *table;
bool check_access_and_fix_fields(THD *, Item **ref, privilege_t); bool check_access(THD *, privilege_t);
public: public:
Item_func_nextval(THD *thd, TABLE_LIST *table_list_arg): Item_func_nextval(THD *thd, TABLE_LIST *table_list_arg):
Item_longlong_func(thd), table_list(table_list_arg) {} Item_longlong_func(thd), table_list(table_list_arg) {}
@@ -4247,7 +4247,13 @@ public:
return name; return name;
} }
bool fix_fields(THD *thd, Item **ref) override bool fix_fields(THD *thd, Item **ref) override
{ return check_access_and_fix_fields(thd, ref, INSERT_ACL | SELECT_ACL); } {
/* Don't check privileges, if it's parse_vcol_defs() */
return (table_list->table && check_sequence_privileges(thd)) ||
Item_longlong_func::fix_fields(thd, ref);
}
bool check_sequence_privileges(void *thd) override
{ return check_access((THD*)thd, INSERT_ACL | SELECT_ACL); }
bool fix_length_and_dec(THD *thd) override bool fix_length_and_dec(THD *thd) override
{ {
unsigned_flag= 0; unsigned_flag= 0;
@@ -4289,8 +4295,8 @@ class Item_func_lastval :public Item_func_nextval
public: public:
Item_func_lastval(THD *thd, TABLE_LIST *table_list_arg): Item_func_lastval(THD *thd, TABLE_LIST *table_list_arg):
Item_func_nextval(thd, table_list_arg) {} Item_func_nextval(thd, table_list_arg) {}
bool fix_fields(THD *thd, Item **ref) override bool check_sequence_privileges(void *thd) override
{ return check_access_and_fix_fields(thd, ref, SELECT_ACL); } { return check_access((THD*)thd, SELECT_ACL); }
longlong val_int() override; longlong val_int() override;
LEX_CSTRING func_name_cstring() const override LEX_CSTRING func_name_cstring() const override
{ {
@@ -4315,8 +4321,8 @@ public:
: Item_func_nextval(thd, table_list_arg), : Item_func_nextval(thd, table_list_arg),
nextval(nextval_arg), round(round_arg), is_used(is_used_arg) nextval(nextval_arg), round(round_arg), is_used(is_used_arg)
{} {}
bool fix_fields(THD *thd, Item **ref) override bool check_sequence_privileges(void *thd) override
{ return check_access_and_fix_fields(thd, ref, INSERT_ACL); } { return check_access((THD*)thd, INSERT_ACL); }
longlong val_int() override; longlong val_int() override;
LEX_CSTRING func_name_cstring() const override LEX_CSTRING func_name_cstring() const override
{ {

View File

@@ -7138,21 +7138,24 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
order R by (E(#records_matched) * key_record_length). order R by (E(#records_matched) * key_record_length).
S= first(R); -- set of scans that will be used for ROR-intersection S= first(R); -- set of scans that will be used for ROR-intersection
R= R-first(S); R= R - S;
min_cost= cost(S); min_cost= cost(S);
min_scan= make_scan(S); min_scan= make_scan(S);
while (R is not empty) while (R is not empty)
{ {
firstR= R - first(R); firstR= first(R);
if (!selectivity(S + firstR < selectivity(S))) if (!(selectivity(S + firstR) < selectivity(S)))
{
R= R - firstR;
continue; continue;
}
S= S + first(R); S= S + first(R);
if (cost(S) < min_cost) if (cost(S) < min_cost)
{ {
min_cost= cost(S); min_cost= cost(S);
min_scan= make_scan(S); min_scan= make_scan(S);
} }
R= R - firstR; -- Remove the processed scan from R
} }
return min_scan; return min_scan;
} }
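A hedged concrete rendering of the corrected pseudocode, with toy selectivity and cost functions standing in for the optimizer's real estimates:

#include <cstdio>
#include <vector>

struct Scan { double sel; double cost; };  // toy per-scan estimates

int main()
{
  // R is already ordered by E(#records_matched) * key_record_length
  std::vector<Scan> R= {{0.5, 10.0}, {0.9, 3.0}, {0.4, 8.0}};
  double s_sel= R[0].sel, s_io= R[0].cost;          // S= first(R); R= R - S
  auto cost= [](double io, double sel) { return io + sel * 1000.0; };
  double min_cost= cost(s_io, s_sel);
  for (unsigned i= 1; i < R.size(); i++)            // remaining scans in R
  {
    if (!(s_sel * R[i].sel < s_sel))                // no selectivity gain: skip
      continue;
    s_sel*= R[i].sel;                               // S= S + firstR
    s_io+= R[i].cost;
    if (cost(s_io, s_sel) < min_cost)
      min_cost= cost(s_io, s_sel);
  }
  std::printf("selectivity=%.3f min_cost=%.1f\n", s_sel, min_cost);
  return 0;
}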

View File

@@ -68,15 +68,20 @@ static ulonglong timespec_to_usec(const struct timespec *ts)
return (ulonglong) ts->tv_sec * TIME_MILLION + ts->tv_nsec / TIME_THOUSAND; return (ulonglong) ts->tv_sec * TIME_MILLION + ts->tv_nsec / TIME_THOUSAND;
} }
int signal_waiting_transaction(THD *waiting_thd, const char *binlog_file, static int
my_off_t binlog_pos) signal_waiting_transaction(THD *waiting_thd, bool thd_valid,
const char *binlog_file, my_off_t binlog_pos)
{ {
/* /*
It is possible that the connection thd waiting for an ACK was killed. In It is possible that the connection thd waiting for an ACK was killed. In
such circumstance, the connection thread will nullify the thd member of its such circumstance, the connection thread will nullify the thd member of its
Active_tranx node. So before we try to signal, ensure the THD exists. Active_tranx node. So before we try to signal, ensure the THD exists.
The thd_valid flag is only set while the THD is waiting in commit_trx();
this is defensive coding so that we never signal an invalid THD if we
somewhere accidentally failed to remove the transaction from the list.
*/ */
if (waiting_thd) if (waiting_thd && thd_valid)
mysql_cond_signal(&waiting_thd->COND_wakeup_ready); mysql_cond_signal(&waiting_thd->COND_wakeup_ready);
return 0; return 0;
} }
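A hedged standalone reduction of the thd_valid handshake: the waiter publishes the flag only while genuinely parked on its condition variable, and the ack path signals only flagged nodes, so a stale entry left in the list can never be signalled (std::condition_variable stands in for mysql_cond_t):

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

struct Node { std::condition_variable *cond; bool thd_valid= false; };

std::mutex m;

void ack(Node *n)
{
  std::lock_guard<std::mutex> lk(m);
  if (n->cond && n->thd_valid)         // mirrors signal_waiting_transaction()
    n->cond->notify_one();
}

int main()
{
  std::condition_variable cv;
  Node node{&cv};
  std::unique_lock<std::mutex> lk(m);
  node.thd_valid= true;                // set only for the duration of the wait
  std::thread t(ack, &node);
  cv.wait_for(lk, std::chrono::milliseconds(100));
  node.thd_valid= false;
  lk.unlock();
  t.join();
  std::puts("woken or timed out");
  return 0;
}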
@@ -182,6 +187,7 @@ int Active_tranx::insert_tranx_node(THD *thd_to_wait,
ins_node->log_name[FN_REFLEN-1] = 0; /* make sure it ends properly */ ins_node->log_name[FN_REFLEN-1] = 0; /* make sure it ends properly */
ins_node->log_pos = log_file_pos; ins_node->log_pos = log_file_pos;
ins_node->thd= thd_to_wait; ins_node->thd= thd_to_wait;
ins_node->thd_valid= false;
if (!m_trx_front) if (!m_trx_front)
{ {
@@ -263,7 +269,8 @@ void Active_tranx::clear_active_tranx_nodes(
if ((log_file_name != NULL) && if ((log_file_name != NULL) &&
compare(new_front, log_file_name, log_file_pos) > 0) compare(new_front, log_file_name, log_file_pos) > 0)
break; break;
pre_delete_hook(new_front->thd, new_front->log_name, new_front->log_pos); pre_delete_hook(new_front->thd, new_front->thd_valid,
new_front->log_name, new_front->log_pos);
new_front = new_front->next; new_front = new_front->next;
} }
@@ -355,12 +362,16 @@ void Active_tranx::unlink_thd_as_waiter(const char *log_file_name,
} }
if (entry) if (entry)
{
entry->thd= NULL; entry->thd= NULL;
entry->thd_valid= false;
}
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
} }
bool Active_tranx::is_thd_waiter(THD *thd_to_check, const char *log_file_name, Tranx_node *
Active_tranx::is_thd_waiter(THD *thd_to_check, const char *log_file_name,
my_off_t log_file_pos) my_off_t log_file_pos)
{ {
DBUG_ENTER("Active_tranx::assert_thd_is_waiter"); DBUG_ENTER("Active_tranx::assert_thd_is_waiter");
@@ -377,7 +388,7 @@ bool Active_tranx::is_thd_waiter(THD *thd_to_check, const char *log_file_name,
entry = entry->hash_next; entry = entry->hash_next;
} }
DBUG_RETURN(static_cast<bool>(entry)); DBUG_RETURN(entry);
} }
/******************************************************************************* /*******************************************************************************
@@ -863,6 +874,10 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name,
if (!rpl_semi_sync_master_clients && !rpl_semi_sync_master_wait_no_slave) if (!rpl_semi_sync_master_clients && !rpl_semi_sync_master_wait_no_slave)
{ {
lock();
m_active_tranxs->unlink_thd_as_waiter(trx_wait_binlog_name,
trx_wait_binlog_pos);
unlock();
rpl_semi_sync_master_no_transactions++; rpl_semi_sync_master_no_transactions++;
DBUG_RETURN(0); DBUG_RETURN(0);
} }
@@ -922,6 +937,9 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name,
} }
} }
Tranx_node *tranx_entry=
m_active_tranxs->is_thd_waiter(thd, trx_wait_binlog_name,
trx_wait_binlog_pos);
/* In between the binlogging of this transaction and this wait, it is /* In between the binlogging of this transaction and this wait, it is
* possible that our entry in Active_tranx was removed (i.e. if * possible that our entry in Active_tranx was removed (i.e. if
* semi-sync was switched off and on). It is also possible that the * semi-sync was switched off and on). It is also possible that the
@@ -932,8 +950,7 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name,
* rpl_semi_sync_master_yes/no_tx consistent with it, we check for a * rpl_semi_sync_master_yes/no_tx consistent with it, we check for a
* semi-sync restart _after_ checking the reply state. * semi-sync restart _after_ checking the reply state.
*/ */
if (unlikely(!m_active_tranxs->is_thd_waiter(thd, trx_wait_binlog_name, if (unlikely(!tranx_entry))
trx_wait_binlog_pos)))
{ {
DBUG_EXECUTE_IF( DBUG_EXECUTE_IF(
"semisync_log_skip_trx_wait", "semisync_log_skip_trx_wait",
@@ -952,6 +969,16 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name,
break; break;
} }
/*
Mark that our THD is now valid for the ack thread to signal.
It is important to ensure that we can never leave a no-longer-valid
THD in the transaction list and signal it, e.g. MDEV-36934. This way,
we ensure the THD can only be signalled while this function is
running, even in case of some incorrect error handling or similar
that might leave a dangling THD in the list.
*/
tranx_entry->thd_valid= true;
/* Let us update the info about the minimum binlog position of waiting /* Let us update the info about the minimum binlog position of waiting
* threads. * threads.
*/ */
@@ -1284,6 +1311,8 @@ int Repl_semi_sync_master::write_tranx_in_binlog(THD *thd,
DBUG_ENTER("Repl_semi_sync_master::write_tranx_in_binlog"); DBUG_ENTER("Repl_semi_sync_master::write_tranx_in_binlog");
DEBUG_SYNC(current_thd, "semisync_at_write_tranx_in_binlog");
lock(); lock();
/* This is the real check inside the mutex. */ /* This is the real check inside the mutex. */
@@ -1317,7 +1346,8 @@ int Repl_semi_sync_master::write_tranx_in_binlog(THD *thd,
m_commit_file_name_inited = true; m_commit_file_name_inited = true;
} }
if (is_on()) if (is_on() &&
(rpl_semi_sync_master_clients || rpl_semi_sync_master_wait_no_slave))
{ {
DBUG_ASSERT(m_active_tranxs != NULL); DBUG_ASSERT(m_active_tranxs != NULL);
if(m_active_tranxs->insert_tranx_node(thd, log_file_name, log_file_pos)) if(m_active_tranxs->insert_tranx_node(thd, log_file_name, log_file_pos))

View File

@@ -30,6 +30,7 @@ extern PSI_cond_key key_COND_binlog_send;
struct Tranx_node { struct Tranx_node {
char log_name[FN_REFLEN]; char log_name[FN_REFLEN];
bool thd_valid; /* thd is valid for signalling */
my_off_t log_pos; my_off_t log_pos;
THD *thd; /* The thread awaiting an ACK */ THD *thd; /* The thread awaiting an ACK */
struct Tranx_node *next; /* the next node in the sorted list */ struct Tranx_node *next; /* the next node in the sorted list */
@@ -126,7 +127,9 @@ public:
trx_node= &(current_block->nodes[++last_node]); trx_node= &(current_block->nodes[++last_node]);
trx_node->log_name[0] = '\0'; trx_node->log_name[0] = '\0';
trx_node->thd_valid= false;
trx_node->log_pos= 0; trx_node->log_pos= 0;
trx_node->thd= nullptr;
trx_node->next= 0; trx_node->next= 0;
trx_node->hash_next= 0; trx_node->hash_next= 0;
return trx_node; return trx_node;
@@ -298,7 +301,8 @@ private:
its invocation. See the context in which it is called to know. its invocation. See the context in which it is called to know.
*/ */
typedef int (*active_tranx_action)(THD *trx_thd, const char *log_file_name, typedef int (*active_tranx_action)(THD *trx_thd, bool thd_valid,
const char *log_file_name,
my_off_t trx_log_file_pos); my_off_t trx_log_file_pos);
/** /**
@@ -381,7 +385,7 @@ public:
* matches the thread of the respective Tranx_node::thd of the passed in * matches the thread of the respective Tranx_node::thd of the passed in
* log_file_name and log_file_pos. * log_file_name and log_file_pos.
*/ */
bool is_thd_waiter(THD *thd_to_check, const char *log_file_name, Tranx_node * is_thd_waiter(THD *thd_to_check, const char *log_file_name,
my_off_t log_file_pos); my_off_t log_file_pos);
/* Given a position, check to see whether the position is an active /* Given a position, check to see whether the position is an active

View File

@@ -8453,9 +8453,17 @@ bool check_grant(THD *thd, privilege_t want_access, TABLE_LIST *tables,
Direct SELECT of a sequence table doesn't set t_ref->sequence, so Direct SELECT of a sequence table doesn't set t_ref->sequence, so
privileges will be checked normally, as for any table. privileges will be checked normally, as for any table.
*/ */
if (t_ref->sequence && if (t_ref->sequence)
!(want_access & ~(SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL))) {
if (!(want_access & ~(SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL)))
continue; continue;
/*
If it is ALTER..SET DEFAULT= nextval(sequence), also defer checks
until ::fix_fields().
*/
if (tl != tables && want_access == ALTER_ACL)
continue;
}
const ACL_internal_table_access *access= const ACL_internal_table_access *access=
get_cached_table_access(&t_ref->grant.m_internal, get_cached_table_access(&t_ref->grant.m_internal,

View File

@@ -5171,7 +5171,7 @@ bool DML_prelocking_strategy::handle_table(THD *thd,
DBUG_ASSERT(table_list->lock_type >= TL_FIRST_WRITE || DBUG_ASSERT(table_list->lock_type >= TL_FIRST_WRITE ||
thd->lex->default_used); thd->lex->default_used);
if (table_list->trg_event_map) if (table_list->trg_event_map && table_list->lock_type >= TL_FIRST_WRITE)
{ {
if (table->triggers) if (table->triggers)
{ {

View File

@@ -1670,6 +1670,9 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(insert_view_fields(thd, &fields, table_list)); DBUG_RETURN(insert_view_fields(thd, &fields, table_list));
} }
if (table_list->table->check_sequence_privileges(thd))
DBUG_RETURN(TRUE);
DBUG_RETURN(FALSE); DBUG_RETURN(FALSE);
} }

View File

@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2019, Oracle and/or its affiliates. /* Copyright (c) 2000, 2025, Oracle and/or its affiliates.
Copyright (c) 2009, 2022, MariaDB Corporation. Copyright (c) 2009, 2025, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by it under the terms of the GNU General Public License as published by
@@ -2538,6 +2538,8 @@ int Lex_input_stream::lex_one_token(YYSTYPE *yylval, THD *thd)
state=MY_LEX_CHAR; state=MY_LEX_CHAR;
break; break;
case MY_LEX_END: case MY_LEX_END:
/* Unclosed special comments result in a syntax error */
if (in_comment == DISCARD_COMMENT) return (ABORT_SYM);
next_state= MY_LEX_END; next_state= MY_LEX_END;
return(0); // We found end of input last time return(0); // We found end of input last time

View File

@@ -1510,6 +1510,11 @@ static int mysql_test_update(Prepared_statement *stmt,
0, NULL, 0, THD_WHERE::SET_LIST) || 0, NULL, 0, THD_WHERE::SET_LIST) ||
check_unique_table(thd, table_list)) check_unique_table(thd, table_list))
goto error; goto error;
{
List_iterator_fast<Item> fs(select->item_list), vs(stmt->lex->value_list);
while (Item *f= fs++)
vs++->associate_with_target_field(thd, static_cast<Item_field*>(f));
}
/* TODO: here we should send types of placeholders to the client. */ /* TODO: here we should send types of placeholders to the client. */
DBUG_RETURN(0); DBUG_RETURN(0);
error: error:
@@ -4602,6 +4607,8 @@ Prepared_statement::set_parameters(String *expanded_query,
res= set_params_data(this, expanded_query); res= set_params_data(this, expanded_query);
#endif #endif
} }
lex->default_used= thd->lex->default_used;
thd->lex->default_used= false;
if (res) if (res)
{ {
my_error(ER_WRONG_ARGUMENTS, MYF(0), my_error(ER_WRONG_ARGUMENTS, MYF(0),

View File

@@ -7154,7 +7154,13 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array, KEY_FIELD *key_field)
} }
} }
} }
if (field->hash_join_is_possible() && /*
Compressed field cannot be part of a key. For optimizer temporary table
compressed fields are replaced by uncompressed, see
is_optimizer_tmp_table() and Field_*_compressed::make_new_field().
*/
if (!field->compression_method() &&
field->hash_join_is_possible() &&
(key_field->optimize & KEY_OPTIMIZE_EQ) && (key_field->optimize & KEY_OPTIMIZE_EQ) &&
key_field->val->used_tables()) key_field->val->used_tables())
{ {

View File

@@ -3540,8 +3540,6 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
Create_field *auto_increment_key= 0; Create_field *auto_increment_key= 0;
Key_part_spec *column; Key_part_spec *column;
bool is_hash_field_needed= key->key_create_info.algorithm
== HA_KEY_ALG_LONG_HASH;
if (key->type == Key::IGNORE_KEY) if (key->type == Key::IGNORE_KEY)
{ {
/* ignore redundant keys */ /* ignore redundant keys */
@@ -3552,6 +3550,9 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
break; break;
} }
bool is_hash_field_needed= key->key_create_info.algorithm
== HA_KEY_ALG_LONG_HASH;
if (key_check_without_overlaps(thd, create_info, alter_info, *key)) if (key_check_without_overlaps(thd, create_info, alter_info, *key))
DBUG_RETURN(true); DBUG_RETURN(true);
@@ -11343,7 +11344,8 @@ do_continue:;
thd->count_cuted_fields= CHECK_FIELD_EXPRESSION; thd->count_cuted_fields= CHECK_FIELD_EXPRESSION;
altered_table.reset_default_fields(); altered_table.reset_default_fields();
if (altered_table.default_field && if (altered_table.default_field &&
altered_table.update_default_fields(true)) (altered_table.check_sequence_privileges(thd) ||
altered_table.update_default_fields(true)))
{ {
cleanup_table_after_inplace_alter(&altered_table); cleanup_table_after_inplace_alter(&altered_table);
goto err_new_table_cleanup; goto err_new_table_cleanup;
@@ -12761,6 +12763,23 @@ bool check_engine(THD *thd, const char *db_name,
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "NO_ENGINE_SUBSTITUTION"); my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "NO_ENGINE_SUBSTITUTION");
DBUG_RETURN(TRUE); DBUG_RETURN(TRUE);
} }
#ifdef WITH_WSREP
/* @@enforce_storage_engine is local. If the user has used
ENGINE=XXX we can't allow it in the cluster, because
enf_engine != new_engine: the original stmt is replicated
verbatim, including ENGINE=XXX, and applied here */
if ((create_info->used_fields & HA_CREATE_USED_ENGINE) &&
WSREP(thd))
{
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "ENFORCE_STORAGE_ENGINE");
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_OPTION_PREVENTS_STATEMENT,
"Do not use ENGINE=x when @@enforce_storage_engine is set");
DBUG_RETURN(TRUE);
}
#endif
*new_engine= enf_engine; *new_engine= enf_engine;
} }

View File

@@ -13252,6 +13252,7 @@ expr_or_ignore_or_default:
| DEFAULT | DEFAULT
{ {
$$= new (thd->mem_root) Item_default_specification(thd); $$= new (thd->mem_root) Item_default_specification(thd);
Lex->default_used= TRUE;
if (unlikely($$ == NULL)) if (unlikely($$ == NULL))
MYSQL_YYABORT; MYSQL_YYABORT;
} }
@@ -13335,6 +13336,7 @@ update_elem:
{ {
Item *def= new (thd->mem_root) Item_default_value(thd, Item *def= new (thd->mem_root) Item_default_value(thd,
Lex->current_context(), $1, 1); Lex->current_context(), $1, 1);
Lex->default_used= TRUE;
if (!def || add_item_to_list(thd, $1) || add_value_to_list(thd, def)) if (!def || add_item_to_list(thd, $1) || add_value_to_list(thd, def))
MYSQL_YYABORT; MYSQL_YYABORT;
} }

View File

@@ -2838,6 +2838,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
hash_keypart->fieldnr= hash_field_used_no + 1; hash_keypart->fieldnr= hash_field_used_no + 1;
hash_field= share->field[hash_field_used_no]; hash_field= share->field[hash_field_used_no];
hash_field->flags|= LONG_UNIQUE_HASH_FIELD;//Used in parse_vcol_defs hash_field->flags|= LONG_UNIQUE_HASH_FIELD;//Used in parse_vcol_defs
DBUG_ASSERT(hash_field->invisible == INVISIBLE_FULL);
keyinfo->flags|= HA_NOSAME; keyinfo->flags|= HA_NOSAME;
share->virtual_fields++; share->virtual_fields++;
share->stored_fields--; share->stored_fields--;
@@ -3760,6 +3761,19 @@ Vcol_expr_context::~Vcol_expr_context()
} }
bool TABLE::check_sequence_privileges(THD *thd)
{
if (internal_tables)
for (Field **fp= field; *fp; fp++)
{
Virtual_column_info *vcol= (*fp)->default_value;
if (vcol && vcol->check_access(thd))
return 1;
}
return 0;
}
bool TABLE::vcol_fix_expr(THD *thd) bool TABLE::vcol_fix_expr(THD *thd)
{ {
if (pos_in_table_list->placeholder() || vcol_refix_list.is_empty()) if (pos_in_table_list->placeholder() || vcol_refix_list.is_empty())
@@ -3896,6 +3910,13 @@ bool Virtual_column_info::fix_and_check_expr(THD *thd, TABLE *table)
} }
bool Virtual_column_info::check_access(THD *thd)
{
return flags & VCOL_NEXTVAL &&
expr->walk(&Item::check_sequence_privileges, 0, thd);
}
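check_access() delegates to Item::walk() with Item::check_sequence_privileges as the processor, so only NEXTVAL()/LASTVAL() items inside a DEFAULT expression do any work. A hedged miniature of that visitor pattern (simplified signatures, not the real Item class):

#include <cstdio>
#include <vector>

struct Item
{
  std::vector<Item*> args;
  virtual ~Item() {}
  virtual bool check_sequence_privileges(void *)
  { return false; }                  // most items: nothing to check
  bool walk(bool (Item::*processor)(void*), void *arg)
  {
    for (Item *a : args)             // visit children first
      if (a->walk(processor, arg))
        return true;
    return (this->*processor)(arg);  // then self
  }
};

struct Item_func_nextval : Item
{
  bool check_sequence_privileges(void *) override
  {
    std::puts("check INSERT|SELECT on the sequence");
    return false;                    // false: access OK, keep walking
  }
};

int main()
{
  Item root;
  Item_func_nextval next;
  root.args.push_back(&next);
  return root.walk(&Item::check_sequence_privileges, nullptr);
}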
/* /*
@brief @brief
Unpack the definition of a virtual column from its linear representation Unpack the definition of a virtual column from its linear representation

View File

@@ -1109,6 +1109,11 @@ struct TABLE_SHARE
return (tmp_table == SYSTEM_TMP_TABLE) ? 0 : table_map_id; return (tmp_table == SYSTEM_TMP_TABLE) ? 0 : table_map_id;
} }
bool is_optimizer_tmp_table()
{
return tmp_table == INTERNAL_TMP_TABLE && !db.length && table_name.length;
}
bool visit_subgraph(Wait_for_flush *waiting_ticket, bool visit_subgraph(Wait_for_flush *waiting_ticket,
MDL_wait_for_graph_visitor *gvisitor); MDL_wait_for_graph_visitor *gvisitor);
@@ -1752,6 +1757,7 @@ public:
TABLE *tmp_table, TABLE *tmp_table,
TMP_TABLE_PARAM *tmp_table_param, TMP_TABLE_PARAM *tmp_table_param,
bool with_cleanup); bool with_cleanup);
bool check_sequence_privileges(THD *thd);
bool vcol_fix_expr(THD *thd); bool vcol_fix_expr(THD *thd);
bool vcol_cleanup_expr(THD *thd); bool vcol_cleanup_expr(THD *thd);
Field *find_field_by_name(LEX_CSTRING *str) const; Field *find_field_by_name(LEX_CSTRING *str) const;

View File

@@ -2829,9 +2829,17 @@ static int wsrep_TOI_begin(THD *thd, const char *db, const char *table,
WSREP_DEBUG("TOI Begin: %s", wsrep_thd_query(thd)); WSREP_DEBUG("TOI Begin: %s", wsrep_thd_query(thd));
DEBUG_SYNC(thd, "wsrep_before_toi_begin"); DEBUG_SYNC(thd, "wsrep_before_toi_begin");
if (wsrep_can_run_in_toi(thd, db, table, table_list, create_info) == false) if (!wsrep_ready ||
wsrep_can_run_in_toi(thd, db, table, table_list, create_info) == false)
{ {
WSREP_DEBUG("No TOI for %s", wsrep_thd_query(thd)); WSREP_DEBUG("No TOI for %s", wsrep_thd_query(thd));
if (!wsrep_ready)
{
my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0));
push_warning_printf(thd, Sql_state_errno_level::WARN_LEVEL_WARN,
ER_GALERA_REPLICATION_NOT_SUPPORTED,
"Galera cluster is not ready to execute replication");
}
return 1; return 1;
} }

View File

@@ -27,6 +27,7 @@
#include <cstdlib> #include <cstdlib>
#include "wsrep_trans_observer.h" #include "wsrep_trans_observer.h"
#include "wsrep_server_state.h" #include "wsrep_server_state.h"
#include "wsrep_mysqld.h"
ulong wsrep_reject_queries; ulong wsrep_reject_queries;
@@ -123,6 +124,14 @@ bool wsrep_on_update (sys_var *self, THD* thd, enum_var_type var_type)
saved_wsrep_on= false; saved_wsrep_on= false;
} }
if (!wsrep_ready_get())
{
my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0));
WSREP_INFO("Failed to start Galera replication. Please check your "
"configuration.");
saved_wsrep_on= false;
}
free(tmp); free(tmp);
mysql_mutex_lock(&LOCK_global_system_variables); mysql_mutex_lock(&LOCK_global_system_variables);
} }

View File

@@ -950,12 +950,15 @@ MY_ATTRIBUTE((warn_unused_result))
@return number of pages written or hole-punched */ @return number of pages written or hole-punched */
uint32_t fil_space_t::flush_freed(bool writable) noexcept uint32_t fil_space_t::flush_freed(bool writable) noexcept
{ {
mysql_mutex_assert_not_owner(&buf_pool.flush_list_mutex);
mysql_mutex_assert_not_owner(&buf_pool.mutex);
const bool punch_hole= chain.start->punch_hole == 1; const bool punch_hole= chain.start->punch_hole == 1;
if (!punch_hole && !srv_immediate_scrub_data_uncompressed) if (!punch_hole && !srv_immediate_scrub_data_uncompressed)
return 0; return 0;
if (srv_is_undo_tablespace(id))
mysql_mutex_assert_not_owner(&buf_pool.flush_list_mutex); /* innodb_undo_log_truncate=ON can take care of these better */
mysql_mutex_assert_not_owner(&buf_pool.mutex); return 0;
for (;;) for (;;)
{ {

View File

@@ -870,6 +870,8 @@ retry:
else if (table) else if (table)
table->acquire(); table->acquire();
} }
else if (!dict_locked)
dict_sys.unfreeze();
return table; return table;
} }

View File

@@ -2705,23 +2705,30 @@ fil_io_t fil_space_t::io(const IORequest &type, os_offset_t offset, size_t len,
        while (node->size <= p) {
                p -= node->size;
-               node = UT_LIST_GET_NEXT(chain, node);
-               if (!node) {
+               if (!UT_LIST_GET_NEXT(chain, node)) {
 fail:
-                       if (type.type != IORequest::READ_ASYNC) {
+                       switch (type.type) {
+                       case IORequest::READ_ASYNC:
+                               /* Read-ahead may be requested for
+                               non-existing pages. Ignore such
+                               requests. */
+                               break;
+                       default:
                                fil_invalid_page_access_msg(
                                        node->name,
                                        offset, len,
                                        type.is_read());
-                       }
 #ifndef DBUG_OFF
 io_error:
 #endif
                                set_corrupted();
+                       }
                        err = DB_CORRUPTION;
                        node = nullptr;
                        goto release;
                }
+               node = UT_LIST_GET_NEXT(chain, node);
        }

        offset = os_offset_t{p} << srv_page_size_shift;
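The rewritten loop no longer dereferences a null node in the failure path, and it demotes out-of-range asynchronous reads to a silent failure, since read-ahead may legitimately probe pages beyond the end of a file. A compact model of that dispatch, with an illustrative request type rather than the real IORequest:

    #include <cstdio>

    enum class req_type { READ_SYNC, READ_ASYNC, WRITE };

    // Returns true when the page exists in the file. Out-of-range
    // asynchronous reads stay silent, mirroring the switch above;
    // everything else is reported as an invalid page access.
    static bool page_exists(req_type type, unsigned page, unsigned pages_in_file)
    {
        if (page < pages_in_file)
            return true;
        switch (type)
        {
        case req_type::READ_ASYNC:
            // Read-ahead may be requested for non-existing pages: ignore.
            break;
        default:
            std::fprintf(stderr, "invalid access to page %u\n", page);
        }
        return false;  // the caller still records DB_CORRUPTION either way
    }

    int main()
    {
        page_exists(req_type::READ_ASYNC, 100, 64);  // silent
        page_exists(req_type::READ_SYNC, 100, 64);   // reported
        return 0;
    }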

View File

@@ -122,8 +122,6 @@ simple_thread_local ha_handler_stats *mariadb_stats;
 #include <limits>
 #include <myisamchk.h>  // TT_FOR_UPGRADE

-#define thd_get_trx_isolation(X) ((enum_tx_isolation)thd_tx_isolation(X))
-
 extern "C" void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all);
 unsigned long long thd_get_query_id(const MYSQL_THD thd);
 void thd_clear_error(MYSQL_THD thd);
@@ -839,14 +837,16 @@ innodb_tmpdir_validate(
return(0); return(0);
} }
/******************************************************************//** /** @return the current transaction isolation level */
Maps a MySQL trx isolation level code to the InnoDB isolation level code static inline uint innodb_isolation_level(const THD *thd) noexcept
@return InnoDB isolation level */ {
static inline static_assert(ISO_REPEATABLE_READ == TRX_ISO_REPEATABLE_READ, "");
uint static_assert(ISO_SERIALIZABLE == TRX_ISO_SERIALIZABLE, "");
innobase_map_isolation_level( static_assert(ISO_READ_COMMITTED == TRX_ISO_READ_COMMITTED, "");
/*=========================*/ static_assert(ISO_READ_UNCOMMITTED == TRX_ISO_READ_UNCOMMITTED, "");
enum_tx_isolation iso); /*!< in: MySQL isolation level code */ return high_level_read_only
? ISO_READ_UNCOMMITTED : (thd_tx_isolation(thd) & 3);
}
/** Gets field offset for a field in a table. /** Gets field offset for a field in a table.
@param[in] table MySQL table object @param[in] table MySQL table object
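The replacement function relies on the SQL-layer and InnoDB isolation codes being numerically identical, so the old switch-based mapping reduces to a masked pass-through with the read-only override applied first. A self-contained sketch of the technique; the enum values below model the real ones, which are assumed here to share this 0..3 ordering:

    // static_assert turns the "same numeric codes" assumption into a
    // compile-time check, after which no runtime switch is needed.
    enum sql_iso { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED,
                   ISO_REPEATABLE_READ, ISO_SERIALIZABLE };
    enum trx_iso { TRX_ISO_READ_UNCOMMITTED, TRX_ISO_READ_COMMITTED,
                   TRX_ISO_REPEATABLE_READ, TRX_ISO_SERIALIZABLE };

    static_assert(ISO_READ_UNCOMMITTED == TRX_ISO_READ_UNCOMMITTED, "");
    static_assert(ISO_READ_COMMITTED   == TRX_ISO_READ_COMMITTED, "");
    static_assert(ISO_REPEATABLE_READ  == TRX_ISO_REPEATABLE_READ, "");
    static_assert(ISO_SERIALIZABLE     == TRX_ISO_SERIALIZABLE, "");

    // "& 3" clamps the value into the two bits the transaction object
    // stores; the read-only flag replaces the old forced-recovery branch.
    static unsigned isolation_level(unsigned thd_iso, bool read_only)
    {
        return read_only ? ISO_READ_UNCOMMITTED : (thd_iso & 3);
    }

    int main()
    {
        return isolation_level(ISO_SERIALIZABLE, false)
                   == TRX_ISO_SERIALIZABLE ? 0 : 1;
    }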
@@ -4363,21 +4363,18 @@ innobase_start_trx_and_assign_read_view(
        trx_start_if_not_started_xa(trx, false);

-       /* Assign a read view if the transaction does not have it yet.
-       Do this only if transaction is using REPEATABLE READ isolation
-       level. */
-       trx->isolation_level = innobase_map_isolation_level(
-               thd_get_trx_isolation(thd)) & 3;
+       /* Assign a read view if the transaction does not have one yet.
+       Skip this for the READ UNCOMMITTED isolation level. */
+       trx->isolation_level = innodb_isolation_level(thd) & 3;

-       if (trx->isolation_level == TRX_ISO_REPEATABLE_READ) {
+       if (trx->isolation_level != TRX_ISO_READ_UNCOMMITTED) {
                trx->read_view.open(trx);
        } else {
                push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
                                    HA_ERR_UNSUPPORTED,
                                    "InnoDB: WITH CONSISTENT SNAPSHOT"
-                                   " was ignored because this phrase"
-                                   " can only be used with"
-                                   " REPEATABLE READ isolation level.");
+                                   " is ignored at READ UNCOMMITTED"
+                                   " isolation level.");
        }

        /* Set the MySQL flag to mark that there is an active transaction */
@@ -4987,7 +4984,7 @@ ha_innobase::table_flags() const
        called before prebuilt is inited. */

        if (thd_tx_isolation(thd) <= ISO_READ_COMMITTED) {
-               return(flags);
+               return(flags | HA_CHECK_UNIQUE_AFTER_WRITE);
        }

        return(flags | HA_BINLOG_STMT_CAPABLE);
@@ -14059,10 +14056,10 @@ int ha_innobase::truncate()
                   trx);
  if (!err)
  {
-    trx->commit(deleted);
    m_prebuilt->table->acquire();
    create_table_info_t::create_table_update_dict(m_prebuilt->table,
                                                  m_user_thd, info, *table);
+    trx->commit(deleted);
  }
  else
  {
@@ -16142,31 +16139,6 @@ ha_innobase::start_stmt(
        DBUG_RETURN(0);
 }

-/******************************************************************//**
-Maps a MySQL trx isolation level code to the InnoDB isolation level code
-@return InnoDB isolation level */
-static inline
-uint
-innobase_map_isolation_level(
-/*=========================*/
-       enum_tx_isolation       iso)    /*!< in: MySQL isolation level code */
-{
-       if (UNIV_UNLIKELY(srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN)
-           || UNIV_UNLIKELY(srv_read_only_mode)) {
-               return TRX_ISO_READ_UNCOMMITTED;
-       }
-       switch (iso) {
-       case ISO_REPEATABLE_READ: return(TRX_ISO_REPEATABLE_READ);
-       case ISO_READ_COMMITTED: return(TRX_ISO_READ_COMMITTED);
-       case ISO_SERIALIZABLE: return(TRX_ISO_SERIALIZABLE);
-       case ISO_READ_UNCOMMITTED: return(TRX_ISO_READ_UNCOMMITTED);
-       }
-       ut_error;
-       return(0);
-}
-
 /******************************************************************//**
 As MySQL will execute an external lock for every new table it uses when it
 starts to process an SQL statement (an exception is when MySQL calls
@@ -16633,19 +16605,30 @@ ha_innobase::store_lock(
        Be careful to ignore TL_IGNORE if we are going to do something with
        only 'real' locks! */

-       /* If no MySQL table is in use, we need to set the isolation level
+       /* If no table handle is open, we need to set the isolation level
        of the transaction. */

        if (lock_type != TL_IGNORE
            && trx->n_mysql_tables_in_use == 0) {
-               trx->isolation_level = innobase_map_isolation_level(
-                       (enum_tx_isolation) thd_tx_isolation(thd)) & 3;
-
-               if (trx->isolation_level <= TRX_ISO_READ_COMMITTED) {
+               switch ((trx->isolation_level
+                        = innodb_isolation_level(thd) & 3)) {
+               case ISO_REPEATABLE_READ:
+                       break;
+               case ISO_READ_COMMITTED:
+               case ISO_READ_UNCOMMITTED:
                        /* At low transaction isolation levels we let
                        each consistent read set its own snapshot */
                        trx->read_view.close();
+                       break;
+               case ISO_SERIALIZABLE:
+                       auto trx_state = trx->state;
+                       if (trx_state != TRX_STATE_NOT_STARTED) {
+                               ut_ad(trx_state == TRX_STATE_ACTIVE);
+                       } else if (trx->snapshot_isolation) {
+                               trx->will_lock = true;
+                               trx_start_if_not_started(trx, false);
+                               trx->read_view.open(trx);
+                       }
                }
        }

View File

@@ -1850,12 +1850,17 @@ corrupted:
        return true;
 }

-/*********************************************************************//**
-Removes a page from the free list and frees it to the fsp system. */
-static void ibuf_remove_free_page()
+/** Removes a page from the free list and frees it to the fsp system.
+@param all  Free all freed page. This should be useful only during slow
+shutdown
+@return error code when InnoDB fails to free the page
+@retval DB_SUCCESS_LOCKED_REC if all free pages are freed
+@retval DB_SUCCESS if page is freed */
+static dberr_t ibuf_remove_free_page(bool all = false)
 {
        mtr_t   mtr;
        page_t* header_page;
+       dberr_t err = DB_SUCCESS;

        log_free_check();
@@ -1871,17 +1876,17 @@ static void ibuf_remove_free_page()
        mysql_mutex_lock(&ibuf_pessimistic_insert_mutex);
        mysql_mutex_lock(&ibuf_mutex);

-       if (!header_page || !ibuf_data_too_much_free()) {
+       if (!header_page || (!all && !ibuf_data_too_much_free())) {
 early_exit:
                mysql_mutex_unlock(&ibuf_mutex);
                mysql_mutex_unlock(&ibuf_pessimistic_insert_mutex);
+exit:
                ibuf_mtr_commit(&mtr);
-               return;
+               return err;
        }

-       buf_block_t* root = ibuf_tree_root_get(&mtr);
+       buf_block_t* root = ibuf_tree_root_get(&mtr, &err);

        if (UNIV_UNLIKELY(!root)) {
                goto early_exit;
@@ -1892,7 +1897,10 @@ early_exit:
                         + PAGE_BTR_IBUF_FREE_LIST
                         + root->page.frame).page;

+       /* If all the freed pages are removed during slow shutdown
+       then exit early with DB_SUCCESS_LOCKED_REC */
        if (page_no >= fil_system.sys_space->free_limit) {
+               err = DB_SUCCESS_LOCKED_REC;
                goto early_exit;
        }
@@ -1914,7 +1922,7 @@ early_exit:
        compile_time_assert(IBUF_SPACE_ID == 0);
        const page_id_t page_id{IBUF_SPACE_ID, page_no};
        buf_block_t*    bitmap_page = nullptr;
-       dberr_t         err = fseg_free_page(
+       err = fseg_free_page(
                header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
                fil_system.sys_space, page_no, &mtr);
@@ -1959,7 +1967,7 @@ func_exit:
                buf_page_free(fil_system.sys_space, page_no, &mtr);
        }

-       ibuf_mtr_commit(&mtr);
+       goto exit;
 }

 /***********************************************************************//**
@@ -2427,7 +2435,9 @@ ATTRIBUTE_COLD ulint ibuf_contract()
              == page_id_t(IBUF_SPACE_ID, FSP_IBUF_TREE_ROOT_PAGE_NO));
        ibuf_mtr_commit(&mtr);

+       /* Remove all free page from free list and
+       frees it to system tablespace */
+       while (ibuf_remove_free_page(true) == DB_SUCCESS);
        return(0);
 }
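ibuf_remove_free_page() now reports an outcome, reusing DB_SUCCESS_LOCKED_REC as a free-list-exhausted sentinel so that ibuf_contract() can drain every freed page during slow shutdown. The loop idiom, reduced to a runnable sketch in which a plain queue stands in for the change buffer's free list:

    #include <deque>
    #include <queue>

    enum dberr_t { DB_SUCCESS, DB_SUCCESS_LOCKED_REC };

    // A plain queue stands in for the change buffer free list.
    static std::queue<unsigned> free_list(std::deque<unsigned>{3, 7, 11});

    // Frees one page; once nothing is left, the DB_SUCCESS_LOCKED_REC
    // sentinel tells the caller that all free pages have been freed.
    static dberr_t remove_free_page(bool all)
    {
        (void)all;  // the sketch always behaves as if all=true
        if (free_list.empty())
            return DB_SUCCESS_LOCKED_REC;
        free_list.pop();
        return DB_SUCCESS;
    }

    int main()
    {
        // Mirrors the new drain loop in ibuf_contract():
        // while (ibuf_remove_free_page(true) == DB_SUCCESS);
        while (remove_free_page(true) == DB_SUCCESS) {}
        return free_list.empty() ? 0 : 1;
    }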

View File

@@ -1,14 +1,6 @@
--- suite/storage_engine/trx/cons_snapshot_serializable.result --- suite/storage_engine/trx/cons_snapshot_serializable.result
+++ suite/storage_engine/trx/cons_snapshot_serializable.reject +++ suite/storage_engine/trx/cons_snapshot_serializable.reject
@@ -5,12 +5,15 @@ @@ -11,6 +11,7 @@
CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
+Warnings:
+Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level.
connection con2;
INSERT INTO t1 (a) VALUES (1);
connection con1;
# If consistent read works on this isolation level (SERIALIZABLE), the following SELECT should not return the value we inserted (1) # If consistent read works on this isolation level (SERIALIZABLE), the following SELECT should not return the value we inserted (1)
SELECT a FROM t1; SELECT a FROM t1;
a a

View File

@@ -1,11 +0,0 @@
---- suite/storage_engine/trx/level_read_committed.result
-+++ suite/storage_engine/trx/level_read_committed.reject
-@@ -77,6 +77,8 @@
- CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
- SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
- START TRANSACTION WITH CONSISTENT SNAPSHOT;
-+Warnings:
-+Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level.
- connection con2;
- INSERT INTO t1 (a) VALUES (1);
- connection con1;

View File

@@ -5,7 +5,7 @@
  SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
  START TRANSACTION WITH CONSISTENT SNAPSHOT;
 +Warnings:
-+Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level.
++Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT is ignored at READ UNCOMMITTED isolation level.
  connection con2;
  INSERT INTO t1 (a) VALUES (1);
  connection con1;

View File

@@ -977,8 +977,9 @@ page_delete_rec_list_end(
     size+= s;
     n_recs++;

-    if (scrub)
-      mtr->memset(block, rec2 - page, rec_offs_data_size(offsets), 0);
+    if (UNIV_LIKELY(!scrub));
+    else if (size_t size= rec_offs_data_size(offsets))
+      mtr->memset(block, rec2 - page, size, 0);

     rec2= page_rec_get_next(rec2);
   }
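The rewrite uses the "if (cond); else if (T v = expr)" idiom: an empty true-branch inverts the likely case so the else-if can declare the size and test it in one step, and a zero-length payload then skips the memset entirely. Demonstrated standalone, with a hypothetical stand-in for mtr_t::memset():

    #include <cstdio>

    // Hypothetical stand-in for mtr_t::memset() on the record payload.
    static void memset_stub(unsigned size) { std::printf("zeroing %u bytes\n", size); }

    // The memset runs only when scrubbing is enabled and the record
    // payload is nonzero; "size" is scoped to the else-if branch.
    static void scrub_record(bool scrub, unsigned payload_size)
    {
        if (!scrub);                            // likely path: nothing to do
        else if (unsigned size = payload_size)  // declare-and-test in one step
            memset_stub(size);
    }

    int main()
    {
        scrub_record(true, 16);   // zeroes 16 bytes
        scrub_record(true, 0);    // zero-length payload: memset skipped
        scrub_record(false, 16);  // scrubbing disabled: nothing happens
        return 0;
    }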

Some files were not shown because too many files have changed in this diff.