1
0
mirror of https://github.com/MariaDB/server.git synced 2025-08-08 11:22:35 +03:00

Merge 10.11 into 11.4

This commit is contained in:
Marko Mäkelä
2025-03-03 11:07:56 +02:00
129 changed files with 3001 additions and 1681 deletions

View File

@@ -76,3 +76,16 @@ WHERE schema_name='comment';
CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME SQL_PATH SCHEMA_COMMENT
def comment latin2 latin2_general_ci NULL comment
DROP DATABASE comment;
CREATE DATABASE db1;
# restart
SHOW CREATE DATABASE db1;
Database Create Database
db1 CREATE DATABASE `db1` /*!40100 DEFAULT CHARACTER SET latin1 COLLATE latin1_swedish_ci */
Warnings:
Note 1105 Database 'db1' does not have a db.opt file. You can create one with ALTER DATABASE if needed
SHOW CREATE DATABASE db1;
Database Create Database
db1 CREATE DATABASE `db1` /*!40100 DEFAULT CHARACTER SET latin1 COLLATE latin1_swedish_ci */
Warnings:
Note 1105 Database 'db1' does not have a db.opt file. You can create one with ALTER DATABASE if needed
DROP DATABASE db1;

View File

@@ -63,3 +63,11 @@ SELECT * FROM information_schema.schemata
WHERE schema_name='comment';
DROP DATABASE comment;
--enable_service_connection
CREATE DATABASE db1;
--remove_file $MARIADB_DATADIR/db1/db.opt
--source include/restart_mysqld.inc
# We need to call this two times to ensure all code paths are used
SHOW CREATE DATABASE db1;
SHOW CREATE DATABASE db1;
DROP DATABASE db1;

View File

@@ -53,6 +53,8 @@ SET @@character_set_database=DEFAULT;
SHOW CREATE DATABASE db1;
Database Create Database
db1 CREATE DATABASE `db1` /*!40100 DEFAULT CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci */
Warnings:
Note 1105 Database 'db1' does not have a db.opt file. You can create one with ALTER DATABASE if needed
USE db1;
SELECT @@character_set_database, 'taken from defaults' AS comment;
@@character_set_database comment

View File

@@ -5474,4 +5474,31 @@ INSERT INTO t VALUES (1,POINT(0,0)),(2,POINT(0,0));
SELECT NTH_VALUE(a,b) OVER () FROM t;
ERROR HY000: Illegal parameter data types point and bigint for operation '-'
DROP TABLE t;
#
# MDEV-32619 Setting SRID on geometry with ST_*FromWKB(g, srid)
#
SELECT
ST_SRID(g1),
ST_SRID(ST_GeomFromWKB(g1, 4326)),
ST_SRID(ST_GeomFromWKB(g1)),
ST_AsText(g1),
ST_SRID(ST_PointFromWKB(g2, 4326)),
ST_SRID(g2),
ST_SRID(ST_LineStringFromWKB(g3, 3)),
ST_SRID(ST_PolygonFromWKB(g4, 4)),
ST_SRID(ST_MultiPointFromWKB(g5, 5)),
ST_SRID(ST_MultiLineStringFromWKB(g6, 6)),
ST_SRID(ST_MultiPolygonFromWKB(g7, 7))
FROM (
SELECT
POINT(1, 2) AS g1,
POINT(4, 3) AS g2,
LINESTRING(POINT(4, 3), POINT(4, 4)) AS g3,
POLYGON(LINESTRING(POINT(4, 3), POINT(4, 4), POINT(3, 4), POINT(4, 3))) AS g4,
MULTIPOINT(POINT(4, 3)) AS g5,
MULTILINESTRING(LINESTRING(POINT(4, 3), POINT(4, 4))) AS g6,
MULTIPOLYGON(POLYGON(LINESTRING(POINT(4, 3), POINT(4, 4), POINT(3, 4), POINT(4, 3)))) AS g7
) AS t;
ST_SRID(g1) ST_SRID(ST_GeomFromWKB(g1, 4326)) ST_SRID(ST_GeomFromWKB(g1)) ST_AsText(g1) ST_SRID(ST_PointFromWKB(g2, 4326)) ST_SRID(g2) ST_SRID(ST_LineStringFromWKB(g3, 3)) ST_SRID(ST_PolygonFromWKB(g4, 4)) ST_SRID(ST_MultiPointFromWKB(g5, 5)) ST_SRID(ST_MultiLineStringFromWKB(g6, 6)) ST_SRID(ST_MultiPolygonFromWKB(g7, 7))
0 4326 0 POINT(1 2) 4326 0 3 4 5 6 7
# End of 10.5 tests

View File

@@ -3482,4 +3482,30 @@ INSERT INTO t VALUES (1,POINT(0,0)),(2,POINT(0,0));
SELECT NTH_VALUE(a,b) OVER () FROM t;
DROP TABLE t;
--echo #
--echo # MDEV-32619 Setting SRID on geometry with ST_*FromWKB(g, srid)
--echo #
SELECT
ST_SRID(g1),
ST_SRID(ST_GeomFromWKB(g1, 4326)),
ST_SRID(ST_GeomFromWKB(g1)),
ST_AsText(g1),
ST_SRID(ST_PointFromWKB(g2, 4326)),
ST_SRID(g2),
ST_SRID(ST_LineStringFromWKB(g3, 3)),
ST_SRID(ST_PolygonFromWKB(g4, 4)),
ST_SRID(ST_MultiPointFromWKB(g5, 5)),
ST_SRID(ST_MultiLineStringFromWKB(g6, 6)),
ST_SRID(ST_MultiPolygonFromWKB(g7, 7))
FROM (
SELECT
POINT(1, 2) AS g1,
POINT(4, 3) AS g2,
LINESTRING(POINT(4, 3), POINT(4, 4)) AS g3,
POLYGON(LINESTRING(POINT(4, 3), POINT(4, 4), POINT(3, 4), POINT(4, 3))) AS g4,
MULTIPOINT(POINT(4, 3)) AS g5,
MULTILINESTRING(LINESTRING(POINT(4, 3), POINT(4, 4))) AS g6,
MULTIPOLYGON(POLYGON(LINESTRING(POINT(4, 3), POINT(4, 4), POINT(3, 4), POINT(4, 3)))) AS g7
) AS t;
--echo # End of 10.5 tests

View File

@@ -6506,3 +6506,29 @@ DROP TABLE t1, t2;
#
# End of 10.5 tests
#
#
# MDEV-36165: BKA join cache buffer is employed despite join_cache_level=3 (flat BNLH)
#
CREATE TABLE t1(a INT);
INSERT INTO t1 VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
CREATE TABLE t2(a INT, b INT);
INSERT INTO t2 SELECT a, a from t1;
CREATE TABLE t3(a INT, b INT, c INT, key (a,b));
INSERT INTO t3 select a, a, a FROM t1;
SET optimizer_switch = 'join_cache_hashed=off,join_cache_bka=on,mrr=on';
SET join_cache_level = 3;
EXPLAIN SELECT * FROM t2, t3 WHERE t2.a=t3.a AND (t3.b+1 <= t2.b+1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using where
1 SIMPLE t3 ref a a 5 test.t2.a 1 Using index condition
SET join_cache_level = 4;
EXPLAIN SELECT * FROM t2, t3 WHERE t2.a=t3.a AND (t3.b+1 <= t2.b+1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using where
1 SIMPLE t3 ref a a 5 test.t2.a 1 Using index condition
SET join_cache_level = default;
SET optimizer_switch = default;
DROP TABLE t1, t2, t3;
#
# End of 10.11 tests
#

View File

@@ -4380,3 +4380,30 @@ DROP TABLE t1, t2;
--echo #
--echo # End of 10.5 tests
--echo #
--echo #
--echo # MDEV-36165: BKA join cache buffer is employed despite join_cache_level=3 (flat BNLH)
--echo #
--source include/have_sequence.inc
CREATE TABLE t1(a INT);
INSERT INTO t1 VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
CREATE TABLE t2(a INT, b INT);
INSERT INTO t2 SELECT a, a from t1;
CREATE TABLE t3(a INT, b INT, c INT, key (a,b));
INSERT INTO t3 select a, a, a FROM t1;
SET optimizer_switch = 'join_cache_hashed=off,join_cache_bka=on,mrr=on';
SET join_cache_level = 3;
EXPLAIN SELECT * FROM t2, t3 WHERE t2.a=t3.a AND (t3.b+1 <= t2.b+1);
SET join_cache_level = 4;
EXPLAIN SELECT * FROM t2, t3 WHERE t2.a=t3.a AND (t3.b+1 <= t2.b+1);
SET join_cache_level = default;
SET optimizer_switch = default;
DROP TABLE t1, t2, t3;
--echo #
--echo # End of 10.11 tests
--echo #

View File

@@ -12,6 +12,8 @@ FLUSH TABLES;
SHOW CREATE DATABASE sys;
Database Create Database
sys CREATE DATABASE `sys` /*!40100 DEFAULT CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_ci */
Warnings:
Note 1105 Database 'sys' does not have a db.opt file. You can create one with ALTER DATABASE if needed
Phase 1/8: Checking and upgrading mysql database
Processing databases
mysql

View File

@@ -248,3 +248,63 @@ SELECT id FROM t1 WHERE id IS NULL OR id NOT BETWEEN 1 AND 4;
id
5
DROP TABLE t1;
#
# MDEV-34620: Many index_merge variants made and discarded for a big OR
#
CREATE TABLE t1 (
a1 int NOT NULL,
a2 int NOT NULL,
filler char(100),
KEY key1 (a1,a2),
KEY key2 (a2,a1)
);
insert into t1 (a1,a2) values (1,1),(2,2),(3,3);
set @query= concat(
"explain select * from t1 where\n",
(select
group_concat(concat("a1=", seq, " and a2=", seq, " ") separator "\nor " )
from seq_1_to_30)
);
set optimizer_trace=1;
prepare s from @query;
execute s;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL key1,key2 NULL NULL NULL 3 Using where
set @trace=json_extract((select trace from information_schema.optimizer_trace), '$**.range_analysis');
# Observe that "key1" is a part of several index_merge_union:
select json_pretty(json_search(@trace, 'all', 'key1'));
json_pretty(json_search(@trace, 'all', 'key1'))
[
"$[0].potential_range_indexes[0].index",
"$[0].analyzing_range_alternatives.range_scan_alternatives[0].index",
"$[0].analyzing_range_alternatives.analyzing_index_merge_union[0].indexes_to_merge[0].range_scan_alternatives[0].index",
"$[0].analyzing_range_alternatives.analyzing_index_merge_union[0].indexes_to_merge[1].range_scan_alternatives[0].index",
"$[0].analyzing_range_alternatives.analyzing_index_merge_union[1].indexes_to_merge[0].range_scan_alternatives[0].index",
"$[0].analyzing_range_alternatives.analyzing_index_merge_union[1].indexes_to_merge[1].range_scan_alternatives[0].index",
"$[0].analyzing_range_alternatives.analyzing_index_merge_union[1].indexes_to_merge[2].range_scan_alternatives[0].index",
"$[0].analyzing_range_alternatives.analyzing_index_merge_union[2].indexes_to_merge[0].range_scan_alternatives[0].index",
"$[0].analyzing_range_alternatives.analyzing_index_merge_union[2].indexes_to_merge[1].range_scan_alternatives[0].index"
]
#
# Now, same as above but for a long IN-list
#
set @query= concat(
"explain select * from t1 where\n",
(select
group_concat(concat("a1=", seq, " and a2=", seq, " ") separator "\nor " )
from seq_1_to_120)
);
set optimizer_trace=1;
prepare s from @query;
execute s;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL key1,key2 NULL NULL NULL 3 Using where
set @trace=json_extract((select trace from information_schema.optimizer_trace), '$**.range_analysis');
# Observe that there are NO index_merge_union candidates. Only one potential range scan:
select json_pretty(json_search(@trace, 'all', 'key1'));
json_pretty(json_search(@trace, 'all', 'key1'))
[
"$[0].potential_range_indexes[0].index",
"$[0].analyzing_range_alternatives.range_scan_alternatives[0].index"
]
drop table t1;

View File

@@ -163,3 +163,51 @@ INSERT INTO t1 VALUES (1),(5);
SELECT id FROM t1 WHERE id IS NULL OR id NOT BETWEEN 1 AND 4;
DROP TABLE t1;
--echo #
--echo # MDEV-34620: Many index_merge variants made and discarded for a big OR
--echo #
CREATE TABLE t1 (
a1 int NOT NULL,
a2 int NOT NULL,
filler char(100),
KEY key1 (a1,a2),
KEY key2 (a2,a1)
);
insert into t1 (a1,a2) values (1,1),(2,2),(3,3);
set @query= concat(
"explain select * from t1 where\n",
(select
group_concat(concat("a1=", seq, " and a2=", seq, " ") separator "\nor " )
from seq_1_to_30)
);
set optimizer_trace=1;
prepare s from @query;
execute s;
set @trace=json_extract((select trace from information_schema.optimizer_trace), '$**.range_analysis');
--echo # Observe that "key1" is a part of several index_merge_union:
select json_pretty(json_search(@trace, 'all', 'key1'));
--echo #
--echo # Now, same as above but for a long IN-list
--echo #
set @query= concat(
"explain select * from t1 where\n",
(select
group_concat(concat("a1=", seq, " and a2=", seq, " ") separator "\nor " )
from seq_1_to_120)
);
set optimizer_trace=1;
prepare s from @query;
execute s;
set @trace=json_extract((select trace from information_schema.optimizer_trace), '$**.range_analysis');
--echo # Observe that there are NO index_merge_union candidates. Only one potential range scan:
select json_pretty(json_search(@trace, 'all', 'key1'));
drop table t1;

View File

@@ -46,23 +46,70 @@ ALTER TABLE t1 MODIFY a DECIMAL(10,0);
SELECT * FROM t1,t2 WHERE a=d;
a b c pk d e
Warnings:
Warning 1292 Truncated incorrect DECIMAL value: 'd'
Warning 1292 Truncated incorrect DECIMAL value: 'd'
Warning 1292 Truncated incorrect DECIMAL value: 'f'
Warning 1292 Truncated incorrect DECIMAL value: 'f'
Warning 1292 Truncated incorrect DECIMAL value: 'g'
Warning 1292 Truncated incorrect DECIMAL value: 'k'
Warning 1292 Truncated incorrect DECIMAL value: 'm'
Warning 1292 Truncated incorrect DECIMAL value: 'm'
Warning 1292 Truncated incorrect DECIMAL value: 'm'
Warning 1292 Truncated incorrect DECIMAL value: 'o'
Warning 1292 Truncated incorrect DECIMAL value: 'q'
Warning 1292 Truncated incorrect DECIMAL value: 'r'
Warning 1292 Truncated incorrect DECIMAL value: 'u'
Warning 1292 Truncated incorrect DECIMAL value: 'w'
Warning 1292 Truncated incorrect DECIMAL value: 'x'
Warning 1292 Truncated incorrect DECIMAL value: 'x'
Warning 1292 Truncated incorrect DECIMAL value: 'y'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'g'
Warning 1292 Truncated incorrect DOUBLE value: 'k'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'o'
Warning 1292 Truncated incorrect DOUBLE value: 'q'
Warning 1292 Truncated incorrect DOUBLE value: 'r'
Warning 1292 Truncated incorrect DOUBLE value: 'u'
Warning 1292 Truncated incorrect DOUBLE value: 'w'
Warning 1292 Truncated incorrect DOUBLE value: 'x'
Warning 1292 Truncated incorrect DOUBLE value: 'x'
Warning 1292 Truncated incorrect DOUBLE value: 'y'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'g'
Warning 1292 Truncated incorrect DOUBLE value: 'k'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'o'
Warning 1292 Truncated incorrect DOUBLE value: 'q'
Warning 1292 Truncated incorrect DOUBLE value: 'r'
Warning 1292 Truncated incorrect DOUBLE value: 'u'
Warning 1292 Truncated incorrect DOUBLE value: 'w'
Warning 1292 Truncated incorrect DOUBLE value: 'x'
Warning 1292 Truncated incorrect DOUBLE value: 'x'
Warning 1292 Truncated incorrect DOUBLE value: 'y'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'g'
Warning 1292 Truncated incorrect DOUBLE value: 'k'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'o'
Warning 1292 Truncated incorrect DOUBLE value: 'q'
Warning 1292 Truncated incorrect DOUBLE value: 'r'
Warning 1292 Truncated incorrect DOUBLE value: 'u'
Warning 1292 Truncated incorrect DOUBLE value: 'w'
Warning 1292 Truncated incorrect DOUBLE value: 'x'
Warning 1292 Truncated incorrect DOUBLE value: 'x'
Warning 1292 Truncated incorrect DOUBLE value: 'y'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'g'
Warning 1292 Truncated incorrect DOUBLE value: 'k'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'o'
Warning 1292 Truncated incorrect DOUBLE value: 'q'
Warning 1292 Truncated incorrect DOUBLE value: 'r'
Warning 1292 Truncated incorrect DOUBLE value: 'u'
ALTER TABLE t1 MODIFY a DOUBLE;
SELECT * FROM t1,t2 WHERE a=d;
a b c pk d e
@@ -84,6 +131,53 @@ Warning 1292 Truncated incorrect DOUBLE value: 'w'
Warning 1292 Truncated incorrect DOUBLE value: 'x'
Warning 1292 Truncated incorrect DOUBLE value: 'x'
Warning 1292 Truncated incorrect DOUBLE value: 'y'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'g'
Warning 1292 Truncated incorrect DOUBLE value: 'k'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'o'
Warning 1292 Truncated incorrect DOUBLE value: 'q'
Warning 1292 Truncated incorrect DOUBLE value: 'r'
Warning 1292 Truncated incorrect DOUBLE value: 'u'
Warning 1292 Truncated incorrect DOUBLE value: 'w'
Warning 1292 Truncated incorrect DOUBLE value: 'x'
Warning 1292 Truncated incorrect DOUBLE value: 'x'
Warning 1292 Truncated incorrect DOUBLE value: 'y'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'g'
Warning 1292 Truncated incorrect DOUBLE value: 'k'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'o'
Warning 1292 Truncated incorrect DOUBLE value: 'q'
Warning 1292 Truncated incorrect DOUBLE value: 'r'
Warning 1292 Truncated incorrect DOUBLE value: 'u'
Warning 1292 Truncated incorrect DOUBLE value: 'w'
Warning 1292 Truncated incorrect DOUBLE value: 'x'
Warning 1292 Truncated incorrect DOUBLE value: 'x'
Warning 1292 Truncated incorrect DOUBLE value: 'y'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'f'
Warning 1292 Truncated incorrect DOUBLE value: 'g'
Warning 1292 Truncated incorrect DOUBLE value: 'k'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'o'
Warning 1292 Truncated incorrect DOUBLE value: 'q'
Warning 1292 Truncated incorrect DOUBLE value: 'r'
Warning 1292 Truncated incorrect DOUBLE value: 'u'
DROP TABLE t1,t2;
#
# End of 10.2 tests

View File

@@ -766,6 +766,86 @@ u xxb
drop table t1;
# End of MariaDB 10.4 tests
#
# MDEV-35955 Wrong result for UPDATE ... ORDER BY LIMIT which uses tmp.table
#
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
insert into t1 (id, v) values (2,3),(1,4);
insert into t2 (id, v) values (5,5),(6,6);
select t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 2;
id v id v
1 4 5 5
1 4 6 6
UPDATE t1, t2 SET t1.v=-1, t2.v=-1 ORDER BY t1.id, t2.id LIMIT 2;
select * from t1;
id v
2 3
1 -1
select * from t2;
id v
5 -1
6 -1
drop table t1, t2;
create table t1 (id int primary key, v text) engine=myisam;
create table t2 (id int primary key, v text) engine=myisam;
insert into t1 (id, v) values (1,'b'),(2,'fo'),(3,'bar'),(4,'barr'),(5,'bazzz');
insert into t2 (id, v) values (6,'quxqux'),(7,'foofoof'),(8,'barbarba'),(9,'quxquxqux'),(10,'bazbazbazb');
select t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 2;
id v id v
1 b 6 quxqux
1 b 7 foofoof
update t1, t2 set t1.v='DELETED', t2.v='DELETED' order by t1.id, t2.id limit 2;
select * from t1;
id v
1 DELETED
2 fo
3 bar
4 barr
5 bazzz
select * from t2;
id v
6 DELETED
7 DELETED
8 barbarba
9 quxquxqux
10 bazbazbazb
drop table t1, t2;
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
create table t3 (id int primary key, v int);
insert into t1 (id, v) values (1, 1000), (2, 2000), (3, 3000), (4, 4000), (5, 5000);
insert into t2 (id, v) values (10, 100), (20, 200), (30, 300), (40, 400), (50, 500);
insert into t3 (id, v) values (11, 111), (22, 222), (33, 333), (44, 444), (55, 555);
select t1.*, t2.*, t3.* from t1, t2, t3 order by t1.id, t2.id, t3.id limit 3;
id v id v id v
1 1000 10 100 11 111
1 1000 10 100 22 222
1 1000 10 100 33 333
UPDATE t1, t2, t3 SET t1.v=-1, t2.v=-2, t3.v=-3 ORDER BY t1.id, t2.id, t3.id LIMIT 3;
select * from t1;
id v
1 -1
2 2000
3 3000
4 4000
5 5000
select * from t2;
id v
10 -2
20 200
30 300
40 400
50 500
select * from t3;
id v
11 -3
22 -3
33 -3
44 444
55 555
drop table t1, t2, t3;
# End of MariaDB 10.11 tests
#
# MDEV-29189: Second execution of SF using UPDATE/DELETE
# after reported error by the first execution
#
@@ -802,4 +882,4 @@ c
DROP FUNCTION f1;
DROP FUNCTION f2;
DROP TABLE t1;
# End of MariaDB 10.10 tests
# End of MariaDB 11.1 tests

View File

@@ -708,6 +708,46 @@ drop table t1;
--echo # End of MariaDB 10.4 tests
--echo #
--echo # MDEV-35955 Wrong result for UPDATE ... ORDER BY LIMIT which uses tmp.table
--echo #
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
insert into t1 (id, v) values (2,3),(1,4);
insert into t2 (id, v) values (5,5),(6,6);
select t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 2;
UPDATE t1, t2 SET t1.v=-1, t2.v=-1 ORDER BY t1.id, t2.id LIMIT 2;
select * from t1;
select * from t2;
drop table t1, t2;
create table t1 (id int primary key, v text) engine=myisam;
create table t2 (id int primary key, v text) engine=myisam;
insert into t1 (id, v) values (1,'b'),(2,'fo'),(3,'bar'),(4,'barr'),(5,'bazzz');
insert into t2 (id, v) values (6,'quxqux'),(7,'foofoof'),(8,'barbarba'),(9,'quxquxqux'),(10,'bazbazbazb');
select t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 2;
update t1, t2 set t1.v='DELETED', t2.v='DELETED' order by t1.id, t2.id limit 2;
select * from t1;
select * from t2;
drop table t1, t2;
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
create table t3 (id int primary key, v int);
insert into t1 (id, v) values (1, 1000), (2, 2000), (3, 3000), (4, 4000), (5, 5000);
insert into t2 (id, v) values (10, 100), (20, 200), (30, 300), (40, 400), (50, 500);
insert into t3 (id, v) values (11, 111), (22, 222), (33, 333), (44, 444), (55, 555);
select t1.*, t2.*, t3.* from t1, t2, t3 order by t1.id, t2.id, t3.id limit 3;
UPDATE t1, t2, t3 SET t1.v=-1, t2.v=-2, t3.v=-3 ORDER BY t1.id, t2.id, t3.id LIMIT 3;
select * from t1;
select * from t2;
select * from t3;
drop table t1, t2, t3;
--echo # End of MariaDB 10.11 tests
--echo #
--echo # MDEV-29189: Second execution of SF using UPDATE/DELETE
--echo # after reported error by the first execution
@@ -746,4 +786,4 @@ DROP FUNCTION f2;
DROP TABLE t1;
--enable_ps2_protocol
--echo # End of MariaDB 10.10 tests
--echo # End of MariaDB 11.1 tests

View File

@@ -270,6 +270,9 @@ our $opt_force= 0;
our $opt_skip_not_found= 0;
our $opt_mem= $ENV{'MTR_MEM'};
our $opt_clean_vardir= $ENV{'MTR_CLEAN_VARDIR'};
our $opt_catalogs= 0;
our $opt_catalog_name="";
our $catalog_name="def";
our $opt_gcov;
our $opt_gprof;
@@ -3956,6 +3959,23 @@ sub run_testcase ($$) {
}
}
# Set up things for catalogs
# The values of MARIADB_TOPDIR and MARIADB_DATADIR should
# be taken from the values used by the default (first)
# connection that is used by mariadb-test.
my ($mysqld, @servers);
@servers= all_servers();
$mysqld= $servers[0];
$ENV{'MARIADB_TOPDIR'}= $mysqld->value('datadir');
if (!$opt_catalogs)
{
$ENV{'MARIADB_DATADIR'}= $mysqld->value('datadir');
}
else
{
$ENV{'MARIADB_DATADIR'}= $mysqld->value('datadir') . "/" . $catalog_name;
}
# Write start of testcase to log
mark_log($path_current_testlog, $tinfo);
@@ -4469,14 +4489,12 @@ sub extract_warning_lines ($$) {
(
@global_suppressions,
qr/error .*connecting to master/,
qr/InnoDB: Error: in ALTER TABLE `test`.`t[12]`/,
qr/InnoDB: Error: table `test`.`t[12]` .*does not exist in the InnoDB internal/,
qr/InnoDB: Warning: a long semaphore wait:/,
qr/InnoDB: Dumping buffer pool.*/,
qr/InnoDB: Buffer pool.*/,
qr/InnoDB: Could not free any blocks in the buffer pool!/,
qr/InnoDB: Warning: Writer thread is waiting this semaphore:/,
qr/InnoDB: innodb_open_files .* should not be greater than/,
qr/InnoDB: Trying to delete tablespace.*but there are.*pending/,
qr/InnoDB: Tablespace 1[0-9]* was not found at .*, and innodb_force_recovery was set/,
qr/Slave: Unknown table 't1' .* 1051/,
qr/Slave SQL:.*(Internal MariaDB error code: [[:digit:]]+|Query:.*)/,
qr/slave SQL thread aborted/,

View File

@@ -1,16 +1,15 @@
***MDEV-5914: Parallel replication deadlock due to InnoDB lock conflicts ***
include/master-slave.inc
[connection master]
connection server_2;
SET sql_log_bin=0;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CALL mtr.add_suppression("InnoDB: Transaction was aborted due to ");
CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends");
SET sql_log_bin=1;
connection server_2;
SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
include/stop_slave.inc
SET GLOBAL slave_parallel_threads=10;
CHANGE MASTER TO master_use_gtid=slave_pos;
connection server_1;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB;
INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6);
connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,;

View File

@@ -79,7 +79,7 @@ CREATE TABLE federated.t1 (
`name` varchar(32) NOT NULL default ''
)
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
CONNECTION='mariadb://root@127.0.0.1:SLAVE_PORT/federated/t1';
INSERT INTO federated.t1 (id, name) VALUES (1, 'foo');
INSERT INTO federated.t1 (id, name) VALUES (2, 'fee');
INSERT INTO federated.t1 (id, `group`) VALUES (3, 42);

View File

@@ -92,7 +92,7 @@ eval CREATE TABLE federated.t1 (
`name` varchar(32) NOT NULL default ''
)
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
CONNECTION='mariadb://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
INSERT INTO federated.t1 (id, name) VALUES (1, 'foo');
INSERT INTO federated.t1 (id, name) VALUES (2, 'fee');

View File

@@ -494,12 +494,12 @@ CREATE TABLE federated.t3 (a INT);
INSERT INTO federated.t3 VALUES (1),(2),(3);
CREATE TABLE federated.t4 (a INT);
connection master;
CREATE SERVER fedlink FOREIGN DATA WRAPPER mysql
CREATE SERVER fedlink FOREIGN DATA WRAPPER mariadb
OPTIONS (USER 'root', HOST '127.0.0.1', DATABASE 'federated',
PORT SLAVE_PORT);
CREATE TABLE federated.t3 (a INT)
ENGINE=FEDERATED
CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t3'
CONNECTION='mariadb://root@127.0.0.1:$SLAVE_MYPORT/federated/t3'
PARTITION BY list (a)
(PARTITION p1 VALUES IN (1) CONNECTION='fedlink/t3',
PARTITION p2 VALUES IN (2) CONNECTION='fedlink/t4');

View File

@@ -311,13 +311,13 @@ CREATE TABLE federated.t4 (a INT);
connection master;
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval CREATE SERVER fedlink FOREIGN DATA WRAPPER mysql
eval CREATE SERVER fedlink FOREIGN DATA WRAPPER mariadb
OPTIONS (USER 'root', HOST '127.0.0.1', DATABASE 'federated',
PORT $SLAVE_MYPORT);
CREATE TABLE federated.t3 (a INT)
ENGINE=FEDERATED
CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t3'
CONNECTION='mariadb://root@127.0.0.1:$SLAVE_MYPORT/federated/t3'
PARTITION BY list (a)
(PARTITION p1 VALUES IN (1) CONNECTION='fedlink/t3',
PARTITION p2 VALUES IN (2) CONNECTION='fedlink/t4');

View File

@@ -0,0 +1,35 @@
# include/wait_condition_with_debug_and_kill.inc
#
# SUMMARY
#
# Waits until the passed statement returns true, or the operation
# times out. If the operation times out, the additional error
# statement will be executed and server is killed.
#
# USAGE
#
# let $wait_condition=
# SELECT c = 3 FROM t;
# let $wait_condition_on_error_output= select count(*) from t;
# [let $explicit_default_wait_timeout= N] # to override the default reset
# --source include/wait_condition_with_debug_and_kill.inc
#
# OR
#
# let $wait_timeout= 60; # Override default 30 seconds with 60.
# let $wait_condition=
# SELECT c = 3 FROM t;
# let $wait_condition_on_error_output= select count(*) from t;
# --source include/wait_condition_with_debug_and_kill.inc
# --echo Executed the test condition $wait_condition_reps times
#
#
# EXAMPLE
# events_bugs.test, events_time_zone.test
#
--source include/wait_condition_with_debug.inc
if (!$success)
{
--source include/kill_galera.inc
}

View File

@@ -0,0 +1,31 @@
connection node_2;
connection node_1;
connection node_1;
INSTALL PLUGIN IF NOT EXISTS connect SONAME 'ha_connect';
CREATE TABLE t1 (f INT) ENGINE=CONNECT;
Warnings:
Warning 1105 No table_type. Will be set to DOS
Warning 1105 No file name. Table will use t1.dos
CREATE TABLE t2 (f INT) ENGINE=ROCKSDB;
CREATE TABLE t3 (f INT) ENGINE=SEQUENCE;
ERROR 42000: This version of MariaDB doesn't yet support 'non-InnoDB sequences in Galera cluster'
show warnings;
Level Code Message
Error 1235 This version of MariaDB doesn't yet support 'non-InnoDB sequences in Galera cluster'
Note 1235 ENGINE=SEQUENCE not supported by Galera
connection node_2;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f` int(11) DEFAULT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
show create table t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f` int(11) DEFAULT NULL
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
show create table t3;
ERROR 42S02: Table 'test.t3' doesn't exist
connection node_1;
DROP TABLE t1, t2;
UNINSTALL PLUGIN IF EXISTS connect;

View File

@@ -0,0 +1,16 @@
connection node_2;
connection node_1;
connection node_1;
connection node_2;
SET GLOBAL wsrep_provider_options = 'gmcast.isolate=1';
SET SESSION wsrep_sync_wait=0;
SET SESSION wsrep_sync_wait=DEFAULT;
DELETE FROM mysql.wsrep_streaming_log;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
SET SESSION wsrep_sync_wait=0;
SET GLOBAL wsrep_provider_options = 'gmcast.isolate=0';
SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
VARIABLE_VALUE
Primary
SET SESSION wsrep_sync_wait=DEFAULT;
CALL mtr.add_suppression("WSREP: Protocol violation\\. JOIN message sender (.*) is not in state transfer \\(SYNCED\\)\\. Message ignored\\.");

View File

@@ -7,6 +7,7 @@ LOCK TABLE t1 WRITE;
connection node_1;
INSERT INTO t1 VALUES (2);
connection node_2;
SET SESSION wsrep_sync_wait = 0;
UNLOCK TABLES;
COMMIT;
SELECT COUNT(*) = 1 FROM t1;

View File

@@ -40,18 +40,19 @@ drop table t1;
disconnect node_2a;
connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2a;
CREATE TABLE t1 (i int primary key);
CREATE TABLE t1 (i int primary key) engine=innodb;
SET DEBUG_SYNC = "before_wsrep_ordered_commit SIGNAL bwoc_reached WAIT_FOR bwoc_continue";
INSERT INTO t1 VALUES (1);
connection node_2;
SET DEBUG_SYNC = "now WAIT_FOR bwoc_reached";
SET DEBUG_SYNC = "now SIGNAL bwoc_continue";
SET DEBUG_SYNC='RESET';
connection node_2a;
connection node_2;
SET DEBUG_SYNC='RESET';
select * from t1;
i
1
disconnect node_2a;
disconnect node_2b;
connection node_1;
drop table t1;

View File

@@ -0,0 +1,176 @@
connection node_2;
connection node_1;
call mtr.add_suppression("WSREP: wsrep_mode = STRICT_REPLICATION enabled. Storage engine partition for table.*");
# wsrep-mode= DEFAULT
SET GLOBAL wsrep_mode = "";
SELECT @@wsrep_mode;
@@wsrep_mode
CREATE OR REPLACE TABLE t1 (v1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB
PARTITION BY KEY (v1)
PARTITIONS 2;
CREATE OR REPLACE TABLE t2 (v1 INT NOT NULL PRIMARY KEY) ENGINE=MyISAM
PARTITION BY KEY (v1)
PARTITIONS 2;
ALTER TABLE t1 ADD COLUMN v2 int;
ALTER TABLE t2 ADD COLUMN v2 int;
INSERT INTO t1 VALUES (1,1),(2,2);
INSERT INTO t2 VALUES (1,1),(2,2);
ALTER TABLE t1 ADD COLUMN v3 int, ENGINE=MyISAM;
ALTER TABLE t2 ADD COLUMN v3 int, ENGINE=Aria;
UPDATE t1 SET v3 = 3;
UPDATE t2 SET v3 = 3;
CREATE INDEX xx1 ON t1(v2);
CREATE INDEX xx2 ON t2(v2);
DROP INDEX xx1 ON t1;
DROP INDEX xx2 ON t2;
TRUNCATE TABLE t1;
TRUNCATE TABLE t2;
RENAME TABLE t1 TO t1_v2;
RENAME TABLE t2 TO t2_v2;
CREATE VIEW x1 AS SELECT * FROM t1_v2;
CREATE VIEW x2 AS SELECT * FROM t2_v2;
CREATE DEFINER=`root`@`localhost` TRIGGER increment_before_t1
AFTER INSERT ON t1_v2 FOR EACH ROW
UPDATE t1_v2 SET t1_v2.v3 = t1_v2.v3+1;
CREATE DEFINER=`root`@`localhost` TRIGGER increment_before_t2
AFTER INSERT ON t2_v2 FOR EACH ROW
UPDATE t2_v2 SET t2_v2.v3 = t2_v2.v3+1;
connection node_2;
SHOW CREATE TABLE t1_v2;
Table Create Table
t1_v2 CREATE TABLE `t1_v2` (
`v1` int(11) NOT NULL,
`v2` int(11) DEFAULT NULL,
`v3` int(11) DEFAULT NULL,
PRIMARY KEY (`v1`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
PARTITION BY KEY (`v1`)
PARTITIONS 2
SHOW CREATE TABLE t2_v2;
Table Create Table
t2_v2 CREATE TABLE `t2_v2` (
`v1` int(11) NOT NULL,
`v2` int(11) DEFAULT NULL,
`v3` int(11) DEFAULT NULL,
PRIMARY KEY (`v1`)
) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
PARTITION BY KEY (`v1`)
PARTITIONS 2
SHOW CREATE VIEW x1;
View Create View character_set_client collation_connection
x1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `x1` AS select `t1_v2`.`v1` AS `v1`,`t1_v2`.`v2` AS `v2`,`t1_v2`.`v3` AS `v3` from `t1_v2` latin1 latin1_swedish_ci
SHOW CREATE VIEW x2;
View Create View character_set_client collation_connection
x2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `x2` AS select `t2_v2`.`v1` AS `v1`,`t2_v2`.`v2` AS `v2`,`t2_v2`.`v3` AS `v3` from `t2_v2` latin1 latin1_swedish_ci
SELECT * FROM t1_v2;
v1 v2 v3
SELECT * FROM t2_v2;
v1 v2 v3
connection node_1;
DROP VIEW x1;
DROP VIEW x2;
DROP TRIGGER increment_before_t1;
DROP TRIGGER increment_before_t2;
DROP TABLE t1_v2;
DROP TABLE t2_v2;
SET GLOBAL wsrep_mode = "";
CREATE OR REPLACE TABLE t2 (v1 INT NOT NULL PRIMARY KEY) ENGINE=MyISAM
PARTITION BY KEY (v1)
PARTITIONS 2;
# wsrep-mode= STRICT_REPLICATION
SET GLOBAL wsrep_mode = "STRICT_REPLICATION";
SELECT @@wsrep_mode;
@@wsrep_mode
STRICT_REPLICATION
CREATE OR REPLACE TABLE t1 (v1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB
PARTITION BY KEY (v1)
PARTITIONS 2;
CREATE OR REPLACE TABLE t3 (v1 INT NOT NULL PRIMARY KEY) ENGINE=MyISAM
PARTITION BY KEY (v1)
PARTITIONS 2;
ERROR HY000: Galera replication not supported
ALTER TABLE t1 ADD COLUMN v2 int;
ALTER TABLE t2 ADD COLUMN v2 int;
ERROR HY000: Galera replication not supported
INSERT INTO t1 VALUES (1,1),(2,2);
Warnings:
Warning 1290 WSREP: wsrep_mode = STRICT_REPLICATION enabled. Storage engine partition for table 'test'.'t1' is not supported in Galera
INSERT INTO t2 VALUES (1),(2);
Warnings:
Warning 1290 WSREP: wsrep_mode = STRICT_REPLICATION enabled. Storage engine partition for table 'test'.'t2' is not supported in Galera
ALTER TABLE t1 ADD COLUMN v3 int, ENGINE=MyISAM;
ERROR HY000: Galera replication not supported
ALTER TABLE t2 ADD COLUMN v3 int, ENGINE=Aria;
ERROR HY000: Galera replication not supported
UPDATE t1 SET v2 = v2 + 3;
Warnings:
Warning 1290 WSREP: wsrep_mode = STRICT_REPLICATION enabled. Storage engine partition for table 'test'.'t1' is not supported in Galera
UPDATE t2 SET v1 = v1 + 3;
Warnings:
Warning 1290 WSREP: wsrep_mode = STRICT_REPLICATION enabled. Storage engine partition for table 'test'.'t2' is not supported in Galera
CREATE INDEX xx1 ON t1(v2);
CREATE INDEX xx2 ON t2(v2);
ERROR HY000: Galera replication not supported
DROP INDEX xx1 ON t1;
DROP INDEX xx2 on t2;
ERROR HY000: Galera replication not supported
TRUNCATE TABLE t1;
TRUNCATE TABLE t2;
ERROR HY000: Galera replication not supported
RENAME TABLE t1 TO t1_v2;
RENAME TABLE t2 TO t2_v2;
RENAME TABLE t2_v2 TO t2;
CREATE VIEW x1 AS SELECT * FROM t1_v2;
CREATE VIEW x2 AS SELECT * FROM t2;
ERROR HY000: Galera replication not supported
CREATE DEFINER=`root`@`localhost` TRIGGER increment_before_t1
AFTER INSERT ON t1_v2 FOR EACH ROW
UPDATE t1_v2 SET t1_v2.v2 = t1_v2.v2+1;
CREATE DEFINER=`root`@`localhost` TRIGGER increment_before_t2
AFTER INSERT ON t2 FOR EACH ROW
UPDATE t2 SET t2.v1 = t2.v1+1;
ERROR HY000: Galera replication not supported
connection node_2;
SHOW CREATE TABLE t1_v2;
Table Create Table
t1_v2 CREATE TABLE `t1_v2` (
`v1` int(11) NOT NULL,
`v2` int(11) DEFAULT NULL,
PRIMARY KEY (`v1`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
PARTITION BY KEY (`v1`)
PARTITIONS 2
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
`v1` int(11) NOT NULL,
`v2` int(11) DEFAULT NULL,
PRIMARY KEY (`v1`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
PARTITION BY KEY (`v1`)
PARTITIONS 2
SHOW CREATE VIEW x1;
View Create View character_set_client collation_connection
x1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `x1` AS select `t1_v2`.`v1` AS `v1`,`t1_v2`.`v2` AS `v2` from `t1_v2` latin1 latin1_swedish_ci
SELECT * FROM t1_v2;
v1 v2
SELECT * FROM t2;
v1 v2
connection node_1;
DROP VIEW x1;
DROP TRIGGER increment_before_t1;
DROP TABLE t1_v2;
DROP TABLE t2;
SET GLOBAL wsrep_mode = "";
CREATE OR REPLACE TABLE t2 (v1 INT NOT NULL PRIMARY KEY) ENGINE=MyISAM
PARTITION BY KEY (v1)
PARTITIONS 2;
# wsrep-mode= STRICT_REPLICATION
SET GLOBAL wsrep_mode = "STRICT_REPLICATION";
SELECT @@wsrep_mode;
@@wsrep_mode
STRICT_REPLICATION
ALTER TABLE t2 ENGINE=InnoDB;
DROP TABLE t2;
SET GLOBAL wsrep_mode = DEFAULT;

View File

@@ -0,0 +1,30 @@
connection node_2;
connection node_1;
# Correct Galera library found
connection node_1;
connection node_2;
connection node_1;
connection node_2;
SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment';
VARIABLE_VALUE = 'Synced'
1
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 2
1
connection node_1;
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 2
1
connection node_2;
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 2
1
connection node_1;
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 2
1
connection node_2;
connection node_1;
call mtr.add_suppression("WSREP: write_handler\\(\\)");
connection node_2;
call mtr.add_suppression("WSREP: write_handler\\(\\)");

View File

@@ -0,0 +1,84 @@
connection node_2;
connection node_1;
SET GLOBAL wsrep_mode=REPLICATE_MYISAM;
CREATE TABLE t (f0 CHAR(0)) ENGINE=MyISAM;
INSERT INTO t VALUES();
SELECT * FROM t;
f0
NULL
connection node_2;
SELECT * FROM t;
f0
NULL
DROP TABLE t;
connection node_1;
SET GLOBAL wsrep_mode=REPLICATE_MYISAM;
SET GLOBAL wsrep_forced_binlog_format=ROW;
CREATE TABLE t (f0 CHAR(0)) ENGINE=MyISAM;
INSERT INTO t VALUES();
SELECT * FROM t;
f0
NULL
connection node_2;
SELECT * FROM t;
f0
NULL
DROP TABLE t;
connection node_1;
SET GLOBAL wsrep_mode=REPLICATE_ARIA;
CREATE TABLE t (f0 CHAR(0)) ENGINE=Aria;
INSERT INTO t VALUES();
SELECT * FROM t;
f0
NULL
connection node_2;
SELECT * FROM t;
f0
NULL
DROP TABLE t;
connection node_1;
SET GLOBAL wsrep_mode=REPLICATE_ARIA;
SET GLOBAL wsrep_forced_binlog_format=ROW;
CREATE TABLE t (f0 CHAR(0)) ENGINE=Aria;
INSERT INTO t VALUES();
SELECT * FROM t;
f0
NULL
connection node_2;
SELECT * FROM t;
f0
NULL
DROP TABLE t;
connection node_1;
SET GLOBAL wsrep_mode=REPLICATE_MYISAM;
SET GLOBAL wsrep_forced_binlog_format=MIXED;
ERROR HY000: wsrep_forced_binlog_format=[MIXED|STATEMENT] can't be set if wsrep_mode=[REPLICATE_MYISAM|REPLICATE_ARIA]
SET GLOBAL wsrep_forced_binlog_format=STATEMENT;
ERROR HY000: wsrep_forced_binlog_format=[MIXED|STATEMENT] can't be set if wsrep_mode=[REPLICATE_MYISAM|REPLICATE_ARIA]
SET GLOBAL wsrep_mode=REPLICATE_ARIA;
SET GLOBAL wsrep_forced_binlog_format=MIXED;
ERROR HY000: wsrep_forced_binlog_format=[MIXED|STATEMENT] can't be set if wsrep_mode=[REPLICATE_MYISAM|REPLICATE_ARIA]
SET GLOBAL wsrep_forced_binlog_format=STATEMENT;
ERROR HY000: wsrep_forced_binlog_format=[MIXED|STATEMENT] can't be set if wsrep_mode=[REPLICATE_MYISAM|REPLICATE_ARIA]
SET GLOBAL wsrep_mode=DEFAULT;
SET GLOBAL wsrep_forced_binlog_format=MIXED;
SET GLOBAL wsrep_mode = REPLICATE_MYISAM;
ERROR HY000: wsrep_mode=[REPLICATE_MYISAM|REPLICATE_ARIA] can't be enabled if wsrep_forced_binlog != [NONE|ROW]
SET GLOBAL wsrep_mode = REPLICATE_ARIA;
ERROR HY000: wsrep_mode=[REPLICATE_MYISAM|REPLICATE_ARIA] can't be enabled if wsrep_forced_binlog != [NONE|ROW]
SET GLOBAL wsrep_mode=DEFAULT;
SET GLOBAL wsrep_forced_binlog_format=STATEMENT;
SET GLOBAL wsrep_mode = REPLICATE_MYISAM;
ERROR HY000: wsrep_mode=[REPLICATE_MYISAM|REPLICATE_ARIA] can't be enabled if wsrep_forced_binlog != [NONE|ROW]
SET GLOBAL wsrep_mode = REPLICATE_ARIA;
ERROR HY000: wsrep_mode=[REPLICATE_MYISAM|REPLICATE_ARIA] can't be enabled if wsrep_forced_binlog != [NONE|ROW]
SET GLOBAL wsrep_forced_binlog_format=DEFAULT;
SET GLOBAL wsrep_mode=DEFAULT;
SET GLOBAL wsrep_forced_binlog_format=MIXED;
SET GLOBAL wsrep_mode=REPLICATE_MYISAM;
ERROR HY000: wsrep_mode=[REPLICATE_MYISAM|REPLICATE_ARIA] can't be enabled if wsrep_forced_binlog != [NONE|ROW]
SET GLOBAL wsrep_forced_binlog_format=STATEMENT;
SET GLOBAL wsrep_mode=REPLICATE_MYISAM;
ERROR HY000: wsrep_mode=[REPLICATE_MYISAM|REPLICATE_ARIA] can't be enabled if wsrep_forced_binlog != [NONE|ROW]
SET GLOBAL wsrep_forced_binlog_format=DEFAULT;
SET GLOBAL wsrep_mode=DEFAULT;

View File

@@ -31,3 +31,6 @@ test.t1 repair note The storage engine for the table doesn't support repair
test.t2 repair note The storage engine for the table doesn't support repair
DROP TABLE t1;
DROP TABLE t2;
connection node_1;
disconnect node_2a;
disconnect node_2b;

View File

@@ -32,6 +32,8 @@ SHOW WARNINGS;
Level Code Message
Error 4165 Galera replication not supported
Warning 1031 WSREP: wsrep_mode = STRICT_REPLICATION enabled. Storage engine MyISAM not supported.
Error 4165 Galera replication not supported
Warning 1031 WSREP: wsrep_mode = STRICT_REPLICATION enabled. Storage engine MyISAM not supported.
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (

View File

@@ -0,0 +1 @@
--plugin-load=$HA_ROCKSDB_SO

View File

@@ -0,0 +1,22 @@
--source include/galera_cluster.inc
--source include/have_sequence.inc
--source include/have_rocksdb.inc
--connection node_1
INSTALL PLUGIN IF NOT EXISTS connect SONAME 'ha_connect';
CREATE TABLE t1 (f INT) ENGINE=CONNECT;
CREATE TABLE t2 (f INT) ENGINE=ROCKSDB;
--error ER_NOT_SUPPORTED_YET
CREATE TABLE t3 (f INT) ENGINE=SEQUENCE;
show warnings;
--connection node_2
show create table t1;
show create table t2;
--error ER_NO_SUCH_TABLE
show create table t3;
--connection node_1
DROP TABLE t1, t2;
UNINSTALL PLUGIN IF EXISTS connect;

View File

@@ -0,0 +1,41 @@
#
# MDEV-35946: Assertion `thd->is_error()' failed in Sql_cmd_dml::prepare
#
--source include/have_innodb.inc
--source include/galera_cluster.inc
# Save original auto_increment_offset values.
--let $node_1=node_1
--let $node_2=node_2
--source include/auto_increment_offset_save.inc
#
# Disconnect from the cluster
#
SET GLOBAL wsrep_provider_options = 'gmcast.isolate=1';
SET SESSION wsrep_sync_wait=0;
--let $wait_condition = SELECT VARIABLE_VALUE = 'non-Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
--source include/wait_condition.inc
SET SESSION wsrep_sync_wait=DEFAULT;
#
# If bug is present, assertion will fire
# during the execution of the following DELETE
#
--error ER_LOCK_WAIT_TIMEOUT
DELETE FROM mysql.wsrep_streaming_log;
#
# Reconnect to the cluster
#
SET SESSION wsrep_sync_wait=0;
SET GLOBAL wsrep_provider_options = 'gmcast.isolate=0';
--let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
--source include/wait_condition.inc
SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
SET SESSION wsrep_sync_wait=DEFAULT;
--source include/auto_increment_offset_restore.inc
CALL mtr.add_suppression("WSREP: Protocol violation\\. JOIN message sender (.*) is not in state transfer \\(SYNCED\\)\\. Message ignored\\.");

View File

@@ -16,13 +16,16 @@ LOCK TABLE t1 WRITE;
INSERT INTO t1 VALUES (2);
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'Waiting for table metadata lock'
--source include/wait_condition.inc
SET SESSION wsrep_sync_wait = 0;
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'Waiting to execute in isolation%');
--let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST
--source include/wait_condition_with_debug.inc
UNLOCK TABLES;
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'Waiting for table metadata lock'
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'Waiting to execute in isolation%');
--let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST
--source include/wait_condition_with_debug.inc
COMMIT;
SELECT COUNT(*) = 1 FROM t1;

View File

@@ -110,7 +110,7 @@ drop table t1;
--connection node_2a
--let $connection_id = `SELECT CONNECTION_ID()`
CREATE TABLE t1 (i int primary key);
CREATE TABLE t1 (i int primary key) engine=innodb;
# Set up sync point
SET DEBUG_SYNC = "before_wsrep_ordered_commit SIGNAL bwoc_reached WAIT_FOR bwoc_continue";
@@ -129,17 +129,17 @@ SET DEBUG_SYNC = "now WAIT_FOR bwoc_reached";
--enable_query_log
SET DEBUG_SYNC = "now SIGNAL bwoc_continue";
SET DEBUG_SYNC='RESET';
--connection node_2a
--error 0,1213,2013,2026
--reap
--connection node_2
SET DEBUG_SYNC='RESET';
# victim was able to complete the INSERT
select * from t1;
--disconnect node_2a
--disconnect node_2b
--connection node_1
drop table t1;

View File

@@ -0,0 +1,133 @@
--source include/galera_cluster.inc
--source include/have_partition.inc
--source include/have_innodb.inc
--source include/have_aria.inc
call mtr.add_suppression("WSREP: wsrep_mode = STRICT_REPLICATION enabled. Storage engine partition for table.*");
--echo # wsrep-mode= DEFAULT
SET GLOBAL wsrep_mode = "";
SELECT @@wsrep_mode;
CREATE OR REPLACE TABLE t1 (v1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB
PARTITION BY KEY (v1)
PARTITIONS 2;
CREATE OR REPLACE TABLE t2 (v1 INT NOT NULL PRIMARY KEY) ENGINE=MyISAM
PARTITION BY KEY (v1)
PARTITIONS 2;
ALTER TABLE t1 ADD COLUMN v2 int;
ALTER TABLE t2 ADD COLUMN v2 int;
INSERT INTO t1 VALUES (1,1),(2,2);
INSERT INTO t2 VALUES (1,1),(2,2);
ALTER TABLE t1 ADD COLUMN v3 int, ENGINE=MyISAM;
ALTER TABLE t2 ADD COLUMN v3 int, ENGINE=Aria;
UPDATE t1 SET v3 = 3;
UPDATE t2 SET v3 = 3;
CREATE INDEX xx1 ON t1(v2);
CREATE INDEX xx2 ON t2(v2);
DROP INDEX xx1 ON t1;
DROP INDEX xx2 ON t2;
TRUNCATE TABLE t1;
TRUNCATE TABLE t2;
RENAME TABLE t1 TO t1_v2;
RENAME TABLE t2 TO t2_v2;
CREATE VIEW x1 AS SELECT * FROM t1_v2;
CREATE VIEW x2 AS SELECT * FROM t2_v2;
CREATE DEFINER=`root`@`localhost` TRIGGER increment_before_t1
AFTER INSERT ON t1_v2 FOR EACH ROW
UPDATE t1_v2 SET t1_v2.v3 = t1_v2.v3+1;
CREATE DEFINER=`root`@`localhost` TRIGGER increment_before_t2
AFTER INSERT ON t2_v2 FOR EACH ROW
UPDATE t2_v2 SET t2_v2.v3 = t2_v2.v3+1;
--connection node_2
SHOW CREATE TABLE t1_v2;
SHOW CREATE TABLE t2_v2;
SHOW CREATE VIEW x1;
SHOW CREATE VIEW x2;
SELECT * FROM t1_v2;
SELECT * FROM t2_v2;
--connection node_1
DROP VIEW x1;
DROP VIEW x2;
DROP TRIGGER increment_before_t1;
DROP TRIGGER increment_before_t2;
DROP TABLE t1_v2;
DROP TABLE t2_v2;
SET GLOBAL wsrep_mode = "";
CREATE OR REPLACE TABLE t2 (v1 INT NOT NULL PRIMARY KEY) ENGINE=MyISAM
PARTITION BY KEY (v1)
PARTITIONS 2;
--echo # wsrep-mode= STRICT_REPLICATION
SET GLOBAL wsrep_mode = "STRICT_REPLICATION";
SELECT @@wsrep_mode;
CREATE OR REPLACE TABLE t1 (v1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB
PARTITION BY KEY (v1)
PARTITIONS 2;
--error ER_GALERA_REPLICATION_NOT_SUPPORTED
CREATE OR REPLACE TABLE t3 (v1 INT NOT NULL PRIMARY KEY) ENGINE=MyISAM
PARTITION BY KEY (v1)
PARTITIONS 2;
ALTER TABLE t1 ADD COLUMN v2 int;
--error ER_GALERA_REPLICATION_NOT_SUPPORTED
ALTER TABLE t2 ADD COLUMN v2 int;
INSERT INTO t1 VALUES (1,1),(2,2);
INSERT INTO t2 VALUES (1),(2);
--error ER_GALERA_REPLICATION_NOT_SUPPORTED
ALTER TABLE t1 ADD COLUMN v3 int, ENGINE=MyISAM;
--error ER_GALERA_REPLICATION_NOT_SUPPORTED
ALTER TABLE t2 ADD COLUMN v3 int, ENGINE=Aria;
UPDATE t1 SET v2 = v2 + 3;
UPDATE t2 SET v1 = v1 + 3;
CREATE INDEX xx1 ON t1(v2);
--error ER_GALERA_REPLICATION_NOT_SUPPORTED
CREATE INDEX xx2 ON t2(v2);
DROP INDEX xx1 ON t1;
--error ER_GALERA_REPLICATION_NOT_SUPPORTED
DROP INDEX xx2 on t2;
TRUNCATE TABLE t1;
--error ER_GALERA_REPLICATION_NOT_SUPPORTED
TRUNCATE TABLE t2;
# At the moment can't restrict rename
RENAME TABLE t1 TO t1_v2;
RENAME TABLE t2 TO t2_v2;
RENAME TABLE t2_v2 TO t2;
CREATE VIEW x1 AS SELECT * FROM t1_v2;
--error ER_GALERA_REPLICATION_NOT_SUPPORTED
CREATE VIEW x2 AS SELECT * FROM t2;
CREATE DEFINER=`root`@`localhost` TRIGGER increment_before_t1
AFTER INSERT ON t1_v2 FOR EACH ROW
UPDATE t1_v2 SET t1_v2.v2 = t1_v2.v2+1;
--error ER_GALERA_REPLICATION_NOT_SUPPORTED
CREATE DEFINER=`root`@`localhost` TRIGGER increment_before_t2
AFTER INSERT ON t2 FOR EACH ROW
UPDATE t2 SET t2.v1 = t2.v1+1;
--connection node_2
SHOW CREATE TABLE t1_v2;
SHOW CREATE TABLE t2;
SHOW CREATE VIEW x1;
SELECT * FROM t1_v2;
SELECT * FROM t2;
--connection node_1
DROP VIEW x1;
DROP TRIGGER increment_before_t1;
DROP TABLE t1_v2;
# We allow dropping table
DROP TABLE t2;
SET GLOBAL wsrep_mode = "";
CREATE OR REPLACE TABLE t2 (v1 INT NOT NULL PRIMARY KEY) ENGINE=MyISAM
PARTITION BY KEY (v1)
PARTITIONS 2;
--echo # wsrep-mode= STRICT_REPLICATION
SET GLOBAL wsrep_mode = "STRICT_REPLICATION";
SELECT @@wsrep_mode;
ALTER TABLE t2 ENGINE=InnoDB;
DROP TABLE t2;
SET GLOBAL wsrep_mode = DEFAULT;

View File

@@ -0,0 +1,11 @@
!include ../galera_2nodes.cnf
[mysqld]
loose-galera-ssl-cipher=1
wsrep-debug=1
[mysqld.1]
wsrep_provider_options='base_port=@mysqld.1.#galera_port;socket.ssl=yes;socket.ssl_cert=@ENV.MYSQL_TEST_DIR/std_data/galera-cert.pem;socket.ssl_key=@ENV.MYSQL_TEST_DIR/std_data/galera-key.pem;cert.log_conflicts=YES'
[mysqld.2]
wsrep_provider_options='base_port=@mysqld.2.#galera_port;socket.ssl=yes;socket.ssl_cert=@ENV.MYSQL_TEST_DIR/std_data/galera-cert.pem;socket.ssl_key=@ENV.MYSQL_TEST_DIR/std_data/galera-key.pem;cert.log_conflicts=YES'

View File

@@ -0,0 +1,82 @@
#
# Test upgrading the SSL chipher
#
--source include/galera_cluster.inc
--source include/have_ssl_communication.inc
--source include/have_openssl.inc
--source include/force_restart.inc
#
# Lowest supported Galera library version
#
--let $galera_version=26.4.21
source ../wsrep/include/check_galera_version.inc;
# Save original auto_increment_offset values.
--let $node_1=node_1
--let $node_2=node_2
--source include/auto_increment_offset_save.inc
# Setup galera ports
--connection node_1
--source suite/galera/include/galera_base_port.inc
--let $NODE_GALERAPORT_1 = $_NODE_GALERAPORT
--connection node_2
--source suite/galera/include/galera_base_port.inc
--let $NODE_GALERAPORT_2 = $_NODE_GALERAPORT
SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment';
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
# 2. Restart node #1 with a socket.ssl_cipher
--connection node_1
--source include/shutdown_mysqld.inc
--let $restart_noprint = 1
--let $start_mysqld_params = --wsrep-cluster-address=gcomm://127.0.0.1:$NODE_GALERAPORT_2 --wsrep_provider_options=base_port=$NODE_GALERAPORT_1;socket.ssl=yes;socket.ssl_ca=$MYSQL_TEST_DIR/std_data/galera-upgrade-ca-cert.pem;socket.ssl_cert=$MYSQL_TEST_DIR/std_data/galera-cert.pem;socket.ssl_key=$MYSQL_TEST_DIR/std_data/galera-key.pem;socket.ssl_cipher=AES256-SHA
--source include/start_mysqld.inc
--source include/wait_until_connected_again.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment';
--source include/wait_condition.inc
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
# 3. Restart node #2 with the new socket.ssl_ca , socket.ssl_cert, socket.ssl_key and socket.ssl_cipher
--connection node_2
--source include/shutdown_mysqld.inc
--let $start_mysqld_params = --wsrep_provider_options=base_port=$NODE_GALERAPORT_2;socket.ssl=yes;socket.ssl_ca=$MYSQL_TEST_DIR/std_data/galera-upgrade-ca-cert.pem;socket.ssl_cert=$MYSQL_TEST_DIR/std_data/galera-upgrade-server-cert.pem;socket.ssl_key=$MYSQL_TEST_DIR/std_data/galera-upgrade-server-key.pem;socket.ssl_cipher=AES256-SHA
--source include/start_mysqld.inc
--source include/wait_until_connected_again.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment';
--source include/wait_condition.inc
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
# 4. Restart node #1 with the new socket.ssl_ca , socket.ssl_cert, socket.ssl_key and socket.ssl_cipher
--connection node_1
--source include/shutdown_mysqld.inc
--let $start_mysqld_params = --wsrep-cluster-address=gcomm://127.0.0.1:$NODE_GALERAPORT_2 --wsrep_provider_options=base_port=$NODE_GALERAPORT_1;socket.ssl=yes;socket.ssl_ca=$MYSQL_TEST_DIR/std_data/galera-upgrade-ca-cert.pem;socket.ssl_cert=$MYSQL_TEST_DIR/std_data/galera-upgrade-server-cert.pem;socket.ssl_key=$MYSQL_TEST_DIR/std_data/galera-upgrade-server-key.pem;socket.ssl_cipher=AES256-SHA
--source include/start_mysqld.inc
--source include/wait_until_connected_again.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment';
--source include/wait_condition.inc
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
# 5. Make sure node_2 is ready as well
--connection node_2
--source include/galera_wait_ready.inc
# Upgrade complete. Both nodes now use the new key and certificate
# Restore original auto_increment_offset values.
--source include/auto_increment_offset_restore.inc
--connection node_1
call mtr.add_suppression("WSREP: write_handler\\(\\)");
--connection node_2
call mtr.add_suppression("WSREP: write_handler\\(\\)");

View File

@@ -3,7 +3,6 @@
#
--source include/galera_cluster.inc
--source include/have_innodb.inc
CREATE TABLE t1 (f1 INT PRIMARY KEY) Engine=MyISAM;
INSERT INTO t1 VALUES (1);

View File

@@ -0,0 +1,81 @@
--source include/galera_cluster.inc
--source include/have_aria.inc
#
# MDEV-29775 : Assertion `0' failed in void Protocol::end_statement() when adding data to the MyISAM table after setting wsrep_mode=replicate_myisam
#
SET GLOBAL wsrep_mode=REPLICATE_MYISAM;
CREATE TABLE t (f0 CHAR(0)) ENGINE=MyISAM;
INSERT INTO t VALUES();
SELECT * FROM t;
--connection node_2
SELECT * FROM t;
DROP TABLE t;
--connection node_1
SET GLOBAL wsrep_mode=REPLICATE_MYISAM;
SET GLOBAL wsrep_forced_binlog_format=ROW;
CREATE TABLE t (f0 CHAR(0)) ENGINE=MyISAM;
INSERT INTO t VALUES();
SELECT * FROM t;
--connection node_2
SELECT * FROM t;
DROP TABLE t;
--connection node_1
SET GLOBAL wsrep_mode=REPLICATE_ARIA;
CREATE TABLE t (f0 CHAR(0)) ENGINE=Aria;
INSERT INTO t VALUES();
SELECT * FROM t;
--connection node_2
SELECT * FROM t;
DROP TABLE t;
--connection node_1
SET GLOBAL wsrep_mode=REPLICATE_ARIA;
SET GLOBAL wsrep_forced_binlog_format=ROW;
CREATE TABLE t (f0 CHAR(0)) ENGINE=Aria;
INSERT INTO t VALUES();
SELECT * FROM t;
--connection node_2
SELECT * FROM t;
DROP TABLE t;
--connection node_1
SET GLOBAL wsrep_mode=REPLICATE_MYISAM;
--error ER_WRONG_ARGUMENTS
SET GLOBAL wsrep_forced_binlog_format=MIXED;
--error ER_WRONG_ARGUMENTS
SET GLOBAL wsrep_forced_binlog_format=STATEMENT;
SET GLOBAL wsrep_mode=REPLICATE_ARIA;
--error ER_WRONG_ARGUMENTS
SET GLOBAL wsrep_forced_binlog_format=MIXED;
--error ER_WRONG_ARGUMENTS
SET GLOBAL wsrep_forced_binlog_format=STATEMENT;
SET GLOBAL wsrep_mode=DEFAULT;
SET GLOBAL wsrep_forced_binlog_format=MIXED;
--error ER_WRONG_ARGUMENTS
SET GLOBAL wsrep_mode = REPLICATE_MYISAM;
--error ER_WRONG_ARGUMENTS
SET GLOBAL wsrep_mode = REPLICATE_ARIA;
SET GLOBAL wsrep_mode=DEFAULT;
SET GLOBAL wsrep_forced_binlog_format=STATEMENT;
--error ER_WRONG_ARGUMENTS
SET GLOBAL wsrep_mode = REPLICATE_MYISAM;
--error ER_WRONG_ARGUMENTS
SET GLOBAL wsrep_mode = REPLICATE_ARIA;
SET GLOBAL wsrep_forced_binlog_format=DEFAULT;
SET GLOBAL wsrep_mode=DEFAULT;
SET GLOBAL wsrep_forced_binlog_format=MIXED;
--error ER_WRONG_ARGUMENTS
SET GLOBAL wsrep_mode=REPLICATE_MYISAM;
SET GLOBAL wsrep_forced_binlog_format=STATEMENT;
--error ER_WRONG_ARGUMENTS
SET GLOBAL wsrep_mode=REPLICATE_MYISAM;
SET GLOBAL wsrep_forced_binlog_format=DEFAULT;
SET GLOBAL wsrep_mode=DEFAULT;

View File

@@ -1,9 +1,4 @@
!include ../galera_2nodes.cnf
[mysqld.1]
[mysqld]
log-bin
wsrep-debug=1
[mysqld.1]
log-bin
wsrep-debug=1

View File

@@ -1,6 +1,5 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/force_restart.inc
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE t2 (id INT PRIMARY KEY) ENGINE=InnoDB;
@@ -21,8 +20,9 @@ LOCK TABLE t2 WRITE;
--connection node_2
SET SESSION wsrep_sync_wait = 0;
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'Waiting for table metadata lock'
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) BETWEEN 1 AND 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'Waiting to execute in isolation%';
--let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST
--source include/wait_condition_with_debug_and_kill.inc
--connection node_1
INSERT INTO t2 VALUES (1);
@@ -38,3 +38,8 @@ UNLOCK TABLES;
DROP TABLE t1;
DROP TABLE t2;
--connection node_1
--disconnect node_2a
--disconnect node_2b

View File

@@ -38,6 +38,10 @@ idxa n_diff_pfx01 a
idxa n_diff_pfx02 a,DB_ROW_ID
idxa n_leaf_pages Number of leaf pages in the index
idxa size Number of pages in the index
idxb n_diff_pfx01 b
idxb n_diff_pfx02 b,DB_ROW_ID
idxb n_leaf_pages Number of leaf pages in the index
idxb size Number of pages in the index
vidxcd n_diff_pfx01 c
vidxcd n_diff_pfx02 c,d
vidxcd n_diff_pfx03 c,d,DB_ROW_ID
@@ -54,6 +58,14 @@ index_name stat_name stat_description
GEN_CLUST_INDEX n_diff_pfx01 DB_ROW_ID
GEN_CLUST_INDEX n_leaf_pages Number of leaf pages in the index
GEN_CLUST_INDEX size Number of pages in the index
idxb n_diff_pfx01 b
idxb n_diff_pfx02 b,DB_ROW_ID
idxb n_leaf_pages Number of leaf pages in the index
idxb size Number of pages in the index
vidxcd n_diff_pfx01 d
vidxcd n_diff_pfx02 d,DB_ROW_ID
vidxcd n_leaf_pages Number of leaf pages in the index
vidxcd size Number of pages in the index
ALTER TABLE t ADD INDEX vidxe (e), ALGORITHM=INPLACE;
select count(*) from t;
count(*)
@@ -65,6 +77,18 @@ index_name stat_name stat_description
GEN_CLUST_INDEX n_diff_pfx01 DB_ROW_ID
GEN_CLUST_INDEX n_leaf_pages Number of leaf pages in the index
GEN_CLUST_INDEX size Number of pages in the index
idxb n_diff_pfx01 b
idxb n_diff_pfx02 b,DB_ROW_ID
idxb n_leaf_pages Number of leaf pages in the index
idxb size Number of pages in the index
vidxcd n_diff_pfx01 d
vidxcd n_diff_pfx02 d,DB_ROW_ID
vidxcd n_leaf_pages Number of leaf pages in the index
vidxcd size Number of pages in the index
vidxe n_diff_pfx01 e
vidxe n_diff_pfx02 e,DB_ROW_ID
vidxe n_leaf_pages Number of leaf pages in the index
vidxe size Number of pages in the index
ALTER TABLE t ADD COLUMN f INT GENERATED ALWAYS AS(a + a), ADD INDEX vidxf (f), ALGORITHM=INPLACE;
select count(*) from t;
count(*)
@@ -76,6 +100,22 @@ index_name stat_name stat_description
GEN_CLUST_INDEX n_diff_pfx01 DB_ROW_ID
GEN_CLUST_INDEX n_leaf_pages Number of leaf pages in the index
GEN_CLUST_INDEX size Number of pages in the index
idxb n_diff_pfx01 b
idxb n_diff_pfx02 b,DB_ROW_ID
idxb n_leaf_pages Number of leaf pages in the index
idxb size Number of pages in the index
vidxcd n_diff_pfx01 d
vidxcd n_diff_pfx02 d,DB_ROW_ID
vidxcd n_leaf_pages Number of leaf pages in the index
vidxcd size Number of pages in the index
vidxe n_diff_pfx01 e
vidxe n_diff_pfx02 e,DB_ROW_ID
vidxe n_leaf_pages Number of leaf pages in the index
vidxe size Number of pages in the index
vidxf n_diff_pfx01 f
vidxf n_diff_pfx02 f,DB_ROW_ID
vidxf n_leaf_pages Number of leaf pages in the index
vidxf size Number of pages in the index
ALTER TABLE t DROP INDEX vidxcd;
SELECT index_name, stat_name, stat_description
FROM mysql.innodb_index_stats
@@ -84,4 +124,16 @@ index_name stat_name stat_description
GEN_CLUST_INDEX n_diff_pfx01 DB_ROW_ID
GEN_CLUST_INDEX n_leaf_pages Number of leaf pages in the index
GEN_CLUST_INDEX size Number of pages in the index
idxb n_diff_pfx01 b
idxb n_diff_pfx02 b,DB_ROW_ID
idxb n_leaf_pages Number of leaf pages in the index
idxb size Number of pages in the index
vidxe n_diff_pfx01 e
vidxe n_diff_pfx02 e,DB_ROW_ID
vidxe n_leaf_pages Number of leaf pages in the index
vidxe size Number of pages in the index
vidxf n_diff_pfx01 f
vidxf n_diff_pfx02 f,DB_ROW_ID
vidxf n_leaf_pages Number of leaf pages in the index
vidxf size Number of pages in the index
DROP TABLE t;

View File

@@ -1,4 +1,6 @@
@@ -13,212 +13,212 @@
--- autoinc_persist.result
+++ autoinc_persist.result,desc
@@ -13,224 +13,224 @@
#
# Pre-create several tables
SET SQL_MODE='STRICT_ALL_TABLES';
@@ -296,8 +298,7 @@
+2
+1
+CREATE TABLE t11(a FLOAT AUTO_INCREMENT, PRIMARY KEY(a DESC)) ENGINE = InnoDB;
INSERT INTO t11 VALUES(0), (0), (0), (0), (-1), (-10), (0),
(20), (30), (31);
INSERT INTO t11 VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31);
SELECT * FROM t11;
a
--10
@@ -310,7 +311,7 @@
-20
-30
31
-CREATE TABLE t12(a DOUBLE AUTO_INCREMENT KEY) ENGINE = InnoDB;
-CREATE TABLE t11u(a FLOAT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB;
+30
+20
+5
@@ -320,9 +321,30 @@
+1
+-1
+-10
+CREATE TABLE t11u(a FLOAT UNSIGNED AUTO_INCREMENT, PRIMARY KEY(a DESC)) ENGINE = InnoDB;
INSERT INTO t11u VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31);
ERROR 22003: Out of range value for column 'a' at row 5
INSERT INTO t11u VALUES(0), (0), (0), (0), (0), (20), (30), (31);
SELECT * FROM t11u;
a
-11
-12
-13
-14
-15
-20
-30
31
-CREATE TABLE t12(a DOUBLE AUTO_INCREMENT KEY) ENGINE = InnoDB;
+30
+20
+15
+14
+13
+12
+11
+CREATE TABLE t12(a DOUBLE AUTO_INCREMENT, PRIMARY KEY(a DESC)) ENGINE = InnoDB;
INSERT INTO t12 VALUES(0), (0), (0), (0), (-1), (-10), (0),
(20), (30), (31);
INSERT INTO t12 VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31);
SELECT * FROM t12;
a
--10
@@ -344,10 +366,10 @@
+1
+-1
+-10
# Scenario 1: Normal restart, to test if the counters are persisted
# Scenario 2: Delete some values, to test the counters should not be the
# one which is the largest in current table
@@ -242,14 +242,14 @@
CREATE TABLE t12u(a DOUBLE UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB;
INSERT INTO t12u VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31);
ERROR 22003: Out of range value for column 'a' at row 5
@@ -268,14 +268,14 @@
SELECT MAX(a) AS `Expect 100000000000` FROM t9;
Expect 100000000000
100000000000
@@ -364,7 +386,7 @@
) ENGINE=InnoDB AUTO_INCREMENT=1234 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
INSERT INTO t13 VALUES(0);
SELECT a AS `Expect 1234` FROM t13;
@@ -464,28 +464,28 @@
@@ -490,28 +490,28 @@
INSERT INTO t1 VALUES(0), (0);
SELECT * FROM t1;
a
@@ -398,7 +420,7 @@
# Ensure that all changes before the server is killed are persisted.
set global innodb_flush_log_at_trx_commit=1;
TRUNCATE TABLE t1;
@@ -498,63 +498,63 @@
@@ -524,63 +524,63 @@
INSERT INTO t19 VALUES(0), (0);
SELECT * FROM t19;
a
@@ -481,7 +503,7 @@
DELETE FROM t3 WHERE a > 300;
SELECT MAX(a) AS `Expect 200` FROM t3;
Expect 200
@@ -566,7 +566,7 @@
@@ -592,7 +592,7 @@
Table Create Table
t3 CREATE TABLE `t3` (
`a` smallint(6) NOT NULL AUTO_INCREMENT,
@@ -490,7 +512,7 @@
) ENGINE=InnoDB AUTO_INCREMENT=201 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
INSERT INTO t3 VALUES(0);
SELECT MAX(a) AS `Expect 201` FROM t3;
@@ -579,7 +579,7 @@
@@ -605,7 +605,7 @@
Table Create Table
t3 CREATE TABLE `t3` (
`a` smallint(6) NOT NULL AUTO_INCREMENT,
@@ -499,7 +521,7 @@
) ENGINE=InnoDB AUTO_INCREMENT=500 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
INSERT INTO t3 VALUES(0);
SELECT MAX(a) AS `Expect 500` FROM t3;
@@ -591,13 +591,13 @@
@@ -617,13 +617,13 @@
Table Create Table
t3 CREATE TABLE `t3` (
`a` smallint(6) NOT NULL AUTO_INCREMENT,
@@ -515,7 +537,7 @@
INSERT INTO t3 VALUES(150), (180);
UPDATE t3 SET a = 200 WHERE a = 150;
INSERT INTO t3 VALUES(220);
@@ -607,7 +607,7 @@
@@ -633,7 +633,7 @@
Table Create Table
t3 CREATE TABLE `t3` (
`a` smallint(6) NOT NULL AUTO_INCREMENT,
@@ -524,7 +546,7 @@
) ENGINE=InnoDB AUTO_INCREMENT=221 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
INSERT INTO t3 VALUES(0);
SELECT MAX(a) AS `Expect 221` FROM t3;
@@ -619,7 +619,7 @@
@@ -645,7 +645,7 @@
Table Create Table
t3 CREATE TABLE `t3` (
`a` smallint(6) NOT NULL AUTO_INCREMENT,
@@ -533,7 +555,7 @@
) ENGINE=InnoDB AUTO_INCREMENT=120 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
# MDEV-6076: Test adding an AUTO_INCREMENT COLUMN
CREATE TABLE mdev6076a (b INT) ENGINE=InnoDB;
@@ -669,18 +669,18 @@
@@ -695,18 +695,18 @@
INSERT INTO t_inplace SELECT * FROM t3;
SELECT * FROM t_inplace;
a
@@ -559,7 +581,7 @@
) ENGINE=InnoDB AUTO_INCREMENT=211 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
# This will keep the autoinc counter
ALTER TABLE t_inplace AUTO_INCREMENT = 250, ALGORITHM = INPLACE;
@@ -689,7 +689,7 @@
@@ -715,7 +715,7 @@
Table Create Table
t_inplace CREATE TABLE `t_inplace` (
`a` smallint(6) NOT NULL AUTO_INCREMENT,
@@ -568,7 +590,7 @@
) ENGINE=InnoDB AUTO_INCREMENT=250 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
# This should keep the autoinc counter as well
ALTER TABLE t_inplace ADD COLUMN b INT, ALGORITHM = INPLACE;
@@ -699,16 +699,16 @@
@@ -725,16 +725,16 @@
t_inplace CREATE TABLE `t_inplace` (
`a` smallint(6) NOT NULL AUTO_INCREMENT,
`b` int(11) DEFAULT NULL,
@@ -590,7 +612,7 @@
# This should reset the autoinc counter to the one specified
# Since it's smaller than current one but bigger than existing
# biggest counter in the table
@@ -719,7 +719,7 @@
@@ -745,7 +745,7 @@
t_inplace CREATE TABLE `t_inplace` (
`a` smallint(6) NOT NULL AUTO_INCREMENT,
`b` int(11) DEFAULT NULL,
@@ -599,7 +621,7 @@
) ENGINE=InnoDB AUTO_INCREMENT=180 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
# This should reset the autoinc counter to the next value of
# current max counter in the table, since the specified value
@@ -730,7 +730,7 @@
@@ -756,7 +756,7 @@
Table Create Table
t_inplace CREATE TABLE `t_inplace` (
`a` smallint(6) NOT NULL AUTO_INCREMENT,
@@ -608,7 +630,7 @@
) ENGINE=InnoDB AUTO_INCREMENT=123 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
INSERT INTO t_inplace VALUES(0), (0);
SELECT MAX(a) AS `Expect 124` FROM t_inplace;
@@ -757,18 +757,18 @@
@@ -783,18 +783,18 @@
INSERT INTO t_copy SELECT * FROM t3;
SELECT * FROM t_copy;
a
@@ -634,7 +656,7 @@
) ENGINE=InnoDB AUTO_INCREMENT=211 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
# This will keep the autoinc counter
ALTER TABLE t_copy AUTO_INCREMENT = 250, ALGORITHM = COPY;
@@ -777,7 +777,7 @@
@@ -803,7 +803,7 @@
Table Create Table
t_copy CREATE TABLE `t_copy` (
`a` smallint(6) NOT NULL AUTO_INCREMENT,
@@ -643,7 +665,7 @@
) ENGINE=InnoDB AUTO_INCREMENT=250 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
# This should keep the autoinc counter as well
ALTER TABLE t_copy ADD COLUMN b INT, ALGORITHM = COPY;
@@ -787,16 +787,16 @@
@@ -813,16 +813,16 @@
t_copy CREATE TABLE `t_copy` (
`a` smallint(6) NOT NULL AUTO_INCREMENT,
`b` int(11) DEFAULT NULL,
@@ -665,7 +687,7 @@
# This should reset the autoinc counter to the one specified
# Since it's smaller than current one but bigger than existing
# biggest counter in the table
@@ -807,7 +807,7 @@
@@ -833,7 +833,7 @@
t_copy CREATE TABLE `t_copy` (
`a` smallint(6) NOT NULL AUTO_INCREMENT,
`b` int(11) DEFAULT NULL,
@@ -674,7 +696,7 @@
) ENGINE=InnoDB AUTO_INCREMENT=180 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
# This should reset the autoinc counter to the next value of
# current max counter in the table, since the specified value
@@ -818,7 +818,7 @@
@@ -844,7 +844,7 @@
Table Create Table
t_copy CREATE TABLE `t_copy` (
`a` smallint(6) NOT NULL AUTO_INCREMENT,
@@ -683,7 +705,7 @@
) ENGINE=InnoDB AUTO_INCREMENT=123 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
INSERT INTO t_copy VALUES(0), (0);
SELECT MAX(a) AS `Expect 124` FROM t_copy;
@@ -842,7 +842,7 @@
@@ -868,7 +868,7 @@
126
DROP TABLE t_copy, it_copy;
# Scenario 9: Test the sql_mode = NO_AUTO_VALUE_ON_ZERO
@@ -692,7 +714,7 @@
set SQL_MODE = NO_AUTO_VALUE_ON_ZERO;
INSERT INTO t30 VALUES(NULL, 1), (200, 2), (0, 3);
INSERT INTO t30(b) VALUES(4), (5), (6), (7);
@@ -869,20 +869,20 @@
@@ -895,20 +895,20 @@
set global innodb_flush_log_at_trx_commit=1;
CREATE TABLE t31 (a INT) ENGINE = InnoDB;
INSERT INTO t31 VALUES(1), (2);
@@ -719,7 +741,7 @@
INSERT INTO t32 VALUES(0), (0);
# Ensure that all changes before the server is killed are persisted.
set global innodb_flush_log_at_trx_commit=1;
@@ -897,7 +897,7 @@
@@ -923,7 +923,7 @@
# increasing the counter
CREATE TABLE t33 (
a BIGINT NOT NULL PRIMARY KEY,
@@ -728,7 +750,7 @@
INSERT INTO t33 VALUES(1, NULL);
INSERT INTO t33 VALUES(2, NULL);
INSERT INTO t33 VALUES(2, NULL);
@@ -920,13 +920,13 @@
@@ -946,13 +946,13 @@
INSERT INTO t31(a) VALUES(6), (0);
SELECT * FROM t31;
a b
@@ -748,7 +770,7 @@
DROP TABLE t31;
set SQL_MODE = NO_AUTO_VALUE_ON_ZERO;
DELETE FROM t30 WHERE a = 0;
@@ -965,7 +965,7 @@
@@ -991,7 +991,7 @@
DROP TABLE t33;
CREATE TABLE t33 (
a BIGINT NOT NULL PRIMARY KEY,
@@ -757,7 +779,7 @@
ALTER TABLE t33 DISCARD TABLESPACE;
restore: t33 .ibd and .cfg files
ALTER TABLE t33 IMPORT TABLESPACE;
@@ -975,7 +975,7 @@
@@ -1001,8 +1001,8 @@
4
SELECT * FROM t33;
a b
@@ -766,4 +788,5 @@
3 4
+2 2
+10 1
DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t30, t32, t33;
DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t11u, t12u,
t30, t32, t33;

View File

@@ -190,8 +190,7 @@ a
100000000000
100000000006
CREATE TABLE t11(a FLOAT AUTO_INCREMENT KEY) ENGINE = InnoDB;
INSERT INTO t11 VALUES(0), (0), (0), (0), (-1), (-10), (0),
(20), (30), (31);
INSERT INTO t11 VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31);
SELECT * FROM t11;
a
-10
@@ -204,9 +203,22 @@ a
20
30
31
CREATE TABLE t11u(a FLOAT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB;
INSERT INTO t11u VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31);
ERROR 22003: Out of range value for column 'a' at row 5
INSERT INTO t11u VALUES(0), (0), (0), (0), (0), (20), (30), (31);
SELECT * FROM t11u;
a
11
12
13
14
15
20
30
31
CREATE TABLE t12(a DOUBLE AUTO_INCREMENT KEY) ENGINE = InnoDB;
INSERT INTO t12 VALUES(0), (0), (0), (0), (-1), (-10), (0),
(20), (30), (31);
INSERT INTO t12 VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31);
SELECT * FROM t12;
a
-10
@@ -219,6 +231,20 @@ a
20
30
31
CREATE TABLE t12u(a DOUBLE UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB;
INSERT INTO t12u VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31);
ERROR 22003: Out of range value for column 'a' at row 5
INSERT INTO t12u VALUES(0), (0), (0), (0), (0), (20), (30), (31);
SELECT * FROM t12u;
a
11
12
13
14
15
20
30
31
# Scenario 1: Normal restart, to test if the counters are persisted
# Scenario 2: Delete some values, to test the counters should not be the
# one which is the largest in current table
@@ -978,4 +1004,5 @@ a b
10 1
2 2
3 4
DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t30, t32, t33;
DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t11u, t12u,
t30, t32, t33;

View File

@@ -11,9 +11,11 @@ insert into t1 values(5, repeat('.',12));
commit work;
SET GLOBAL innodb_fast_shutdown = 0;
# restart
SET GLOBAL innodb_max_dirty_pages_pct_lwm=0,innodb_max_dirty_pages_pct=0;
SET GLOBAL innodb_max_dirty_pages_pct=99;
connect dml,localhost,root,,;
XA START 'x';
insert into t1 values (6, repeat('%', @@innodb_page_size/2));
insert into t1 values(6, repeat('%', @@innodb_page_size/2));
XA END 'x';
XA PREPARE 'x';
disconnect dml;
@@ -23,7 +25,6 @@ flush table t1 for export;
# restart
FOUND 1 /InnoDB: Recovered page \[page id: space=[1-9][0-9]*, page number=0\]/ in mysqld.1.err
# restart
XA ROLLBACK 'x';
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@@ -34,18 +35,13 @@ f1 f2
3 ////////////
4 ------------
5 ............
connect dml,localhost,root,,;
XA START 'x';
insert into t1 values (6, repeat('%', @@innodb_page_size/2));
XA END 'x';
XA PREPARE 'x';
disconnect dml;
connection default;
flush table t1 for export;
SET GLOBAL innodb_max_dirty_pages_pct_lwm=0,innodb_max_dirty_pages_pct=0;
SET GLOBAL innodb_max_dirty_pages_pct=99;
XA ROLLBACK 'x';
FLUSH TABLE t1 FOR EXPORT;
# Kill the server
# restart
FOUND 4 /InnoDB: Recovered page \[page id: space=[1-9][0-9]*, page number=[03]\]/ in mysqld.1.err
XA ROLLBACK 'x';
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK

View File

@@ -1,10 +1,18 @@
CREATE TABLE `t`(`id` INT, PRIMARY KEY(`id`)) ENGINE=InnoDB STATS_PERSISTENT=0;
INSERT INTO t VALUES (1);
SET GLOBAL innodb_monitor_reset = "module_innodb";
SET GLOBAL innodb_monitor_disable="lock_row_lock_time";
SET GLOBAL innodb_monitor_disable="lock_row_lock_time_max";
SET GLOBAL innodb_monitor_reset_all='lock_row_lock_time';
SET GLOBAL innodb_monitor_reset_all='lock_row_lock_time_max';
SET GLOBAL innodb_monitor_enable="lock_row_lock_time";
SET GLOBAL innodb_monitor_enable="lock_row_lock_time_max";
BEGIN;
SELECT * FROM t FOR UPDATE;
id
1
SELECT @innodb_row_lock_time_before := variable_value
FROM information_schema.global_status
WHERE LOWER(variable_name) = 'innodb_row_lock_time';
connect con1,localhost,root,,;
SET innodb_lock_wait_timeout = 1;
SELECT * FROM t FOR UPDATE;
@@ -12,29 +20,27 @@ ERROR HY000: Lock wait timeout exceeded; try restarting transaction
disconnect con1;
connection default;
COMMIT;
SELECT variable_value > 100 FROM information_schema.global_status
SELECT variable_value - @innodb_row_lock_time_before > 100
FROM information_schema.global_status
WHERE LOWER(variable_name) = 'innodb_row_lock_time';
variable_value > 100
variable_value - @innodb_row_lock_time_before > 100
1
SELECT variable_value > 100 FROM information_schema.global_status
SELECT variable_value > 100
FROM information_schema.global_status
WHERE LOWER(variable_name) = 'innodb_row_lock_time_max';
variable_value > 100
1
SELECT variable_value > 100 FROM information_schema.global_status
WHERE LOWER(variable_name) = 'innodb_row_lock_time_avg';
variable_value > 100
1
SELECT count_reset > 100 FROM INFORMATION_SCHEMA.INNODB_METRICS
WHERE NAME="lock_row_lock_time";
SELECT count_reset > 100
FROM INFORMATION_SCHEMA.INNODB_METRICS
WHERE NAME='lock_row_lock_time';
count_reset > 100
1
SELECT count_reset > 100 FROM INFORMATION_SCHEMA.INNODB_METRICS
WHERE NAME="lock_row_lock_time_max";
count_reset > 100
1
SELECT count_reset > 100 FROM INFORMATION_SCHEMA.INNODB_METRICS
WHERE NAME="lock_row_lock_time_avg";
SELECT count_reset > 100
FROM INFORMATION_SCHEMA.INNODB_METRICS
WHERE NAME='lock_row_lock_time_max';
count_reset > 100
1
DROP TABLE t;
SET GLOBAL innodb_monitor_reset=default;
SET GLOBAL innodb_monitor_enable=default;
SET GLOBAL innodb_monitor_disable=default;
SET GLOBAL innodb_monitor_reset_all=default;

View File

@@ -5,13 +5,13 @@ COUNT(*) 1
SELECT COUNT(*) FROM mysql.innodb_index_stats WHERE table_name = 't';
COUNT(*) 3
SELECT * FROM t;
FLUSH TABLE t;
DELETE FROM mysql.innodb_index_stats WHERE table_name = 't';
DELETE FROM mysql.innodb_table_stats WHERE table_name = 't';
SELECT COUNT(*) FROM mysql.innodb_table_stats WHERE table_name = 't';
COUNT(*) 0
SELECT COUNT(*) FROM mysql.innodb_index_stats WHERE table_name = 't';
COUNT(*) 0
RENAME TABLE t TO tmp, tmp TO t;
SELECT * FROM t;
SELECT COUNT(*) FROM mysql.innodb_table_stats WHERE table_name = 't';
COUNT(*) 1
@@ -25,13 +25,13 @@ COUNT(*) 1
SELECT COUNT(*) FROM mysql.innodb_index_stats WHERE table_name = 't';
COUNT(*) 3
SELECT * FROM t;
FLUSH TABLE t;
DELETE FROM mysql.innodb_index_stats WHERE table_name = 't';
DELETE FROM mysql.innodb_table_stats WHERE table_name = 't';
SELECT COUNT(*) FROM mysql.innodb_table_stats WHERE table_name = 't';
COUNT(*) 0
SELECT COUNT(*) FROM mysql.innodb_index_stats WHERE table_name = 't';
COUNT(*) 0
RENAME TABLE t TO tmp, tmp TO t;
SELECT * FROM t;
SELECT COUNT(*) FROM mysql.innodb_table_stats WHERE table_name = 't';
COUNT(*) 1
@@ -45,13 +45,13 @@ COUNT(*) 1
SELECT COUNT(*) FROM mysql.innodb_index_stats WHERE table_name = 't';
COUNT(*) 3
SELECT * FROM t;
FLUSH TABLE t;
DELETE FROM mysql.innodb_index_stats WHERE table_name = 't';
DELETE FROM mysql.innodb_table_stats WHERE table_name = 't';
SELECT COUNT(*) FROM mysql.innodb_table_stats WHERE table_name = 't';
COUNT(*) 0
SELECT COUNT(*) FROM mysql.innodb_index_stats WHERE table_name = 't';
COUNT(*) 0
RENAME TABLE t TO tmp, tmp TO t;
SELECT * FROM t;
SELECT COUNT(*) FROM mysql.innodb_table_stats WHERE table_name = 't';
COUNT(*) 0

View File

@@ -125,7 +125,7 @@ WHERE
table_name = 'test_ps_fetch' AND
index_name = 'idx' AND
stat_name = 'n_diff_pfx02';
FLUSH TABLE test_ps_fetch;
RENAME TABLE test_ps_fetch TO tmp, tmp TO test_ps_fetch;
SELECT seq_in_index, column_name, cardinality
FROM information_schema.statistics WHERE table_name = 'test_ps_fetch'
ORDER BY index_name, seq_in_index;

View File

@@ -95,15 +95,25 @@ INSERT INTO t10 VALUES(0), (0), (0), (0), (8), (10), (0),
SELECT * FROM t10;
eval CREATE TABLE t11(a FLOAT $AUTO_INCREMENT_KEY_a) ENGINE = InnoDB;
INSERT INTO t11 VALUES(0), (0), (0), (0), (-1), (-10), (0),
(20), (30), (31);
INSERT INTO t11 VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31);
SELECT * FROM t11;
eval CREATE TABLE t11u(a FLOAT UNSIGNED $AUTO_INCREMENT_KEY_a) ENGINE = InnoDB;
--error ER_WARN_DATA_OUT_OF_RANGE
INSERT INTO t11u VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31);
INSERT INTO t11u VALUES(0), (0), (0), (0), (0), (20), (30), (31);
SELECT * FROM t11u;
eval CREATE TABLE t12(a DOUBLE $AUTO_INCREMENT_KEY_a) ENGINE = InnoDB;
INSERT INTO t12 VALUES(0), (0), (0), (0), (-1), (-10), (0),
(20), (30), (31);
INSERT INTO t12 VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31);
SELECT * FROM t12;
CREATE TABLE t12u(a DOUBLE UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB;
--error ER_WARN_DATA_OUT_OF_RANGE
INSERT INTO t12u VALUES(0), (0), (0), (0), (-1), (-10), (0), (20), (30), (31);
INSERT INTO t12u VALUES(0), (0), (0), (0), (0), (20), (30), (31);
SELECT * FROM t12u;
--echo # Scenario 1: Normal restart, to test if the counters are persisted
--echo # Scenario 2: Delete some values, to test the counters should not be the
--echo # one which is the largest in current table
@@ -566,4 +576,5 @@ INSERT INTO t33 VALUES(3, NULL);
SELECT MAX(b) AS `Expect 4` FROM t33;
SELECT * FROM t33;
DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t30, t32, t33;
DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t11u, t12u,
t30, t32, t33;

View File

@@ -1,7 +1,9 @@
[strict_crc32]
--innodb-checksum-algorithm=strict_crc32
--innodb-use-atomic-writes=0
--innodb-undo-tablespaces=0
[strict_full_crc32]
--innodb-checksum-algorithm=strict_full_crc32
--innodb-use-atomic-writes=0
--innodb-undo-tablespaces=0

View File

@@ -42,10 +42,17 @@ commit work;
SET GLOBAL innodb_fast_shutdown = 0;
let $shutdown_timeout=;
--source include/restart_mysqld.inc
SET GLOBAL innodb_max_dirty_pages_pct_lwm=0,innodb_max_dirty_pages_pct=0;
let $wait_condition =
SELECT variable_value = 0
FROM information_schema.global_status
WHERE variable_name = 'INNODB_BUFFER_POOL_PAGES_DIRTY';
--source include/wait_condition.inc
SET GLOBAL innodb_max_dirty_pages_pct=99;
--source ../include/no_checkpoint_start.inc
connect (dml,localhost,root,,);
XA START 'x';
insert into t1 values (6, repeat('%', @@innodb_page_size/2));
insert into t1 values(6, repeat('%', @@innodb_page_size/2));
XA END 'x';
XA PREPARE 'x';
disconnect dml;
@@ -53,10 +60,12 @@ connection default;
flush table t1 for export;
let $restart_parameters=;
--let CLEANUP_IF_CHECKPOINT=XA COMMIT 'x';drop table t1;
--let CLEANUP_IF_CHECKPOINT=drop table t1, unexpected_checkpoint;
--source ../include/no_checkpoint_end.inc
--copy_file $MYSQLD_DATADIR/ibdata1 $MYSQLD_DATADIR/ibdata1.bak
--copy_file $MYSQLD_DATADIR/ib_logfile0 $MYSQLD_DATADIR/ib_logfile0.bak
perl;
use IO::Handle;
do "$ENV{MTR_SUITE_DIR}/include/crc32.pl";
@@ -145,6 +154,12 @@ let $shutdown_timeout=0;
--source include/shutdown_mysqld.inc
let $shutdown_timeout=;
# Corrupt the file in a better way.
--remove_file $MYSQLD_DATADIR/ibdata1
--remove_file $MYSQLD_DATADIR/ib_logfile0
--move_file $MYSQLD_DATADIR/ibdata1.bak $MYSQLD_DATADIR/ibdata1
--move_file $MYSQLD_DATADIR/ib_logfile0.bak $MYSQLD_DATADIR/ib_logfile0
perl;
use IO::Handle;
my $fname= "$ENV{'MYSQLD_DATADIR'}test/t1.ibd";
@@ -157,22 +172,23 @@ syswrite(FILE, chr(0) x ($page_size/2));
close FILE;
EOF
--source include/start_mysqld.inc
XA ROLLBACK 'x';
check table t1;
select f1, f2 from t1;
SET GLOBAL innodb_max_dirty_pages_pct_lwm=0,innodb_max_dirty_pages_pct=0;
let $wait_condition =
SELECT variable_value = 0
FROM information_schema.global_status
WHERE variable_name = 'INNODB_BUFFER_POOL_PAGES_DIRTY';
--source include/wait_condition.inc
SET GLOBAL innodb_max_dirty_pages_pct=99;
--source ../include/no_checkpoint_start.inc
connect (dml,localhost,root,,);
XA START 'x';
insert into t1 values (6, repeat('%', @@innodb_page_size/2));
XA END 'x';
XA PREPARE 'x';
disconnect dml;
connection default;
XA ROLLBACK 'x';
FLUSH TABLE t1 FOR EXPORT;
flush table t1 for export;
let $restart_parameters=;
# If we are skipping the test at this point due to an unexpected
# checkpoint, we will already have tested a part of this functionality.
--let CLEANUP_IF_CHECKPOINT=drop table t1;
--source ../include/no_checkpoint_end.inc
# Zero out the first page in file and try to recover from dblwr
@@ -186,7 +202,6 @@ EOF
--source include/start_mysqld.inc
let SEARCH_PATTERN=InnoDB: Recovered page \\[page id: space=[1-9][0-9]*, page number=[03]\\];
--source include/search_pattern_in_file.inc
XA ROLLBACK 'x';
check table t1;
select f1, f2 from t1;
drop table t1;

View File

@@ -5,11 +5,26 @@ CREATE TABLE `t`(`id` INT, PRIMARY KEY(`id`)) ENGINE=InnoDB STATS_PERSISTENT=0;
INSERT INTO t VALUES (1);
SET GLOBAL innodb_monitor_reset = "module_innodb";
SET GLOBAL innodb_monitor_disable="lock_row_lock_time";
SET GLOBAL innodb_monitor_disable="lock_row_lock_time_max";
SET GLOBAL innodb_monitor_reset_all='lock_row_lock_time';
SET GLOBAL innodb_monitor_reset_all='lock_row_lock_time_max';
SET GLOBAL innodb_monitor_enable="lock_row_lock_time";
SET GLOBAL innodb_monitor_enable="lock_row_lock_time_max";
BEGIN;
SELECT * FROM t FOR UPDATE;
# We can't predict (innodb/lock)_row_lock_time_avg value, because it's counted
# as the whole waiting time divided by the amount of waits. The
# corresponding counters in lock_sys can't be reset with any query.
--disable_result_log
SELECT @innodb_row_lock_time_before := variable_value
FROM information_schema.global_status
WHERE LOWER(variable_name) = 'innodb_row_lock_time';
--enable_result_log
--connect(con1,localhost,root,,)
SET innodb_lock_wait_timeout = 1;
--error ER_LOCK_WAIT_TIMEOUT
@@ -19,24 +34,28 @@ SELECT * FROM t FOR UPDATE;
--connection default
COMMIT;
SELECT variable_value > 100 FROM information_schema.global_status
SELECT variable_value - @innodb_row_lock_time_before > 100
FROM information_schema.global_status
WHERE LOWER(variable_name) = 'innodb_row_lock_time';
SELECT variable_value > 100 FROM information_schema.global_status
# We can't use 'variable_value - @innodb_row_lock_time_max_before' trick for
# innodb_row_lock_time_max, because we can't reset it, and we don't know the
# initial value at the moment of the test execution.
SELECT variable_value > 100
FROM information_schema.global_status
WHERE LOWER(variable_name) = 'innodb_row_lock_time_max';
SELECT variable_value > 100 FROM information_schema.global_status
WHERE LOWER(variable_name) = 'innodb_row_lock_time_avg';
SELECT count_reset > 100 FROM INFORMATION_SCHEMA.INNODB_METRICS
WHERE NAME="lock_row_lock_time";
SELECT count_reset > 100 FROM INFORMATION_SCHEMA.INNODB_METRICS
WHERE NAME="lock_row_lock_time_max";
SELECT count_reset > 100 FROM INFORMATION_SCHEMA.INNODB_METRICS
WHERE NAME="lock_row_lock_time_avg";
SELECT count_reset > 100
FROM INFORMATION_SCHEMA.INNODB_METRICS
WHERE NAME='lock_row_lock_time';
SELECT count_reset > 100
FROM INFORMATION_SCHEMA.INNODB_METRICS
WHERE NAME='lock_row_lock_time_max';
DROP TABLE t;
--disable_warnings
SET GLOBAL innodb_monitor_reset=default;
SET GLOBAL innodb_monitor_enable=default;
SET GLOBAL innodb_monitor_disable=default;
SET GLOBAL innodb_monitor_reset_all=default;
--enable_warnings
--source include/wait_until_count_sessions.inc

View File

@@ -17,9 +17,7 @@ CREATE TABLE t (a INT, PRIMARY KEY (a)) ENGINE=INNODB;
-- eval $check_stats1
-- eval $check_stats2
# open and close the table
SELECT * FROM t;
FLUSH TABLE t;
DELETE FROM mysql.innodb_index_stats WHERE table_name = 't';
DELETE FROM mysql.innodb_table_stats WHERE table_name = 't';
@@ -27,7 +25,8 @@ DELETE FROM mysql.innodb_table_stats WHERE table_name = 't';
-- eval $check_stats1
-- eval $check_stats2
# open the table, causing stats recalc/save
# rename and open the table, causing stats recalc/save
RENAME TABLE t TO tmp, tmp TO t;
SELECT * FROM t;
-- eval $check_stats1
@@ -43,9 +42,7 @@ CREATE TABLE t (a INT, PRIMARY KEY (a)) ENGINE=INNODB STATS_AUTO_RECALC=1;
-- eval $check_stats1
-- eval $check_stats2
# open and close the table
SELECT * FROM t;
FLUSH TABLE t;
DELETE FROM mysql.innodb_index_stats WHERE table_name = 't';
DELETE FROM mysql.innodb_table_stats WHERE table_name = 't';
@@ -53,7 +50,7 @@ DELETE FROM mysql.innodb_table_stats WHERE table_name = 't';
-- eval $check_stats1
-- eval $check_stats2
# open the table, causing stats recalc/save
RENAME TABLE t TO tmp, tmp TO t;
SELECT * FROM t;
-- eval $check_stats1
@@ -69,9 +66,7 @@ CREATE TABLE t (a INT, PRIMARY KEY (a)) ENGINE=INNODB STATS_AUTO_RECALC=0;
-- eval $check_stats1
-- eval $check_stats2
# open and close the table
SELECT * FROM t;
FLUSH TABLE t;
DELETE FROM mysql.innodb_index_stats WHERE table_name = 't';
DELETE FROM mysql.innodb_table_stats WHERE table_name = 't';
@@ -79,7 +74,8 @@ DELETE FROM mysql.innodb_table_stats WHERE table_name = 't';
-- eval $check_stats1
-- eval $check_stats2
# open the table, stats should not be present, since autorecalc is disabled
# rename the table, stats should not be present, since autorecalc is disabled
RENAME TABLE t TO tmp, tmp TO t;
SELECT * FROM t;
-- eval $check_stats1

View File

@@ -69,7 +69,7 @@ table_name = 'test_ps_fetch' AND
index_name = 'idx' AND
stat_name = 'n_diff_pfx02';
FLUSH TABLE test_ps_fetch;
RENAME TABLE test_ps_fetch TO tmp, tmp TO test_ps_fetch;
SELECT seq_in_index, column_name, cardinality
FROM information_schema.statistics WHERE table_name = 'test_ps_fetch'

View File

@@ -5,6 +5,9 @@ id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY,
title VARCHAR(200),
content TEXT
) ENGINE= InnoDB;
SET STATEMENT debug_dbug='+d,innodb_report_deadlock' FOR
CREATE FULLTEXT INDEX idx ON articles (title, content);
ERROR HY000: Got error 11 "Resource temporarily unavailable" from storage engine InnoDB
CREATE FULLTEXT INDEX idx ON articles (title, content);
INSERT INTO articles (title, content) VALUES
('MySQL Tutorial','DBMS stands for MySQL DataBase ...'),

View File

@@ -3,6 +3,9 @@
-- source include/have_innodb.inc
-- source include/have_debug.inc
--disable_query_log
call mtr.add_suppression("InnoDB: \\(Deadlock\\) writing `use_stopword'");
--enable_query_log
SET @optimize=@@GLOBAL.INNODB_OPTIMIZE_FULLTEXT_ONLY;
SET GLOBAL INNODB_OPTIMIZE_FULLTEXT_ONLY=1;
@@ -14,6 +17,9 @@ CREATE TABLE articles (
content TEXT
) ENGINE= InnoDB;
--error ER_GET_ERRNO
SET STATEMENT debug_dbug='+d,innodb_report_deadlock' FOR
CREATE FULLTEXT INDEX idx ON articles (title, content);
CREATE FULLTEXT INDEX idx ON articles (title, content);
INSERT INTO articles (title, content) VALUES

View File

@@ -1,10 +1,10 @@
SELECT name, type, processlist_user, processlist_host, processlist_db,
processlist_command, processlist_time, processlist_state, processlist_info,
processlist_command, processlist_time, processlist_info,
parent_thread_id, role, instrumented
FROM performance_schema.threads
WHERE name LIKE 'thread/innodb/%'
GROUP BY name;
name type processlist_user processlist_host processlist_db processlist_command processlist_time processlist_state processlist_info parent_thread_id role instrumented
thread/innodb/page_cleaner_thread BACKGROUND NULL NULL NULL NULL NULL NULL NULL NULL NULL YES
thread/innodb/page_encrypt_thread BACKGROUND NULL NULL NULL NULL NULL NULL NULL NULL NULL YES
thread/innodb/thread_pool_thread BACKGROUND NULL NULL NULL NULL NULL NULL NULL NULL NULL YES
name type processlist_user processlist_host processlist_db processlist_command processlist_time processlist_info parent_thread_id role instrumented
thread/innodb/page_cleaner_thread BACKGROUND NULL NULL NULL NULL NULL NULL NULL NULL YES
thread/innodb/page_encrypt_thread BACKGROUND NULL NULL NULL NULL NULL NULL NULL NULL YES
thread/innodb/thread_pool_thread BACKGROUND NULL NULL NULL NULL NULL NULL NULL NULL YES

View File

@@ -14,7 +14,7 @@
# We suppress here duplicates rows with the goal to avoid that the test fails
# in case some defaults are changed.
SELECT name, type, processlist_user, processlist_host, processlist_db,
processlist_command, processlist_time, processlist_state, processlist_info,
processlist_command, processlist_time, processlist_info,
parent_thread_id, role, instrumented
FROM performance_schema.threads
WHERE name LIKE 'thread/innodb/%'

View File

@@ -1,16 +1,15 @@
***MDEV-5914: Parallel replication deadlock due to InnoDB lock conflicts ***
include/master-slave.inc
[connection master]
connection server_2;
SET sql_log_bin=0;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CALL mtr.add_suppression("InnoDB: Transaction was aborted due to ");
CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends");
SET sql_log_bin=1;
connection server_2;
SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
include/stop_slave.inc
SET GLOBAL slave_parallel_threads=10;
CHANGE MASTER TO master_use_gtid=slave_pos;
connection server_1;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB;
INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6);
connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,;

View File

@@ -0,0 +1,26 @@
include/master-slave.inc
[connection master]
connection master;
create table t1 (a int primary key, b int) engine=innodb;
insert t1 values (1,1),(3,3),(5,5),(7,7);
create table t2 (m int) engine=aria;
# Create multi-engine, two-phase XA transaction (T1)
xa start '1';
insert t2 values (1);
update t1 set b=50 where b=5;
xa end '1';
xa prepare '1';
# Create T2
connection server_1;
update t1 set b=10 where a=5;
connection master;
xa commit '1';
connection server_1;
include/save_master_gtid.inc
# This would hang prior to MDEV-21117
connection slave;
include/sync_with_master_gtid.inc
connection master;
drop table t1, t2;
include/rpl_end.inc
# End of rpl_xa_2pc_multi_engine.test

View File

@@ -5,21 +5,19 @@
--source include/have_debug_sync.inc
--source include/master-slave.inc
--disable_query_log
call mtr.add_suppression("InnoDB: Transaction was aborted due to ");
--enable_query_log
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CALL mtr.add_suppression("InnoDB: Transaction was aborted due to ");
CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends");
--save_master_pos
--connection server_2
SET sql_log_bin=0;
CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends");
SET sql_log_bin=1;
--sync_with_master
SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
--source include/stop_slave.inc
SET GLOBAL slave_parallel_threads=10;
CHANGE MASTER TO master_use_gtid=slave_pos;
--connection server_1
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB;
INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6);
--connect (con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,)

View File

@@ -0,0 +1,63 @@
#
# This test ensures binlog order is correct for multi-engine, two-phase XA
# transactions. MDEV-26652 exposed a race condition which would allow
# concurrent transactions which modify the same table record to binlog in
# the "opposite" order, i.e. what _should_ be:
# T1 XA PREPARE
# T1 XA COMMIT
# T2
#
# was binlogged as
# T1 XA PREPARE
# T2
# T1 XA COMMIT
#
# which would break replication.
#
# Note that the actual fix for this issue was done with MDEV-21117.
#
# References:
# MDEV-26652: xa transactions binlogged in wrong order
# MDEV-21117: refine the server binlog-based recovery for semisync
#
source include/have_binlog_format_row.inc;
source include/have_innodb.inc;
source include/master-slave.inc;
--connection master
create table t1 (a int primary key, b int) engine=innodb;
insert t1 values (1,1),(3,3),(5,5),(7,7);
create table t2 (m int) engine=aria;
--echo # Create multi-engine, two-phase XA transaction (T1)
xa start '1';
insert t2 values (1);
update t1 set b=50 where b=5;
xa end '1';
# Aria doesn't support XA PREPARE, so disable warnings
--disable_warnings
xa prepare '1';
--enable_warnings
--echo # Create T2
--connection server_1
--send update t1 set b=10 where a=5
--connection master
xa commit '1';
--connection server_1
--reap
--source include/save_master_gtid.inc
--echo # This would hang prior to MDEV-21117
--connection slave
--source include/sync_with_master_gtid.inc
--connection master
drop table t1, t2;
--source include/rpl_end.inc
--echo # End of rpl_xa_2pc_multi_engine.test

View File

@@ -1487,10 +1487,10 @@ VARIABLE_NAME INNODB_STATS_PERSISTENT_SAMPLE_PAGES
SESSION_VALUE NULL
DEFAULT_VALUE 20
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The number of leaf index pages to sample when calculating persistent statistics (by ANALYZE, default 20)
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 18446744073709551615
NUMERIC_MAX_VALUE 4294967295
NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -1511,10 +1511,10 @@ VARIABLE_NAME INNODB_STATS_TRANSIENT_SAMPLE_PAGES
SESSION_VALUE NULL
DEFAULT_VALUE 8
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The number of leaf index pages to sample when calculating transient statistics (if persistent statistics are not used, default 8)
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 18446744073709551615
NUMERIC_MAX_VALUE 4294967295
NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY NO

View File

@@ -0,0 +1,15 @@
#
# wsrep_replicate_myisam
#
# save the initial value
SET @wsrep_mode_saved = @@global.wsrep_mode;
# scope and valid values
SET @@global.wsrep_mode=REPLICATE_MYISAM;
SELECT @@global.wsrep_mode;
@@global.wsrep_mode
REPLICATE_MYISAM
# restore the initial value
SET @@global.wsrep_mode = @wsrep_mode_saved;
# End of test

View File

@@ -0,0 +1,19 @@
--source include/have_wsrep.inc
--echo #
--echo # wsrep_replicate_myisam
--echo #
--echo # save the initial value
SET @wsrep_mode_saved = @@global.wsrep_mode;
--echo
--echo # scope and valid values
SET @@global.wsrep_mode=REPLICATE_MYISAM;
SELECT @@global.wsrep_mode;
--echo
--echo # restore the initial value
SET @@global.wsrep_mode = @wsrep_mode_saved;
--echo # End of test

View File

@@ -0,0 +1,7 @@
!include ../my.cnf
[mysqld.1]
wsrep-on=ON
wsrep-cluster-address=gcomm://
wsrep-provider=@ENV.WSREP_PROVIDER
binlog-format=ROW

View File

@@ -1,4 +1,7 @@
--source include/have_wsrep.inc
--source include/have_innodb.inc
--source include/have_wsrep_provider.inc
--source include/have_binlog_format_row.inc
--echo #
--echo # wsrep_forced_binlog_format

View File

@@ -3762,10 +3762,11 @@ static void free_block(SIMPLE_KEY_CACHE_CB *keycache, BLOCK_LINK *block)
static int cmp_sec_link(const void *_a, const void *_b)
{
BLOCK_LINK *const *a= _a;
BLOCK_LINK *const *b= _b;
return (((*a)->hash_link->diskpos < (*b)->hash_link->diskpos) ? -1 :
((*a)->hash_link->diskpos > (*b)->hash_link->diskpos) ? 1 : 0);
const BLOCK_LINK *a= *(const BLOCK_LINK **)_a;
const BLOCK_LINK *b= *(const BLOCK_LINK **)_b;
return (a->hash_link->diskpos < b->hash_link->diskpos) ? -1 :
(a->hash_link->diskpos > b->hash_link->diskpos) ? 1 : 0;
}

View File

@@ -91,6 +91,15 @@ String *Item_func_geometry_from_wkb::val_str(String *str)
{
String *str_ret= args[0]->val_str(str);
null_value= args[0]->null_value;
if (!null_value && arg_count == 2 && !args[1]->null_value) {
srid= (uint32)args[1]->val_int();
if (str->copy(*str_ret))
return 0;
int4store(str->ptr(), srid);
return str;
}
return str_ret;
}

View File

@@ -8760,6 +8760,11 @@ SEL_TREE *Item_cond::get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr)
SEL_TREE *tree= li.ref()[0]->get_mm_tree(param, li.ref());
if (param->statement_should_be_aborted())
DBUG_RETURN(NULL);
bool orig_disable_index_merge= param->disable_index_merge_plans;
if (list.elements > MAX_OR_ELEMENTS_FOR_INDEX_MERGE)
param->disable_index_merge_plans= true;
if (tree)
{
if (tree->type == SEL_TREE::IMPOSSIBLE &&
@@ -8776,7 +8781,10 @@ SEL_TREE *Item_cond::get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr)
{
SEL_TREE *new_tree= li.ref()[0]->get_mm_tree(param, li.ref());
if (new_tree == NULL || param->statement_should_be_aborted())
{
param->disable_index_merge_plans= orig_disable_index_merge;
DBUG_RETURN(NULL);
}
tree= tree_or(param, tree, new_tree);
if (tree == NULL || tree->type == SEL_TREE::ALWAYS)
{
@@ -8808,6 +8816,7 @@ SEL_TREE *Item_cond::get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr)
if (replace_cond)
*cond_ptr= replacement_item;
}
param->disable_index_merge_plans= orig_disable_index_merge;
DBUG_RETURN(tree);
}
@@ -10199,6 +10208,8 @@ tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
{
bool must_be_ored= sel_trees_must_be_ored(param, tree1, tree2, ored_keys);
no_imerge_from_ranges= must_be_ored;
if (param->disable_index_merge_plans)
no_imerge_from_ranges= true;
if (no_imerge_from_ranges && no_merges1 && no_merges2)
{

View File

@@ -39,6 +39,32 @@
class JOIN;
class Item_sum;
/*
When processing an OR clause with more than MAX_OR_ELEMENTS_FOR_INDEX_MERGE
disjuncts (i.e. OR-parts), do not construct index_merge plans from it.
Some users have OR clauses with extremely large number of disjuncts, like:
(key1=1 AND key2=10) OR
(key1=2 AND key2=20) OR
(key1=3 AND key2=30) OR
...
When processing this, the optimizer would try to build a lot of potential
index_merge plans. Hypothetically this could be useful as the cheapest plan
could be to pick a specific index for each disjunct and build:
index_merge(key1 IN (1,3,8,15...), key2 IN (20, 40, 50 ...))
In practice this causes combinatorial amount of time to be spent in the range
analyzer, and most variants will be discarded when the range optimizer tries
to avoid this combinatorial explosion (which may or may not work depending on
the form of the WHERE clause).
In practice, very long ORs are served well enough by just considering range
accesses on individual indexes.
*/
const int MAX_OR_ELEMENTS_FOR_INDEX_MERGE=100;
struct KEY_PART {
uint16 key,part;
/* See KEY_PART_INFO for meaning of the next two: */
@@ -891,6 +917,9 @@ public:
*/
bool remove_false_where_parts;
/* If TRUE, do not construct index_merge plans */
bool disable_index_merge_plans;
/*
Which functions should give SQL notes for unusable keys.
*/

View File

@@ -137,6 +137,7 @@ public:
handlerton **ha,
bool tmp_table);
bool is_set() { return m_storage_engine_name.str != NULL; }
const LEX_CSTRING *name() const { return &m_storage_engine_name; }
};

View File

@@ -537,36 +537,53 @@ static bool write_db_opt(THD *thd, const char *path,
DESCRIPTION
create->default_table_charset is guaranteed to be alway set
Required by some callers
RETURN VALUES
0 File found
1 No database file or could not open it
-1 No database file (file was not found or 'empty' file was cached)
1 Could not open it
*/
bool load_db_opt(THD *thd, const char *path, Schema_specification_st *create)
int load_db_opt(THD *thd, const char *path, Schema_specification_st *create)
{
File file;
char buf[256+DATABASE_COMMENT_MAXLEN];
DBUG_ENTER("load_db_opt");
bool error=1;
int error= 0;
size_t nbytes;
myf utf8_flag= thd->get_utf8_flag();
bzero((char*) create,sizeof(*create));
create->default_table_charset= thd->variables.collation_server;
/* Check if options for this database are already in the hash */
if (!get_dbopt(thd, path, create))
DBUG_RETURN(0);
{
if (!create->default_table_charset)
error= -1; // db.opt did not exists
goto err1;
}
/* Otherwise, load options from the .opt file */
if ((file= mysql_file_open(key_file_dbopt,
path, O_RDONLY | O_SHARE, MYF(0))) < 0)
{
/*
Create an empty entry, to avoid doing an extra file open for every create
table.
*/
put_dbopt(path, create);
error= -1;
goto err1;
}
IO_CACHE cache;
if (init_io_cache(&cache, file, IO_SIZE, READ_CACHE, 0, 0, MYF(0)))
goto err2;
{
error= 1;
goto err2; // Not cached
}
while ((int) (nbytes= my_b_gets(&cache, (char*) buf, sizeof(buf))) > 0)
{
@@ -622,10 +639,11 @@ bool load_db_opt(THD *thd, const char *path, Schema_specification_st *create)
err2:
mysql_file_close(file, MYF(0));
err1:
if (!create->default_table_charset) // In case of error
create->default_table_charset= thd->variables.collation_server;
DBUG_RETURN(error);
}
/*
Retrieve database options by name. Load database options file or fetch from
cache.
@@ -652,11 +670,12 @@ err1:
db_create_info right after that.
RETURN VALUES (read NOTE!)
FALSE Success
TRUE Failed to retrieve options
0 File found
-1 No database file (file was not found or 'empty' file was cached)
1 Could not open it
*/
bool load_db_opt_by_name(THD *thd, const char *db_name,
int load_db_opt_by_name(THD *thd, const char *db_name,
Schema_specification_st *db_create_info)
{
char db_opt_path[FN_REFLEN + 1];

View File

@@ -37,8 +37,8 @@ bool mysql_opt_change_db(THD *thd,
bool my_dboptions_cache_init(void);
void my_dboptions_cache_free(void);
bool check_db_dir_existence(const char *db_name);
bool load_db_opt(THD *thd, const char *path, Schema_specification_st *create);
bool load_db_opt_by_name(THD *thd, const char *db_name,
int load_db_opt(THD *thd, const char *path, Schema_specification_st *create);
int load_db_opt_by_name(THD *thd, const char *db_name,
Schema_specification_st *db_create_info);
CHARSET_INFO *get_default_db_collation(THD *thd, const char *db_name);
bool my_dbopt_init(void);

View File

@@ -3652,7 +3652,14 @@ bool JOIN::add_fields_for_current_rowid(JOIN_TAB *cur, List<Item> *table_fields)
continue;
Item *item= new (thd->mem_root) Item_temptable_rowid(tab->table);
item->fix_fields(thd, 0);
table_fields->push_back(item, thd->mem_root);
/*
table_fields points to JOIN::all_fields or JOIN::tmp_all_fields_*.
These lists start with "added" fields and then their suffix is shared
with JOIN::fields_list or JOIN::tmp_fields_list*.
Because of that, new elements can only be added to the front of the list,
not to the back.
*/
table_fields->push_front(item, thd->mem_root);
cur->tmp_table_param->func_count++;
}
return 0;
@@ -15781,7 +15788,7 @@ uint check_join_cache_usage(JOIN_TAB *tab,
}
goto no_join_cache;
}
if (cache_level > 4 && no_bka_cache)
if (cache_level < 5 || no_bka_cache)
goto no_join_cache;
if ((flags & HA_MRR_NO_ASSOCIATION) &&

View File

@@ -1447,7 +1447,14 @@ bool mysqld_show_create_db(THD *thd, LEX_CSTRING *dbname,
DBUG_RETURN(TRUE);
}
load_db_opt_by_name(thd, dbname->str, &create);
if (load_db_opt_by_name(thd, dbname->str, &create) < 0)
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_UNKNOWN_ERROR,
"Database '%.192s' does not have a db.opt file. "
"You can create one with ALTER DATABASE if needed",
dbname->str);
}
}
mysqld_show_create_db_get_fields(thd, &field_list);
@@ -2969,25 +2976,27 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
while (thread_info *thd_info= arg.thread_infos.get())
{
const char *str;
ulonglong start_time;
CSET_STRING query;
protocol->prepare_for_resend();
protocol->store(thd_info->thread_id);
protocol->store(thd_info->user, strlen(thd_info->user), system_charset_info);
protocol->store(thd_info->host, strlen(thd_info->host), system_charset_info);
protocol->store_string_or_null(thd_info->db, system_charset_info);
if (thd_info->proc_info)
protocol->store(thd_info->proc_info, strlen(thd_info->proc_info),
system_charset_info);
if ((str= thd_info->proc_info))
protocol->store(str, strlen(str), system_charset_info);
else
protocol->store(&command_name[thd_info->command], system_charset_info);
if (thd_info->start_time && now > thd_info->start_time)
protocol->store_long((now - thd_info->start_time) / HRTIME_RESOLUTION);
if ((start_time= thd_info->start_time) && now > start_time)
protocol->store_long((now - start_time) / HRTIME_RESOLUTION);
else
protocol->store_null();
protocol->store_string_or_null(thd_info->state_info, system_charset_info);
if (thd_info->query_string.length())
protocol->store(thd_info->query_string.str(),
thd_info->query_string.length(),
thd_info->query_string.charset());
query= thd_info->query_string;
if (query.length() && query.str())
protocol->store(query.str(), query.length(), query.charset());
else
protocol->store_null();
if (!(thd->variables.old_behavior & OLD_MODE_NO_PROGRESS_INFO))

View File

@@ -2076,15 +2076,12 @@ public:
}
for (i= 0, state= calc_state; i < prefixes; i++, state++)
{
if (i < prefixes)
{
double val= state->prefix_count == 0 ?
0 : (double) state->entry_count / state->prefix_count;
index_info->collected_stats->set_avg_frequency(i, val);
}
}
}
};

View File

@@ -1582,12 +1582,19 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables,
else
{
#ifdef WITH_WSREP
if (WSREP(thd) && hton && !wsrep_should_replicate_ddl(thd, hton))
if (WSREP(thd) && hton)
{
handlerton *ht= hton;
// For partitioned tables resolve underlying handlerton
if (table->table && table->table->file->partition_ht())
ht= table->table->file->partition_ht();
if (!wsrep_should_replicate_ddl(thd, ht))
{
error= 1;
goto err;
}
#endif
}
#endif /* WITH_WSREP */
if (thd->locked_tables_mode == LTM_LOCK_TABLES ||
thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES)
@@ -1858,18 +1865,6 @@ err:
if (non_temp_tables_count)
query_cache_invalidate3(thd, tables, 0);
/*
We are always logging drop of temporary tables.
The reason is to handle the following case:
- Use statement based replication
- CREATE TEMPORARY TABLE foo (logged)
- set row based replication
- DROP TEMPORARY TABLE foo (needs to be logged)
This should be fixed so that we remember if creation of the
temporary table was logged and only log it if the creation was
logged.
*/
if (non_trans_tmp_table_deleted ||
trans_tmp_table_deleted || non_tmp_table_deleted)
{
@@ -4987,11 +4982,28 @@ bool wsrep_check_sequence(THD* thd,
// In Galera cluster we support only InnoDB sequences
if (db_type != DB_TYPE_INNODB)
{
// Currently any dynamic storage engine is not possible to identify
// using DB_TYPE_XXXX and ENGINE=SEQUENCE is one of them.
// Therefore, we get storage engine name from lex.
const LEX_CSTRING *tb_name= thd->lex->m_sql_cmd->option_storage_engine_name()->name();
// (1) CREATE TABLE ... ENGINE=SEQUENCE OR
// (2) ALTER TABLE ... ENGINE= OR
// Note in ALTER TABLE table->s->sequence != nullptr
// (3) CREATE SEQUENCE ... ENGINE=
if ((thd->lex->sql_command == SQLCOM_CREATE_TABLE &&
lex_string_eq(tb_name, STRING_WITH_LEN("SEQUENCE"))) ||
(thd->lex->sql_command == SQLCOM_ALTER_TABLE) ||
(thd->lex->sql_command == SQLCOM_CREATE_SEQUENCE))
{
my_error(ER_NOT_SUPPORTED_YET, MYF(0),
"non-InnoDB sequences in Galera cluster");
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_NOT_SUPPORTED_YET,
"ENGINE=%s not supported by Galera", tb_name->str);
return(true);
}
}
// In Galera cluster it is best to use INCREMENT BY 0 with CACHE
// or NOCACHE
@@ -10676,10 +10688,21 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db,
if (WSREP(thd) && table &&
(thd->lex->sql_command == SQLCOM_ALTER_TABLE ||
thd->lex->sql_command == SQLCOM_CREATE_INDEX ||
thd->lex->sql_command == SQLCOM_DROP_INDEX) &&
!wsrep_should_replicate_ddl(thd, table->s->db_type()))
thd->lex->sql_command == SQLCOM_DROP_INDEX))
{
handlerton *ht= table->s->db_type();
// If alter used ENGINE= we use that
if (create_info->used_fields & HA_CREATE_USED_ENGINE)
ht= create_info->db_type;
// For partitioned tables resolve underlying handlerton
else if (table->file->partition_ht())
ht= table->file->partition_ht();
if (!wsrep_should_replicate_ddl(thd, ht))
DBUG_RETURN(true);
#endif /* WITH_WSREP */
}
#endif
DEBUG_SYNC(thd, "alter_table_after_open_tables");

View File

@@ -622,7 +622,12 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
table= tables->table;
#ifdef WITH_WSREP
if (WSREP(thd) && !wsrep_should_replicate_ddl(thd, table->s->db_type()))
/* Resolve should we replicate creation of the trigger.
It should be replicated if storage engine(s) associated
to trigger are replicated by Galera.
*/
if (WSREP(thd) &&
!wsrep_should_replicate_ddl_iterate(thd, tables))
goto end;
#endif

View File

@@ -336,8 +336,15 @@ bool Sql_cmd_truncate_table::lock_table(THD *thd, TABLE_LIST *table_ref,
versioned= table->versioned();
hton= table->file->ht;
#ifdef WITH_WSREP
/* Resolve should we replicate truncate. It should
be replicated if storage engine(s) associated
are replicated by Galera. If this is partitioned
table we need to find out default partition
handlerton.
*/
if (WSREP(thd) &&
!wsrep_should_replicate_ddl(thd, hton))
!wsrep_should_replicate_ddl(thd, table->file->partition_ht() ?
table->file->partition_ht() : hton))
DBUG_RETURN(TRUE);
#endif
@@ -359,13 +366,23 @@ bool Sql_cmd_truncate_table::lock_table(THD *thd, TABLE_LIST *table_ref,
sequence= share->table_type == TABLE_TYPE_SEQUENCE;
hton= share->db_type();
#ifdef WITH_WSREP
if (WSREP(thd) &&
hton != view_pseudo_hton &&
!wsrep_should_replicate_ddl(thd, hton))
if (WSREP(thd) && hton != view_pseudo_hton)
{
/* Resolve should we replicate truncate. It should
be replicated if storage engine(s) associated
are replicated by Galera. If this is partitioned
table we need to find out default partition
handlerton.
*/
const handlerton *ht= share->default_part_plugin ?
plugin_hton(share->default_part_plugin) : hton;
if (ht && !wsrep_should_replicate_ddl(thd, ht))
{
tdc_release_share(share);
DBUG_RETURN(TRUE);
}
}
#endif
if (!versioned)

View File

@@ -452,8 +452,6 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
lex->link_first_table_back(view, link_to_local);
view->open_type= OT_BASE_ONLY;
WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL);
/*
ignore lock specs for CREATE statement
*/
@@ -471,13 +469,20 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
}
#ifdef WITH_WSREP
if(!wsrep_should_replicate_ddl_iterate(thd, static_cast<const TABLE_LIST *>(tables)))
/* Resolve should we replicate creation of the view.
It should be replicated if storage engine(s) associated
to view are replicated by Galera.
*/
if (WSREP(thd) &&
!wsrep_should_replicate_ddl_iterate(thd, tables))
{
res= TRUE;
goto err_no_relink;
}
#endif
WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL);
view= lex->unlink_first_table(&link_to_local);
if (check_db_dir_existence(view->db.str))

View File

@@ -6394,7 +6394,9 @@ static const char *wsrep_binlog_format_names[]=
static Sys_var_enum Sys_wsrep_forced_binlog_format(
"wsrep_forced_binlog_format", "binlog format to take effect over user's choice",
GLOBAL_VAR(wsrep_forced_binlog_format), CMD_LINE(REQUIRED_ARG),
wsrep_binlog_format_names, DEFAULT(BINLOG_FORMAT_UNSPEC));
wsrep_binlog_format_names, DEFAULT(BINLOG_FORMAT_UNSPEC),
NO_MUTEX_GUARD, NOT_IN_BINLOG,
ON_CHECK(wsrep_forced_binlog_format_check));
static Sys_var_mybool Sys_wsrep_recover_datadir(
"wsrep_recover", "Recover database state after crash and exit",

View File

@@ -1,5 +1,5 @@
/* Copyright (c) 2008, 2023 Codership Oy <http://www.codership.com>
Copyright (c) 2020, 2022, MariaDB
/* Copyright (c) 2008, 2024, Codership Oy <http://www.codership.com>
Copyright (c) 2020, 2024, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1608,7 +1608,12 @@ bool wsrep_sync_wait (THD* thd, uint mask)
This allows autocommit SELECTs and a first SELECT after SET AUTOCOMMIT=0
TODO: modify to check if thd has locked any rows.
*/
return thd->wsrep_cs().sync_wait(-1);
if (thd->wsrep_cs().sync_wait(-1))
{
wsrep_override_error(thd, thd->wsrep_cs().current_error(),
thd->wsrep_cs().current_error_status());
return true;
}
}
return false;
@@ -2492,19 +2497,12 @@ static int wsrep_drop_table_query(THD* thd, uchar** buf, size_t* buf_len)
/* Forward declarations. */
int wsrep_create_trigger_query(THD *thd, uchar** buf, size_t* buf_len);
bool wsrep_should_replicate_ddl_iterate(THD* thd, const TABLE_LIST* table_list)
{
if (WSREP(thd))
{
for (const TABLE_LIST* it= table_list; it; it= it->next_global)
{
if (it->table &&
!wsrep_should_replicate_ddl(thd, it->table->s->db_type()))
return false;
}
}
return true;
}
/*! Should DDL be replicated by Galera
*
* @param thd thread handle
* @param hton real storage engine handlerton
*
* @retval true if we should replicate DDL, false if not */
bool wsrep_should_replicate_ddl(THD* thd, const handlerton *hton)
{
@@ -2514,6 +2512,8 @@ bool wsrep_should_replicate_ddl(THD* thd, const handlerton *hton)
if (!hton)
return true;
DBUG_ASSERT(hton != nullptr);
switch (hton->db_type)
{
case DB_TYPE_INNODB:
@@ -2525,6 +2525,11 @@ bool wsrep_should_replicate_ddl(THD* thd, const handlerton *hton)
else
WSREP_DEBUG("wsrep OSU failed for %s", wsrep_thd_query(thd));
break;
case DB_TYPE_PARTITION_DB:
/* In most cases this means we could not find out
table->file->partition_ht() */
return true;
break;
case DB_TYPE_ARIA:
if (wsrep_check_mode(WSREP_MODE_REPLICATE_ARIA))
return true;
@@ -2545,6 +2550,27 @@ bool wsrep_should_replicate_ddl(THD* thd, const handlerton *hton)
ha_resolve_storage_engine_name(hton));
return false;
}
bool wsrep_should_replicate_ddl_iterate(THD* thd, const TABLE_LIST* table_list)
{
for (const TABLE_LIST* it= table_list; it; it= it->next_global)
{
if (it->table)
{
/* If this is partitioned table we need to find out
implementing storage engine handlerton.
*/
const handlerton *ht= it->table->file->partition_ht() ?
it->table->file->partition_ht() :
it->table->s->db_type();
if (!wsrep_should_replicate_ddl(thd, ht))
return false;
}
}
return true;
}
/*
Decide if statement should run in TOI.
@@ -2653,7 +2679,6 @@ bool wsrep_can_run_in_toi(THD *thd, const char *db, const char *table,
if (create_info)
{
const handlerton *hton= create_info->db_type;
if (!hton)
hton= ha_default_handlerton(thd);
if (!wsrep_should_replicate_ddl(thd, hton))
@@ -3220,11 +3245,9 @@ void wsrep_to_isolation_end(THD *thd)
@param requestor_ctx The MDL context of the requestor
@param ticket MDL ticket for the requested lock
@param key The key of the object (data) being protected
@retval TRUE Lock request can be granted
@retval FALSE Lock request cannot be granted
*/
void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx,
const MDL_ticket *ticket,
const MDL_key *key)
@@ -3306,16 +3329,21 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx,
(granted_thd->system_thread != NON_SYSTEM_THREAD &&
granted_thd->mdl_context.has_explicit_locks()))
{
WSREP_DEBUG("BF thread waiting for FLUSH for %s",
wsrep_thd_query(request_thd));
THD_STAGE_INFO(request_thd, stage_waiting_ddl);
WSREP_DEBUG("BF thread waiting for %s",
granted_thd->lex->sql_command == SQLCOM_FLUSH ? "FLUSH" : "BACKUP");
ticket->wsrep_report(wsrep_debug);
if (granted_thd->current_backup_stage != BACKUP_FINISHED &&
wsrep_check_mode(WSREP_MODE_BF_MARIABACKUP))
{
wsrep_abort_thd(request_thd, granted_thd, 1);
}
}
else if (granted_thd->lex->sql_command == SQLCOM_LOCK_TABLES)
{
WSREP_DEBUG("BF thread waiting for LOCK TABLES");
ticket->wsrep_report(wsrep_debug);
}
else if (request_thd->lex->sql_command == SQLCOM_DROP_TABLE)
{
WSREP_DEBUG("DROP caused BF abort, conf %s for %s",

View File

@@ -356,7 +356,7 @@ int wsrep_to_isolation_begin(THD *thd, const char *db_, const char *table_,
const wsrep::key_array *fk_tables= nullptr,
const HA_CREATE_INFO* create_info= nullptr);
bool wsrep_should_replicate_ddl(THD* thd, const handlerton *db_type);
bool wsrep_should_replicate_ddl(THD* thd, const handlerton *hton);
bool wsrep_should_replicate_ddl_iterate(THD* thd, const TABLE_LIST* table_list);
void wsrep_to_isolation_end(THD *thd);
@@ -615,7 +615,6 @@ bool wsrep_table_list_has_non_temp_tables(THD *thd, TABLE_LIST *tables);
#define wsrep_thr_deinit() do {} while(0)
#define wsrep_init_globals() do {} while(0)
#define wsrep_create_appliers(X) do {} while(0)
#define wsrep_should_replicate_ddl(X,Y) (1)
#define wsrep_cluster_address_exists() (false)
#define WSREP_MYSQL_DB (0)
#define WSREP_TO_ISOLATION_BEGIN(db_, table_, table_list_) do { } while(0)

View File

@@ -1,4 +1,4 @@
/* Copyright 2008-2022 Codership Oy <http://www.codership.com>
/* Copyright 2008-2023 Codership Oy <http://www.codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -333,14 +333,12 @@ bool wsrep_start_position_check (sys_var *self, THD* thd, set_var* var)
var->save_result.string_value.length);
start_pos_buf[var->save_result.string_value.length]= 0;
WSREP_DEBUG("SST wsrep_start_position check for new position %s old %s",
start_pos_buf, wsrep_start_position);
// Verify the format.
if (wsrep_start_position_verify(start_pos_buf)) return true;
// Give error if position is updated when wsrep is not enabled or
// provider is not loaded.
if ((!WSREP_ON || !Wsrep_server_state::instance().is_provider_loaded())
@@ -978,6 +976,22 @@ bool wsrep_max_ws_size_update(sys_var *self, THD *thd, enum_var_type)
bool wsrep_mode_check(sys_var *self, THD* thd, set_var* var)
{
ulonglong new_wsrep_mode= var->save_result.ulonglong_value;
ulonglong old_wsrep_mode= wsrep_mode;
wsrep_mode= new_wsrep_mode;
if (wsrep_check_mode(WSREP_MODE_REPLICATE_MYISAM) ||
wsrep_check_mode(WSREP_MODE_REPLICATE_ARIA))
{
if (!(wsrep_forced_binlog_format == BINLOG_FORMAT_UNSPEC ||
wsrep_forced_binlog_format == BINLOG_FORMAT_ROW))
{
my_message(ER_WRONG_ARGUMENTS, "wsrep_mode=[REPLICATE_MYISAM|REPLICATE_ARIA] "
"can't be enabled if wsrep_forced_binlog != [NONE|ROW]", MYF(0));
wsrep_mode= old_wsrep_mode;
return true;
}
}
wsrep_mode= old_wsrep_mode;
return false;
}
@@ -1121,3 +1135,28 @@ bool wsrep_gtid_domain_id_update(sys_var* self, THD *thd, enum_var_type)
return false;
}
bool wsrep_forced_binlog_format_check(sys_var *self, THD* thd, set_var* var)
{
ulonglong new_forced_binlog_format= var->save_result.ulonglong_value;
if (!(new_forced_binlog_format == BINLOG_FORMAT_UNSPEC ||
new_forced_binlog_format == BINLOG_FORMAT_ROW))
{
if (wsrep_check_mode(WSREP_MODE_BINLOG_ROW_FORMAT_ONLY))
{
my_message(ER_WRONG_ARGUMENTS, "wsrep_forced_binlog_format=[MIXED|STATEMENT] can't be set "
"if wsrep_mode=BINLOG_ROW_FORMAT_ONLY", MYF(0));
return true;
}
if (wsrep_check_mode(WSREP_MODE_REPLICATE_MYISAM) ||
wsrep_check_mode(WSREP_MODE_REPLICATE_ARIA))
{
my_message(ER_WRONG_ARGUMENTS, "wsrep_forced_binlog_format=[MIXED|STATEMENT] can't be set "
"if wsrep_mode=[REPLICATE_MYISAM|REPLICATE_ARIA]", MYF(0));
return true;
}
}
return false;
}

View File

@@ -1,4 +1,4 @@
/* Copyright (C) 2013-2022 Codership Oy <info@codership.com>
/* Copyright (C) 2013-2023 Codership Oy <info@codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -109,6 +109,7 @@ extern bool wsrep_gtid_seq_no_check CHECK_ARGS;
extern bool wsrep_gtid_domain_id_update UPDATE_ARGS;
extern bool wsrep_mode_check CHECK_ARGS;
extern bool wsrep_forced_binlog_format_check CHECK_ARGS;
#else /* WITH_WSREP */
#define wsrep_provider_init(X)

View File

@@ -51,6 +51,7 @@ struct io_schemes_st
static const io_schemes_st federated_io_schemes[] =
{
{ "mysql", &instantiate_io_mysql },
{ "mariadb", &instantiate_io_mysql },
{ "null", instantiate_io_null } /* must be last element */
};

View File

@@ -222,7 +222,6 @@ SET(INNOBASE_SOURCES
include/dict0pagecompress.h
include/dict0pagecompress.inl
include/dict0stats.h
include/dict0stats.inl
include/dict0stats_bg.h
include/dict0types.h
include/dyn0buf.h

View File

@@ -1910,7 +1910,8 @@ inline void log_t::write_checkpoint(lsn_t end_lsn) noexcept
resize_flush_buf= nullptr;
resize_target= 0;
resize_lsn.store(0, std::memory_order_relaxed);
writer_update();
resize_initiator= nullptr;
writer_update(false);
}
log_resize_release();

View File

@@ -195,71 +195,6 @@ dict_tables_have_same_db(
return(FALSE);
}
/** Decrement the count of open handles */
void dict_table_close(dict_table_t *table)
{
if (table->get_ref_count() == 1 &&
dict_stats_is_persistent_enabled(table) &&
strchr(table->name.m_name, '/'))
{
/* It looks like we are closing the last handle. The user could
have executed FLUSH TABLES in order to have the statistics reloaded
from the InnoDB persistent statistics tables. We must acquire
exclusive dict_sys.latch to prevent a race condition with another
thread concurrently acquiring a handle on the table. */
dict_sys.lock(SRW_LOCK_CALL);
if (table->release())
{
table->stats_mutex_lock();
if (table->get_ref_count() == 0)
dict_stats_deinit(table);
table->stats_mutex_unlock();
}
dict_sys.unlock();
}
else
table->release();
}
/** Decrements the count of open handles of a table.
@param[in,out] table table
@param[in] dict_locked whether dict_sys.latch is being held
@param[in] thd thread to release MDL
@param[in] mdl metadata lock or NULL if the thread
is a foreground one. */
void
dict_table_close(
dict_table_t* table,
bool dict_locked,
THD* thd,
MDL_ticket* mdl)
{
if (!dict_locked)
dict_table_close(table);
else
{
if (table->release() && dict_stats_is_persistent_enabled(table) &&
strchr(table->name.m_name, '/'))
{
/* Force persistent stats re-read upon next open of the table so
that FLUSH TABLE can be used to forcibly fetch stats from disk if
they have been manually modified. */
table->stats_mutex_lock();
if (table->get_ref_count() == 0)
dict_stats_deinit(table);
table->stats_mutex_unlock();
}
ut_ad(dict_lru_validate());
ut_ad(dict_sys.find(table));
}
if (!thd || !mdl);
else if (MDL_context *mdl_context= static_cast<MDL_context*>
(thd_mdl_context(thd)))
mdl_context->release_lock(mdl);
}
/** Check if the table has a given (non_virtual) column.
@param[in] table table object
@param[in] col_name column name
@@ -586,6 +521,14 @@ dict_index_get_nth_field_pos(
return(ULINT_UNDEFINED);
}
void mdl_release(THD *thd, MDL_ticket *mdl) noexcept
{
if (!thd || !mdl);
else if (MDL_context *mdl_context= static_cast<MDL_context*>
(thd_mdl_context(thd)))
mdl_context->release_lock(mdl);
}
/** Parse the table file name into table name and database name.
@tparam dict_frozen whether the caller holds dict_sys.latch
@param[in,out] db_name database name buffer

View File

@@ -359,7 +359,7 @@ dict_table_schema_check(
if (!table) {
if (opt_bootstrap)
return DB_TABLE_NOT_FOUND;
return DB_STATS_DO_NOT_EXIST;
if (req_schema == &table_stats_schema) {
if (innodb_table_stats_not_found_reported) {
return DB_STATS_DO_NOT_EXIST;
@@ -377,10 +377,10 @@ dict_table_schema_check(
snprintf(errstr, errstr_sz, "Table %s not found.",
req_schema->table_name_sql);
return DB_TABLE_NOT_FOUND;
return DB_STATS_DO_NOT_EXIST;
}
if (!table->is_readable() && !table->space) {
if (!table->is_readable() || !table->space) {
/* missing tablespace */
snprintf(errstr, errstr_sz,
"Tablespace for table %s is missing.",
@@ -491,11 +491,8 @@ dict_table_schema_check(
return DB_SUCCESS;
}
/*********************************************************************//**
Checks whether the persistent statistics storage exists and that all
tables have the proper structure.
@return true if exists and all tables are ok */
static bool dict_stats_persistent_storage_check(bool dict_already_locked)
dict_stats_schema_check
dict_stats_persistent_storage_check(bool dict_already_locked) noexcept
{
char errstr[512];
dberr_t ret;
@@ -521,14 +518,14 @@ static bool dict_stats_persistent_storage_check(bool dict_already_locked)
switch (ret) {
case DB_SUCCESS:
return true;
return SCHEMA_OK;
case DB_STATS_DO_NOT_EXIST:
return SCHEMA_NOT_EXIST;
default:
if (!opt_bootstrap) {
ib::error() << errstr;
sql_print_error("InnoDB: %s", errstr);
}
/* fall through */
case DB_STATS_DO_NOT_EXIST:
return false;
return SCHEMA_INVALID;
}
}
@@ -544,13 +541,16 @@ dberr_t dict_stats_exec_sql(pars_info_t *pinfo, const char* sql, trx_t *trx)
{
ut_ad(dict_sys.locked());
if (!dict_stats_persistent_storage_check(true))
{
pars_info_free(pinfo);
return DB_STATS_DO_NOT_EXIST;
switch (dict_stats_persistent_storage_check(true)) {
case SCHEMA_OK:
return que_eval_sql(pinfo, sql, trx);
case SCHEMA_INVALID:
case SCHEMA_NOT_EXIST:
break;
}
return que_eval_sql(pinfo, sql, trx);
pars_info_free(pinfo);
return DB_STATS_DO_NOT_EXIST;
}
/*********************************************************************//**
@@ -587,7 +587,7 @@ void dict_stats_empty_table(dict_table_t *table)
table->stat_clustered_index_size = 1;
/* 1 page for each index, not counting the clustered */
table->stat_sum_of_other_index_sizes
= UT_LIST_GET_LEN(table->indexes) - 1;
= uint32_t(UT_LIST_GET_LEN(table->indexes) - 1);
table->stat_modified_counter = 0;
dict_index_t* index;
@@ -603,7 +603,7 @@ void dict_stats_empty_table(dict_table_t *table)
dict_stats_empty_index(index);
}
table->stat_initialized = TRUE;
table->stat = table->stat | dict_table_t::STATS_INITIALIZED;
table->stats_mutex_unlock();
}
@@ -644,16 +644,10 @@ dict_stats_assert_initialized(
/*==========================*/
const dict_table_t* table) /*!< in: table */
{
ut_a(table->stat_initialized);
MEM_CHECK_DEFINED(&table->stats_last_recalc,
sizeof table->stats_last_recalc);
MEM_CHECK_DEFINED(&table->stat_persistent,
sizeof table->stat_persistent);
MEM_CHECK_DEFINED(&table->stats_auto_recalc,
sizeof table->stats_auto_recalc);
MEM_CHECK_DEFINED(&table->stat, sizeof table->stat);
MEM_CHECK_DEFINED(&table->stats_sample_pages,
sizeof table->stats_sample_pages);
@@ -822,8 +816,8 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index,
ulint n_cols;
ib_uint64_t* n_diff;
ib_uint64_t* n_not_null;
ibool stats_null_not_equal;
uintmax_t n_sample_pages=1; /* number of pages to sample */
bool stats_null_not_equal;
uint32_t n_sample_pages=1; /* number of pages to sample */
ulint not_empty_flag = 0;
ulint total_external_size = 0;
uintmax_t add_on;
@@ -861,11 +855,11 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index,
case SRV_STATS_NULLS_UNEQUAL:
/* for both SRV_STATS_NULLS_IGNORED and SRV_STATS_NULLS_UNEQUAL
case, we will treat NULLs as unequal value */
stats_null_not_equal = TRUE;
stats_null_not_equal = true;
break;
case SRV_STATS_NULLS_EQUAL:
stats_null_not_equal = FALSE;
stats_null_not_equal = false;
break;
default:
@@ -917,18 +911,20 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index,
so taking all case2 paths is I, our expression is:
n_pages = S < I? min(I,L) : I
*/
if (index->stat_index_size > 1) {
n_sample_pages = (srv_stats_transient_sample_pages < index->stat_index_size)
? ut_min(index->stat_index_size,
static_cast<ulint>(
log2(double(index->stat_index_size))
* double(srv_stats_transient_sample_pages)))
: index->stat_index_size;
if (uint32_t I = index->stat_index_size) {
const uint32_t S{srv_stats_transient_sample_pages};
n_sample_pages = S < I
? std::min(I,
uint32_t(log2(double(I))
* double(S)))
: I;
}
}
/* Sanity check */
ut_ad(n_sample_pages > 0 && n_sample_pages <= (index->stat_index_size <= 1 ? 1 : index->stat_index_size));
ut_ad(n_sample_pages);
ut_ad(n_sample_pages <= (index->stat_index_size <= 1
? 1 : index->stat_index_size));
/* We sample some pages in the index to get an estimate */
btr_cur_t cursor;
@@ -1141,7 +1137,7 @@ invalid:
mtr.x_lock_space(index->table->space);
ulint dummy, size;
uint32_t dummy, size;
index->stat_index_size
= fseg_n_reserved_pages(*root, PAGE_HEADER
+ PAGE_BTR_SEG_LEAF
@@ -1181,24 +1177,12 @@ invalid:
return err;
}
/*********************************************************************//**
Calculates new estimates for table and index statistics. This function
is relatively quick and is used to calculate transient statistics that
are not saved on disk.
This was the only way to calculate statistics before the
Persistent Statistics feature was introduced.
@return error code
@retval DB_SUCCESS_LOCKED REC if the table under bulk insert operation */
static
dberr_t
dict_stats_update_transient(
/*========================*/
dict_table_t* table) /*!< in/out: table */
dberr_t dict_stats_update_transient(dict_table_t *table) noexcept
{
ut_ad(!table->stats_mutex_is_owner());
dict_index_t* index;
ulint sum_of_index_sizes = 0;
uint32_t sum_of_index_sizes = 0;
dberr_t err = DB_SUCCESS;
/* Find out the sizes of the indexes and how many different values
@@ -1206,17 +1190,16 @@ dict_stats_update_transient(
index = dict_table_get_first_index(table);
if (!table->space) {
/* Nothing to do. */
empty_table:
if (!index || !table->space) {
dict_stats_empty_table(table);
return err;
} else if (index == NULL) {
/* Table definition is corrupt */
return DB_SUCCESS;
}
ib::warn() << "Table " << table->name
<< " has no indexes. Cannot calculate statistics.";
goto empty_table;
if (trx_id_t bulk_trx_id = table->bulk_trx_id) {
if (trx_sys.find(nullptr, bulk_trx_id, false)) {
dict_stats_empty_table(table);
return DB_SUCCESS_LOCKED_REC;
}
}
for (; index != NULL; index = dict_table_get_next_index(index)) {
@@ -1254,7 +1237,7 @@ empty_table:
table->stat_modified_counter = 0;
table->stat_initialized = TRUE;
table->stat = table->stat | dict_table_t::STATS_INITIALIZED;
table->stats_mutex_unlock();
@@ -2190,8 +2173,8 @@ dict_stats_analyze_index_for_n_prefix(
struct index_stats_t
{
std::vector<index_field_stats_t> stats;
ulint index_size;
ulint n_leaf_pages;
uint32_t index_size;
uint32_t n_leaf_pages;
index_stats_t(ulint n_uniq) : index_size(1), n_leaf_pages(1)
{
@@ -2330,7 +2313,7 @@ empty_index:
uint16_t root_level = btr_page_get_level(root->page.frame);
mtr.x_lock_space(index->table->space);
ulint dummy, size;
uint32_t dummy, size;
result.index_size
= fseg_n_reserved_pages(*root, PAGE_HEADER + PAGE_BTR_SEG_LEAF
+ root->page.frame, &size, &mtr)
@@ -2600,17 +2583,7 @@ found_level:
DBUG_RETURN(result);
}
/*********************************************************************//**
Calculates new estimates for table and index statistics. This function
is relatively slow and is used to calculate persistent statistics that
will be saved on disk.
@return DB_SUCCESS or error code
@retval DB_SUCCESS_LOCKED_REC if the table under bulk insert operation */
static
dberr_t
dict_stats_update_persistent(
/*=========================*/
dict_table_t* table) /*!< in/out: table */
dberr_t dict_stats_update_persistent(dict_table_t *table) noexcept
{
dict_index_t* index;
@@ -2618,6 +2591,13 @@ dict_stats_update_persistent(
DEBUG_SYNC_C("dict_stats_update_persistent");
if (trx_id_t bulk_trx_id = table->bulk_trx_id) {
if (trx_sys.find(nullptr, bulk_trx_id, false)) {
dict_stats_empty_table(table);
return DB_SUCCESS_LOCKED_REC;
}
}
/* analyze the clustered index first */
index = dict_table_get_first_index(table);
@@ -2706,7 +2686,7 @@ dict_stats_update_persistent(
table->stat_modified_counter = 0;
table->stat_initialized = TRUE;
table->stat = table->stat | dict_table_t::STATS_INITIALIZED;
dict_stats_assert_initialized(table);
@@ -2715,6 +2695,18 @@ dict_stats_update_persistent(
return(DB_SUCCESS);
}
dberr_t dict_stats_update_persistent_try(dict_table_t *table)
{
if (table->stats_is_persistent() &&
dict_stats_persistent_storage_check(false) == SCHEMA_OK)
{
if (dberr_t err= dict_stats_update_persistent(table))
return err;
return dict_stats_save(table);
}
return DB_SUCCESS;
}
#include "mysql_com.h"
/** Save an individual index's statistic into the persistent statistics
storage.
@@ -2793,14 +2785,14 @@ dict_stats_save_index_stat(
"END;", trx);
if (UNIV_UNLIKELY(ret != DB_SUCCESS)) {
if (innodb_index_stats_not_found == false &&
index->stats_error_printed == false) {
if (innodb_index_stats_not_found == false
&& !index->table->stats_error_printed) {
index->table->stats_error_printed = true;
ib::error() << "Cannot save index statistics for table "
<< index->table->name
<< ", index " << index->name
<< ", stat name \"" << stat_name << "\": "
<< ret;
index->stats_error_printed = true;
}
}
@@ -2838,17 +2830,11 @@ static dberr_t dict_stats_report_error(dict_table_t* table)
return err;
}
/** Save the table's statistics into the persistent statistics storage.
@param[in] table table whose stats to save
@param[in] only_for_index if this is non-NULL, then stats for indexes
that are not equal to it will not be saved, if NULL, then all indexes' stats
are saved
/** Save the persistent statistics of a table or an index.
@param table table whose stats to save
@param only_for_index the index ID to save statistics for (0=all)
@return DB_SUCCESS or error code */
static
dberr_t
dict_stats_save(
dict_table_t* table,
const index_id_t* only_for_index)
dberr_t dict_stats_save(dict_table_t* table, index_id_t index_id)
{
pars_info_t* pinfo;
char db_utf8[MAX_DB_UTF8_LEN];
@@ -2885,7 +2871,7 @@ dict_stats_save(
|| strcmp(table_stats->name.m_name, TABLE_STATS_NAME)) {
release_and_exit:
if (table_stats) {
dict_table_close(table_stats, false, thd, mdl_table);
dict_table_close(table_stats, thd, mdl_table);
}
return DB_STATS_DO_NOT_EXIST;
}
@@ -2902,7 +2888,7 @@ release_and_exit:
goto release_and_exit;
}
if (strcmp(index_stats->name.m_name, INDEX_STATS_NAME)) {
dict_table_close(index_stats, false, thd, mdl_index);
dict_table_close(index_stats, thd, mdl_index);
goto release_and_exit;
}
@@ -2962,8 +2948,14 @@ release_and_exit:
"END;", trx);
if (UNIV_UNLIKELY(ret != DB_SUCCESS)) {
ib::error() << "Cannot save table statistics for table "
<< table->name << ": " << ret;
sql_print_error("InnoDB: Cannot save table statistics for"
#ifdef EMBEDDED_LIBRARY
" table %.*s.%s: %s",
#else
" table %`.*s.%`s: %s",
#endif
int(table->name.dblen()), table->name.m_name,
table->name.basename(), ut_strerr(ret));
rollback_and_exit:
trx->rollback();
free_and_exit:
@@ -2971,8 +2963,8 @@ free_and_exit:
dict_sys.unlock();
unlocked_free_and_exit:
trx->free();
dict_table_close(table_stats, false, thd, mdl_table);
dict_table_close(index_stats, false, thd, mdl_index);
dict_table_close(table_stats, thd, mdl_table);
dict_table_close(index_stats, thd, mdl_index);
return ret;
}
@@ -3006,7 +2998,7 @@ unlocked_free_and_exit:
index = it->second;
if (only_for_index != NULL && index->id != *only_for_index) {
if (index_id != 0 && index->id != index_id) {
continue;
}
@@ -3074,6 +3066,14 @@ unlocked_free_and_exit:
goto free_and_exit;
}
void dict_stats_empty_table_and_save(dict_table_t *table)
{
dict_stats_empty_table(table);
if (table->stats_is_persistent() &&
dict_stats_persistent_storage_check(false) == SCHEMA_OK)
dict_stats_save(table);
}
/*********************************************************************//**
Called for the row that is selected by
SELECT ... FROM mysql.innodb_table_stats WHERE table='...'
@@ -3122,8 +3122,7 @@ dict_stats_fetch_table_stats_step(
ut_a(len == 8);
table->stat_clustered_index_size
= std::max<ulint>(
(ulint) mach_read_from_8(data), 1);
= std::max(mach_read_from_4(data + 4), 1U);
break;
}
@@ -3132,18 +3131,9 @@ dict_stats_fetch_table_stats_step(
ut_a(dtype_get_mtype(type) == DATA_INT);
ut_a(len == 8);
ulint stat_other_idx_size
= (ulint) mach_read_from_8(data);
if (!stat_other_idx_size
&& UT_LIST_GET_LEN(table->indexes) > 1) {
stat_other_idx_size
= UT_LIST_GET_LEN(table->indexes) - 1;
}
table->stat_sum_of_other_index_sizes
= std::max<ulint>(
(ulint) mach_read_from_8(data),
UT_LIST_GET_LEN(table->indexes) - 1);
table->stat_sum_of_other_index_sizes = std::max(
mach_read_from_4(data + 4),
uint32_t(UT_LIST_GET_LEN(table->indexes) - 1));
break;
}
default:
@@ -3328,14 +3318,12 @@ dict_stats_fetch_index_stats_step(
if (stat_name_len == 4 /* strlen("size") */
&& strncasecmp("size", stat_name, stat_name_len) == 0) {
index->stat_index_size
= std::max<ulint>((ulint) stat_value, 1);
index->stat_index_size = std::max(uint32_t(stat_value), 1U);
arg->stats_were_modified = true;
} else if (stat_name_len == 12 /* strlen("n_leaf_pages") */
&& strncasecmp("n_leaf_pages", stat_name, stat_name_len)
== 0) {
index->stat_n_leaf_pages
= std::max<ulint>((ulint) stat_value, 1);
index->stat_n_leaf_pages = std::max(uint32_t(stat_value), 1U);
arg->stats_were_modified = true;
} else if (stat_name_len > PFX_LEN /* e.g. stat_name=="n_diff_pfx01" */
&& strncasecmp(PFX, stat_name, PFX_LEN) == 0) {
@@ -3425,19 +3413,11 @@ dict_stats_fetch_index_stats_step(
return(TRUE);
}
/*********************************************************************//**
Read table's statistics from the persistent statistics storage.
@return DB_SUCCESS or error code */
static
dberr_t
dict_stats_fetch_from_ps(
/*=====================*/
dict_table_t* table) /*!< in/out: table */
/** Read the stored persistent statistics of a table. */
dberr_t dict_stats_fetch_from_ps(dict_table_t *table)
{
index_fetch_t index_fetch_arg;
trx_t* trx;
pars_info_t* pinfo;
dberr_t ret;
char db_utf8[MAX_DB_UTF8_LEN];
char table_utf8[MAX_TABLE_UTF8_LEN];
@@ -3451,34 +3431,36 @@ dict_stats_fetch_from_ps(
MDL_ticket *mdl_table = nullptr, *mdl_index = nullptr;
dict_table_t* table_stats = dict_table_open_on_name(
TABLE_STATS_NAME, false, DICT_ERR_IGNORE_NONE);
if (table_stats) {
if (!table_stats) {
return DB_STATS_DO_NOT_EXIST;
}
dict_table_t* index_stats = dict_table_open_on_name(
INDEX_STATS_NAME, false, DICT_ERR_IGNORE_NONE);
if (!index_stats) {
table_stats->release();
return DB_STATS_DO_NOT_EXIST;
}
dict_sys.freeze(SRW_LOCK_CALL);
table_stats = dict_acquire_mdl_shared<false>(table_stats, thd,
&mdl_table);
dict_sys.unfreeze();
}
if (!table_stats
|| strcmp(table_stats->name.m_name, TABLE_STATS_NAME)) {
release_and_exit:
dict_sys.unfreeze();
if (table_stats) {
dict_table_close(table_stats, false, thd, mdl_table);
dict_table_close(table_stats, thd, mdl_table);
}
if (index_stats) {
dict_table_close(index_stats, thd, mdl_index);
}
return DB_STATS_DO_NOT_EXIST;
}
dict_table_t* index_stats = dict_table_open_on_name(
INDEX_STATS_NAME, false, DICT_ERR_IGNORE_NONE);
if (index_stats) {
dict_sys.freeze(SRW_LOCK_CALL);
index_stats = dict_acquire_mdl_shared<false>(index_stats, thd,
&mdl_index);
dict_sys.unfreeze();
}
if (!index_stats) {
goto release_and_exit;
}
if (strcmp(index_stats->name.m_name, INDEX_STATS_NAME)) {
dict_table_close(index_stats, false, thd, mdl_index);
if (!index_stats
|| strcmp(index_stats->name.m_name, INDEX_STATS_NAME)) {
goto release_and_exit;
}
@@ -3486,10 +3468,6 @@ release_and_exit:
DEBUG_SYNC(thd, "dict_stats_mdl_acquired");
#endif /* ENABLED_DEBUG_SYNC */
trx = trx_create();
trx_start_internal_read_only(trx);
dict_fs2utf8(table->name.m_name, db_utf8, sizeof(db_utf8),
table_utf8, sizeof(table_utf8));
@@ -3510,8 +3488,10 @@ release_and_exit:
"fetch_index_stats_step",
dict_stats_fetch_index_stats_step,
&index_fetch_arg);
dict_sys.lock(SRW_LOCK_CALL); /* FIXME: remove this */
ret = que_eval_sql(pinfo,
dict_sys.unfreeze();
dict_sys.lock(SRW_LOCK_CALL);
que_t* graph = pars_sql(
pinfo,
"PROCEDURE FETCH_STATS () IS\n"
"found INT;\n"
"DECLARE FUNCTION fetch_table_stats_step;\n"
@@ -3564,22 +3544,29 @@ release_and_exit:
"END LOOP;\n"
"CLOSE index_stats_cur;\n"
"END;", trx);
/* pinfo is freed by que_eval_sql() */
"END;");
dict_sys.unlock();
dict_table_close(table_stats, false, thd, mdl_table);
dict_table_close(index_stats, false, thd, mdl_index);
trx_t* trx = trx_create();
trx->graph = nullptr;
graph->trx = trx;
trx_start_internal_read_only(trx);
que_run_threads(que_fork_start_command(graph));
que_graph_free(graph);
dict_table_close(table_stats, thd, mdl_table);
dict_table_close(index_stats, thd, mdl_index);
trx_commit_for_mysql(trx);
dberr_t ret = trx->error_state;
trx->free();
if (!index_fetch_arg.stats_were_modified) {
return(DB_STATS_DO_NOT_EXIST);
return DB_STATS_DO_NOT_EXIST;
}
return(ret);
return ret;
}
/*********************************************************************//**
@@ -3589,249 +3576,46 @@ dict_stats_update_for_index(
/*========================*/
dict_index_t* index) /*!< in/out: index */
{
DBUG_ENTER("dict_stats_update_for_index");
dict_table_t *const table= index->table;
ut_ad(table->stat_initialized());
if (dict_stats_is_persistent_enabled(index->table)) {
if (dict_stats_persistent_storage_check(false)) {
index_stats_t stats = dict_stats_analyze_index(index);
index->table->stats_mutex_lock();
if (table->stats_is_persistent())
switch (dict_stats_persistent_storage_check(false)) {
case SCHEMA_NOT_EXIST:
break;
case SCHEMA_INVALID:
if (table->stats_error_printed)
break;
table->stats_error_printed= true;
sql_print_information("InnoDB: Recalculation of persistent statistics"
#ifdef EMBEDDED_LIBRARY
" requested for table %.*s.%s index %s but"
#else
" requested for table %`.*s.%`s index %`s but"
#endif
" the required persistent statistics storage"
" is corrupted. Using transient stats instead.",
int(table->name.dblen()), table->name.m_name,
table->name.basename(), index->name());
break;
case SCHEMA_OK:
index_stats_t stats{dict_stats_analyze_index(index)};
table->stats_mutex_lock();
index->stat_index_size = stats.index_size;
index->stat_n_leaf_pages = stats.n_leaf_pages;
for (size_t i = 0; i < stats.stats.size(); ++i) {
index->stat_n_diff_key_vals[i]
= stats.stats[i].n_diff_key_vals;
index->stat_n_sample_sizes[i]
= stats.stats[i].n_sample_sizes;
index->stat_n_non_null_key_vals[i]
= stats.stats[i].n_non_null_key_vals;
}
index->table->stat_sum_of_other_index_sizes
+= index->stat_index_size;
index->table->stats_mutex_unlock();
dict_stats_save(index->table, &index->id);
DBUG_VOID_RETURN;
}
/* else */
if (innodb_index_stats_not_found == false &&
index->stats_error_printed == false) {
/* Fall back to transient stats since the persistent
storage is not present or is corrupted */
ib::info() << "Recalculation of persistent statistics"
" requested for table " << index->table->name
<< " index " << index->name
<< " but the required"
" persistent statistics storage is not present or is"
" corrupted. Using transient stats instead.";
index->stats_error_printed = false;
for (size_t i = 0; i < stats.stats.size(); ++i)
{
index->stat_n_diff_key_vals[i]= stats.stats[i].n_diff_key_vals;
index->stat_n_sample_sizes[i]= stats.stats[i].n_sample_sizes;
index->stat_n_non_null_key_vals[i]= stats.stats[i].n_non_null_key_vals;
}
table->stat_sum_of_other_index_sizes+= index->stat_index_size;
table->stats_mutex_unlock();
dict_stats_save(table, index->id);
return;
}
dict_stats_update_transient_for_index(index);
DBUG_VOID_RETURN;
}
/*********************************************************************//**
Calculates new estimates for table and index statistics. The statistics
are used in query optimization.
@return DB_SUCCESS or error code
@retval DB_SUCCESS_LOCKED_REC if the table under bulk insert operation */
dberr_t
dict_stats_update(
/*==============*/
dict_table_t* table, /*!< in/out: table */
dict_stats_upd_option_t stats_upd_option)
/*!< in: whether to (re) calc
the stats or to fetch them from
the persistent statistics
storage */
{
ut_ad(!table->stats_mutex_is_owner());
if (!table->is_readable()) {
return (dict_stats_report_error(table));
} else if (srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN) {
/* If we have set a high innodb_force_recovery level, do
not calculate statistics, as a badly corrupted index can
cause a crash in it. */
dict_stats_empty_table(table);
return(DB_SUCCESS);
}
if (trx_id_t bulk_trx_id = table->bulk_trx_id) {
if (trx_sys.find(nullptr, bulk_trx_id, false)) {
dict_stats_empty_table(table);
return DB_SUCCESS_LOCKED_REC;
}
}
switch (stats_upd_option) {
case DICT_STATS_RECALC_PERSISTENT:
if (srv_read_only_mode) {
goto transient;
}
/* Persistent recalculation requested, called from
1) ANALYZE TABLE, or
2) the auto recalculation background thread, or
3) open table if stats do not exist on disk and auto recalc
is enabled */
/* InnoDB internal tables (e.g. SYS_TABLES) cannot have
persistent stats enabled */
ut_a(strchr(table->name.m_name, '/') != NULL);
/* check if the persistent statistics storage exists
before calling the potentially slow function
dict_stats_update_persistent(); that is a
prerequisite for dict_stats_save() succeeding */
if (dict_stats_persistent_storage_check(false)) {
dberr_t err;
err = dict_stats_update_persistent(table);
if (err != DB_SUCCESS) {
return(err);
}
err = dict_stats_save(table, NULL);
return(err);
}
/* Fall back to transient stats since the persistent
storage is not present or is corrupted */
if (innodb_table_stats_not_found == false &&
table->stats_error_printed == false) {
ib::warn() << "Recalculation of persistent statistics"
" requested for table "
<< table->name
<< " but the required persistent"
" statistics storage is not present or is corrupted."
" Using transient stats instead.";
table->stats_error_printed = true;
}
goto transient;
case DICT_STATS_RECALC_TRANSIENT:
goto transient;
case DICT_STATS_EMPTY_TABLE:
dict_stats_empty_table(table);
/* If table is using persistent stats,
then save the stats on disk */
if (dict_stats_is_persistent_enabled(table)) {
if (dict_stats_persistent_storage_check(false)) {
return(dict_stats_save(table, NULL));
}
return(DB_STATS_DO_NOT_EXIST);
}
return(DB_SUCCESS);
case DICT_STATS_FETCH_ONLY_IF_NOT_IN_MEMORY:
/* fetch requested, either fetch from persistent statistics
storage or use the old method */
if (table->stat_initialized) {
return(DB_SUCCESS);
}
/* InnoDB internal tables (e.g. SYS_TABLES) cannot have
persistent stats enabled */
ut_a(strchr(table->name.m_name, '/') != NULL);
if (!dict_stats_persistent_storage_check(false)) {
/* persistent statistics storage does not exist
or is corrupted, calculate the transient stats */
if (innodb_table_stats_not_found == false &&
table->stats_error_printed == false &&
!opt_bootstrap) {
ib::error() << "Fetch of persistent statistics"
" requested for table "
<< table->name
<< " but the required system tables "
<< TABLE_STATS_NAME_PRINT
<< " and " << INDEX_STATS_NAME_PRINT
<< " are not present or have unexpected"
" structure. Using transient stats instead.";
table->stats_error_printed = true;
}
goto transient;
}
dberr_t err = dict_stats_fetch_from_ps(table);
switch (err) {
case DB_SUCCESS:
return(DB_SUCCESS);
case DB_STATS_DO_NOT_EXIST:
if (srv_read_only_mode) {
goto transient;
}
#ifdef WITH_WSREP
if (wsrep_thd_skip_locking(current_thd)) {
goto transient;
}
#endif
if (dict_stats_auto_recalc_is_enabled(table)) {
return(dict_stats_update(
table,
DICT_STATS_RECALC_PERSISTENT));
}
ib::info() << "Trying to use table " << table->name
<< " which has persistent statistics enabled,"
" but auto recalculation turned off and the"
" statistics do not exist in "
TABLE_STATS_NAME_PRINT
" and " INDEX_STATS_NAME_PRINT
". Please either run \"ANALYZE TABLE "
<< table->name << ";\" manually or enable the"
" auto recalculation with \"ALTER TABLE "
<< table->name << " STATS_AUTO_RECALC=1;\"."
" InnoDB will now use transient statistics for "
<< table->name << ".";
goto transient;
default:
if (innodb_table_stats_not_found == false &&
table->stats_error_printed == false) {
ib::error() << "Error fetching persistent statistics"
" for table "
<< table->name
<< " from " TABLE_STATS_NAME_PRINT " and "
INDEX_STATS_NAME_PRINT ": " << err
<< ". Using transient stats method instead.";
}
goto transient;
}
/* no "default:" in order to produce a compilation warning
about unhandled enumeration value */
}
transient:
return dict_stats_update_transient(table);
}
/** Execute DELETE FROM mysql.innodb_table_stats
@@ -3981,7 +3765,7 @@ dberr_t dict_stats_rename_index(const char *db, const char *table,
const char *old_name, const char *new_name,
trx_t *trx)
{
if (!dict_stats_persistent_storage_check(true))
if (dict_stats_persistent_storage_check(true) != SCHEMA_OK)
return DB_STATS_DO_NOT_EXIST;
pars_info_t *pinfo= pars_info_create();
@@ -4117,7 +3901,7 @@ test_dict_stats_save()
index2_stat_n_sample_sizes[2] = TEST_IDX2_N_DIFF3_SAMPLE_SIZE;
index2_stat_n_sample_sizes[3] = TEST_IDX2_N_DIFF4_SAMPLE_SIZE;
ret = dict_stats_save(&table, NULL);
ret = dict_stats_save(&table);
ut_a(ret == DB_SUCCESS);

View File

@@ -131,7 +131,9 @@ schedule new estimates for table and index statistics to be calculated.
void dict_stats_update_if_needed_func(dict_table_t *table)
#endif
{
if (UNIV_UNLIKELY(!table->stat_initialized)) {
uint32_t stat{table->stat};
if (UNIV_UNLIKELY(!table->stat_initialized(stat))) {
/* The table may have been evicted from dict_sys
and reloaded internally by InnoDB for FOREIGN KEY
processing, but not reloaded by the SQL layer.
@@ -150,13 +152,9 @@ void dict_stats_update_if_needed_func(dict_table_t *table)
ulonglong counter = table->stat_modified_counter++;
ulonglong n_rows = dict_table_get_n_rows(table);
if (dict_stats_is_persistent_enabled(table)) {
if (table->name.is_temporary()) {
return;
}
if (counter > n_rows / 10 /* 10% */
&& dict_stats_auto_recalc_is_enabled(table)) {
if (table->stats_is_persistent(stat)) {
if (table->stats_is_auto_recalc(stat)
&& counter > n_rows / 10 && !table->name.is_temporary()) {
#ifdef WITH_WSREP
/* Do not add table to background
statistic calculation if this thread is not a
@@ -199,7 +197,7 @@ void dict_stats_update_if_needed_func(dict_table_t *table)
if (counter > threshold) {
/* this will reset table->stat_modified_counter to 0 */
dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT);
dict_stats_update_transient(table);
}
}
@@ -325,7 +323,7 @@ invalid_table_id:
if (!mdl || !table->is_accessible())
{
dict_table_close(table, false, thd, mdl);
dict_table_close(table, thd, mdl);
goto invalid_table_id;
}
@@ -339,10 +337,10 @@ invalid_table_id:
difftime(time(nullptr), table->stats_last_recalc) >= MIN_RECALC_INTERVAL;
const dberr_t err= update_now
? dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT)
? dict_stats_update_persistent_try(table)
: DB_SUCCESS_LOCKED_REC;
dict_table_close(table, false, thd, mdl);
dict_table_close(table, thd, mdl);
mysql_mutex_lock(&recalc_pool_mutex);
auto i= std::find_if(recalc_pool.begin(), recalc_pool.end(),

View File

@@ -1657,12 +1657,11 @@ fseg_find_last_used_frag_page_slot(
/** Calculate reserved fragment page slots.
@param inode file segment index
@return number of fragment pages */
static ulint fseg_get_n_frag_pages(const fseg_inode_t *inode)
static uint32_t fseg_get_n_frag_pages(const fseg_inode_t *inode) noexcept
{
ulint i;
ulint count = 0;
uint32_t count = 0;
for (i = 0; i < FSEG_FRAG_ARR_N_SLOTS; i++) {
for (ulint i = 0; i < FSEG_FRAG_ARR_N_SLOTS; i++) {
if (FIL_NULL != fseg_get_nth_frag_page_no(inode, i)) {
count++;
}
@@ -1806,21 +1805,24 @@ Calculates the number of pages reserved by a segment, and how many pages are
currently used.
@return number of reserved pages */
static
ulint
uint32_t
fseg_n_reserved_pages_low(
/*======================*/
const fseg_inode_t* inode, /*!< in: segment inode */
ulint* used) /*!< out: number of pages used (not
uint32_t* used) /*!< out: number of pages used (not
more than reserved) */
noexcept
{
const uint32_t extent_size = FSP_EXTENT_SIZE;
*used = mach_read_from_4(inode + FSEG_NOT_FULL_N_USED)
+ FSP_EXTENT_SIZE * flst_get_len(inode + FSEG_FULL)
+ extent_size * flst_get_len(inode + FSEG_FULL)
+ fseg_get_n_frag_pages(inode);
return fseg_get_n_frag_pages(inode)
+ FSP_EXTENT_SIZE * flst_get_len(inode + FSEG_FREE)
+ FSP_EXTENT_SIZE * flst_get_len(inode + FSEG_NOT_FULL)
+ FSP_EXTENT_SIZE * flst_get_len(inode + FSEG_FULL);
+ extent_size * flst_get_len(inode + FSEG_FREE)
+ extent_size * flst_get_len(inode + FSEG_NOT_FULL)
+ extent_size * flst_get_len(inode + FSEG_FULL);
}
/** Calculate the number of pages reserved by a segment,
@@ -1830,9 +1832,9 @@ and how many pages are currently used.
@param[out] used number of pages that are used (not more than reserved)
@param[in,out] mtr mini-transaction
@return number of reserved pages */
ulint fseg_n_reserved_pages(const buf_block_t &block,
const fseg_header_t *header, ulint *used,
mtr_t *mtr)
uint32_t fseg_n_reserved_pages(const buf_block_t &block,
const fseg_header_t *header, uint32_t *used,
mtr_t *mtr) noexcept
{
ut_ad(page_align(header) == block.page.frame);
buf_block_t *iblock;
@@ -1857,7 +1859,7 @@ static dberr_t fseg_fill_free_list(const fseg_inode_t *inode,
buf_block_t *iblock, fil_space_t *space,
uint32_t hint, mtr_t *mtr)
{
ulint used;
uint32_t used;
ut_ad(!((page_offset(inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
ut_d(space->modify_check(*mtr));
@@ -2008,8 +2010,7 @@ fseg_alloc_free_page_low(
dberr_t* err)
{
ib_id_t seg_id;
ulint used;
ulint reserved;
uint32_t used, reserved;
xdes_t* descr; /*!< extent of the hinted page */
uint32_t ret_page; /*!< the allocated page offset, FIL_NULL
if could not be allocated */

View File

@@ -231,7 +231,7 @@ fts_config_set_value(
n_rows_updated = trx->undo_no - undo_no;
/* Check if we need to do an insert. */
if (n_rows_updated == 0) {
if (error == DB_SUCCESS && n_rows_updated == 0) {
info = pars_info_create();
pars_info_bind_varchar_literal(

View File

@@ -37,6 +37,7 @@ Full Text Search interface
#include "fts0plugin.h"
#include "dict0stats.h"
#include "btr0pcur.h"
#include "log.h"
static const ulint FTS_MAX_ID_LEN = 32;
@@ -1870,8 +1871,10 @@ fts_create_one_common_table(
}
}
ib::warn() << "Failed to create FTS common table " << fts_table_name;
trx->error_state = error;
ut_ad(trx->state == TRX_STATE_NOT_STARTED
|| trx->error_state == error);
sql_print_warning("InnoDB: Failed to create FTS common table %s: %s",
fts_table_name, ut_strerr(error));
return NULL;
}
@@ -2055,8 +2058,10 @@ fts_create_one_index_table(
}
}
ib::warn() << "Failed to create FTS index table " << table_name;
trx->error_state = error;
ut_ad(trx->state == TRX_STATE_NOT_STARTED
|| trx->error_state == error);
sql_print_warning("InnoDB: Failed to create FTS index table %s: %s",
table_name, ut_strerr(error));
return NULL;
}

View File

@@ -2809,7 +2809,7 @@ static void fts_optimize_sync_table(dict_table_t *table,
std::this_thread::sleep_for(std::chrono::seconds(6)););
if (mdl_ticket)
dict_table_close(sync_table, false, fts_opt_thd, mdl_ticket);
dict_table_close(sync_table, fts_opt_thd, mdl_ticket);
}
/**********************************************************************//**

View File

@@ -1437,9 +1437,9 @@ static void innodb_drop_database(handlerton*, char *path)
trx->commit();
if (table_stats)
dict_table_close(table_stats, true, thd, mdl_table);
dict_table_close(table_stats, thd, mdl_table);
if (index_stats)
dict_table_close(index_stats, true, thd, mdl_index);
dict_table_close(index_stats, thd, mdl_index);
row_mysql_unlock_data_dictionary(trx);
trx->free();
@@ -1588,9 +1588,9 @@ inline void ha_innobase::reload_statistics()
if (dict_table_t *table= m_prebuilt ? m_prebuilt->table : nullptr)
{
if (table->is_readable())
dict_stats_init(table);
statistics_init(table, true);
else
table->stat_initialized= 1;
table->stat.fetch_or(dict_table_t::STATS_INITIALIZED);
}
}
@@ -1900,7 +1900,7 @@ static int innodb_check_version(handlerton *hton, const char *path,
{
const trx_id_t trx_id= table->def_trx_id;
DBUG_ASSERT(trx_id <= create_id);
dict_table_close(table);
table->release();
DBUG_PRINT("info", ("create_id: %llu trx_id: %" PRIu64, create_id, trx_id));
DBUG_RETURN(create_id != trx_id);
}
@@ -2946,6 +2946,44 @@ static int innobase_rollback_by_xid(handlerton*, XID *xid) noexcept
return XAER_NOTA;
}
/** Initialize the InnoDB persistent statistics attributes.
@param table InnoDB table
@param table_options MariaDB table options
@param sar the value of STATS_AUTO_RECALC
@param initialized whether the InnoDB statistics were already initialized
@return whether table->stats_sample_pages needs to be initialized */
static bool innodb_copy_stat_flags(dict_table_t *table,
ulong table_options,
enum_stats_auto_recalc sar,
bool initialized) noexcept
{
if (table->is_temporary() || table->no_rollback())
{
table->stat= dict_table_t::STATS_INITIALIZED |
dict_table_t::STATS_PERSISTENT_OFF | dict_table_t::STATS_AUTO_RECALC_OFF;
table->stats_sample_pages= 1;
return false;
}
static_assert(HA_OPTION_STATS_PERSISTENT ==
dict_table_t::STATS_PERSISTENT_ON << 11, "");
static_assert(HA_OPTION_NO_STATS_PERSISTENT ==
dict_table_t::STATS_PERSISTENT_OFF << 11, "");
uint32_t stat=
(table_options &
(HA_OPTION_STATS_PERSISTENT | HA_OPTION_NO_STATS_PERSISTENT)) >> 11;
static_assert(uint32_t{HA_STATS_AUTO_RECALC_ON} << 3 ==
dict_table_t::STATS_AUTO_RECALC_ON, "");
static_assert(uint32_t{HA_STATS_AUTO_RECALC_OFF} << 3 ==
dict_table_t::STATS_AUTO_RECALC_OFF, "");
static_assert(true == dict_table_t::STATS_INITIALIZED, "");
stat|= (sar & (HA_STATS_AUTO_RECALC_ON | HA_STATS_AUTO_RECALC_OFF)) << 3 |
uint32_t(initialized);
table->stat= stat;
return true;
}
/*********************************************************************//**
Copy table flags from MySQL's HA_CREATE_INFO into an InnoDB table object.
Those flags are stored in .frm file and end up in the MySQL table object,
@@ -2958,29 +2996,9 @@ innobase_copy_frm_flags_from_create_info(
dict_table_t* innodb_table, /*!< in/out: InnoDB table */
const HA_CREATE_INFO* create_info) /*!< in: create info */
{
ibool ps_on;
ibool ps_off;
if (innodb_table->is_temporary()
|| innodb_table->no_rollback()) {
/* Temp tables do not use persistent stats. */
ps_on = FALSE;
ps_off = TRUE;
} else {
ps_on = create_info->table_options
& HA_OPTION_STATS_PERSISTENT;
ps_off = create_info->table_options
& HA_OPTION_NO_STATS_PERSISTENT;
}
dict_stats_set_persistent(innodb_table, ps_on, ps_off);
dict_stats_auto_recalc_set(
innodb_table,
create_info->stats_auto_recalc == HA_STATS_AUTO_RECALC_ON,
create_info->stats_auto_recalc == HA_STATS_AUTO_RECALC_OFF);
innodb_table->stats_sample_pages = create_info->stats_sample_pages;
if (innodb_copy_stat_flags(innodb_table, create_info->table_options,
create_info->stats_auto_recalc, false))
innodb_table->stats_sample_pages= create_info->stats_sample_pages;
}
/*********************************************************************//**
@@ -2994,28 +3012,10 @@ innobase_copy_frm_flags_from_table_share(
dict_table_t* innodb_table, /*!< in/out: InnoDB table */
const TABLE_SHARE* table_share) /*!< in: table share */
{
ibool ps_on;
ibool ps_off;
if (innodb_table->is_temporary()) {
/* Temp tables do not use persistent stats */
ps_on = FALSE;
ps_off = TRUE;
} else {
ps_on = table_share->db_create_options
& HA_OPTION_STATS_PERSISTENT;
ps_off = table_share->db_create_options
& HA_OPTION_NO_STATS_PERSISTENT;
}
dict_stats_set_persistent(innodb_table, ps_on, ps_off);
dict_stats_auto_recalc_set(
innodb_table,
table_share->stats_auto_recalc == HA_STATS_AUTO_RECALC_ON,
table_share->stats_auto_recalc == HA_STATS_AUTO_RECALC_OFF);
innodb_table->stats_sample_pages = table_share->stats_sample_pages;
if (innodb_copy_stat_flags(innodb_table, table_share->db_create_options,
table_share->stats_auto_recalc,
innodb_table->stat_initialized()))
innodb_table->stats_sample_pages= table_share->stats_sample_pages;
}
/*********************************************************************//**
@@ -3256,7 +3256,7 @@ static bool innobase_query_caching_table_check(
bool allow = innobase_query_caching_table_check_low(table, trx);
dict_table_close(table);
table->release();
if (allow) {
/* If the isolation level is high, assign a read view for the
@@ -5819,6 +5819,70 @@ static void initialize_auto_increment(dict_table_t *table, const Field& field,
table->autoinc_mutex.wr_unlock();
}
dberr_t ha_innobase::statistics_init(dict_table_t *table, bool recalc)
{
ut_ad(table->is_readable());
ut_ad(!table->stats_mutex_is_owner());
uint32_t stat= table->stat;
dberr_t err= DB_SUCCESS;
if (!recalc && dict_table_t::stat_initialized(stat));
else if (srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN)
dict_stats_empty_table(table);
else
{
if (dict_table_t::stats_is_persistent(stat) && !srv_read_only_mode
#ifdef WITH_WSREP
&& !wsrep_thd_skip_locking(m_user_thd)
#endif
)
{
switch (dict_stats_persistent_storage_check(false)) {
case SCHEMA_OK:
if (recalc)
{
recalc:
err= dict_stats_update_persistent(table);
if (err == DB_SUCCESS)
err= dict_stats_save(table);
}
else
{
err= dict_stats_fetch_from_ps(table);
if (err == DB_STATS_DO_NOT_EXIST && table->stats_is_auto_recalc())
goto recalc;
}
if (err == DB_SUCCESS)
return err;
if (!recalc)
break;
/* fall through */
case SCHEMA_INVALID:
if (table->stats_error_printed)
break;
table->stats_error_printed = true;
if (opt_bootstrap)
break;
sql_print_warning("InnoDB: %s of persistent statistics requested"
" for table %`.*s.%`s"
" but the required persistent statistics storage"
" is corrupted.",
recalc ? "Recalculation" : "Fetch",
int(table->name.dblen()), table->name.m_name,
table->name.basename());
/* fall through */
case SCHEMA_NOT_EXIST:
err= DB_STATS_DO_NOT_EXIST;
}
}
dict_stats_update_transient(table);
}
return err;
}
/** Open an InnoDB table
@param[in] name table name
@return error code
@@ -13370,7 +13434,7 @@ ha_innobase::create(const char *name, TABLE *form, HA_CREATE_INFO *create_info,
as it is not needed and would report error due to the table
not being readable yet. */
if (!info.creating_stub())
dict_stats_update(info.table(), DICT_STATS_EMPTY_TABLE);
dict_stats_empty_table_and_save(info.table());
if (!info.table()->is_temporary())
log_write_up_to(trx->commit_lsn, true);
info.table()->release();
@@ -13419,6 +13483,8 @@ ha_innobase::discard_or_import_tablespace(
DBUG_RETURN(HA_ERR_TABLE_NEEDS_UPGRADE);
}
ut_ad(m_prebuilt->table->stat_initialized());
if (m_prebuilt->table->space == fil_system.sys_space) {
ib_senderrf(
m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR,
@@ -13492,23 +13558,17 @@ ha_innobase::discard_or_import_tablespace(
err, m_prebuilt->table->flags, NULL));
}
if (dict_stats_is_persistent_enabled(m_prebuilt->table)) {
dberr_t ret;
dict_table_t* t = m_prebuilt->table;
/* Adjust the persistent statistics. */
ret = dict_stats_update(m_prebuilt->table,
DICT_STATS_RECALC_PERSISTENT);
if (ret != DB_SUCCESS) {
if (dberr_t ret = dict_stats_update_persistent_try(t)) {
push_warning_printf(
ha_thd(),
Sql_condition::WARN_LEVEL_WARN,
ER_ALTER_INFO,
"Error updating stats for table '%s'"
" after table rebuild: %s",
m_prebuilt->table->name.m_name,
ut_strerr(ret));
}
"Error updating stats after"
" ALTER TABLE %`.*s.%`s IMPORT TABLESPACE: %s",
int(t->name.dblen()), t->name.m_name,
t->name.basename(), ut_strerr(ret));
}
DBUG_RETURN(0);
@@ -13677,7 +13737,7 @@ int ha_innobase::delete_table(const char *name)
/* This looks like the rollback of ALTER TABLE...ADD PARTITION
that was caused by MDL timeout. We could have written undo log
for inserting the data into the new partitions. */
if (table->stat_persistent != DICT_STATS_PERSISTENT_OFF)
if (!(table->stat & dict_table_t::STATS_PERSISTENT_OFF))
{
/* We do not really know if we are holding MDL_EXCLUSIVE. Even
though this code is handling the case that we are not holding
@@ -13692,7 +13752,7 @@ int ha_innobase::delete_table(const char *name)
DEBUG_SYNC(thd, "before_delete_table_stats");
if (err == DB_SUCCESS && dict_stats_is_persistent_enabled(table) &&
if (err == DB_SUCCESS && table->stats_is_persistent() &&
!table->is_stats_table())
{
table_stats= dict_table_open_on_name(TABLE_STATS_NAME, false,
@@ -13730,8 +13790,8 @@ int ha_innobase::delete_table(const char *name)
ut_ad(err == DB_LOCK_WAIT);
ut_ad(trx->error_state == DB_SUCCESS);
err= DB_SUCCESS;
dict_table_close(table_stats, false, thd, mdl_table);
dict_table_close(index_stats, false, thd, mdl_index);
dict_table_close(table_stats, thd, mdl_table);
dict_table_close(index_stats, thd, mdl_index);
table_stats= nullptr;
index_stats= nullptr;
}
@@ -13805,9 +13865,9 @@ err_exit:
purge_sys.resume_FTS();
#endif
if (table_stats)
dict_table_close(table_stats, true, thd, mdl_table);
dict_table_close(table_stats, thd, mdl_table);
if (index_stats)
dict_table_close(index_stats, true, thd, mdl_index);
dict_table_close(index_stats, thd, mdl_index);
row_mysql_unlock_data_dictionary(trx);
if (trx != parent_trx)
trx->free();
@@ -13837,9 +13897,9 @@ err_exit:
std::vector<pfs_os_file_t> deleted;
trx->commit(deleted);
if (table_stats)
dict_table_close(table_stats, true, thd, mdl_table);
dict_table_close(table_stats, thd, mdl_table);
if (index_stats)
dict_table_close(index_stats, true, thd, mdl_index);
dict_table_close(index_stats, thd, mdl_index);
row_mysql_unlock_data_dictionary(trx);
for (pfs_os_file_t d : deleted)
os_file_close(d);
@@ -14054,6 +14114,8 @@ int ha_innobase::truncate()
error= fts_lock_tables(trx, *ib_table);
}
if (error == DB_SUCCESS)
{
/* Wait for purge threads to stop using the table. */
for (uint n = 15; ib_table->get_ref_count() > 1; )
{
@@ -14064,8 +14126,9 @@ int ha_innobase::truncate()
}
std::this_thread::sleep_for(std::chrono::milliseconds(50));
}
}
if (error == DB_SUCCESS && dict_stats_is_persistent_enabled(ib_table) &&
if (error == DB_SUCCESS && ib_table->stats_is_persistent() &&
!ib_table->is_stats_table())
{
table_stats= dict_table_open_on_name(TABLE_STATS_NAME, false,
@@ -14155,7 +14218,7 @@ int ha_innobase::truncate()
if (!err)
{
dict_stats_update(m_prebuilt->table, DICT_STATS_EMPTY_TABLE);
dict_stats_empty_table_and_save(m_prebuilt->table);
log_write_up_to(trx->commit_lsn, true);
row_prebuilt_t *prebuilt= m_prebuilt;
uchar *upd_buf= m_upd_buf;
@@ -14187,13 +14250,49 @@ int ha_innobase::truncate()
mem_heap_free(heap);
if (table_stats)
dict_table_close(table_stats, false, m_user_thd, mdl_table);
dict_table_close(table_stats, m_user_thd, mdl_table);
if (index_stats)
dict_table_close(index_stats, false, m_user_thd, mdl_index);
dict_table_close(index_stats, m_user_thd, mdl_index);
DBUG_RETURN(err);
}
/** Deinitialize InnoDB persistent statistics, forcing them
to be reloaded on subsequent ha_innobase::open().
@param t table for which the cached STATS_PERSISTENT are to be evicted */
static void stats_deinit(dict_table_t *t) noexcept
{
ut_ad(dict_sys.frozen());
ut_ad(t->get_ref_count() == 0);
if (t->is_temporary() || t->no_rollback())
return;
t->stats_mutex_lock();
t->stat= t->stat & ~dict_table_t::STATS_INITIALIZED;
MEM_UNDEFINED(&t->stat_n_rows, sizeof t->stat_n_rows);
MEM_UNDEFINED(&t->stat_clustered_index_size,
sizeof t->stat_clustered_index_size);
MEM_UNDEFINED(&t->stat_sum_of_other_index_sizes,
sizeof t->stat_sum_of_other_index_sizes);
MEM_UNDEFINED(&t->stat_modified_counter, sizeof t->stat_modified_counter);
#ifdef HAVE_valgrind
for (dict_index_t *i= dict_table_get_first_index(t); i;
i= dict_table_get_next_index(i))
{
MEM_UNDEFINED(i->stat_n_diff_key_vals,
i->n_uniq * sizeof *i->stat_n_diff_key_vals);
MEM_UNDEFINED(i->stat_n_sample_sizes,
i->n_uniq * sizeof *i->stat_n_sample_sizes);
MEM_UNDEFINED(i->stat_n_non_null_key_vals,
i->n_uniq * sizeof *i->stat_n_non_null_key_vals);
MEM_UNDEFINED(&i->stat_index_size, sizeof i->stat_index_size);
MEM_UNDEFINED(&i->stat_n_leaf_pages, sizeof i->stat_n_leaf_pages);
}
#endif /* HAVE_valgrind */
t->stats_mutex_unlock();
}
/*********************************************************************//**
Renames an InnoDB table.
@return 0 or error code */
@@ -14227,15 +14326,20 @@ ha_innobase::rename_table(
dberr_t error = DB_SUCCESS;
const bool from_temp = dict_table_t::is_temporary_name(norm_from);
dict_table_t* t;
if (from_temp) {
/* There is no need to lock any FOREIGN KEY child tables. */
} else if (dict_table_t *table = dict_table_open_on_name(
norm_from, false, DICT_ERR_IGNORE_FK_NOKEY)) {
error = lock_table_children(table, trx);
t = nullptr;
} else {
t = dict_table_open_on_name(
norm_from, false, DICT_ERR_IGNORE_FK_NOKEY);
if (t) {
error = lock_table_children(t, trx);
if (error == DB_SUCCESS) {
error = lock_table_for_trx(table, trx, LOCK_X);
error = lock_table_for_trx(t, trx, LOCK_X);
}
}
table->release();
}
if (strcmp(norm_from, TABLE_STATS_NAME)
@@ -14276,10 +14380,8 @@ ha_innobase::rename_table(
we cannot lock the tables, when the
table is being renamed from from a
temporary name. */
dict_table_close(table_stats, false, thd,
mdl_table);
dict_table_close(index_stats, false, thd,
mdl_index);
dict_table_close(table_stats, thd, mdl_table);
dict_table_close(index_stats, thd, mdl_index);
table_stats = nullptr;
index_stats = nullptr;
}
@@ -14321,16 +14423,27 @@ ha_innobase::rename_table(
if (error == DB_SUCCESS) {
trx->flush_log_later = true;
if (t) {
ut_ad(dict_sys.frozen());
if (UNIV_LIKELY(t->release())) {
stats_deinit(t);
} else {
ut_ad("unexpected references" == 0);
}
}
innobase_commit_low(trx);
} else {
if (t) {
t->release();
}
trx->rollback();
}
if (table_stats) {
dict_table_close(table_stats, true, thd, mdl_table);
dict_table_close(table_stats, thd, mdl_table);
}
if (index_stats) {
dict_table_close(index_stats, true, thd, mdl_index);
dict_table_close(index_stats, thd, mdl_index);
}
row_mysql_unlock_data_dictionary(trx);
if (error == DB_SUCCESS) {
@@ -14344,10 +14457,10 @@ ha_innobase::rename_table(
during DDL operations, because the duplicate key would
exist in metadata tables, not in the user table. */
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), to);
error = DB_ERROR;
DBUG_RETURN(HA_ERR_GENERIC);
} else if (error == DB_LOCK_WAIT_TIMEOUT) {
my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
error = DB_LOCK_WAIT;
DBUG_RETURN(HA_ERR_GENERIC);
}
DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL));
@@ -14563,7 +14676,7 @@ ha_innobase::scan_time()
ulint stat_clustered_index_size;
IO_AND_CPU_COST cost;
ut_a(m_prebuilt->table->stat_initialized);
ut_ad(m_prebuilt->table->stat_initialized());
stat_clustered_index_size =
m_prebuilt->table->stat_clustered_index_size;
@@ -14719,7 +14832,7 @@ innodb_rec_per_key(
rec_per_key_t rec_per_key;
ib_uint64_t n_diff;
ut_a(index->table->stat_initialized);
ut_ad(index->table->stat_initialized());
ut_ad(i < dict_index_get_n_unique(index));
ut_ad(!dict_index_is_spatial(index));
@@ -14857,57 +14970,75 @@ ha_innobase::info_low(
ib_table = m_prebuilt->table;
DBUG_ASSERT(ib_table->get_ref_count() > 0);
if (!ib_table->is_readable()) {
if (!ib_table->is_readable()
|| srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN) {
dict_stats_empty_table(ib_table);
} else if (flag & HA_STATUS_TIME) {
stats.update_time = ib_table->update_time;
if (!is_analyze && !innobase_stats_on_metadata) {
goto stats_fetch;
}
if (flag & HA_STATUS_TIME) {
if (is_analyze || innobase_stats_on_metadata) {
dict_stats_upd_option_t opt;
dberr_t ret;
m_prebuilt->trx->op_info = "updating table statistics";
if (dict_stats_is_persistent_enabled(ib_table)) {
if (ib_table->stats_is_persistent()
&& !srv_read_only_mode
&& dict_stats_persistent_storage_check(false)
== SCHEMA_OK) {
if (is_analyze) {
if (!srv_read_only_mode) {
dict_stats_recalc_pool_del(
ib_table->id, false);
}
opt = DICT_STATS_RECALC_PERSISTENT;
dict_stats_recalc_pool_del(ib_table->id,
false);
recalc:
ret = statistics_init(ib_table, is_analyze);
} else {
/* This is e.g. 'SHOW INDEXES', fetch
the persistent stats from disk. */
opt = DICT_STATS_FETCH_ONLY_IF_NOT_IN_MEMORY;
}
} else {
opt = DICT_STATS_RECALC_TRANSIENT;
/* This is e.g. 'SHOW INDEXES' */
ret = statistics_init(ib_table, is_analyze);
switch (ret) {
case DB_SUCCESS:
break;
default:
goto error;
case DB_STATS_DO_NOT_EXIST:
if (!ib_table
->stats_is_auto_recalc()) {
break;
}
ret = dict_stats_update(ib_table, opt);
if (opt_bootstrap) {
break;
}
#ifdef WITH_WSREP
if (wsrep_thd_skip_locking(
m_user_thd)) {
break;
}
#endif
is_analyze = true;
goto recalc;
}
}
} else {
ret = dict_stats_update_transient(ib_table);
if (ret != DB_SUCCESS) {
error:
m_prebuilt->trx->op_info = "";
DBUG_RETURN(HA_ERR_GENERIC);
}
m_prebuilt->trx->op_info =
"returning various info to MariaDB";
}
stats.update_time = (ulong) ib_table->update_time;
m_prebuilt->trx->op_info = "returning various info to MariaDB";
} else {
stats_fetch:
statistics_init(ib_table, false);
}
dict_stats_init(ib_table);
if (flag & HA_STATUS_VARIABLE) {
ulint stat_clustered_index_size;
ulint stat_sum_of_other_index_sizes;
ut_a(ib_table->stat_initialized);
ut_ad(ib_table->stat_initialized());
#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
if (xbegin()) {
@@ -15061,7 +15192,7 @@ ha_innobase::info_low(
auto _ = make_scope_exit([ib_table]() {
ib_table->stats_shared_unlock(); });
ut_a(ib_table->stat_initialized);
ut_ad(ib_table->stat_initialized());
for (uint i = 0; i < table->s->keys; i++) {
ulong j;
@@ -15678,7 +15809,7 @@ get_foreign_key_info(
<< foreign->foreign_table_name;
}
} else {
dict_table_close(ref_table, true);
ref_table->release();
}
}
@@ -15909,8 +16040,7 @@ ha_innobase::extra(
/* During copy alter operation, InnoDB
updates the stats only for non-persistent
tables. */
if (!dict_stats_is_persistent_enabled(
m_prebuilt->table)) {
if (!m_prebuilt->table->stats_is_persistent()) {
dict_stats_update_if_needed(
m_prebuilt->table, *trx);
}
@@ -17567,7 +17697,7 @@ static int innodb_ft_aux_table_validate(THD *thd, st_mysql_sys_var*,
table_name, false, DICT_ERR_IGNORE_NONE)) {
const table_id_t id = dict_table_has_fts_index(table)
? table->id : 0;
dict_table_close(table);
table->release();
if (id) {
innodb_ft_aux_table_id = id;
if (table_name == buf) {
@@ -18600,7 +18730,7 @@ static void innodb_log_file_size_update(THD *thd, st_mysql_sys_var*,
" innodb_log_buffer_size=%u", MYF(0), log_sys.buf_size);
else
{
switch (log_sys.resize_start(*static_cast<const ulonglong*>(save))) {
switch (log_sys.resize_start(*static_cast<const ulonglong*>(save), thd)) {
case log_t::RESIZE_NO_CHANGE:
break;
case log_t::RESIZE_IN_PROGRESS:
@@ -18612,12 +18742,11 @@ static void innodb_log_file_size_update(THD *thd, st_mysql_sys_var*,
ib_senderrf(thd, IB_LOG_LEVEL_ERROR, ER_CANT_CREATE_HANDLER_FILE);
break;
case log_t::RESIZE_STARTED:
const lsn_t start{log_sys.resize_in_progress()};
for (timespec abstime;;)
{
if (thd_kill_level(thd))
{
log_sys.resize_abort();
log_sys.resize_abort(thd);
break;
}
@@ -18632,13 +18761,15 @@ static void innodb_log_file_size_update(THD *thd, st_mysql_sys_var*,
resizing= log_sys.resize_in_progress();
}
mysql_mutex_unlock(&buf_pool.flush_list_mutex);
if (start > log_sys.get_lsn())
if (!resizing || !log_sys.resize_running(thd))
break;
if (resizing > log_sys.get_lsn())
{
ut_ad(!log_sys.is_mmap());
/* The server is almost idle. Write dummy FILE_CHECKPOINT records
to ensure that the log resizing will complete. */
log_sys.latch.wr_lock(SRW_LOCK_CALL);
while (start > log_sys.get_lsn())
while (resizing > log_sys.get_lsn())
{
mtr_t mtr;
mtr.start();
@@ -18646,8 +18777,6 @@ static void innodb_log_file_size_update(THD *thd, st_mysql_sys_var*,
}
log_sys.latch.wr_unlock();
}
if (!resizing || resizing > start /* only wait for our resize */)
break;
}
}
}
@@ -19169,12 +19298,12 @@ static MYSQL_SYSVAR_BOOL(stats_on_metadata, innobase_stats_on_metadata,
" SHOW TABLE STATUS for tables that use transient statistics (off by default)",
NULL, NULL, FALSE);
static MYSQL_SYSVAR_ULONGLONG(stats_transient_sample_pages,
static MYSQL_SYSVAR_UINT(stats_transient_sample_pages,
srv_stats_transient_sample_pages,
PLUGIN_VAR_RQCMDARG,
"The number of leaf index pages to sample when calculating transient"
" statistics (if persistent statistics are not used, default 8)",
NULL, NULL, 8, 1, ~0ULL, 0);
NULL, NULL, 8, 1, ~0U, 0);
static MYSQL_SYSVAR_BOOL(stats_persistent, srv_stats_persistent,
PLUGIN_VAR_OPCMDARG,
@@ -19190,12 +19319,12 @@ static MYSQL_SYSVAR_BOOL(stats_auto_recalc, srv_stats_auto_recalc,
" new statistics)",
NULL, NULL, TRUE);
static MYSQL_SYSVAR_ULONGLONG(stats_persistent_sample_pages,
static MYSQL_SYSVAR_UINT(stats_persistent_sample_pages,
srv_stats_persistent_sample_pages,
PLUGIN_VAR_RQCMDARG,
"The number of leaf index pages to sample when calculating persistent"
" statistics (by ANALYZE, default 20)",
NULL, NULL, 20, 1, ~0ULL, 0);
NULL, NULL, 20, 1, ~0U, 0);
static MYSQL_SYSVAR_ULONGLONG(stats_modified_counter, srv_stats_modified_counter,
PLUGIN_VAR_RQCMDARG,

View File

@@ -101,6 +101,9 @@ public:
int open(const char *name, int mode, uint test_if_locked) override;
/** Fetch or recalculate InnoDB table statistics */
dberr_t statistics_init(dict_table_t *table, bool recalc);
handler* clone(const char *name, MEM_ROOT *mem_root) override;
int close(void) override;

View File

@@ -7542,10 +7542,11 @@ error_handled:
}
}
/* n_ref_count must be 1, because background threads cannot
/* n_ref_count must be 1 (+ InnoDB_share),
because background threads cannot
be executing on this very table as we are
holding MDL_EXCLUSIVE. */
ut_ad(ctx->online || user_table->get_ref_count() == 1);
ut_ad(ctx->online || ((user_table->get_ref_count() - 1) <= 1));
if (new_clustered) {
online_retry_drop_indexes_low(user_table, ctx->trx);
@@ -11190,7 +11191,10 @@ alter_stats_norebuild(
DBUG_ENTER("alter_stats_norebuild");
DBUG_ASSERT(!ctx->need_rebuild());
if (!dict_stats_is_persistent_enabled(ctx->new_table)) {
auto stat = ctx->new_table->stat;
if (!dict_table_t::stat_initialized(stat)
|| !dict_table_t::stats_is_persistent(stat)) {
DBUG_VOID_RETURN;
}
@@ -11199,7 +11203,6 @@ alter_stats_norebuild(
DBUG_ASSERT(index->table == ctx->new_table);
if (!(index->type & DICT_FTS)) {
dict_stats_init(ctx->new_table);
dict_stats_update_for_index(index);
}
}
@@ -11224,12 +11227,15 @@ alter_stats_rebuild(
{
DBUG_ENTER("alter_stats_rebuild");
if (!table->space
|| !dict_stats_is_persistent_enabled(table)) {
if (!table->space || !table->stats_is_persistent()
|| dict_stats_persistent_storage_check(false) != SCHEMA_OK) {
DBUG_VOID_RETURN;
}
dberr_t ret = dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT);
dberr_t ret = dict_stats_update_persistent(table);
if (ret == DB_SUCCESS) {
ret = dict_stats_save(table);
}
if (ret != DB_SUCCESS) {
push_warning_printf(
@@ -11342,6 +11348,13 @@ ha_innobase::commit_inplace_alter_table(
/* A rollback is being requested. So far we may at
most have created stubs for ADD INDEX or a copy of the
table for rebuild. */
#if 0 /* FIXME: is there a better way for innodb.innodb-index-online? */
lock_shared_ha_data();
auto share = static_cast<InnoDB_share*>(get_ha_share_ptr());
set_ha_share_ptr(nullptr);
unlock_shared_ha_data();
delete share;
#endif
DBUG_RETURN(rollback_inplace_alter_table(
ha_alter_info, table, m_prebuilt));
}
@@ -11611,12 +11624,10 @@ err_index:
}
if (error != DB_SUCCESS) {
if (table_stats) {
dict_table_close(table_stats, false, m_user_thd,
mdl_table);
dict_table_close(table_stats, m_user_thd, mdl_table);
}
if (index_stats) {
dict_table_close(index_stats, false, m_user_thd,
mdl_index);
dict_table_close(index_stats, m_user_thd, mdl_index);
}
my_error_innodb(error, table_share->table_name.str, 0);
if (fts_exist) {
@@ -11652,11 +11663,11 @@ fail:
trx->rollback();
ut_ad(!trx->fts_trx);
if (table_stats) {
dict_table_close(table_stats, true, m_user_thd,
dict_table_close(table_stats, m_user_thd,
mdl_table);
}
if (index_stats) {
dict_table_close(index_stats, true, m_user_thd,
dict_table_close(index_stats, m_user_thd,
mdl_index);
}
row_mysql_unlock_data_dictionary(trx);
@@ -11710,10 +11721,10 @@ fail:
}
if (table_stats) {
dict_table_close(table_stats, true, m_user_thd, mdl_table);
dict_table_close(table_stats, m_user_thd, mdl_table);
}
if (index_stats) {
dict_table_close(index_stats, true, m_user_thd, mdl_index);
dict_table_close(index_stats, m_user_thd, mdl_index);
}
/* Commit or roll back the changes to the data dictionary. */

Some files were not shown because too many files have changed in this diff Show More