Merge branch '10.11' into 11.4
@@ -163,6 +163,7 @@ SET(INSTALL_UNIX_ADDRDIR_RPM "${INSTALL_MYSQLDATADIR_RPM}/mysql.sock"
SET(INSTALL_SYSTEMD_UNITDIR_RPM "/usr/lib/systemd/system")
SET(INSTALL_SYSTEMD_SYSUSERSDIR_RPM "/usr/lib/sysusers.d")
SET(INSTALL_SYSTEMD_TMPFILESDIR_RPM "/usr/lib/tmpfiles.d")
SET(INSTALL_RUNDATADIR_RPM "/run/mariadb")
SET(INSTALL_PAMDIR_RPM "/${INSTALL_LIBDIR_RPM}/security")
SET(INSTALL_PAMDATADIR_RPM "/etc/security")

@@ -193,7 +194,8 @@ SET(INSTALL_SUPPORTFILESDIR_DEB "share/mariadb")
#
SET(INSTALL_MYSQLDATADIR_DEB "/var/lib/mysql")

SET(INSTALL_UNIX_ADDRDIR_DEB "/run/mysqld/mysqld.sock")
SET(INSTALL_RUNDATADIR_DEB "/run/mysqld")
SET(INSTALL_UNIX_ADDRDIR_DEB "${INSTALL_RUNDATADIR_DEB}/mysqld.sock")
SET(INSTALL_SYSTEMD_UNITDIR_DEB "/lib/systemd/system")
SET(INSTALL_SYSTEMD_SYSUSERSDIR_DEB "/usr/lib/sysusers.d")
SET(INSTALL_SYSTEMD_TMPFILESDIR_DEB "/usr/lib/tmpfiles.d")
@@ -257,3 +259,7 @@ IF(NOT MYSQL_UNIX_ADDR)
SET(MYSQL_UNIX_ADDR ${INSTALL_UNIX_ADDRDIR})
ENDIF()

IF(NOT INSTALL_RUNDATADIR)
get_filename_component(MYSQL_UNIX_DIR ${MYSQL_UNIX_ADDR} DIRECTORY)
SET(INSTALL_RUNDATADIR "${MYSQL_UNIX_DIR}" CACHE FILEPATH "Rundata installation directory" ${FORCE})
ENDIF()

@@ -50,8 +50,7 @@ MACRO(CHECK_SYSTEMD)
SET(SYSTEMD_SCRIPTS ${SYSTEMD_SCRIPTS} galera_new_cluster galera_recovery)
ENDIF()
IF(DEB)
SET(SYSTEMD_EXECSTARTPRE "ExecStartPre=/usr/bin/install -m 755 -o mysql -g root -d /var/run/mysqld")
SET(SYSTEMD_EXECSTARTPOST "ExecStartPost=/etc/mysql/debian-start")
SET(SYSTEMD_EXECSTARTPOST "ExecStartPost=!/etc/mysql/debian-start")
ENDIF()
IF(URING_FOUND)
SET(SYSTEMD_LIMIT "# For liburing and io_uring_setup()

debian/control | 18 changed lines (vendored)
@@ -980,8 +980,7 @@ Description: Connect storage engine JDBC interface for MariaDB server.

Package: mariadb-plugin-s3
Architecture: any
Depends: libcurl4,
         mariadb-server (= ${server:Version}),
Depends: mariadb-server (= ${server:Version}),
         ${misc:Depends},
         ${shlibs:Depends}
Description: Amazon S3 archival storage engine for MariaDB
@@ -1010,8 +1009,7 @@ Description: RocksDB storage engine for MariaDB server

Package: mariadb-plugin-oqgraph
Architecture: any
Depends: libjudydebian1,
         mariadb-server (= ${server:Version}),
Depends: mariadb-server (= ${server:Version}),
         ${misc:Depends},
         ${shlibs:Depends}
Breaks: mariadb-oqgraph-engine-10.0,
@@ -1076,8 +1074,7 @@ Description: Spider storage engine for MariaDB server

Package: mariadb-plugin-gssapi-server
Architecture: any
Depends: libgssapi-krb5-2,
         mariadb-server,
Depends: mariadb-server (= ${server:Version}),
         ${misc:Depends},
         ${shlibs:Depends}
Breaks: mariadb-gssapi-server-10.1,
@@ -1100,8 +1097,7 @@ Description: GSSAPI authentication plugin for MariaDB server

Package: mariadb-plugin-gssapi-client
Architecture: any
Depends: libgssapi-krb5-2,
         mariadb-client (= ${binary:Version}),
Depends: mariadb-client (= ${binary:Version}),
         ${misc:Depends},
         ${shlibs:Depends}
Breaks: mariadb-gssapi-client-10.1,
@@ -1120,8 +1116,7 @@ Description: GSSAPI authentication plugin for MariaDB client

Package: mariadb-plugin-cracklib-password-check
Architecture: any
Depends: libcrack2 (>= 2.9.0),
         mariadb-server,
Depends: mariadb-server,
         ${misc:Depends},
         ${shlibs:Depends}
Description: CrackLib Password Validation Plugin for MariaDB server
@@ -1134,8 +1129,7 @@ Description: CrackLib Password Validation Plugin for MariaDB server

Package: mariadb-plugin-hashicorp-key-management
Architecture: any
Depends: libcurl4,
         mariadb-server,
Depends: mariadb-server,
         ${misc:Depends},
         ${shlibs:Depends}
Description: Hashicorp Key Management plugin for MariaDB

debian/mariadb-server.install | 1 changed line (vendored)
@@ -15,6 +15,7 @@ lib/systemd/system/mariadb@.socket
lib/systemd/system/mysql.service
lib/systemd/system/mysqld.service
support-files/rpm/enable_encryption.preset etc/mysql/mariadb.conf.d/99-enable-encryption.cnf.preset
usr/lib/tmpfiles.d/mariadb.conf
usr/bin/aria_chk
usr/bin/aria_dump_log
usr/bin/aria_ftdump

debian/not-installed | 1 changed line (vendored)
@@ -22,7 +22,6 @@ usr/lib/*/pkgconfig/mariadb.pc
usr/bin/uca-dump
usr/bin/wsrep_sst_backup
usr/lib/sysusers.d/mariadb.conf # Not used (yet) in Debian systemd
usr/lib/tmpfiles.d/mariadb.conf # Not used (yet) in Debian systemd
usr/sbin/rcmysql
usr/share/doc/mariadb-server/COPYING (related file: "debian/tmp/usr/share/mariadb/mroonga/COPYING")
usr/share/doc/mariadb-server/CREDITS

@@ -1,23 +1,4 @@
#
# MDEV-23836: Assertion `! is_set() || m_can_overwrite_status' in
# Diagnostics_area::set_error_status (interrupted ALTER TABLE under LOCK)
#
SET @max_session_mem_used_save= @@max_session_mem_used;
CREATE TABLE t1 (a INT);
SELECT * FROM t1;
a
ALTER TABLE x MODIFY xx INT;
ERROR 42S02: Table 'test.x' doesn't exist
SET SESSION max_session_mem_used= 8192;
LOCK TABLE t1 WRITE;
ALTER TABLE t1 CHANGE COLUMN IF EXISTS b c INT;
SET SESSION max_session_mem_used = @max_session_mem_used_save;
UNLOCK TABLES;
DROP TABLE t1;
#
# End of 10.5 tests
#
#
# MDEV-28943 Online alter fails under LOCK TABLE with ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
#
create table t1 (f int) engine=innodb;
@@ -58,9 +39,6 @@ alter online table t1 add column s blob not null, algorithm=inplace;
ERROR 0A000: LOCK=NONE is not supported. Reason: Fulltext index creation requires a lock. Try LOCK=SHARED
drop table t1;
#
# End of 10.11 tests
#
#
# MDEV-35611 Assertion failure in Diagnostics_area::sql_errno upon interrupted ALTER
#
CREATE TABLE t (a INT) ENGINE=MyISAM;
@@ -76,6 +54,4 @@ disconnect con1;
connection default;
UNLOCK TABLES;
DROP TABLE t;
#
# End of 11.4 tests
#

@@ -1,36 +1,6 @@
--source include/not_embedded.inc
--source include/have_innodb.inc

--echo #
--echo # MDEV-23836: Assertion `! is_set() || m_can_overwrite_status' in
--echo # Diagnostics_area::set_error_status (interrupted ALTER TABLE under LOCK)
--echo #

SET @max_session_mem_used_save= @@max_session_mem_used;

CREATE TABLE t1 (a INT);
SELECT * FROM t1;

--error ER_NO_SUCH_TABLE
ALTER TABLE x MODIFY xx INT;

SET SESSION max_session_mem_used= 8192;
--error 0,ER_OPTION_PREVENTS_STATEMENT
LOCK TABLE t1 WRITE;

--disable_warnings
--error 0,ER_OPTION_PREVENTS_STATEMENT
ALTER TABLE t1 CHANGE COLUMN IF EXISTS b c INT;
--enable_warnings

SET SESSION max_session_mem_used = @max_session_mem_used_save;
UNLOCK TABLES;
DROP TABLE t1;

--echo #
--echo # End of 10.5 tests
--echo #

--echo #
--echo # MDEV-28943 Online alter fails under LOCK TABLE with ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
--echo #
@@ -75,10 +45,6 @@ lock table t1 write;
alter online table t1 add column s blob not null, algorithm=inplace;
drop table t1;

--echo #
--echo # End of 10.11 tests
--echo #

--echo #
--echo # MDEV-35611 Assertion failure in Diagnostics_area::sql_errno upon interrupted ALTER
--echo #
@@ -97,6 +63,4 @@ ALTER TABLE IF EXISTS t FORCE;
UNLOCK TABLES;
DROP TABLE t;

--echo #
--echo # End of 11.4 tests
--echo #

@@ -1030,6 +1030,71 @@ set statement optimizer_switch='split_materialized=off' for $query;
a b name total_amt
1 NULL A 10
DROP TABLE t1,t2;
#
# MDEV-37407 Wrong result with ORDER BY LIMIT
# Both, with and without split_materialized should
# produce the same results
#
SET @save_optimizer_switch= @@optimizer_switch;
CREATE TABLE t1
(a varchar(35), b varchar(4), KEY (a))
ENGINE=InnoDB;
INSERT INTO t1 VALUES
('Albania','AXA'), ('Australia','AUS'), ('Myanmar','MMR'),
('Bahamas','BS'), ('Brazil','BRA'), ('Barbados','BRB');
CREATE TABLE t2
(a varchar(4), b varchar(50), PRIMARY KEY (b,a), KEY (a))
ENGINE=InnoDB;
INSERT INTO t2 VALUES
('AUS','Anglican'), ('MMR','Baptist'), ('BS','Anglican'),
('BS','Baptist'), ('BS','Methodist'), ('BRB','Methodist'),
('BRA','Baptist'), ('USA','Baptist');
ANALYZE TABLE t1 PERSISTENT FOR ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
ANALYZE TABLE t2 PERSISTENT FOR ALL;
Table Op Msg_type Msg_text
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
set optimizer_switch='split_materialized=off';
SELECT t1.a
FROM (SELECT a FROM t2 GROUP BY a ORDER BY a, COUNT(DISTINCT b) LIMIT 1) dt
JOIN t1 ON
dt.a=t1.b
WHERE t1.a LIKE 'B%';
a
set optimizer_switch='split_materialized=on';
SELECT t1.a
FROM (SELECT a FROM t2 GROUP BY a ORDER BY a, COUNT(DISTINCT b) LIMIT 1) dt
JOIN t1 ON
dt.a=t1.b
WHERE t1.a LIKE 'B%';
a
DROP TABLE t1,t2;
SET optimizer_switch= @save_optimizer_switch;
#
# MDEV-29638 Crash when considering Split-Materialized plan
#
set @save_optimizer_switch= @@optimizer_switch;
set optimizer_switch='condition_pushdown_for_derived=off,split_materialized=on';
CREATE TABLE t1 (id int PRIMARY KEY)engine=innodb;
CREATE TABLE t2 (id int PRIMARY KEY, c int) engine=innodb;
CREATE TABLE t3 (id int PRIMARY KEY, a int , b int, KEY (a))engine=innodb;
SELECT * FROM
(
SELECT DISTINCT t1.id
FROM t1 JOIN
(
SELECT t2.id FROM t2 JOIN t3
ON t3.id = t2.c
WHERE (t3.a > 2 AND t3.b = 2)
GROUP BY t2.id
) m2 ON m2.id = t1.id
) dt;
id
drop table t1, t2, t3;
SET optimizer_switch= @save_optimizer_switch;
# End of 10.11 tests
#
# MDEV-37057 Wrong result with LATERAL DERIVED

@@ -632,6 +632,77 @@ evalp set statement optimizer_switch='split_materialized=off' for $query;

DROP TABLE t1,t2;

--echo #
--echo # MDEV-37407 Wrong result with ORDER BY LIMIT
--echo # Both, with and without split_materialized should
--echo # produce the same results
--echo #

SET @save_optimizer_switch= @@optimizer_switch;

CREATE TABLE t1
(a varchar(35), b varchar(4), KEY (a))
ENGINE=InnoDB;

INSERT INTO t1 VALUES
('Albania','AXA'), ('Australia','AUS'), ('Myanmar','MMR'),
('Bahamas','BS'), ('Brazil','BRA'), ('Barbados','BRB');

CREATE TABLE t2
(a varchar(4), b varchar(50), PRIMARY KEY (b,a), KEY (a))
ENGINE=InnoDB;

INSERT INTO t2 VALUES
('AUS','Anglican'), ('MMR','Baptist'), ('BS','Anglican'),
('BS','Baptist'), ('BS','Methodist'), ('BRB','Methodist'),
('BRA','Baptist'), ('USA','Baptist');

ANALYZE TABLE t1 PERSISTENT FOR ALL;
ANALYZE TABLE t2 PERSISTENT FOR ALL;

let $q=
SELECT t1.a
FROM (SELECT a FROM t2 GROUP BY a ORDER BY a, COUNT(DISTINCT b) LIMIT 1) dt
JOIN t1 ON
dt.a=t1.b
WHERE t1.a LIKE 'B%';

set optimizer_switch='split_materialized=off';
eval $q;

set optimizer_switch='split_materialized=on';
eval $q;

DROP TABLE t1,t2;

SET optimizer_switch= @save_optimizer_switch;

--echo #
--echo # MDEV-29638 Crash when considering Split-Materialized plan
--echo #

set @save_optimizer_switch= @@optimizer_switch;
set optimizer_switch='condition_pushdown_for_derived=off,split_materialized=on';

CREATE TABLE t1 (id int PRIMARY KEY)engine=innodb;
CREATE TABLE t2 (id int PRIMARY KEY, c int) engine=innodb;
CREATE TABLE t3 (id int PRIMARY KEY, a int , b int, KEY (a))engine=innodb;

SELECT * FROM
(
SELECT DISTINCT t1.id
FROM t1 JOIN
(
SELECT t2.id FROM t2 JOIN t3
ON t3.id = t2.c
WHERE (t3.a > 2 AND t3.b = 2)
GROUP BY t2.id
) m2 ON m2.id = t1.id
) dt;

drop table t1, t2, t3;
SET optimizer_switch= @save_optimizer_switch;

--echo # End of 10.11 tests

--echo #

@@ -212,7 +212,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 4 NULL 1 Using index
explain SELECT distinct a from t3 order by a desc limit 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 index NULL a 5 NULL 40 Using index
1 SIMPLE t3 range a a 5 NULL 10 Using index for group-by; Using temporary; Using filesort
explain SELECT distinct a,b from t3 order by a+1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 ALL NULL NULL NULL NULL 204 Using temporary; Using filesort

@@ -125,13 +125,4 @@ SELECT f1(1);
Got one of the listed errors
DROP FUNCTION f1;
SET debug_dbug= @saved_dbug;
#
# MDEV-27978 wrong option name in error when exceeding max_session_mem_used
#
SET SESSION max_session_mem_used = 8192;
SELECT * FROM information_schema.processlist;
ERROR HY000: The MariaDB server is running with the --max-session-mem-used=8192 option so it cannot execute this statement
SET SESSION max_session_mem_used = DEFAULT;
#
# End of 10.2 tests
#

@@ -144,8 +144,6 @@ SELECT a FROM t1 ORDER BY rand(1);
--echo #cleanup
DROP TABLE t1, pid_table;



--echo #
--echo # MDEV-12416 OOM in create_virtual_tmp_table() makes the server crash
--echo #
@@ -158,15 +156,4 @@ SELECT f1(1);
DROP FUNCTION f1;
SET debug_dbug= @saved_dbug;

--echo #
--echo # MDEV-27978 wrong option name in error when exceeding max_session_mem_used
--echo #
SET SESSION max_session_mem_used = 8192;
--error ER_OPTION_PREVENTS_STATEMENT
SELECT * FROM information_schema.processlist;
SET SESSION max_session_mem_used = DEFAULT;


--echo #
--echo # End of 10.2 tests
--echo #

@@ -1,4 +1,3 @@
drop table if exists t1;
insert into t1 values(1);
ERROR 42S02: Table 'test.t1' doesn't exist
delete from t1;
@@ -170,12 +169,6 @@ UPDATE t1 SET a = 'new'
WHERE COLUMN_CREATE( 1, 'v', 1, 'w' ) IS NULL;
ERROR 22007: Illegal value used as argument of dynamic column function
drop table t1;
set @max_session_mem_used_save= @@max_session_mem_used;
set max_session_mem_used = 50000;
select * from seq_1_to_1000;
set max_session_mem_used = 8192;
select * from seq_1_to_1000;
set max_session_mem_used = @max_session_mem_used_save;
#
# MDEV-20604: Duplicate key value is silently truncated to 64
# characters in print_keydup_error
@@ -231,16 +224,3 @@ Error 1327 Undeclared variable: foo
Error 1305 PROCEDURE P1 does not exist
drop procedure P1;
# End of 10.4 tests
#
# MDEV-35828: Assertion fails in alloc_root() when memory causes it to call itself
#
CREATE TEMPORARY TABLE t1 (a INT,b INT);
INSERT INTO t1 VALUES (1,1),(2,2);
SET
@tmp=@@max_session_mem_used,
max_session_mem_used=8192;
SELECT * FROM (t1 AS t2 LEFT JOIN t1 AS t3 USING (a)),t1;
ERROR HY000: The MariaDB server is running with the --max-session-mem-used=8192 option so it cannot execute this statement
DROP TABLE t1;
SET max_session_mem_used=@tmp;
# End of 10.6 tests

@@ -3,31 +3,28 @@
#
--source include/have_sequence.inc

--disable_warnings
drop table if exists t1;
--enable_warnings
--error 1146
--error ER_NO_SUCH_TABLE
insert into t1 values(1);
--error 1146
--error ER_NO_SUCH_TABLE
delete from t1;
--error 1146
--error ER_NO_SUCH_TABLE
update t1 set a=1;
create table t1 (a int);
--error 1054
--error ER_BAD_FIELD_ERROR
select count(test.t1.b) from t1;
--error 1054
--error ER_BAD_FIELD_ERROR
select count(not_existing_database.t1) from t1;
--error 1054
--error ER_BAD_FIELD_ERROR
select count(not_existing_database.t1.a) from t1;
--error 1044,1146
--error ER_DBACCESS_DENIED_ERROR,ER_NO_SUCH_TABLE
select count(not_existing_database.t1.a) from not_existing_database.t1;
--error 1054
--error ER_BAD_FIELD_ERROR
select 1 from t1 order by 2;
--error 1054
--error ER_BAD_FIELD_ERROR
select 1 from t1 group by 2;
--error 1054
--error ER_BAD_FIELD_ERROR
select 1 from t1 order by t1.b;
--error 1054
--error ER_BAD_FIELD_ERROR
select count(*),b from t1;
drop table t1;

@@ -36,10 +33,10 @@ drop table t1;
#
# Bug #6080: Error message for a field with a display width that is too long
#
--error 1439
--error ER_TOO_BIG_DISPLAYWIDTH
create table t1 (a int(256));
set sql_mode='traditional';
--error 1074
--error ER_TOO_BIG_FIELDLENGTH
create table t1 (a varchar(66000));
set sql_mode=default;

@@ -95,7 +92,7 @@ delimiter ;|

flush status;
--disable_ps2_protocol
--error 1062
--error ER_DUP_ENTRY
select f1(), f2();
--enable_ps2_protocol
show status like 'Com_insert';
@@ -202,24 +199,6 @@ UPDATE t1 SET a = 'new'
WHERE COLUMN_CREATE( 1, 'v', 1, 'w' ) IS NULL;
drop table t1;

#
# errors caused by max_session_mem_used
#
set @max_session_mem_used_save= @@max_session_mem_used;

--disable_result_log
set max_session_mem_used = 50000;
--error 0,ER_OPTION_PREVENTS_STATEMENT
select * from seq_1_to_1000;
set max_session_mem_used = 8192;
--error 0,ER_OPTION_PREVENTS_STATEMENT
select * from seq_1_to_1000;
--enable_result_log
# We may not be able to execute any more queries with this connection
# because of too little memory#

set max_session_mem_used = @max_session_mem_used_save;

--echo #
--echo # MDEV-20604: Duplicate key value is silently truncated to 64
--echo # characters in print_keydup_error
@@ -283,24 +262,4 @@ show warnings;

drop procedure P1;

-- echo # End of 10.4 tests


--echo #
--echo # MDEV-35828: Assertion fails in alloc_root() when memory causes it to call itself
--echo #
CREATE TEMPORARY TABLE t1 (a INT,b INT);
INSERT INTO t1 VALUES (1,1),(2,2);

SET
@tmp=@@max_session_mem_used,
max_session_mem_used=8192;

--error ER_OPTION_PREVENTS_STATEMENT
SELECT * FROM (t1 AS t2 LEFT JOIN t1 AS t3 USING (a)),t1;

DROP TABLE t1;
SET max_session_mem_used=@tmp;


--echo # End of 10.6 tests
--echo # End of 10.4 tests

@@ -845,7 +845,7 @@ group by first_name, last_name
order by first_name
fetch first 2 rows with ties;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range t1_name t1_name 206 NULL 3 Using where; Using index for group-by
1 SIMPLE t1 range t1_name t1_name 206 NULL 3 Using where; Using index for group-by; Using temporary; Using filesort
select first_name, last_name
from t1
where first_name != 'John'
@@ -870,7 +870,7 @@ select * from temp_table
order by first_name, last_name;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2 Using filesort
2 DERIVED t1 range t1_name t1_name 206 NULL 3 Using where; Using index for group-by
2 DERIVED t1 range t1_name t1_name 206 NULL 3 Using where; Using index for group-by; Using temporary; Using filesort
with temp_table as (
select first_name, last_name
from t1
@@ -1461,3 +1461,88 @@ a b
3 bar
3 zzz
DROP TABLE t1;
#
# MDEV-37901: Wrong result with Loose Scan on QUICK_GROUP_MIN_MAX_SELECT WITH TIES
#
create table t1 (
country varchar(100),
city varchar(100),
user_score int,
index (country, city, user_score)
);
insert into t1
select 'China', 'Shenzhen', seq from seq_10_to_100;
insert into t1
select 'India', 'New Delhi', seq from seq_10_to_100;
insert into t1
select 'Sri Lanka', 'Colombo', seq from seq_10_to_100;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
insert into t1
select 'Finland', 'Espoo', seq from seq_10_to_200;
insert into t1
select 'Greece', 'Chania', seq from seq_10_to_20;
insert into t1
select 'Estonia', 'Narva', seq from seq_10_to_20;
insert into t1
select 'Russia', 'St Petersburg', seq from seq_10_to_20;
# Must use "Using index for group-by":
explain
select country, city, min(user_score)
from t1
where user_score between 9 and 199
group by country, city
order by country
fetch first 5 rows with ties;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL country 211 NULL 4 Using where; Using index for group-by; Using temporary; Using filesort
# Must not use "Using index for group-by":
explain
select country, city, sum(user_score)
from t1
where user_score between 9 and 199
group by country, concat(city,'AA')
order by country
fetch first 5 rows with ties;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL country 211 NULL 273 Using where; Using index; Using temporary; Using filesort
select country, city, sum(user_score)
from t1
where user_score between 9 and 199
group by country, concat(city,'AA')
order by country
fetch first 5 rows with ties;
country city sum(user_score)
China Shenzhen 5005
Estonia Narva 165
Finland Espoo 19855
Greece Chania 165
India New Delhi 5005
# both using index and index with group by should produce same result
select country, city, min(user_score)
from t1
where user_score between 9 and 199
group by country, city
order by country
fetch first 5 rows with ties;
country city min(user_score)
China Shenzhen 10
Estonia Narva 10
Finland Espoo 10
Greece Chania 10
India New Delhi 10
select country, city, min(user_score)
from t1 use index()
where user_score between 9 and 199
group by country, city
order by country
fetch first 5 rows with ties;
country city min(user_score)
China Shenzhen 10
Estonia Narva 10
Finland Espoo 10
Greece Chania 10
India New Delhi 10
drop table t1;

@@ -1095,3 +1095,77 @@ SELECT DISTINCT a, b FROM t1 ORDER BY a FETCH FIRST 3 ROWS WITH TIES;

# Cleanup
DROP TABLE t1;


--echo #
--echo # MDEV-37901: Wrong result with Loose Scan on QUICK_GROUP_MIN_MAX_SELECT WITH TIES
--echo #

--source include/have_sequence.inc

create table t1 (
country varchar(100),
city varchar(100),
user_score int,
index (country, city, user_score)
);

insert into t1
select 'China', 'Shenzhen', seq from seq_10_to_100;
insert into t1
select 'India', 'New Delhi', seq from seq_10_to_100;
insert into t1
select 'Sri Lanka', 'Colombo', seq from seq_10_to_100;
analyze table t1 persistent for all;

insert into t1
select 'Finland', 'Espoo', seq from seq_10_to_200;
insert into t1
select 'Greece', 'Chania', seq from seq_10_to_20;
insert into t1
select 'Estonia', 'Narva', seq from seq_10_to_20;
insert into t1
select 'Russia', 'St Petersburg', seq from seq_10_to_20;

--echo # Must use "Using index for group-by":
explain
select country, city, min(user_score)
from t1
where user_score between 9 and 199
group by country, city
order by country
fetch first 5 rows with ties;

--echo # Must not use "Using index for group-by":
explain
select country, city, sum(user_score)
from t1
where user_score between 9 and 199
group by country, concat(city,'AA')
order by country
fetch first 5 rows with ties;

select country, city, sum(user_score)
from t1
where user_score between 9 and 199
group by country, concat(city,'AA')
order by country
fetch first 5 rows with ties;

--echo # both using index and index with group by should produce same result

select country, city, min(user_score)
from t1
where user_score between 9 and 199
group by country, city
order by country
fetch first 5 rows with ties;

select country, city, min(user_score)
from t1 use index()
where user_score between 9 and 199
group by country, city
order by country
fetch first 5 rows with ties;

drop table t1;

@@ -2692,6 +2692,77 @@ json_detailed('[[123],456]')
SELECT JSON_VALUE(JSON_OBJECT("a", ""), '$.a') = "" AS not_null;
not_null
1
#
# MDEV-36319: Wrong result json_table
#
SET @JSON='
{
  "SZ": [
    {
      "NAME": "S0",
      "OFFERS": [
        {
          "NAME": "S0A0"
        }
      ]
    },
    {
      "NAME": "S1",
      "OFFERS": [
        {
          "NAME": "S1A0"
        },
        {
          "NAME": "S1A1"
        }
      ]
    },
    {
      "NAME": "S2",
      "OFFERS": [
        {
          "NAME": "S2A0"
        }
      ]
    },
    {
      "NAME": "S3",
      "OFFERS": [
        {
          "NAME": "S3A0"
        }
      ]
    },
    {
      "NAME": "S4",
      "OFFERS": [
        {
          "NAME": "S4A0"
        }
      ]
    },
    {
      "NAME": "S5",
      "OFFERS": [
        {
          "NAME": "S5A0"
        }
      ]
    }
  ]
}

'
;
# Should return EMPTY result
SELECT * FROM json_table(@JSON, '$.SZ[0].OFFERS[1]'
COLUMNS(NAME VARCHAR(30) PATH '$.NAME')) AS t_sz;
NAME
# Should return S1A1
SELECT * FROM json_table(@JSON, '$.SZ[1].OFFERS[1]'
COLUMNS(NAME VARCHAR(30) PATH '$.NAME')) AS t_sz;
NAME
S1A1
# End of 10.11 Test
#
# MDEV-32007: JSON_VALUE and JSON_EXTRACT doesn't handle dash (-)

@@ -1955,6 +1955,78 @@ select json_detailed('[[123],456]');

SELECT JSON_VALUE(JSON_OBJECT("a", ""), '$.a') = "" AS not_null;

--echo #
--echo # MDEV-36319: Wrong result json_table
--echo #

SET @JSON='
{
  "SZ": [
    {
      "NAME": "S0",
      "OFFERS": [
        {
          "NAME": "S0A0"
        }
      ]
    },
    {
      "NAME": "S1",
      "OFFERS": [
        {
          "NAME": "S1A0"
        },
        {
          "NAME": "S1A1"
        }
      ]
    },
    {
      "NAME": "S2",
      "OFFERS": [
        {
          "NAME": "S2A0"
        }
      ]
    },
    {
      "NAME": "S3",
      "OFFERS": [
        {
          "NAME": "S3A0"
        }
      ]
    },
    {
      "NAME": "S4",
      "OFFERS": [
        {
          "NAME": "S4A0"
        }
      ]
    },
    {
      "NAME": "S5",
      "OFFERS": [
        {
          "NAME": "S5A0"
        }
      ]
    }
  ]
}

'
;

--echo # Should return EMPTY result
SELECT * FROM json_table(@JSON, '$.SZ[0].OFFERS[1]'
COLUMNS(NAME VARCHAR(30) PATH '$.NAME')) AS t_sz;

--echo # Should return S1A1
SELECT * FROM json_table(@JSON, '$.SZ[1].OFFERS[1]'
COLUMNS(NAME VARCHAR(30) PATH '$.NAME')) AS t_sz;

--echo # End of 10.11 Test

--echo #

@@ -5414,8 +5414,14 @@ t1 CREATE TABLE `t1` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
DROP TABLE t1;
#
# End of 10.6 tests
# MDEV-37947 Item_func_hex doesn't check for max_allowed_packet
#
select hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex('\\'))))))))))))))))))))))))))))))))))))))))))))) as x;
x
NULL
Warnings:
Warning 1301 Result of hex() was larger than max_allowed_packet (16777216) - truncated
# End of 10.6 tests
#
# MDEV-25704 Function random_bytes
#

@@ -2448,8 +2448,11 @@ SHOW CREATE TABLE t1;
DROP TABLE t1;

--echo #
--echo # End of 10.6 tests
--echo # MDEV-37947 Item_func_hex doesn't check for max_allowed_packet
--echo #
select hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex(hex('\\'))))))))))))))))))))))))))))))))))))))))))))) as x;

--echo # End of 10.6 tests

--echo #
--echo # MDEV-25704 Function random_bytes

@@ -2465,7 +2465,7 @@ EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE EXISTS
(SELECT max(b) FROM t1 GROUP BY a HAVING a < 2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1_outer index NULL a 10 NULL 15 Using index
2 SUBQUERY t1 index NULL a 10 NULL 15 Using index
2 SUBQUERY t1 range a a 5 NULL 6 Using index for group-by
EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE
(SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) > 12;
id select_type table type possible_keys key key_len ref rows Extra

mysql-test/main/max_session_mem_used.result | new file, 48 lines
@@ -0,0 +1,48 @@
#
# MDEV-23836: Assertion `! is_set() || m_can_overwrite_status' in
# Diagnostics_area::set_error_status (interrupted ALTER TABLE under LOCK)
#
CREATE TABLE t1 (a INT);
SELECT * FROM t1;
a
ALTER TABLE x MODIFY xx INT;
ERROR 42S02: Table 'test.x' doesn't exist
SET SESSION max_session_mem_used= 8192;
LOCK TABLE t1 WRITE;
ALTER TABLE t1 CHANGE COLUMN IF EXISTS b c INT;
SET SESSION max_session_mem_used = DEFAULT;
UNLOCK TABLES;
DROP TABLE t1;
#
# MDEV-27978 wrong option name in error when exceeding max_session_mem_used
#
SET SESSION max_session_mem_used = 8192;
SELECT * FROM information_schema.processlist;
ERROR HY000: The MariaDB server is running with the --max-session-mem-used=8192 option so it cannot execute this statement
SET SESSION max_session_mem_used = DEFAULT;
set max_session_mem_used = 50000;
select * from seq_1_to_1000;
set max_session_mem_used = 8192;
select * from seq_1_to_1000;
set max_session_mem_used = DEFAULT;
#
# MDEV-35828: Assertion fails in alloc_root() when memory causes it to call itself
#
CREATE TEMPORARY TABLE t1 (a INT,b INT);
INSERT INTO t1 VALUES (1,1),(2,2);
SET max_session_mem_used=8192;
SELECT * FROM (t1 AS t2 LEFT JOIN t1 AS t3 USING (a)),t1;
ERROR HY000: The MariaDB server is running with the --max-session-mem-used=8192 option so it cannot execute this statement
SET max_session_mem_used=DEFAULT;
DROP TABLE t1;
#
# MDEV-23824 SIGSEGV in end_io_cache on REPAIR LOCAL TABLE for Aria table
#
CREATE TABLE t1 (i INT) ENGINE=Aria;
INSERT INTO t1 VALUES (1);
SET max_session_mem_used=50000;
REPAIR LOCAL TABLE t1 USE_FRM;
REPAIR LOCAL TABLE t1;
DROP TABLE t1;
SET max_session_mem_used=default;
# End of 10.6 tests
mysql-test/main/max_session_mem_used.test | new file, 85 lines
@@ -0,0 +1,85 @@
# memory usage is sensitive to valgrind/ps-protocol/embedded
source include/not_msan.inc;
source include/not_valgrind.inc;
source include/no_protocol.inc;
source include/not_embedded.inc;
source include/have_64bit.inc;
source include/have_sequence.inc;

--echo #
--echo # MDEV-23836: Assertion `! is_set() || m_can_overwrite_status' in
--echo # Diagnostics_area::set_error_status (interrupted ALTER TABLE under LOCK)
--echo #

CREATE TABLE t1 (a INT);
SELECT * FROM t1;

--error ER_NO_SUCH_TABLE
ALTER TABLE x MODIFY xx INT;

SET SESSION max_session_mem_used= 8192;
--error 0,ER_OPTION_PREVENTS_STATEMENT
LOCK TABLE t1 WRITE;

--disable_warnings
--error 0,ER_OPTION_PREVENTS_STATEMENT
ALTER TABLE t1 CHANGE COLUMN IF EXISTS b c INT;
--enable_warnings

SET SESSION max_session_mem_used = DEFAULT;
UNLOCK TABLES;
DROP TABLE t1;

--echo #
--echo # MDEV-27978 wrong option name in error when exceeding max_session_mem_used
--echo #
SET SESSION max_session_mem_used = 8192;
--error ER_OPTION_PREVENTS_STATEMENT
SELECT * FROM information_schema.processlist;
SET SESSION max_session_mem_used = DEFAULT;

#
# errors caused by max_session_mem_used
#
--disable_result_log
set max_session_mem_used = 50000;
--error 0,ER_OPTION_PREVENTS_STATEMENT
select * from seq_1_to_1000;
set max_session_mem_used = 8192;
--error 0,ER_OPTION_PREVENTS_STATEMENT
select * from seq_1_to_1000;
--enable_result_log
# We may not be able to execute any more queries with this connection
# because of too little memory

set max_session_mem_used = DEFAULT;

--echo #
--echo # MDEV-35828: Assertion fails in alloc_root() when memory causes it to call itself
--echo #
CREATE TEMPORARY TABLE t1 (a INT,b INT);
INSERT INTO t1 VALUES (1,1),(2,2);

SET max_session_mem_used=8192;

--error ER_OPTION_PREVENTS_STATEMENT
SELECT * FROM (t1 AS t2 LEFT JOIN t1 AS t3 USING (a)),t1;

SET max_session_mem_used=DEFAULT;
DROP TABLE t1;

--echo #
--echo # MDEV-23824 SIGSEGV in end_io_cache on REPAIR LOCAL TABLE for Aria table
--echo #

CREATE TABLE t1 (i INT) ENGINE=Aria;
INSERT INTO t1 VALUES (1);
SET max_session_mem_used=50000;
--disable_result_log
REPAIR LOCAL TABLE t1 USE_FRM;
REPAIR LOCAL TABLE t1;
--enable_result_log
DROP TABLE t1;
SET max_session_mem_used=default;

--echo # End of 10.6 tests
mysql-test/main/merge_alter-master.opt | new file, 1 line
@@ -0,0 +1 @@
--timezone=GMT-3
mysql-test/main/merge_alter.result | new file, 77 lines
@@ -0,0 +1,77 @@
SET timestamp=1000000000;
RESET MASTER;
CREATE TABLE t (i1 int, i2 int, pk int) ;
CREATE TABLE t3 LIKE t ;
ALTER TABLE t3 ENGINE = MERGE UNION (t1,t2);
insert into t values(1,1,1);
flush logs;
/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/;
/*!40019 SET @@session.max_delayed_threads=0*/;
/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
DELIMITER /*!*/;
# at #
#010909  4:46:40 server id # end_log_pos # CRC32 XXX Start: binlog v 4, server v #.##.## created 010909  4:46:40 at startup
ROLLBACK/*!*/;
# at #
#010909  4:46:40 server id # end_log_pos # CRC32 XXX Gtid list []
# at #
#010909  4:46:40 server id # end_log_pos # CRC32 XXX Binlog checkpoint master-bin.000001
# at #
#010909  4:46:40 server id # end_log_pos # CRC32 XXX GTID 0-1-1 ddl
/*M!100101 SET @@session.skip_parallel_replication=0*//*!*/;
/*M!100001 SET @@session.gtid_domain_id=0*//*!*/;
/*M!100001 SET @@session.server_id=1*//*!*/;
/*M!100001 SET @@session.gtid_seq_no=1*//*!*/;
# at #
#010909  4:46:40 server id # end_log_pos # CRC32 XXX Query thread_id=# exec_time=# error_code=0 xid=<xid>
use `test`/*!*/;
SET TIMESTAMP=1000000000/*!*/;
SET @@session.pseudo_thread_id=#/*!*/;
SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=0, @@session.unique_checks=1, @@session.autocommit=1, @@session.check_constraint_checks=1, @@session.sql_if_exists=0, @@session.explicit_defaults_for_timestamp=1, @@session.system_versioning_insert_history=0/*!*/;
SET @@session.sql_mode=1411383296/*!*/;
SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/;
/*!\C latin1 *//*!*/;
SET @@session.character_set_client=X,@@session.collation_connection=X,@@session.collation_server=X/*!*/;
SET @@session.lc_time_names=0/*!*/;
SET @@session.collation_database=DEFAULT/*!*/;
CREATE TABLE t (i1 int, i2 int, pk int)
/*!*/;
# at #
#010909  4:46:40 server id # end_log_pos # CRC32 XXX GTID 0-1-2 ddl
/*M!100001 SET @@session.gtid_seq_no=2*//*!*/;
# at #
#010909  4:46:40 server id # end_log_pos # CRC32 XXX Query thread_id=# exec_time=# error_code=0 xid=<xid>
SET TIMESTAMP=1000000000/*!*/;
CREATE TABLE t3 LIKE t
/*!*/;
# at #
#010909  4:46:40 server id # end_log_pos # CRC32 XXX GTID 0-1-3 ddl
/*M!100001 SET @@session.gtid_seq_no=3*//*!*/;
# at #
#010909  4:46:40 server id # end_log_pos # CRC32 XXX Query thread_id=# exec_time=# error_code=0 xid=<xid>
SET TIMESTAMP=1000000000/*!*/;
ALTER TABLE t3 ENGINE = MERGE UNION (t1,t2)
/*!*/;
# at #
#010909  4:46:40 server id # end_log_pos # CRC32 XXX GTID 0-1-4
/*M!100001 SET @@session.gtid_seq_no=4*//*!*/;
START TRANSACTION
/*!*/;
# at #
#010909  4:46:40 server id # end_log_pos # CRC32 XXX Query thread_id=# exec_time=# error_code=0 xid=<xid>
SET TIMESTAMP=1000000000/*!*/;
insert into t values(1,1,1)
/*!*/;
# at #
#010909  4:46:40 server id # end_log_pos # CRC32 XXX Query thread_id=# exec_time=# error_code=0 xid=<xid>
SET TIMESTAMP=1000000000/*!*/;
COMMIT
/*!*/;
# at #
#010909  4:46:40 server id # end_log_pos # CRC32 XXX Rotate to master-bin.000002  pos: 4
DELIMITER ;
# End of log file
ROLLBACK /* added by mysqlbinlog */;
/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/;
drop table t,t3;
mysql-test/main/merge_alter.test | new file, 20 lines
@@ -0,0 +1,20 @@
--source include/have_binlog_format_mixed.inc

# MDEV-37903 ALTER TABLE ... ENGINE=MRG_MyISAM is not binlogged as DDL

# Fix timestamp to avoid varying results.
SET timestamp=1000000000;

RESET MASTER;

CREATE TABLE t (i1 int, i2 int, pk int) ;
CREATE TABLE t3 LIKE t ;
ALTER TABLE t3 ENGINE = MERGE UNION (t1,t2);
insert into t values(1,1,1);

flush logs;
let $MYSQLD_DATADIR= `select @@datadir`;
--replace_regex /server id [0-9]*/server id #/ /server v [^ ]*/server v #.##.##/ /exec_time=[0-9]*/exec_time=#/ /thread_id=[0-9]*/thread_id=#/ /table id [0-9]*/table id #/ /mapped to number [0-9]*/mapped to number #/ /end_log_pos [0-9]*/end_log_pos #/ /# at [0-9]*/# at #/ /CRC32 0x[0-9a-f]*/CRC32 XXX/ /collation_server=[0-9]+/collation_server=X/ /character_set_client=[a-zA-Z0-9]+/character_set_client=X/ /collation_connection=[0-9]+/collation_connection=X/ /xid=\d*/xid=<xid>/
--exec $MYSQL_BINLOG --base64-output=decode-rows -v -v $MYSQLD_DATADIR/master-bin.000001

drop table t,t3;
@@ -3794,6 +3794,27 @@ DROP TABLE t1;
#
# End of 10.5 tests
#
#
# MDEV-37913: disable_index_merge_plans causes SELECT data loss when more than 100 ORs
#
CREATE TABLE t1 (
id int primary key,
key1 int,
index(key1)
);
INSERT INTO t1 VALUES
(1, 1),
(2, 1),
(3, 2);
$query;
id key1
1 1
2 1
3 2
drop table t1;
#
# End of 10.11 tests
#
set global innodb_stats_persistent= @innodb_stats_persistent_save;
set global innodb_stats_persistent_sample_pages=
@innodb_stats_persistent_sample_pages_save;

@@ -2558,6 +2558,32 @@ DROP TABLE t1;
--echo # End of 10.5 tests
--echo #

--echo #
--echo # MDEV-37913: disable_index_merge_plans causes SELECT data loss when more than 100 ORs
--echo #
CREATE TABLE t1 (
id int primary key,
key1 int,
index(key1)
);

INSERT INTO t1 VALUES
(1, 1),
(2, 1),
(3, 2);

let $query=`
select concat('select * from t1 where (key1 = 2 AND id = 3) ',
REPEAT(' OR (key1 = 1)', 100))
`;

evalp $query;

drop table t1;

--echo #
--echo # End of 10.11 tests
--echo #
set global innodb_stats_persistent= @innodb_stats_persistent_save;
set global innodb_stats_persistent_sample_pages=
@innodb_stats_persistent_sample_pages_save;

@@ -3792,6 +3792,27 @@ DROP TABLE t1;
#
# End of 10.5 tests
#
#
# MDEV-37913: disable_index_merge_plans causes SELECT data loss when more than 100 ORs
#
CREATE TABLE t1 (
id int primary key,
key1 int,
index(key1)
);
INSERT INTO t1 VALUES
(1, 1),
(2, 1),
(3, 2);
$query;
id key1
1 1
2 1
3 2
drop table t1;
#
# End of 10.11 tests
#
set global innodb_stats_persistent= @innodb_stats_persistent_save;
set global innodb_stats_persistent_sample_pages=
@innodb_stats_persistent_sample_pages_save;

@@ -1,3 +1,6 @@
#
# MDEV-6610 Assertion `thd->is_error() || thd->killed' failed in mysql_execute_command on executing an SP with repeated CREATE TABLE .. SELECT
#
CREATE TABLE t1 (i INT);
SET @a = 2;
CREATE TABLE IF NOT EXISTS t2 (i INT) ENGINE = MyISAM
@@ -94,4 +97,30 @@ CALL p1();
# Clean up
DROP FUNCTION cnt;
DROP PROCEDURE p1;
#
# MDEV-37710 ASAN errors in find_type2 upon executing a procedure from sys schema
#
create procedure p1()
begin
declare found int;
repeat
set found = exists (select * from information_schema.routines where routine_name='f');
if (sys.ps_is_consumer_enabled('events_waits_history_long') = 'yes') then
select * from mysql.user;
end if;
select release_all_locks();
until found end repeat;
end$$
select get_lock('p1', 300);
get_lock('p1', 300)
1
call p1();
connect con1,localhost,root,,;
select get_lock('p1', 300);
get_lock('p1', 300)
1
create function f() returns int return 1;
connection default;
drop function f;
drop procedure p1;
# End of 10.11 tests

@@ -1,6 +1,6 @@
#
# MDEV-6610 Assertion `thd->is_error() || thd->killed' failed in mysql_execute_command on executing an SP with repeated CREATE TABLE .. SELECT
#
--echo #
--echo # MDEV-6610 Assertion `thd->is_error() || thd->killed' failed in mysql_execute_command on executing an SP with repeated CREATE TABLE .. SELECT
--echo #
CREATE TABLE t1 (i INT);
SET @a = 2;

@@ -97,4 +97,32 @@ CALL p1();
DROP FUNCTION cnt;
DROP PROCEDURE p1;

--echo #
--echo # MDEV-37710 ASAN errors in find_type2 upon executing a procedure from sys schema
--echo #
delimiter $$;
create procedure p1()
begin
declare found int;
repeat
set found = exists (select * from information_schema.routines where routine_name='f');
if (sys.ps_is_consumer_enabled('events_waits_history_long') = 'yes') then
select * from mysql.user;
end if;
select release_all_locks();
until found end repeat;
end$$
delimiter ;$$
select get_lock('p1', 300);
--send call p1()
--connect con1,localhost,root,,
select get_lock('p1', 300);
create function f() returns int return 1;
--connection default
--disable_result_log
--reap
--enable_result_log
drop function f;
drop procedure p1;

--echo # End of 10.11 tests

@@ -620,6 +620,41 @@ id name
4 xxx
5 yyy
DEALLOCATE PREPARE stmt;
#
# FederatedX error 10000 on multi-table UPDATE/DELETE
#
connection slave;
DROP TABLE IF EXISTS federated.t1, federated.t2;
CREATE TABLE federated.t1 (a int, b int);
INSERT INTO federated.t1 VALUES (1,1), (2,2), (3,3);
CREATE TABLE federated.t2 (a int, b int);
INSERT INTO federated.t2 VALUES (1,1), (2,2), (4,4);
connection master;
DROP TABLE IF EXISTS federated.t1, federated.t2;
CREATE TABLE federated.t1 (a int, b int)
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
CREATE TABLE federated.t2 (a int, b int)
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2';
use federated;
# Multi-table UPDATE
UPDATE t1, t2 SET t1.a = 2 WHERE t1.a=t2.a;
# Check the result
SELECT * FROM t1;
a b
2 1
2 2
3 3
# Multi-table DELETE
DELETE FROM t1 USING t1 JOIN t2 ON t1.a = t2.a WHERE t2.b > 1;
SELECT * FROM t1;
a b
3 3
# Another form of multi-table DELETE
DELETE FROM a1 USING t1 AS a1;
SELECT * FROM t1;
a b
DROP TABLES federated.t1, federated.t2, federated.t3, federated.t10,
federated.t11;
connection slave;

@@ -435,6 +435,50 @@ EXECUTE stmt;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;

--echo #
--echo # FederatedX error 10000 on multi-table UPDATE/DELETE
--echo #

connection slave;
DROP TABLE IF EXISTS federated.t1, federated.t2;

CREATE TABLE federated.t1 (a int, b int);
INSERT INTO federated.t1 VALUES (1,1), (2,2), (3,3);

CREATE TABLE federated.t2 (a int, b int);
INSERT INTO federated.t2 VALUES (1,1), (2,2), (4,4);

connection master;
DROP TABLE IF EXISTS federated.t1, federated.t2;
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval
CREATE TABLE federated.t1 (a int, b int)
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';

--replace_result $SLAVE_MYPORT SLAVE_PORT
eval
CREATE TABLE federated.t2 (a int, b int)
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2';

use federated;

--echo # Multi-table UPDATE
UPDATE t1, t2 SET t1.a = 2 WHERE t1.a=t2.a;

--echo # Check the result
SELECT * FROM t1;

--echo # Multi-table DELETE
DELETE FROM t1 USING t1 JOIN t2 ON t1.a = t2.a WHERE t2.b > 1;

SELECT * FROM t1;

--echo # Another form of multi-table DELETE
DELETE FROM a1 USING t1 AS a1;

SELECT * FROM t1;

DROP TABLES federated.t1, federated.t2, federated.t3, federated.t10,
federated.t11;

@@ -15,3 +15,4 @@ galera_vote_rejoin_ddl : MDEV-35940 Unallowed state transition: donor -> synced
galera_vote_rejoin_dml : MDEV-35964 Assertion `ist_seqno >= cc_seqno' failed in galera_vote_rejoin_dml
galera_var_notify_cmd : MDEV-37257 WSREP: Notification command failed: 1 (Operation not permitted)
galera_var_notify_ssl_ipv6 : MDEV-37257 WSREP: Notification command failed: 1 (Operation not permitted)
MDEV-26266 : cannot work reliably

mysql-test/suite/galera/r/MDEV-37056.result | new file, 15 lines
@@ -0,0 +1,15 @@
connection node_2;
connection node_1;
connection node_1;
SET SESSION wsrep_on=OFF;
SET default_storage_engine=MYISAM;
CREATE SEQUENCE t;
SET SESSION wsrep_on=ON;
CREATE INDEX idx ON t (a);
ERROR HY000: Sequence 'test.t' table structure is invalid (Sequence tables cannot have any keys)
DROP SEQUENCE t;
SET default_storage_engine='MYISAM';
CREATE SEQUENCE t INCREMENT BY 0 CACHE=0 ENGINE=InnoDB;
CREATE INDEX c ON t (c);
ERROR HY000: Sequence 'test.t' table structure is invalid (Sequence tables cannot have any keys)
DROP SEQUENCE t;
mysql-test/suite/galera/r/MDEV-37857.result | new file, 44 lines
@@ -0,0 +1,44 @@
connection node_2;
connection node_1;
drop table if exists t1;
drop view if exists t1;
connection node_2;
SELECT @@character_set_server, @@collation_server;
@@character_set_server @@collation_server
latin1 latin1_swedish_ci
SELECT @@character_set_client, @@collation_connection;
@@character_set_client @@collation_connection
latin1 latin1_swedish_ci
connection node_1;
SET NAMES latin1 COLLATE latin1_bin;
SELECT @@character_set_server, @@collation_server;
@@character_set_server @@collation_server
latin1 latin1_swedish_ci
SELECT @@character_set_client, @@collation_connection;
@@character_set_client @@collation_connection
latin1 latin1_bin
create table t1 (a int);
insert into t1 values (1);
create view v1 as select a from t1;
SHOW CREATE VIEW v1;
View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a` from `t1` latin1 latin1_bin
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
  `a` int(11) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
connection node_2;
SHOW CREATE VIEW v1;
View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a` from `t1` latin1 latin1_bin
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
  `a` int(11) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
connection node_1;
DROP VIEW v1;
DROP TABLE t1;
disconnect node_2;
disconnect node_1;
mysql-test/suite/galera/t/MDEV-37056.test | new file, 18 lines
@@ -0,0 +1,18 @@
--source include/galera_cluster.inc

--connection node_1
SET SESSION wsrep_on=OFF;
SET default_storage_engine=MYISAM;
CREATE SEQUENCE t;
SET SESSION wsrep_on=ON;
--error ER_SEQUENCE_INVALID_TABLE_STRUCTURE
CREATE INDEX idx ON t (a);
DROP SEQUENCE t;

SET default_storage_engine='MYISAM';
CREATE SEQUENCE t INCREMENT BY 0 CACHE=0 ENGINE=InnoDB;
--error ER_SEQUENCE_INVALID_TABLE_STRUCTURE
CREATE INDEX c ON t (c);

# cleanup
DROP SEQUENCE t;
mysql-test/suite/galera/t/MDEV-37857.test | new file, 29 lines
@@ -0,0 +1,29 @@
--source include/galera_cluster.inc

--disable_warnings
drop table if exists t1;
drop view if exists t1;
--enable_warnings

--connection node_2
SELECT @@character_set_server, @@collation_server;
SELECT @@character_set_client, @@collation_connection;
--connection node_1
SET NAMES latin1 COLLATE latin1_bin;
SELECT @@character_set_server, @@collation_server;
SELECT @@character_set_client, @@collation_connection;
create table t1 (a int);
insert into t1 values (1);
create view v1 as select a from t1;
SHOW CREATE VIEW v1;
SHOW CREATE TABLE t1;

--connection node_2
SHOW CREATE VIEW v1;
SHOW CREATE TABLE t1;

--connection node_1
DROP VIEW v1;
DROP TABLE t1;

--source include/galera_end.inc
@@ -1,18 +0,0 @@
call mtr.add_suppression("\\[Warning\\] InnoDB: Cannot find a free slot for an undo log. Do you have too");
drop database if exists mysqltest;
create database mysqltest;
CREATE TABLE mysqltest.transtable (id int unsigned NOT NULL PRIMARY KEY, val int DEFAULT 0) ENGINE=InnoDB;
select count(*) from information_schema.processlist where command != 'Daemon';
count(*)
33
connection default;
CREATE TABLE mysqltest.testtable (id int unsigned not null primary key) ENGINE=InnoDB;
ERROR HY000: Can't create table `mysqltest`.`testtable` (errno: 177 "Too many active concurrent transactions")
select count(*) from information_schema.processlist where command != 'Daemon';
count(*)
33
connection default;
select count(*) from information_schema.processlist where command != 'Daemon';
count(*)
33
drop database mysqltest;
@@ -1,102 +0,0 @@
# Test for bug #12400341: INNODB CAN LEAVE ORPHAN IBD FILES AROUND

-- source include/have_debug.inc
-- source include/have_innodb.inc
-- source include/have_innodb_16k.inc

# Don't test under valgrind, undo slots of the previous test might exist still
# and cause unstable result.
--source include/not_valgrind.inc
# undo slots of the previous test might exist still
--source include/not_windows.inc

call mtr.add_suppression("\\[Warning\\] InnoDB: Cannot find a free slot for an undo log. Do you have too");

--disable_query_log
set @old_innodb_trx_rseg_n_slots_debug = @@innodb_trx_rseg_n_slots_debug;
set global innodb_trx_rseg_n_slots_debug = 32;
--enable_query_log

--disable_warnings
drop database if exists mysqltest;
--enable_warnings

create database mysqltest;
CREATE TABLE mysqltest.transtable (id int unsigned NOT NULL PRIMARY KEY, val int DEFAULT 0) ENGINE=InnoDB;

--disable_query_log
#
# Insert in 1 transaction which needs over 1 page undo record to avoid the insert_undo cached,
# because the cached insert_undo can be reused at "CREATE TABLE" statement later.
#
START TRANSACTION;
let $c = 1024;
while ($c)
{
  eval INSERT INTO mysqltest.transtable (id) VALUES ($c);
  dec $c;
}
COMMIT;

let $c = 32;
while ($c)
{
  # if failed at here, it might be shortage of file descriptors limit.
  connect (con$c,localhost,root,,);
  dec $c;
}
--enable_query_log

select count(*) from information_schema.processlist where command != 'Daemon';

#
# fill the all undo slots
#
--disable_query_log
let $c = 32;
while ($c)
{
  connection con$c;
  START TRANSACTION;
  eval UPDATE mysqltest.transtable SET val = 1 WHERE id = 33 - $c;
  dec $c;
}
--enable_query_log

connection default;

--error ER_CANT_CREATE_TABLE
CREATE TABLE mysqltest.testtable (id int unsigned not null primary key) ENGINE=InnoDB;

select count(*) from information_schema.processlist where command != 'Daemon';

--disable_query_log
let $c = 32;
while ($c)
{
  connection con$c;
  ROLLBACK;
  dec $c;
}
--enable_query_log

connection default;
select count(*) from information_schema.processlist where command != 'Daemon';

--disable_query_log
let $c = 32;
while ($c)
{
  disconnect con$c;
  dec $c;
}
--enable_query_log

#
# If the isolated .ibd file remained, the drop database should fail.
#
drop database mysqltest;

--disable_query_log
set global innodb_trx_rseg_n_slots_debug = @old_innodb_trx_rseg_n_slots_debug;
--enable_query_log
@@ -72,4 +72,108 @@ INSERT INTO t1 SELECT POINTFROMTEXT ('POINT(0 0)') FROM seq_1_to_6;
|
||||
ROLLBACK;
|
||||
SET GLOBAL innodb_limit_optimistic_insert_debug=@save_limit;
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-27675 Incorrect r-tree split after group
|
||||
# assignment causes page overflow
|
||||
#
|
||||
CREATE TABLE t1 (f1 INT, f2 INT, f3 VARCHAR(2500),
|
||||
f4 MULTIPOLYGON NOT NULL,
|
||||
PRIMARY KEY (f1,f2,f3), SPATIAL(f4)) ENGINE=InnoDB;
|
||||
INSERT INTO t1 VALUES
|
||||
(1,1,'id',MULTIPOLYGONFromText('MULTIPOLYGON(((0.12 0.53,0.23 0.92,0.12 0.53)))')),
|
||||
(2,2,REPEAT('s',853),MULTIPOLYGONFromText('MULTIPOLYGON(((0.09 0.71,0.92 0.49,0.09 0.71)))')),
|
||||
(3,3,'',MULTIPOLYGONFromText('MULTIPOLYGON(((0.62 0.71,0.62 0.71)))')),
|
||||
(4,4,'j',MULTIPOLYGONFromText('MULTIPOLYGON(((0.00 0.06,0.40 0.39,0.61 0.20,0.69 0.91,0.13 0.45,0.71 0.49,0.81 0.52,0.08 0.02,0.00 0.06)))')),
|
||||
(5,5,'b',MULTIPOLYGONFromText('MULTIPOLYGON(((0.05 0.20,0.45 0.96,0.59 0.46,0.26 0.12,0.45 0.68,0.41 0.10,0.05 0.20)))')),
|
||||
(6,6,'j',MULTIPOLYGONFromText('MULTIPOLYGON(((0.30 0.09,0.42 0.27,0.96 0.83,0.81 0.89,0.42 0.16,0.89 0.64,0.30 0.09)))')),
|
||||
(7,7,'f',MULTIPOLYGONFromText('MULTIPOLYGON(((0.62 0.42,0.12 0.70,0.07 0.24,0.10 0.07,0.92 0.29,0.20 0.52,0.62 0.42)))')),
|
||||
(8,8,'a',MULTIPOLYGONFromText('MULTIPOLYGON(((0.74 0.96,0.80 0.93,0.61 0.40,0.23 0.49,0.79 0.96,0.67 0.30,0.67 0.25,0.74 0.96)))')),
|
||||
(9,9,'j',MULTIPOLYGONFromText('MULTIPOLYGON(((0.18 0.56,0.03 0.48,0.89 0.30,0.79 0.85,0.40 0.92,0.47 0.34,0.38 0.48,0.18 0.56)))')),
|
||||
(10,10,'ko',MULTIPOLYGONFromText('MULTIPOLYGON(((0.60 0.23,0.03 0.43,0.33 0.94,0.20 0.37,0.60 0.23)))')),
|
||||
(11,11,'o',MULTIPOLYGONFromText('MULTIPOLYGON(((0.94 0.33,0.16 0.47,0.94 0.33)))')),
|
||||
(12,12,'bs',MULTIPOLYGONFromText('MULTIPOLYGON(((0.78 0.81,0.71 0.29,0.03 0.85,0.54 0.16,0.23 0.20,0.86 0.77,0.41 0.96,0.85 0.67,0.78 0.81)))')),
|
||||
(13,13,'z',MULTIPOLYGONFromText('MULTIPOLYGON(((0.70 0.92,0.61 0.64,0.05 0.75,0.60 1.00,0.47 0.14,0.70 0.92)))')),
|
||||
(14,14,'',MULTIPOLYGONFromText('MULTIPOLYGON(((0.03 0.78,0.83 0.08,0.18 0.49,0.02 0.88,0.62 0.46,0.25 0.53,0.03 0.78)))')),
|
||||
(15,15,'oaz',MULTIPOLYGONFromText('MULTIPOLYGON(((0.12 0.22,0.73 0.35,0.08 0.39,0.23 0.31,0.84 0.19,0.46 0.77,0.63 0.69,0.12 0.22)))')),
|
||||
(16,16,'a',MULTIPOLYGONFromText('MULTIPOLYGON(((0.50 0.49,0.48 0.69,0.25 0.87,0.85 0.62,0.96 0.28,0.07 0.70,0.45 0.79,0.87 0.36,0.50 0.49)))')),
|
||||
(17,17,'cj',MULTIPOLYGONFromText('MULTIPOLYGON(((0.72 0.93,0.03 0.94,0.77 0.06,0.29 0.76,0.82 0.68,0.16 0.59,0.15 0.73,0.72 0.93)))')),
|
||||
(18,18,REPEAT('r',149),MULTIPOLYGONFromText('MULTIPOLYGON(((0.02 0.67,0.05 0.90,0.68 0.02,0.02 0.67)))')),
|
||||
(19,19,'ihb',MULTIPOLYGONFromText('MULTIPOLYGON(((0.61 0.40,0.77 0.06,0.61 0.40)),((0.43 0.52,0.77 0.27,0.31 0.49,0.43 0.52)))')),
|
||||
(20,20,'h',MULTIPOLYGONFromText('MULTIPOLYGON(((0.37 0.98,0.88 0.84,0.18 0.47,0.15 0.77,0.82 0.92,0.66 0.55,0.60 0.02,0.17 0.09,0.37 0.98)))')),
|
||||
(21,21,'i',MULTIPOLYGONFromText('MULTIPOLYGON(((0.89 0.55,0.85 0.85,0.68 0.24,0.20 0.42,0.67 0.36,0.35 0.25,0.48 0.20,0.89 0.55)))')),
|
||||
(22,22,'q',MULTIPOLYGONFromText('MULTIPOLYGON(((0.67 0.40,0.63 0.18,0.80 0.66,0.65 0.47,0.66 0.56,0.64 0.97,0.00 0.92,0.66 0.18,0.67 0.40)))')),
|
||||
(23,23,'kh',MULTIPOLYGONFromText('MULTIPOLYGON(((0.89 0.31,0.33 0.68,0.75 0.35,0.40 0.57,0.94 0.91,0.88 0.23,0.89 0.31)))')),
|
||||
(24,24,'hbtgc',MULTIPOLYGONFromText('MULTIPOLYGON(((0.99 0.12,0.73 0.75,0.46 0.85,0.55 0.92,0.12 0.44,0.22 0.13,0.11 0.61,0.99 0.12)))')),
|
||||
(25,25,REPEAT('t',71),MULTIPOLYGONFromText('MULTIPOLYGON(((0.72 0.06,0.31 0.98,0.95 0.02,0.84 0.77,0.46 0.09,0.63 0.92,0.35 0.90,0.72 0.06)))')),
|
||||
(26,26,'g',MULTIPOLYGONFromText('MULTIPOLYGON(((0.18 0.27,0.28 0.15,0.18 0.27)),((0.22 0.55,0.22 0.55)),((0.28 0.70,0.28 0.70)))')),
|
||||
(27,27,'c',MULTIPOLYGONFromText('MULTIPOLYGON(((0.72 0.28,0.62 0.71,0.04 1.00,0.12 0.57,0.72 0.28)))')),
|
||||
(28,28,REPEAT('q',885),MULTIPOLYGONFromText('MULTIPOLYGON(((0.70 0.04,0.62 0.29,0.42 0.82,0.90 0.87,0.79 0.69,0.59 0.99,0.24 0.24,0.69 0.96,0.70 0.04)))')),
|
||||
(29,29,'oy',MULTIPOLYGONFromText('MULTIPOLYGON(((0.23 0.87,0.51 0.65,0.70 0.97,0.44 0.14,0.25 0.83,0.23 0.87)))')),
|
||||
(30,30,REPEAT('k',1684),MULTIPOLYGONFromText('MULTIPOLYGON(((0.99 0.78,0.78 0.99,0.76 0.51,0.25 0.31,0.13 0.86,0.16 0.11,0.45 0.94,0.23 0.98,0.99 0.78)))')),
|
||||
(31,31,'ylsmiix',MULTIPOLYGONFromText('MULTIPOLYGON(((0.85 0.35,0.03 0.75,0.18 0.31,0.84 0.36,0.92 0.72,0.52 0.93,0.65 0.10,0.55 0.80,0.85 0.35)))')),
|
||||
(32,32,'ojouw',MULTIPOLYGONFromText('MULTIPOLYGON(((0.72 0.00,0.83 0.45,0.32 0.62,0.36 0.40,0.19 0.95,0.50 0.38,0.30 0.76,0.72 0.00)))')),
|
||||
(33,33,'ou',MULTIPOLYGONFromText('MULTIPOLYGON(((0.98 0.02,0.01 0.23,0.27 0.11,0.98 0.02)),((0.44 0.54,0.44 0.54)),((0.86 0.97,0.86 0.97)))')),
|
||||
(34,34,'u',MULTIPOLYGONFromText('MULTIPOLYGON(((0.13 0.07,0.29 0.09,0.53 0.79,0.85 0.66,0.64 0.17,0.22 0.18,0.35 0.39,0.30 0.28,0.13 0.07)))')),
|
||||
(35,35,'sax',MULTIPOLYGONFromText('MULTIPOLYGON(((0.26 0.03,0.24 0.93,0.15 0.48,0.26 0.03)),((0.73 0.46,0.35 0.63,0.73 0.46)))')),
|
||||
(36,36,'xmet',MULTIPOLYGONFromText('MULTIPOLYGON(((0.23 0.35,0.35 0.82,0.23 0.35)),((0.29 0.61,0.82 0.54,0.29 0.61)))')),
|
||||
(37,37,REPEAT('e',276),MULTIPOLYGONFromText('MULTIPOLYGON(((0.65 0.67,0.65 0.67)))')),
|
||||
(38,38,'ty',MULTIPOLYGONFromText('MULTIPOLYGON(((0.43 0.44,0.64 0.76,0.92 0.59,0.73 0.23,0.43 0.44)))')),
|
||||
(39,39,'yq',MULTIPOLYGONFromText('MULTIPOLYGON(((0.84 0.27,0.19 0.67,0.84 0.27)),((0.55 0.13,0.39 0.64,0.21 0.70,0.18 0.45,0.55 0.13)))')),
|
||||
(40,40,'hcsv',MULTIPOLYGONFromText('MULTIPOLYGON(((0.61 0.79,0.83 0.16,0.63 0.80,0.78 0.28,0.88 0.66,0.61 0.79)))')),
|
||||
(41,41,'csvhlr',MULTIPOLYGONFromText('MULTIPOLYGON(((0.82 0.24,0.31 0.52,0.61 0.67,0.99 0.90,0.05 0.73,0.52 0.18,0.71 0.87,0.82 0.24)))')),
|
||||
(42,42,'b',MULTIPOLYGONFromText('MULTIPOLYGON(((0.09 0.21,0.37 0.57,0.81 0.75,0.61 0.16,0.48 0.17,0.29 0.28,0.72 0.46,0.09 0.21)))')),
|
||||
(43,43,'wd',MULTIPOLYGONFromText('MULTIPOLYGON(((0.06 0.25,0.52 0.23,0.02 0.05,0.06 0.25)),((0.70 0.52,0.44 0.46,0.95 0.47,0.70 0.52)))')),
|
||||
(44,44,'dg',MULTIPOLYGONFromText('MULTIPOLYGON(((0.81 0.28,0.19 0.17,0.81 0.28)))')),
|
||||
(45,45,'qtqkyyhkayeoopxmexd',MULTIPOLYGONFromText('MULTIPOLYGON(((0.80 0.66,0.81 0.12,0.83 0.31,0.52 0.29,0.08 0.04,0.80 0.66)))')),
|
||||
(46,46,'tqk',MULTIPOLYGONFromText('MULTIPOLYGON(((0.95 0.08,0.95 0.08)),((0.09 0.31,0.09 0.31)),((0.38 0.75,0.30 0.04,0.38 0.75)))')),
|
||||
(47,47,REPEAT('q',925),MULTIPOLYGONFromText('MULTIPOLYGON(((0.56 0.73,0.87 0.11,0.37 0.86,0.48 0.05,0.82 0.55,0.25 0.06,0.19 0.85,0.10 0.75,0.56 0.73)))')),
|
||||
(48,48,'yhk',MULTIPOLYGONFromText('MULTIPOLYGON(((0.06 0.67,0.41 0.51,0.03 0.83,0.40 0.20,0.16 0.87,0.16 0.07,0.29 0.52,0.06 0.67)))')),
|
||||
(49,49,'k',MULTIPOLYGONFromText('MULTIPOLYGON(((0.16 0.14,0.16 0.14)),((0.97 0.69,0.45 0.32,0.45 0.38,0.97 0.69)))')),
|
||||
(50,50,'b',MULTIPOLYGONFromText('MULTIPOLYGON(((0.70 0.00,0.70 0.00)),((0.88 0.53,0.90 0.16,0.88 0.53)))')),
|
||||
(51,51,'',MULTIPOLYGONFromText('MULTIPOLYGON(((0.48 0.06,0.45 0.05,0.03 0.12,0.27 0.80,0.22 0.75,0.53 0.55,0.48 0.06)))')),
|
||||
(52,52,'o',MULTIPOLYGONFromText('MULTIPOLYGON(((0.32 0.76,0.17 0.43,0.32 0.76)),((0.40 0.79,0.40 0.79)),((0.42 0.34,0.42 0.34)))')),
|
||||
(53,53,'pxme',MULTIPOLYGONFromText('MULTIPOLYGON(((0.44 0.08,0.02 0.74,0.26 0.21,0.75 0.42,0.91 0.32,0.24 0.65,0.67 0.50,0.44 0.08)))')),
|
||||
(54,54,'m',MULTIPOLYGONFromText('MULTIPOLYGON(((0.86 0.13,0.21 0.34,0.00 0.87,0.76 0.23,0.69 0.73,0.13 0.63,0.86 0.13)))')),
|
||||
(55,55,'mex',MULTIPOLYGONFromText('MULTIPOLYGON(((0.84 0.11,0.63 0.13,0.51 0.81,0.58 0.25,0.53 0.29,0.53 0.42,0.84 0.11)))')),
|
||||
(56,56,REPEAT('e',504),MULTIPOLYGONFromText('MULTIPOLYGON(((0.27 0.84,0.65 0.26,0.75 0.44,0.29 0.52,0.27 0.84)))')),
|
||||
(57,57,'i',MULTIPOLYGONFromText('MULTIPOLYGON(((0.71 0.84,0.77 0.27,0.45 0.71,0.91 0.01,0.84 0.35,0.71 0.84)))')),
|
||||
(58,58,'b',MULTIPOLYGONFromText('MULTIPOLYGON(((0.12 0.36,0.02 0.47,0.57 0.76,0.15 0.54,0.12 0.36)))')),
|
||||
(59,59,'',MULTIPOLYGONFromText('MULTIPOLYGON(((0.77 0.80,0.25 0.69,0.34 0.68,0.77 0.80)))')),
|
||||
(60,60,'',MULTIPOLYGONFromText('MULTIPOLYGON(((0.57 0.30,0.58 0.81,0.57 0.30)))')),
|
||||
(61,61,'nh',MULTIPOLYGONFromText('MULTIPOLYGON(((0.42 0.99,0.42 0.99)))')),
|
||||
(62,62,'hwi',MULTIPOLYGONFromText('MULTIPOLYGON(((0.40 0.50,0.97 0.34,0.60 0.75,0.26 0.74,0.40 0.50)))')),
|
||||
(63,63,'id',MULTIPOLYGONFromText('MULTIPOLYGON(((0.30 0.67,0.13 0.43,0.16 0.64,0.04 0.72,0.95 0.87,0.83 0.24,0.17 0.82,0.30 0.67)))')),
|
||||
(64,64,'toy',MULTIPOLYGONFromText('MULTIPOLYGON(((0.68 0.75,0.92 0.90,0.68 0.75)),((0.58 0.03,0.41 0.09,0.62 0.05,0.58 0.03)))')),
|
||||
(65,65,'yhawdptl',MULTIPOLYGONFromText('MULTIPOLYGON(((0.95 0.50,0.61 0.35,0.78 0.07,0.67 0.43,0.50 0.70,0.48 0.98,0.95 0.50)))')),
|
||||
(66,66,'gs',MULTIPOLYGONFromText('MULTIPOLYGON(((0.59 0.06,0.12 0.94,0.05 0.90,0.99 0.22,0.13 0.55,0.59 0.06)))')),
|
||||
(67,67,'bplb',MULTIPOLYGONFromText('MULTIPOLYGON(((0.33 0.90,0.54 0.11,0.05 0.04,0.59 0.66,0.33 0.90)))')),
|
||||
(68,68,'b',MULTIPOLYGONFromText('MULTIPOLYGON(((0.50 0.52,0.23 0.54,0.80 0.14,0.88 0.70,0.13 0.67,0.68 0.66,0.50 0.52)))')),
|
||||
(69,69,'p',MULTIPOLYGONFromText('MULTIPOLYGON(((0.07 0.99,0.11 0.79,0.07 0.99)),((0.50 0.22,0.77 0.58,0.50 0.22)))')),
|
||||
(70,70,'l',MULTIPOLYGONFromText('MULTIPOLYGON(((0.21 0.75,0.21 0.75)))')),
|
||||
(71,71,'rwkqhip',MULTIPOLYGONFromText('MULTIPOLYGON(((0.99 0.89,0.25 0.77,0.99 0.89)))')),
|
||||
(72,72,'n',MULTIPOLYGONFromText('MULTIPOLYGON(((0.01 0.10,0.01 0.20,0.01 0.10)),((0.83 0.75,0.29 0.21,0.83 0.75)))')),
|
||||
(73,73,'q',MULTIPOLYGONFromText('MULTIPOLYGON(((0.12 0.03,0.51 0.05,0.27 0.77,0.74 0.06,0.12 0.03)))')),
|
||||
(74,74,'hipd',MULTIPOLYGONFromText('MULTIPOLYGON(((0.89 0.94,0.54 0.92,0.37 0.71,0.89 0.94)))')),
|
||||
(75,75,'ipdec',MULTIPOLYGONFromText('MULTIPOLYGON(((0.50 0.48,0.07 0.31,0.19 0.23,0.51 0.74,0.50 0.48)))')),
|
||||
(76,76,'pde',MULTIPOLYGONFromText('MULTIPOLYGON(((0.79 0.42,0.61 0.98,0.13 0.85,0.52 0.16,0.79 0.42)))')),
|
||||
(77,77,REPEAT('e',1432),MULTIPOLYGONFromText('MULTIPOLYGON(((0.78 0.29,0.42 0.20,0.88 0.86,0.99 0.81,0.78 0.29)))')),
|
||||
(78,78,'cyhr',MULTIPOLYGONFromText('MULTIPOLYGON(((0.61 0.16,0.62 0.19,0.61 0.16)),((0.62 0.94,0.65 0.53,0.15 0.25,0.71 0.41,0.62 0.94)),((0.67 0.63,0.86 0.60,0.67 0.63)))')),
|
||||
(79,79,'n',MULTIPOLYGONFromText('MULTIPOLYGON(((0.39 0.89,0.25 0.77,0.22 0.21,0.51 0.19,0.71 0.51,0.39 0.89)))')),
|
||||
(80,80,'y',MULTIPOLYGONFromText('MULTIPOLYGON(((0.29 0.36,0.29 0.36)))')),
|
||||
(81,81,'r',MULTIPOLYGONFromText('MULTIPOLYGON(((0.05 0.94,0.93 0.37,0.22 0.07,0.73 0.75,0.99 0.35,0.05 0.94)))')),
|
||||
(82,82,'w',MULTIPOLYGONFromText('MULTIPOLYGON(((0.33 0.37,0.06 0.59,0.34 0.82,0.73 0.86,0.18 0.78,0.99 0.03,0.33 0.37)))')),
|
||||
(83,83,REPEAT('g',74),MULTIPOLYGONFromText('MULTIPOLYGON(((0.60 0.54,0.25 0.31,0.60 0.54)))')),
|
||||
(84,84,REPEAT('s',214),MULTIPOLYGONFromText('MULTIPOLYGON(((0.80 0.34,0.09 0.74,0.47 0.96,0.55 0.19,0.80 0.34)))')),
|
||||
(85,85,REPEAT('h',223),MULTIPOLYGONFromText('MULTIPOLYGON(((0.76 0.26,0.16 0.85,0.91 0.75,0.64 0.83,0.47 0.02,0.92 0.58,0.76 0.26)))')),
|
||||
(86,86,'l',MULTIPOLYGONFromText('MULTIPOLYGON(((0.11 0.64,0.41 0.64,0.64 0.64,0.11 0.64)))')),
|
||||
(87,87,'hj',MULTIPOLYGONFromText('MULTIPOLYGON(((0.66 1.00,0.21 0.96,0.52 0.44,0.94 0.06,0.80 0.39,0.33 0.57,0.30 0.89,0.66 1.00)))')),
|
||||
(88,88,'axcs',MULTIPOLYGONFromText('MULTIPOLYGON(((0.20 0.66,0.71 0.41,0.32 0.94,0.30 0.66,0.50 0.49,0.60 0.67,0.20 0.66)))')),
|
||||
(89,89,'cs',MULTIPOLYGONFromText('MULTIPOLYGON(((0.02 0.69,0.80 0.21,0.09 0.23,0.45 0.66,0.10 0.72,0.02 0.69)))')),
|
||||
(90,90,'f',MULTIPOLYGONFromText('MULTIPOLYGON(((0.87 0.14,0.54 0.83,0.87 0.42,0.36 0.58,0.87 0.14)))')),
|
||||
(91,91,'icq',MULTIPOLYGONFromText('MULTIPOLYGON(((0.73 0.57,0.36 0.41,0.86 0.33,0.76 0.49,0.44 0.83,0.73 0.57)))')),
|
||||
(92,92,REPEAT('z',783),MULTIPOLYGONFromText('MULTIPOLYGON(((0.28 0.98,0.05 0.26,0.09 0.59,1.00 0.17,0.55 0.68,0.12 0.04,0.28 0.98)))')),
|
||||
(93,93,'z',MULTIPOLYGONFromText('MULTIPOLYGON(((0.05 0.89,0.05 0.89)))')),
|
||||
(94,94,REPEAT('x',1412),MULTIPOLYGONFromText('MULTIPOLYGON(((0.79 0.83,0.12 0.49,0.54 0.63,0.79 0.83)))')),
|
||||
(95,95,REPEAT('u',2500),MULTIPOLYGONFromText('MULTIPOLYGON(((0.50 0.55,0.13 0.19,0.72 0.06,0.50 0.55)),((0.73 0.92,0.02 0.48,0.73 0.92)))'));
|
||||
DROP TABLE t1;
|
||||
# End of 10.6 tests
|
||||
|
||||
@@ -87,4 +87,109 @@ ROLLBACK;
|
||||
SET GLOBAL innodb_limit_optimistic_insert_debug=@save_limit;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-27675 Incorrect r-tree split after group
|
||||
--echo # assignment causes page overflow
|
||||
--echo #
|
||||
CREATE TABLE t1 (f1 INT, f2 INT, f3 VARCHAR(2500),
|
||||
f4 MULTIPOLYGON NOT NULL,
|
||||
PRIMARY KEY (f1,f2,f3), SPATIAL(f4)) ENGINE=InnoDB;
|
||||
INSERT INTO t1 VALUES
|
||||
(1,1,'id',MULTIPOLYGONFromText('MULTIPOLYGON(((0.12 0.53,0.23 0.92,0.12 0.53)))')),
|
||||
(2,2,REPEAT('s',853),MULTIPOLYGONFromText('MULTIPOLYGON(((0.09 0.71,0.92 0.49,0.09 0.71)))')),
|
||||
(3,3,'',MULTIPOLYGONFromText('MULTIPOLYGON(((0.62 0.71,0.62 0.71)))')),
|
||||
(4,4,'j',MULTIPOLYGONFromText('MULTIPOLYGON(((0.00 0.06,0.40 0.39,0.61 0.20,0.69 0.91,0.13 0.45,0.71 0.49,0.81 0.52,0.08 0.02,0.00 0.06)))')),
|
||||
(5,5,'b',MULTIPOLYGONFromText('MULTIPOLYGON(((0.05 0.20,0.45 0.96,0.59 0.46,0.26 0.12,0.45 0.68,0.41 0.10,0.05 0.20)))')),
|
||||
(6,6,'j',MULTIPOLYGONFromText('MULTIPOLYGON(((0.30 0.09,0.42 0.27,0.96 0.83,0.81 0.89,0.42 0.16,0.89 0.64,0.30 0.09)))')),
|
||||
(7,7,'f',MULTIPOLYGONFromText('MULTIPOLYGON(((0.62 0.42,0.12 0.70,0.07 0.24,0.10 0.07,0.92 0.29,0.20 0.52,0.62 0.42)))')),
|
||||
(8,8,'a',MULTIPOLYGONFromText('MULTIPOLYGON(((0.74 0.96,0.80 0.93,0.61 0.40,0.23 0.49,0.79 0.96,0.67 0.30,0.67 0.25,0.74 0.96)))')),
|
||||
(9,9,'j',MULTIPOLYGONFromText('MULTIPOLYGON(((0.18 0.56,0.03 0.48,0.89 0.30,0.79 0.85,0.40 0.92,0.47 0.34,0.38 0.48,0.18 0.56)))')),
|
||||
(10,10,'ko',MULTIPOLYGONFromText('MULTIPOLYGON(((0.60 0.23,0.03 0.43,0.33 0.94,0.20 0.37,0.60 0.23)))')),
|
||||
(11,11,'o',MULTIPOLYGONFromText('MULTIPOLYGON(((0.94 0.33,0.16 0.47,0.94 0.33)))')),
|
||||
(12,12,'bs',MULTIPOLYGONFromText('MULTIPOLYGON(((0.78 0.81,0.71 0.29,0.03 0.85,0.54 0.16,0.23 0.20,0.86 0.77,0.41 0.96,0.85 0.67,0.78 0.81)))')),
|
||||
(13,13,'z',MULTIPOLYGONFromText('MULTIPOLYGON(((0.70 0.92,0.61 0.64,0.05 0.75,0.60 1.00,0.47 0.14,0.70 0.92)))')),
|
||||
(14,14,'',MULTIPOLYGONFromText('MULTIPOLYGON(((0.03 0.78,0.83 0.08,0.18 0.49,0.02 0.88,0.62 0.46,0.25 0.53,0.03 0.78)))')),
|
||||
(15,15,'oaz',MULTIPOLYGONFromText('MULTIPOLYGON(((0.12 0.22,0.73 0.35,0.08 0.39,0.23 0.31,0.84 0.19,0.46 0.77,0.63 0.69,0.12 0.22)))')),
|
||||
(16,16,'a',MULTIPOLYGONFromText('MULTIPOLYGON(((0.50 0.49,0.48 0.69,0.25 0.87,0.85 0.62,0.96 0.28,0.07 0.70,0.45 0.79,0.87 0.36,0.50 0.49)))')),
|
||||
(17,17,'cj',MULTIPOLYGONFromText('MULTIPOLYGON(((0.72 0.93,0.03 0.94,0.77 0.06,0.29 0.76,0.82 0.68,0.16 0.59,0.15 0.73,0.72 0.93)))')),
|
||||
(18,18,REPEAT('r',149),MULTIPOLYGONFromText('MULTIPOLYGON(((0.02 0.67,0.05 0.90,0.68 0.02,0.02 0.67)))')),
|
||||
(19,19,'ihb',MULTIPOLYGONFromText('MULTIPOLYGON(((0.61 0.40,0.77 0.06,0.61 0.40)),((0.43 0.52,0.77 0.27,0.31 0.49,0.43 0.52)))')),
|
||||
(20,20,'h',MULTIPOLYGONFromText('MULTIPOLYGON(((0.37 0.98,0.88 0.84,0.18 0.47,0.15 0.77,0.82 0.92,0.66 0.55,0.60 0.02,0.17 0.09,0.37 0.98)))')),
|
||||
(21,21,'i',MULTIPOLYGONFromText('MULTIPOLYGON(((0.89 0.55,0.85 0.85,0.68 0.24,0.20 0.42,0.67 0.36,0.35 0.25,0.48 0.20,0.89 0.55)))')),
|
||||
(22,22,'q',MULTIPOLYGONFromText('MULTIPOLYGON(((0.67 0.40,0.63 0.18,0.80 0.66,0.65 0.47,0.66 0.56,0.64 0.97,0.00 0.92,0.66 0.18,0.67 0.40)))')),
|
||||
(23,23,'kh',MULTIPOLYGONFromText('MULTIPOLYGON(((0.89 0.31,0.33 0.68,0.75 0.35,0.40 0.57,0.94 0.91,0.88 0.23,0.89 0.31)))')),
|
||||
(24,24,'hbtgc',MULTIPOLYGONFromText('MULTIPOLYGON(((0.99 0.12,0.73 0.75,0.46 0.85,0.55 0.92,0.12 0.44,0.22 0.13,0.11 0.61,0.99 0.12)))')),
|
||||
(25,25,REPEAT('t',71),MULTIPOLYGONFromText('MULTIPOLYGON(((0.72 0.06,0.31 0.98,0.95 0.02,0.84 0.77,0.46 0.09,0.63 0.92,0.35 0.90,0.72 0.06)))')),
|
||||
(26,26,'g',MULTIPOLYGONFromText('MULTIPOLYGON(((0.18 0.27,0.28 0.15,0.18 0.27)),((0.22 0.55,0.22 0.55)),((0.28 0.70,0.28 0.70)))')),
|
||||
(27,27,'c',MULTIPOLYGONFromText('MULTIPOLYGON(((0.72 0.28,0.62 0.71,0.04 1.00,0.12 0.57,0.72 0.28)))')),
|
||||
(28,28,REPEAT('q',885),MULTIPOLYGONFromText('MULTIPOLYGON(((0.70 0.04,0.62 0.29,0.42 0.82,0.90 0.87,0.79 0.69,0.59 0.99,0.24 0.24,0.69 0.96,0.70 0.04)))')),
|
||||
(29,29,'oy',MULTIPOLYGONFromText('MULTIPOLYGON(((0.23 0.87,0.51 0.65,0.70 0.97,0.44 0.14,0.25 0.83,0.23 0.87)))')),
|
||||
(30,30,REPEAT('k',1684),MULTIPOLYGONFromText('MULTIPOLYGON(((0.99 0.78,0.78 0.99,0.76 0.51,0.25 0.31,0.13 0.86,0.16 0.11,0.45 0.94,0.23 0.98,0.99 0.78)))')),
|
||||
(31,31,'ylsmiix',MULTIPOLYGONFromText('MULTIPOLYGON(((0.85 0.35,0.03 0.75,0.18 0.31,0.84 0.36,0.92 0.72,0.52 0.93,0.65 0.10,0.55 0.80,0.85 0.35)))')),
|
||||
(32,32,'ojouw',MULTIPOLYGONFromText('MULTIPOLYGON(((0.72 0.00,0.83 0.45,0.32 0.62,0.36 0.40,0.19 0.95,0.50 0.38,0.30 0.76,0.72 0.00)))')),
|
||||
(33,33,'ou',MULTIPOLYGONFromText('MULTIPOLYGON(((0.98 0.02,0.01 0.23,0.27 0.11,0.98 0.02)),((0.44 0.54,0.44 0.54)),((0.86 0.97,0.86 0.97)))')),
|
||||
(34,34,'u',MULTIPOLYGONFromText('MULTIPOLYGON(((0.13 0.07,0.29 0.09,0.53 0.79,0.85 0.66,0.64 0.17,0.22 0.18,0.35 0.39,0.30 0.28,0.13 0.07)))')),
|
||||
(35,35,'sax',MULTIPOLYGONFromText('MULTIPOLYGON(((0.26 0.03,0.24 0.93,0.15 0.48,0.26 0.03)),((0.73 0.46,0.35 0.63,0.73 0.46)))')),
|
||||
(36,36,'xmet',MULTIPOLYGONFromText('MULTIPOLYGON(((0.23 0.35,0.35 0.82,0.23 0.35)),((0.29 0.61,0.82 0.54,0.29 0.61)))')),
|
||||
(37,37,REPEAT('e',276),MULTIPOLYGONFromText('MULTIPOLYGON(((0.65 0.67,0.65 0.67)))')),
|
||||
(38,38,'ty',MULTIPOLYGONFromText('MULTIPOLYGON(((0.43 0.44,0.64 0.76,0.92 0.59,0.73 0.23,0.43 0.44)))')),
|
||||
(39,39,'yq',MULTIPOLYGONFromText('MULTIPOLYGON(((0.84 0.27,0.19 0.67,0.84 0.27)),((0.55 0.13,0.39 0.64,0.21 0.70,0.18 0.45,0.55 0.13)))')),
|
||||
(40,40,'hcsv',MULTIPOLYGONFromText('MULTIPOLYGON(((0.61 0.79,0.83 0.16,0.63 0.80,0.78 0.28,0.88 0.66,0.61 0.79)))')),
|
||||
(41,41,'csvhlr',MULTIPOLYGONFromText('MULTIPOLYGON(((0.82 0.24,0.31 0.52,0.61 0.67,0.99 0.90,0.05 0.73,0.52 0.18,0.71 0.87,0.82 0.24)))')),
|
||||
(42,42,'b',MULTIPOLYGONFromText('MULTIPOLYGON(((0.09 0.21,0.37 0.57,0.81 0.75,0.61 0.16,0.48 0.17,0.29 0.28,0.72 0.46,0.09 0.21)))')),
|
||||
(43,43,'wd',MULTIPOLYGONFromText('MULTIPOLYGON(((0.06 0.25,0.52 0.23,0.02 0.05,0.06 0.25)),((0.70 0.52,0.44 0.46,0.95 0.47,0.70 0.52)))')),
|
||||
(44,44,'dg',MULTIPOLYGONFromText('MULTIPOLYGON(((0.81 0.28,0.19 0.17,0.81 0.28)))')),
|
||||
(45,45,'qtqkyyhkayeoopxmexd',MULTIPOLYGONFromText('MULTIPOLYGON(((0.80 0.66,0.81 0.12,0.83 0.31,0.52 0.29,0.08 0.04,0.80 0.66)))')),
|
||||
(46,46,'tqk',MULTIPOLYGONFromText('MULTIPOLYGON(((0.95 0.08,0.95 0.08)),((0.09 0.31,0.09 0.31)),((0.38 0.75,0.30 0.04,0.38 0.75)))')),
|
||||
(47,47,REPEAT('q',925),MULTIPOLYGONFromText('MULTIPOLYGON(((0.56 0.73,0.87 0.11,0.37 0.86,0.48 0.05,0.82 0.55,0.25 0.06,0.19 0.85,0.10 0.75,0.56 0.73)))')),
|
||||
(48,48,'yhk',MULTIPOLYGONFromText('MULTIPOLYGON(((0.06 0.67,0.41 0.51,0.03 0.83,0.40 0.20,0.16 0.87,0.16 0.07,0.29 0.52,0.06 0.67)))')),
|
||||
(49,49,'k',MULTIPOLYGONFromText('MULTIPOLYGON(((0.16 0.14,0.16 0.14)),((0.97 0.69,0.45 0.32,0.45 0.38,0.97 0.69)))')),
|
||||
(50,50,'b',MULTIPOLYGONFromText('MULTIPOLYGON(((0.70 0.00,0.70 0.00)),((0.88 0.53,0.90 0.16,0.88 0.53)))')),
|
||||
(51,51,'',MULTIPOLYGONFromText('MULTIPOLYGON(((0.48 0.06,0.45 0.05,0.03 0.12,0.27 0.80,0.22 0.75,0.53 0.55,0.48 0.06)))')),
|
||||
(52,52,'o',MULTIPOLYGONFromText('MULTIPOLYGON(((0.32 0.76,0.17 0.43,0.32 0.76)),((0.40 0.79,0.40 0.79)),((0.42 0.34,0.42 0.34)))')),
|
||||
(53,53,'pxme',MULTIPOLYGONFromText('MULTIPOLYGON(((0.44 0.08,0.02 0.74,0.26 0.21,0.75 0.42,0.91 0.32,0.24 0.65,0.67 0.50,0.44 0.08)))')),
|
||||
(54,54,'m',MULTIPOLYGONFromText('MULTIPOLYGON(((0.86 0.13,0.21 0.34,0.00 0.87,0.76 0.23,0.69 0.73,0.13 0.63,0.86 0.13)))')),
|
||||
(55,55,'mex',MULTIPOLYGONFromText('MULTIPOLYGON(((0.84 0.11,0.63 0.13,0.51 0.81,0.58 0.25,0.53 0.29,0.53 0.42,0.84 0.11)))')),
|
||||
(56,56,REPEAT('e',504),MULTIPOLYGONFromText('MULTIPOLYGON(((0.27 0.84,0.65 0.26,0.75 0.44,0.29 0.52,0.27 0.84)))')),
|
||||
(57,57,'i',MULTIPOLYGONFromText('MULTIPOLYGON(((0.71 0.84,0.77 0.27,0.45 0.71,0.91 0.01,0.84 0.35,0.71 0.84)))')),
|
||||
(58,58,'b',MULTIPOLYGONFromText('MULTIPOLYGON(((0.12 0.36,0.02 0.47,0.57 0.76,0.15 0.54,0.12 0.36)))')),
|
||||
(59,59,'',MULTIPOLYGONFromText('MULTIPOLYGON(((0.77 0.80,0.25 0.69,0.34 0.68,0.77 0.80)))')),
|
||||
(60,60,'',MULTIPOLYGONFromText('MULTIPOLYGON(((0.57 0.30,0.58 0.81,0.57 0.30)))')),
|
||||
(61,61,'nh',MULTIPOLYGONFromText('MULTIPOLYGON(((0.42 0.99,0.42 0.99)))')),
|
||||
(62,62,'hwi',MULTIPOLYGONFromText('MULTIPOLYGON(((0.40 0.50,0.97 0.34,0.60 0.75,0.26 0.74,0.40 0.50)))')),
|
||||
(63,63,'id',MULTIPOLYGONFromText('MULTIPOLYGON(((0.30 0.67,0.13 0.43,0.16 0.64,0.04 0.72,0.95 0.87,0.83 0.24,0.17 0.82,0.30 0.67)))')),
|
||||
(64,64,'toy',MULTIPOLYGONFromText('MULTIPOLYGON(((0.68 0.75,0.92 0.90,0.68 0.75)),((0.58 0.03,0.41 0.09,0.62 0.05,0.58 0.03)))')),
|
||||
(65,65,'yhawdptl',MULTIPOLYGONFromText('MULTIPOLYGON(((0.95 0.50,0.61 0.35,0.78 0.07,0.67 0.43,0.50 0.70,0.48 0.98,0.95 0.50)))')),
|
||||
(66,66,'gs',MULTIPOLYGONFromText('MULTIPOLYGON(((0.59 0.06,0.12 0.94,0.05 0.90,0.99 0.22,0.13 0.55,0.59 0.06)))')),
|
||||
(67,67,'bplb',MULTIPOLYGONFromText('MULTIPOLYGON(((0.33 0.90,0.54 0.11,0.05 0.04,0.59 0.66,0.33 0.90)))')),
|
||||
(68,68,'b',MULTIPOLYGONFromText('MULTIPOLYGON(((0.50 0.52,0.23 0.54,0.80 0.14,0.88 0.70,0.13 0.67,0.68 0.66,0.50 0.52)))')),
|
||||
(69,69,'p',MULTIPOLYGONFromText('MULTIPOLYGON(((0.07 0.99,0.11 0.79,0.07 0.99)),((0.50 0.22,0.77 0.58,0.50 0.22)))')),
|
||||
(70,70,'l',MULTIPOLYGONFromText('MULTIPOLYGON(((0.21 0.75,0.21 0.75)))')),
|
||||
(71,71,'rwkqhip',MULTIPOLYGONFromText('MULTIPOLYGON(((0.99 0.89,0.25 0.77,0.99 0.89)))')),
|
||||
(72,72,'n',MULTIPOLYGONFromText('MULTIPOLYGON(((0.01 0.10,0.01 0.20,0.01 0.10)),((0.83 0.75,0.29 0.21,0.83 0.75)))')),
|
||||
(73,73,'q',MULTIPOLYGONFromText('MULTIPOLYGON(((0.12 0.03,0.51 0.05,0.27 0.77,0.74 0.06,0.12 0.03)))')),
|
||||
(74,74,'hipd',MULTIPOLYGONFromText('MULTIPOLYGON(((0.89 0.94,0.54 0.92,0.37 0.71,0.89 0.94)))')),
|
||||
(75,75,'ipdec',MULTIPOLYGONFromText('MULTIPOLYGON(((0.50 0.48,0.07 0.31,0.19 0.23,0.51 0.74,0.50 0.48)))')),
|
||||
(76,76,'pde',MULTIPOLYGONFromText('MULTIPOLYGON(((0.79 0.42,0.61 0.98,0.13 0.85,0.52 0.16,0.79 0.42)))')),
|
||||
(77,77,REPEAT('e',1432),MULTIPOLYGONFromText('MULTIPOLYGON(((0.78 0.29,0.42 0.20,0.88 0.86,0.99 0.81,0.78 0.29)))')),
|
||||
(78,78,'cyhr',MULTIPOLYGONFromText('MULTIPOLYGON(((0.61 0.16,0.62 0.19,0.61 0.16)),((0.62 0.94,0.65 0.53,0.15 0.25,0.71 0.41,0.62 0.94)),((0.67 0.63,0.86 0.60,0.67 0.63)))')),
|
||||
(79,79,'n',MULTIPOLYGONFromText('MULTIPOLYGON(((0.39 0.89,0.25 0.77,0.22 0.21,0.51 0.19,0.71 0.51,0.39 0.89)))')),
|
||||
(80,80,'y',MULTIPOLYGONFromText('MULTIPOLYGON(((0.29 0.36,0.29 0.36)))')),
|
||||
(81,81,'r',MULTIPOLYGONFromText('MULTIPOLYGON(((0.05 0.94,0.93 0.37,0.22 0.07,0.73 0.75,0.99 0.35,0.05 0.94)))')),
|
||||
(82,82,'w',MULTIPOLYGONFromText('MULTIPOLYGON(((0.33 0.37,0.06 0.59,0.34 0.82,0.73 0.86,0.18 0.78,0.99 0.03,0.33 0.37)))')),
|
||||
(83,83,REPEAT('g',74),MULTIPOLYGONFromText('MULTIPOLYGON(((0.60 0.54,0.25 0.31,0.60 0.54)))')),
|
||||
(84,84,REPEAT('s',214),MULTIPOLYGONFromText('MULTIPOLYGON(((0.80 0.34,0.09 0.74,0.47 0.96,0.55 0.19,0.80 0.34)))')),
|
||||
(85,85,REPEAT('h',223),MULTIPOLYGONFromText('MULTIPOLYGON(((0.76 0.26,0.16 0.85,0.91 0.75,0.64 0.83,0.47 0.02,0.92 0.58,0.76 0.26)))')),
|
||||
(86,86,'l',MULTIPOLYGONFromText('MULTIPOLYGON(((0.11 0.64,0.41 0.64,0.64 0.64,0.11 0.64)))')),
|
||||
(87,87,'hj',MULTIPOLYGONFromText('MULTIPOLYGON(((0.66 1.00,0.21 0.96,0.52 0.44,0.94 0.06,0.80 0.39,0.33 0.57,0.30 0.89,0.66 1.00)))')),
|
||||
(88,88,'axcs',MULTIPOLYGONFromText('MULTIPOLYGON(((0.20 0.66,0.71 0.41,0.32 0.94,0.30 0.66,0.50 0.49,0.60 0.67,0.20 0.66)))')),
|
||||
(89,89,'cs',MULTIPOLYGONFromText('MULTIPOLYGON(((0.02 0.69,0.80 0.21,0.09 0.23,0.45 0.66,0.10 0.72,0.02 0.69)))')),
|
||||
(90,90,'f',MULTIPOLYGONFromText('MULTIPOLYGON(((0.87 0.14,0.54 0.83,0.87 0.42,0.36 0.58,0.87 0.14)))')),
|
||||
(91,91,'icq',MULTIPOLYGONFromText('MULTIPOLYGON(((0.73 0.57,0.36 0.41,0.86 0.33,0.76 0.49,0.44 0.83,0.73 0.57)))')),
|
||||
(92,92,REPEAT('z',783),MULTIPOLYGONFromText('MULTIPOLYGON(((0.28 0.98,0.05 0.26,0.09 0.59,1.00 0.17,0.55 0.68,0.12 0.04,0.28 0.98)))')),
|
||||
(93,93,'z',MULTIPOLYGONFromText('MULTIPOLYGON(((0.05 0.89,0.05 0.89)))')),
|
||||
(94,94,REPEAT('x',1412),MULTIPOLYGONFromText('MULTIPOLYGON(((0.79 0.83,0.12 0.49,0.54 0.63,0.79 0.83)))')),
|
||||
(95,95,REPEAT('u',2500),MULTIPOLYGONFromText('MULTIPOLYGON(((0.50 0.55,0.13 0.19,0.72 0.06,0.50 0.55)),((0.73 0.92,0.02 0.48,0.73 0.92)))'));
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo # End of 10.6 tests
|
||||
|
||||
@@ -1219,6 +1219,7 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
|
||||
#
|
||||
# MDEV-27898 CREATE VIEW AS SELECT FROM JSON_TABLE column requires global privileges
|
||||
#
|
||||
# Beginning of 10.11 tests
|
||||
create view v1 as (select * from
|
||||
json_table('[{"a":"1"}]', '$[*]' columns(a int path '$.a') ) as jt);
|
||||
create user u1@localhost;
|
||||
@@ -1231,7 +1232,58 @@ connection default;
|
||||
DROP VIEW v2;
|
||||
DROP VIEW v1;
|
||||
DROP USER u1@localhost;
|
||||
# End of 10.11 tests
|
||||
#
|
||||
# MDEV-34081: View containing JSON_TABLE does not return JSON
|
||||
#
|
||||
CREATE OR REPLACE VIEW test_view AS SELECT * FROM JSON_TABLE('
|
||||
[
|
||||
{
|
||||
"caption": "First Element",
|
||||
"value": 1
|
||||
},
|
||||
{
|
||||
"caption": "Second Element",
|
||||
"value": 2
|
||||
}
|
||||
]
|
||||
', '$[*]' COLUMNS(
|
||||
caption VARCHAR(200) PATH '$.caption',
|
||||
whole_block JSON PATH '$')) t;
|
||||
SELECT * FROM test_view;
|
||||
caption whole_block
|
||||
First Element {
|
||||
"caption": "First Element",
|
||||
"value": 1
|
||||
}
|
||||
Second Element {
|
||||
"caption": "Second Element",
|
||||
"value": 2
|
||||
}
|
||||
SELECT * FROM JSON_TABLE('
|
||||
[
|
||||
{
|
||||
"caption": "First Element",
|
||||
"value": 1
|
||||
},
|
||||
{
|
||||
"caption": "Second Element",
|
||||
"value": 2
|
||||
}
|
||||
]
|
||||
', '$[*]' COLUMNS(
|
||||
caption VARCHAR(200) PATH '$.caption',
|
||||
whole_block JSON PATH '$')) t;
|
||||
caption whole_block
|
||||
First Element {
|
||||
"caption": "First Element",
|
||||
"value": 1
|
||||
}
|
||||
Second Element {
|
||||
"caption": "Second Element",
|
||||
"value": 2
|
||||
}
|
||||
DROP VIEW test_view;
|
||||
# End of 10.11 test
|
||||
#
|
||||
# MDEV-29390: Improve coverage for UPDATE and DELETE statements in MTR test suites
|
||||
#
|
||||
|
||||
@@ -1048,6 +1048,8 @@ COLUMNS
|
||||
--echo # MDEV-27898 CREATE VIEW AS SELECT FROM JSON_TABLE column requires global privileges
|
||||
--echo #
|
||||
|
||||
--echo # Beginning of 10.11 tests
|
||||
|
||||
create view v1 as (select * from
|
||||
json_table('[{"a":"1"}]', '$[*]' columns(a int path '$.a') ) as jt);
|
||||
|
||||
@@ -1065,7 +1067,45 @@ DROP VIEW v2;
|
||||
DROP VIEW v1;
|
||||
DROP USER u1@localhost;
|
||||
|
||||
--echo # End of 10.11 tests
|
||||
--echo #
|
||||
--echo # MDEV-34081: View containing JSON_TABLE does not return JSON
|
||||
--echo #
|
||||
|
||||
CREATE OR REPLACE VIEW test_view AS SELECT * FROM JSON_TABLE('
|
||||
[
|
||||
{
|
||||
"caption": "First Element",
|
||||
"value": 1
|
||||
},
|
||||
{
|
||||
"caption": "Second Element",
|
||||
"value": 2
|
||||
}
|
||||
]
|
||||
', '$[*]' COLUMNS(
|
||||
caption VARCHAR(200) PATH '$.caption',
|
||||
whole_block JSON PATH '$')) t;
|
||||
|
||||
SELECT * FROM test_view;
|
||||
|
||||
SELECT * FROM JSON_TABLE('
|
||||
[
|
||||
{
|
||||
"caption": "First Element",
|
||||
"value": 1
|
||||
},
|
||||
{
|
||||
"caption": "Second Element",
|
||||
"value": 2
|
||||
}
|
||||
]
|
||||
', '$[*]' COLUMNS(
|
||||
caption VARCHAR(200) PATH '$.caption',
|
||||
whole_block JSON PATH '$')) t;
|
||||
|
||||
DROP VIEW test_view;
|
||||
|
||||
--echo # End of 10.11 test
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-29390: Improve coverage for UPDATE and DELETE statements in MTR test suites
|
||||
|
||||
@@ -22,16 +22,6 @@ i
|
||||
1
|
||||
UNLOCK TABLES;
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-23824 SIGSEGV in end_io_cache on REPAIR LOCAL TABLE for Aria table
|
||||
#
|
||||
CREATE TABLE t1 (i INT) ENGINE=Aria;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
SET max_session_mem_used=50000;
|
||||
REPAIR LOCAL TABLE t1 USE_FRM;
|
||||
REPAIR LOCAL TABLE t1;
|
||||
DROP TABLE t1;
|
||||
SET max_session_mem_used=default;
|
||||
|
||||
# MDEV-17223 Assertion `thd->killed != 0' failed in
|
||||
# ha_maria::enable_indexes
|
||||
|
||||
@@ -1,8 +1,3 @@
|
||||
# We should not run this test with embedded as we are using
|
||||
# max_session_mem_used, which causes things to fail/not fail randomly
|
||||
# as memory usage is different compared to normal server.
|
||||
|
||||
--source include/not_embedded.inc
|
||||
--source include/have_sequence.inc
|
||||
|
||||
#
|
||||
@@ -30,20 +25,6 @@ SELECT * FROM t1;
|
||||
UNLOCK TABLES;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-23824 SIGSEGV in end_io_cache on REPAIR LOCAL TABLE for Aria table
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (i INT) ENGINE=Aria;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
SET max_session_mem_used=50000;
|
||||
--disable_result_log
|
||||
REPAIR LOCAL TABLE t1 USE_FRM;
|
||||
REPAIR LOCAL TABLE t1;
|
||||
--enable_result_log
|
||||
DROP TABLE t1;
|
||||
SET max_session_mem_used=default;
|
||||
|
||||
--echo
|
||||
--echo # MDEV-17223 Assertion `thd->killed != 0' failed in
|
||||
--echo # ha_maria::enable_indexes
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
--source include/not_embedded.inc
|
||||
--source include/have_perfschema.inc
|
||||
--source include/not_msan.inc
|
||||
--source include/not_valgrind.inc
|
||||
# This does not crash on 32 bit because of less memory used
|
||||
--source include/have_64bit.inc
|
||||
--echo #
|
||||
@@ -16,7 +18,9 @@ set max_session_mem_used=32768;
|
||||
select * from performance_schema.session_status;
|
||||
--enable_result_log
|
||||
# this used to cause mutex lock order violation when OOM happened under LOCK_global_system_variables
|
||||
--disable_ps_protocol # different memory requirements
|
||||
set global innodb_io_capacity_max=100;
|
||||
set max_session_mem_used=default;
|
||||
--enable_ps_protocol
|
||||
set global innodb_io_capacity=@old_innodb_io_capacity;
|
||||
set global innodb_io_capacity_max=@old_innodb_io_capacity_max;
|
||||
|
||||
@@ -488,7 +488,6 @@ insert into test.sanity values
|
||||
("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_SYNC_SPIN_LOOPS"),
|
||||
("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_TEMP_DATA_FILE_PATH"),
|
||||
("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_TRX_PURGE_VIEW_UPDATE_ONLY_DEBUG"),
|
||||
("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_TRX_RSEG_N_SLOTS_DEBUG"),
|
||||
("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_UNDO_DIRECTORY"),
|
||||
("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_UNDO_LOG_TRUNCATE"),
|
||||
("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_UNDO_TABLESPACES"),
|
||||
|
||||
@@ -237,6 +237,8 @@ insert delayed into t1 values (1);
|
||||
connection default;
|
||||
# Waiting until INSERT DELAYED thread does the insert.
|
||||
drop table t1;
|
||||
create table test_partitions (i int) partition by hash (i) partitions 4;
|
||||
drop table test_partitions;
|
||||
set global server_audit_logging= off;
|
||||
set global server_audit_incl_users='root';
|
||||
set global server_audit_logging= on;
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
--source include/have_plugin_auth.inc
|
||||
--source include/not_embedded.inc
|
||||
--source include/have_partition.inc
|
||||
|
||||
if (!$SERVER_AUDIT_SO) {
|
||||
skip No SERVER_AUDIT plugin;
|
||||
@@ -178,12 +179,20 @@ connection cn1;
|
||||
|
||||
create table t1(id int) engine=myisam;
|
||||
insert delayed into t1 values (1);
|
||||
|
||||
connection default;
|
||||
|
||||
--echo # Waiting until INSERT DELAYED thread does the insert.
|
||||
let $wait_condition= SELECT COUNT(*) = 1 FROM t1;
|
||||
--source include/wait_condition.inc
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# MDEV-7451 Server audit: Table events for partitioned tables are duplicated for each partition.
|
||||
#
|
||||
create table test_partitions (i int) partition by hash (i) partitions 4;
|
||||
drop table test_partitions;
|
||||
|
||||
set global server_audit_logging= off;
|
||||
set global server_audit_incl_users='root';
|
||||
set global server_audit_logging= on;
|
||||
|
||||
@@ -385,6 +385,35 @@ disconnect con1;
|
||||
connection default;
|
||||
drop sequence s1;
|
||||
drop sequence s2;
|
||||
#
|
||||
# End of 10.4 tests
|
||||
#
|
||||
# MDEV-37906 Server crash or UBSAN errors in Item_func_nextval::update_table upon INSERT DELAYED
|
||||
#
|
||||
create sequence s engine=myisam;
|
||||
create table t (id bigint default(nextval(s))) engine=myisam;
|
||||
insert delayed into t () values();
|
||||
drop table t;
|
||||
drop sequence s;
|
||||
#
|
||||
# MDEV-37345 Item_func_nextval::val_int() crash on INSERT...SELECT with subqueries
|
||||
#
|
||||
create sequence s;
|
||||
create table t1 (a int, b int default(nextval(s)));
|
||||
insert into t1 () values ();
|
||||
create table t2 (c int);
|
||||
create procedure p() update t1 set a = 0;
|
||||
create trigger tr after insert on t2 for each row
|
||||
begin
|
||||
insert into t1 () values ();
|
||||
call p();
|
||||
end $
|
||||
insert into t2 values ();
|
||||
drop table t1, t2, s;
|
||||
drop procedure p;
|
||||
create sequence s;
|
||||
create temporary table t (f int);
|
||||
alter table t modify f int default(nextval(s));
|
||||
create or replace sequence s;
|
||||
insert into t values (default);
|
||||
drop sequence s;
|
||||
# End of 10.11 tests
|
||||
|
||||
@@ -417,6 +417,43 @@ insert into s1 values (1, 1, 10000, 100, 1, 1000, 0, 0);
|
||||
drop sequence s1;
|
||||
drop sequence s2;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.4 tests
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-37906 Server crash or UBSAN errors in Item_func_nextval::update_table upon INSERT DELAYED
|
||||
--echo #
|
||||
create sequence s engine=myisam;
|
||||
create table t (id bigint default(nextval(s))) engine=myisam;
|
||||
insert delayed into t () values();
|
||||
drop table t;
|
||||
drop sequence s;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-37345 Item_func_nextval::val_int() crash on INSERT...SELECT with subqueries
|
||||
--echo #
|
||||
# sequence and prelocking.
|
||||
create sequence s;
|
||||
create table t1 (a int, b int default(nextval(s)));
|
||||
insert into t1 () values ();
|
||||
create table t2 (c int);
|
||||
create procedure p() update t1 set a = 0;
|
||||
--delimiter $
|
||||
create trigger tr after insert on t2 for each row
|
||||
begin
|
||||
insert into t1 () values ();
|
||||
call p();
|
||||
end $
|
||||
--delimiter ;
|
||||
insert into t2 values ();
|
||||
drop table t1, t2, s;
|
||||
drop procedure p;
|
||||
|
||||
# another one, temporary table, ALTER, recreate sequence
|
||||
create sequence s;
|
||||
create temporary table t (f int);
|
||||
alter table t modify f int default(nextval(s));
|
||||
create or replace sequence s;
|
||||
insert into t values (default);
|
||||
drop sequence s;
|
||||
|
||||
--echo # End of 10.11 tests
|
||||
|
||||
@@ -1,65 +0,0 @@
|
||||
SET @start_global_value = @@global.innodb_trx_rseg_n_slots_debug;
|
||||
SELECT @start_global_value;
|
||||
@start_global_value
|
||||
0
|
||||
select @@global.innodb_trx_rseg_n_slots_debug between 0 and 1024;
|
||||
@@global.innodb_trx_rseg_n_slots_debug between 0 and 1024
|
||||
1
|
||||
select @@global.innodb_trx_rseg_n_slots_debug;
|
||||
@@global.innodb_trx_rseg_n_slots_debug
|
||||
0
|
||||
select @@session.innodb_trx_rseg_n_slots_debug;
|
||||
ERROR HY000: Variable 'innodb_trx_rseg_n_slots_debug' is a GLOBAL variable
|
||||
show global variables like 'innodb_trx_rseg_n_slots_debug';
|
||||
Variable_name Value
|
||||
innodb_trx_rseg_n_slots_debug 0
|
||||
show session variables like 'innodb_trx_rseg_n_slots_debug';
|
||||
Variable_name Value
|
||||
innodb_trx_rseg_n_slots_debug 0
|
||||
select * from information_schema.global_variables where variable_name='innodb_trx_rseg_n_slots_debug';
|
||||
VARIABLE_NAME VARIABLE_VALUE
|
||||
INNODB_TRX_RSEG_N_SLOTS_DEBUG 0
|
||||
select * from information_schema.session_variables where variable_name='innodb_trx_rseg_n_slots_debug';
|
||||
VARIABLE_NAME VARIABLE_VALUE
|
||||
INNODB_TRX_RSEG_N_SLOTS_DEBUG 0
|
||||
set global innodb_trx_rseg_n_slots_debug=1;
|
||||
select @@global.innodb_trx_rseg_n_slots_debug;
|
||||
@@global.innodb_trx_rseg_n_slots_debug
|
||||
1
|
||||
select * from information_schema.global_variables where variable_name='innodb_trx_rseg_n_slots_debug';
|
||||
VARIABLE_NAME VARIABLE_VALUE
|
||||
INNODB_TRX_RSEG_N_SLOTS_DEBUG 1
|
||||
select * from information_schema.session_variables where variable_name='innodb_trx_rseg_n_slots_debug';
|
||||
VARIABLE_NAME VARIABLE_VALUE
|
||||
INNODB_TRX_RSEG_N_SLOTS_DEBUG 1
|
||||
set @@global.innodb_trx_rseg_n_slots_debug=0;
|
||||
select @@global.innodb_trx_rseg_n_slots_debug;
|
||||
@@global.innodb_trx_rseg_n_slots_debug
|
||||
0
|
||||
select * from information_schema.global_variables where variable_name='innodb_trx_rseg_n_slots_debug';
|
||||
VARIABLE_NAME VARIABLE_VALUE
|
||||
INNODB_TRX_RSEG_N_SLOTS_DEBUG 0
|
||||
select * from information_schema.session_variables where variable_name='innodb_trx_rseg_n_slots_debug';
|
||||
VARIABLE_NAME VARIABLE_VALUE
|
||||
INNODB_TRX_RSEG_N_SLOTS_DEBUG 0
|
||||
set session innodb_trx_rseg_n_slots_debug='some';
|
||||
ERROR HY000: Variable 'innodb_trx_rseg_n_slots_debug' is a GLOBAL variable and should be set with SET GLOBAL
|
||||
set @@session.innodb_trx_rseg_n_slots_debug='some';
|
||||
ERROR HY000: Variable 'innodb_trx_rseg_n_slots_debug' is a GLOBAL variable and should be set with SET GLOBAL
|
||||
set global innodb_trx_rseg_n_slots_debug=1.1;
|
||||
ERROR 42000: Incorrect argument type to variable 'innodb_trx_rseg_n_slots_debug'
|
||||
set global innodb_trx_rseg_n_slots_debug='foo';
|
||||
ERROR 42000: Incorrect argument type to variable 'innodb_trx_rseg_n_slots_debug'
|
||||
set global innodb_trx_rseg_n_slots_debug=-2;
|
||||
Warnings:
|
||||
Warning 1292 Truncated incorrect innodb_trx_rseg_n_slots_debug value: '-2'
|
||||
set global innodb_trx_rseg_n_slots_debug=1e1;
|
||||
ERROR 42000: Incorrect argument type to variable 'innodb_trx_rseg_n_slots_debug'
|
||||
set global innodb_trx_rseg_n_slots_debug=1024;
|
||||
set global innodb_trx_rseg_n_slots_debug=1025;
|
||||
Warnings:
|
||||
Warning 1292 Truncated incorrect innodb_trx_rseg_n_slots_debug value: '1025'
|
||||
SET @@global.innodb_trx_rseg_n_slots_debug = @start_global_value;
|
||||
SELECT @@global.innodb_trx_rseg_n_slots_debug;
|
||||
@@global.innodb_trx_rseg_n_slots_debug
|
||||
0
|
||||
@@ -1652,18 +1652,6 @@ NUMERIC_BLOCK_SIZE NULL
|
||||
ENUM_VALUE_LIST OFF,ON
|
||||
READ_ONLY NO
|
||||
COMMAND_LINE_ARGUMENT NULL
|
||||
VARIABLE_NAME INNODB_TRX_RSEG_N_SLOTS_DEBUG
|
||||
SESSION_VALUE NULL
|
||||
DEFAULT_VALUE 0
|
||||
VARIABLE_SCOPE GLOBAL
|
||||
VARIABLE_TYPE INT UNSIGNED
|
||||
VARIABLE_COMMENT Debug flags for InnoDB to limit TRX_RSEG_N_SLOTS for trx_rsegf_undo_find_free()
|
||||
NUMERIC_MIN_VALUE 0
|
||||
NUMERIC_MAX_VALUE 1024
|
||||
NUMERIC_BLOCK_SIZE 0
|
||||
ENUM_VALUE_LIST NULL
|
||||
READ_ONLY NO
|
||||
COMMAND_LINE_ARGUMENT NULL
|
||||
VARIABLE_NAME INNODB_UNDO_DIRECTORY
|
||||
SESSION_VALUE NULL
|
||||
DEFAULT_VALUE
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
--source include/have_innodb.inc
|
||||
--source include/have_debug.inc
|
||||
|
||||
SET @start_global_value = @@global.innodb_trx_rseg_n_slots_debug;
|
||||
SELECT @start_global_value;
|
||||
|
||||
#
|
||||
# exists as global only
|
||||
#
|
||||
select @@global.innodb_trx_rseg_n_slots_debug between 0 and 1024;
|
||||
select @@global.innodb_trx_rseg_n_slots_debug;
|
||||
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
|
||||
select @@session.innodb_trx_rseg_n_slots_debug;
|
||||
show global variables like 'innodb_trx_rseg_n_slots_debug';
|
||||
show session variables like 'innodb_trx_rseg_n_slots_debug';
|
||||
--disable_warnings
|
||||
select * from information_schema.global_variables where variable_name='innodb_trx_rseg_n_slots_debug';
|
||||
select * from information_schema.session_variables where variable_name='innodb_trx_rseg_n_slots_debug';
|
||||
--enable_warnings
|
||||
|
||||
#
|
||||
# show that it's writable
|
||||
#
|
||||
set global innodb_trx_rseg_n_slots_debug=1;
|
||||
select @@global.innodb_trx_rseg_n_slots_debug;
|
||||
--disable_warnings
|
||||
select * from information_schema.global_variables where variable_name='innodb_trx_rseg_n_slots_debug';
|
||||
select * from information_schema.session_variables where variable_name='innodb_trx_rseg_n_slots_debug';
|
||||
--enable_warnings
|
||||
set @@global.innodb_trx_rseg_n_slots_debug=0;
|
||||
select @@global.innodb_trx_rseg_n_slots_debug;
|
||||
--disable_warnings
|
||||
select * from information_schema.global_variables where variable_name='innodb_trx_rseg_n_slots_debug';
|
||||
select * from information_schema.session_variables where variable_name='innodb_trx_rseg_n_slots_debug';
|
||||
--enable_warnings
|
||||
--error ER_GLOBAL_VARIABLE
|
||||
set session innodb_trx_rseg_n_slots_debug='some';
|
||||
--error ER_GLOBAL_VARIABLE
|
||||
set @@session.innodb_trx_rseg_n_slots_debug='some';
|
||||
|
||||
#
|
||||
# incorrect types
|
||||
#
|
||||
--error ER_WRONG_TYPE_FOR_VAR
|
||||
set global innodb_trx_rseg_n_slots_debug=1.1;
|
||||
--error ER_WRONG_TYPE_FOR_VAR
|
||||
set global innodb_trx_rseg_n_slots_debug='foo';
|
||||
set global innodb_trx_rseg_n_slots_debug=-2;
|
||||
--error ER_WRONG_TYPE_FOR_VAR
|
||||
set global innodb_trx_rseg_n_slots_debug=1e1;
|
||||
set global innodb_trx_rseg_n_slots_debug=1024;
|
||||
set global innodb_trx_rseg_n_slots_debug=1025;
|
||||
|
||||
#
|
||||
# Cleanup
|
||||
#
|
||||
|
||||
SET @@global.innodb_trx_rseg_n_slots_debug = @start_global_value;
|
||||
SELECT @@global.innodb_trx_rseg_n_slots_debug;
|
||||
@@ -21,11 +21,11 @@ EOF
exit 0
fi

systemctl set-environment _WSREP_NEW_CLUSTER='--wsrep-new-cluster' && \
systemctl restart ${1:-mariadb}
echo _WSREP_NEW_CLUSTER='--wsrep-new-cluster' > "@INSTALL_RUNDATADIR@/wsrep-new-cluster" && \
systemctl restart mariadb.service

extcode=$?

systemctl set-environment _WSREP_NEW_CLUSTER=''
rm -f "@INSTALL_RUNDATADIR@/wsrep-new-cluster"

exit $extcode
@@ -5841,7 +5841,12 @@ handler::ha_create(const char *name, TABLE *form, HA_CREATE_INFO *info_arg)
info_arg->options|= HA_LEX_CREATE_GLOBAL_TMP_TABLE;
int error= create(name, form, info_arg);
if (!error &&
!(info_arg->options & (HA_LEX_CREATE_TMP_TABLE | HA_CREATE_TMP_ALTER)))
!(info_arg->options & (HA_LEX_CREATE_TMP_TABLE | HA_CREATE_TMP_ALTER)) &&
/*
DO not notify if not main handler.
So skip notifications for partitions.
*/
form->file == this)
mysql_audit_create_table(form);
return error;
}
@@ -4247,7 +4247,7 @@ protected:
bool check_access(THD *, privilege_t);
public:
Item_func_nextval(THD *thd, TABLE_LIST *table_list_arg):
Item_longlong_func(thd), table_list(table_list_arg) {}
Item_longlong_func(thd), table_list(table_list_arg), table(0) {}
longlong val_int() override;
LEX_CSTRING func_name_cstring() const override
{
@@ -4276,14 +4276,8 @@ public:
*/
void update_table()
{
if (!(table= table_list->table))
{
/*
If nextval was used in DEFAULT then next_local points to
the table_list used by to open the sequence table
*/
table= table_list->next_local->table;
}
table= table_list->table;
DBUG_ASSERT(table);
}
bool const_item() const override { return 0; }
Item *do_get_copy(THD *thd) const override
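The simplified update_table() above relies on open_table() having filled in table_list->table for the sequence even when NEXTVAL() only appears in a column DEFAULT. A minimal sketch of that usage pattern, mirroring the sequence tests earlier in this commit (sequence and table names are illustrative):

CREATE SEQUENCE s1;
CREATE TABLE t1 (id BIGINT DEFAULT (NEXTVAL(s1)), payload INT);
-- The DEFAULT expression opens s1 through the prelocked table list:
INSERT INTO t1 (payload) VALUES (1), (2);
SELECT id, payload FROM t1;
DROP TABLE t1;
DROP SEQUENCE s1;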
@@ -211,24 +211,23 @@ int json_path_parts_compare(
{
if (b->type & JSON_PATH_ARRAY)
{
int res= 0, corrected_n_item_a= 0;
if (array_sizes)
corrected_n_item_a= a->n_item < 0 ?
array_sizes[b-temp_b] + a->n_item : a->n_item;
if (a->type & JSON_PATH_ARRAY_RANGE)
int res = 0;
if (a->type & JSON_PATH_WILD)
res = 1;
else if (a->type & JSON_PATH_ARRAY_RANGE && array_sizes)
{
int corrected_n_item_end_a= 0;
if (array_sizes)
corrected_n_item_end_a= a->n_item_end < 0 ?
array_sizes[b-temp_b] + a->n_item_end :
a->n_item_end;
res= b->n_item >= corrected_n_item_a &&
b->n_item <= corrected_n_item_end_a;
int start = (a->n_item >= 0) ? a->n_item
: array_sizes[b - temp_b] + a->n_item;
int end = (a->n_item_end >= 0) ? a->n_item_end
: array_sizes[b - temp_b] + a->n_item_end;
res = (b->n_item >= start && b->n_item <= end);
}
else
res= corrected_n_item_a == b->n_item;
else if (a->n_item >= 0)
res = (a->n_item == b->n_item);
else if (a->n_item < 0 && array_sizes)
res = (a->n_item == b->n_item - array_sizes[b - temp_b]);

if ((a->type & JSON_PATH_WILD) || res)
if (res)
goto step_fits;
goto step_failed;
}
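The negative n_item values handled above come from array steps written relative to the end of the array. A hedged SQL sketch, assuming a server version that accepts the last keyword in JSON path array indexes:

-- '$[last]' and '$[last-1]' produce the negative n_item values this code normalizes
-- against the actual array size:
SELECT JSON_EXTRACT('[10, 20, 30]', '$[last]');       -- expected 30
SELECT JSON_EXTRACT('[10, 20, 30]', '$[last-1]');     -- expected 20
SELECT JSON_CONTAINS_PATH('[10, 20, 30]', 'one', '$[last]');  -- expected 1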
@@ -4250,9 +4250,21 @@ String *Item_func_hex::val_str_ascii_from_val_str(String *str)
{
DBUG_ASSERT(&tmp_value != str);
String *res= args[0]->val_str(&tmp_value);
DBUG_ASSERT(res != str);
THD *thd= current_thd;

if ((null_value= (res == NULL)))
return NULL;

if (res->length()*2 > thd->variables.max_allowed_packet)
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER_THD(thd, ER_WARN_ALLOWED_PACKET_OVERFLOWED),
func_name(), thd->variables.max_allowed_packet);
null_value= true;
return NULL;
}

return str->set_hex(res->ptr(), res->length()) ? make_empty_result(str) : str;
}
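The added check follows the usual pattern for string functions whose result could exceed max_allowed_packet: push a warning and return NULL rather than build an oversized string. A small illustration of the size relationship it guards (values are illustrative):

-- HEX() needs two output bytes per input byte:
SELECT LENGTH(REPEAT('a', 10)) AS input_len,
       LENGTH(HEX(REPEAT('a', 10))) AS hex_len;   -- 10 and 20
-- With this change, if hex_len would exceed @@max_allowed_packet the function
-- returns NULL and raises ER_WARN_ALLOWED_PACKET_OVERFLOWED instead.
SELECT @@max_allowed_packet;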
@@ -978,8 +978,9 @@ int Json_table_column::print(THD *thd, Field **f, String *str)

(*f)->sql_type(column_type);

if (str->append(column_type) ||
((*f)->has_charset() && m_explicit_cs &&
if ((m_format_json ? str->append(STRING_WITH_LEN(" JSON ")) : str->append(column_type)))
return 1;
if (((*f)->has_charset() && m_explicit_cs &&
(str->append(STRING_WITH_LEN(" CHARSET ")) ||
str->append(&m_explicit_cs->cs_name) ||
(Charset(m_explicit_cs).can_have_collate_clause() &&

@@ -167,6 +167,7 @@ public:
{
m_on_error.m_response= RESPONSE_NOT_SPECIFIED;
m_on_empty.m_response= RESPONSE_NOT_SPECIFIED;
m_format_json= false;
}
int print(THD *tnd, Field **f, String *str);
};
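With m_format_json set, the column is printed back as JSON rather than as the underlying storage type, which is what keeps the stored definition of views over JSON_TABLE (such as the MDEV-34081 view earlier in this commit) returning JSON. A minimal sketch (view name is illustrative):

CREATE VIEW v_jt AS
SELECT jt.whole_block
FROM JSON_TABLE('[{"k": 1}]', '$[*]'
     COLUMNS (whole_block JSON PATH '$')) AS jt;
-- The stored definition should now spell the column as "whole_block JSON PATH '$'":
SHOW CREATE VIEW v_jt;
DROP VIEW v_jt;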
@@ -1903,7 +1903,16 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
if (charset_inited)
{
rpl_sql_thread_info *sql_info= thd->system_thread_info.rpl_sql_info;
if (thd->slave_thread && sql_info->cached_charset_compare(charset))
const bool applier=
#ifdef WITH_WSREP
WSREP(thd) ? thd->wsrep_applier :
#endif
false;

// Event charset should be compared for slave thread
// and applier threads
if ((thd->slave_thread || applier) &&
sql_info->cached_charset_compare(charset))
{
/* Verify that we support the charsets found in the event. */
if (!(thd->variables.character_set_client=
@@ -10221,8 +10221,6 @@ tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
{
bool must_be_ored= sel_trees_must_be_ored(param, tree1, tree2, ored_keys);
no_imerge_from_ranges= must_be_ored;
if (param->disable_index_merge_plans)
no_imerge_from_ranges= true;

if (no_imerge_from_ranges && no_merges1 && no_merges2)
{
@@ -10272,6 +10270,13 @@ tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
DBUG_RETURN(result);
}

/*
Ok, the result now has the ranges that one gets for (RT1 OR RT2).
If construction of SEL_IMERGE is disabled, stop right here.
*/
if (param->disable_index_merge_plans)
DBUG_RETURN(result);

SEL_IMERGE *imerge_from_ranges;
if (!(imerge_from_ranges= new SEL_IMERGE()))
result= NULL;
@@ -15650,6 +15655,46 @@ TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows,
}


/*
@brief
Return true if the select is using "Using index for group-by" and also
has "ORDER BY ... FETCH FIRST n ROWS WITH TIES"

@detail
There is a rewrite that removes the ORDER BY (JOIN::order) if the select
also has a GROUP BY that produces a compatible ordering.
However "FETCH FIRST ... WITH TIES" needs an ORDER BY clause (in
JOIN::alloc_order_fields()).
GROUP BY strategies handle it this way:
- For strategies using temporary table, JOIN::make_aggr_tables_info() will
put the ORDER BY clause back.
- OrderedGroupBy in end_send_group() handles WITH TIES with the GROUP BY
clause (note that SQL doesn't allow "GROUP BY ... WITH TIES").
- The remaining strategy is QUICK_GROUP_MIN_MAX_SELECT, for which
= the grouping strategy in the quick select doesn't handle WITH TIES.
= end_send() would not handle WITH TIES, because JOIN::order is removed.

The solution is to NOT remove ORDER BY when QUICK_GROUP_MIN_MAX_SELECT is
used.

Unfortunately, the optimizer then will not recognize that it can skip
sorting and will use filesort, which will prevent short-cutting the
execution when LIMIT is reached.
*/

bool using_with_ties_and_group_min_max(JOIN *join)
{
if (join->unit->lim.is_with_ties())
{
JOIN_TAB *tab= &join->join_tab[join->const_tables];
if (tab->select && tab->select->quick &&
tab->select->quick->get_type() ==
QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)
return true;
}
return false;
}

/*
Construct new quick select for group queries with min/max.

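The query shape this guards is a loose-index-scan GROUP BY whose ORDER BY would otherwise be optimized away while WITH TIES still needs it to compare the last returned row against the next one. A hedged sketch of such a statement (table, data and index are illustrative):

CREATE TABLE orders (customer_id INT, amount INT,
                     KEY k1 (customer_id, amount)) ENGINE=InnoDB;
INSERT INTO orders VALUES (1,10),(1,20),(2,10),(2,30),(3,10);
-- A candidate for "Using index for group-by"; the ORDER BY must be kept so
-- that FETCH ... WITH TIES can detect ties on customer_id:
SELECT customer_id, MIN(amount) AS m
FROM orders
GROUP BY customer_id
ORDER BY customer_id
FETCH FIRST 2 ROWS WITH TIES;
DROP TABLE orders;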
@@ -1878,6 +1878,7 @@ public:
Explain_quick_select *get_explain(MEM_ROOT *alloc) override;
};

bool using_with_ties_and_group_min_max(JOIN *join);

class QUICK_SELECT_DESC: public QUICK_RANGE_SELECT
{
@@ -355,6 +355,7 @@ struct SplM_field_ext_info: public SplM_field_info
with available statistics.
10. The select doesn't use WITH ROLLUP (This limitation can probably be
lifted)
11. The select doesn't have ORDER BY with LIMIT

@retval
true if the answer is positive
@@ -388,6 +389,9 @@ bool JOIN::check_for_splittable_materialized()
if (!partition_list)
return false;

if (select_lex->order_list.elements > 0 && !unit->lim.is_unlimited()) //!(11)
return false;

Json_writer_object trace_wrapper(thd);
Json_writer_object trace_split(thd, "check_split_materialized");

@@ -1149,7 +1149,7 @@ Sp_handler::sp_drop_routine_internal(THD *thd,
sp_cache **spc= get_cache(thd);
DBUG_ASSERT(spc);
if ((sp= sp_cache_lookup(spc, name)))
sp_cache_flush_obsolete(spc, &sp);
sp_cache_remove(spc, &sp);
/* Drop statistics for this stored program from performance schema. */
MYSQL_DROP_SP(type(), name->m_db.str, static_cast<uint>(name->m_db.length),
name->m_name.str, static_cast<uint>(name->m_name.length));
@@ -2840,10 +2840,12 @@ int Sp_handler::sp_cache_routine(THD *thd,
DBUG_ASSERT(spc);

*sp= sp_cache_lookup(spc, name);
thd->set_sp_cache_version_if_needed(sp_cache_version());

if (*sp)
{
sp_cache_flush_obsolete(spc, sp);
if ((*sp)->sp_cache_version() < thd->sp_cache_version())
sp_cache_remove(spc, sp);
if (*sp)
DBUG_RETURN(SP_OK);
}
@@ -231,14 +231,10 @@ void sp_cache_invalidate()
inside SP'.
*/

void sp_cache_flush_obsolete(sp_cache **cp, sp_head **sp)
void sp_cache_remove(sp_cache **cp, sp_head **sp)
{
if ((*sp)->sp_cache_version() < Cversion && !(*sp)->is_invoked())
{
DBUG_EXECUTE_IF("check_sp_cache_not_invalidated", DBUG_SUICIDE(););
(*cp)->remove(*sp);
*sp= NULL;
}
}

/**
@@ -59,7 +59,7 @@ void sp_cache_clear(sp_cache **cp);
void sp_cache_insert(sp_cache **cp, sp_head *sp);
sp_head *sp_cache_lookup(sp_cache **cp, const Database_qualified_name *name);
void sp_cache_invalidate();
void sp_cache_flush_obsolete(sp_cache **cp, sp_head **sp);
void sp_cache_remove(sp_cache **cp, sp_head **sp);
ulong sp_cache_version();
void sp_cache_enforce_limit(sp_cache *cp, ulong upper_limit_for_elements);

@@ -2047,7 +2047,18 @@ bool open_table(THD *thd, TABLE_LIST *table_list, Open_table_context *ot_ctx)
|
||||
table->query_id == 0))
|
||||
{
|
||||
int distance= ((int) table->reginfo.lock_type -
|
||||
(int) table_list->lock_type);
|
||||
(int) table_list->lock_type) * 2;
|
||||
TABLE_LIST *tl= thd->locked_tables_mode == LTM_PRELOCKED
|
||||
? table->pos_in_table_list : table->pos_in_locked_tables;
|
||||
/*
|
||||
note, that merge table children are automatically added to
|
||||
prelocking set in ha_myisammrg::add_children_list(), but their
|
||||
TABLE_LIST's are on the execution arena, so tl will be invalid
|
||||
on the second execution. Let's just skip them below.
|
||||
*/
|
||||
if (table_list->parent_l || !tl ||
|
||||
table_list->for_insert_data != tl->for_insert_data)
|
||||
distance|= 1;
|
||||
|
||||
/*
|
||||
Find a table that either has the exact lock type requested,
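For illustration, a minimal standalone sketch of the distance encoding used in the hunk above (the names and the ranking policy here are assumptions; only the arithmetic mirrors the patch): the lock-type difference is doubled so that the low bit can flag a placement mismatch, and a candidate with distance 0 is an exact match.

#include <cstdio>

// Hypothetical sketch: rate how well a cached TABLE instance fits a request.
// Even distances mean the placement matched; the low bit marks a mismatch so
// such candidates rank worse at the same lock-type distance.
static int table_distance(int cached_lock, int wanted_lock, bool placement_mismatch)
{
  int distance= (cached_lock - wanted_lock) * 2;
  if (placement_mismatch)
    distance|= 1;
  return distance;                 // 0 == exact match
}

int main()
{
  std::printf("%d %d %d\n",
              table_distance(4, 4, false),   // exact: 0
              table_distance(6, 4, false),   // stronger lock cached: 4
              table_distance(6, 4, true));   // stronger lock, wrong list: 5
  return 0;
}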
@@ -2449,6 +2460,11 @@ retry_share:
DBUG_ASSERT(table->file->pushed_cond == NULL);
table_list->updatable= 1; // It is not derived table nor non-updatable VIEW
table_list->table= table;
if (table_list->linked_table)
{
/* Update link for sequence tables in default */
table_list->linked_table->table= table;
}

if (!from_share && table->vcol_fix_expr(thd))
DBUG_RETURN(true);
@@ -3207,7 +3223,7 @@ static bool
check_and_update_routine_version(THD *thd, Sroutine_hash_entry *rt,
sp_head *sp)
{
ulong spc_version= sp_cache_version();
ulong spc_version= thd->sp_cache_version();
/* sp is NULL if there is no such routine. */
ulong version= sp ? sp->sp_cache_version() : spc_version;
/*
@@ -3217,7 +3233,7 @@ check_and_update_routine_version(THD *thd, Sroutine_hash_entry *rt,
Sic: version != spc_version <--> sp is not NULL.
*/
if (rt->m_sp_cache_version != version ||
(version != spc_version && !sp->is_invoked()))
(version < spc_version && !sp->is_invoked()))
{
if (thd->m_reprepare_observer &&
thd->m_reprepare_observer->report_error(thd))
@@ -5076,7 +5092,7 @@ add_internal_tables(THD *thd, Query_tables_list *prelocking_ctx,
next_local value as it may have been changed by a previous
statement using the same table.
*/
tables->next_local= tmp;
tmp->linked_table= tables;
continue;
}

@@ -5091,10 +5107,10 @@ add_internal_tables(THD *thd, Query_tables_list *prelocking_ctx,
&prelocking_ctx->query_tables_last,
tables->for_insert_data);
/*
Store link to the new table_list that will be used by open so that
Item_func_nextval() can find it
Store link to the sequences table so that we can in open_table() update
it to point to the opened table.
*/
tables->next_local= tl;
tl->linked_table= tables;
DBUG_PRINT("info", ("table name: %s added", tables->table_name.str));
} while ((tables= tables->next_global));
DBUG_RETURN(FALSE);

@@ -2638,16 +2638,16 @@ struct wait_for_commit

class Sp_caches
{
protected:
ulong m_sp_cache_version;
public:
sp_cache *sp_proc_cache;
sp_cache *sp_func_cache;
sp_cache *sp_package_spec_cache;
sp_cache *sp_package_body_cache;
Sp_caches()
:sp_proc_cache(NULL),
sp_func_cache(NULL),
sp_package_spec_cache(NULL),
sp_package_body_cache(NULL)
:m_sp_cache_version(0), sp_proc_cache(NULL), sp_func_cache(NULL),
sp_package_spec_cache(NULL), sp_package_body_cache(NULL)
{ }
~Sp_caches()
{
@@ -2657,19 +2657,22 @@ public:
DBUG_ASSERT(sp_package_spec_cache == NULL);
DBUG_ASSERT(sp_package_body_cache == NULL);
}
void sp_caches_swap(Sp_caches &rhs)
{
swap_variables(sp_cache*, sp_proc_cache, rhs.sp_proc_cache);
swap_variables(sp_cache*, sp_func_cache, rhs.sp_func_cache);
swap_variables(sp_cache*, sp_package_spec_cache, rhs.sp_package_spec_cache);
swap_variables(sp_cache*, sp_package_body_cache, rhs.sp_package_body_cache);
}
void sp_caches_clear();
/**
Clear content of sp related caches.
Don't delete cache objects itself.
*/
void sp_caches_empty();
ulong sp_cache_version() const
{
DBUG_ASSERT(m_sp_cache_version);
return m_sp_cache_version;
}
void set_sp_cache_version_if_needed(ulong version)
{
if (!m_sp_cache_version)
m_sp_cache_version= version;
}
};
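A minimal sketch (hypothetical names, simplified types) of the version-pinning idea behind set_sp_cache_version_if_needed(): the first routine lookup of a statement records the global SP cache version, and later checks in the same statement compare against that pinned value rather than the moving global counter.

#include <cassert>

// Hypothetical, simplified model of the pinned per-statement cache version.
static unsigned long global_sp_cache_version= 1;   // bumped on invalidation

struct StatementCaches
{
  unsigned long m_version= 0;                      // 0 == not pinned yet

  void pin_if_needed(unsigned long v) { if (!m_version) m_version= v; }
  unsigned long version() const { assert(m_version); return m_version; }
  void reset() { m_version= 0; }                   // next statement starts fresh
};

int main()
{
  StatementCaches stmt;
  stmt.pin_if_needed(global_sp_cache_version);     // first lookup pins version 1
  ++global_sp_cache_version;                       // concurrent invalidation
  // Only routines older than the pinned version are treated as obsolete for
  // this statement; routines parsed at version 1 remain usable until reset().
  assert(stmt.version() == 1);
  stmt.reset();
  return 0;
}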
@@ -41,6 +41,8 @@ typedef bool (*dt_processor)(THD *thd, LEX *lex, TABLE_LIST *derived);
static bool mysql_derived_init(THD *thd, LEX *lex, TABLE_LIST *derived);
static bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived);
static bool mysql_derived_optimize(THD *thd, LEX *lex, TABLE_LIST *derived);
static bool mysql_derived_optimize_stage2(THD *thd, LEX *lex,
TABLE_LIST *derived);
static bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived);
static bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived);
static bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived);
@@ -58,6 +60,7 @@ dt_processor processors[]=
&mysql_derived_create,
&mysql_derived_fill,
&mysql_derived_reinit,
&mysql_derived_optimize_stage2
};

/*
@@ -109,7 +112,7 @@ mysql_handle_derived(LEX *lex, uint phases)
{
if (!cursor->is_view_or_derived() && phases == DT_MERGE_FOR_INSERT)
continue;
uint8 allowed_phases= (cursor->is_merged_derived() ? DT_PHASES_MERGE :
uint allowed_phases= (cursor->is_merged_derived() ? DT_PHASES_MERGE :
DT_PHASES_MATERIALIZE | DT_MERGE_FOR_INSERT);
/*
Skip derived tables to which the phase isn't applicable.
@@ -170,7 +173,7 @@ bool
mysql_handle_single_derived(LEX *lex, TABLE_LIST *derived, uint phases)
{
bool res= FALSE;
uint8 allowed_phases= (derived->is_merged_derived() ? DT_PHASES_MERGE :
uint allowed_phases= (derived->is_merged_derived() ? DT_PHASES_MERGE :
DT_PHASES_MATERIALIZE);
DBUG_ENTER("mysql_handle_single_derived");
DBUG_PRINT("enter", ("phases: 0x%x allowed: 0x%x alias: '%s'",
@@ -1063,6 +1066,43 @@ err:
}

/*
@brief
Call JOIN::optimize_stage2_and_finish() for all child selects that use
two-phase optimization.
*/

static
bool mysql_derived_optimize_stage2(THD *thd, LEX *lex, TABLE_LIST *derived)
{
SELECT_LEX_UNIT *unit= derived->get_unit();
SELECT_LEX *first_select= unit->first_select();
SELECT_LEX *save_current_select= lex->current_select;
bool res= FALSE;

if (derived->merged || unit->is_unit_op())
{
/*
Two-phase join optimization is not applicable for merged derived tables
and UNIONs.
*/
return FALSE;
}

lex->current_select= first_select;
/* Same condition as in mysql_derived_optimize(): */
if (unit->derived && !derived->is_merged_derived())
{
JOIN *join= first_select->join;
if (join && join->optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE)
res= join->optimize_stage2_and_finish();
}

lex->current_select= save_current_select;
return res;
}

/**
Actually create result table for a materialized derived table/view.

@@ -3403,11 +3403,11 @@ bool Delayed_insert::open_and_lock_table()
return TRUE;
}

if (table->triggers || table->check_constraints)
if (table->triggers || table->check_constraints || table->internal_tables)
{
/*
Table has triggers or check constraints. This is not an error, but we do
not support these with delayed insert. Terminate the delayed
Table uses triggers, check constraints or sequences. This is not an error,
but we do not support these with delayed insert. Terminate the delayed
thread without an error and thus request lock upgrade.
*/
return TRUE;

@@ -2031,6 +2031,12 @@ int Lex_input_stream::lex_one_token(YYSTYPE *yylval, THD *thd)
const uchar *const state_map= cs->state_map;
const uchar *const ident_map= cs->ident_map;

if (thd->killed)
{
thd->send_kill_message();
return END_OF_INPUT;
}

start_token();
state= next_state;
next_state= MY_LEX_OPERATOR_OR_IDENT;

@@ -7486,6 +7486,7 @@ void THD::reset_for_next_command(bool do_clear_error)
binlog_unsafe_warning_flags= 0;

save_prep_leaf_list= false;
m_sp_cache_version= 0;

#if defined(WITH_WSREP) && !defined(DBUG_OFF)
if (mysql_bin_log.is_open())

@@ -1992,7 +1992,6 @@ bool JOIN::build_explain()
int JOIN::optimize()
{
int res= 0;
join_optimization_state init_state= optimization_state;
if (select_lex->pushdown_select)
{
if (optimization_state == JOIN::OPTIMIZATION_DONE)
@@ -2009,18 +2008,18 @@ int JOIN::optimize()
}
with_two_phase_optimization= false;
}
else if (optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE)
res= optimize_stage2();
else
{
// to prevent double initialization on EXPLAIN
/*
This function may be invoked multiple times. Do nothing if the
optimization (either full or stage1) are already done.
*/
if (optimization_state != JOIN::NOT_OPTIMIZED)
return FALSE;
optimization_state= JOIN::OPTIMIZATION_IN_PROGRESS;
res= optimize_inner();
}
if (!with_two_phase_optimization ||
init_state == JOIN::OPTIMIZATION_PHASE_1_DONE)
if (!with_two_phase_optimization)
{
if (!res && have_query_plan != QEP_DELETED)
res= build_explain();
@@ -2037,6 +2036,29 @@ int JOIN::optimize()
}

/*
@brief
Call optimize_stage2() and save the query plan.
*/

int JOIN::optimize_stage2_and_finish()
{
int res= 0;
DBUG_ASSERT(with_two_phase_optimization);
DBUG_ASSERT(optimization_state == OPTIMIZATION_PHASE_1_DONE);

if (optimize_stage2())
res= 1;
else
{
if (have_query_plan != JOIN::QEP_DELETED)
res= build_explain();
optimization_state= JOIN::OPTIMIZATION_DONE;
}
return res;
}
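A compact sketch, under assumed and simplified state names, of the two-phase flow that optimize(), optimize_stage2() and optimize_stage2_and_finish() move a JOIN through; it illustrates only the allowed transitions, not the real optimizer work.

#include <cassert>

// Simplified model of the JOIN optimization state machine used above.
enum class OptState { NOT_OPTIMIZED, IN_PROGRESS, PHASE_1_DONE, DONE };

struct MiniJoin
{
  OptState state= OptState::NOT_OPTIMIZED;
  bool two_phase= false;

  void optimize_stage1()
  {
    assert(state == OptState::NOT_OPTIMIZED);
    state= two_phase ? OptState::PHASE_1_DONE : OptState::DONE;
  }
  void optimize_stage2_and_finish()
  {
    assert(two_phase && state == OptState::PHASE_1_DONE);
    state= OptState::DONE;       // stage 2 work, then build the EXPLAIN data
  }
};

int main()
{
  MiniJoin j;
  j.two_phase= true;             // e.g. a Split-Materialized candidate
  j.optimize_stage1();           // parent select may now try several split plans
  j.optimize_stage2_and_finish();
  assert(j.state == OptState::DONE);
  return 0;
}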
/**
@brief
Create range filters objects needed in execution for all join tables
@@ -2756,6 +2778,19 @@ setup_subq_exit:
}

/*
@brief
In the Stage 1 we've picked the join order.
Now, refine the query plan and sort out all the details.
The choice how to handle GROUP/ORDER BY is also made here.

@detail
The main reason this is a separate function is Split-Materialized
optimization. There, we first consider doing non-split Materialization for
a SELECT. After that, the parent SELECT will attempt doing Splitting in
multiple ways and make the final choice.
*/

int JOIN::optimize_stage2()
{
ulonglong select_opts_for_readinfo;
@@ -2776,7 +2811,7 @@ int JOIN::optimize_stage2()
if (make_range_rowid_filters())
DBUG_RETURN(1);

if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE_STAGE2))
DBUG_RETURN(1);

/*
@@ -3251,9 +3286,14 @@ int JOIN::optimize_stage2()
(as MariaDB is by default sorting on GROUP BY) or
if there is no GROUP BY and aggregate functions are used
(as the result will only contain one row).

(1) - Do not remove ORDER BY if we have WITH TIES and are using
QUICK_GROUP_MIN_MAX_SELECT to handle GROUP BY. See the comment
for using_with_ties_and_group_min_max() for details.
*/
if (order && (test_if_subpart(group_list, order) ||
(!group_list && tmp_table_param.sum_func_count)))
(!group_list && tmp_table_param.sum_func_count)) &&
!using_with_ties_and_group_min_max(this)) // (1)
order=0;

// Can't use sort on head table if using join buffering
@@ -3567,7 +3607,7 @@ setup_subq_exit:
some of the derived tables, and never did stage 2.
Do it now, otherwise Explain data structure will not be complete.
*/
if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE_STAGE2))
DBUG_RETURN(1);
}
/*
@@ -4846,6 +4886,7 @@ bool JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
int JOIN::exec()
{
int res;
DBUG_ASSERT(optimization_state == OPTIMIZATION_DONE);
DBUG_EXECUTE_IF("show_explain_probe_join_exec_start",
if (dbug_user_var_equals_int(thd,
"show_explain_probe_select_id",
@@ -14644,6 +14685,19 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
if (build_tmp_join_prefix_cond(join, tab, &sel->cond))
return true;

/*
To be removed in 11.0+:
Caution: we can reach this point with quick=NULL. Below, we'll
use tab->keys and not tab->const_keys like
get_quick_record_count() did. If we have constructed a
group-min-max quick select, make sure we're able to construct it
again
*/
if (sel->quick && sel->quick->get_type() ==
QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)
{
tab->keys.set_bit(sel->quick->index);
}
/*
We can't call sel->cond->fix_fields,
as it will break tab->on_expr if it's AND condition

@@ -1768,6 +1768,7 @@ public:
int optimize();
int optimize_inner();
int optimize_stage2();
int optimize_stage2_and_finish();
bool build_explain();
int reinit();
int init_execution();

@@ -5206,20 +5206,18 @@ static int fill_schema_table_from_frm(THD *thd, MEM_ROOT *mem_root,
res= open_table_from_share(thd, share, table_name, 0,
EXTRA_RECORD | OPEN_FRM_FILE_ONLY,
thd->open_options, &tbl, FALSE);
if (res && hide_object_error(thd->get_stmt_da()->sql_errno()))
if (res)
{
if (hide_object_error(thd->get_stmt_da()->sql_errno()))
res= 0;
}
else
{
char buf[NAME_CHAR_LEN + 1];
if (unlikely(res))
get_table_engine_for_i_s(thd, buf, &table_list, db_name, table_name);

tbl.s= share;
table_list.table= &tbl;
table_list.view= (LEX*) share->is_view;
bool res2= schema_table->process_table(thd, &table_list, table, res,
db_name, table_name);
if (res == 0)
closefrm(&tbl);
res= res2;
}
@@ -5363,30 +5361,6 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
uint table_open_method= tables->table_open_method;
bool can_deadlock;
MEM_ROOT tmp_mem_root;
/*
We're going to open FRM files for tables.
In case of VIEWs that contain stored function calls,
these stored functions will be parsed and put to the SP cache.

Suppose we have a view containing a stored function call:
CREATE VIEW v1 AS SELECT f1() AS c1;
and now we're running:
SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=f1();
If a parallel thread invalidates the cache,
e.g. by creating or dropping some stored routine,
the SELECT query will re-parse f1() when processing "v1"
and replace the outdated cached version of f1() to a new one.
But the old version of f1() is referenced from the m_sp member
of the Item_func_sp instances used in the WHERE condition.
We cannot destroy it. To avoid such clashes, let's remember
all old routines into a temporary SP cache collection
and process tables with a new empty temporary SP cache collection.
Then restore to the old SP cache collection at the end.
*/
Sp_caches old_sp_caches;

old_sp_caches.sp_caches_swap(*thd);

bzero(&tmp_mem_root, sizeof(tmp_mem_root));

/*
@@ -5609,14 +5583,6 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
err:
thd->restore_backup_open_tables_state(&open_tables_state_backup);
free_root(&tmp_mem_root, 0);

/*
Now restore to the saved SP cache collection
and clear the temporary SP cache collection.
*/
old_sp_caches.sp_caches_swap(*thd);
old_sp_caches.sp_caches_clear();

DBUG_RETURN(error);
}
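A self-contained toy model of the swap/restore pattern described in the removed get_all_tables() comment above (all names here are stand-ins, not server code): the I_S scan runs on empty caches while the session's original caches are parked aside and restored afterwards.

#include <cassert>
#include <utility>

// Toy stand-in for the SP cache collection; swap() and clear() mimic
// sp_caches_swap() and sp_caches_clear() in spirit only.
struct ToySpCaches
{
  void *proc= nullptr, *func= nullptr;
  void swap(ToySpCaches &rhs)
  {
    std::swap(proc, rhs.proc);
    std::swap(func, rhs.func);
  }
  void clear() { proc= func= nullptr; }
};

int main()
{
  ToySpCaches session;                          // what the connection holds
  session.proc= reinterpret_cast<void*>(0x1);

  ToySpCaches old;                              // "old_sp_caches" in the diff
  old.swap(session);                            // scan starts with empty caches
  assert(session.proc == nullptr);

  // ... the I_S scan would parse routines into `session` here ...

  old.swap(session);                            // restore the original caches
  old.clear();                                  // discard the temporary ones
  assert(session.proc == reinterpret_cast<void*>(0x1));
  return 0;
}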
@@ -5013,32 +5013,35 @@ bool wsrep_check_sequence(THD* thd,
const bool used_engine)
{
enum legacy_db_type db_type;
const LEX_CSTRING *engine_name;

DBUG_ASSERT(WSREP(thd));

if (used_engine)
{
db_type= thd->lex->create_info.db_type->db_type;
// Currently any dynamic storage engine is not possible to identify
// using DB_TYPE_XXXX and ENGINE=SEQUENCE is one of them.
// Therefore, we get storage engine name from lex.
engine_name=
thd->lex->m_sql_cmd->option_storage_engine_name()->name();
}
else
{
const handlerton *hton= ha_default_handlerton(thd);
db_type= hton->db_type;
engine_name= hton_name(hton);
}

// In Galera cluster we support only InnoDB sequences
if (db_type != DB_TYPE_INNODB)
{
// Currently any dynamic storage engine is not possible to identify
// using DB_TYPE_XXXX and ENGINE=SEQUENCE is one of them.
// Therefore, we get storage engine name from lex.
const LEX_CSTRING *tb_name= thd->lex->m_sql_cmd->option_storage_engine_name()->name();
// (1) CREATE TABLE ... ENGINE=SEQUENCE OR
// (2) ALTER TABLE ... ENGINE= OR
// Note in ALTER TABLE table->s->sequence != nullptr
// (3) CREATE SEQUENCE ... ENGINE=
if ((thd->lex->sql_command == SQLCOM_CREATE_TABLE &&
lex_string_eq(tb_name, STRING_WITH_LEN("SEQUENCE"))) ||
lex_string_eq(engine_name, STRING_WITH_LEN("SEQUENCE"))) ||
(thd->lex->sql_command == SQLCOM_ALTER_TABLE) ||
(thd->lex->sql_command == SQLCOM_CREATE_SEQUENCE))
{
@@ -5046,7 +5049,8 @@ bool wsrep_check_sequence(THD* thd,
"non-InnoDB sequences in Galera cluster");
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_NOT_SUPPORTED_YET,
"ENGINE=%s not supported by Galera", tb_name->str);
"ENGINE=%s not supported by Galera",
engine_name->str);
return(true);
}
}
@@ -11761,6 +11765,7 @@ do_continue:;
}
else
{
/* MERGE TABLE */
if (!table->s->tmp_table &&
wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN))
goto err_new_table_cleanup;
@@ -11769,6 +11774,8 @@ do_continue:;
alter_info->keys_onoff);
if (trans_commit_stmt(thd) || trans_commit_implicit(thd))
goto err_new_table_cleanup;
/* Ensure that the ALTER is binlogged as a DDL */
thd->transaction->stmt.mark_trans_did_ddl();
}
thd->count_cuted_fields= CHECK_FIELD_IGNORE;

@@ -2108,9 +2108,13 @@ class IS_table_read_plan;
#define DT_CREATE 32U
#define DT_FILL 64U
#define DT_REINIT 128U
#define DT_PHASES 8U

#define DT_OPTIMIZE_STAGE2 256U

/* Number of bits used by all phases: */
#define DT_PHASES 9U
/* Phases that are applicable to all derived tables. */
#define DT_COMMON (DT_INIT + DT_PREPARE + DT_REINIT + DT_OPTIMIZE)
#define DT_COMMON (DT_INIT + DT_PREPARE + DT_REINIT + DT_OPTIMIZE + DT_OPTIMIZE_STAGE2)
/* Phases that are applicable only to materialized derived tables. */
#define DT_MATERIALIZE (DT_CREATE + DT_FILL)
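A small self-contained sketch of the phase-bitmask arithmetic above: each phase is a single bit, DT_PHASES counts the bits in use, and a composite mask such as DT_COMMON is simply the sum/OR of its member phases. Only the values that appear in the hunk are taken from the patch; the lower phase values are assumptions for illustration.

#include <cassert>

// Bit values below DT_CREATE are assumed; DT_CREATE..DT_OPTIMIZE_STAGE2
// mirror the defines shown in the hunk above.
constexpr unsigned DT_INIT= 1U, DT_PREPARE= 2U, DT_OPTIMIZE= 4U, DT_MERGE= 8U,
                   DT_MERGE_FOR_INSERT= 16U, DT_CREATE= 32U, DT_FILL= 64U,
                   DT_REINIT= 128U, DT_OPTIMIZE_STAGE2= 256U;
constexpr unsigned DT_PHASES= 9U;   // number of single-bit phases defined

constexpr unsigned DT_COMMON=
    DT_INIT + DT_PREPARE + DT_REINIT + DT_OPTIMIZE + DT_OPTIMIZE_STAGE2;

int main()
{
  // Every phase bit fits below 1 << DT_PHASES.
  assert(DT_OPTIMIZE_STAGE2 < (1U << DT_PHASES));
  // Membership tests are plain bit tests, as in mysql_handle_derived().
  assert(DT_COMMON & DT_OPTIMIZE_STAGE2);
  assert(!(DT_COMMON & DT_FILL));
  return 0;
}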
@@ -2494,6 +2498,7 @@ struct TABLE_LIST
TABLE_LIST *next_local;
/* link in a global list of all queries tables */
TABLE_LIST *next_global, **prev_global;
TABLE_LIST *linked_table; // For sequence tables used in default
LEX_CSTRING db;
LEX_CSTRING table_name;
LEX_CSTRING schema_table_name;

@@ -220,6 +220,12 @@ int federatedx_handler_base::end_scan_()
}

static bool is_supported_by_select_handler(enum_sql_command sql_command)
{
return sql_command == SQLCOM_SELECT || sql_command == SQLCOM_INSERT_SELECT;
}

/*
Create FederatedX select handler for processing either a single select
(in this case sel_lex is initialized and lex_unit==NULL)
@@ -230,7 +236,7 @@ static select_handler *
create_federatedx_select_handler(THD *thd, SELECT_LEX *sel_lex,
SELECT_LEX_UNIT *lex_unit)
{
if (!use_pushdown)
if (!use_pushdown || !is_supported_by_select_handler(thd->lex->sql_command))
return nullptr;

auto tbl= get_fed_table_for_pushdown(sel_lex);

@@ -206,18 +206,16 @@ ATTRIBUTE_COLD void btr_search_lazy_free(dict_index_t *index)
UT_LIST_REMOVE(table->freed_indexes, index);
index->lock.free();
dict_mem_index_free(index);

if (!UT_LIST_GET_LEN(table->freed_indexes) &&
!UT_LIST_GET_LEN(table->indexes))
{
ut_ad(!table->id);
const bool destroy= !table->id && !UT_LIST_GET_LEN(table->freed_indexes) &&
!UT_LIST_GET_LEN(table->indexes);
table->autoinc_mutex.wr_unlock();

if (destroy)
{
table->autoinc_mutex.destroy();
dict_mem_table_free(table);
return;
}

table->autoinc_mutex.wr_unlock();
}

ATTRIBUTE_COLD bool btr_search_disable()
@@ -682,7 +680,6 @@ btr_search_update_hash_ref(
ut_ad(block->page.lock.have_x() || block->page.lock.have_s());
ut_ad(btr_cur_get_page(cursor) == block->page.frame);
ut_ad(page_is_leaf(block->page.frame));
assert_block_ahi_valid(block);

dict_index_t* index = block->index;

@@ -700,8 +697,9 @@ btr_search_update_hash_ref(
ut_ad(index == cursor->index());
auto part = btr_search_sys.get_part(*index);
part->latch.wr_lock(SRW_LOCK_CALL);
ut_ad(!block->index || block->index == index);

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(block->index ? block->index == index : !block->n_pointers);
#endif
if (block->index
&& (block->curr_n_fields == info->n_fields)
&& (block->curr_n_bytes == info->n_bytes)

@@ -2183,10 +2183,24 @@ ATTRIBUTE_COLD void buf_flush_ahead(lsn_t lsn, bool furious) noexcept
if (limit < lsn)
{
limit= lsn;
if (furious)
{
/* Request any concurrent threads to wait for this batch to complete,
in log_free_check(). */
log_sys.set_check_for_checkpoint();
/* Immediately wake up buf_flush_page_cleaner(), even when it
is in the middle of a 1-second my_cond_timedwait(). */
wake:
buf_pool.page_cleaner_set_idle(false);
pthread_cond_signal(&buf_pool.do_flush_list);
if (furious)
log_sys.set_check_for_checkpoint();
}
else if (buf_pool.page_cleaner_idle())
/* In non-furious mode, concurrent writes to the log will remain
possible, and we are gently requesting buf_flush_page_cleaner()
to do more work to avoid a later call with furious=true.
We will only wake the buf_flush_page_cleaner() from an indefinite
my_cond_wait(), but we will not disturb the regular 1-second sleep. */
goto wake;
}
mysql_mutex_unlock(&buf_pool.flush_list_mutex);
}

@@ -445,7 +445,7 @@ pick_next(
double* g1, /*!< in: mbr of group 1. */
double* g2, /*!< in: mbr of group 2. */
rtr_split_node_t** choice, /*!< out: the next node.*/
int* n_group, /*!< out: group number.*/
uint16_t* n_group, /*!< out: 1 or 2 */
int n_dim) /*!< in: dimensions. */
{
rtr_split_node_t* cur = node;
@@ -487,7 +487,7 @@ mark_all_entries(
/*=============*/
rtr_split_node_t* node, /*!< in/out: split nodes. */
int n_entries, /*!< in: entries number. */
int n_group) /*!< in: group number. */
uint16_t n_group) /*!< in: 1 or 2 */
{
rtr_split_node_t* cur = node;
rtr_split_node_t* end = node + n_entries;
@@ -522,7 +522,7 @@ split_rtree_node(
double* g1 = reserve_coords(d_buffer, n_dim);
double* g2 = reserve_coords(d_buffer, n_dim);
rtr_split_node_t* next = NULL;
int next_node = 0;
uint16_t next_node = 0;
int i;
int first_rec_group = 1;
rtr_split_node_t* end = node + n_entries;
@@ -542,9 +542,9 @@ split_rtree_node(
b->n_node = 2;

copy_coords(g1, a->coords, n_dim);
size1 += key_size;
size1 += a->key_len;
copy_coords(g2, b->coords, n_dim);
size2 += key_size;
size2 += b->key_len;

for (i = n_entries - 2; i > 0; --i) {
/* Can't write into group 2 */
@@ -561,10 +561,10 @@ split_rtree_node(

pick_next(node, n_entries, g1, g2, &next, &next_node, n_dim);
if (next_node == 1) {
size1 += key_size;
size1 += next->key_len;
mbr_join(g1, next->coords, n_dim);
} else {
size2 += key_size;
size2 += next->key_len;
mbr_join(g2, next->coords, n_dim);
}

@@ -96,6 +96,7 @@ rtr_page_split_initialize_nodes(
for (cur = task; cur < stop - 1; ++cur) {
cur->coords = reserve_coords(buf_pos, SPDIMS);
cur->key = rec;
cur->key_len = static_cast<uint16_t>(len);

memcpy(cur->coords, source_cur, DATA_MBR_LEN);

@@ -109,11 +110,11 @@ rtr_page_split_initialize_nodes(
source_cur = static_cast<const byte*>(dfield_get_data(
dtuple_get_nth_field(tuple, 0)));
cur->coords = reserve_coords(buf_pos, SPDIMS);
rec = (byte*) mem_heap_alloc(
heap, rec_get_converted_size(cursor->index(), tuple, 0));

len = rec_get_converted_size(cursor->index(), tuple, 0);
rec = (byte*) mem_heap_alloc(heap, len);
rec = rec_convert_dtuple_to_rec(rec, cursor->index(), tuple, 0);
cur->key = rec;
cur->key_len = static_cast<uint16_t>(len);

memcpy(cur->coords, source_cur, DATA_MBR_LEN);

@@ -1103,8 +1104,10 @@ corrupted:

/* Reposition the cursor for insert and try insertion */
page_cursor = btr_cur_get_page_cur(cursor);
page_cursor->block = cur_split_node->n_node != first_rec_group
? new_block : block;
buf_block_t *insert_block = (cur_split_node->n_node != first_rec_group)
? new_block
: block;
page_cursor->block = insert_block;

ulint up_match = 0, low_match = 0;

@@ -1131,7 +1134,7 @@ corrupted:
attempted this already. */
if (rec == NULL) {
if (!is_page_cur_get_page_zip(page_cursor)
&& btr_page_reorganize(page_cursor, mtr)) {
&& !btr_page_reorganize(page_cursor, mtr)) {
rec = page_cur_tuple_insert(page_cursor, tuple,
offsets,
heap, n_ext, mtr);
@@ -1184,11 +1187,11 @@ after_insert:
IF_DBUG(iterated=true,);

rec_t* i_rec = page_rec_get_next(page_get_infimum_rec(
buf_block_get_frame(block)));
buf_block_get_frame(insert_block)));
if (UNIV_UNLIKELY(!i_rec)) {
goto corrupted;
}
btr_cur_position(cursor->index(), i_rec, block, cursor);
btr_cur_position(cursor->index(), i_rec, insert_block, cursor);

goto func_start;
}

@@ -19669,11 +19669,6 @@ static MYSQL_SYSVAR_ENUM(default_row_format, innodb_default_row_format,
&innodb_default_row_format_typelib);

#ifdef UNIV_DEBUG
static MYSQL_SYSVAR_UINT(trx_rseg_n_slots_debug, trx_rseg_n_slots_debug,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_NOCMDOPT,
"Debug flags for InnoDB to limit TRX_RSEG_N_SLOTS for trx_rsegf_undo_find_free()",
NULL, NULL, 0, 0, 1024, 0);

static MYSQL_SYSVAR_UINT(limit_optimistic_insert_debug,
btr_cur_limit_optimistic_insert_debug, PLUGIN_VAR_RQCMDARG,
"Artificially limit the number of records per B-tree page (0=unlimited).",
@@ -19953,7 +19948,6 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(compression_pad_pct_max),
MYSQL_SYSVAR(default_row_format),
#ifdef UNIV_DEBUG
MYSQL_SYSVAR(trx_rseg_n_slots_debug),
MYSQL_SYSVAR(limit_optimistic_insert_debug),
MYSQL_SYSVAR(trx_purge_view_update_only_debug),
MYSQL_SYSVAR(evict_tables_on_commit_debug),

@@ -342,16 +342,6 @@ inline uint32_t dtype_form_prtype(ulint old_prtype, ulint charset_coll)
return uint32_t(old_prtype | (charset_coll << 16));
}

/*********************************************************************//**
Determines if a MySQL string type is a subset of UTF-8. This function
may return false negatives, in case further character-set collation
codes are introduced in MySQL later.
@return whether a subset of UTF-8 */
UNIV_INLINE
bool
dtype_is_utf8(
/*==========*/
ulint prtype);/*!< in: precise data type */
/*********************************************************************//**
Gets the type length.
@return fixed length of the type, in bytes, or 0 if variable-length */

@@ -27,31 +27,6 @@ Created 1/16/1996 Heikki Tuuri
#include "mach0data.h"
#include "ha_prototypes.h"

/*********************************************************************//**
Determines if a MySQL string type is a subset of UTF-8. This function
may return false negatives, in case further character-set collation
codes are introduced in MySQL later.
@return whether a subset of UTF-8 */
UNIV_INLINE
bool
dtype_is_utf8(
/*==========*/
ulint prtype) /*!< in: precise data type */
{
/* These codes have been copied from strings/ctype-extra.c
and strings/ctype-utf8.c. */
switch (dtype_get_charset_coll(prtype)) {
case 11: /* ascii_general_ci */
case 65: /* ascii_bin */
case 33: /* utf8_general_ci */
case 83: /* utf8_bin */
case 254: /* utf8_general_cs */
return true;
}

return false;
}

/*********************************************************************//**
Gets the MySQL type code from a dtype.
@return MySQL type code; this is NOT an InnoDB type code! */

@@ -70,7 +70,8 @@ rtree_mbr_from_wkb(
struct rtr_split_node_t
{
double square; /* square of the mbr.*/
int n_node; /* which group in.*/
uint16_t n_node; /* group: 1 or 2, or 0=unassigned */
uint16_t key_len; /* key length */
uchar* key; /* key. */
double* coords; /* mbr. */
};
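A short standalone sketch of the size-accounting change visible in split_rtree_node(): with key_len stored per split node, each group sums the node's own key length instead of a single key_size shared by all entries (toy values only).

#include <cstdint>
#include <cstdio>
#include <vector>

// Toy version of the group size accounting: each node carries its own
// key_len, so the two groups can sum heterogeneous key sizes.
struct ToySplitNode { uint16_t n_node; uint16_t key_len; };

int main()
{
  std::vector<ToySplitNode> nodes{{1, 20}, {2, 24}, {1, 18}, {2, 30}};
  std::size_t size1= 0, size2= 0;
  for (const ToySplitNode &n : nodes)
    (n.n_node == 1 ? size1 : size2) += n.key_len;   // was: += key_size
  std::printf("group1=%zu group2=%zu\n", size1, size2);
  return 0;
}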
@@ -644,17 +644,6 @@ public:
/** Note that log_sys.latch is no longer being held exclusively. */
void flag_wr_unlock() noexcept { ut_ad(m_latch_ex); m_latch_ex= false; }

/** type of page flushing is needed during commit() */
enum page_flush_ahead
{
/** no need to trigger page cleaner */
PAGE_FLUSH_NO= 0,
/** asynchronous flushing is needed */
PAGE_FLUSH_ASYNC,
/** furious flushing is needed */
PAGE_FLUSH_SYNC
};

private:
/** Handle any pages that were freed during the mini-transaction. */
void process_freed_pages();
@@ -701,29 +690,31 @@ private:
/** Commit the mini-transaction log.
@tparam pmem log_sys.is_mmap()
@param mtr mini-transaction
@param lsns {start_lsn,flush_ahead} */
@param lsns {start_lsn,flush_ahead_lsn} */
template<bool pmem>
static void commit_log(mtr_t *mtr, std::pair<lsn_t,page_flush_ahead> lsns)
noexcept;
static void commit_log(mtr_t *mtr, std::pair<lsn_t,lsn_t> lsns) noexcept;

/** Release log_sys.latch. */
void commit_log_release() noexcept;

/** Append the redo log records to the redo log buffer.
@return {start_lsn,flush_ahead} */
std::pair<lsn_t,page_flush_ahead> do_write();
@return {start_lsn,flush_ahead_lsn} */
std::pair<lsn_t,lsn_t> do_write() noexcept;

/** Append the redo log records to the redo log buffer.
@tparam mmap log_sys.is_mmap()
@param mtr mini-transaction
@param len number of bytes to write
@return {start_lsn,flush_ahead} */
@return {start_lsn,flush_ahead_lsn} */
template<bool mmap> static
std::pair<lsn_t,page_flush_ahead> finish_writer(mtr_t *mtr, size_t len);
std::pair<lsn_t,lsn_t> finish_writer(mtr_t *mtr, size_t len);

/** The applicable variant of commit_log() */
static void (*commit_logger)(mtr_t *, std::pair<lsn_t,page_flush_ahead>);
static void (*commit_logger)(mtr_t *, std::pair<lsn_t,lsn_t>);
/** The applicable variant of finish_writer() */
static std::pair<lsn_t,page_flush_ahead> (*finisher)(mtr_t *, size_t);
static std::pair<lsn_t,lsn_t> (*finisher)(mtr_t *, size_t);

std::pair<lsn_t,page_flush_ahead> finish_write(size_t len)
std::pair<lsn_t,lsn_t> finish_write(size_t len)
{ return finisher(this, len); }
public:
/** Update finisher when spin_wait_delay is changing to or from 0. */

@@ -71,11 +71,6 @@ inline buf_block_t *trx_sysf_get(mtr_t* mtr, bool rw= true)
0, rw ? RW_X_LATCH : RW_S_LATCH, mtr);
}

#ifdef UNIV_DEBUG
/* Flag to control TRX_RSEG_N_SLOTS behavior debugging. */
extern uint trx_rseg_n_slots_debug;
#endif

/** Write DB_TRX_ID.
@param[out] db_trx_id the DB_TRX_ID field to be written to
@param[in] id transaction ID */

@@ -1321,10 +1321,10 @@ func_exit:
}

const lsn_t lsn= log_sys.get_lsn();
const lsn_t checkpoint= log_sys.last_checkpoint_lsn;
const lsn_t sync_lsn= checkpoint + log_sys.max_checkpoint_age;
const lsn_t max_age= log_sys.max_checkpoint_age;
const lsn_t age= lsn_t(lsn - log_sys.last_checkpoint_lsn);

if (lsn <= sync_lsn)
if (age <= max_age)
{
#ifndef DBUG_OFF
skip_checkpoint:
@@ -1337,7 +1337,7 @@ func_exit:
log_sys.latch.wr_unlock();

/* We must wait to prevent the tail of the log overwriting the head. */
buf_flush_wait_flushed(std::min(sync_lsn, checkpoint + (1U << 20)));
buf_flush_wait_flushed(lsn - max_age);
/* Sleep to avoid a thundering herd */
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}

@@ -40,10 +40,10 @@ Created 11/26/1995 Heikki Tuuri
#include "my_cpu.h"

#ifdef HAVE_PMEM
void (*mtr_t::commit_logger)(mtr_t *, std::pair<lsn_t,page_flush_ahead>);
void (*mtr_t::commit_logger)(mtr_t *, std::pair<lsn_t,lsn_t>);
#endif

std::pair<lsn_t,mtr_t::page_flush_ahead> (*mtr_t::finisher)(mtr_t *, size_t);
std::pair<lsn_t,lsn_t> (*mtr_t::finisher)(mtr_t *, size_t);

void mtr_t::finisher_update()
{
@@ -336,9 +336,25 @@ void mtr_t::release()
m_memo.clear();
}

ATTRIBUTE_NOINLINE void mtr_t::commit_log_release() noexcept
{
if (m_latch_ex)
{
log_sys.latch.wr_unlock();
m_latch_ex= false;
}
else
log_sys.latch.rd_unlock();
}

static ATTRIBUTE_NOINLINE ATTRIBUTE_COLD
void mtr_flush_ahead(lsn_t flush_lsn) noexcept
{
buf_flush_ahead(flush_lsn, bool(flush_lsn & 1));
}

template<bool mmap>
void mtr_t::commit_log(mtr_t *mtr, std::pair<lsn_t,page_flush_ahead> lsns)
noexcept
void mtr_t::commit_log(mtr_t *mtr, std::pair<lsn_t,lsn_t> lsns) noexcept
{
size_t modified= 0;

@@ -379,25 +395,12 @@ void mtr_t::commit_log(mtr_t *mtr, std::pair<lsn_t,page_flush_ahead> lsns)
buf_pool.page_cleaner_wakeup();
mysql_mutex_unlock(&buf_pool.flush_list_mutex);

if (mtr->m_latch_ex)
{
log_sys.latch.wr_unlock();
mtr->m_latch_ex= false;
}
else
log_sys.latch.rd_unlock();

mtr->commit_log_release();
mtr->release();
}
else
{
if (mtr->m_latch_ex)
{
log_sys.latch.wr_unlock();
mtr->m_latch_ex= false;
}
else
log_sys.latch.rd_unlock();
mtr->commit_log_release();

for (auto it= mtr->m_memo.rbegin(); it != mtr->m_memo.rend(); )
{
@@ -459,8 +462,11 @@ void mtr_t::commit_log(mtr_t *mtr, std::pair<lsn_t,page_flush_ahead> lsns)
if (ha_handler_stats *stats= mtr->trx->active_handler_stats)
stats->pages_updated+= modified;

if (UNIV_UNLIKELY(lsns.second != PAGE_FLUSH_NO))
buf_flush_ahead(mtr->m_commit_lsn, lsns.second == PAGE_FLUSH_SYNC);
if (UNIV_UNLIKELY(lsns.second != 0))
{
ut_ad(lsns.second < mtr->m_commit_lsn);
mtr_flush_ahead(lsns.second);
}
}

/** Commit a mini-transaction. */
@@ -482,7 +488,7 @@ void mtr_t::commit()
}

ut_ad(!srv_read_only_mode);
std::pair<lsn_t,page_flush_ahead> lsns{do_write()};
std::pair<lsn_t,lsn_t> lsns{do_write()};
process_freed_pages();
#ifdef HAVE_PMEM
commit_logger(this, lsns);
@@ -974,24 +980,44 @@ std::pair<lsn_t,byte*> log_t::append_prepare(size_t size, bool ex) noexcept

/** Finish appending data to the log.
@param lsn the end LSN of the log record
@return whether buf_flush_ahead() will have to be invoked */
static mtr_t::page_flush_ahead log_close(lsn_t lsn) noexcept
@return lsn for invoking buf_flush_ahead() on, with "furious" flag in the LSB
@retval 0 if buf_flush_ahead() will not have to be invoked */
static lsn_t log_close(lsn_t lsn) noexcept
{
ut_ad(log_sys.latch_have_any());

const lsn_t checkpoint_age= lsn - log_sys.last_checkpoint_lsn;
const lsn_t max_age= log_sys.max_modified_age_async;

if (UNIV_UNLIKELY(checkpoint_age >= log_sys.log_capacity) &&
/* silence message on create_log_file() after the log had been deleted */
checkpoint_age != lsn)
log_overwrite_warning(lsn);
else if (UNIV_LIKELY(checkpoint_age <= log_sys.max_modified_age_async))
return mtr_t::PAGE_FLUSH_NO;
else if (UNIV_LIKELY(checkpoint_age <= log_sys.max_checkpoint_age))
return mtr_t::PAGE_FLUSH_ASYNC;
else if (UNIV_LIKELY(checkpoint_age <= max_age))
return 0;

/* The last checkpoint is too old. Let us set an appropriate
checkpoint age target, that is, a checkpoint LSN target that is the
current LSN minus the maximum age. Let us see if are exceeding the
log_checkpoint_margin() limit that will involve a synchronous wait
in each write operation. */

const bool furious{checkpoint_age >= log_sys.max_checkpoint_age};

/* If furious==true, we could set a less aggressive target
(lsn - log_sys.max_checkpoint_age) instead of what we will be using
in both cases (lsn - log_sys.max_checkpoint_age_async).

The aim of the more aggressive target is that mtr_flush_ahead() will
request more progress in buf_flush_page_cleaner() sooner, so that it
will be less likely that several threads will end up waiting in
log_checkpoint_margin(). That function will use the less aggressive
limit (lsn - log_sys.max_checkpoint_age) in order to minimize the
synchronous wait time. */
if (furious)
log_sys.set_check_for_checkpoint();
return mtr_t::PAGE_FLUSH_SYNC;

return ((lsn - max_age) & ~lsn_t{1}) | lsn_t{furious};
}
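A self-contained sketch of the flag-in-LSB encoding that log_close() now returns and mtr_flush_ahead() decodes: the flush-ahead target LSN is rounded down to an even value and the "furious" flag rides in bit 0 (types simplified to uint64_t here; only the bit arithmetic mirrors the patch).

#include <cassert>
#include <cstdint>

using lsn_t= std::uint64_t;

// Encode: clear bit 0 of the flush target, then store the furious flag there.
static lsn_t encode_flush_target(lsn_t target, bool furious)
{
  return (target & ~lsn_t{1}) | lsn_t{furious};
}

// Decode, as mtr_flush_ahead() does: bit 0 is the flag, the rest is the LSN.
static void decode_flush_target(lsn_t encoded, lsn_t *target, bool *furious)
{
  *furious= (encoded & 1) != 0;
  *target= encoded & ~lsn_t{1};
}

int main()
{
  lsn_t target;
  bool furious;
  decode_flush_target(encode_flush_target(0x1234567891, true), &target, &furious);
  assert(furious && target == 0x1234567890);
  decode_flush_target(0, &target, &furious);   // 0 means no flush-ahead needed
  assert(!furious && target == 0);
  return 0;
}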
inline void mtr_t::page_checksum(const buf_page_t &bpage)
@@ -1037,7 +1063,7 @@ inline void mtr_t::page_checksum(const buf_page_t &bpage)
m_log.close(l + 4);
}

std::pair<lsn_t,mtr_t::page_flush_ahead> mtr_t::do_write()
std::pair<lsn_t,lsn_t> mtr_t::do_write() noexcept
{
ut_ad(!recv_no_log_write);
ut_ad(is_logged());
@@ -1189,8 +1215,7 @@ inline void log_t::append(byte *&d, const void *s, size_t size) noexcept
}

template<bool mmap>
std::pair<lsn_t,mtr_t::page_flush_ahead>
mtr_t::finish_writer(mtr_t *mtr, size_t len)
std::pair<lsn_t,lsn_t> mtr_t::finish_writer(mtr_t *mtr, size_t len)
{
ut_ad(log_sys.is_latest());
ut_ad(!recv_no_log_write);

@@ -1366,18 +1366,18 @@ row_raw_format_str(

charset_coll = dtype_get_charset_coll(prtype);

if (UNIV_LIKELY(dtype_is_utf8(prtype))) {

switch (charset_coll) {
case 11: /* ascii_general_ci */
case 65: /* ascii_bin */
case 33: /* utf8_general_ci */
case 83: /* utf8_bin */
case 254: /* utf8_general_cs */
return(ut_str_sql_format(data, data_len, buf, buf_size));
}
/* else */

if (charset_coll == DATA_MYSQL_BINARY_CHARSET_COLL) {

case 0:
case DATA_MYSQL_BINARY_CHARSET_COLL:
*format_in_hex = TRUE;
return(0);
}
/* else */

return(innobase_raw_format(data, data_len, charset_coll,
buf, buf_size));
@@ -1438,9 +1438,18 @@ row_raw_format(
break;
case DATA_CHAR:
case DATA_VARCHAR:
/* FTS_%_CONFIG.key are incorrectly created with prtype==0.
The DATA_ENGLISH is being used for CHAR columns of the
InnoDB internal SQL parser, such as SYS_FOREIGN.ID.
For these, we will eventually goto format_in_hex. */
ut_ad(dtype_get_charset_coll(prtype) == 8
|| (mtype == DATA_VARCHAR
&& (prtype == 0 || prtype == DATA_ENGLISH)));
goto format_str;
case DATA_MYSQL:
case DATA_VARMYSQL:

ut_ad(dtype_get_charset_coll(prtype));
format_str:
ret = row_raw_format_str(data, data_len, prtype,
buf, buf_size, &format_in_hex);
if (format_in_hex) {

@@ -420,7 +420,6 @@ loop:
rollback segment. */

if (!(rseg.SKIP & rseg_ref) && !freed &&
ut_d(!trx_rseg_n_slots_debug &&)
&rseg == &trx_sys.rseg_array[purge_sys.skipped_rseg])
/* If rseg.space == purge_sys.truncate_undo_space.current
the following will be a no-op. A possible conflict

@@ -45,9 +45,6 @@ Created 3/26/1996 Heikki Tuuri
trx_sys_t trx_sys;

#ifdef UNIV_DEBUG
/* Flag to control TRX_RSEG_N_SLOTS behavior debugging. */
uint trx_rseg_n_slots_debug = 0;

void rw_trx_hash_t::validate_element(trx_t *trx)
{
ut_ad(!trx->read_only || !trx->rsegs.m_redo.rseg);

@@ -815,7 +815,6 @@ static void trx_assign_rseg_low(trx_t *trx)
undo tablespaces that are scheduled for truncation. */
static Atomic_counter<unsigned> rseg_slot;
unsigned slot = rseg_slot++ % TRX_SYS_N_RSEGS;
ut_d(if (trx_rseg_n_slots_debug) slot = 0);
ut_d(const auto start_scan_slot = slot);
ut_d(bool look_for_rollover = false);
trx_rseg_t* rseg;
@@ -827,7 +826,6 @@ static void trx_assign_rseg_low(trx_t *trx)
rseg = &trx_sys.rseg_array[slot];
ut_ad(!look_for_rollover || start_scan_slot != slot);
ut_d(look_for_rollover = true);
ut_d(if (!trx_rseg_n_slots_debug))
slot = (slot + 1) % TRX_SYS_N_RSEGS;

if (!rseg->space) {

@@ -436,11 +436,6 @@ static ulint trx_rsegf_undo_find_free(const buf_block_t *rseg_header)
{
ulint max_slots= TRX_RSEG_N_SLOTS;

#ifdef UNIV_DEBUG
if (trx_rseg_n_slots_debug)
max_slots= std::min<ulint>(trx_rseg_n_slots_debug, TRX_RSEG_N_SLOTS);
#endif

for (ulint i= 0; i < max_slots; i++)
if (trx_rsegf_get_nth_undo(rseg_header, i) == FIL_NULL)
return i;
81
storage/spider/CODING_STADNARDS.org
Normal file
@@ -0,0 +1,81 @@
#+title Spider Development Documentation

** Testing
:PROPERTIES:
:UPDATED: [2025-10-15 Wed 15:33]
:END:

*** Run spider test suites
:PROPERTIES:
:UPDATED: [2025-10-15 Wed 15:39]
:END:

Spider has sub-suites. Assuming temporary WIP spider tests are placed
under the spider/temp suite, to run a test on all spider tests, do

#+begin_src sh
./mysql-test/mtr --suite spider,spider/*,spider/*/* \
--skip-test="spider/temp.*|.*/t\..*" --parallel=auto --big-test \
--force --max-test-fail=0
#+end_src

Tests should be run normally, but also with --ps-protocol,
--view-protocol and ASAN.

For 10.11+ tests should also be run with
--mysqld=--loose-disable-spider-group-by-handler. This will be done
automatically after MDEV-37810.

*** Where to place new tests
:PROPERTIES:
:UPDATED: [2025-10-15 Wed 15:35]
:END:

- spider/bugfix suite for bugfixes
- spider/feature suite for new features
- spider suite for all else, such as generic tests to improve coverage

*** Use engine defined attributes in tests whenever possible
:PROPERTIES:
:UPDATED: [2025-10-15 Wed 15:52]
:END:

In versions of at least 10.11, when writing new tests or updating
existing tests, use engine defined attributes for spider table
connection info instead of table comments

#+begin_src sql
# Do this for 10.11+
CREATE TABLE t (c int) ENGINE=SPIDER REMOTE_SERVER=s1 REMOTE_TABLE=t1;
# Do this for 10.6
CREATE TABLE t (c int) ENGINE=SPIDER COMMENT='srv "s1", table "t1"';
#+end_src

However, if the spider table has connection info that is not
REMOTE_SERVER, REMOTE_TABLE, or REMOTE_DATABASE, comments are still
needed for 10.11:

#+begin_src sql
# Do this for 10.6 and 10.11
CREATE TABLE t (c int) ENGINE=SPIDER COMMENT='srv "s1", table "t1", read_only_mode "1"';
# Do this for 11.4+
CREATE TABLE t (c int) ENGINE=SPIDER REMOTE_SERVER=s1 REMOTE_TABLE=t1 READ_ONLY=1;
#+end_src

Don't mix engine defined attributes with COMMENT, unless the mixing is
part of the test.

#+begin_src sql
# Don't do this
CREATE TABLE t (c int) ENGINE=SPIDER REMOTE_SERVER=s1 REMOTE_TABLE=t1 COMMENT='read_only_mode "1"';
#+end_src

WRAPPER by default is mysql, so it is ok to do the following
conversion in 10.11+:

#+begin_src sql
# From
CREATE TABLE t (c int) ENGINE=SPIDER COMMENT='wrapper "mysql", srv "s1", table "t1"';
# to
CREATE TABLE t (c int) ENGINE=SPIDER REMOTE_SERVER=s1 REMOTE_TABLE=t1;
#+end_src

@@ -63,12 +63,56 @@ public:
char *conn_keys_first_ptr;
char **conn_keys;
SPIDER_CONN **conns;
/* array of indexes of active servers */
/*
Array of indexes of active servers.

For a spider table or partition with multiple remotes (HA), the
remotes are divided into n groups of active links, where n is the
number of "active link count", equal to share->link_count aka
share->active_link_count. For example, if a spider table has 11
remotes (i.e. share->all_link_count == 11), and
share->active_link_count == 3, then we have 3 link groups with
group 0 consisting of the 0th, 3rd, 6th and 9th remotes and so on:

group 0: 0, 3, 6, 9
group 1: 1, 4, 7, 10
group 2: 2, 5, 8

conn_link_idx[i] is the "current" remote chosen for the ith group,
and it can only take a value in the ith group.

Continue with the example above, at some point, we could end up
with:

conn_link_idx[0] == 3
conn_link_idx[1] == 1
conn_link_idx[2] == 8

conn_link_idx is set in spider_trx_set_link_idx_for_all().

By default, active_link_idx is the same number as all_link_count,
i.e. 11 in the above example.

If spider HA is gone (MDEV-28862), this will be no longer needed.

Typically, to distinguish the ith group and ith link, we use
variable names link_idx and all_link_idx respectively, so we often
have

all_link_idx == conn_link_idx[link_idx]

spider->conns[link_idx] is created using connection info of the
`conn_link_idx[link_idx]'th remote.

When only one of the indexes is used, we simply use variable name
link_idx
*/
uint *conn_link_idx;
/* A bitmap indicating whether each active server have some higher
numbered server in the same "group" left to try (can fail over) */
uchar *conn_can_fo;
void **quick_targets;
/* indexed on active servers */
int *need_mons;
query_id_t search_link_query_id;
int search_link_idx;
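A standalone sketch of the link-group layout described in the conn_link_idx comment above: with all_link_count remotes divided into active_link_count groups, group g contains remotes g, g + n, g + 2n, ..., and conn_link_idx[g] may only hold one of those values. The 11-remote / 3-group numbers from the comment are reused; the helper names are assumptions for illustration.

#include <cassert>
#include <cstdio>
#include <vector>

// Members of link group `g` when `all_link_count` remotes are divided into
// `active_link_count` groups, as in the conn_link_idx comment.
static std::vector<unsigned> group_members(unsigned g, unsigned all_link_count,
                                           unsigned active_link_count)
{
  std::vector<unsigned> members;
  for (unsigned i= g; i < all_link_count; i+= active_link_count)
    members.push_back(i);
  return members;
}

int main()
{
  const unsigned all_link_count= 11, active_link_count= 3;
  // group 0: 0 3 6 9, group 1: 1 4 7 10, group 2: 2 5 8
  assert(group_members(0, all_link_count, active_link_count) ==
         (std::vector<unsigned>{0, 3, 6, 9}));
  assert(group_members(2, all_link_count, active_link_count) ==
         (std::vector<unsigned>{2, 5, 8}));
  // conn_link_idx[1] == 1 from the comment is indeed a member of group 1.
  for (unsigned m : group_members(1, all_link_count, active_link_count))
    std::printf("%u ", m);
  std::printf("\n");
  return 0;
}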
@@ -93,8 +137,17 @@ public:
bool da_status;
bool use_spatial_index;

/*
Index of the table in FROM tables, for the use of direct
execution by gbh
*/
uint idx_for_direct_join;
/*
Whether using a spider_fields, only applicable to direct
execution by gbh
*/
bool use_fields;
/* If use_fields == true, the spider_fields in use for gbh */
spider_fields *fields;
SPIDER_LINK_IDX_CHAIN *link_idx_chain;
SPIDER_LINK_IDX_CHAIN *result_link_idx_chain;
Some files were not shown because too many files have changed in this diff.