Mirror of https://github.com/MariaDB/server.git

Merge ../10.1 into bb-10.1-explain-json

Commit by Sergei Petrunia on 2014-10-15 17:21:59 +04:00
2515 changed files with 52642 additions and 564022 deletions

View File

@ -74,7 +74,7 @@ ENDIF()
IF(WITH_EMBEDDED_SERVER)
SET(TEST_EMBEDDED ${MTR_FORCE} --comment=embedded --timer --embedded-server
--skip-rpl --skip-ndbcluster ${EXP})
--skip-rpl ${EXP})
ELSE()
SET(TEST_EMBEDDED echo "Can not test embedded, not compiled in")
ENDIF()
@ -92,8 +92,8 @@ ADD_CUSTOM_TARGET(test-force
ADD_CUSTOM_TARGET(test-bt
${TEST_BT_START}
COMMAND ${MTR_FORCE} --comment=normal --timer --skip-ndbcluster --report-features ${EXP}
COMMAND ${MTR_FORCE} --comment=ps --timer --skip-ndbcluster --ps-protocol ${EXP}
COMMAND ${MTR_FORCE} --comment=normal --timer --report-features ${EXP}
COMMAND ${MTR_FORCE} --comment=ps --timer --ps-protocol ${EXP}
COMMAND ${MTR_FORCE} --comment=funcs1+ps --ps-protocol --reorder --suite=funcs_1 ${EXP}
COMMAND ${MTR_FORCE} --comment=funcs2 --suite=funcs_2 ${EXP}
COMMAND ${MTR_FORCE} --comment=partitions --suite=parts ${EXP}
@ -105,13 +105,13 @@ ADD_CUSTOM_TARGET(test-bt
ADD_CUSTOM_TARGET(test-bt-fast
${TEST_BT_START}
COMMAND ${MTR_FORCE} --comment=ps --timer --skip-ndbcluster --ps-protocol --report-features ${EXP}
COMMAND ${MTR_FORCE} --comment=ps --timer --ps-protocol --report-features ${EXP}
COMMAND ${MTR_FORCE} --comment=stress --suite=stress ${EXP}
)
ADD_CUSTOM_TARGET(test-bt-debug
${TEST_BT_START}
COMMAND ${MTR_FORCE} --comment=debug --timer --skip-ndbcluster --skip-rpl --report-features ${EXP}
COMMAND ${MTR_FORCE} --comment=debug --timer --skip-rpl --report-features ${EXP}
)
# Process .in files with includes in collections/

View File

@ -1,7 +1,7 @@
# For easier human reading (MTR doesn't care), please keep entries
# in alphabetical order. This also helps with merge conflict resolution.
binlog.binlog_multi_engine # joro : NDB tests marked as experimental as agreed with bochklin
binlog.binlog_multi_engine # joro : tests marked as experimental as agreed with bochklin
funcs_1.charset_collation_1 # depends on compile-time decisions

View File

@ -2,6 +2,5 @@ perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collection
perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=ps_row --vardir=var-ps_row --ps-protocol --mysqld=--binlog-format=row
perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=embedded --vardir=var-emebbed --embedded
perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=funcs_1 --vardir=var-funcs_1 --suite=funcs_1
perl mysql-test-run.pl --timer --force --parallel=auto --comment=rpl_ndb_row --vardir=var-rpl_ndb_row --mysqld=--binlog-format=row --suite=rpl_ndb,ndb
perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=rpl_binlog_row --vardir=var-rpl_binlog_row --mysqld=--binlog-format=row --suite=rpl,binlog --skip-ndb
perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=rpl_binlog_row --vardir=var-rpl_binlog_row --mysqld=--binlog-format=row --suite=rpl,binlog
perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=rpl_binlog_checksum --mysqld=--binlog-checksum=CRC32 --vardir=var-rpl_binlog_checksum --suite=binlog,rpl

View File

@ -372,7 +372,8 @@ dfLtTBcBAAAAIgAAAPkAAAAAABcAAAAAAAcAAf/+AQAAAA==
SELECT * FROM t1;
--echo # Their values should be ON
SHOW SESSION VARIABLES LIKE "%_checks";
SHOW SESSION VARIABLES LIKE "foreign_key_checks";
SHOW SESSION VARIABLES LIKE "unique_checks";
--echo
SET @@SESSION.foreign_key_checks= OFF;
@ -387,7 +388,8 @@ dfLtTBcBAAAAIgAAAM0BAAAAABcAAAAAAAEAAf/+AgAAAA==
SELECT * FROM t1;
--echo # Their values should be OFF
SHOW SESSION VARIABLES LIKE "%_checks";
SHOW SESSION VARIABLES LIKE "foreign_key_checks";
SHOW SESSION VARIABLES LIKE "unique_checks";
--echo # INSERT INTO t1 VALUES(2)
--echo # foreign_key_checks=1 and unique_checks=1
@ -401,7 +403,8 @@ dfLtTBcBAAAAIgAAAM0BAAAAABcAAAAAAAEAAf/+AgAAAA==
SELECT * FROM t1;
--echo # Their values should be OFF
SHOW SESSION VARIABLES LIKE "%_checks";
SHOW SESSION VARIABLES LIKE "foreign_key_checks";
SHOW SESSION VARIABLES LIKE "unique_checks";
DROP TABLE t1;
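
A minimal illustration of why the test above now names the two variables instead of using the wildcard (plain client SQL, not part of the commit; the set of variables matched by '%_checks' can vary between server versions):

# Wildcard form: returns every session variable whose name ends in "_checks",
# so its output changes whenever such a variable is added or removed.
SHOW SESSION VARIABLES LIKE '%_checks';

# Explicit form: one row per named variable, stable across versions.
SHOW SESSION VARIABLES LIKE 'foreign_key_checks';
SHOW SESSION VARIABLES LIKE 'unique_checks';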

View File

@ -1,7 +1,6 @@
#
# Test of auto_increment with offset
#
-- source include/not_ndb_default.inc
-- source include/master-slave.inc
eval create table t1 (a int not null auto_increment,b int, primary key (a)) engine=$engine_type2 auto_increment=3;

View File

@ -146,13 +146,10 @@ eval CREATE TABLE mysqltest1.t1 (f1 BIGINT) ENGINE=$engine_type;
# Prevent Bug#26687 rpl_ddl test fails if run with --innodb option
# The testscript (suite/rpl/rpl_ddl.test) + the expected result need that the
# slave uses MyISAM for the table mysqltest.t1.
# This is not valid in case of suite/rpl_ndb/rpl_ndb_ddl.test which sources
# also this script.
sync_slave_with_master;
connection slave;
if (`SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA = 'mysqltest1' AND TABLE_NAME = 't1'
AND ENGINE <> 'MyISAM' AND '$engine_type' <> 'NDB'`)
WHERE TABLE_SCHEMA = 'mysqltest1' AND TABLE_NAME = 't1' AND ENGINE <> 'MyISAM'`)
{
skip This test needs on slave side: InnoDB disabled, default engine: MyISAM;
}

View File

@ -395,9 +395,6 @@ sync_slave_with_master;
###############################################################
# Error reaction is up to sql_mode of the slave sql (bug#38173)
#--echo *** Create t9 on slave ***
# Please, check BUG#47741 to see why you are not testing NDB.
if (`SELECT UPPER(LEFT($engine_type, 3)) != 'NDB'`)
{
STOP SLAVE;
RESET SLAVE;
eval CREATE TABLE t9 (a INT KEY, b BLOB, c CHAR(5),
@ -446,8 +443,6 @@ if (`SELECT UPPER(LEFT($engine_type, 3)) != 'NDB'`)
DROP TABLE t9;
sync_slave_with_master;
}
############################################
# More columns in slave at middle of table #
# Expect: Proper error message #

View File

@ -21,9 +21,7 @@ connection master;
SET TIMESTAMP=1000000000;
CREATE TABLE t3 ( a INT UNIQUE );
SET FOREIGN_KEY_CHECKS=0;
# Had to add 1022 for run with ndb as ndb uses different
# error and error code for error ER_DUP_ENTRY. Bug 16677
--error 1022, ER_DUP_ENTRY
--error ER_DUP_ENTRY
INSERT INTO t3 VALUES (1),(1);
sync_slave_with_master;

View File

@ -64,22 +64,6 @@ while ($ddl_cases >= 1)
{
let $commit_event_row_number= 4;
}
#
# In NDB (RBR and MIXED modes), the commit event is usually the seventh event
# in the binary log:
#
# 1: COMMAND
# 2: BEGIN
# 3: TABLE MAP EVENT
# 4: TABLE MAP EVENT (ndb_apply_status)
# 5: ROW EVENT
# 6: ROW EVENT
# 7: COMMIT
#
if ($engine == NDB)
{
let $commit_event_row_number= 7;
}
let $first_binlog_position= query_get_value("SHOW MASTER STATUS", Position, 1);
--enable_query_log
@ -87,32 +71,10 @@ while ($ddl_cases >= 1)
if ($ddl_cases == 41)
{
let $cmd= LOAD INDEX INTO CACHE nt_1 IGNORE LEAVES;
if ($engine == NDB)
{
# This seems to be related to epochs.
# We need to check this against an updated version or avoid it.
let $ok= no;
let $commit_event_row_number= 6;
}
}
if ($ddl_cases == 40)
{
let $cmd= LOAD INDEX INTO CACHE tt_1, tt_2 IGNORE LEAVES;
#
# In NDB (RBR and MIXED modes), the commit event is the sixth event
# in the binary log:
#
# 1: BEGIN
# 2: TABLE MAP EVENT
# 3: TABLE MAP EVENT (ndb_apply_status)
# 4: ROW EVENT
# 5: ROW EVENT
# 6: COMMIT
#
if ($engine == NDB)
{
let $commit_event_row_number= 6;
}
}
if ($ddl_cases == 39)
{
@ -121,21 +83,6 @@ while ($ddl_cases >= 1)
if ($ddl_cases == 38)
{
let $cmd= CHECK TABLE nt_1;
#
# In NDB (RBR and MIXED modes), the commit event is the sixth event
# in the binary log:
#
# 1: BEGIN
# 2: TABLE MAP EVENT
# 3: TABLE MAP EVENT (ndb_apply_status)
# 4: ROW EVENT
# 5: ROW EVENT
# 6: COMMIT
#
if ($engine == NDB)
{
let $commit_event_row_number= 6;
}
}
if ($ddl_cases == 37)
{
@ -148,40 +95,10 @@ while ($ddl_cases >= 1)
if ($ddl_cases == 35)
{
let $cmd= LOCK TABLES tt_1 WRITE;
#
# In NDB (RBR and MIXED modes), the commit event is the sixth event
# in the binary log:
#
# 1: BEGIN
# 2: TABLE MAP EVENT
# 3: TABLE MAP EVENT (ndb_apply_status)
# 4: ROW EVENT
# 5: ROW EVENT
# 6: COMMIT
#
if ($engine == NDB)
{
let $commit_event_row_number= 6;
}
}
if ($ddl_cases == 34)
{
let $cmd= UNLOCK TABLES;
#
# In NDB (RBR and MIXED modes), the commit event is the sixth event
# in the binary log:
#
# 1: BEGIN
# 2: TABLE MAP EVENT
# 3: TABLE MAP EVENT (ndb_apply_status)
# 4: ROW EVENT
# 5: ROW EVENT
# 6: COMMIT
#
if ($engine == NDB)
{
let $commit_event_row_number= 6;
}
}
if ($ddl_cases == 33)
{
@ -194,42 +111,6 @@ while ($ddl_cases >= 1)
if ($ddl_cases == 31)
{
let $cmd= SET PASSWORD FOR 'user'@'localhost' = PASSWORD('newpass');
#
# In NDB (RBR mode), the commit event is the eleventh event
# in the binary log:
#
# 1: DDL EVENT which triggered the previous commmit.
# 2: BEGIN
# 3: TABLE MAP EVENT
# 4: ROW EVENT
# 5: COMMIT
# 6: BEGIN
# 7: TABLE MAP EVENT
# 8: TABLE MAP EVENT (ndb_apply_status)
# 9: ROW EVENT
# 10: ROW EVENT
# 11: COMMIT
#
if (`SELECT '$engine' = 'NDB' && @@binlog_format = 'ROW'`)
{
let $commit_event_row_number= 11;
}
#
# In NDB (MIXED mode), the commit event is the eighth event
# in the binary log:
#
# 1: DDL EVENT which triggered the previous commmit.
# 2: BEGIN
# 3: TABLE MAP EVENT
# 4: TABLE MAP EVENT (ndb_apply_status)
# 5: ROW EVENT
# 6: ROW EVENT
# 7: COMMIT
#
if (`SELECT '$engine' = 'NDB' && @@binlog_format != 'ROW'`)
{
let $commit_event_row_number= 7;
}
}
if ($ddl_cases == 30)
{
@ -272,7 +153,7 @@ while ($ddl_cases >= 1)
# 5: COMMIT
# 6: DDL EVENT which triggered the previous commmit.
#
if (`select @@binlog_format = 'ROW' && '$engine' != 'NDB'`)
if (`select @@binlog_format = 'ROW'`)
{
let $commit_event_row_number= 5;
}
@ -316,42 +197,10 @@ while ($ddl_cases >= 1)
if ($ddl_cases == 13)
{
let $cmd= CREATE INDEX ix ON tt_1(ddl_case);
#
# In NDB (RBR and MIXED modes), the commit event is the sixth event
# in the binary log:
#
# 1: BEGIN
# 2: TABLE MAP EVENT
# 3: TABLE MAP EVENT (ndb_apply_status)
# 4: ROW EVENT
# 5: ROW EVENT
# 6: COMMIT
# 7: DDL EVENT which triggered the previous commmit.
#
if ($engine == NDB)
{
let $commit_event_row_number= 6;
}
}
if ($ddl_cases == 12)
{
let $cmd= DROP INDEX ix ON tt_1;
#
# In NDB (RBR and MIXED modes), the commit event is the sixth event
# in the binary log:
#
# 1: BEGIN
# 2: TABLE MAP EVENT
# 3: TABLE MAP EVENT (ndb_apply_status)
# 4: ROW EVENT
# 5: ROW EVENT
# 6: COMMIT
# 7: DDL EVENT which triggered the previous commmit.
#
if ($engine == NDB)
{
let $commit_event_row_number= 6;
}
}
if ($ddl_cases == 11)
{
@ -377,39 +226,6 @@ while ($ddl_cases >= 1)
{
let $commit_event_row_number= 4;
}
#
# In NDB (RBR mode), the commit event is the sixth event
# in the binary log:
#
# 1: BEGIN
# 2: TABLE MAP EVENT
# 3: TABLE MAP EVENT (ndb_apply_status)
# 4: ROW EVENT
# 5: ROW EVENT
# 6: COMMIT
#
if (`SELECT '$engine' = 'NDB' && @@binlog_format = 'ROW'` )
{
let $commit_event_row_number= 6;
}
#
# In NDB (MIXED mode), the commit event is the nineth event
# in the binary log:
#
# 1: BEGIN
# 2: DDL EVENT which triggered the previous commmit.
# 3: COMMIT
# 4: BEGIN
# 5: TABLE MAP EVENT
# 6: TABLE MAP EVENT (ndb_apply_status)
# 7: ROW EVENT
# 8: ROW EVENT
# 9: COMMIT
#
if (`SELECT '$engine' = 'NDB' && @@binlog_format != 'ROW'` )
{
let $commit_event_row_number= 9;
}
}
if ($ddl_cases == 10)
{
@ -427,21 +243,6 @@ while ($ddl_cases >= 1)
{
let $commit_event_row_number= 4;
}
#
# In NDB (RBR and MIXED modes), the commit event is the sixth event
# in the binary log:
#
# 1: BEGIN
# 2: TABLE MAP EVENT
# 3: TABLE MAP EVENT (ndb_apply_status)
# 4: ROW EVENT
# 5: ROW EVENT
# 6: COMMIT
#
if ($engine == NDB)
{
let $commit_event_row_number= 6;
}
}
if ($ddl_cases == 9)
{
@ -459,21 +260,6 @@ while ($ddl_cases >= 1)
{
let $commit_event_row_number= 4;
}
#
# In NDB (RBR and MIXED modes), the commit event is the sixth event
# in the binary log:
#
# 1: BEGIN
# 2: TABLE MAP EVENT
# 3: TABLE MAP EVENT (ndb_apply_status)
# 4: ROW EVENT
# 5: ROW EVENT
# 6: COMMIT
#
if ($engine == NDB)
{
let $commit_event_row_number= 6;
}
}
if ($ddl_cases == 8)
{
@ -514,42 +300,6 @@ while ($ddl_cases >= 1)
{
let $commit_event_row_number= 5;
}
#
# In NDB (RBR and MIXED modes), the commit event is the sixth event
# in the binary log:
#
# 1: BEGIN
# 2: DROP TEMPORARY table IF EXISTS
# 3: COMMIT
# 4: BEGIN
# 5: TABLE MAP EVENT
# 6: TABLE MAP EVENT (ndb_apply_status)
# 7: ROW EVENT
# 8: ROW EVENT
# 9: COMMIT
#
if ($engine == NDB)
{
let $commit_event_row_number= 9;
}
#
# In NDB (MIXED mode), the commit event is the nineth event
# in the binary log:
#
# 1: BEGIN
# 2: DDL EVENT which triggered the previous commmit.
# 3: COMMIT
# 4: BEGIN
# 5: TABLE MAP EVENT
# 6: TABLE MAP EVENT (ndb_apply_status)
# 7: ROW EVENT
# 8: ROW EVENT
# 9: COMMIT
#
if (`SELECT '$engine' = 'NDB' && @@binlog_format != 'ROW'` )
{
let $commit_event_row_number= 9;
}
}
if ($ddl_cases == 7)
{
@ -574,42 +324,10 @@ while ($ddl_cases >= 1)
if ($ddl_cases == 2)
{
let $cmd= CREATE DATABASE db;
#
# In NDB (RBR and MIXED modes), the commit event is the sixth event
# in the binary log:
#
# 1: BEGIN
# 2: TABLE MAP EVENT
# 3: TABLE MAP EVENT (ndb_apply_status)
# 4: ROW EVENT
# 5: ROW EVENT
# 6: COMMIT
# 7: DDL EVENT which triggered the previous commmit.
#
if ($engine == NDB)
{
let $commit_event_row_number= 6;
}
}
if ($ddl_cases == 1)
{
let $cmd= DROP DATABASE IF EXISTS db;
#
# In NDB (RBR and MIXED modes), the commit event is the sixth event
# in the binary log:
#
# 1: BEGIN
# 2: TABLE MAP EVENT
# 3: TABLE MAP EVENT (ndb_apply_status)
# 4: ROW EVENT
# 5: ROW EVENT
# 6: COMMIT
# 7: DDL EVENT which triggered the previous commmit.
#
if ($engine == NDB)
{
let $commit_event_row_number= 6;
}
}
--eval $cmd
--disable_query_log

View File

@ -48,8 +48,6 @@ connection master;
#
#Note Matthias: to be merged to rpl_ddl.test
--source include/not_ndb_default.inc
FLUSH LOGS;
sync_slave_with_master;
FLUSH LOGS;

View File

@ -23,10 +23,6 @@ SELECT * FROM test.t1 ORDER BY blob_column;
save_master_pos;
sync_slave_with_master;
connection slave;
# Need to allow some time when NDB engine is used for
# the injector thread to have time to populate binlog
let $wait_condition= SELECT INSTR(blob_column,'aberration') > 0 FROM test.t1 WHERE a = 2;
--source include/wait_condition.inc
SELECT * FROM test.t1 ORDER BY blob_column;
# Cleanup

View File

@ -12,8 +12,7 @@
# used in the same transaction.
#
# * Statements that do an implicit commit (i.e., most but not all DDL, and
# some utility commands) are logged specially due to unspecified requirements by
# NDB.
# some utility commands) are logged specially
#
# * Statements that update temporary tables need special treatment since they
# are not logged in row format.
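
A quick illustration of the implicit-commit behaviour mentioned above, assuming a transactional table t1(a INT) (plain SQL, not part of the test file):

START TRANSACTION;
INSERT INTO t1 VALUES (1);     # still uncommitted here
CREATE INDEX ix ON t1 (a);     # DDL: implicitly commits the open transaction
ROLLBACK;                      # has no effect, the INSERT is already committed
SELECT a FROM t1;              # returns the row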
@ -147,9 +146,7 @@
# - Rules for committing statements, except CREATE [TEMPORARY] TABLE...SELECT
#
# * All other statements that have a pre-commit are written directly to the
# binlog. (Note: this is semantically equivalent to writing it to the SC and
# flushing the SC. However, due to requirements by NDB (which have not been
# clarified), we write directly to the binlog.)
# binlog.
#
# We use the include file rpl_mixing_engines.inc to generate sql commands from a
# format string. The format string consists of a sequence of 'codes' separated

View File

@ -1,119 +0,0 @@
#######################################
# Author: Rafal Somla #
# Date: 2006-08-20 #
# Purpose: Test replication of basic #
# table operations in various setups #
# #
# Based on rpl_ndb_2multi_eng.test by #
# JBM #
#######################################
--echo --- Doing pre test cleanup ---
connection master;
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_query_log
#################################################
--echo --- Create Table Section ---
CREATE TABLE t1 (id MEDIUMINT NOT NULL,
b1 INT,
vc VARCHAR(255),
bc CHAR(255),
d DECIMAL(10,4) DEFAULT 0,
f FLOAT DEFAULT 0,
total BIGINT UNSIGNED,
y YEAR,
t DATE,
PRIMARY KEY(id));
--echo --- Show table on master ---
SHOW CREATE TABLE t1;
--echo --- Show table on slave ---
sync_slave_with_master;
SHOW CREATE TABLE t1;
--source include/rpl_multi_engine2.inc
#################################################
# Okay lets see how it holds up to table changes
--echo --- Check that simple Alter statements are replicated correctly --
ALTER TABLE t1 DROP PRIMARY KEY;
# note: table with no PK can't contain blobs if it is to be replicated.
ALTER TABLE t1 MODIFY vc char(32);
--echo --- Show the new improved table on the master ---
SHOW CREATE TABLE t1;
--echo --- Make sure that our tables on slave are still same engine ---
--echo --- and that the alter statements replicated correctly ---
sync_slave_with_master;
SHOW CREATE TABLE t1;
--source include/rpl_multi_engine2.inc
#################################################
--echo --- Check that replication works when slave has more columns than master
connection master;
ALTER TABLE t1 ADD PRIMARY KEY(id,total);
ALTER TABLE t1 MODIFY vc TEXT;
INSERT INTO t1 VALUES(3,1,'Testing MySQL databases is a cool ',
'Must make it bug free for the customer',
654321.4321,15.21,0,1965,"1905-11-14");
INSERT INTO t1 VALUES(20,1,'Testing MySQL databases is a cool ',
'Must make it bug free for the customer',
654321.4321,15.21,0,1965,"1965-11-14");
INSERT INTO t1 VALUES(50,1,'Testing MySQL databases is a cool ',
'Must make it bug free for the customer',
654321.4321,15.21,0,1965,"1985-11-14");
--echo --- Add columns on slave ---
--sync_slave_with_master
ALTER TABLE t1 ADD (u int, v char(16) default 'default');
UPDATE t1 SET u=7 WHERE id < 50;
UPDATE t1 SET v='explicit' WHERE id >10;
--echo --- Show changed table on slave ---
SHOW CREATE TABLE t1;
SELECT *
FROM t1
ORDER BY id;
--source include/rpl_multi_engine2.inc
TRUNCATE TABLE t1;
#################################################
--echo --- Check that replication works when master has more columns than slave
connection master;
--echo --- Remove columns on slave ---
--sync_slave_with_master
ALTER TABLE t1 DROP COLUMN v;
ALTER TABLE t1 DROP COLUMN u;
ALTER TABLE t1 DROP COLUMN t;
ALTER TABLE t1 DROP COLUMN y;
--echo --- Show changed table on slave ---
SHOW CREATE TABLE t1;
--source include/rpl_multi_engine2.inc
TRUNCATE TABLE t1;
#################################################
--echo --- Do Cleanup --
connection master;
DROP TABLE IF EXISTS t1;
sync_slave_with_master;
connection master;

View File

@ -1,347 +0,0 @@
#######################################
# Author: JBM #
# Date: 2006-02-23 #
# Purpose: See if replication between #
# NDB -> MyISAM and InnoDB works. #
# and if #
# MyISAM and InnoDB -> NDB works. #
#######################################
# By JBM #
# Date 2006-02-28 #
# Change: Implemented review comments #
#######################################
--echo --- Doing pre test cleanup ---
connection master;
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_query_log
--echo --- Start test 1 Basic testing ---
--echo --- Create Table Section ---
#################################################
# Requirment: Create basic table, replicate #
# basice operations such at insert, update #
# delete between 2 different storage engines #
# Alter table and ensure table is handled #
# Correctly on the slave #
#################################################
CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
y YEAR, t DATE,PRIMARY KEY(id));
--echo --- Show table on master ---
SHOW CREATE TABLE t1;
--echo --- Show table on slave ---
sync_slave_with_master;
SHOW CREATE TABLE t1;
--echo --- Perform basic operation on master ---
--echo --- and ensure replicated correctly ---
--source include/rpl_multi_engine3.inc
# Okay lets see how it holds up to table changes
--echo --- Check that simple Alter statements are replicated correctly --
ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(id, total);
ALTER TABLE t1 MODIFY vc TEXT;
--echo --- Show the new improved table on the master ---
SHOW CREATE TABLE t1;
--echo --- Make sure that our tables on slave are still same engine ---
--echo --- and that the alter statements replicated correctly ---
sync_slave_with_master;
SHOW CREATE TABLE t1;
--echo --- Perform basic operation on master ---
--echo --- and ensure replicated correctly ---
--source include/rpl_multi_engine3.inc
--echo --- End test 1 Basic testing ---
--echo --- Do Cleanup --
DROP TABLE IF EXISTS t1;
#################################################################
--echo --- Start test 2 partition RANGE testing --
--echo --- Do setup --
#################################################
# Requirment: Create table that is partitioned #
# by range on year i.e. year(t) and replicate #
# basice operations such at insert, update #
# delete between 2 different storage engines #
# Alter table and ensure table is handled #
# Correctly on the slave #
#################################################
CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
y YEAR, t DATE)
PARTITION BY RANGE (YEAR(t))
(PARTITION p0 VALUES LESS THAN (1901),
PARTITION p1 VALUES LESS THAN (1946),
PARTITION p2 VALUES LESS THAN (1966),
PARTITION p3 VALUES LESS THAN (1986),
PARTITION p4 VALUES LESS THAN (2005),
PARTITION p5 VALUES LESS THAN MAXVALUE);
--echo --- Show table on master ---
SHOW CREATE TABLE t1;
--echo --- Show table on slave --
sync_slave_with_master;
SHOW CREATE TABLE t1;
--echo --- Perform basic operation on master ---
--echo --- and ensure replicated correctly ---
--source include/rpl_multi_engine3.inc
--echo --- Check that simple Alter statements are replicated correctly ---
ALTER TABLE t1 ADD PRIMARY KEY(t,id);
ALTER TABLE t1 MODIFY vc TEXT;
--echo --- Show the new improved table on the master ---
SHOW CREATE TABLE t1;
--echo --- Make sure that our tables on slave are still same engine ---
--echo --- and that the alter statements replicated correctly ---
sync_slave_with_master;
SHOW CREATE TABLE t1;
--echo --- Perform basic operation on master ---
--echo --- and ensure replicated correctly ---
--enable_query_log
--source include/rpl_multi_engine3.inc
--echo --- End test 2 partition RANGE testing ---
--echo --- Do Cleanup ---
DROP TABLE IF EXISTS t1;
########################################################
--echo --- Start test 3 partition LIST testing ---
--echo --- Do setup ---
#################################################
# Requirment: Create table that is partitioned #
# by list on id i.e. (2,4). Pretend that we #
# missed one and alter to add. Then replicate #
# basice operations such at insert, update #
# delete between 2 different storage engines #
# Alter table and ensure table is handled #
# Correctly on the slave #
#################################################
CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
y YEAR, t DATE)
PARTITION BY LIST(id)
(PARTITION p0 VALUES IN (2, 4),
PARTITION p1 VALUES IN (42, 142));
--echo --- Test 3 Alter to add partition ---
ALTER TABLE t1 ADD PARTITION (PARTITION p2 VALUES IN (412));
--echo --- Show table on master ---
SHOW CREATE TABLE t1;
--echo --- Show table on slave ---
sync_slave_with_master;
SHOW CREATE TABLE t1;
--echo --- Perform basic operation on master ---
--echo --- and ensure replicated correctly ---
--source include/rpl_multi_engine3.inc
--echo --- Check that simple Alter statements are replicated correctly ---
ALTER TABLE t1 ADD PRIMARY KEY(id);
ALTER TABLE t1 MODIFY vc TEXT;
--echo --- Show the new improved table on the master ---
SHOW CREATE TABLE t1;
--echo --- Make sure that our tables on slave are still same engine ---
--echo --- and that the alter statements replicated correctly ---
sync_slave_with_master;
SHOW CREATE TABLE t1;
--echo --- Perform basic operation on master ---
--echo --- and ensure replicated correctly ---
--source include/rpl_multi_engine3.inc
--echo --- End test 3 partition LIST testing ---
--echo --- Do Cleanup --
DROP TABLE IF EXISTS t1;
########################################################
--echo --- Start test 4 partition HASH testing ---
--echo --- Do setup ---
#################################################
# Requirment: Create table that is partitioned #
# by hash on year i.e. YEAR(t). Then replicate #
# basice operations such at insert, update #
# delete between 2 different storage engines #
# Alter table and ensure table is handled #
# Correctly on the slave #
#################################################
CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
y YEAR, t DATE)
PARTITION BY HASH( YEAR(t) )
PARTITIONS 4;
--echo --- show that tables have been created correctly ---
SHOW CREATE TABLE t1;
sync_slave_with_master;
SHOW CREATE TABLE t1;
--echo --- Perform basic operation on master ---
--echo --- and ensure replicated correctly ---
--source include/rpl_multi_engine3.inc
--echo --- Check that simple Alter statements are replicated correctly ---
ALTER TABLE t1 ADD PRIMARY KEY(t,id);
ALTER TABLE t1 MODIFY vc TEXT;
--echo --- Show the new improved table on the master ---
SHOW CREATE TABLE t1;
--echo --- Make sure that our tables on slave are still same engine ---
--echo --- and that the alter statements replicated correctly ---
sync_slave_with_master;
SHOW CREATE TABLE t1;
--echo --- Perform basic operation on master ---
--echo --- and ensure replicated correctly ---
--source include/rpl_multi_engine3.inc
--echo --- End test 4 partition HASH testing ---
--echo --- Do Cleanup --
DROP TABLE IF EXISTS t1;
########################################################
--echo --- Start test 5 partition by key testing ---
--echo --- Create Table Section ---
#################################################
# Requirment: Create table that is partitioned #
# by key on id with 4 parts. Then replicate #
# basice operations such at insert, update #
# delete between 2 different storage engines #
# Alter table and ensure table is handled #
# Correctly on the slave #
#################################################
CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
y YEAR, t DATE,PRIMARY KEY(id))
PARTITION BY KEY()
PARTITIONS 4;
--echo --- Show that tables on master are ndbcluster tables ---
SHOW CREATE TABLE t1;
--echo --- Show that tables on slave ---
sync_slave_with_master;
SHOW CREATE TABLE t1;
--echo --- Perform basic operation on master ---
--echo --- and ensure replicated correctly ---
--source include/rpl_multi_engine3.inc
# Okay lets see how it holds up to table changes
--echo --- Check that simple Alter statements are replicated correctly ---
ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(id, total);
--echo --- Show the new improved table on the master ---
SHOW CREATE TABLE t1;
--echo --- Make sure that our tables on slave are still right type ---
--echo --- and that the alter statements replicated correctly ---
sync_slave_with_master;
SHOW CREATE TABLE t1;
--echo --- Perform basic operation on master ---
--echo --- and ensure replicated correctly ---
--source include/rpl_multi_engine3.inc
--echo --- Check that simple Alter statements are replicated correctly ---
ALTER TABLE t1 MODIFY vc TEXT;
--echo --- Show the new improved table on the master ---
SHOW CREATE TABLE t1;
--echo --- Make sure that our tables on slave are still same engine ---
--echo --- and that the alter statements replicated correctly ---
sync_slave_with_master;
SHOW CREATE TABLE t1;
--echo --- Perform basic operation on master ---
--echo --- and ensure replicated correctly ---
--source include/rpl_multi_engine3.inc
--echo --- End test 5 key partition testing ---
--echo --- Do Cleanup ---
DROP TABLE IF EXISTS t1;
sync_slave_with_master;
# End of 5.1 test case

View File

@ -1,307 +0,0 @@
#############################################
#Authors: TU and Jeb
#Date: 2007/04
#Purpose: Generic replication to cluster
# and ensuring that the ndb_apply_status
# table is updated.
#############################################
# Notes:
# include/select_ndb_apply_status.inc
# Selects out the log name, start & end pos
# from the ndb_apply_status table
#
# include/show_binlog_using_logname.inc
# To select out 1 row from offset 1
# from the start position in the binlog whose
# name is = log_name
#
# include/tpcb.inc
# Creates DATABASE tpcb, the tables and
# stored procedures for loading the DB
# and for running transactions against DB.
##############################################
--echo
--echo *** Test 1 ***
--echo
connection master;
create table t1 (a int key, b int) engine innodb;
create table t2 (a int key, b int) engine innodb;
--echo
--sync_slave_with_master
alter table t1 engine ndb;
alter table t2 engine ndb;
--echo
# check binlog position without begin
connection master;
insert into t1 values (1,2);
--echo
--sync_slave_with_master
--source include/select_ndb_apply_status.inc
--echo
connection master;
--echo # Now check that that is in the apply_status table is consistant
--echo # with what is in the binlog
--echo
--echo # since insert is done with transactional engine, expect a BEGIN
--echo # at <start_pos>
--echo
--let $binlog_start= $start_pos
--let $binlog_limit= 1
--source include/show_binlog_events.inc
--echo
--echo # Now the insert, one step after
--echo
--let $binlog_start= $start_pos
--let $binlog_limit= 1,1
--source include/show_binlog_events.inc
--echo
--echo # and the COMMIT should be at <end_pos>
--echo
--let $binlog_start= $start_pos
--let $binlog_limit= 2,1
--source include/show_binlog_events.inc
--echo
# check binlog position with begin
begin;
insert into t1 values (2,3);
insert into t2 values (3,4);
commit;
--echo
--sync_slave_with_master
--source include/select_ndb_apply_status.inc
connection master;
--let $binlog_start= $start_pos
--let $binlog_limit= 1
--source include/show_binlog_events.inc
--echo
--let $binlog_start= $start_pos
--let $binlog_limit= 1,2
--source include/show_binlog_events.inc
--echo
--let $binlog_start= $start_pos
--let $binlog_limit= 3,1
--source include/show_binlog_events.inc
--echo
connection master;
DROP TABLE test.t1, test.t2;
--sync_slave_with_master
SHOW TABLES;
# Run in some transactions using stored procedures
# and ensure that the ndb_apply_status table is
# updated to show the transactions
--echo
--echo *** Test 2 ***
--echo
# Create database/tables and stored procdures
connection master;
--source include/tpcb.inc
# Switch tables on slave to use NDB
--sync_slave_with_master
USE tpcb;
ALTER TABLE account ENGINE NDB;
ALTER TABLE branch ENGINE NDB;
ALTER TABLE teller ENGINE NDB;
ALTER TABLE history ENGINE NDB;
--echo
# Load DB tpcb and run some transactions
connection master;
--disable_query_log
CALL tpcb.load();
SET AUTOCOMMIT=0;
let $run= 5;
while ($run)
{
START TRANSACTION;
--disable_warnings
--eval CALL tpcb.trans($rpl_format);
--enable_warnings
eval SET @my_errno= $mysql_errno;
let $run_good= `SELECT @my_errno = 0`;
let $run_bad= `SELECT @my_errno <> 0`;
if ($run_good)
{
COMMIT;
}
if ($run_bad)
{
ROLLBACK;
}
dec $run;
}
SET AUTOCOMMIT=1;
--enable_query_log
--sync_slave_with_master
--source include/select_ndb_apply_status.inc
--echo
connection master;
--source include/show_binlog_using_logname.inc
# Flush the logs on the master moving all
# Transaction to a new binlog and ensure
# that the ndb_apply_status table is updated
# to show the use of the new binlog.
--echo
--echo ** Test 3 **
--echo
# Flush logs on master which should force it
# to switch to binlog #2
FLUSH LOGS;
# Run in some transaction to increase end pos in
# binlog
--disable_query_log
SET AUTOCOMMIT=0;
let $run= 5;
while ($run)
{
START TRANSACTION;
--disable_warnings
--eval CALL tpcb.trans($rpl_format);
--enable_warnings
eval SET @my_errno= $mysql_errno;
let $run_good= `SELECT @my_errno = 0`;
let $run_bad= `SELECT @my_errno <> 0`;
if ($run_good)
{
COMMIT;
}
if ($run_bad)
{
ROLLBACK;
}
dec $run;
}
SET AUTOCOMMIT=1;
--enable_query_log
--echo
--sync_slave_with_master
--source include/select_ndb_apply_status.inc
--echo
connection master;
--source include/show_binlog_using_logname.inc
# Now we reset both the master and the slave
# Run some more transaction and ensure
# that the ndb_apply_status is updated
# correctly
--echo
--echo ** Test 4 **
--echo
# Reset both slave and master
# This should reset binlog to #1
--source include/rpl_reset.inc
--echo
# Run in some transactions and check
connection master;
--disable_query_log
SET AUTOCOMMIT=0;
let $run= 5;
while ($run)
{
START TRANSACTION;
--disable_warnings
--eval CALL tpcb.trans($rpl_format);
--enable_warnings
eval SET @my_errno= $mysql_errno;
let $run_good= `SELECT @my_errno = 0`;
let $run_bad= `SELECT @my_errno <> 0`;
if ($run_good)
{
COMMIT;
}
if ($run_bad)
{
ROLLBACK;
}
dec $run;
}
SET AUTOCOMMIT=1;
--enable_query_log
--sync_slave_with_master
--source include/select_ndb_apply_status.inc
--echo
connection master;
--source include/show_binlog_using_logname.inc
# Since we are doing replication, it is a good
# idea to check to make sure all data was
# Replicated correctly
--echo
--echo *** DUMP MASTER & SLAVE FOR COMPARE ********
--exec $MYSQL_DUMP -n -t --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/master_apply_status.sql
--exec $MYSQL_DUMP_SLAVE -n -t --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/slave_apply_status.sql
connection master;
DROP DATABASE tpcb;
--sync_slave_with_master
####### Commenting out until decision on Bug#27960 ###########
#--source include/select_ndb_apply_status.inc
#connection master;
#--eval SHOW BINLOG EVENTS in '$log_name' from $start_pos
#--source include/show_binlog_using_logname.inc
--echo ****** Do dumps compare ************
diff_files $MYSQLTEST_VARDIR/tmp/master_apply_status.sql $MYSQLTEST_VARDIR/tmp/slave_apply_status.sql;
## Note: Ths files should only get removed, if the above diff succeeds.
--exec rm $MYSQLTEST_VARDIR/tmp/master_apply_status.sql
--exec rm $MYSQLTEST_VARDIR/tmp/slave_apply_status.sql
# End of 5.1 Test

View File

@ -201,11 +201,11 @@ CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
PARTITION BY KEY()
PARTITIONS 4;
--echo --- Show that tables on master are ndbcluster tables ---
--echo --- Show tables on master ---
SHOW CREATE TABLE t1;
--echo --- Show that tables on slave ---
--echo --- Show tables on slave ---
sync_slave_with_master;
SHOW CREATE TABLE t1;

View File

@ -36,7 +36,6 @@ SELECT LENGTH(data) FROM test.t1 WHERE c1 = 3;
save_master_pos;
connection slave;
sync_with_master;
--source include/wait_for_ndb_to_binlog.inc
--echo
--echo **** Data Insert Validation Slave Section test.t1 ****
--echo
@ -59,7 +58,6 @@ SELECT LENGTH(data) FROM test.t1 WHERE c1 = 2;
save_master_pos;
connection slave;
sync_with_master;
--source include/wait_for_ndb_to_binlog.inc
--echo
--echo **** Data Update Validation Slave Section test.t1 ****
--echo
@ -130,7 +128,6 @@ FROM test.t2 WHERE c1=2;
save_master_pos;
connection slave;
sync_with_master;
--source include/wait_for_ndb_to_binlog.inc
--echo
--echo **** Data Insert Validation Slave Section test.t2 ****
--echo
@ -156,7 +153,6 @@ FROM test.t2 WHERE c1=2;
save_master_pos;
connection slave;
sync_with_master;
--source include/wait_for_ndb_to_binlog.inc
--echo
--echo **** Data Update Validation Slave Section test.t2 ****
--echo
@ -176,8 +172,4 @@ diff_files $MYSQLTEST_VARDIR/tmp/rpl_row_blob_master.sql $MYSQLTEST_VARDIR/tmp/r
DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
# ensure cleanup on slave as well:
# ndb blob tables consist of several tables
# if cluster is shutdown while not all tables are
# properly dropped, the table becomes inconsistent
# and wrecks later test cases
--sync_slave_with_master

View File

@ -54,11 +54,8 @@ INSERT INTO test.t1 VALUES (null,test.f1());
ROLLBACK;
SET AUTOCOMMIT=1;
# Sync master and slave for all engines except NDB
if (`SELECT UPPER(LEFT('$engine_type', 3)) != 'NDB'`) {
sync_slave_with_master;
connection master;
}
# Time to dump the databases and so we can see if they match

View File

@ -53,8 +53,6 @@ SELECT release_lock("test");
connection master;
SELECT * FROM test.t1;
#show binlog events;
--source include/wait_for_ndb_to_binlog.inc
sync_slave_with_master;
connection slave;
SELECT * FROM test.t1;

View File

@ -8,8 +8,6 @@ SHOW VARIABLES LIKE 'relay_log_space_limit';
# Matz says: I have no idea what this is supposed to test, but it has
# potential for generating different results with some storage engines
# that process rows in an order not dependent on the insertion order.
# For instance, I would assume that distributed storage engines (like
# NDB) could process rows based on locality.
eval CREATE TABLE t1 (name varchar(64), age smallint(3))ENGINE=$engine_type;
INSERT INTO t1 SET name='Andy', age=31;

View File

@ -7,7 +7,6 @@
#############################################################################
# Change Auth: JBM #
# Date: 2006-02-14 #
# Change: Added error, sleep and comments (ndb) #
####################################################
# Begin clean up test section
@ -28,15 +27,12 @@ CREATE TRIGGER test.t1_bi_t2 BEFORE INSERT ON test.t2 FOR EACH ROW INSERT INTO t
delimiter ;//
INSERT INTO test.t2 VALUES (1, 0.0);
# Expect duplicate error 1022 == ndb
--error 1022, ER_DUP_ENTRY
--error ER_DUP_ENTRY
INSERT INTO test.t2 VALUES (1, 0.0);
#show binlog events;
select * from test.t1;
select * from test.t2;
let $wait_time= 10;
--source include/wait_for_ndb_to_binlog.inc
sync_slave_with_master;
connection slave;
select * from test.t1;

View File

@ -191,17 +191,9 @@ drop table t1;
#
if(!$is_heap)
{
if(!$is_ndb)
{
--error ER_TOO_LONG_KEY
eval create table t1 (a text character set utf8mb4, primary key(a(371))) engine $engine;
}
if($is_ndb)
{
--error ER_BLOB_USED_AS_KEY
eval create table t1 (a text character set utf8mb4, primary key(a(371))) engine $engine;
}
}
#
# Bug 2959
@ -254,8 +246,6 @@ drop table t2;
# Bug 4521: unique key prefix interacts poorly with utf8mb4
# MYISAM: keys with prefix compression, case insensitive collation.
#
if (!$is_ndb)
{
eval create table t1 (c varchar(30) character set utf8mb4, unique(c(10))) engine $engine;
insert into t1 values ('1'),('2'),('3'),('x'),('y'),('z');
insert into t1 values ('aaaaaaaaaa');
@ -549,7 +539,6 @@ select c as c_all from t1 order by c;
select c as c_a from t1 where c='a';
select c as c_a from t1 where c='б';
drop table t1;
}
# Bug#4594: column index make = failed for gbk, but like works
@ -593,8 +582,6 @@ drop table t1;
# the same for HEAP+HASH
#
if (!$is_ndb)
{
eval create table t1 (
str varchar(255) character set utf8mb4 not null,
key str using hash (str(2))
@ -618,7 +605,6 @@ INSERT INTO t1 VALUES ('str');
INSERT INTO t1 VALUES ('str2');
select * from t1 where str='str';
drop table t1;
}
#
# Bug #5397: Crash with varchar binary and LIKE
@ -651,8 +637,6 @@ DROP TABLE t1;
#
if (!$is_heap)
{
if (!$is_ndb)
{
eval CREATE TABLE t1 (
id int unsigned NOT NULL auto_increment,
list_id smallint unsigned NOT NULL,
@ -689,7 +673,6 @@ SELECT id, term FROM t1 where (list_id = 1) AND (term = "testetest");
SELECT id, term FROM t1 where (list_id = 1) AND (term = "test<EFBFBD>test");
DROP TABLE t1;
}
}
#
# Bug #6019 SELECT tries to use too short prefix index on utf8mb4 data
@ -979,14 +962,7 @@ if (!$is_heap)
#
eval CREATE TABLE t1 (t TINYTEXT CHARACTER SET utf8mb4) ENGINE $engine;
INSERT INTO t1 VALUES(REPEAT('a', 100));
if (!$is_ndb)
{
eval CREATE TEMPORARY TABLE t2 ENGINE $engine SELECT COALESCE(t) AS bug FROM t1;
}
if ($is_ndb)
{
eval CREATE TABLE t2 ENGINE $engine SELECT COALESCE(t) AS bug FROM t1;
}
SELECT LENGTH(bug) FROM t2;
DROP TABLE t2;
DROP TABLE t1;
@ -1202,8 +1178,6 @@ SET NAMES latin2;
if (!$is_heap)
{
if (!$is_ndb)
{
eval CREATE TABLE t1 (
id int(11) NOT NULL default '0',
tid int(11) NOT NULL default '0',
@ -1229,7 +1203,6 @@ SELECT * FROM t1 WHERE tid=72 and val LIKE 'VOLN
DROP TABLE t1;
}
}
#
# Bug 20709: problem with utf8mb4 fields in temporary tables
@ -1318,8 +1291,6 @@ drop table t1;
#
# Check that do_varstring2_mb produces a warning
#
if (!$is_ndb)
{
eval create table t1 (
a varchar(4000) not null
) default character set utf8mb4 engine $engine;
@ -1327,7 +1298,6 @@ insert into t1 values (repeat('a',4000));
alter table t1 change a a varchar(3000) character set utf8mb4 not null;
select length(a) from t1;
drop table t1;
}
#
# Bug#10504: Character set does not support traditional mode
@ -1614,8 +1584,6 @@ set max_sort_length=default;
--echo #
if (!$is_heap)
{
if (!$is_ndb)
{
eval CREATE TABLE t1 (
clipid INT NOT NULL,
Tape TINYTEXT,
@ -1626,7 +1594,6 @@ ALTER TABLE t1 ADD mos TINYINT DEFAULT 0 AFTER clipid;
SHOW CREATE TABLE t1;
DROP TABLE t1;
}
}
#--echo #
#--echo # Check that supplementary characters are not allowed in identifiers
@ -1807,12 +1774,9 @@ INSERT INTO t2 VALUES (x'ea9da8');
SELECT HEX(CONCAT(utf8mb4, utf8mb3)) FROM t1,t2 ORDER BY 1;
SELECT CHARSET(CONCAT(utf8mb4, utf8mb3)) FROM t1, t2 LIMIT 1;
if (!$is_ndb)
{
eval CREATE TEMPORARY TABLE t3 ENGINE $engine AS SELECT *, concat(utf8mb4,utf8mb3) FROM t1, t2;
SHOW CREATE TABLE t3;
DROP TEMPORARY TABLE t3;
}
SELECT * FROM t1, t2 WHERE t1.utf8mb4 > t2.utf8mb3;
SELECT * FROM t1, t2 WHERE t1.utf8mb4 = t2.utf8mb3;

View File

@ -1,27 +0,0 @@
[cluster_config]
MaxNoOfSavedMessages= 1000
MaxNoOfConcurrentTransactions= 128
MaxNoOfConcurrentOperations= 10000
DataMemory= 20M
IndexMemory= 1M
Diskless= 0
TimeBetweenWatchDogCheck= 30000
MaxNoOfOrderedIndexes= 32
MaxNoOfAttributes= 2048
TimeBetweenGlobalCheckpoints= 500
NoOfFragmentLogFiles= 4
FragmentLogFileSize= 12M
DiskPageBufferMemory= 4M
# O_DIRECT has issues on 2.4 whach have not been handled, Bug #29612
#ODirect= 1
# the following parametes just function as a small regression
# test that the parameter exists
InitialNoOfOpenFiles= 27
# Increase timeouts for slow test-machines
HeartbeatIntervalDbDb= 30000
HeartbeatIntervalDbApi= 30000
#TransactionDeadlockDetectionTimeout= 7500

View File

@ -0,0 +1,4 @@
if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'aria' AND support IN ('YES', 'DEFAULT', 'ENABLED')`)
{
--skip Test requires Aria
}

View File

@ -0,0 +1,6 @@
--source include/have_innodb.inc
if (`SELECT COUNT(*) = 0 from INFORMATION_SCHEMA.GLOBAL_VARIABLES
WHERE VARIABLE_NAME = 'INNODB_DISALLOW_WRITES'`) {
--skip Test requires 'innodb_disallow_writes'
}
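
A usage sketch for the new check (the include's file name is not shown by this diff view, so the name below is assumed; innodb_disallow_writes itself is an XtraDB/Galera-related boolean global):

# hypothetical test exercising innodb_disallow_writes
--source include/have_innodb_disallow_writes.inc   # assumed name of the file added above
SET GLOBAL innodb_disallow_writes = ON;            # block InnoDB file writes
# ... statements whose behaviour under blocked writes is being tested ...
SET GLOBAL innodb_disallow_writes = OFF;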

View File

@ -1,52 +0,0 @@
# Setup connections to both MySQL Servers connected to the cluster
connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,);
connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,);
# Check that server1 has NDB support
connection server1;
let $engines_table= query_get_value(SHOW TABLES FROM information_schema LIKE 'ENGINES', Tables_in_information_schema (ENGINES), 1);
disable_query_log;
if (`SELECT 1 FROM dual WHERE '$engines_table' = 'engines'`)
{
--require r/true.require
SELECT (support = 'YES' or support = 'DEFAULT' or support = 'ENABLED') as `TRUE` FROM information_schema.engines WHERE engine = 'ndbcluster';
--source include/ndb_not_readonly.inc
}
enable_query_log;
# Check that server2 has NDB support
connection server2;
let $engines_table= query_get_value(SHOW TABLES FROM information_schema LIKE 'ENGINES', Tables_in_information_schema (ENGINES), 1);
disable_query_log;
if (`SELECT 1 FROM dual WHERE '$engines_table' = 'engines'`)
{
--require r/true.require
SELECT (support = 'YES' or support = 'DEFAULT' or support = 'ENABLED') as `TRUE` FROM information_schema.engines WHERE engine = 'ndbcluster';
--source include/ndb_not_readonly.inc
}
enable_query_log;
# cleanup
connection server1;
disable_query_log;
disable_warnings;
--error 0,1051
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
flush tables;
flush status;
enable_warnings;
enable_query_log;
connection server2;
disable_query_log;
disable_warnings;
--error 0,1051
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
flush tables;
flush status;
enable_warnings;
enable_query_log;
# Set the default connection
connection server1;

View File

@ -1,4 +1,3 @@
--require r/have_mutex_deadlock_detector.require
disable_query_log;
select 1 from information_schema.global_variables where variable_name = "mutex_deadlock_detector";
enable_query_log;
if (`select count(*)=0 from information_schema.global_variables where variable_name = "debug_mutex_deadlock_detector"`) {
skip needs safemutex deadlock detector;
}

View File

@ -1,2 +0,0 @@
# Check that server is compiled and started with support for NDB
--source include/have_multi_ndb.inc

View File

@ -1,2 +0,0 @@
-- require r/have_ndb_extra.require
eval select $NDB_EXTRA_TEST;

View File

@ -1,4 +0,0 @@
--require r/have_ndbapi_examples.require
disable_query_log;
eval select LENGTH('$NDB_EXAMPLES_BINARY') > 0 as 'have_ndb_example';
enable_query_log;

View File

@ -0,0 +1,8 @@
# To be used in a test which requires server to be compiled with wsrep support
# (-DWITH_WSREP=ON) and wsrep plugin is ACTIVE.
if (`SELECT COUNT(*)=0 FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_NAME = 'wsrep' AND PLUGIN_STATUS='ACTIVE'`)
{
--skip Test requires wsrep plugin.
}
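
A usage sketch (again the file name is assumed, since the diff view omits it; wsrep_ready is a standard wsrep status variable):

# hypothetical Galera test
--source include/have_wsrep.inc      # assumed name of the include added above
SHOW STATUS LIKE 'wsrep_ready';      # ON when the node has joined a cluster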

View File

@ -1,5 +1,4 @@
# Test if the engine does autocommit in LOAD DATA INFILE, or not
# (NDB wants to do, others don't).
eval SET SESSION STORAGE_ENGINE = $engine_type;
@ -9,8 +8,6 @@ drop table if exists t1;
let $load_file= $MYSQLTEST_VARDIR/std_data/loaddata2.dat;
# NDB does not support the create option 'Binlog of table with BLOB attribute and no PK'
# So use a dummy PK here.
create table t1 (id int unsigned not null auto_increment primary key, a text, b text);
start transaction;
--replace_result $load_file LOAD_FILE

View File

@ -47,7 +47,7 @@ BEGIN
-- Show "mysql" database, tables and columns
SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql
FROM INFORMATION_SCHEMA.TABLES
WHERE table_schema='mysql' AND table_name != 'ndb_apply_status'
WHERE table_schema='mysql'
ORDER BY tables_in_mysql;
SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql,
column_name, ordinal_position, column_default, is_nullable,
@ -55,7 +55,7 @@ BEGIN
numeric_precision, numeric_scale, character_set_name,
collation_name, column_type, column_key, extra, column_comment
FROM INFORMATION_SCHEMA.COLUMNS
WHERE table_schema='mysql' AND table_name != 'ndb_apply_status'
WHERE table_schema='mysql'
ORDER BY columns_in_mysql;
-- Dump all events, there should be none

View File

@ -115,9 +115,6 @@ INSERT INTO global_suppressions VALUES
("unknown variable 'loose-"),
("You have forced lower_case_table_names to 0 through a command-line option"),
("Setting lower_case_table_names=2"),
("NDB Binlog:"),
("NDB: failed to setup table"),
("NDB: only row based binary logging"),
("Neither --relay-log nor --relay-log-index were used"),
("Query partially completed"),
("Slave I.O thread aborted while waiting for relay log"),
@ -139,7 +136,6 @@ INSERT INTO global_suppressions VALUES
("Slave: The incident LOST_EVENTS occured on the master"),
("Slave: Unknown error.* 1105"),
("Slave: Can't drop database.* database doesn't exist"),
("Time-out in NDB"),
("Warning:\s+One can only use the --user.*root"),
("Warning:\s+Table:.* on (delete|rename)"),
("You have an error in your SQL syntax"),

View File

@ -1,48 +0,0 @@
######################################################
# By JBM 2006-02-16 So that the code is not repeated #
# in test cases and can be reused. #
######################################################
--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "start backup" >> $NDB_TOOLS_OUTPUT
# To find the backupid, we must dump this data to a table, and SELECT
# what we want into an outfile. This could be accomplished with grep, but
# grep isn't Windows-portable
--disable_query_log
# create a table to help us out
--disable_warnings # leave this on until done with the entire process
# cleanup
DROP TABLE IF EXISTS helper1;
CREATE TABLE helper1(c1 VARCHAR(20));
# dump raw data to file
let $ndb_backup_file1= $MYSQLTEST_VARDIR/ndb_backup_tmp.dat;
let $ndb_backup_file2= $MYSQLTEST_VARDIR/tmp.dat;
--disable_warnings
--error 0,1
--remove_file $ndb_backup_file1
--enable_warnings
--exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring="$NDB_CONNECTSTRING" -d sys --delimiter=',' SYSTAB_0 > $ndb_backup_file1
# load the table from the raw data file
eval LOAD DATA INFILE '$ndb_backup_file1' INTO TABLE helper1;
--remove_file $ndb_backup_file1
# output what we need
eval SELECT * FROM helper1 WHERE c1 LIKE '%520093696%'
INTO OUTFILE '$ndb_backup_file2';
# cleanup
DROP TABLE helper1;
--enable_warnings
--enable_query_log
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info
(id INT, backup_id INT) ENGINE = MEMORY;
--replace_result $MYSQLTEST_VARDIR <MYSQLTEST_VARDIR>
eval LOAD DATA INFILE '$ndb_backup_file2' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
--remove_file $ndb_backup_file2
# Load backup id into environment variable
let the_backup_id=`SELECT backup_id from test.backup_info`;
DROP TABLE test.backup_info;

View File

@ -1,9 +0,0 @@
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults $ndb_restore_opts -b $the_backup_id -n 1 $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id $ndb_restore_filter > $MYSQLTEST_VARDIR/tmp/tmp.dat
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults $ndb_restore_opts -b $the_backup_id -n 2 $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id $ndb_restore_filter >> $MYSQLTEST_VARDIR/tmp/tmp.dat
--exec sort $MYSQLTEST_VARDIR/tmp/tmp.dat
--disable_warnings
--error 0,1
--remove_file $MYSQLTEST_VARDIR/tmp/tmp.dat
--enable_warnings
--let ndb_restore_opts=
--let ndb_restore_filter=

View File

@ -1,4 +0,0 @@
-- require r/ndb_default_cluster.require
disable_query_log;
show status like "Ndb_config_from_host";
enable_query_log;

View File

@ -1,12 +0,0 @@
--source include/master-slave.inc
connection slave;
# Check that server is compiled and started with support for NDB
disable_query_log;
--require r/true.require
select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster';
--source include/ndb_not_readonly.inc
enable_query_log;
# Set the default connection to 'master'
connection master;

View File

@ -1,67 +0,0 @@
# ==== Purpose ====
#
# Set up circular cluster replication where each
# cluster has two mysqlds and replication directions are
# following:
# master ---> slave
# / \
# cluster A cluster B
# \ /
# master1 <--- slave1
#
# ==== Usage ====
#
# [--let $rpl_server_count= N]
# [--let $rpl_skip_check_server_ids= 1]
# [--let $rpl_skip_reset_master_and_slave= 1]
# [--let $rpl_skip_change_master= 1]
# [--let $rpl_skip_start_slave= 1]
# [--let $rpl_debug= 1]
# [--let $slave_timeout= NUMBER]
# --source include/ndb_master-slave_2ch.inc
#
# Parameters:
# $rpl_server_count, $rpl_skip_check_server_ids,
# $rpl_skip_reset_master_and_slave, $rpl_skip_change_master,
# $rpl_skip_start_slave, $rpl_debug, $slave_timeout
# See include/master-slave.inc
--let $rpl_topology= 1->2,4->3
--let $rpl_skip_check_server_ids= 1
--source include/rpl_init.inc
# Make connections to mysqlds
--let $rpl_connection_name= master
--let $rpl_server_number= 1
--source include/rpl_connect.inc
--let $rpl_connection_name= master1
--let $rpl_server_number= 1
--source include/rpl_connect.inc
--let $rpl_connection_name= slave
--let $rpl_server_number= 2
--source include/rpl_connect.inc
--let $rpl_connection_name= slave1
--let $rpl_server_number= 2
--source include/rpl_connect.inc
# Check that all mysqld are compiled with ndb support
--let $_rpl_server= 4
while ($_rpl_server)
{
--connection server_$_rpl_server
if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'ndbcluster' AND (support = 'YES' OR support = 'DEFAULT')`)
{
--skip Test requires NDB.
}
--source include/ndb_not_readonly.inc
--dec $_rpl_server
}
# Set the default connection to 'master' (cluster A)
connection master;

View File

@ -1,36 +0,0 @@
# Check that server has come out ot readonly mode
#
# wait for server to connect properly to cluster
#
--disable_query_log
set @saved_log = @@sql_log_bin;
set sql_log_bin = 0;
--error 0,ER_NO_SUCH_TABLE,ER_OPEN_AS_READONLY,ER_GET_ERRMSG,ER_KEY_NOT_FOUND
delete from mysql.ndb_apply_status where server_id=0;
let $mysql_errno= 1;
let $counter= 600;
while ($mysql_errno)
{
# Table is readonly until the mysqld has connected properly
--error 0,ER_NO_SUCH_TABLE,ER_OPEN_AS_READONLY,ER_GET_ERRMSG
replace into mysql.ndb_apply_status values(0,0,"",0,0);
if ($mysql_errno)
{
if (!$counter)
{
die Failed while waiting for mysqld to come out of readonly mode;
}
dec $counter;
--sleep 0.1
}
}
delete from mysql.ndb_apply_status where server_id=0;
set sql_log_bin = @saved_log;
--enable_query_log
#
# connected
#

View File

@ -1,8 +0,0 @@
######################################################
# By JBM 2006-02-16 So that the code is not repeated #
# in test cases and can be reused. #
######################################################
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -p 8 -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -p 8 -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT

View File

@ -1,11 +0,0 @@
######################################################
# By JBM 2006-03-08 So that the code is not repeated #
# in test cases and can be reused. #
######################################################
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING_SLAVE" -p 8 -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING_SLAVE" -p 8 -b $the_backup_id -n 2 -r -e --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT

View File

@ -1,27 +0,0 @@
#
# now setup replication to continue from last epoch
# 1. get ndb_apply_status epoch from slave
# 2. get corresponding _next_ binlog postition from master
# 3. change master on slave
# 1.
--connection slave
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM mysql.ndb_apply_status;
--let $the_epoch= `select @the_epoch`
# 2.
--connection master
--replace_result $the_epoch <the_epoch>
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM mysql.ndb_binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
--let $the_pos= `SELECT @the_pos`
--let $the_file= `SELECT @the_file`
# 3.
--connection slave
--replace_result $the_pos <the_pos>
eval CHANGE MASTER TO
master_log_file = '$the_file',
master_log_pos = $the_pos ;

View File

@ -1,26 +0,0 @@
# Check that mysqld has reconnected to ndbd after
# restart of ndbd
#
--disable_query_log
--disable_result_log
let $mysql_errno= 1;
let $counter= 600;
while ($mysql_errno)
{
--error 0,157
CREATE TABLE ndb_wait_connected (a int primary key);
if ($mysql_errno)
{
if (!$counter)
{
die Failed waiting for mysqld to reconnect to ndbd;
}
dec $counter;
--sleep 0.1
}
}
DROP TABLE ndb_wait_connected;
--enable_query_log
--enable_result_log

View File

@ -1,7 +0,0 @@
-- require r/not_ndb.require
disable_query_log;
# so that both DISABLED and NO is output as NO
-- replace_result DISABLED NO
show variables like "have_ndbcluster";
enable_query_log;

View File

@ -1,4 +0,0 @@
--require r/not_ndb_default.require
disable_query_log;
select convert(@@storage_engine using latin1) NOT IN ("ndbcluster","NDBCLUSTER") as "TRUE";
enable_query_log;

View File

@ -256,9 +256,14 @@ execute stmt1 using @my_key ;
execute full_info ;
--disable_metadata
# the next statement must fail
--error 1064
# the next statement does not fail anymore
prepare stmt1 from "select c1 into ? from t9 where c1= 1" ;
execute stmt1 using @result;
--enable_metadata
execute full_info ;
--disable_metadata
select @result;

View File

@ -428,7 +428,6 @@ execute stmt1 using @arg01, @arg02;
prepare stmt1 from ' select a, b FROM t1 outer_table where
a = (select a from t1 where b = outer_table.b ) order by a ';
# also Bug#4000 (only BDB tables)
# Bug#4106 : ndb table, query with correlated subquery, wrong result
execute stmt1 ;
# test case derived from client_test.c: test_subqueries_ref
let $1= 3 ;

View File

@ -35,9 +35,7 @@
# (It is allowed, but not required, to configure SERVER_MYPORT_1
# and SERVER_MYPORT_2 too. If these variables are not set, the
# variables MASTER_MYPORT and SLAVE_MYPORT, configured in the
# default my.cnf used by the rpl and rpl_ndb suites, are used
# instead. In addition, in the rpl_ndb suite, SERVER_MYPORT_3 is
# not needed since MASTER_MYPORT1 can be used instead.)
# default my.cnf used by the rpl suite, are used instead.)
#
# 2. Execute the following near the top of the test:
#
@ -124,18 +122,6 @@ if (!$SERVER_MYPORT_2)
{
--let SERVER_MYPORT_2= $SLAVE_MYPORT
}
# Allow $MASTER_MYPORT1 as alias for $SERVER_MYPORT_3
# (this alias is used by rpl_ndb tests)
if (!$SERVER_MYPORT_3)
{
--let SERVER_MYPORT_3= $MASTER_MYPORT1
}
# Allow $SLAVE_MYPORT1 as alias for $SERVER_MYPORT_4
# (this alias is used by rpl_ndb tests)
if (!$SERVER_MYPORT_4)
{
--let SERVER_MYPORT_4= $SLAVE_MYPORT1
}
# Check that $rpl_server_count is set
if (!$rpl_server_count)
{

View File

@ -72,10 +72,6 @@ ORDER BY id;
connection master;
--echo --- Remove a record from t1 on master ---
# Note: there is an error in replication of Delete_row
# from NDB to MyISAM (BUG#28538). However, if there is
# only one row in Delete_row event then it works fine,
# as this test demonstrates.
DELETE FROM t1 WHERE id = 412;
--echo --- Show current count on master for t1 ---

View File

@ -1,23 +0,0 @@
# to mask out the error - neither abort nor log in the result file - when setting
# a read-only variable.
# It is assumed that the new value is equal to the one the var was already set to.
# Such a situation happens particularly with binlog_format, which becomes read-only
# with ndb as the default storage engine.
#
# When generating results, always check the file to see what is expected;
# the SET query may fail.
# The script accepts $maybe_ro_var as the var name and $val4var as the value
### USAGE:
### let $maybe_ro_var= ...
### let $val4var= ...
### include/safe_set_to_maybe_ro_var.inc
--disable_result_log
--disable_abort_on_error
eval SET $maybe_ro_var = $val4var;
--enable_abort_on_error
--enable_result_log
eval SELECT $maybe_ro_var;

View File

@ -1,13 +0,0 @@
##################################################
# Author: Jeb
# Date: 2007/04
# Purpose: To select out log name, start and end
# positions from ndb_apply_status table
##################################################
--replace_column 1 <log_name> 2 <start_pos> 3 <end_pos>
select @log_name:=log_name, @start_pos:=start_pos, @end_pos:=end_pos
from mysql.ndb_apply_status;
--let $start_pos = `select @start_pos`
--let $end_pos = `select @end_pos`
--let $log_name = `select @log_name`

View File

@ -1,26 +0,0 @@
# ==== Purpose ====
#
# Several test primitives from mysql-test/extra/rpl_tests
# are shared for test cases for MyISAM, InnoDB, NDB and
# other engines.
# For the NDB engine all events are added by the NDB injector,
# so tests can only continue after the injector is ready;
# this file waits for the proper injector thread state.
#
# ==== Usage ====
#
# let $engine_type= NDB;
# --source include/wait_for_ndb_to_binlog.inc
#
# ==== Parameters =====
#
# $engine_type
# Type of engine. If the type is NDB, the include waits for the
# injector thread to reach the proper state.
if (`SELECT UPPER(LEFT('$engine_type',3)) = 'NDB'`) {
let $show_statement= SHOW PROCESSLIST;
let $field= State;
let $condition= = 'Waiting for event from ndbcluster';
source include/wait_show_condition.inc;
}

View File

@ -0,0 +1,3 @@
[32bit]
[64bit]

View File

@ -0,0 +1,4 @@
#
# tests that include this file will be run for an appropriate combination.
# See word_size.combinations for the list of combinations.
#

View File

@ -260,65 +260,6 @@ if (IS_WINDOWS)
push(@mysqld_rules, {'shared-memory-base-name' => \&fix_socket});
}
sub fix_ndb_mgmd_port {
my ($self, $config, $group_name, $group)= @_;
my $hostname= $group->value('HostName');
return $self->{PORT}++;
}
sub fix_cluster_dir {
my ($self, $config, $group_name, $group)= @_;
my $vardir= $self->{ARGS}->{vardir};
my (undef, $process_type, $idx, $suffix)= split(/\./, $group_name);
return "$vardir/mysql_cluster.$suffix/$process_type.$idx";
}
sub fix_cluster_backup_dir {
my ($self, $config, $group_name, $group)= @_;
my $vardir= $self->{ARGS}->{vardir};
my (undef, $process_type, $idx, $suffix)= split(/\./, $group_name);
return "$vardir/mysql_cluster.$suffix/";
}
#
# Rules to run for each ndb_mgmd in the config
# - will be run in order listed here
#
my @ndb_mgmd_rules=
(
{ 'PortNumber' => \&fix_ndb_mgmd_port },
{ 'DataDir' => \&fix_cluster_dir },
);
#
# Rules to run for each ndbd in the config
# - will be run in order listed here
#
my @ndbd_rules=
(
{ 'HostName' => \&fix_host },
{ 'DataDir' => \&fix_cluster_dir },
{ 'BackupDataDir' => \&fix_cluster_backup_dir },
);
#
# Rules to run for each cluster_config section
# - will be run in order listed here
#
my @cluster_config_rules=
(
{ 'ndb_mgmd' => \&fix_host },
{ 'ndbd' => \&fix_host },
{ 'mysqld' => \&fix_host },
{ 'ndbapi' => \&fix_host },
);
#
# Rules to run for [client] section
# - will be run in order listed here
@ -496,49 +437,12 @@ sub post_fix_resolve_at_variables {
}
}
sub post_fix_mysql_cluster_section {
my ($self, $config)= @_;
# Add a [mysql_cluster.<suffix>] section for each
# defined [cluster_config.<suffix>] section
foreach my $group ( $config->like('cluster_config\.\w*$') )
{
my @urls;
# Generate ndb_connectstring for this cluster
foreach my $ndb_mgmd ( $config->like('cluster_config.ndb_mgmd.')) {
if ($ndb_mgmd->suffix() eq $group->suffix()) {
my $host= $ndb_mgmd->value('HostName');
my $port= $ndb_mgmd->value('PortNumber');
push(@urls, "$host:$port");
}
}
croak "Could not generate valid ndb_connectstring for '$group'"
unless @urls > 0;
my $ndb_connectstring= join(";", @urls);
# Add ndb_connectstring to [mysql_cluster.<suffix>]
$config->insert('mysql_cluster'.$group->suffix(),
'ndb_connectstring', $ndb_connectstring);
# Add ndb_connectstring to each mysqld connected to this
# cluster
foreach my $mysqld ( $config->like('cluster_config.mysqld.')) {
if ($mysqld->suffix() eq $group->suffix()) {
my $after= $mysqld->after('cluster_config.mysqld');
$config->insert("mysqld$after",
'ndb_connectstring', $ndb_connectstring);
}
}
}
}
#
# Rules to run last of all
#
my @post_rules=
(
\&post_check_client_groups,
\&post_fix_mysql_cluster_section,
\&post_fix_resolve_at_variables,
\&post_check_embedded_group,
);
@ -576,54 +480,6 @@ sub run_section_rules {
}
sub run_generate_sections_from_cluster_config {
my ($self, $config)= @_;
my @options= ('ndb_mgmd', 'ndbd',
'mysqld', 'ndbapi');
foreach my $group ( $config->like('cluster_config\.\w*$') ) {
# Keep track of current index per process type
my %idxes;
map { $idxes{$_}= 1; } @options;
foreach my $option_name ( @options ) {
my $value= $group->value($option_name);
my @hosts= split(/,/, $value, -1); # -1 => return also empty strings
# Add at least one host
push(@hosts, undef) unless scalar(@hosts);
# Assign hosts unless already fixed
@hosts= map { $self->fix_host() unless $_; } @hosts;
# Write the hosts value back
$group->insert($option_name, join(",", @hosts));
# Generate sections for each host
foreach my $host ( @hosts ){
my $idx= $idxes{$option_name}++;
my $suffix= $group->suffix();
# Generate a section for ndb_mgmd to read
$config->insert("cluster_config.$option_name.$idx$suffix",
"HostName", $host);
if ($option_name eq 'mysqld'){
my $datadir=
$self->fix_cluster_dir($config,
"cluster_config.mysqld.$idx$suffix",
$group);
$config->insert("mysqld.$idx$suffix",
'datadir', "$datadir/data");
}
}
}
}
}
sub new_config {
my ($class, $args)= @_;
@ -648,18 +504,6 @@ sub new_config {
&$rule($self, $config);
}
$self->run_section_rules($config,
'cluster_config\.\w*$',
@cluster_config_rules);
$self->run_generate_sections_from_cluster_config($config);
$self->run_section_rules($config,
'cluster_config.ndb_mgmd.',
@ndb_mgmd_rules);
$self->run_section_rules($config,
'cluster_config.ndbd',
@ndbd_rules);
$self->run_section_rules($config,
'mysqld.',
@mysqld_rules);
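
The rule tables deleted above (@ndb_mgmd_rules, @ndbd_rules, @cluster_config_rules) follow the same pattern as the surviving @mysqld_rules: an ordered list of option-name => callback entries that the factory walks in order, filling in each option from what the callback returns. A minimal stand-alone sketch of that pattern, assuming a group simplified to a plain hash (apply_rules and the sample values are illustrative, not the real My::ConfigFactory API):

use strict;
use warnings;

# Ordered list of rules, mirroring @ndbd_rules above: option name => callback.
my @rules = (
  { 'DataDir'  => sub { my ($g) = @_; return "/tmp/var/$g->{name}"; } },
  { 'HostName' => sub { return 'localhost'; } },
);

# Walk the rules in order; each callback may inspect the group and
# returns the value to store for its option (skipped if already set).
sub apply_rules {
  my ($group, @rule_list) = @_;
  foreach my $rule (@rule_list) {
    my ($option, $callback) = %$rule;
    $group->{$option} = $callback->($group)
      unless defined $group->{$option};
  }
  return $group;
}

my %group = (name => 'ndbd.1');
apply_rules(\%group, @rules);
print "$_= $group{$_}\n" for sort keys %group;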

View File

@ -36,7 +36,6 @@ our $do_test;
our $skip_test;
our $binlog_format;
our $enable_disabled;
our $opt_with_ndbcluster_only;
sub collect_option {
my ($opt, $value)= @_;
@ -817,29 +816,6 @@ sub collect_one_test_case {
return $tinfo
}
if ( $tinfo->{'ndb_test'} )
{
# This is an NDB test
if ( $::ndbcluster_enabled == 0)
{
# ndbcluster is disabled
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "ndbcluster disabled";
return $tinfo;
}
}
else
{
# This is not an ndb test
if ( $opt_with_ndbcluster_only )
{
# Only the ndb tests should be run, all others should be skipped
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "Only ndbcluster tests";
return $tinfo;
}
}
if ( $tinfo->{'rpl_test'} )
{
if ( $skip_rpl )
@ -976,10 +952,7 @@ sub collect_one_test_case {
my $tags_map= {'big_test' => ['big_test', 1],
'have_ndb' => ['ndb_test', 1],
'have_multi_ndb' => ['ndb_test', 1],
'master-slave' => ['rpl_test', 1],
'ndb_master-slave' => ['rpl_test', 1, 'ndb_test', 1],
'long_test' => ['long_test', 1],
};
my $tags_regex_string= join('|', keys %$tags_map);
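
For context on the %$tags_map entries removed above: each key is an include file and each value is a list of attribute/value pairs, and collect_one_test_case tags a test with those attributes whenever the .test file sources a matching include (the regex joined from the keys keeps the scan to one match per line). A rough stand-alone sketch of that idea, with made-up include names and an illustrative apply_tags helper rather than the real mtr_cases code:

use strict;
use warnings;

my $tags_map = {
  'big_test.inc'   => ['big_test', 1],
  'have_debug.inc' => ['need_debug', 1],
};
my $tags_regex = join('|', map { quotemeta } keys %$tags_map);

# Scan the test file's lines and copy the attribute/value pairs of any
# include that is sourced into the test-info hash.
sub apply_tags {
  my ($tinfo, @lines) = @_;
  for my $line (@lines) {
    next unless $line =~ /($tags_regex)/;
    my @tags = @{ $tags_map->{$1} };
    while (@tags) {
      my ($attr, $val) = (shift @tags, shift @tags);
      $tinfo->{$attr} = $val;
    }
  }
  return $tinfo;
}

my $tinfo = apply_tags({ name => 't1' }, '--source include/big_test.inc');
print "$_=$tinfo->{$_}\n" for sort keys %$tinfo;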

View File

@ -154,7 +154,6 @@ sub collect_test_cases ($) {
#
# Append the criteria for sorting, in order of importance.
#
push(@criteria, "ndb=" . ($tinfo->{'ndb_test'} ? "1" : "0"));
# Group test with equal options together.
# Ending with "~" makes empty sort later than filled
push(@criteria, join("!", sort @{$tinfo->{'master_opt'}}) . "~");
@ -788,8 +787,6 @@ sub collect_one_test_case($$$$$$$$$) {
{
# Different default engine is used
# tag test to require that engine
$tinfo->{'ndb_test'}= 1
if ( $::used_default_engine =~ /^ndb/i );
$tinfo->{'innodb_test'}= 1
if ( $::used_default_engine =~ /^innodb/i );
@ -809,20 +806,6 @@ sub collect_one_test_case($$$$$$$$$) {
return;
}
if ( $tinfo->{'ndb_extra'} and ! $::opt_ndb_extra_test )
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "Test need 'ndb_extra' option";
return;
}
if ( $tinfo->{'require_manager'} )
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "Test need the _old_ manager(to be removed)";
return;
}
if ( $tinfo->{'need_debug'} && ! $::debug_compiled_binaries )
{
$tinfo->{'skip'}= 1;
@ -830,38 +813,6 @@ sub collect_one_test_case($$$$$$$$$) {
return;
}
if ( $tinfo->{'ndb_test'} )
{
# This is an NDB test
if ( ! $::glob_ndbcluster_supported )
{
# Ndb is not supported, skip it
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "No ndbcluster support";
return;
}
elsif ( $::opt_skip_ndbcluster )
{
# All ndb tests should be skipped
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "No ndbcluster tests(--skip-ndbcluster)";
return;
}
# Ndb tests run with two mysqld masters
$tinfo->{'master_num'}= 2;
}
else
{
# This is not an ndb test
if ( $::opt_with_ndbcluster_only )
{
# Only the ndb tests should be run, all others should be skipped
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "Only ndbcluster tests(--with-ndbcluster-only)";
return;
}
}
if ( $tinfo->{'innodb_test'} )
{
# This is a test that need innodb
@ -916,10 +867,6 @@ our @tags=
["include/have_log_bin.inc", "need_binlog", 1],
["include/big_test.inc", "big_test", 1],
["include/have_debug.inc", "need_debug", 1],
["include/have_ndb.inc", "ndb_test", 1],
["include/have_multi_ndb.inc", "ndb_test", 1],
["include/have_ndb_extra.inc", "ndb_extra", 1],
["include/ndb_master-slave.inc", "ndb_test", 1],
["require_manager", "require_manager", 1],
["include/federated.inc", "federated_test", 1],
["include/have_federated_db.inc", "federated_test", 1],

View File

@ -31,7 +31,6 @@ sub mtr_check_stop_servers ($);
sub mtr_kill_leftovers ();
sub mtr_wait_blocking ($);
sub mtr_record_dead_children ();
sub mtr_ndbmgm_start($$);
sub mtr_mysqladmin_start($$$);
sub mtr_exit ($);
sub sleep_until_file_created ($$$);
@ -342,10 +341,8 @@ sub mtr_process_exit_status {
##############################################################################
# Kill all processes(mysqld, ndbd, ndb_mgmd and im) that would conflict with
# this run
# Kill all processes that would conflict with this run
# Make sure to remove the PID file, if any.
# kill IM manager first, else it will restart the servers
sub mtr_kill_leftovers () {
mtr_report("Killing Possible Leftover Processes");
@ -376,46 +373,6 @@ sub mtr_kill_leftovers () {
$srv->{'pid'}= 0; # Assume we are done with it
}
if ( ! $::opt_skip_ndbcluster )
{
foreach my $cluster (@{$::clusters})
{
# Don't shut down a "running" cluster
next if $cluster->{'use_running'};
mtr_debug(" - cluster " .
"(pid: $cluster->{pid}; " .
"pid file: '$cluster->{path_pid})");
my $pid= mtr_ndbmgm_start($cluster, "shutdown");
# Save the pid of the ndb_mgm process
$admin_pids{$pid}= 1;
push(@kill_pids,{
pid => $cluster->{'pid'},
pidfile => $cluster->{'path_pid'}
});
$cluster->{'pid'}= 0; # Assume we are done with it
foreach my $ndbd (@{$cluster->{'ndbds'}})
{
mtr_debug(" - ndbd " .
"(pid: $ndbd->{pid}; " .
"pid file: '$ndbd->{path_pid})");
push(@kill_pids,{
pid => $ndbd->{'pid'},
pidfile => $ndbd->{'path_pid'},
});
$ndbd->{'pid'}= 0; # Assume we are done with it
}
}
}
# Wait for all the admin processes to complete
mtr_wait_blocking(\%admin_pids);
@ -747,32 +704,6 @@ sub mtr_mysqladmin_start($$$) {
}
# Start "ndb_mgm shutdown" for a specific cluster, it will
# shutdown all data nodes and leave the ndb_mgmd running
sub mtr_ndbmgm_start($$) {
my $cluster= shift;
my $command= shift;
my $args;
mtr_init_args(\$args);
mtr_add_arg($args, "--no-defaults");
mtr_add_arg($args, "--core");
mtr_add_arg($args, "--try-reconnect=1");
mtr_add_arg($args, "--ndb_connectstring=%s", $cluster->{'connect_string'});
mtr_add_arg($args, "-e");
mtr_add_arg($args, "$command");
my $pid= mtr_spawn($::exe_ndb_mgm, $args,
"", "/dev/null", "/dev/null", "",
{});
mtr_verbose("mtr_ndbmgm_start, pid: $pid");
return $pid;
}
# Ping all servers in list, exit when none of them answers
# or when timeout has passed
sub mtr_ping_with_timeout($) {
@ -843,25 +774,6 @@ sub mark_process_dead($)
}
}
foreach my $cluster (@{$::clusters})
{
if ( $cluster->{'pid'} eq $ret_pid )
{
mtr_verbose("$cluster->{'name'} cluster ndb_mgmd exited, pid: $ret_pid");
$cluster->{'pid'}= 0;
return;
}
foreach my $ndbd (@{$cluster->{'ndbds'}})
{
if ( $ndbd->{'pid'} eq $ret_pid )
{
mtr_verbose("$cluster->{'name'} cluster ndbd exited, pid: $ret_pid");
$ndbd->{'pid'}= 0;
return;
}
}
}
mtr_warning("mark_process_dead couldn't find an entry for pid: $ret_pid");
}
@ -915,52 +827,6 @@ sub check_expected_crash_and_restart($)
}
}
foreach my $cluster (@{$::clusters})
{
if ( $cluster->{'pid'} eq $ret_pid )
{
mtr_verbose("$cluster->{'name'} cluster ndb_mgmd exited, pid: $ret_pid");
$cluster->{'pid'}= 0;
# Check if crash expected and restart if it was
my $expect_file= "$::opt_vardir/tmp/ndb_mgmd_" . "$cluster->{'type'}" .
".expect";
if ( -f $expect_file )
{
mtr_verbose("Crash was expected, file $expect_file exists");
unlink($expect_file);
ndbmgmd_start($cluster);
}
return;
}
foreach my $ndbd (@{$cluster->{'ndbds'}})
{
if ( $ndbd->{'pid'} eq $ret_pid )
{
mtr_verbose("$cluster->{'name'} cluster ndbd exited, pid: $ret_pid");
$ndbd->{'pid'}= 0;
# Check if crash expected and restart if it was
my $expect_file= "$::opt_vardir/tmp/ndbd_" . "$cluster->{'type'}" .
"$ndbd->{'idx'}" . ".expect";
if ( -f $expect_file )
{
mtr_verbose("Crash was expected, file $expect_file exists");
unlink($expect_file);
ndbd_start($cluster, $ndbd->{'idx'},
$ndbd->{'start_extra_args'});
}
return;
}
}
}
if ($::instance_manager->{'spawner_pid'} eq $ret_pid)
{
return;
}
mtr_warning("check_expected_crash_and_restart couldn't find an entry for pid: $ret_pid");
}
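
The check_expected_crash_and_restart logic above relies on mtr's ".expect" file convention: a test that intends to kill a server first creates <vardir>/tmp/<name>.expect, and when the monitor later notices that the process died, the presence of that file means the crash was expected and the server is simply restarted. A small self-contained sketch of that convention (the directory layout and helper names here are illustrative only, not the real mtr code):

use strict;
use warnings;
use File::Temp qw(tempdir);

my $vardir = tempdir(CLEANUP => 1);
mkdir "$vardir/tmp";

# Called by the test side just before it deliberately kills a server.
sub mark_expected_crash {
  my ($name) = @_;
  open my $fh, '>', "$vardir/tmp/$name.expect" or die $!;
  close $fh;
}

# Called by the monitor when it sees a server process exit.
sub handle_dead_process {
  my ($name) = @_;
  my $expect_file = "$vardir/tmp/$name.expect";
  if (-f $expect_file) {
    unlink $expect_file;
    print "$name: crash was expected, restarting\n";
    return 'restart';
  }
  warn "$name: unexpected death\n";
  return 'fail';
}

mark_expected_crash('mysqld.1');
print handle_dead_process('mysqld.1'), "\n";   # restart
print handle_dead_process('mysqld.2'), "\n";   # fail (and warns)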

View File

@ -287,9 +287,6 @@ sub mtr_report_stats ($) {
/unknown variable 'loose-/ or
/You have forced lower_case_table_names to 0 through a command-line option/ or
/Setting lower_case_table_names=2/ or
/NDB Binlog:/ or
/NDB: failed to setup table/ or
/NDB: only row based binary logging/ or
/Neither --relay-log nor --relay-log-index were used/ or
/Query partially completed/ or
/Slave I.O thread aborted while waiting for relay log/ or
@ -314,7 +311,6 @@ sub mtr_report_stats ($) {
/Slave: Can't drop database.* database doesn't exist/ or
/Slave SQL:.*(?:error.* \d+|Query:.*)/ or
/Sort aborted/ or
/Time-out in NDB/ or
/One can only use the --user.*root/ or
/Table:.* on (delete|rename)/ or
/You have an error in your SQL syntax/ or
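
The patterns dropped above belong to a plain suppression list: every server log line is matched against a set of regexes, and only lines that match none of them are reported as real warnings. A minimal sketch of that filtering step, using a tiny illustrative pattern list rather than the full mtr set:

use strict;
use warnings;

my @suppressions = (
  qr/Slave SQL:.*(?:error.* \d+|Query:.*)/,
  qr/Sort aborted/,
  qr/You have an error in your SQL syntax/,
);

# Keep only the log lines that match none of the suppression patterns.
sub unexpected_warnings {
  my (@lines) = @_;
  return grep {
    my $line = $_;
    !grep { $line =~ $_ } @suppressions;
  } @lines;
}

my @log = (
  "2014-10-15 12:00:01 [Warning] Sort aborted",
  "2014-10-15 12:00:02 [ERROR] disk full",
);
print "$_\n" for unexpected_warnings(@log);   # prints only the disk-full line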

File diff suppressed because it is too large

View File

@ -1,47 +0,0 @@
[ndbd default]
NoOfReplicas= 1
MaxNoOfConcurrentTransactions= 64
MaxNoOfConcurrentOperations= CHOOSE_MaxNoOfConcurrentOperations
DataMemory= CHOOSE_DataMemory
IndexMemory= CHOOSE_IndexMemory
Diskless= CHOOSE_Diskless
TimeBetweenWatchDogCheck= 30000
DataDir= CHOOSE_FILESYSTEM
MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes
TimeBetweenGlobalCheckpoints= 500
NoOfFragmentLogFiles= 8
FragmentLogFileSize= 6M
DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory
#
# Increase timeouts to cater for slow test-machines
# (possibly running several tests in parallel)
#
HeartbeatIntervalDbDb= 30000
HeartbeatIntervalDbApi= 30000
#TransactionDeadlockDetectionTimeout= 7500
[ndbd]
HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network address
[ndb_mgmd]
HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network address
DataDir= CHOOSE_FILESYSTEM #
PortNumber= CHOOSE_PORT_MGM
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]

View File

@ -1,55 +0,0 @@
[ndbd default]
NoOfReplicas= 2
MaxNoOfConcurrentTransactions= 64
MaxNoOfConcurrentOperations= CHOOSE_MaxNoOfConcurrentOperations
DataMemory= CHOOSE_DataMemory
IndexMemory= CHOOSE_IndexMemory
Diskless= CHOOSE_Diskless
TimeBetweenWatchDogCheck= 30000
DataDir= CHOOSE_FILESYSTEM
MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes
TimeBetweenGlobalCheckpoints= 500
NoOfFragmentLogFiles= 4
FragmentLogFileSize=12M
DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory
# O_DIRECT has issues on 2.4 which have not been handled, Bug #29612
#ODirect= 1
# the following parameters just function as a small regression
# test that the parameter exists
InitialNoOfOpenFiles= 27
#
# Increase timeouts to cater for slow test-machines
# (possibly running several tests in parallel)
#
HeartbeatIntervalDbDb= 30000
HeartbeatIntervalDbApi= 30000
#TransactionDeadlockDetectionTimeout= 7500
[ndbd]
HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network address
[ndbd]
HostName= CHOOSE_HOSTNAME_2 # hostname is a valid network address
[ndb_mgmd]
HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network address
DataDir= CHOOSE_FILESYSTEM #
PortNumber= CHOOSE_PORT_MGM
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]
[mysqld]

View File

@ -135,6 +135,9 @@ my $opt_start;
my $opt_start_dirty;
my $opt_start_exit;
my $start_only;
my $file_wsrep_provider;
our @global_suppressions;
END {
if ( defined $opt_tmpdir_pid and $opt_tmpdir_pid == $$ )
@ -187,6 +190,7 @@ my @DEFAULT_SUITES= qw(
sys_vars-
unit-
vcol-
wsrep-
);
my $opt_suites;
@ -337,16 +341,6 @@ sub check_timeout ($) { return testcase_timeout($_[0]) / 10; }
our $opt_warnings= 1;
our $ndbcluster_enabled= 0;
my $opt_include_ndbcluster= 0;
my $opt_skip_ndbcluster= 0;
my $exe_ndbd;
my $exe_ndbmtd;
my $exe_ndb_mgmd;
my $exe_ndb_waiter;
my $exe_ndb_mgm;
our %mysqld_variables;
our @optional_plugins;
@ -409,7 +403,6 @@ sub main {
# Run the mysqld to find out what features are available
collect_mysqld_features();
}
check_ndbcluster_support();
check_ssl_support();
check_debug_support();
@ -601,12 +594,6 @@ sub run_test_server ($$$) {
my $test_failure= 0; # Set true if test suite failed
my $extra_warnings= []; # Warnings found during server shutdowns
# Scheduler variables
my $max_ndb= $ENV{MTR_MAX_NDB} || $childs / 2;
$max_ndb = $childs if $max_ndb > $childs;
$max_ndb = 1 if $max_ndb < 1;
my $num_ndb_tests= 0;
my $completed= [];
my %running;
my $result;
@ -780,9 +767,6 @@ sub run_test_server ($$$) {
mtr_error("'", $result->{name},"' is not known to be running")
unless delete $running{$result->key()};
# Update scheduler variables
$num_ndb_tests-- if ($result->{ndb_test});
# Save result in completed list
push(@$completed, $result);
@ -815,7 +799,6 @@ sub run_test_server ($$$) {
# Find next test to schedule
# - Try to use same configuration as worker used last time
# - Limit number of parallel ndb tests
my $next;
my $second_best;
@ -835,12 +818,6 @@ sub run_test_server ($$$) {
redo;
}
# Limit the number of parallel NDB tests
if ($t->{ndb_test} and $num_ndb_tests >= $max_ndb){
#mtr_report("Skipping, num ndb is already at max, $num_ndb_tests");
next;
}
# From secondary choices, we prefer to pick a 'long-running' test if
# possible; this helps avoid getting stuck with a few of those at the
# end of high --parallel runs, with most workers being idle.
@ -893,7 +870,6 @@ sub run_test_server ($$$) {
delete $next->{criteria};
$next->write_test($sock, 'TESTCASE');
$running{$next->key()}= $next;
$num_ndb_tests++ if ($next->{ndb_test});
}
else {
# No more test, tell child to exit
@ -1130,9 +1106,6 @@ sub command_line_setup {
# Control what test suites or cases to run
'force+' => \$opt_force,
'with-ndbcluster-only' => \&collect_option,
'ndb|include-ndbcluster' => \$opt_include_ndbcluster,
'skip-ndbcluster|skip-ndb' => \$opt_skip_ndbcluster,
'suite|suites=s' => \$opt_suites,
'skip-rpl' => \&collect_option,
'skip-test=s' => \&collect_option,
@ -2007,51 +1980,6 @@ sub executable_setup () {
$exe_mysql_embedded= mtr_exe_maybe_exists("$basedir/libmysqld/examples/mysql_embedded");
if ( $ndbcluster_enabled )
{
# Look for single threaded NDB
$exe_ndbd=
my_find_bin($bindir,
["storage/ndb/src/kernel", "libexec", "sbin", "bin"],
"ndbd");
# Look for multi threaded NDB
$exe_ndbmtd=
my_find_bin($bindir,
["storage/ndb/src/kernel", "libexec", "sbin", "bin"],
"ndbmtd", NOT_REQUIRED);
if ($exe_ndbmtd)
{
my $mtr_ndbmtd = $ENV{MTR_NDBMTD};
if ($mtr_ndbmtd)
{
mtr_report(" - multi threaded ndbd found, will be used always");
$exe_ndbd = $exe_ndbmtd;
}
else
{
mtr_report(" - multi threaded ndbd found, will be ".
"used \"round robin\"");
}
}
$exe_ndb_mgmd=
my_find_bin($bindir,
["storage/ndb/src/mgmsrv", "libexec", "sbin", "bin"],
"ndb_mgmd");
$exe_ndb_mgm=
my_find_bin($bindir,
["storage/ndb/src/mgmclient", "bin"],
"ndb_mgm");
$exe_ndb_waiter=
my_find_bin($bindir,
["storage/ndb/tools/", "bin"],
"ndb_waiter");
}
# Look for mysqltest executable
if ( $opt_embedded_server )
{
@ -2276,14 +2204,6 @@ sub environment_setup {
}
}
# --------------------------------------------------------------------------
# Add the path where libndbclient can be found
# --------------------------------------------------------------------------
if ( $ndbcluster_enabled )
{
push(@ld_library_paths, "$basedir/storage/ndb/src/.libs");
}
# --------------------------------------------------------------------------
# Valgrind needs to be run with debug libraries, otherwise it's almost
# impossible to add correct suppressions; that means if "/usr/lib/debug"
@ -2369,34 +2289,6 @@ sub environment_setup {
#
$ENV{HAVE_BROKEN_DNS}= defined(gethostbyname('invalid_hostname'));
# ----------------------------------------------------
# Setup env for NDB
# ----------------------------------------------------
if ( $ndbcluster_enabled )
{
$ENV{'NDB_MGM'}=
my_find_bin($bindir,
["storage/ndb/src/mgmclient", "bin"],
"ndb_mgm");
$ENV{'NDB_TOOLS_DIR'}=
my_find_dir($bindir,
["storage/ndb/tools", "bin"]);
$ENV{'NDB_EXAMPLES_DIR'}=
my_find_dir($basedir,
["storage/ndb/ndbapi-examples", "bin"]);
$ENV{'NDB_EXAMPLES_BINARY'}=
my_find_bin($bindir,
["storage/ndb/ndbapi-examples/ndbapi_simple", "bin"],
"ndbapi_simple", NOT_REQUIRED);
my $path_ndb_testrun_log= "$opt_vardir/log/ndb_testrun.log";
$ENV{'NDB_TOOLS_OUTPUT'}= $path_ndb_testrun_log;
$ENV{'NDB_EXAMPLES_OUTPUT'}= $path_ndb_testrun_log;
}
# ----------------------------------------------------
# mysql clients
# ----------------------------------------------------
@ -2864,317 +2756,6 @@ sub vs_config_dirs ($$) {
"$basedir/$path_part/debug/$exe");
}
sub check_ndbcluster_support {
my $ndbcluster_supported = 0;
if ($mysqld_variables{'ndb-connectstring'})
{
$ndbcluster_supported = 1;
}
if ($opt_skip_ndbcluster && $opt_include_ndbcluster)
{
# User is ambivalent. Theoretically the arg which was
# given last on the command line should win, but that order is
# unknown at this time.
mtr_error("Ambiguous command, both --include-ndbcluster " .
"and --skip-ndbcluster were specified");
}
# Check if this is MySQL Cluster, i.e. the mysql version string ends
# with -ndb-Y.Y.Y[-status]
if ( defined $mysql_version_extra &&
$mysql_version_extra =~ /-ndb-([0-9]*)\.([0-9]*)\.([0-9]*)/ )
{
# MySQL Cluster tree
mtr_report(" - MySQL Cluster detected");
if ($opt_skip_ndbcluster)
{
mtr_report(" - skipping ndbcluster(--skip-ndbcluster)");
return;
}
if (!$ndbcluster_supported)
{
# MySQL Cluster tree, but mysqld was not compiled with
# ndbcluster -> fail unless --skip-ndbcluster was used
mtr_error("This is MySQL Cluster but mysqld does not " .
"support ndbcluster. Use --skip-ndbcluster to " .
"force mtr to run without it.");
}
# mysqld was compiled with ndbcluster -> auto enable
}
else
{
# Not a MySQL Cluster tree
if (!$ndbcluster_supported)
{
if ($opt_include_ndbcluster)
{
mtr_error("Could not detect ndbcluster support ".
"requested with --include-ndbcluster");
}
# Silently skip, mysqld was compiled without ndbcluster
# which is the default case
return;
}
if ($opt_skip_ndbcluster)
{
# Compiled with ndbcluster but ndbcluster skipped
mtr_report(" - skipping ndbcluster(--skip-ndbcluster)");
return;
}
# Not a MySQL Cluster tree, enable ndbcluster
# if --include-ndbcluster was used
if ($opt_include_ndbcluster)
{
# enable ndbcluster
}
else
{
mtr_report(" - skipping ndbcluster(disabled by default)");
return;
}
}
mtr_report(" - enabling ndbcluster");
$ndbcluster_enabled= 1;
# Add MySQL Cluster test suites
push @DEFAULT_SUITES, qw(ndb ndb_binlog rpl_ndb ndb_rpl ndb_memcache);
return;
}
sub ndbcluster_wait_started {
my $cluster= shift;
my $ndb_waiter_extra_opt= shift;
my $path_waitlog= join('/', $opt_vardir, $cluster->name(), "ndb_waiter.log");
my $args;
mtr_init_args(\$args);
mtr_add_arg($args, "--defaults-file=%s", $path_config_file);
mtr_add_arg($args, "--defaults-group-suffix=%s", $cluster->suffix());
mtr_add_arg($args, "--timeout=%d", $opt_start_timeout);
if ($ndb_waiter_extra_opt)
{
mtr_add_arg($args, "$ndb_waiter_extra_opt");
}
# Start the ndb_waiter, which will connect to the ndb_mgmd
# and poll it for the state of the ndbd's; it returns when
# all nodes in the cluster are started
my $res= My::SafeProcess->run
(
name => "ndb_waiter ".$cluster->name(),
path => $exe_ndb_waiter,
args => \$args,
output => $path_waitlog,
error => $path_waitlog,
append => 1,
);
# Check that ndb_mgmd(s) are still alive
foreach my $ndb_mgmd ( in_cluster($cluster, ndb_mgmds()) )
{
my $proc= $ndb_mgmd->{proc};
if ( ! $proc->wait_one(0) )
{
mtr_warning("$proc died");
return 2;
}
}
# Check that all started ndbd(s) are still alive
foreach my $ndbd ( in_cluster($cluster, ndbds()) )
{
my $proc= $ndbd->{proc};
next unless defined $proc;
if ( ! $proc->wait_one(0) )
{
mtr_warning("$proc died");
return 3;
}
}
if ($res)
{
mtr_verbose("ndbcluster_wait_started failed");
return 1;
}
return 0;
}
sub ndb_mgmd_wait_started($) {
my ($cluster)= @_;
my $retries= 100;
while ($retries)
{
my $result= ndbcluster_wait_started($cluster, "--no-contact");
if ($result == 0)
{
# ndb_mgmd is started
mtr_verbose("ndb_mgmd is started");
return 0;
}
elsif ($result > 1)
{
mtr_warning("Cluster process failed while waiting for start");
return $result;
}
mtr_milli_sleep(100);
$retries--;
}
return 1;
}
sub ndb_mgmd_stop{
my $ndb_mgmd= shift or die "usage: ndb_mgmd_stop(<ndb_mgmd>)";
my $host=$ndb_mgmd->value('HostName');
my $port=$ndb_mgmd->value('PortNumber');
mtr_verbose("Stopping cluster '$host:$port'");
my $args;
mtr_init_args(\$args);
mtr_add_arg($args, "--ndb-connectstring=%s:%s", $host,$port);
mtr_add_arg($args, "-e");
mtr_add_arg($args, "shutdown");
My::SafeProcess->run
(
name => "ndb_mgm shutdown $host:$port",
path => $exe_ndb_mgm,
args => \$args,
output => "/dev/null",
);
}
sub ndb_mgmd_start ($$) {
my ($cluster, $ndb_mgmd)= @_;
mtr_verbose("ndb_mgmd_start");
my $dir= $ndb_mgmd->value("DataDir");
mkpath($dir) unless -d $dir;
my $args;
mtr_init_args(\$args);
mtr_add_arg($args, "--defaults-file=%s", $path_config_file);
mtr_add_arg($args, "--defaults-group-suffix=%s", $cluster->suffix());
mtr_add_arg($args, "--mycnf");
mtr_add_arg($args, "--nodaemon");
my $path_ndb_mgmd_log= "$dir/ndb_mgmd.log";
$ndb_mgmd->{'proc'}= My::SafeProcess->new
(
name => $ndb_mgmd->after('cluster_config.'),
path => $exe_ndb_mgmd,
args => \$args,
output => $path_ndb_mgmd_log,
error => $path_ndb_mgmd_log,
append => 1,
verbose => $opt_verbose,
shutdown => sub { ndb_mgmd_stop($ndb_mgmd) },
);
mtr_verbose("Started $ndb_mgmd->{proc}");
# FIXME Should not be needed
# Unfortunately the cluster nodes will fail to start
# if ndb_mgmd has not started properly
if (ndb_mgmd_wait_started($cluster))
{
mtr_warning("Failed to wait for start of ndb_mgmd");
return 1;
}
return 0;
}
sub ndbd_stop {
# Intentionally left empty, ndbd nodes will be shut down
# by sending "shutdown" to ndb_mgmd
}
our $exe_ndbmtd_counter= 0;
sub ndbd_start {
my ($cluster, $ndbd)= @_;
mtr_verbose("ndbd_start");
my $dir= $ndbd->value("DataDir");
mkpath($dir) unless -d $dir;
my $args;
mtr_init_args(\$args);
mtr_add_arg($args, "--defaults-file=%s", $path_config_file);
mtr_add_arg($args, "--defaults-group-suffix=%s", $cluster->suffix());
mtr_add_arg($args, "--nodaemon");
# > 5.0 { 'character-sets-dir' => \&fix_charset_dir },
my $exe= $exe_ndbd;
if ($exe_ndbmtd and ($exe_ndbmtd_counter++ % 2) == 0)
{
# Use ndbmtd every other time
$exe= $exe_ndbmtd;
}
my $path_ndbd_log= "$dir/ndbd.log";
my $proc= My::SafeProcess->new
(
name => $ndbd->after('cluster_config.'),
path => $exe,
args => \$args,
output => $path_ndbd_log,
error => $path_ndbd_log,
append => 1,
verbose => $opt_verbose,
shutdown => sub { ndbd_stop($ndbd) },
);
mtr_verbose("Started $proc");
$ndbd->{proc}= $proc;
return;
}
sub ndbcluster_start ($) {
my ($cluster) = @_;
mtr_verbose("ndbcluster_start '".$cluster->name()."'");
foreach my $ndb_mgmd ( in_cluster($cluster, ndb_mgmds()) )
{
next if started($ndb_mgmd);
ndb_mgmd_start($cluster, $ndb_mgmd);
}
foreach my $ndbd ( in_cluster($cluster, ndbds()) )
{
next if started($ndbd);
ndbd_start($cluster, $ndbd);
}
return 0;
}
sub mysql_server_start($) {
my ($mysqld, $tinfo) = @_;
@ -4109,8 +3690,6 @@ sub config_files($) {
sub _like { return $config ? $config->like($_[0]) : (); }
sub mysqlds { return _like('mysqld\.'); }
sub ndbds { return _like('cluster_config\.ndbd\.');}
sub ndb_mgmds { return _like('cluster_config\.ndb_mgmd\.'); }
sub fix_servers($) {
my ($tinfo) = @_;
@ -4121,19 +3700,6 @@ sub fix_servers($) {
START => \&mysql_server_start,
WAIT => \&mysql_server_wait,
},
qr/mysql_cluster\./ => {
SORT => 200,
START => \&ndbcluster_start,
WAIT => \&ndbcluster_wait_started,
},
qr/cluster_config\.ndb_mgmd\./ => {
SORT => 210,
START => undef,
},
qr/cluster_config\.ndbd\./ => {
SORT => 220,
START => undef,
},
$tinfo->{suite}->servers()
);
for ($config->groups()) {
@ -4783,8 +4349,8 @@ sub extract_warning_lines ($$) {
# Perl code.
my @antipatterns =
(
@global_suppressions,
qr/error .*connecting to master/,
qr/Plugin 'ndbcluster' will be forced to shutdown/,
qr/InnoDB: Error: in ALTER TABLE `test`.`t[12]`/,
qr/InnoDB: Error: table `test`.`t[12]` .*does not exist in the InnoDB internal/,
qr/InnoDB: Warning: Setting innodb_use_sys_malloc/,
@ -4798,7 +4364,6 @@ sub extract_warning_lines ($$) {
qr/Now setting lower_case_table_names to [02]/,
qr/Setting lower_case_table_names=2/,
qr/You have forced lower_case_table_names to 0/,
qr/Plugin 'ndbcluster' will be forced to shutdow/,
qr/deprecated/,
qr/Slave SQL thread retried transaction/,
qr/Slave \(additional info\)/,
@ -5668,18 +5233,6 @@ sub servers_need_restart($) {
############################################
#
# Filter a list of servers and return only those that are part
# of the specified cluster
#
sub in_cluster {
my ($cluster)= shift;
# Return only processes for a specific cluster
return grep { $_->suffix() eq $cluster->suffix() } @_;
}
#
# Filter a list of servers and return the SafeProcess
# for only those that are started or stopped
@ -6393,9 +5946,6 @@ Options to control what test suites or cases to run
the execution will continue from the next test file.
When specified twice, execution will continue executing
the failed test file from the next command.
with-ndbcluster-only Run only tests that include "ndb" in the filename
skip-ndb[cluster] Skip all tests that need cluster. Default.
include-ndb[cluster] Enable all tests that need cluster
do-test=PREFIX or REGEX
Run test cases which name are prefixed with PREFIX
or fulfills REGEX
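
The removed ndb_mgmd_wait_started/ndbcluster_wait_started pair above boils down to a bounded polling loop: probe repeatedly, treat a return code of 0 as "started", treat anything greater than 1 as a dead process worth giving up on immediately, and time out after a fixed number of 100 ms retries. A generic sketch of that shape, where probe(), the retry count and the return codes are illustrative rather than the real mtr API:

use strict;
use warnings;
use Time::HiRes qw(usleep);

# Poll $probe->() until it reports success, a hard failure, or we run out
# of retries: 0 = started, >1 = give up now, 1 = timed out.
sub wait_until_started {
  my ($probe, $retries) = @_;
  while ($retries--) {
    my $result = $probe->();
    return 0 if $result == 0;          # started
    return $result if $result > 1;     # a process died: give up at once
    usleep(100_000);                   # 100 ms between polls
  }
  return 1;                            # timed out
}

my $calls = 0;
my $rc = wait_until_started(sub { ++$calls >= 3 ? 0 : 1 }, 100);
print "rc=$rc after $calls probes\n";  # rc=0 after 3 probes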

View File

@ -259,3 +259,20 @@ analyze delete from t1 returning *;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 0 0 100.00 100.00
drop table t1;
#
# MDEV-6396: ANALYZE INSERT/REPLACE is accepted, but does not produce a plan
#
create table t1 (a int primary key, b int);
analyze insert into t1 values (1,1);
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 INSERT t1 ALL NULL NULL NULL NULL NULL NULL 100.00 100.00 NULL
select * from t1;
a b
1 1
analyze replace t1 values (1,2);
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 INSERT t1 ALL NULL NULL NULL NULL NULL NULL 100.00 100.00 NULL
select * from t1;
a b
1 2
drop table t1;

View File

@ -0,0 +1,174 @@
CREATE TABLE t1 (a INT PRIMARY KEY)|
BEGIN NOT ATOMIC
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (2);
INSERT INTO t1 VALUES (3);
END|
SELECT * FROM t1|
a
1
2
3
PREPARE stmt FROM "BEGIN NOT ATOMIC
INSERT INTO t1 VALUES (4);
INSERT INTO t1 VALUES (5);
INSERT INTO t1 VALUES (?);
END";
SET @val = 6|
reset master|
EXECUTE stmt USING @val|
SELECT * FROM t1|
a
1
2
3
4
5
6
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (4)
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (5)
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (6)
master-bin.000001 # Query # # COMMIT
PREPARE stmt FROM "BEGIN NOT ATOMIC
DECLARE v_res INT;
SELECT COUNT(*) INTO v_res FROM t1;
SELECT 'Hello World', v_res INTO ?,?;
END"|
SET @val="", @val2=""|
EXECUTE stmt USING @val, @val2|
SELECT @val, @val2|
@val @val2
Hello World 6
DROP TABLE t1|
CREATE DATABASE mysqltest1|
CREATE PROCEDURE mysqltest1.sp1()
BEGIN
PREPARE stmt FROM "BEGIN NOT ATOMIC CREATE TABLE t1 AS SELECT DATABASE(); END";
EXECUTE stmt;
END|
CALL mysqltest1.sp1()|
SELECT * FROM mysqltest1.t1|
DATABASE()
mysqltest1
USE mysqltest1|
DROP DATABASE mysqltest1|
BEGIN NOT ATOMIC CREATE TABLE t1(a int); END|
ERROR 3D000: No database selected
BEGIN NOT ATOMIC SET @a=1; CREATE TABLE test.t1(a int); END|
USE test|
show tables|
Tables_in_test
t1
drop table t1|
/**/ if (select count(*) from information_schema.tables
where table_schema='test' and table_name='t1') = 0
then
create table t1 (a int);
end if|
show tables|
Tables_in_test
t1
/**/ if (select count(*) from information_schema.tables
where table_schema='test' and table_name='t1') = 0
then
create table t1 (a int);
end if|
show tables|
Tables_in_test
t1
case (select table_name from information_schema.tables where table_schema='test')
when 't1' then create table t2 (b int);
when 't2' then create table t3 (b int);
else signal sqlstate '42S02';
end case|
show tables|
Tables_in_test
t1
t2
case
when database() = 'test' then create table t3 (test text);
when now() < date'2001-02-03' then create table oops (machine time);
end case|
show tables|
Tables_in_test
t1
t2
t3
loop
create table t4 (a int);
end loop|
ERROR 42S01: Table 't4' already exists
show tables|
Tables_in_test
t1
t2
t3
t4
set @a=0;
repeat
set @a = @a + 1;
until @a > 5
end repeat|
select @a|
@a
6
/**/ while (select count(*) from information_schema.tables where table_schema='test')
do
select concat('drop table ', table_name) into @a
from information_schema.tables where table_schema='test' limit 1;
select @a as 'executing:';
prepare dt from @a;
execute dt;
end while|
executing: drop table t1
executing: drop table t2
executing: drop table t3
executing: drop table t4
create table t1 (x int)|
create function fn(a int) returns int
begin
insert t1 values (a+7);
return a+8;
end|
reset master|
/**/ if fn(9) > 5 then
select 1;
end if|
1
1
prepare stmt from "if fn(?) > 6 then
begin
declare a int;
set a=?*2;
insert t1 values(a+?);
end;
end if"|
set @a=1, @b=2, @c=3|
execute stmt using @a, @b, @c|
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; SELECT `test`.`fn`(9)
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; SELECT `test`.`fn`(1)
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Query # # use `test`; insert t1 values( NAME_CONST('a',4)+3)
master-bin.000001 # Query # # COMMIT
drop function fn|
drop table t1|
begin not atomic select @@sql_mode; end|
@@sql_mode
create table t1 (a int)|
select a from t1 having a > 1|
a
begin not atomic select a from t1 having a > 1; end|
a
drop table t1|

View File

@ -351,7 +351,7 @@ a
DROP TABLE t1;
set global LC_MESSAGES=convert((@@global.log_bin_trust_function_creators)
using cp1250);
ERROR HY000: Unknown locale: '1'
ERROR HY000: Unknown locale: 'ON'
#
# Start of 5.6 tests
#

View File

@ -0,0 +1,24 @@
#
# Test for mysqldump's galera-sst-mode option
#
#
# MDEV-6490: mysqldump unknown option --galera-sst-mode
#
CREATE DATABASE bug6490;
USE bug6490;
CREATE TABLE t1(c1 INT);
INSERT INTO t1 values (1);
INSERT INTO t1 values (2);
# Save the current gtid_binlog_state.
# Take a dump of bug6490 database
DROP TABLE t1;
# Load the dump
RESET MASTER;
SELECT * from t1;
c1
1
2
# Compare the two gtid_binlog_state's
# Cleanup
DROP DATABASE bug6490;
# End of test

View File

@ -58,6 +58,7 @@ authentication_string
password_expired N
is_role N
default_role
max_statement_time 0.000000
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' REQUIRE CIPHER 'EDH-RSA-DES-CBC3-SHA'
@ -85,7 +86,7 @@ delete from mysql.user where user='mysqltest_1';
flush privileges;
delete from mysql.user where user='mysqltest_1';
flush privileges;
grant usage on *.* to mysqltest_1@localhost with max_queries_per_hour 10;
grant usage on *.* to mysqltest_1@localhost with max_queries_per_hour 10 max_statement_time 60;
select * from mysql.user where user="mysqltest_1";
Host localhost
User mysqltest_1
@ -132,10 +133,11 @@ authentication_string
password_expired N
is_role N
default_role
max_statement_time 60.000000
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' WITH MAX_QUERIES_PER_HOUR 10
grant usage on *.* to mysqltest_1@localhost with max_updates_per_hour 20 max_connections_per_hour 30;
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' WITH MAX_QUERIES_PER_HOUR 10 MAX_STATEMENT_TIME 60.000000
grant usage on *.* to mysqltest_1@localhost with max_updates_per_hour 20 max_connections_per_hour 30 max_statement_time 0;
select * from mysql.user where user="mysqltest_1";
Host localhost
User mysqltest_1
@ -182,6 +184,7 @@ authentication_string
password_expired N
is_role N
default_role
max_statement_time 0.000000
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' WITH MAX_QUERIES_PER_HOUR 10 MAX_UPDATES_PER_HOUR 20 MAX_CONNECTIONS_PER_HOUR 30

View File

@ -5,7 +5,7 @@ plugin_version 1.0
plugin_status ACTIVE
plugin_type DAEMON
plugin_library handlersocket.so
plugin_library_version 1.8
plugin_library_version 1.10
plugin_author higuchi dot akira at dena dot jp
plugin_description Direct access into InnoDB
plugin_license BSD

View File

@ -1,2 +0,0 @@
1
1

View File

@ -1,3 +0,0 @@
select 1;
1
1

View File

@ -1,2 +0,0 @@
have_ndb_example
1

View File

@ -74,7 +74,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 17 Using sort_union(i1,i2); Using where
explain select * from t0 where key2 = 45 or key1 <=> null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 range i1,i2 i2 4 NULL 1 Using where
1 SIMPLE t0 range i1,i2 i2 4 NULL 1 Using index condition
explain select * from t0 where key2 = 45 or key1 is not null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 ALL i1,i2 NULL NULL NULL 1024 Using where

View File

@ -48,6 +48,7 @@ SCHEMA_PRIVILEGES TABLE_SCHEMA
SESSION_STATUS VARIABLE_NAME
SESSION_VARIABLES VARIABLE_NAME
STATISTICS TABLE_SCHEMA
SYSTEM_VARIABLES VARIABLE_NAME
TABLES TABLE_SCHEMA
TABLESPACES TABLESPACE_NAME
TABLE_CONSTRAINTS CONSTRAINT_SCHEMA
@ -102,6 +103,7 @@ SCHEMA_PRIVILEGES TABLE_SCHEMA
SESSION_STATUS VARIABLE_NAME
SESSION_VARIABLES VARIABLE_NAME
STATISTICS TABLE_SCHEMA
SYSTEM_VARIABLES VARIABLE_NAME
TABLES TABLE_SCHEMA
TABLESPACES TABLESPACE_NAME
TABLE_CONSTRAINTS CONSTRAINT_SCHEMA

View File

@ -42,7 +42,7 @@ insert into t5 values (10);
create view v1 (c) as
SELECT table_name FROM information_schema.TABLES
WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND
table_name not like 'ndb_%' AND table_name not like 'innodb_%' AND
table_name not like 'innodb_%' AND
table_name not like 'xtradb_%';
select * from v1;
c
@ -75,6 +75,7 @@ SCHEMA_PRIVILEGES
SESSION_STATUS
SESSION_VARIABLES
STATISTICS
SYSTEM_VARIABLES
TABLES
TABLESPACES
TABLE_CONSTRAINTS
@ -128,6 +129,11 @@ TABLE_CONSTRAINTS TABLE_CONSTRAINTS
TABLE_PRIVILEGES TABLE_PRIVILEGES
TABLE_STATISTICS TABLE_STATISTICS
TRIGGERS TRIGGERS
t1 t1
t2 t2
t3 t3
t4 t4
t5 t5
table_stats table_stats
tables_priv tables_priv
time_zone time_zone
@ -135,11 +141,6 @@ time_zone_leap_second time_zone_leap_second
time_zone_name time_zone_name
time_zone_transition time_zone_transition
time_zone_transition_type time_zone_transition_type
t1 t1
t4 t4
t2 t2
t3 t3
t5 t5
select c,table_name from v1
left join information_schema.TABLES v2 on (v1.c=v2.table_name)
where v1.c like "t%";
@ -150,6 +151,11 @@ TABLE_CONSTRAINTS TABLE_CONSTRAINTS
TABLE_PRIVILEGES TABLE_PRIVILEGES
TABLE_STATISTICS TABLE_STATISTICS
TRIGGERS TRIGGERS
t1 t1
t2 t2
t3 t3
t4 t4
t5 t5
table_stats table_stats
tables_priv tables_priv
time_zone time_zone
@ -157,11 +163,6 @@ time_zone_leap_second time_zone_leap_second
time_zone_name time_zone_name
time_zone_transition time_zone_transition
time_zone_transition_type time_zone_transition_type
t1 t1
t4 t4
t2 t2
t3 t3
t5 t5
select c, v2.table_name from v1
right join information_schema.TABLES v2 on (v1.c=v2.table_name)
where v1.c like "t%";
@ -172,6 +173,11 @@ TABLE_CONSTRAINTS TABLE_CONSTRAINTS
TABLE_PRIVILEGES TABLE_PRIVILEGES
TABLE_STATISTICS TABLE_STATISTICS
TRIGGERS TRIGGERS
t1 t1
t2 t2
t3 t3
t4 t4
t5 t5
table_stats table_stats
tables_priv tables_priv
time_zone time_zone
@ -179,11 +185,6 @@ time_zone_leap_second time_zone_leap_second
time_zone_name time_zone_name
time_zone_transition time_zone_transition
time_zone_transition_type time_zone_transition_type
t1 t1
t4 t4
t2 t2
t3 t3
t5 t5
select table_name from information_schema.TABLES
where table_schema = "mysqltest" and table_name like "t%";
table_name
@ -819,6 +820,7 @@ information_schema PROCESSLIST INFO
information_schema ROUTINES DTD_IDENTIFIER
information_schema ROUTINES ROUTINE_DEFINITION
information_schema ROUTINES ROUTINE_COMMENT
information_schema SYSTEM_VARIABLES ENUM_VALUE_LIST
information_schema TRIGGERS ACTION_CONDITION
information_schema TRIGGERS ACTION_STATEMENT
information_schema VIEWS VIEW_DEFINITION
@ -1637,6 +1639,33 @@ drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists
Warnings:
Warning 1265 Data truncated for column 'VARIABLE_VALUE' at row 1
select * from information_schema.global_variables where variable_name like 'init%' order by variable_name;
VARIABLE_NAME VARIABLE_VALUE
INIT_CONNECT drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists t1;
drop table if exists t1;drop table if exists
INIT_FILE
INIT_SLAVE
Warnings:
Warning 1265 Data truncated for column 'VARIABLE_VALUE' at row #
set global init_connect="";
create table t0 select * from information_schema.global_status where VARIABLE_NAME='COM_SELECT';
SELECT 1;

View File

@ -46,6 +46,7 @@ SCHEMA_PRIVILEGES
SESSION_STATUS
SESSION_VARIABLES
STATISTICS
SYSTEM_VARIABLES
TABLES
TABLESPACES
TABLE_CONSTRAINTS
@ -115,6 +116,7 @@ SCHEMA_PRIVILEGES TABLE_SCHEMA
SESSION_STATUS VARIABLE_NAME
SESSION_VARIABLES VARIABLE_NAME
STATISTICS TABLE_SCHEMA
SYSTEM_VARIABLES VARIABLE_NAME
TABLES TABLE_SCHEMA
TABLESPACES TABLESPACE_NAME
TABLE_CONSTRAINTS CONSTRAINT_SCHEMA
@ -184,6 +186,7 @@ SCHEMA_PRIVILEGES TABLE_SCHEMA
SESSION_STATUS VARIABLE_NAME
SESSION_VARIABLES VARIABLE_NAME
STATISTICS TABLE_SCHEMA
SYSTEM_VARIABLES VARIABLE_NAME
TABLES TABLE_SCHEMA
TABLESPACES TABLESPACE_NAME
TABLE_CONSTRAINTS CONSTRAINT_SCHEMA
@ -258,6 +261,7 @@ SCHEMA_PRIVILEGES information_schema.SCHEMA_PRIVILEGES 1
SESSION_STATUS information_schema.SESSION_STATUS 1
SESSION_VARIABLES information_schema.SESSION_VARIABLES 1
STATISTICS information_schema.STATISTICS 1
SYSTEM_VARIABLES information_schema.SYSTEM_VARIABLES 1
TABLES information_schema.TABLES 1
TABLESPACES information_schema.TABLESPACES 1
TABLE_CONSTRAINTS information_schema.TABLE_CONSTRAINTS 1
@ -317,6 +321,7 @@ Database: information_schema
| SESSION_STATUS |
| SESSION_VARIABLES |
| STATISTICS |
| SYSTEM_VARIABLES |
| TABLES |
| TABLESPACES |
| TABLE_CONSTRAINTS |
@ -376,6 +381,7 @@ Database: INFORMATION_SCHEMA
| SESSION_STATUS |
| SESSION_VARIABLES |
| STATISTICS |
| SYSTEM_VARIABLES |
| TABLES |
| TABLESPACES |
| TABLE_CONSTRAINTS |
@ -391,7 +397,7 @@ Database: INFORMATION_SCHEMA
Wildcard: inf_rmation_schema
| Databases |
| information_schema |
SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name<>'ndb_binlog_index' AND table_name<>'ndb_apply_status' GROUP BY TABLE_SCHEMA;
SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') GROUP BY TABLE_SCHEMA;
table_schema count(*)
information_schema 54
information_schema 55
mysql 30

View File

@ -1002,7 +1002,7 @@ insert into t2 (b) values (null), (null), (null);
set optimizer_switch='extended_keys=on';
explain select a from t1 where b is null order by a desc limit 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref b b 9 const 2 Using where; Using filesort
1 SIMPLE t1 index b PRIMARY 8 NULL 3 Using where
select a from t1 where b is null order by a desc limit 2;
a
3

View File

@ -2090,10 +2090,10 @@ SELECT t1.b, t2.c, t2.d FROM t2 LEFT JOIN t1 ON t2.c = t1.a
WHERE t1.pk BETWEEN 5 AND 6 AND t1.b IS NULL OR t1.b = 5
ORDER BY t1.b;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ref PRIMARY,idx idx 4 const 2 100.00 Using where; Using filesort
1 SIMPLE t1 ref PRIMARY,idx idx 4 const 2 100.00 Using where
1 SIMPLE t2 ref c c 5 test.t1.a 2 100.00
Warnings:
Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` join `test`.`t1` where ((`test`.`t2`.`c` = `test`.`t1`.`a`) and (((`test`.`t1`.`pk` between 5 and 6) and isnull(`test`.`t1`.`b`)) or (`test`.`t1`.`b` = 5))) order by `test`.`t1`.`b`
Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` join `test`.`t1` where ((`test`.`t2`.`c` = `test`.`t1`.`a`) and (`test`.`t1`.`b` = 5)) order by `test`.`t1`.`b`
SELECT t1.b, t2.c, t2.d FROM t2 LEFT JOIN t1 ON t2.c = t1.a
WHERE t1.pk BETWEEN 5 AND 6 AND t1.b IS NULL OR t1.b = 5
ORDER BY t1.b;
@ -2222,4 +2222,32 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
Warnings:
Note 1003 select `test`.`t1`.`i1` AS `i1`,`test`.`t2`.`i2` AS `i2`,`test`.`t3`.`i3` AS `i3`,`test`.`t3`.`d3` AS `d3` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(((`test`.`t2`.`i2` = `test`.`t1`.`i1`) and (`test`.`t3`.`i3` = `test`.`t1`.`i1`))) where ((`test`.`t3`.`d3` = 0) or isnull(`test`.`t3`.`d3`))
DROP TABLE t1,t2,t3;
#
# MDEV-6634: Wrong estimates for ref(const) and key IS NULL predicate
#
create table t1(a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t2 (a int, b int, c int, key(b), key(c));
insert into t2 select
@a:=A.a + 10*B.a+100*C.a,
IF(@a<900, NULL, @a),
IF(@a<500, NULL, @a)
from t1 A, t1 B, t1 C;
delete from t1 where a=0;
# Check that there are different #rows of NULLs for b and c, both !=10:
explain select * from t2 force index (b) where b is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref b b 5 const 780 Using index condition
explain select * from t2 force index (c) where c is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref c c 5 const 393 Using index condition
explain select * from t1 left join t2 on t2.b is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 9
1 SIMPLE t2 ref b b 5 const 780 Using where
explain select * from t1 left join t2 on t2.c is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 9
1 SIMPLE t2 ref c c 5 const 393 Using where
drop table t1,t2;
SET optimizer_switch=@save_optimizer_switch;

View File

@ -2101,10 +2101,10 @@ SELECT t1.b, t2.c, t2.d FROM t2 LEFT JOIN t1 ON t2.c = t1.a
WHERE t1.pk BETWEEN 5 AND 6 AND t1.b IS NULL OR t1.b = 5
ORDER BY t1.b;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ref PRIMARY,idx idx 4 const 2 100.00 Using where; Using filesort
1 SIMPLE t1 ref PRIMARY,idx idx 4 const 2 100.00 Using where
1 SIMPLE t2 ref c c 5 test.t1.a 2 100.00
Warnings:
Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` join `test`.`t1` where ((`test`.`t2`.`c` = `test`.`t1`.`a`) and (((`test`.`t1`.`pk` between 5 and 6) and isnull(`test`.`t1`.`b`)) or (`test`.`t1`.`b` = 5))) order by `test`.`t1`.`b`
Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` join `test`.`t1` where ((`test`.`t2`.`c` = `test`.`t1`.`a`) and (`test`.`t1`.`b` = 5)) order by `test`.`t1`.`b`
SELECT t1.b, t2.c, t2.d FROM t2 LEFT JOIN t1 ON t2.c = t1.a
WHERE t1.pk BETWEEN 5 AND 6 AND t1.b IS NULL OR t1.b = 5
ORDER BY t1.b;
@ -2233,6 +2233,34 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
Warnings:
Note 1003 select `test`.`t1`.`i1` AS `i1`,`test`.`t2`.`i2` AS `i2`,`test`.`t3`.`i3` AS `i3`,`test`.`t3`.`d3` AS `d3` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(((`test`.`t2`.`i2` = `test`.`t1`.`i1`) and (`test`.`t3`.`i3` = `test`.`t1`.`i1`))) where ((`test`.`t3`.`d3` = 0) or isnull(`test`.`t3`.`d3`))
DROP TABLE t1,t2,t3;
#
# MDEV-6634: Wrong estimates for ref(const) and key IS NULL predicate
#
create table t1(a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t2 (a int, b int, c int, key(b), key(c));
insert into t2 select
@a:=A.a + 10*B.a+100*C.a,
IF(@a<900, NULL, @a),
IF(@a<500, NULL, @a)
from t1 A, t1 B, t1 C;
delete from t1 where a=0;
# Check that there are different #rows of NULLs for b and c, both !=10:
explain select * from t2 force index (b) where b is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref b b 5 const 780 Using index condition
explain select * from t2 force index (c) where c is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref c c 5 const 393 Using index condition
explain select * from t1 left join t2 on t2.b is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 9
1 SIMPLE t2 ref b b 5 const 780 Using where
explain select * from t1 left join t2 on t2.c is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 9
1 SIMPLE t2 ref c c 5 const 393 Using where
drop table t1,t2;
SET optimizer_switch=@save_optimizer_switch;
set join_cache_level=default;
show variables like 'join_cache_level';

View File

@ -42,6 +42,7 @@ select * from mysql.slow_log where sql_text NOT LIKE '%slow_log%';
start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text thread_id
# Switch to connection default
set global slow_query_log= ON;
set local slow_query_log= ON;
# Switch to connection con1
set session long_query_time = @long_query_time;
select sleep(@long_query_time + 1);
@ -49,7 +50,13 @@ sleep(@long_query_time + 1)
0
select * from mysql.slow_log where sql_text NOT LIKE '%slow_log%';
start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text thread_id
TIMESTAMP USER_HOST QUERY_TIME 00:00:00.000000 1 0 test 0 0 1 select sleep(@long_query_time + 1) THREAD_ID
set local slow_query_log= ON;
select sleep(@long_query_time + 2);
sleep(@long_query_time + 2)
0
select * from mysql.slow_log where sql_text NOT LIKE '%slow_log%';
start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text thread_id
TIMESTAMP USER_HOST QUERY_TIME 00:00:00.000000 1 0 test 0 0 1 select sleep(@long_query_time + 2) THREAD_ID
# Switch to connection default
show global variables
where Variable_name = 'general_log' or Variable_name = 'slow_query_log';
@ -62,6 +69,7 @@ set global general_log= OFF;
set global slow_query_log= ON;
set global slow_query_log= OFF;
set global slow_query_log= OFF;
set local slow_query_log= ON;
set global general_log= ON;
truncate table mysql.general_log;
create table t1(f1 int);
@ -124,6 +132,9 @@ Variable_name Value
general_log OFF
show variables like 'slow_query_log';
Variable_name Value
slow_query_log ON
show global variables like 'slow_query_log';
Variable_name Value
slow_query_log OFF
set global general_log=ON;
set global log_output=default;

View File

@ -0,0 +1,147 @@
# Test the MAX_STATEMENT_TIME option.
SET @@MAX_STATEMENT_TIME=2;
select @@max_statement_time;
@@max_statement_time
2.000000
SELECT SLEEP(1);
SLEEP(1)
0
SELECT SLEEP(3);
SLEEP(3)
1
SET @@MAX_STATEMENT_TIME=0;
SELECT SLEEP(1);
SLEEP(1)
0
SHOW STATUS LIKE "max_statement_time_exceeded";
Variable_name Value
Max_statement_time_exceeded 1
CREATE TABLE t1 (a INT, b VARCHAR(300)) engine=myisam;
INSERT INTO t1 VALUES (1, 'string');
SELECT 0;
0
0
# Test the MAX_STATEMENT_TIME option with SF (should have no effect).
CREATE PROCEDURE p1()
BEGIN
declare tmp int;
SET @@MAX_STATEMENT_TIME=0.0001;
SELECT COUNT(*) INTO tmp FROM t1 WHERE b LIKE '%z%';
SET @@MAX_STATEMENT_TIME=0;
END|
CREATE PROCEDURE p2()
BEGIN
SET @@MAX_STATEMENT_TIME=5;
END|
SELECT @@MAX_STATEMENT_TIME;
@@MAX_STATEMENT_TIME
0.000000
CALL p1();
CALL p2();
SELECT @@MAX_STATEMENT_TIME;
@@MAX_STATEMENT_TIME
5.000000
SET @@MAX_STATEMENT_TIME=0;
DROP PROCEDURE p1;
DROP PROCEDURE p2;
DROP TABLE t1;
# MAX_STATEMENT_TIME account resource
GRANT USAGE ON *.* TO user1@localhost WITH MAX_STATEMENT_TIME 1.005;
# con1
SELECT @@max_statement_time;
@@max_statement_time
1.005000
# restart and reconnect
set @global.userstat=1;
SELECT @@global.max_statement_time,@@session.max_statement_time;
@@global.max_statement_time @@session.max_statement_time
0.000000 1.005000
select sleep(100);
sleep(100)
1
SHOW STATUS LIKE "max_statement_time_exceeded";
Variable_name Value
Max_statement_time_exceeded 1
show grants for user1@localhost;
Grants for user1@localhost
GRANT USAGE ON *.* TO 'user1'@'localhost' WITH MAX_STATEMENT_TIME 1.005000
set @global.userstat=0;
DROP USER user1@localhost;
# MAX_STATEMENT_TIME status variables.
flush status;
SET @@max_statement_time=0;
SELECT CONVERT(VARIABLE_VALUE, UNSIGNED) INTO @time_exceeded
FROM INFORMATION_SCHEMA.GLOBAL_STATUS
WHERE VARIABLE_NAME = 'max_statement_time_exceeded';
SET @@max_statement_time=0.5;
SELECT SLEEP(2);
SLEEP(2)
1
SHOW STATUS LIKE '%timeout%';
Variable_name Value
Ssl_default_timeout 0
Ssl_session_cache_timeouts 0
SET @@max_statement_time=0;
# Ensure that the counters for:
# - statements that exceeded their maximum execution time
# are incremented.
SELECT 1 AS STATUS FROM INFORMATION_SCHEMA.GLOBAL_STATUS
WHERE VARIABLE_NAME = 'max_statement_time_exceeded'
AND CONVERT(VARIABLE_VALUE, UNSIGNED) > @time_exceeded;
STATUS
1
# Check that the appropriate error status is set.
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
START TRANSACTION;
SELECT * FROM t1 FOR UPDATE;
a
1
SET @@SESSION.max_statement_time = 0.5;
UPDATE t1 SET a = 2;
ERROR 70100: Query execution was interrupted (max_statement_time exceeded)
SHOW WARNINGS;
Level Code Message
Error 1967 Query execution was interrupted (max_statement_time exceeded)
ROLLBACK;
DROP TABLE t1;
# Test interaction with lock waits.
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
SET @@SESSION.max_statement_time= 0.5;
LOCK TABLES t1 WRITE;
SELECT @@SESSION.max_statement_time;
@@SESSION.max_statement_time
0.500000
LOCK TABLES t1 READ;
ERROR 70100: Query execution was interrupted (max_statement_time exceeded)
UNLOCK TABLES;
BEGIN;
SELECT * FROM t1;
a
1
ALTER TABLE t1 ADD COLUMN b INT;
ERROR 70100: Query execution was interrupted (max_statement_time exceeded)
ROLLBACK;
SELECT GET_LOCK('lock', 1);
GET_LOCK('lock', 1)
1
SELECT GET_LOCK('lock', 1);
GET_LOCK('lock', 1)
NULL
SELECT RELEASE_LOCK('lock');
RELEASE_LOCK('lock')
1
DROP TABLE t1;
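Condensed from the results above: max_statement_time is a decimal number of seconds (0 = no limit), settable per session or globally and grantable per account; a SLEEP() that is cut short returns 1, while other interrupted statements fail with error 1967 ("Query execution was interrupted (max_statement_time exceeded)"). A short sketch with illustrative values:
SET @@SESSION.max_statement_time = 1.5;       # abort statements running longer than 1.5s
SELECT SLEEP(3);                              # interrupted: SLEEP() returns 1
SHOW STATUS LIKE 'Max_statement_time_exceeded';
GRANT USAGE ON *.* TO user1@localhost WITH MAX_STATEMENT_TIME 1.005;   # per-account limit
SET @@SESSION.max_statement_time = 0;         # back to "no timeout"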

View File

@ -674,14 +674,14 @@ FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE t1.a > 0 ORDER BY t1.a;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 index PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using index
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using index
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` > 0) order by `test`.`t1`.`a`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_first 1
Handler_read_key 1
Handler_read_next 3
# Status of testing query execution:
Variable_name Value

View File

@ -2,6 +2,9 @@
# MDEV-5226 mysql_tzinfo_to_sql errors with tzdata 2013f and above
#
# Verbose run
set @prep=if((select count(*) from information_schema.global_variables where variable_name='wsrep_on'), 'SET GLOBAL wsrep_replicate_myisam=?', 'do ?');
prepare set_wsrep_myisam from @prep;
set @toggle=1; execute set_wsrep_myisam using @toggle;
TRUNCATE TABLE time_zone;
TRUNCATE TABLE time_zone_name;
TRUNCATE TABLE time_zone_transition;
@ -25,7 +28,11 @@ Warning: Unable to load 'MYSQLTEST_VARDIR/zoneinfo/posix/ignored.tab' as time zo
Warning: Skipping directory 'MYSQLTEST_VARDIR/zoneinfo/posix/posix': to avoid infinite symlink recursion.
ALTER TABLE time_zone_transition ORDER BY Time_zone_id, Transition_time;
ALTER TABLE time_zone_transition_type ORDER BY Time_zone_id, Transition_type_id;
set @toggle=0; execute set_wsrep_myisam using @toggle;
# Silent run
set @prep=if((select count(*) from information_schema.global_variables where variable_name='wsrep_on'), 'SET GLOBAL wsrep_replicate_myisam=?', 'do ?');
prepare set_wsrep_myisam from @prep;
set @toggle=1; execute set_wsrep_myisam using @toggle;
TRUNCATE TABLE time_zone;
TRUNCATE TABLE time_zone_name;
TRUNCATE TABLE time_zone_transition;
@ -46,17 +53,26 @@ INSERT INTO time_zone_transition_type (Time_zone_id, Transition_type_id, Offset,
Warning: Unable to load 'MYSQLTEST_VARDIR/zoneinfo/posix/garbage' as time zone. Skipping it.
ALTER TABLE time_zone_transition ORDER BY Time_zone_id, Transition_time;
ALTER TABLE time_zone_transition_type ORDER BY Time_zone_id, Transition_type_id;
set @toggle=0; execute set_wsrep_myisam using @toggle;
#
# Testing with explicit timezonefile
#
set @prep=if((select count(*) from information_schema.global_variables where variable_name='wsrep_on'), 'SET GLOBAL wsrep_replicate_myisam=?', 'do ?');
prepare set_wsrep_myisam from @prep;
set @toggle=1; execute set_wsrep_myisam using @toggle;
INSERT INTO time_zone (Use_leap_seconds) VALUES ('N');
SET @time_zone_id= LAST_INSERT_ID();
INSERT INTO time_zone_name (Name, Time_zone_id) VALUES ('XXX', @time_zone_id);
INSERT INTO time_zone_transition_type (Time_zone_id, Transition_type_id, Offset, Is_DST, Abbreviation) VALUES
(@time_zone_id, 0, 0, 0, 'GMT')
;
set @toggle=0; execute set_wsrep_myisam using @toggle;
#
# Testing --leap
#
set @prep=if((select count(*) from information_schema.global_variables where variable_name='wsrep_on'), 'SET GLOBAL wsrep_replicate_myisam=?', 'do ?');
prepare set_wsrep_myisam from @prep;
set @toggle=1; execute set_wsrep_myisam using @toggle;
TRUNCATE TABLE time_zone_leap_second;
ALTER TABLE time_zone_leap_second ORDER BY Transition_time;
set @toggle=0; execute set_wsrep_myisam using @toggle;
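The prologue repeated before each run above is a small guard pattern: the statement text is built with IF() depending on whether wsrep_on exists in INFORMATION_SCHEMA.GLOBAL_VARIABLES, so on Galera builds it toggles wsrep_replicate_myisam around the MyISAM time-zone table writes and on other builds it degrades to a harmless DO ?. The same pattern in isolation (only the final DEALLOCATE is added here):
SET @prep = IF((SELECT COUNT(*) FROM information_schema.global_variables
                WHERE variable_name = 'wsrep_on'),
               'SET GLOBAL wsrep_replicate_myisam = ?', 'do ?');
PREPARE set_wsrep_myisam FROM @prep;
SET @toggle = 1; EXECUTE set_wsrep_myisam USING @toggle;   # enable replication of the MyISAM writes
# ... TRUNCATE/INSERT the time zone tables here ...
SET @toggle = 0; EXECUTE set_wsrep_myisam USING @toggle;   # restore the default
DEALLOCATE PREPARE set_wsrep_myisam;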

View File

@ -412,7 +412,7 @@ The following options may be given as the first argument:
Maximum number of prepared statements in the server
--max-relay-log-size=#
relay log will be rotated automatically when the size
exceeds this value. If 0 are startup, it's set to
exceeds this value. If 0 at startup, it's set to
max_binlog_size
--max-seeks-for-key=#
Limit assumed max number of seeks when looking up rows
@ -422,6 +422,11 @@ The following options may be given as the first argument:
value are used; the rest are ignored)
--max-sp-recursion-depth[=#]
Maximum stored procedure recursion depth
--max-statement-time=#
A SELECT query that has taken more than
max_statement_time seconds will be aborted. The argument
will be treated as a decimal value with microsecond
precision. A value of 0 (default) means no timeout
--max-tmp-tables=# Maximum number of temporary tables a client can keep open
at a time
--max-user-connections=#
@ -1196,6 +1201,7 @@ max-relay-log-size 1073741824
max-seeks-for-key 18446744073709551615
max-sort-length 1024
max-sp-recursion-depth 0
max-statement-time 0
max-tmp-tables 32
max-user-connections 0
max-write-lock-count 18446744073709551615
@ -1263,7 +1269,7 @@ performance-schema-max-rwlock-instances -1
performance-schema-max-socket-classes 10
performance-schema-max-socket-instances -1
performance-schema-max-stage-classes 150
performance-schema-max-statement-classes 180
performance-schema-max-statement-classes 178
performance-schema-max-table-handles -1
performance-schema-max-table-instances -1
performance-schema-max-thread-classes 50
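The --max-statement-time option documented above is also exposed as a global and session system variable, so the limit can be changed at runtime; a brief sketch (the 2.5/0.5 values are illustrative):
# my.cnf / command line: --max-statement-time=0   (0 = no timeout, the default)
SET GLOBAL  max_statement_time = 2.5;    # decimal seconds, microsecond precision
SET SESSION max_statement_time = 0.5;
SELECT @@global.max_statement_time, @@session.max_statement_time;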

View File

@ -5290,3 +5290,13 @@ Usage: mysqldump [OPTIONS] database [tables]
OR mysqldump [OPTIONS] --databases [OPTIONS] DB1 [DB2 DB3...]
OR mysqldump [OPTIONS] --all-databases [OPTIONS]
For more options, use mysqldump --help
#
# Test mysqldump with --disable-query-logs
#
create table t1 (a int);
insert into t1 values (1);
drop table t1;
select * from t1;
a
1
drop table t1;

View File

@ -1,2 +0,0 @@
Variable_name Value
Ndb_config_from_host localhost

View File

@ -1,2 +0,0 @@
Variable_name Value
have_ndbcluster NO

View File

@ -1,2 +0,0 @@
TRUE
1

View File

@ -297,7 +297,7 @@ create table t1 (a int not null, b int, c varchar(10), key (a, b, c));
insert into t1 values (1, NULL, NULL), (1, NULL, 'b'), (1, 1, NULL), (1, 1, 'b'), (1, 1, 'b'), (2, 1, 'a'), (2, 1, 'b'), (2, 2, 'a'), (2, 2, 'b'), (2, 3, 'c'),(1,3,'b');
explain select * from t1 where (a = 1 and b is null and c = 'b') or (a > 2) order by a desc;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index a a 22 NULL 11 Using where; Using index
1 SIMPLE t1 range a a 22 NULL 2 Using where; Using index
select * from t1 where (a = 1 and b is null and c = 'b') or (a > 2) order by a desc;
a b c
1 NULL b
@ -2569,7 +2569,7 @@ SELECT * FROM t1 r JOIN t1 s ON r.a = s.a
WHERE s.a IN (2,9) OR s.a < 100 AND s.a != 0
ORDER BY 1 LIMIT 10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE r index PRIMARY PRIMARY 4 NULL 10 100.00 Using where; Using index
1 SIMPLE r range PRIMARY PRIMARY 4 NULL 12 100.00 Using where; Using index
1 SIMPLE s eq_ref PRIMARY PRIMARY 4 test.r.a 1 100.00 Using index
Warnings:
Note 1003 select `test`.`r`.`a` AS `a`,`test`.`s`.`a` AS `a` from `test`.`t1` `r` join `test`.`t1` `s` where ((`test`.`s`.`a` = `test`.`r`.`a`) and ((`test`.`r`.`a` in (2,9)) or ((`test`.`r`.`a` < 100) and (`test`.`r`.`a` <> 0)))) order by 1 limit 10
@ -2600,7 +2600,7 @@ CREATE TABLE t1 (a INT,KEY (a));
INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
EXPLAIN SELECT DISTINCT a,1 FROM t1 WHERE a <> 1 ORDER BY a DESC;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index a a 5 NULL 10 Using where; Using index; Using filesort
1 SIMPLE t1 range a a 5 NULL 10 Using where; Using index
SELECT DISTINCT a,1 FROM t1 WHERE a <> 1 ORDER BY a DESC;
a 1
10 1
@ -2949,3 +2949,39 @@ explain update t1 set key1=key1+1 where key1 between 10 and 110 order by key1 li
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range key1 key1 5 NULL 2 Using where; Using buffer
drop table t1,t2;
#
# MDEV-465: Optimizer : wrong index choice, leading to strong performances issues
#
CREATE TABLE t1 (
id1 int(10) unsigned NOT NULL auto_increment,
id2 tinyint(3) unsigned NOT NULL default '0',
id3 tinyint(3) unsigned NOT NULL default '0',
id4 int(10) unsigned NOT NULL default '0',
date timestamp NOT NULL default CURRENT_TIMESTAMP,
PRIMARY KEY (id1),
KEY id_234_date (id2,id3,id4,date),
KEY id_23_date (id2,id3,date)
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
# t1 has "bad" index declaration order..
CREATE TABLE t2 (
id1 int(10) unsigned NOT NULL auto_increment,
id2 tinyint(3) unsigned NOT NULL default '0',
id3 tinyint(3) unsigned NOT NULL default '0',
id4 int(10) unsigned NOT NULL default '0',
date timestamp NOT NULL default CURRENT_TIMESTAMP,
PRIMARY KEY (id1),
KEY id_23_date (id2,id3,date),
KEY id_234_date (id2,id3,id4,date)
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
# t2 has a "good" index declaration order
INSERT INTO t1 (id2,id3,id4) VALUES (1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,0,1),(1,2,1),(1,3,1);
INSERT INTO t2 (id2,id3,id4) VALUES (1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,0,1),(1,2,1),(1,3,1);
# The following two must both use id_23_date and no "using filesort":
EXPLAIN SELECT id1 FROM t1 WHERE id2=1 AND id3=1 ORDER BY date DESC LIMIT 0,4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range id_234_date,id_23_date id_23_date 2 NULL 3 Using where
# See above query
EXPLAIN SELECT id1 FROM t2 WHERE id2=1 AND id3=1 ORDER BY date DESC LIMIT 0,4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref id_23_date,id_234_date id_23_date 2 const,const 3 Using where
drop table t1,t2;
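Both EXPLAINs above must pick id_23_date because, with id2 and id3 bound by equalities, its next key part is date, so the ORDER BY ... LIMIT is satisfied straight from the index; id_234_date has id4 in between and would force a filesort. One way to see the difference on the same data (the plans described are the expected ones, not recorded output) is to force each index in turn:
EXPLAIN SELECT id1 FROM t1 FORCE INDEX(id_23_date)
WHERE id2 = 1 AND id3 = 1 ORDER BY date DESC LIMIT 0,4;    # no "Using filesort" expected
EXPLAIN SELECT id1 FROM t1 FORCE INDEX(id_234_date)
WHERE id2 = 1 AND id3 = 1 ORDER BY date DESC LIMIT 0,4;    # should show "Using filesort"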

View File

@ -0,0 +1,98 @@
drop table if exists t0,t1,t2,t3;
#
# MDEV-6402: Optimizer doesn't choose best execution plan when composite key is used
#
create table t0(a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1(a int);
insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
CREATE TABLE t2 (
pk1 int(11) NOT NULL,
pk2 int(11) NOT NULL,
fd5 bigint(20) DEFAULT NULL,
filler1 char(200),
filler2 char(200),
PRIMARY KEY (pk1,pk2),
UNIQUE KEY ux_pk1_fd5 (pk1,fd5)
) ENGINE=InnoDB;
insert into t2
select
round(log(2,t1.a+1)),
t1.a,
t1.a,
REPEAT('filler-data-', 10),
REPEAT('filler-data-', 10)
from
t1;
select pk1, count(*) from t2 group by pk1;
pk1 count(*)
0 1
1 1
2 3
3 6
4 11
5 23
6 45
7 91
8 181
9 362
10 276
# The following should use range(ux_pk1_fd5), two key parts (key_len = 4 + 9 = 13)
EXPLAIN SELECT * FROM t2 USE INDEX(ux_pk1_fd5) WHERE pk1=9 AND fd5 < 500 ORDER BY fd5 DESC LIMIT 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range ux_pk1_fd5 ux_pk1_fd5 13 NULL 137 Using where
# This also must use range, not ref. key_len must be 13
EXPLAIN SELECT * FROM t2 WHERE pk1=9 AND fd5 < 500 ORDER BY fd5 DESC LIMIT 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range PRIMARY,ux_pk1_fd5 ux_pk1_fd5 13 NULL 137 Using where
drop table t0,t1, t2;
#
# MDEV-6814: Server crashes in calculate_key_len on query with ORDER BY
#
CREATE TABLE t1 (f1 INT, f2 INT, f3 INT, KEY(f2),KEY(f2,f1)) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,5,0),(2,6,0);
SELECT * FROM t1 WHERE f1 < 3 AND f2 IS NULL ORDER BY f1;
f1 f2 f3
DROP TABLE t1;
#
# MDEV-6796: Unable to skip filesort when using implicit extended key
#
CREATE TABLE t1 (
pk1 int(11) NOT NULL,
pk2 varchar(64) NOT NULL,
col1 varchar(16) DEFAULT NULL,
PRIMARY KEY (pk1,pk2),
KEY key1 (pk1,col1)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE t2 (
pk1 int(11) NOT NULL,
pk2 varchar(64) NOT NULL,
col1 varchar(16) DEFAULT NULL,
PRIMARY KEY (pk1,pk2),
KEY key1 (pk1,col1,pk2)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `t1` VALUES
(12321321,'a8f5f167f44f4964e6c998dee827110c','video'),
(12321321,'d77a17a3659ffa60c54e0ea17b6c6d16','video'),
(12321321,'wwafdsafdsafads','video'),
(12321321,'696aa249f0738e8181957dd57c2d7d0b','video-2014-09-23'),
(12321321,'802f9f29584b486f356693e3aa4ef0af','video=sdsd'),
(12321321,'2f94543ff74aab82e9a058b4e8316d75','video=sdsdsds'),
(12321321,'c1316b9df0d203fd1b9035308de52a0a','video=sdsdsdsdsd');
insert into t2 select * from t1;
# this must not use filesort:
explain SELECT pk2
FROM t1 USE INDEX(key1)
WHERE pk1 = 123
AND col1 = 'video'
ORDER BY pk2 DESC LIMIT 21;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref key1 key1 55 const,const 1 Using where; Using index
# this must not use filesort, either:
explain SELECT pk2
FROM t2 USE INDEX(key1)
WHERE pk1 = 123 AND col1 = 'video'
ORDER BY pk2 DESC LIMIT 21;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref key1 key1 55 const,const 1 Using where; Using index
drop table t1, t2;
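The first plan can avoid a filesort because InnoDB secondary keys are implicitly extended with the primary key columns, so t1's key1(pk1,col1) behaves like t2's explicit key1(pk1,col1,pk2) and pk2 is available for the ORDER BY. A sketch of how one could observe this, assuming the extended_keys optimizer_switch flag (on by default in this tree) controls the behaviour:
SET @save_switch = @@optimizer_switch;
SET optimizer_switch = 'extended_keys=off';
EXPLAIN SELECT pk2 FROM t1 USE INDEX(key1)
WHERE pk1 = 123 AND col1 = 'video' ORDER BY pk2 DESC LIMIT 21;   # expected to fall back to "Using filesort"
SET optimizer_switch = @save_switch;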

View File

@ -15,7 +15,7 @@ PLUGIN_STATUS ACTIVE
PLUGIN_TYPE STORAGE ENGINE
PLUGIN_TYPE_VERSION #
PLUGIN_LIBRARY ha_example.so
PLUGIN_LIBRARY_VERSION 1.8
PLUGIN_LIBRARY_VERSION 1.10
PLUGIN_AUTHOR Brian Aker, MySQL AB
PLUGIN_DESCRIPTION Example storage engine
PLUGIN_LICENSE GPL
@ -28,7 +28,7 @@ PLUGIN_STATUS ACTIVE
PLUGIN_TYPE DAEMON
PLUGIN_TYPE_VERSION #
PLUGIN_LIBRARY ha_example.so
PLUGIN_LIBRARY_VERSION 1.8
PLUGIN_LIBRARY_VERSION 1.10
PLUGIN_AUTHOR Sergei Golubchik
PLUGIN_DESCRIPTION Unusable Daemon
PLUGIN_LICENSE GPL
@ -67,7 +67,7 @@ PLUGIN_STATUS DELETED
PLUGIN_TYPE STORAGE ENGINE
PLUGIN_TYPE_VERSION #
PLUGIN_LIBRARY ha_example.so
PLUGIN_LIBRARY_VERSION 1.8
PLUGIN_LIBRARY_VERSION 1.10
PLUGIN_AUTHOR Brian Aker, MySQL AB
PLUGIN_DESCRIPTION Example storage engine
PLUGIN_LICENSE GPL

View File

@ -1202,13 +1202,13 @@ SET @aux= "SELECT COUNT(*)
prepare my_stmt from @aux;
execute my_stmt;
COUNT(*)
45
46
execute my_stmt;
COUNT(*)
45
46
execute my_stmt;
COUNT(*)
45
46
deallocate prepare my_stmt;
drop procedure if exists p1|
drop table if exists t1|

View File

@ -2265,7 +2265,46 @@ def @arg32 250 16777215 0 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
prepare stmt1 from "select c1 into ? from t9 where c1= 1" ;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '? from t9 where c1= 1' at line 1
execute stmt1 using @result;
execute full_info ;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def @arg01 8 20 1 Y 32896 0 63
def @arg02 8 20 0 Y 32896 0 63
def @arg03 8 20 0 Y 32896 0 63
def @arg04 8 20 0 Y 32896 0 63
def @arg05 8 20 0 Y 32896 0 63
def @arg06 8 20 0 Y 32896 0 63
def @arg07 5 23 0 Y 32896 31 63
def @arg08 5 23 0 Y 32896 31 63
def @arg09 5 23 0 Y 32896 31 63
def @arg10 5 23 0 Y 32896 31 63
def @arg11 246 83 0 Y 32896 30 63
def @arg12 246 83 0 Y 32896 30 63
def @arg13 250 16777215 0 Y 0 31 8
def @arg14 250 16777215 0 Y 0 31 8
def @arg15 250 16777215 19 Y 0 31 8
def @arg16 250 16777215 0 Y 0 31 8
def @arg17 8 20 0 Y 32928 0 63
def @arg18 8 20 0 Y 32896 0 63
def @arg19 8 20 0 Y 32896 0 63
def @arg20 250 16777215 0 Y 0 31 8
def @arg21 250 16777215 0 Y 0 31 8
def @arg22 250 16777215 0 Y 0 31 8
def @arg23 250 16777215 0 Y 128 31 63
def @arg24 250 16777215 0 Y 0 31 8
def @arg25 250 16777215 0 Y 128 31 63
def @arg26 250 16777215 0 Y 0 31 8
def @arg27 250 16777215 0 Y 128 31 63
def @arg28 250 16777215 0 Y 0 31 8
def @arg29 250 16777215 0 Y 128 31 63
def @arg30 250 16777215 0 Y 0 31 8
def @arg31 250 16777215 0 Y 0 31 8
def @arg32 250 16777215 0 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
select @result;
@result
1
test_sequence
-- insert into numeric columns --
insert into t9

View File

@ -2248,7 +2248,46 @@ def @arg32 250 16777215 0 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
prepare stmt1 from "select c1 into ? from t9 where c1= 1" ;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '? from t9 where c1= 1' at line 1
execute stmt1 using @result;
execute full_info ;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def @arg01 8 20 1 Y 32896 0 63
def @arg02 8 20 0 Y 32896 0 63
def @arg03 8 20 0 Y 32896 0 63
def @arg04 8 20 0 Y 32896 0 63
def @arg05 8 20 0 Y 32896 0 63
def @arg06 8 20 0 Y 32896 0 63
def @arg07 5 23 0 Y 32896 31 63
def @arg08 5 23 0 Y 32896 31 63
def @arg09 5 23 0 Y 32896 31 63
def @arg10 5 23 0 Y 32896 31 63
def @arg11 246 83 0 Y 32896 30 63
def @arg12 246 83 0 Y 32896 30 63
def @arg13 250 16777215 0 Y 0 31 8
def @arg14 250 16777215 0 Y 0 31 8
def @arg15 250 16777215 19 Y 0 31 8
def @arg16 250 16777215 0 Y 0 31 8
def @arg17 8 20 0 Y 32928 0 63
def @arg18 8 20 0 Y 32896 0 63
def @arg19 8 20 0 Y 32896 0 63
def @arg20 250 16777215 0 Y 0 31 8
def @arg21 250 16777215 0 Y 0 31 8
def @arg22 250 16777215 0 Y 0 31 8
def @arg23 250 16777215 0 Y 128 31 63
def @arg24 250 16777215 0 Y 0 31 8
def @arg25 250 16777215 0 Y 128 31 63
def @arg26 250 16777215 0 Y 0 31 8
def @arg27 250 16777215 0 Y 128 31 63
def @arg28 250 16777215 0 Y 0 31 8
def @arg29 250 16777215 0 Y 128 31 63
def @arg30 250 16777215 0 Y 0 31 8
def @arg31 250 16777215 0 Y 0 31 8
def @arg32 250 16777215 0 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
select @result;
@result
1
test_sequence
-- insert into numeric columns --
insert into t9

View File

@ -2249,7 +2249,46 @@ def @arg32 250 16777215 0 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
prepare stmt1 from "select c1 into ? from t9 where c1= 1" ;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '? from t9 where c1= 1' at line 1
execute stmt1 using @result;
execute full_info ;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def @arg01 8 20 1 Y 32896 0 63
def @arg02 8 20 0 Y 32896 0 63
def @arg03 8 20 0 Y 32896 0 63
def @arg04 8 20 0 Y 32896 0 63
def @arg05 8 20 0 Y 32896 0 63
def @arg06 8 20 0 Y 32896 0 63
def @arg07 5 23 0 Y 32896 31 63
def @arg08 5 23 0 Y 32896 31 63
def @arg09 5 23 0 Y 32896 31 63
def @arg10 5 23 0 Y 32896 31 63
def @arg11 246 83 0 Y 32896 30 63
def @arg12 246 83 0 Y 32896 30 63
def @arg13 250 16777215 0 Y 0 31 8
def @arg14 250 16777215 0 Y 0 31 8
def @arg15 250 16777215 19 Y 0 31 8
def @arg16 250 16777215 0 Y 0 31 8
def @arg17 8 20 0 Y 32928 0 63
def @arg18 8 20 0 Y 32896 0 63
def @arg19 8 20 0 Y 32896 0 63
def @arg20 250 16777215 0 Y 0 31 8
def @arg21 250 16777215 0 Y 0 31 8
def @arg22 250 16777215 0 Y 0 31 8
def @arg23 250 16777215 0 Y 0 31 8
def @arg24 250 16777215 0 Y 0 31 8
def @arg25 250 16777215 0 Y 0 31 8
def @arg26 250 16777215 0 Y 0 31 8
def @arg27 250 16777215 0 Y 0 31 8
def @arg28 250 16777215 0 Y 0 31 8
def @arg29 250 16777215 0 Y 0 31 8
def @arg30 250 16777215 0 Y 0 31 8
def @arg31 250 16777215 0 Y 0 31 8
def @arg32 250 16777215 0 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
select @result;
@result
1
test_sequence
-- insert into numeric columns --
insert into t9

View File

@ -2185,7 +2185,46 @@ def @arg32 250 16777215 0 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
prepare stmt1 from "select c1 into ? from t9 where c1= 1" ;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '? from t9 where c1= 1' at line 1
execute stmt1 using @result;
execute full_info ;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def @arg01 8 20 1 Y 32896 0 63
def @arg02 8 20 0 Y 32896 0 63
def @arg03 8 20 0 Y 32896 0 63
def @arg04 8 20 0 Y 32896 0 63
def @arg05 8 20 0 Y 32896 0 63
def @arg06 8 20 0 Y 32896 0 63
def @arg07 5 23 0 Y 32896 31 63
def @arg08 5 23 0 Y 32896 31 63
def @arg09 5 23 0 Y 32896 31 63
def @arg10 5 23 0 Y 32896 31 63
def @arg11 246 83 0 Y 32896 30 63
def @arg12 246 83 0 Y 32896 30 63
def @arg13 250 16777215 0 Y 0 31 8
def @arg14 250 16777215 0 Y 0 31 8
def @arg15 250 16777215 19 Y 0 31 8
def @arg16 250 16777215 0 Y 0 31 8
def @arg17 8 20 0 Y 32928 0 63
def @arg18 8 20 0 Y 32896 0 63
def @arg19 8 20 0 Y 32896 0 63
def @arg20 250 16777215 0 Y 0 31 8
def @arg21 250 16777215 0 Y 0 31 8
def @arg22 250 16777215 0 Y 0 31 8
def @arg23 250 16777215 0 Y 128 31 63
def @arg24 250 16777215 0 Y 0 31 8
def @arg25 250 16777215 0 Y 128 31 63
def @arg26 250 16777215 0 Y 0 31 8
def @arg27 250 16777215 0 Y 128 31 63
def @arg28 250 16777215 0 Y 0 31 8
def @arg29 250 16777215 0 Y 128 31 63
def @arg30 250 16777215 0 Y 0 31 8
def @arg31 250 16777215 0 Y 0 31 8
def @arg32 250 16777215 0 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
select @result;
@result
1
test_sequence
-- insert into numeric columns --
insert into t9
@ -5539,7 +5578,46 @@ def @arg32 250 16777215 0 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
prepare stmt1 from "select c1 into ? from t9 where c1= 1" ;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '? from t9 where c1= 1' at line 1
execute stmt1 using @result;
execute full_info ;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def @arg01 8 20 1 Y 32896 0 63
def @arg02 8 20 0 Y 32896 0 63
def @arg03 8 20 0 Y 32896 0 63
def @arg04 8 20 0 Y 32896 0 63
def @arg05 8 20 0 Y 32896 0 63
def @arg06 8 20 0 Y 32896 0 63
def @arg07 5 23 0 Y 32896 31 63
def @arg08 5 23 0 Y 32896 31 63
def @arg09 5 23 0 Y 32896 31 63
def @arg10 5 23 0 Y 32896 31 63
def @arg11 246 83 0 Y 32896 30 63
def @arg12 246 83 0 Y 32896 30 63
def @arg13 250 16777215 0 Y 0 31 8
def @arg14 250 16777215 0 Y 0 31 8
def @arg15 250 16777215 19 Y 0 31 8
def @arg16 250 16777215 0 Y 0 31 8
def @arg17 8 20 0 Y 32928 0 63
def @arg18 8 20 0 Y 32896 0 63
def @arg19 8 20 0 Y 32896 0 63
def @arg20 250 16777215 0 Y 0 31 8
def @arg21 250 16777215 0 Y 0 31 8
def @arg22 250 16777215 0 Y 0 31 8
def @arg23 250 16777215 0 Y 128 31 63
def @arg24 250 16777215 0 Y 0 31 8
def @arg25 250 16777215 0 Y 128 31 63
def @arg26 250 16777215 0 Y 0 31 8
def @arg27 250 16777215 0 Y 128 31 63
def @arg28 250 16777215 0 Y 0 31 8
def @arg29 250 16777215 0 Y 128 31 63
def @arg30 250 16777215 0 Y 0 31 8
def @arg31 250 16777215 0 Y 0 31 8
def @arg32 250 16777215 0 Y 0 31 8
@arg01 @arg02 @arg03 @arg04 @arg05 @arg06 @arg07 @arg08 @arg09 @arg10 @arg11 @arg12 @arg13 @arg14 @arg15 @arg16 @arg17 @arg18 @arg19 @arg20 @arg21 @arg22 @arg23 @arg24 @arg25 @arg26 @arg27 @arg28 @arg29 @arg30 @arg31 @arg32
0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1991-01-01 01:01:01 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
select @result;
@result
1
test_sequence
-- insert into numeric columns --
insert into t9

View File

@ -2196,3 +2196,48 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range a a 5 NULL 1 Using where; Using index
drop table t1,t2,t3;
#
# MDEV-6480: Remove conditions for which range optimizer returned SEL_ARG::IMPOSSIBLE.
#
create table t1(a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t2 (a int, b int, c int, key(a), key(b));
insert into t2
select
A.a + B.a* 10 + C.a * 100,
A.a + B.a* 10 + C.a * 100,
12345
from
t1 A, t1 B, t1 C;
# EXPLAIN EXTENDED should show that 'b > 25 and b < 15' is removed from the WHERE:
explain extended select * from t2 where (b > 25 and b < 15) or a<44;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range a,b a 5 NULL 43 100.00 Using index condition
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where (`test`.`t2`.`a` < 44)
# EXPLAIN EXTENDED should show that 'b > 25 and b < 15' is removed from the WHERE:
explain extended select * from t2 where a < 44 or (b > 25 and b < 15);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range a,b a 5 NULL 43 100.00 Using index condition
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where (`test`.`t2`.`a` < 44)
# Here, the conditions on b will not be removed, because "c<44" is not sargable
# and hence the (b .. and .. b) part is not analyzed at all:
explain extended select * from t2 where c < 44 or (b > 25 and b < 15);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL b NULL NULL NULL 1000 100.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where ((`test`.`t2`.`c` < 44) or ((`test`.`t2`.`b` > 25) and (`test`.`t2`.`b` < 15)))
# EXPLAIN EXTENDED should show that 'b > 25 and b < 15' is removed from the WHERE:
explain extended select * from t2 where (b > 25 and b < 15) or c < 44;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL b NULL NULL NULL 1000 100.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where ((`test`.`t2`.`c` < 44))
# Try a case where both OR parts produce SEL_ARG::IMPOSSIBLE:
explain extended select * from t2 where (b > 25 and b < 15) or (a>55 and a<44);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where 0
drop table t1,t2;
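The recipe used throughout this file for verifying the MDEV-6480 rewrite is EXPLAIN EXTENDED followed by reading the Note 1003 warning, which prints the WHERE clause as the optimizer left it; in an interactive session the warning is fetched explicitly:
EXPLAIN EXTENDED SELECT * FROM t2 WHERE (b > 25 AND b < 15) OR a < 44;
SHOW WARNINGS;   # Note 1003 shows the rewritten query, reduced here to ... where (`test`.`t2`.`a` < 44)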

View File

@ -2198,4 +2198,49 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range a a 5 NULL 1 Using where; Using index
drop table t1,t2,t3;
#
# MDEV-6480: Remove conditions for which range optimizer returned SEL_ARG::IMPOSSIBLE.
#
create table t1(a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t2 (a int, b int, c int, key(a), key(b));
insert into t2
select
A.a + B.a* 10 + C.a * 100,
A.a + B.a* 10 + C.a * 100,
12345
from
t1 A, t1 B, t1 C;
# EXPLAIN EXTENDED should show that 'b > 25 and b < 15' is removed from the WHERE:
explain extended select * from t2 where (b > 25 and b < 15) or a<44;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range a,b a 5 NULL 43 100.00 Using index condition; Rowid-ordered scan
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where (`test`.`t2`.`a` < 44)
# EXPLAIN EXTENDED should show that 'b > 25 and b < 15' is removed from the WHERE:
explain extended select * from t2 where a < 44 or (b > 25 and b < 15);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range a,b a 5 NULL 43 100.00 Using index condition; Rowid-ordered scan
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where (`test`.`t2`.`a` < 44)
# Here, the conditions on b will not be removed, because "c<44" is not sargable
# and hence the (b .. and .. b) part is not analyzed at all:
explain extended select * from t2 where c < 44 or (b > 25 and b < 15);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL b NULL NULL NULL 1000 100.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where ((`test`.`t2`.`c` < 44) or ((`test`.`t2`.`b` > 25) and (`test`.`t2`.`b` < 15)))
# EXPLAIN EXTENDED should show that 'b > 25 and b < 15' is removed from the WHERE:
explain extended select * from t2 where (b > 25 and b < 15) or c < 44;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL b NULL NULL NULL 1000 100.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where ((`test`.`t2`.`c` < 44))
# Try a case where both OR parts produce SEL_ARG::IMPOSSIBLE:
explain extended select * from t2 where (b > 25 and b < 15) or (a>55 and a<44);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where 0
drop table t1,t2;
set optimizer_switch=@mrr_icp_extra_tmp;

View File

@ -100,20 +100,20 @@ drop table t1;
-- after Bug#29394 is implemented.
show variables like "wait_timeout%";
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def information_schema VARIABLES VARIABLES VARIABLE_NAME Variable_name 253 64 12 N 1 0 8
def information_schema VARIABLES VARIABLES VARIABLE_VALUE Value 253 1024 5 Y 0 0 8
def information_schema SESSION_VARIABLES SESSION_VARIABLES VARIABLE_NAME Variable_name 253 64 12 N 1 0 8
def information_schema SESSION_VARIABLES SESSION_VARIABLES VARIABLE_VALUE Value 253 1024 5 N 1 0 8
Variable_name Value
wait_timeout 28800
show variables like "WAIT_timeout%";
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def information_schema VARIABLES VARIABLES VARIABLE_NAME Variable_name 253 64 12 N 1 0 8
def information_schema VARIABLES VARIABLES VARIABLE_VALUE Value 253 1024 5 Y 0 0 8
def information_schema SESSION_VARIABLES SESSION_VARIABLES VARIABLE_NAME Variable_name 253 64 12 N 1 0 8
def information_schema SESSION_VARIABLES SESSION_VARIABLES VARIABLE_VALUE Value 253 1024 5 N 1 0 8
Variable_name Value
wait_timeout 28800
show variables like "this_doesn't_exists%";
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def information_schema VARIABLES VARIABLES VARIABLE_NAME Variable_name 253 64 0 N 1 0 8
def information_schema VARIABLES VARIABLES VARIABLE_VALUE Value 253 1024 0 Y 0 0 8
def information_schema SESSION_VARIABLES SESSION_VARIABLES VARIABLE_NAME Variable_name 253 64 0 N 1 0 8
def information_schema SESSION_VARIABLES SESSION_VARIABLES VARIABLE_VALUE Value 253 1024 0 N 1 0 8
Variable_name Value
show table status from test like "this_doesn't_exists%";
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr

Some files were not shown because too many files have changed in this diff.