diff --git a/client/mysql.cc b/client/mysql.cc
index cf541160ea6..c5c163c127b 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -2048,8 +2048,15 @@ get_one_option(const struct my_option *opt, const char *argument,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKET,
+        except on Windows when 'protocol=pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   case 'I':
diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc
index 3bba27b4103..eae46b8b5f1 100644
--- a/client/mysqladmin.cc
+++ b/client/mysqladmin.cc
@@ -321,8 +321,15 @@ get_one_option(const struct my_option *opt, const char *argument,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKET,
+        except on Windows when 'protocol=pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   }
diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc
index ab41680ab15..517d513c754 100644
--- a/client/mysqlbinlog.cc
+++ b/client/mysqlbinlog.cc
@@ -2447,8 +2447,15 @@ get_one_option(const struct my_option *opt, const char *argument,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKET,
+        except on Windows when 'protocol=pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   case 'v':
diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c
index f326ca62d72..3b9942d55a5 100644
--- a/client/mysqlcheck.c
+++ b/client/mysqlcheck.c
@@ -387,8 +387,15 @@ get_one_option(const struct my_option *opt,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKET,
+        except on Windows when 'protocol=pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   }
diff --git a/client/mysqldump.c b/client/mysqldump.c
index 99f53a71164..9ae4af2d02f 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -1074,8 +1074,15 @@ get_one_option(const struct my_option *opt,
   case 'S':
     if (filename[0] == '\0')
    {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKET,
+        except on Windows when 'protocol=pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   }
diff --git a/client/mysqlimport.c b/client/mysqlimport.c
index ae27c691e57..19d79686271 100644
--- a/client/mysqlimport.c
+++ b/client/mysqlimport.c
@@ -271,8 +271,15 @@ get_one_option(const struct my_option *opt, const char *argument,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKET,
+        except on Windows when 'protocol=pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   case '#':
diff --git a/client/mysqlshow.c b/client/mysqlshow.c
index 0a114ad94fb..63981e02d59 100644
--- a/client/mysqlshow.c
+++ b/client/mysqlshow.c
@@ -346,8 +346,15 @@ get_one_option(const struct my_option *opt, const char *argument,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKET,
+        except on Windows when 'protocol=pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
     break;
diff --git a/client/mysqlslap.c b/client/mysqlslap.c
index 79e5fe1f8bc..f8b8ad15e4c 100644
--- a/client/mysqlslap.c
+++ b/client/mysqlslap.c
@@ -784,8 +784,15 @@ get_one_option(const struct my_option *opt, const char *argument,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKET,
+        except on Windows when 'protocol=pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   case '#':
diff --git a/mysql-test/main/cli_options_force_protocol_win.result b/mysql-test/main/cli_options_force_protocol_win.result
index eedfde4f1de..0d17a54dbc7 100644
--- a/mysql-test/main/cli_options_force_protocol_win.result
+++ b/mysql-test/main/cli_options_force_protocol_win.result
@@ -12,6 +12,12 @@ Connection: localhost via named pipe
 # exec MYSQL --host=localhost -W -e "status" 2>&1 | findstr /c:"Connection:"
 Connection: localhost via named pipe
 # exec MYSQL --host=localhost -W --socket=MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"
-Connection: localhost via TCP/IP
+Connection: localhost via named pipe
 # exec MYSQL --host=localhost --socket=MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"
 Connection: localhost via TCP/IP
+#
+# MDEV-30639: Upgrade to 10.8 and later does not work on Windows
+# due to connection protocol overwrite
+#
+# exec MYSQL --host=localhost --protocol=pipe --socket=MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"
+Connection: localhost via named pipe
diff --git a/mysql-test/main/cli_options_force_protocol_win.test b/mysql-test/main/cli_options_force_protocol_win.test
index 63024f3b620..31fb47fd72b 100644
--- a/mysql-test/main/cli_options_force_protocol_win.test
+++ b/mysql-test/main/cli_options_force_protocol_win.test
@@ -25,3 +25,10 @@
 
 --echo # exec MYSQL --host=localhost --socket=MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"
 --exec $MYSQL --host=localhost --socket=$MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"
+
+--echo #
+--echo # MDEV-30639: Upgrade to 10.8 and later does not work on Windows
+--echo # due to connection protocol overwrite
+--echo #
+--echo # exec MYSQL --host=localhost --protocol=pipe --socket=MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"
+--exec $MYSQL --host=localhost --protocol=pipe --socket=$MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"
diff --git a/mysql-test/suite/innodb/r/gap_lock_split.result b/mysql-test/suite/innodb/r/gap_lock_split.result
index a5765cb5694..7c10d8eed94 100644
--- a/mysql-test/suite/innodb/r/gap_lock_split.result
+++ b/mysql-test/suite/innodb/r/gap_lock_split.result
@@ -1,18 +1,22 @@
 SET @save_frequency=@@GLOBAL.innodb_purge_rseg_truncate_frequency;
 SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
-CREATE TABLE t1(id INT PRIMARY key, val VARCHAR(16000)) ENGINE=InnoDB;
+CREATE TABLE t1(id INT PRIMARY key, val VARCHAR(16000))
+ENGINE=InnoDB STATS_PERSISTENT=0;
 INSERT INTO t1 (id,val) SELECT 2*seq,'x' FROM seq_0_to_1023;
 connect con1,localhost,root,,;
 InnoDB		0 transactions not purged
 START TRANSACTION WITH CONSISTENT SNAPSHOT;
 connection default;
 DELETE FROM t1 WHERE id=1788;
+SET @saved_dbug = @@GLOBAL.debug_dbug;
+SET @@GLOBAL.debug_dbug="d,enable_row_purge_del_mark_exit_sync_point";
 BEGIN;
 SELECT * FROM t1 WHERE id=1788 FOR UPDATE;
 id	val
 connection con1;
 COMMIT;
-InnoDB		0 transactions not purged
+SET DEBUG_SYNC = 'now WAIT_FOR row_purge_del_mark_finished';
+SET @@GLOBAL.debug_dbug = @saved_dbug;
 connection default;
 INSERT INTO t1 (id,val) VALUES (1787, REPEAT('x',2000));
 connection con1;
diff --git a/mysql-test/suite/innodb/r/innodb_defragment_fill_factor.result b/mysql-test/suite/innodb/r/innodb_defragment_fill_factor.result
deleted file mode 100644
index 8453050a92a..00000000000
--- a/mysql-test/suite/innodb/r/innodb_defragment_fill_factor.result
+++ /dev/null
@@ -1,118 +0,0 @@
-Testing tables with large records
-CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), c VARCHAR(256), KEY SECOND(a, b,c)) ENGINE=INNODB;
-INSERT INTO t1 VALUES (1, REPEAT('A', 256), REPEAT('B', 256));
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-SET GLOBAL innodb_fast_shutdown = 0;
-# restart
-optimize table t1;
-Table	Op	Msg_type	Msg_text
-test.t1	optimize	status	OK
-select count(*) from t1;
-count(*)
-927
-select count(*) from t1 force index (second);
-count(*)
-927
-# A few more insertions on the page should not cause a page split.
-insert into t1 values (81, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (83, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (87, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (82, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (86, REPEAT('A', 256), REPEAT('B', 256));
-# Insert more rows to cause a page split
-insert into t1 values (180, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (181, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (182, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (183, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (184, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (185, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (186, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (187, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (188, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (189, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (190, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (191, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (192, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (193, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (194, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (195, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (196, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (197, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (198, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (199, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (200, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (201, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (202, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (203, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (204, REPEAT('A', 256), REPEAT('B', 256));
-DROP TABLE t1;
-Testing table with small records
-CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(16), c VARCHAR(32), KEY SECOND(a,b,c)) ENGINE=INNODB;
-SET GLOBAL innodb_fast_shutdown = 0;
-# restart
-optimize table t2;
-Table	Op	Msg_type	Msg_text
-test.t2	optimize	status	OK
-select count(*) from t2;
-count(*)
-3701
-select count(*) from t2 force index(second);
-count(*)
-3701
-The page should have room for about 20 insertions
-insert into t2 values(1181, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1191, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1182, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1192, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1183, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1193, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1184, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1194, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1185, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1195, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1186, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1196, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1187, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1197, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1188, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1198, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1189, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1199, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1190, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1180, REPEAT('A', 16), REPEAT('B',32));
-# Insert more rows to cause a page split
-insert into t2 values (180, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (181, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (182, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (183, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (184, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (185, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (186, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (187, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (188, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (189, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (190, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (191, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (192, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (193, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (194, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (195, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (196, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (197, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (198, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (199, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (200, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (201, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (202, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (203, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (204, REPEAT('A', 16), REPEAT('B', 32));
-DROP TABLE t2;
diff --git a/mysql-test/suite/innodb/r/innodb_information_schema_buffer.result b/mysql-test/suite/innodb/r/innodb_information_schema_buffer.result
index 46372cd85f2..e87b35383a7 100644
--- a/mysql-test/suite/innodb/r/innodb_information_schema_buffer.result
+++ b/mysql-test/suite/innodb/r/innodb_information_schema_buffer.result
@@ -14,7 +14,6 @@ WHERE TABLE_NAME LIKE '%infoschema_buffer_test%' AND PAGE_TYPE='index';
 POOL_ID	BLOCK_ID	SPACE	PAGE_NUMBER	PAGE_TYPE	FLUSH_TYPE	FIX_COUNT	IS_HASHED	NEWEST_MODIFICATION	OLDEST_MODIFICATION	ACCESS_TIME	TABLE_NAME	INDEX_NAME	NUMBER_RECORDS	DATA_SIZE	COMPRESSED_SIZE	PAGE_STATE	IO_FIX	IS_OLD	FREE_PAGE_CLOCK
 0	#	#	3	INDEX	0	FIX	AHI	LSN	LSN	TIME	`test`.`infoschema_buffer_test`	GEN_CLUST_INDEX	2	58	0	FILE_PAGE	IO_FIX	OLD	#
 0	#	#	4	INDEX	0	FIX	AHI	LSN	LSN	TIME	`test`.`infoschema_buffer_test`	idx	2	32	0	FILE_PAGE	IO_FIX	OLD	#
-0	#	#	5	INDEX	0	FIX	AHI	LSN	LSN	TIME	`test`.`infoschema_buffer_test`	idx	2	32	0	FILE_PAGE	IO_FIX	OLD	#
 DROP TABLE infoschema_buffer_test;
 SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE
 WHERE TABLE_NAME LIKE '%infoschema_buffer_test%';
diff --git a/mysql-test/suite/innodb/r/insert_into_empty.result b/mysql-test/suite/innodb/r/insert_into_empty.result
index 45f3dd7654c..0cdee172696 100644
--- a/mysql-test/suite/innodb/r/insert_into_empty.result
+++ b/mysql-test/suite/innodb/r/insert_into_empty.result
@@ -460,3 +460,10 @@ ERROR HY000: Got error 1 "Operation not permitted" during COMMIT
 COMMIT;
 DROP TABLE t1;
 # End of 10.8 tests
+#
+# MDEV-31537 Bulk insert operation aborts the server
+# for redundant table
+#
+CREATE TABLE t (a CHAR CHARACTER SET utf8) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT t SELECT left(seq,1) FROM seq_1_to_43691;
+DROP TABLE t;
diff --git a/mysql-test/suite/innodb/t/gap_lock_split.test b/mysql-test/suite/innodb/t/gap_lock_split.test
index 8211a612d35..e8202615c3f 100644
--- a/mysql-test/suite/innodb/t/gap_lock_split.test
+++ b/mysql-test/suite/innodb/t/gap_lock_split.test
@@ -1,11 +1,13 @@
 --source include/have_innodb.inc
 --source include/have_sequence.inc
 --source include/have_debug.inc
+--source include/have_debug_sync.inc
 
 SET @save_frequency=@@GLOBAL.innodb_purge_rseg_truncate_frequency;
 SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
 
-CREATE TABLE t1(id INT PRIMARY key, val VARCHAR(16000)) ENGINE=InnoDB;
+CREATE TABLE t1(id INT PRIMARY key, val VARCHAR(16000))
+  ENGINE=InnoDB STATS_PERSISTENT=0;
 INSERT INTO t1 (id,val) SELECT 2*seq,'x' FROM seq_0_to_1023;
 
 connect(con1,localhost,root,,);
@@ -16,13 +18,18 @@ connection default;
 
 DELETE FROM t1 WHERE id=1788;
 
+SET @saved_dbug = @@GLOBAL.debug_dbug;
+SET @@GLOBAL.debug_dbug="d,enable_row_purge_del_mark_exit_sync_point";
+
 BEGIN;
 # This will return no result, but should acquire a gap lock.
 SELECT * FROM t1 WHERE id=1788 FOR UPDATE;
 
 connection con1;
 COMMIT;
-source include/wait_all_purged.inc;
+SET DEBUG_SYNC = 'now WAIT_FOR row_purge_del_mark_finished';
+SET @@GLOBAL.debug_dbug = @saved_dbug;
+
 connection default;
 
 INSERT INTO t1 (id,val) VALUES (1787, REPEAT('x',2000));
diff --git a/mysql-test/suite/innodb/t/innodb_defragment_fill_factor.opt b/mysql-test/suite/innodb/t/innodb_defragment_fill_factor.opt
deleted file mode 100644
index 6426bac41a0..00000000000
--- a/mysql-test/suite/innodb/t/innodb_defragment_fill_factor.opt
+++ /dev/null
@@ -1,4 +0,0 @@
---loose-innodb-buffer-pool-stats
---loose-innodb-buffer-page
---loose-innodb-buffer-page-lru
---innodb-defragment=1
\ No newline at end of file
diff --git a/mysql-test/suite/innodb/t/innodb_defragment_fill_factor.test b/mysql-test/suite/innodb/t/innodb_defragment_fill_factor.test
deleted file mode 100644
index 3a5897b9911..00000000000
--- a/mysql-test/suite/innodb/t/innodb_defragment_fill_factor.test
+++ /dev/null
@@ -1,203 +0,0 @@
---source include/have_innodb.inc
---source include/big_test.inc
---source include/not_valgrind.inc
---source include/not_embedded.inc
---source include/have_innodb_16k.inc
-
---echo Testing tables with large records
-# Create table.
-CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), c VARCHAR(256), KEY SECOND(a, b,c)) ENGINE=INNODB;
-
-# Populate table.
-INSERT INTO t1 VALUES (1, REPEAT('A', 256), REPEAT('B', 256));
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-INSERT INTO t1 (b) SELECT b from t1;
-
---disable_query_log
-let $size = 10;
-while ($size)
-{
-  let $j = 100 * $size;
-  eval delete from t1 where a between $j - 20 and $j + 5;
-  dec $size;
-}
---enable_query_log
-
-SET GLOBAL innodb_fast_shutdown = 0;
---source include/restart_mysqld.inc
-optimize table t1;
-
-select count(*) from t1;
-
---let $primary_before = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'PRIMARY', Value, 1)
-
-select count(*) from t1 force index (second);
-
---let $second_before = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'second', Value, 1)
-
---echo # A few more insertions on the page should not cause a page split.
-insert into t1 values (81, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (83, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (87, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (82, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (86, REPEAT('A', 256), REPEAT('B', 256));
---let $primary_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'PRIMARY', Value, 1)
---let $second_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'second', Value, 1)
-
-if ($primary_before != $primary_after) {
-  --echo Insertion caused page split on primary, which should be avoided by innodb_defragment_fill_factor.
-}
-
-if ($second_before != $second_after) {
-  --echo Insertion caused page split on second, which should be avoided by innodb_defragment_fill_factor.
-}
-
---echo # Insert more rows to cause a page split
-insert into t1 values (180, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (181, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (182, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (183, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (184, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (185, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (186, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (187, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (188, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (189, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (190, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (191, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (192, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (193, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (194, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (195, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (196, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (197, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (198, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (199, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (200, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (201, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (202, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (203, REPEAT('A', 256), REPEAT('B', 256));
-insert into t1 values (204, REPEAT('A', 256), REPEAT('B', 256));
-
---let $primary_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'PRIMARY', Value, 1)
-
---let $second_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'second', Value, 1)
-
-if ($primary_before == $primary_after) {
-  --echo Too little space is reserved on primary index.
-}
-
-if ($second_before == $second_after) {
-  --echo Too little space is reserved on second index.
-}
-
-DROP TABLE t1;
-
---echo Testing table with small records
-CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(16), c VARCHAR(32), KEY SECOND(a,b,c)) ENGINE=INNODB;
-# Populate table.
---disable_query_log
-INSERT INTO t2 VALUES (1, REPEAT('A', 16), REPEAT('B', 32));
-INSERT INTO t2 (b) SELECT b from t2;
-INSERT INTO t2 (b) SELECT b from t2;
-INSERT INTO t2 (b) SELECT b from t2;
-INSERT INTO t2 (b) SELECT b from t2;
-INSERT INTO t2 (b) SELECT b from t2;
-INSERT INTO t2 (b) SELECT b from t2;
-INSERT INTO t2 (b) SELECT b from t2;
-INSERT INTO t2 (b) SELECT b from t2;
-INSERT INTO t2 (b) SELECT b from t2;
-INSERT INTO t2 (b) SELECT b from t2;
-INSERT INTO t2 (b) SELECT b from t2;
-INSERT INTO t2 (b) SELECT b from t2;
---enable_query_log
-
---disable_query_log
-let $size = 40;
-while ($size)
-{
-  let $j = 100 * $size;
-  eval delete from t2 where a between $j - 20 and $j;
-  dec $size;
-}
---enable_query_log
-
-SET GLOBAL innodb_fast_shutdown = 0;
---source include/restart_mysqld.inc
-optimize table t2;
-
-select count(*) from t2;
-select count(*) from t2 force index(second);
-
---let $second_before = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t2%' and index_name = 'second', Value, 1)
-
---echo The page should have room for about 20 insertions
-insert into t2 values(1181, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1191, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1182, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1192, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1183, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1193, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1184, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1194, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1185, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1195, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1186, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1196, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1187, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1197, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1188, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1198, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1189, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1199, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1190, REPEAT('A', 16), REPEAT('B',32));
-insert into t2 values(1180, REPEAT('A', 16), REPEAT('B',32));
-
---let $second_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t2%' and index_name = 'second', Value, 1)
-
-if ($second_before != $second_after) {
-  --echo Insertion caused page split on second, which should be avoided by innodb_defragment_fill_factor.
-}
-
---echo # Insert more rows to cause a page split
-insert into t2 values (180, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (181, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (182, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (183, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (184, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (185, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (186, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (187, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (188, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (189, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (190, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (191, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (192, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (193, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (194, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (195, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (196, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (197, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (198, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (199, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (200, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (201, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (202, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (203, REPEAT('A', 16), REPEAT('B', 32));
-insert into t2 values (204, REPEAT('A', 16), REPEAT('B', 32));
-
---let $second_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t2%' and index_name = 'second', Value, 1)
-
-if ($second_before == $second_after) {
-  --echo Too little space is reserved on second index.
-}
-
-DROP TABLE t2;
diff --git a/mysql-test/suite/innodb/t/insert_into_empty.test b/mysql-test/suite/innodb/t/insert_into_empty.test
index 8452cecf700..ac55b9054a8 100644
--- a/mysql-test/suite/innodb/t/insert_into_empty.test
+++ b/mysql-test/suite/innodb/t/insert_into_empty.test
@@ -479,3 +479,11 @@ INSERT INTO t1 VALUES
 COMMIT;
 DROP TABLE t1;
 --echo # End of 10.8 tests
+
+--echo #
+--echo # MDEV-31537 Bulk insert operation aborts the server
+--echo # for redundant table
+--echo #
+CREATE TABLE t (a CHAR CHARACTER SET utf8) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT t SELECT left(seq,1) FROM seq_1_to_43691;
+DROP TABLE t;
diff --git a/mysql-test/suite/sql_sequence/alter.result b/mysql-test/suite/sql_sequence/alter.result
index 90de2ebfcc0..6d29876a8eb 100644
--- a/mysql-test/suite/sql_sequence/alter.result
+++ b/mysql-test/suite/sql_sequence/alter.result
@@ -248,3 +248,17 @@ SELECT NEXTVAL(s);
 NEXTVAL(s)
 1
 DROP SEQUENCE s;
+#
+# MDEV-31607 ER_DUP_KEY in mysql.table_stats upon RENAME on sequence
+#
+CREATE SEQUENCE s1 ENGINE=InnoDB;
+CREATE SEQUENCE s2 ENGINE=InnoDB;
+SHOW CREATE SEQUENCE s1;
+Table	Create Table
+s1	CREATE SEQUENCE `s1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB
+SHOW CREATE SEQUENCE s2;
+Table	Create Table
+s2	CREATE SEQUENCE `s2` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB
+DROP SEQUENCE s2;
+RENAME TABLE s1 TO s2;
+DROP SEQUENCE s2;
diff --git a/mysql-test/suite/sql_sequence/alter.test b/mysql-test/suite/sql_sequence/alter.test
index a5e6245d609..3ad9821b0cf 100644
--- a/mysql-test/suite/sql_sequence/alter.test
+++ b/mysql-test/suite/sql_sequence/alter.test
@@ -161,3 +161,15 @@ CREATE SEQUENCE s;
 ALTER TABLE s ORDER BY cache_size;
 SELECT NEXTVAL(s);
 DROP SEQUENCE s;
+
+--echo #
+--echo # MDEV-31607 ER_DUP_KEY in mysql.table_stats upon RENAME on sequence
+--echo #
+
+CREATE SEQUENCE s1 ENGINE=InnoDB;
+CREATE SEQUENCE s2 ENGINE=InnoDB;
+SHOW CREATE SEQUENCE s1;
+SHOW CREATE SEQUENCE s2;
+DROP SEQUENCE s2;
+RENAME TABLE s1 TO s2;
+DROP SEQUENCE s2;
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 59ba6eaa2b4..bdea0c85a90 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -4991,8 +4991,6 @@ class btr_est_cur_t
   page_id_t m_page_id;
   /** Current block */
   buf_block_t *m_block;
-  /** mtr savepoint of the current block */
-  ulint m_savepoint;
   /** Page search mode, can differ from m_mode for non-leaf pages, see c-tor
   comments for details */
   page_cur_mode_t m_page_mode;
@@ -5051,7 +5049,6 @@ public:
   bool fetch_child(ulint level, mtr_t &mtr, const buf_block_t *right_parent)
   {
     buf_block_t *parent_block= m_block;
-    ulint parent_savepoint= m_savepoint;
 
     m_block= btr_block_get(*index(), m_page_id.page_no(), RW_S_LATCH, !level,
                            &mtr, nullptr);
@@ -5059,9 +5056,10 @@ public:
       return false;
 
     if (parent_block && parent_block != right_parent)
-      mtr.rollback_to_savepoint(parent_savepoint, parent_savepoint + 1);
-
-    m_savepoint= mtr.get_savepoint() - 1;
+    {
+      ut_ad(mtr.get_savepoint() >= 2);
+      mtr.rollback_to_savepoint(1, 2);
+    }
 
     return level == ULINT_UNDEFINED ||
            btr_page_get_level(m_block->page.frame) == level;
@@ -5123,10 +5121,10 @@ public:
     return true;
   }
 
-  /** Gets page id of the current record child.
+  /** Read page id of the current record child.
   @param offsets offsets array.
   @param heap heap for offsets array */
-  void get_child(rec_offs **offsets, mem_heap_t **heap)
+  void read_child_page_id(rec_offs **offsets, mem_heap_t **heap)
   {
     const rec_t *node_ptr= page_cur_get_rec(&m_page_cur);
 
@@ -5196,11 +5194,7 @@ public:
   /** Copies block pointer and savepoint from another btr_est_cur_t in the
   case if both left and right border cursors point to the same block.
   @param o reference to the other btr_est_cur_t object. */
-  void set_block(const btr_est_cur_t &o)
-  {
-    m_block= o.m_block;
-    m_savepoint= o.m_savepoint;
-  }
+  void set_block(const btr_est_cur_t &o) { m_block= o.m_block; }
 
   /** @return current record number. */
   ulint nth_rec() const { return m_nth_rec; }
@@ -5239,7 +5233,6 @@ static ha_rows btr_estimate_n_rows_in_range_on_level(
   pages before reaching right_page_no, then we estimate the average from the
   pages scanned so far. */
   static constexpr uint n_pages_read_limit= 9;
-  ulint savepoint= 0;
   buf_block_t *block= nullptr;
   const dict_index_t *index= left_cur.index();
 
@@ -5269,9 +5262,6 @@ static ha_rows btr_estimate_n_rows_in_range_on_level(
   {
     page_t *page;
    buf_block_t *prev_block= block;
-    ulint prev_savepoint= savepoint;
-
-    savepoint= mtr.get_savepoint();
 
    /* Fetch the page. */
    block= btr_block_get(*index, page_id.page_no(), RW_S_LATCH, !level, &mtr,
@@ -5279,9 +5269,11 @@ static ha_rows btr_estimate_n_rows_in_range_on_level(
 
     if (prev_block)
    {
-      mtr.rollback_to_savepoint(prev_savepoint, prev_savepoint + 1);
-      if (block)
-        savepoint--;
+      ulint savepoint = mtr.get_savepoint();
+      /* Index s-lock, p1, p2 latches, can also be p1 and p2 parent latch if
+      they are not diverged */
+      ut_ad(savepoint >= 3);
+      mtr.rollback_to_savepoint(savepoint - 2, savepoint - 1);
    }
 
    if (!block || btr_page_get_level(buf_block_get_frame(block)) != level)
@@ -5312,8 +5304,8 @@ static ha_rows btr_estimate_n_rows_in_range_on_level(
 
   if (block)
   {
-    ut_ad(block == mtr.at_savepoint(savepoint));
-    mtr.rollback_to_savepoint(savepoint, savepoint + 1);
+    ut_ad(block == mtr.at_savepoint(mtr.get_savepoint() - 1));
+    mtr.rollback_to_savepoint(mtr.get_savepoint() - 1);
   }
 
   return (n_rows);
@@ -5322,8 +5314,8 @@ inexact:
 
   if (block)
   {
-    ut_ad(block == mtr.at_savepoint(savepoint));
-    mtr.rollback_to_savepoint(savepoint, savepoint + 1);
+    ut_ad(block == mtr.at_savepoint(mtr.get_savepoint() - 1));
+    mtr.rollback_to_savepoint(mtr.get_savepoint() - 1);
   }
 
   is_n_rows_exact= false;
@@ -5517,8 +5509,12 @@ search_loop:
   {
     ut_ad(height > 0);
     height--;
-    p1.get_child(&offsets, &heap);
-    p2.get_child(&offsets, &heap);
+    ut_ad(mtr.memo_contains(p1.index()->lock, MTR_MEMO_S_LOCK));
+    ut_ad(mtr.memo_contains_flagged(p1.block(), MTR_MEMO_PAGE_S_FIX));
+    p1.read_child_page_id(&offsets, &heap);
+    ut_ad(mtr.memo_contains(p2.index()->lock, MTR_MEMO_S_LOCK));
+    ut_ad(mtr.memo_contains_flagged(p2.block(), MTR_MEMO_PAGE_S_FIX));
+    p2.read_child_page_id(&offsets, &heap);
     goto search_loop;
   }
 
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc
index 300276ff3a6..f8ab8256fb5 100644
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -2118,14 +2118,13 @@ static bool ha_validate(const hash_table_t *table,
 }
 
 /** Validates the search system for given hash table.
-@param[in]	hash_table_id	hash table to validate
-@return TRUE if ok */
-static
-ibool
-btr_search_hash_table_validate(ulint hash_table_id)
+@param thd            connection, for checking if CHECK TABLE has been killed
+@param hash_table_id  hash table to validate
+@return true if ok */
+static bool btr_search_hash_table_validate(THD *thd, ulint hash_table_id)
 {
 	ha_node_t*	node;
-	ibool		ok		= TRUE;
+	bool		ok		= true;
 	ulint		i;
 	ulint		cell_count;
 	mem_heap_t*	heap		= NULL;
@@ -2133,9 +2132,15 @@ btr_search_hash_table_validate(ulint hash_table_id)
 	rec_offs*	offsets		= offsets_;
 
 	btr_search_x_lock_all();
-	if (!btr_search_enabled) {
+	if (!btr_search_enabled || (thd && thd_kill_level(thd))) {
+func_exit:
 		btr_search_x_unlock_all();
-		return(TRUE);
+
+		if (UNIV_LIKELY_NULL(heap)) {
+			mem_heap_free(heap);
+		}
+
+		return ok;
 	}
 
 	/* How many cells to check before temporarily releasing
@@ -2162,8 +2167,8 @@ btr_search_hash_table_validate(ulint hash_table_id)
 
 		btr_search_x_lock_all();
 
-		if (!btr_search_enabled) {
-			ok = true;
+		if (!btr_search_enabled
+		    || (thd && thd_kill_level(thd))) {
 			goto func_exit;
 		}
 
@@ -2270,8 +2275,8 @@ state_ok:
 
 	btr_search_x_lock_all();
 
-	if (!btr_search_enabled) {
-		ok = true;
+	if (!btr_search_enabled
+	    || (thd && thd_kill_level(thd))) {
 		goto func_exit;
 	}
 
@@ -2292,33 +2297,23 @@ state_ok:
 		ulint	end_index = ut_min(i + chunk_size - 1, cell_count - 1);
 
 		if (!ha_validate(&part.table, i, end_index)) {
-			ok = FALSE;
+			ok = false;
		}
	}
 
	mysql_mutex_unlock(&buf_pool.mutex);
-func_exit:
-	btr_search_x_unlock_all();
-
-	if (UNIV_LIKELY_NULL(heap)) {
-		mem_heap_free(heap);
-	}
-
-	return(ok);
+	goto func_exit;
 }
 
-/** Validate the search system.
-@return true if ok. */
-bool
-btr_search_validate()
+/** Validates the search system.
+@param thd connection, for checking if CHECK TABLE has been killed
+@return true if ok */
+bool btr_search_validate(THD *thd)
 {
-	for (ulint i = 0; i < btr_ahi_parts; ++i) {
-		if (!btr_search_hash_table_validate(i)) {
-			return(false);
-		}
-	}
-
-	return(true);
+  for (ulint i= 0; i < btr_ahi_parts; ++i)
+    if (!btr_search_hash_table_validate(thd, i))
+      return(false);
+  return true;
 }
 
 #ifdef UNIV_DEBUG
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 90263757c19..32b40c58a9b 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -1864,8 +1864,7 @@ inline void log_t::write_checkpoint(lsn_t end_lsn) noexcept
       ib::info() << "Resized log to " << ib::bytes_iec{resizing_completed}
                  << "; start LSN=" << resizing;
     else
-      sql_print_error("InnoDB: Resize of log failed at " LSN_PF,
-                      get_flushed_lsn());
+      buf_flush_ahead(end_lsn + 1, false);
   }
 
   /** Initiate a log checkpoint, discarding the start of the log.
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 540cf3e580a..db7319e75fa 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -13674,13 +13674,12 @@ err_exit:
   }
 
   if (!table->no_rollback())
-  {
     err= trx->drop_table_foreign(table->name);
-    if (err == DB_SUCCESS && table_stats && index_stats)
-      err= trx->drop_table_statistics(table->name);
-    if (err != DB_SUCCESS)
-      goto err_exit;
-  }
+
+  if (err == DB_SUCCESS && table_stats && index_stats)
+    err= trx->drop_table_statistics(table->name);
+  if (err != DB_SUCCESS)
+    goto err_exit;
 
   err= trx->drop_table(*table);
   if (err != DB_SUCCESS)
@@ -15342,7 +15341,8 @@ ha_innobase::check(
 
	/* We validate the whole adaptive hash index for all tables at
	every CHECK TABLE only when QUICK flag is not present. */
-	if (!(check_opt->flags & T_QUICK) && !btr_search_validate()) {
+	if (!(check_opt->flags & T_QUICK)
+	    && !btr_search_validate(m_prebuilt->trx->mysql_thd)) {
		push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
			     ER_NOT_KEYFILE,
			     "InnoDB: The adaptive hash index is corrupted.");
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index 3cb51f5e15d..b00308d7e51 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -3882,7 +3882,7 @@ i_s_innodb_buffer_page_get_info(
 
	page_info->state = bpage->state();
 
-	if (page_info->state < buf_page_t::FREED) {
+	if (page_info->state < buf_page_t::UNFIXED) {
		page_info->page_type = I_S_PAGE_TYPE_UNKNOWN;
		page_info->compressed_only = false;
	} else {
diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h
index 48e4fadab9b..b75cad10180 100644
--- a/storage/innobase/include/btr0sea.h
+++ b/storage/innobase/include/btr0sea.h
@@ -127,8 +127,9 @@ void btr_search_update_hash_on_insert(btr_cur_t *cursor,
 void btr_search_update_hash_on_delete(btr_cur_t *cursor);
 
 /** Validates the search system.
+@param thd connection, for checking if CHECK TABLE has been killed
 @return true if ok */
-bool btr_search_validate();
+bool btr_search_validate(THD *thd);
 
 /** Lock all search latches in exclusive mode. */
 static inline void btr_search_x_lock_all();
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index 3e4c7029db4..10a832cac86 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -499,7 +499,14 @@ log_t::resize_start_status log_t::resize_start(os_offset_t size) noexcept
   log_resize_release();
 
   if (start_lsn)
+  {
+    mysql_mutex_lock(&buf_pool.flush_list_mutex);
+    lsn_t target_lsn= buf_pool.get_oldest_modification(0);
+    if (start_lsn < target_lsn)
+      start_lsn= target_lsn + 1;
+    mysql_mutex_unlock(&buf_pool.flush_list_mutex);
     buf_flush_ahead(start_lsn, false);
+  }
 
   return status;
 }
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index 42c8c633b8d..84befbaf311 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -479,6 +479,13 @@ static ulint row_merge_bulk_buf_add(row_merge_buf_t* buf,
 
		ulint	fixed_len= ifield->fixed_len;
 
+		/* CHAR in ROW_FORMAT=REDUNDANT is always
+		fixed-length, but in the temporary file it is
+		variable-length for variable-length character sets. */
+		if (fixed_len && !index->table->not_redundant() &&
+		    col->mbminlen != col->mbmaxlen)
+			fixed_len= 0;
+
		if (fixed_len);
		else if (len < 128 || (!DATA_BIG_COL(col)))
			extra_size++;
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/group_min_max.inc b/storage/rocksdb/mysql-test/rocksdb/include/group_min_max.inc
index 40fabce0517..27d73e8d815 100644
--- a/storage/rocksdb/mysql-test/rocksdb/include/group_min_max.inc
+++ b/storage/rocksdb/mysql-test/rocksdb/include/group_min_max.inc
@@ -12,6 +12,10 @@
 
 --source include/default_optimizer_switch.inc
 
+set
+  @rgmm_hist_type=@@histogram_type,
+  histogram_type=double_prec_hb;
+
 --disable_warnings
 drop table if exists t1;
 --enable_warnings
@@ -1436,3 +1440,5 @@ SHOW SESSION STATUS LIKE 'Handler_read%';
 DROP TABLE t1;
 
 --echo # End of test for Bug#18109609
+
+set histogram_type=@rgmm_hist_type;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/group_min_max.result b/storage/rocksdb/mysql-test/rocksdb/r/group_min_max.result
index a070ba91a55..5a1350fe0ff 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/group_min_max.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/group_min_max.result
@@ -1,5 +1,8 @@
 set @debug_tmp= @@debug_dbug;
 set global debug_dbug="+d,force_group_by";
+set
+@rgmm_hist_type=@@histogram_type,
+histogram_type=double_prec_hb;
 drop table if exists t1;
 create table t1 (
 a1 char(64), a2 char(64), b char(16), c char(16) not null, d char(16), dummy char(248) default ' '
@@ -3523,4 +3526,5 @@ Handler_read_rnd_deleted	0
 Handler_read_rnd_next	0
 DROP TABLE t1;
 # End of test for Bug#18109609
+set histogram_type=@rgmm_hist_type;
 set global debug_dbug=@debug_tmp;
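
A minimal standalone sketch of the client-side rule the MDEV-30639 hunks above implement: a --socket argument only switches the connection to MYSQL_PROTOCOL_SOCKET when the protocol has not already been forced to "pipe" (for example by a config file on Windows). The enum values and the pick_protocol() helper below are simplified stand-ins for illustration, not code from the MariaDB client sources.

#include <stdio.h>

/* Simplified stand-in for mysql_protocol_type from the C API. */
enum sketch_protocol { PROTO_DEFAULT, PROTO_TCP, PROTO_SOCKET, PROTO_PIPE };

/* Hypothetical helper: apply the --socket precedence rule. */
static enum sketch_protocol pick_protocol(enum sketch_protocol current,
                                          const char *socket_arg)
{
  if (socket_arg && socket_arg[0] != '\0' && current != PROTO_PIPE)
    return PROTO_SOCKET;   /* --socket forces SOCKET unless pipe was chosen */
  return current;          /* keep an explicitly requested named pipe */
}

int main(void)
{
  /* protocol=pipe from a config file, then --socket on the command line */
  printf("pipe  + socket -> %d (expect %d)\n",
         pick_protocol(PROTO_PIPE, "/tmp/mysql.sock"), PROTO_PIPE);
  /* no protocol given: --socket still selects SOCKET as before */
  printf("plain + socket -> %d (expect %d)\n",
         pick_protocol(PROTO_DEFAULT, "/tmp/mysql.sock"), PROTO_SOCKET);
  return 0;
}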