mirror of https://github.com/MariaDB/server.git synced 2025-08-07 00:04:31 +03:00

Merge 10.9 into 10.10

Marko Mäkelä
2023-07-04 08:18:30 +03:00
30 changed files with 226 additions and 416 deletions

@@ -2048,9 +2048,16 @@ get_one_option(const struct my_option *opt, const char *argument,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKETSt
+        Except on Windows if 'protocol= pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   case 'I':
   case '?':
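
The same guard is applied in each client's get_one_option() handler in the hunks that follow. As a standalone illustration of the precedence it restores (MDEV-30639, referenced in the test further down), here is a minimal self-contained C++ sketch; the enum and the helper function are local stand-ins, not the actual MariaDB client sources:

#include <cstdio>

/* Local stand-in for the client protocol enum. */
enum mysql_protocol_type
{
  MYSQL_PROTOCOL_DEFAULT, MYSQL_PROTOCOL_TCP, MYSQL_PROTOCOL_SOCKET,
  MYSQL_PROTOCOL_PIPE, MYSQL_PROTOCOL_MEMORY
};

static mysql_protocol_type opt_protocol= MYSQL_PROTOCOL_DEFAULT;

/* Called when --socket=... is seen: switch to a Unix socket unless the user
   has already forced the named-pipe protocol (e.g. protocol=pipe on Windows). */
static void handle_socket_option()
{
  if (opt_protocol != MYSQL_PROTOCOL_PIPE)
    opt_protocol= MYSQL_PROTOCOL_SOCKET;
}

int main()
{
  opt_protocol= MYSQL_PROTOCOL_PIPE;   /* as if protocol=pipe came from my.ini */
  handle_socket_option();              /* a later --socket must not override it */
  std::printf("%d\n", opt_protocol == MYSQL_PROTOCOL_PIPE);  /* prints 1 */
  return 0;
}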

@@ -321,9 +321,16 @@ get_one_option(const struct my_option *opt, const char *argument,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKETSt
+        Except on Windows if 'protocol= pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   }
   return 0;

@@ -2447,9 +2447,16 @@ get_one_option(const struct my_option *opt, const char *argument,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKETSt
+        Except on Windows if 'protocol= pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   case 'v':
     if (argument == disabled_my_option)

@@ -387,9 +387,16 @@ get_one_option(const struct my_option *opt,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKETSt
+        Except on Windows if 'protocol= pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   }

@@ -1074,9 +1074,16 @@ get_one_option(const struct my_option *opt,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKETSt
+        Except on Windows if 'protocol= pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   }
   return 0;

@@ -271,9 +271,16 @@ get_one_option(const struct my_option *opt, const char *argument,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKETSt
+        Except on Windows if 'protocol= pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   case '#':
     DBUG_PUSH(argument ? argument : "d:t:o");

@@ -346,9 +346,16 @@ get_one_option(const struct my_option *opt, const char *argument,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKETSt
+        Except on Windows if 'protocol= pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
     break;
   case '#':

@@ -784,9 +784,16 @@ get_one_option(const struct my_option *opt, const char *argument,
   case 'S':
     if (filename[0] == '\0')
     {
-      /* Socket given on command line, switch protocol to use SOCKETSt */
-      opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      /*
+        Socket given on command line, switch protocol to use SOCKETSt
+        Except on Windows if 'protocol= pipe' has been provided in
+        the config file or command line.
+      */
+      if (opt_protocol != MYSQL_PROTOCOL_PIPE)
+      {
+        opt_protocol= MYSQL_PROTOCOL_SOCKET;
+      }
     }
     break;
   case '#':
     DBUG_PUSH(argument ? argument : default_dbug_option);

@@ -12,6 +12,12 @@ Connection: localhost via named pipe
 # exec MYSQL --host=localhost -W -e "status" 2>&1 | findstr /c:"Connection:"
 Connection: localhost via named pipe
 # exec MYSQL --host=localhost -W --socket=MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"
-Connection: localhost via TCP/IP
+Connection: localhost via named pipe
 # exec MYSQL --host=localhost --socket=MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"
 Connection: localhost via TCP/IP
+#
+# MDEV-30639: Upgrade to 10.8 and later does not work on Windows
+# due to connection protocol overwrite
+#
+# exec MYSQL --host=localhost --protocol=pipe --socket=MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"
+Connection: localhost via named pipe

@@ -25,3 +25,10 @@
 --echo # exec MYSQL --host=localhost --socket=MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"
 --exec $MYSQL --host=localhost --socket=$MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"
+--echo #
+--echo # MDEV-30639: Upgrade to 10.8 and later does not work on Windows
+--echo # due to connection protocol overwrite
+--echo #
+--echo # exec MYSQL --host=localhost --protocol=pipe --socket=MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"
+--exec $MYSQL --host=localhost --protocol=pipe --socket=$MASTER_MYSOCK -e "status" 2>&1 | findstr /c:"Connection:"

@@ -1,18 +1,22 @@
 SET @save_frequency=@@GLOBAL.innodb_purge_rseg_truncate_frequency;
 SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
-CREATE TABLE t1(id INT PRIMARY key, val VARCHAR(16000)) ENGINE=InnoDB;
+CREATE TABLE t1(id INT PRIMARY key, val VARCHAR(16000))
+ENGINE=InnoDB STATS_PERSISTENT=0;
 INSERT INTO t1 (id,val) SELECT 2*seq,'x' FROM seq_0_to_1023;
 connect con1,localhost,root,,;
 InnoDB 0 transactions not purged
 START TRANSACTION WITH CONSISTENT SNAPSHOT;
 connection default;
 DELETE FROM t1 WHERE id=1788;
+SET @saved_dbug = @@GLOBAL.debug_dbug;
+SET @@GLOBAL.debug_dbug="d,enable_row_purge_del_mark_exit_sync_point";
 BEGIN;
 SELECT * FROM t1 WHERE id=1788 FOR UPDATE;
 id val
 connection con1;
 COMMIT;
-InnoDB 0 transactions not purged
+SET DEBUG_SYNC = 'now WAIT_FOR row_purge_del_mark_finished';
+SET @@GLOBAL.debug_dbug = @saved_dbug;
 connection default;
 INSERT INTO t1 (id,val) VALUES (1787, REPEAT('x',2000));
 connection con1;

@@ -1,118 +0,0 @@
Testing tables with large records
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), c VARCHAR(256), KEY SECOND(a, b,c)) ENGINE=INNODB;
INSERT INTO t1 VALUES (1, REPEAT('A', 256), REPEAT('B', 256));
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
SET GLOBAL innodb_fast_shutdown = 0;
# restart
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
select count(*) from t1;
count(*)
927
select count(*) from t1 force index (second);
count(*)
927
# A few more insertions on the page should not cause a page split.
insert into t1 values (81, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (83, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (87, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (82, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (86, REPEAT('A', 256), REPEAT('B', 256));
# Insert more rows to cause a page split
insert into t1 values (180, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (181, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (182, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (183, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (184, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (185, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (186, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (187, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (188, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (189, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (190, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (191, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (192, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (193, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (194, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (195, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (196, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (197, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (198, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (199, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (200, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (201, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (202, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (203, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (204, REPEAT('A', 256), REPEAT('B', 256));
DROP TABLE t1;
Testing table with small records
CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(16), c VARCHAR(32), KEY SECOND(a,b,c)) ENGINE=INNODB;
SET GLOBAL innodb_fast_shutdown = 0;
# restart
optimize table t2;
Table Op Msg_type Msg_text
test.t2 optimize status OK
select count(*) from t2;
count(*)
3701
select count(*) from t2 force index(second);
count(*)
3701
The page should have room for about 20 insertions
insert into t2 values(1181, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1191, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1182, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1192, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1183, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1193, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1184, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1194, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1185, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1195, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1186, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1196, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1187, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1197, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1188, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1198, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1189, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1199, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1190, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1180, REPEAT('A', 16), REPEAT('B',32));
# Insert more rows to cause a page split
insert into t2 values (180, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (181, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (182, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (183, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (184, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (185, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (186, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (187, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (188, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (189, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (190, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (191, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (192, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (193, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (194, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (195, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (196, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (197, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (198, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (199, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (200, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (201, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (202, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (203, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (204, REPEAT('A', 16), REPEAT('B', 32));
DROP TABLE t2;

@@ -14,7 +14,6 @@ WHERE TABLE_NAME LIKE '%infoschema_buffer_test%' AND PAGE_TYPE='index';
 POOL_ID BLOCK_ID SPACE PAGE_NUMBER PAGE_TYPE FLUSH_TYPE FIX_COUNT IS_HASHED NEWEST_MODIFICATION OLDEST_MODIFICATION ACCESS_TIME TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE COMPRESSED_SIZE PAGE_STATE IO_FIX IS_OLD FREE_PAGE_CLOCK
 0 # # 3 INDEX 0 FIX AHI LSN LSN TIME `test`.`infoschema_buffer_test` GEN_CLUST_INDEX 2 58 0 FILE_PAGE IO_FIX OLD #
 0 # # 4 INDEX 0 FIX AHI LSN LSN TIME `test`.`infoschema_buffer_test` idx 2 32 0 FILE_PAGE IO_FIX OLD #
-0 # # 5 INDEX 0 FIX AHI LSN LSN TIME `test`.`infoschema_buffer_test` idx 2 32 0 FILE_PAGE IO_FIX OLD #
 DROP TABLE infoschema_buffer_test;
 SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE
 WHERE TABLE_NAME LIKE '%infoschema_buffer_test%';

@@ -460,3 +460,10 @@ ERROR HY000: Got error 1 "Operation not permitted" during COMMIT
 COMMIT;
 DROP TABLE t1;
 # End of 10.8 tests
+#
+# MDEV-31537 Bulk insert operation aborts the server
+# for redundant table
+#
+CREATE TABLE t (a CHAR CHARACTER SET utf8) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT t SELECT left(seq,1) FROM seq_1_to_43691;
+DROP TABLE t;

@@ -1,11 +1,13 @@
 --source include/have_innodb.inc
 --source include/have_sequence.inc
 --source include/have_debug.inc
+--source include/have_debug_sync.inc
 SET @save_frequency=@@GLOBAL.innodb_purge_rseg_truncate_frequency;
 SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
-CREATE TABLE t1(id INT PRIMARY key, val VARCHAR(16000)) ENGINE=InnoDB;
+CREATE TABLE t1(id INT PRIMARY key, val VARCHAR(16000))
+ENGINE=InnoDB STATS_PERSISTENT=0;
 INSERT INTO t1 (id,val) SELECT 2*seq,'x' FROM seq_0_to_1023;
 connect(con1,localhost,root,,);
@@ -16,13 +18,18 @@ connection default;
 DELETE FROM t1 WHERE id=1788;
+SET @saved_dbug = @@GLOBAL.debug_dbug;
+SET @@GLOBAL.debug_dbug="d,enable_row_purge_del_mark_exit_sync_point";
 BEGIN;
 # This will return no result, but should acquire a gap lock.
 SELECT * FROM t1 WHERE id=1788 FOR UPDATE;
 connection con1;
 COMMIT;
-source include/wait_all_purged.inc;
+SET DEBUG_SYNC = 'now WAIT_FOR row_purge_del_mark_finished';
+SET @@GLOBAL.debug_dbug = @saved_dbug;
 connection default;
 INSERT INTO t1 (id,val) VALUES (1787, REPEAT('x',2000));

@@ -1,4 +0,0 @@
--loose-innodb-buffer-pool-stats
--loose-innodb-buffer-page
--loose-innodb-buffer-page-lru
--innodb-defragment=1

@@ -1,203 +0,0 @@
--source include/have_innodb.inc
--source include/big_test.inc
--source include/not_valgrind.inc
--source include/not_embedded.inc
--source include/have_innodb_16k.inc
--echo Testing tables with large records
# Create table.
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), c VARCHAR(256), KEY SECOND(a, b,c)) ENGINE=INNODB;
# Populate table.
INSERT INTO t1 VALUES (1, REPEAT('A', 256), REPEAT('B', 256));
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
--disable_query_log
let $size = 10;
while ($size)
{
let $j = 100 * $size;
eval delete from t1 where a between $j - 20 and $j + 5;
dec $size;
}
--enable_query_log
SET GLOBAL innodb_fast_shutdown = 0;
--source include/restart_mysqld.inc
optimize table t1;
select count(*) from t1;
--let $primary_before = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'PRIMARY', Value, 1)
select count(*) from t1 force index (second);
--let $second_before = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'second', Value, 1)
--echo # A few more insertions on the page should not cause a page split.
insert into t1 values (81, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (83, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (87, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (82, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (86, REPEAT('A', 256), REPEAT('B', 256));
--let $primary_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'PRIMARY', Value, 1)
--let $second_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'second', Value, 1)
if ($primary_before != $primary_after) {
--echo Insertion caused page split on primary, which should be avoided by innodb_defragment_fill_factor.
}
if ($second_before != $second_after) {
--echo Insertion caused page split on second, which should be avoided by innodb_defragment_fill_factor.
}
--echo # Insert more rows to cause a page split
insert into t1 values (180, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (181, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (182, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (183, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (184, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (185, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (186, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (187, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (188, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (189, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (190, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (191, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (192, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (193, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (194, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (195, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (196, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (197, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (198, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (199, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (200, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (201, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (202, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (203, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (204, REPEAT('A', 256), REPEAT('B', 256));
--let $primary_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'PRIMARY', Value, 1)
--let $second_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'second', Value, 1)
if ($primary_before == $primary_after) {
--echo Too little space is reserved on primary index.
}
if ($second_before == $second_after) {
--echo Too little space is reserved on second index.
}
DROP TABLE t1;
--echo Testing table with small records
CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(16), c VARCHAR(32), KEY SECOND(a,b,c)) ENGINE=INNODB;
# Populate table.
--disable_query_log
INSERT INTO t2 VALUES (1, REPEAT('A', 16), REPEAT('B', 32));
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
--enable_query_log
--disable_query_log
let $size = 40;
while ($size)
{
let $j = 100 * $size;
eval delete from t2 where a between $j - 20 and $j;
dec $size;
}
--enable_query_log
SET GLOBAL innodb_fast_shutdown = 0;
--source include/restart_mysqld.inc
optimize table t2;
select count(*) from t2;
select count(*) from t2 force index(second);
--let $second_before = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t2%' and index_name = 'second', Value, 1)
--echo The page should have room for about 20 insertions
insert into t2 values(1181, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1191, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1182, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1192, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1183, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1193, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1184, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1194, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1185, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1195, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1186, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1196, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1187, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1197, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1188, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1198, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1189, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1199, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1190, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1180, REPEAT('A', 16), REPEAT('B',32));
--let $second_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t2%' and index_name = 'second', Value, 1)
if ($second_before != $second_after) {
--echo Insertion caused page split on second, which should be avoided by innodb_defragment_fill_factor.
}
--echo # Insert more rows to cause a page split
insert into t2 values (180, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (181, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (182, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (183, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (184, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (185, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (186, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (187, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (188, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (189, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (190, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (191, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (192, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (193, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (194, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (195, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (196, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (197, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (198, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (199, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (200, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (201, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (202, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (203, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (204, REPEAT('A', 16), REPEAT('B', 32));
--let $second_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t2%' and index_name = 'second', Value, 1)
if ($second_before == $second_after) {
--echo Too little space is reserved on second index.
}
DROP TABLE t2;

@@ -479,3 +479,11 @@ INSERT INTO t1 VALUES
 COMMIT;
 DROP TABLE t1;
 --echo # End of 10.8 tests
+--echo #
+--echo # MDEV-31537 Bulk insert operation aborts the server
+--echo # for redundant table
+--echo #
+CREATE TABLE t (a CHAR CHARACTER SET utf8) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT t SELECT left(seq,1) FROM seq_1_to_43691;
+DROP TABLE t;

@@ -248,3 +248,17 @@ SELECT NEXTVAL(s);
 NEXTVAL(s)
 1
 DROP SEQUENCE s;
+#
+# MDEV-31607 ER_DUP_KEY in mysql.table_stats upon REANME on sequence
+#
+CREATE SEQUENCE s1 ENGINE=InnoDB;
+CREATE SEQUENCE s2 ENGINE=InnoDB;
+SHOW CREATE SEQUENCE s1;
+Table Create Table
+s1 CREATE SEQUENCE `s1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB
+SHOW CREATE SEQUENCE s2;
+Table Create Table
+s2 CREATE SEQUENCE `s2` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB
+DROP SEQUENCE s2;
+RENAME TABLE s1 TO s2;
+DROP SEQUENCE s2;

@@ -161,3 +161,15 @@ CREATE SEQUENCE s;
 ALTER TABLE s ORDER BY cache_size;
 SELECT NEXTVAL(s);
 DROP SEQUENCE s;
+--echo #
+--echo # MDEV-31607 ER_DUP_KEY in mysql.table_stats upon REANME on sequence
+--echo #
+CREATE SEQUENCE s1 ENGINE=InnoDB;
+CREATE SEQUENCE s2 ENGINE=InnoDB;
+SHOW CREATE SEQUENCE s1;
+SHOW CREATE SEQUENCE s2;
+DROP SEQUENCE s2;
+RENAME TABLE s1 TO s2;
+DROP SEQUENCE s2;

@@ -4991,8 +4991,6 @@ class btr_est_cur_t
   page_id_t m_page_id;
   /** Current block */
   buf_block_t *m_block;
-  /** mtr savepoint of the current block */
-  ulint m_savepoint;
   /** Page search mode, can differ from m_mode for non-leaf pages, see c-tor
   comments for details */
   page_cur_mode_t m_page_mode;
@@ -5051,7 +5049,6 @@ public:
   bool fetch_child(ulint level, mtr_t &mtr, const buf_block_t *right_parent)
   {
     buf_block_t *parent_block= m_block;
-    ulint parent_savepoint= m_savepoint;
     m_block= btr_block_get(*index(), m_page_id.page_no(), RW_S_LATCH, !level,
                            &mtr, nullptr);
@@ -5059,9 +5056,10 @@ public:
       return false;
     if (parent_block && parent_block != right_parent)
-      mtr.rollback_to_savepoint(parent_savepoint, parent_savepoint + 1);
-    m_savepoint= mtr.get_savepoint() - 1;
+    {
+      ut_ad(mtr.get_savepoint() >= 2);
+      mtr.rollback_to_savepoint(1, 2);
+    }
     return level == ULINT_UNDEFINED ||
            btr_page_get_level(m_block->page.frame) == level;
@@ -5123,10 +5121,10 @@ public:
     return true;
   }
-  /** Gets page id of the current record child.
+  /** Read page id of the current record child.
   @param offsets offsets array.
   @param heap heap for offsets array */
-  void get_child(rec_offs **offsets, mem_heap_t **heap)
+  void read_child_page_id(rec_offs **offsets, mem_heap_t **heap)
   {
     const rec_t *node_ptr= page_cur_get_rec(&m_page_cur);
@@ -5196,11 +5194,7 @@ public:
   /** Copies block pointer and savepoint from another btr_est_cur_t in the case
   if both left and right border cursors point to the same block.
   @param o reference to the other btr_est_cur_t object. */
-  void set_block(const btr_est_cur_t &o)
-  {
-    m_block= o.m_block;
-    m_savepoint= o.m_savepoint;
-  }
+  void set_block(const btr_est_cur_t &o) { m_block= o.m_block; }

   /** @return current record number. */
   ulint nth_rec() const { return m_nth_rec; }
@@ -5239,7 +5233,6 @@ static ha_rows btr_estimate_n_rows_in_range_on_level(
   pages before reaching right_page_no, then we estimate the average from the
   pages scanned so far. */
   static constexpr uint n_pages_read_limit= 9;
-  ulint savepoint= 0;
   buf_block_t *block= nullptr;
   const dict_index_t *index= left_cur.index();
@@ -5269,9 +5262,6 @@
   {
     page_t *page;
     buf_block_t *prev_block= block;
-    ulint prev_savepoint= savepoint;
-    savepoint= mtr.get_savepoint();
     /* Fetch the page. */
     block= btr_block_get(*index, page_id.page_no(), RW_S_LATCH, !level, &mtr,
@@ -5279,9 +5269,11 @@
     if (prev_block)
     {
-      mtr.rollback_to_savepoint(prev_savepoint, prev_savepoint + 1);
-      if (block)
-        savepoint--;
+      ulint savepoint = mtr.get_savepoint();
+      /* Index s-lock, p1, p2 latches, can also be p1 and p2 parent latch if
+      they are not diverged */
+      ut_ad(savepoint >= 3);
+      mtr.rollback_to_savepoint(savepoint - 2, savepoint - 1);
     }
     if (!block || btr_page_get_level(buf_block_get_frame(block)) != level)
@@ -5312,8 +5304,8 @@
   if (block)
   {
-    ut_ad(block == mtr.at_savepoint(savepoint));
-    mtr.rollback_to_savepoint(savepoint, savepoint + 1);
+    ut_ad(block == mtr.at_savepoint(mtr.get_savepoint() - 1));
+    mtr.rollback_to_savepoint(mtr.get_savepoint() - 1);
   }
   return (n_rows);
@@ -5322,8 +5314,8 @@ inexact:
   if (block)
   {
-    ut_ad(block == mtr.at_savepoint(savepoint));
-    mtr.rollback_to_savepoint(savepoint, savepoint + 1);
+    ut_ad(block == mtr.at_savepoint(mtr.get_savepoint() - 1));
+    mtr.rollback_to_savepoint(mtr.get_savepoint() - 1);
   }
   is_n_rows_exact= false;
@@ -5517,8 +5509,12 @@ search_loop:
   {
     ut_ad(height > 0);
     height--;
-    p1.get_child(&offsets, &heap);
-    p2.get_child(&offsets, &heap);
+    ut_ad(mtr.memo_contains(p1.index()->lock, MTR_MEMO_S_LOCK));
+    ut_ad(mtr.memo_contains_flagged(p1.block(), MTR_MEMO_PAGE_S_FIX));
+    p1.read_child_page_id(&offsets, &heap);
+    ut_ad(mtr.memo_contains(p2.index()->lock, MTR_MEMO_S_LOCK));
+    ut_ad(mtr.memo_contains_flagged(p2.block(), MTR_MEMO_PAGE_S_FIX));
+    p2.read_child_page_id(&offsets, &heap);
     goto search_loop;
   }
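
The hunks above drop the per-cursor m_savepoint bookkeeping and instead rely on the position of latches in the mini-transaction memo (index lock at slot 0, parent page at slot 1). The toy model below illustrates that savepoint arithmetic under the assumption that rollback_to_savepoint(begin, end) releases exactly the memo entries in [begin, end); it is an illustration only, not InnoDB's mtr_t:

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

struct toy_mtr
{
  std::vector<std::string> latched;                 /* memo of held latches */
  std::size_t get_savepoint() const { return latched.size(); }
  void latch(const std::string &what) { latched.push_back(what); }
  void rollback_to_savepoint(std::size_t begin, std::size_t end)
  { /* release exactly the memo entries in [begin, end) */
    latched.erase(latched.begin() + begin, latched.begin() + end);
  }
};

int main()
{
  toy_mtr mtr;
  mtr.latch("index S-lock");   /* memo slot 0 */
  mtr.latch("parent page");    /* memo slot 1 */
  mtr.latch("child page");     /* memo slot 2 */
  /* As in the rewritten fetch_child(): once the child is latched, the parent
     latch sits in slot 1, so no per-cursor savepoint needs to be remembered. */
  assert(mtr.get_savepoint() >= 2);
  mtr.rollback_to_savepoint(1, 2);
  assert(mtr.latched.size() == 2 && mtr.latched[1] == "child page");
  return 0;
}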

@@ -2118,14 +2118,13 @@ static bool ha_validate(const hash_table_t *table,
 }

 /** Validates the search system for given hash table.
-@param[in] hash_table_id hash table to validate
-@return TRUE if ok */
-static
-ibool
-btr_search_hash_table_validate(ulint hash_table_id)
+@param thd connection, for checking if CHECK TABLE has been killed
+@param hash_table_id hash table to validate
+@return true if ok */
+static bool btr_search_hash_table_validate(THD *thd, ulint hash_table_id)
 {
   ha_node_t* node;
-  ibool ok = TRUE;
+  bool ok = true;
   ulint i;
   ulint cell_count;
   mem_heap_t* heap = NULL;
@@ -2133,9 +2132,15 @@ btr_search_hash_table_validate(ulint hash_table_id)
   rec_offs* offsets = offsets_;

   btr_search_x_lock_all();
-  if (!btr_search_enabled) {
+  if (!btr_search_enabled || (thd && thd_kill_level(thd))) {
+func_exit:
     btr_search_x_unlock_all();
-    return(TRUE);
+
+    if (UNIV_LIKELY_NULL(heap)) {
+      mem_heap_free(heap);
+    }
+
+    return ok;
   }
@@ -2162,8 +2167,8 @@ btr_search_hash_table_validate(ulint hash_table_id)
     btr_search_x_lock_all();

-    if (!btr_search_enabled) {
-      ok = true;
+    if (!btr_search_enabled
+        || (thd && thd_kill_level(thd))) {
       goto func_exit;
     }
@@ -2270,8 +2275,8 @@ state_ok:
     btr_search_x_lock_all();

-    if (!btr_search_enabled) {
-      ok = true;
+    if (!btr_search_enabled
+        || (thd && thd_kill_level(thd))) {
       goto func_exit;
     }
@@ -2292,33 +2297,23 @@ state_ok:
     ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1);

     if (!ha_validate(&part.table, i, end_index)) {
-      ok = FALSE;
+      ok = false;
     }
   }

   mysql_mutex_unlock(&buf_pool.mutex);

-func_exit:
-  btr_search_x_unlock_all();
-
-  if (UNIV_LIKELY_NULL(heap)) {
-    mem_heap_free(heap);
-  }
-
-  return(ok);
+  goto func_exit;
 }

-/** Validate the search system.
-@return true if ok. */
-bool
-btr_search_validate()
+/** Validates the search system.
+@param thd connection, for checking if CHECK TABLE has been killed
+@return true if ok */
+bool btr_search_validate(THD *thd)
 {
-  for (ulint i = 0; i < btr_ahi_parts; ++i) {
-    if (!btr_search_hash_table_validate(i)) {
-      return(false);
-    }
-  }
-
-  return(true);
+  for (ulint i= 0; i < btr_ahi_parts; ++i)
+    if (!btr_search_hash_table_validate(thd, i))
+      return(false);
+  return true;
 }

 #ifdef UNIV_DEBUG
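
The rewritten btr_search_hash_table_validate() takes the THD so that a long CHECK TABLE can be interrupted: each time the latches are re-acquired, the loop re-checks both btr_search_enabled and thd_kill_level(). Below is a simplified sketch of that bail-out pattern; toy_thd and the stubbed kill-level check are stand-ins for this illustration, not server APIs:

#include <atomic>
#include <cstdio>

static std::atomic<bool> search_enabled{true};

struct toy_thd { int kill_level; };

/* Stand-in for thd_kill_level(); thd may be NULL for internal callers. */
static int toy_thd_kill_level(const toy_thd *thd)
{ return thd ? thd->kill_level : 0; }

static bool validate_hash_table(const toy_thd *thd, unsigned long cells)
{
  bool ok= true;
  for (unsigned long i= 0; i < cells; i++)
  {
    /* Re-checked periodically in the real code after re-latching. */
    if (!search_enabled.load() || (thd && toy_thd_kill_level(thd)))
      return ok;            /* bail out early instead of scanning every cell */
    /* ... validate cell i here ... */
  }
  return ok;
}

int main()
{
  toy_thd thd= { 1 };       /* simulate KILL arriving during CHECK TABLE */
  std::printf("%d\n", validate_hash_table(&thd, 1000000UL));
  return 0;
}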

@@ -1864,8 +1864,7 @@ inline void log_t::write_checkpoint(lsn_t end_lsn) noexcept
       ib::info() << "Resized log to " << ib::bytes_iec{resizing_completed}
                  << "; start LSN=" << resizing;
     else
-      sql_print_error("InnoDB: Resize of log failed at " LSN_PF,
-                      get_flushed_lsn());
+      buf_flush_ahead(end_lsn + 1, false);
 }

 /** Initiate a log checkpoint, discarding the start of the log.

@@ -13674,13 +13674,12 @@ err_exit:
   }

   if (!table->no_rollback())
-  {
     err= trx->drop_table_foreign(table->name);
+
   if (err == DB_SUCCESS && table_stats && index_stats)
     err= trx->drop_table_statistics(table->name);
   if (err != DB_SUCCESS)
     goto err_exit;
-  }

   err= trx->drop_table(*table);
   if (err != DB_SUCCESS)
@@ -15342,7 +15341,8 @@ ha_innobase::check(
   /* We validate the whole adaptive hash index for all tables
   at every CHECK TABLE only when QUICK flag is not present. */
-  if (!(check_opt->flags & T_QUICK) && !btr_search_validate()) {
+  if (!(check_opt->flags & T_QUICK)
+      && !btr_search_validate(m_prebuilt->trx->mysql_thd)) {
     push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
                  ER_NOT_KEYFILE,
                  "InnoDB: The adaptive hash index is corrupted.");

@@ -3882,7 +3882,7 @@ i_s_innodb_buffer_page_get_info(
   page_info->state = bpage->state();

-  if (page_info->state < buf_page_t::FREED) {
+  if (page_info->state < buf_page_t::UNFIXED) {
     page_info->page_type = I_S_PAGE_TYPE_UNKNOWN;
     page_info->compressed_only = false;
   } else {

@@ -127,8 +127,9 @@ void btr_search_update_hash_on_insert(btr_cur_t *cursor,
 void btr_search_update_hash_on_delete(btr_cur_t *cursor);

 /** Validates the search system.
+@param thd connection, for checking if CHECK TABLE has been killed
 @return true if ok */
-bool btr_search_validate();
+bool btr_search_validate(THD *thd);

 /** Lock all search latches in exclusive mode. */
 static inline void btr_search_x_lock_all();

@@ -499,7 +499,14 @@ log_t::resize_start_status log_t::resize_start(os_offset_t size) noexcept
   log_resize_release();

   if (start_lsn)
+  {
+    mysql_mutex_lock(&buf_pool.flush_list_mutex);
+    lsn_t target_lsn= buf_pool.get_oldest_modification(0);
+    if (start_lsn < target_lsn)
+      start_lsn= target_lsn + 1;
+    mysql_mutex_unlock(&buf_pool.flush_list_mutex);
     buf_flush_ahead(start_lsn, false);
+  }

   return status;
 }
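
The added block clamps the LSN passed to buf_flush_ahead() so that it is ahead of the oldest modification in the flush list. A minimal sketch of just that arithmetic, with invented names:

#include <cstdint>
#include <cstdio>

typedef std::uint64_t lsn_t;

/* Mirrors the added clamp: never ask buf_flush_ahead() for a target that is
   not ahead of the oldest dirty-page LSN. */
static lsn_t clamp_flush_target(lsn_t start_lsn, lsn_t oldest_modification)
{
  if (start_lsn < oldest_modification)
    start_lsn= oldest_modification + 1;
  return start_lsn;
}

int main()
{
  std::printf("%llu\n",
              (unsigned long long) clamp_flush_target(1000, 5000)); /* 5001 */
  std::printf("%llu\n",
              (unsigned long long) clamp_flush_target(9000, 5000)); /* 9000 */
  return 0;
}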

@@ -479,6 +479,13 @@ static ulint row_merge_bulk_buf_add(row_merge_buf_t* buf,
     ulint fixed_len= ifield->fixed_len;

+    /* CHAR in ROW_FORMAT=REDUNDANT is always
+    fixed-length, but in the temporary file it is
+    variable-length for variable-length character sets. */
+    if (fixed_len && !index->table->not_redundant() &&
+        col->mbminlen != col->mbmaxlen)
+      fixed_len= 0;
+
     if (fixed_len);
     else if (len < 128 || (!DATA_BIG_COL(col)))
       extra_size++;
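
This check is the server-side part of the MDEV-31537 fix exercised by the tests above: a CHAR column in a ROW_FORMAT=REDUNDANT table is fixed-length in the table itself, but has to be written as variable-length to the bulk-insert temporary file when its character set is multi-byte. A small self-contained sketch of that rule; the toy_* types are stand-ins, not the row0merge.cc definitions:

#include <cstdio>

struct toy_col   { unsigned mbminlen, mbmaxlen; };  /* charset byte lengths */
struct toy_table { bool not_redundant; };           /* ROW_FORMAT != REDUNDANT */

/* Treat CHAR as variable-length in the temporary file when the table is
   REDUNDANT and the character set is multi-byte (mbminlen != mbmaxlen). */
static unsigned temp_file_fixed_len(unsigned fixed_len,
                                    const toy_table &table,
                                    const toy_col &col)
{
  if (fixed_len && !table.not_redundant && col.mbminlen != col.mbmaxlen)
    return 0;
  return fixed_len;
}

int main()
{
  toy_table redundant= { false };
  toy_col utf8mb3= { 1, 3 };     /* CHAR(1) CHARACTER SET utf8: 1..3 bytes */
  std::printf("%u\n", temp_file_fixed_len(3, redundant, utf8mb3)); /* prints 0 */
  return 0;
}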

@@ -12,6 +12,10 @@
 --source include/default_optimizer_switch.inc

+set
+@rgmm_hist_type=@@histogram_type,
+histogram_type=double_prec_hb;
+
 --disable_warnings
 drop table if exists t1;
 --enable_warnings
@@ -1436,3 +1440,5 @@ SHOW SESSION STATUS LIKE 'Handler_read%';
 DROP TABLE t1;

 --echo # End of test for Bug#18109609
+
+set histogram_type=@rgmm_hist_type;

@@ -1,5 +1,8 @@
 set @debug_tmp= @@debug_dbug;
 set global debug_dbug="+d,force_group_by";
+set
+@rgmm_hist_type=@@histogram_type,
+histogram_type=double_prec_hb;
 drop table if exists t1;
 create table t1 (
 a1 char(64), a2 char(64), b char(16), c char(16) not null, d char(16), dummy char(248) default ' '
@@ -3523,4 +3526,5 @@ Handler_read_rnd_deleted 0
 Handler_read_rnd_next 0
 DROP TABLE t1;
 # End of test for Bug#18109609
+set histogram_type=@rgmm_hist_type;
 set global debug_dbug=@debug_tmp;