MDEV-12827 Assertion failure when reporting duplicate key error in online table rebuild
row_log_table_apply_insert_low(), row_log_table_apply_update(): When reporting the error_key_num, only count the clustered index if it corresponds to a key in the SQL layer.

The assertion failure was probably introduced by the (incomplete) MySQL 5.6.28 bug fix Bug #21364096 THE BOGUS DUPLICATE KEY ERROR IN ONLINE DDL WITH INCORRECT KEY NAME, which we are improving.

Side note: the fix was incorrectly merged to MySQL 5.7.10; incorrect key names will continue to be reported in MySQL 5.7.
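The rule the fix establishes is that error_key_num is a SQL-layer key number: the clustered index advances the counter only when it is a user-defined key, so a hidden GEN_CLUST_INDEX never shifts the numbering of the secondary indexes. Below is a minimal standalone C++ sketch of that counting rule, under stated assumptions: the Index struct, report_error_key_num and the sample data are illustrative stand-ins, not InnoDB source.

#include <cstdio>
#include <vector>

// Simplified stand-in for an InnoDB index (illustrative only).
struct Index {
	const char*	name;
	bool		clustered;	/* first (clustered) index of the table */
	bool		user_key;	/* visible as a key in the SQL layer */
	bool		duplicate;	/* pretend this index hit DB_DUPLICATE_KEY */
};

// Return the SQL-layer key number to report for the first duplicate,
// or -1 when no duplicate occurred. The clustered index advances the
// counter only when it corresponds to a user-defined key.
static int report_error_key_num(const std::vector<Index>& indexes)
{
	int n_index = 0;
	for (const Index& index : indexes) {
		if (index.duplicate) {
			return n_index;
		}
		if (!index.clustered || index.user_key) {
			n_index++;
		}
	}
	return -1;
}

int main()
{
	// Shape of the test case after ALTER TABLE t1 DROP j:
	// a hidden clustered index plus UNIQUE KEY i, which hits a duplicate.
	std::vector<Index> t1 = {
		{"GEN_CLUST_INDEX", true, false, false},
		{"i", false, true, true},
	};
	// Prints 0: the duplicate is attributed to key number 0 ("i"),
	// not to an out-of-range number that would trip the assertion.
	std::printf("error_key_num = %d\n", report_error_key_num(t1));
	return 0;
}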
@@ -67,3 +67,25 @@ alter table t1 force, algorithm=inplace;
 ERROR HY000: Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space
 SET DEBUG_DBUG = @saved_debug_dbug;
 drop table t1, t480;
+#
+# MDEV-12827 Assertion failure when reporting duplicate key error
+# in online table rebuild
+#
+CREATE TABLE t1 (j INT UNIQUE, i INT UNIQUE) ENGINE=InnoDB;
+SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL built WAIT_FOR log';
+ALTER TABLE t1 DROP j, FORCE;
+SET DEBUG_SYNC='now WAIT_FOR built';
+INSERT INTO t1 (i) VALUES (0),(0);
+ERROR 23000: Duplicate entry '0' for key 'i'
+SET DEBUG_SYNC='now SIGNAL log';
+ERROR 23000: Duplicate entry '0' for key 'i'
+SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL built2 WAIT_FOR log2';
+ALTER TABLE t1 DROP j, FORCE;
+SET DEBUG_SYNC='now WAIT_FOR built2';
+INSERT INTO t1 (i) VALUES (0),(1);
+UPDATE t1 SET i=0;
+ERROR 23000: Duplicate entry '0' for key 'i'
+SET DEBUG_SYNC='now SIGNAL log2';
+ERROR 23000: Duplicate entry '0' for key 'i'
+SET DEBUG_SYNC='RESET';
+DROP TABLE t1;
@@ -1,7 +1,6 @@
 -- source include/have_debug.inc
 -- source include/have_innodb.inc
-
-let $MYSQLD_DATADIR= `select @@datadir`;
+-- source include/have_debug_sync.inc
 
 let $per_table=`select @@innodb_file_per_table`;
 let $format=`select @@innodb_file_format`;
@@ -82,3 +81,42 @@ SET DEBUG_DBUG = '+d,innobase_tmpfile_creation_failure';
 alter table t1 force, algorithm=inplace;
 SET DEBUG_DBUG = @saved_debug_dbug;
 drop table t1, t480;
+
+--echo #
+--echo # MDEV-12827 Assertion failure when reporting duplicate key error
+--echo # in online table rebuild
+--echo #
+
+CREATE TABLE t1 (j INT UNIQUE, i INT UNIQUE) ENGINE=InnoDB;
+--connect (con1,localhost,root,,test)
+SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL built WAIT_FOR log';
+--send
+ALTER TABLE t1 DROP j, FORCE;
+
+--connection default
+SET DEBUG_SYNC='now WAIT_FOR built';
+--error ER_DUP_ENTRY
+INSERT INTO t1 (i) VALUES (0),(0);
+SET DEBUG_SYNC='now SIGNAL log';
+
+--connection con1
+--error ER_DUP_ENTRY
+reap;
+SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL built2 WAIT_FOR log2';
+--send
+ALTER TABLE t1 DROP j, FORCE;
+
+--connection default
+SET DEBUG_SYNC='now WAIT_FOR built2';
+INSERT INTO t1 (i) VALUES (0),(1);
+--error ER_DUP_ENTRY
+UPDATE t1 SET i=0;
+SET DEBUG_SYNC='now SIGNAL log2';
+
+--connection con1
+--error ER_DUP_ENTRY
+reap;
+--disconnect con1
+--connection default
+SET DEBUG_SYNC='RESET';
+DROP TABLE t1;
@@ -1520,13 +1520,10 @@ row_log_table_apply_insert_low(
 		return(error);
 	}
 
-	do {
-		n_index++;
-
-		if (!(index = dict_table_get_next_index(index))) {
-			break;
-		}
+	ut_ad(dict_index_is_clust(index));
 
+	for (n_index += index->type != DICT_CLUSTERED;
+	     (index = dict_table_get_next_index(index)); n_index++) {
 		if (index->type & DICT_FTS) {
 			continue;
 		}
@@ -1536,12 +1533,13 @@ row_log_table_apply_insert_low(
 			flags, BTR_MODIFY_TREE,
 			index, offsets_heap, heap, entry, trx_id, thr);
 
-		/* Report correct index name for duplicate key error. */
-		if (error == DB_DUPLICATE_KEY) {
-			thr_get_trx(thr)->error_key_num = n_index;
+		if (error != DB_SUCCESS) {
+			if (error == DB_DUPLICATE_KEY) {
+				thr_get_trx(thr)->error_key_num = n_index;
+			}
+			break;
 		}
-
-	} while (error == DB_SUCCESS);
+	}
 
 	return(error);
 }
@@ -2120,17 +2118,16 @@ func_exit_committed:
 		dtuple_big_rec_free(big_rec);
 	}
 
-	while ((index = dict_table_get_next_index(index)) != NULL) {
-		if (error != DB_SUCCESS) {
-			break;
-		}
-
-		n_index++;
-
+	for (n_index += index->type != DICT_CLUSTERED;
+	     (index = dict_table_get_next_index(index)); n_index++) {
 		if (index->type & DICT_FTS) {
 			continue;
 		}
 
+		if (error != DB_SUCCESS) {
+			break;
+		}
+
 		if (!row_upd_changes_ord_field_binary(
 			    index, update, thr, old_row, NULL)) {
 			continue;