mirror of
https://github.com/MariaDB/server.git
synced 2025-05-29 21:42:28 +03:00
- InnoDB DDL results in `Duplicate entry' if concurrent DML throws duplicate key error. The following scenario explains the problem: connection con1: ALTER TABLE t1 FORCE; connection con2: INSERT INTO t1(pk, uk) VALUES (2, 2), (3, 2); In connection con2, InnoDB throws the 'DUPLICATE KEY' error because of unique index. Alter operation will throw the error when applying the concurrent DML log. - Inserting the duplicate key for unique index logs the insert operation for online ALTER TABLE. When insertion fails, transaction does rollback and it leads to logging of delete operation for online ALTER TABLE. While applying the insert log entries, alter operation encounters 'DUPLICATE KEY' error. - To avoid the above fake duplicate scenario, InnoDB should not write any log for online ALTER TABLE before DML transaction commit. - User thread which does DML can apply the online log if InnoDB ran out of online log and index is marked as completed. Set online log error if apply phase encountered any error. It can also clear all other indexes' logs and mark the newly added indexes as corrupted. - Removed the old online code which was a part of DML operations. commit_inplace_alter_table(): Does apply the online log for the last batch of secondary index log and does free the log for the completed index. trx_t::apply_online_log: Set to true while writing the undo log if the modified table has active DDL trx_t::apply_log(): Apply the DML changes to online DDL tables dict_table_t::is_active_ddl(): Returns true if the table has an active DDL dict_index_t::online_log_make_dummy(): Assign dummy value for clustered index online log to indicate the secondary indexes are being rebuilt.
dict_index_t::online_log_is_dummy(): Check whether the online log has a dummy value ha_innobase_inplace_ctx::log_failure(): Handle the apply log failure for online DDL transaction row_log_mark_other_online_index_abort(): Clear out all other online index logs after encountering the error during row_log_apply() row_log_get_error(): Get the error that happened during row_log_apply() row_log_online_op(): Does apply the online log if index is completed and ran out of memory. Returns false if apply log fails UndorecApplier: Introduced a class to maintain the undo log record, latched undo buffer page, parse the undo log record, maintain the undo record type, info bits and update vector UndorecApplier::get_old_rec(): Get the correct version of the clustered index record that was modified by the current undo log record UndorecApplier::clear_undo_rec(): Clear the undo log related information after applying the undo log record UndorecApplier::log_update(): Handle the update, delete undo log and apply it on online indexes UndorecApplier::log_insert(): Handle the insert undo log and apply it on online indexes UndorecApplier::is_same(): Check whether the given roll pointer is generated by the current undo log record information trx_t::rollback_low(): Set apply_online_log for the transaction after the partially rolled-back transaction has any active DDL prepare_inplace_alter_table_dict(): After allocating the online log, InnoDB does create fulltext common tables. Fulltext index doesn't allow the index to be online. So removed the dead code of online log removal. Thanks to Marko Mäkelä for providing the initial prototype and Matthias Leich for testing the issue patiently.
175 lines
5.5 KiB
Plaintext
# NOTE(review): removed scraped "|" separator lines throughout — mysqltest
# would reject them as invalid commands.
-- source include/have_debug.inc
-- source include/have_innodb.inc
-- source include/count_sessions.inc
-- source include/have_debug_sync.inc

# This test is slow on buildbot.
--source include/big_test.inc

let $MYSQLD_DATADIR= `select @@datadir`;
#
# Test for BUG# 12739098, check whether trx->error_status is reset on error.
#
CREATE TABLE t1(c1 INT NOT NULL, c2 INT, PRIMARY KEY(c1)) Engine=InnoDB;
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES (1,1),(2,2),(3,3),(4,4),(5,5);

SET @saved_debug_dbug = @@SESSION.debug_dbug;
# Inject "too many concurrent trxs" failures into the build, rename and
# drop phases of the inplace index creation.
SET DEBUG_DBUG='+d,ib_build_indexes_too_many_concurrent_trxs, ib_rename_indexes_too_many_concurrent_trxs, ib_drop_index_too_many_concurrent_trxs';
--error ER_TOO_MANY_CONCURRENT_TRXS
ALTER TABLE t1 ADD UNIQUE INDEX(c2);
SET DEBUG_DBUG = @saved_debug_dbug;

SHOW CREATE TABLE t1;
DROP TABLE t1;
#
# Test for Bug#13861218 Records are not fully sorted during index creation
#
CREATE TABLE bug13861218 (c1 INT NOT NULL, c2 INT NOT NULL, INDEX(c2))
ENGINE=InnoDB;
INSERT INTO bug13861218 VALUES (8, 0), (4, 0), (0, 0);
SET DEBUG_DBUG = '+d,ib_row_merge_buf_add_two';
# Force creation of a PRIMARY KEY on c1 to see what happens on the index(c2).
# No crash here, because n_uniq for c2 includes the clustered index fields
CREATE UNIQUE INDEX ui ON bug13861218(c1);
SET DEBUG_DBUG = @saved_debug_dbug;
DROP TABLE bug13861218;

CREATE TABLE bug13861218 (c1 INT NOT NULL, c2 INT UNIQUE) ENGINE=InnoDB;
INSERT INTO bug13861218 VALUES (8, NULL), (4, NULL), (0, NULL);
SET DEBUG_DBUG = '+d,ib_row_merge_buf_add_two';
# Force creation of a PRIMARY KEY on c1 to see what happens on the index(c2).
# assertion failure: ut_ad(cmp_dtuple_rec(dtuple, rec, rec_offsets) > 0)
CREATE UNIQUE INDEX ui ON bug13861218(c1);
SET DEBUG_DBUG = @saved_debug_dbug;
DROP TABLE bug13861218;
--echo #
--echo # Bug #17657223 EXCESSIVE TEMPORARY FILE USAGE IN ALTER TABLE
--echo #

# Error during file creation in alter operation.
# t480 is a helper table of 480 rows used to bulk-populate other tables.
create table t480(a serial)engine=innodb;
insert into t480
values(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
(),(),(),(),(),(),(),();
insert into t480 select 0 from t480;
insert into t480 select 0 from t480;
insert into t480 select 0 from t480;
insert into t480 select 0 from t480;

# Error during file write in alter operation.
create table t1(f1 int auto_increment not null,
f2 char(200) not null, f3 char(200) not null,
primary key(f1,f2,f3), key(f1))engine=innodb;
insert into t1 select NULL,'aaa','bbb' from t480;
insert into t1 select NULL,'aaaa','bbbb' from t480;
insert into t1 select NULL,'aaaaa','bbbbb' from t480;
insert into t1 select NULL,'aaaaaa','bbbbbb' from t480;
SET DEBUG_DBUG = '+d,row_merge_write_failure';
--error ER_TEMP_FILE_WRITE_FAILURE
alter table t1 drop primary key,add primary key(f2,f1);
SET DEBUG_DBUG = @saved_debug_dbug;
drop table t1;
# Optimize table via inplace algorithm, with a concurrent insert applied
# through the online ALTER log while the rebuild is paused at the sync point.
connect (con1,localhost,root);
create table t1(k1 int auto_increment primary key,
k2 char(200),k3 char(200))engine=innodb;
insert into t1 values(NULL,'a','b'), (NULL,'aa','bb');
SET DEBUG_SYNC= 'row_merge_after_scan
SIGNAL opened WAIT_FOR flushed';
send optimize table t1;
connection default;
SET DEBUG_SYNC= 'now WAIT_FOR opened';
INSERT INTO t1 select NULL,'aaa','bbb' from t480;
SET DEBUG_SYNC= 'now SIGNAL flushed';
connection con1;
--enable_info
--echo /*con1 reap*/ Optimize table t1;
reap;
--disable_info
SELECT COUNT(k1),k2,k3 FROM t1 GROUP BY k2,k3;
drop table t1;
# Log file creation failure.
create table t1(k1 int auto_increment primary key,
k2 char(200),k3 char(200))engine=innodb;
INSERT INTO t1 VALUES(1, "test", "test");
SET DEBUG_SYNC= 'row_merge_after_scan
SIGNAL opened WAIT_FOR flushed';
send ALTER TABLE t1 FORCE, ADD COLUMN k4 int;
connection default;
SET DEBUG_SYNC= 'now WAIT_FOR opened';
# NOTE(review): uses 'debug' while the rest of the file uses DEBUG_DBUG;
# presumably these alias the same variable in MariaDB — confirm intended.
SET debug = '+d,row_log_tmpfile_fail';
INSERT INTO t1 select NULL,'aaa','bbb' from t480;
INSERT INTO t1 select NULL,'aaaa','bbbb' from t480;
SET DEBUG_SYNC= 'now SIGNAL flushed';
SET DEBUG_DBUG = @saved_debug_dbug;
connection con1;
--echo /*con1 reap*/ ALTER TABLE t1 ADD COLUMN k4 int;
--error ER_OUT_OF_RESOURCES
reap;
SELECT COUNT(k1),k2,k3 FROM t1 GROUP BY k2,k3;
disconnect con1;
connection default;
show create table t1;
drop table t1;
drop table t480;
--echo #
--echo # MDEV-12827 Assertion failure when reporting duplicate key error
--echo # in online table rebuild
--echo #

CREATE TABLE t1 (j INT UNIQUE, i INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES(2, 2);
--connect (con1,localhost,root,,test)
SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL built WAIT_FOR log';
--send
ALTER TABLE t1 DROP j, ADD UNIQUE INDEX(i), FORCE;

--connection default
SET DEBUG_SYNC='now WAIT_FOR built';
SET DEBUG_DBUG='+d,row_ins_row_level';
# Two duplicate values for the unique index being built online.
INSERT INTO t1 (i) VALUES (0),(0);
SET DEBUG_SYNC='now SIGNAL log';
SET DEBUG_DBUG=@saved_debug_dbug;

--connection con1
--error ER_DUP_ENTRY
reap;
DELETE FROM t1;
ALTER TABLE t1 ADD UNIQUE INDEX(i);
SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL built2 WAIT_FOR log2';
--send
ALTER TABLE t1 DROP j, FORCE;

--connection default
SET DEBUG_SYNC='now WAIT_FOR built2';
INSERT INTO t1 (i) VALUES (0),(1);
# The duplicate-failing UPDATE must not poison the online log (see MDEV-12827).
--error ER_DUP_ENTRY
UPDATE t1 SET i=0;
SET DEBUG_SYNC='now SIGNAL log2';

--connection con1
reap;
--disconnect con1
--connection default
SET DEBUG_SYNC='RESET';
DROP TABLE t1;

SET DEBUG_SYNC='RESET';
--source include/wait_until_count_sessions.inc
--echo #
--echo # BUG#21612714 ALTER TABLE SORTING SKIPPED WHEN CHANGE PK AND DROP
--echo # LAST COLUMN OF OLD PK
--echo #

# Assert (via debug injection) that no sort happens for these PK changes.
SET DEBUG_DBUG = '+d,innodb_alter_table_pk_assert_no_sort';

--source suite/innodb/include/alter_table_pk_no_sort.inc

SET DEBUG_DBUG = @saved_debug_dbug;