From 50de7d13036c7bacbaf460bfcaa77cfbe1ad4123 Mon Sep 17 00:00:00 2001 From: Monty Date: Thu, 18 Jan 2018 01:41:52 +0200 Subject: [PATCH 01/17] Fixed MDEV-14326 engine ARIA with row_format=FIXED is broken The problem was that max_size was acciently set to 1 in some cases. Other things: - Adjust max_rows if min_rows > max_rows. - Removed not used variable varchar_length - Adjusted max_pack_length (safety fix) --- mysql-test/suite/maria/max_length.result | 105 +++++++++++++++++++++++ mysql-test/suite/maria/max_length.test | 72 ++++++++++++++++ storage/maria/ma_create.c | 15 ++-- 3 files changed, 184 insertions(+), 8 deletions(-) diff --git a/mysql-test/suite/maria/max_length.result b/mysql-test/suite/maria/max_length.result index 049b92eafe5..177810f32a3 100644 --- a/mysql-test/suite/maria/max_length.result +++ b/mysql-test/suite/maria/max_length.result @@ -54,3 +54,108 @@ Table Op Msg_type Msg_text test.t1 check warning Datafile is almost full, 268230656 of 268320768 used test.t1 check status OK drop table t1,t2; +create table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=FIXED min_rows=1000000; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` int(10) unsigned DEFAULT NULL, + `c2` char(80) DEFAULT NULL +) ENGINE=Aria DEFAULT CHARSET=latin1 MIN_ROWS=1000000 PAGE_CHECKSUM=1 ROW_FORMAT=FIXED +insert into t1 select seq,seq from seq_1_to_100000; +create or replace table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=FIXED; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` int(10) unsigned DEFAULT NULL, + `c2` char(80) DEFAULT NULL +) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 ROW_FORMAT=FIXED +insert into t1 select seq,seq from seq_1_to_100000; +create or replace table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=PAGE TRANSACTIONAL=0; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` int(10) unsigned DEFAULT NULL, + `c2` char(80) DEFAULT NULL +) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 ROW_FORMAT=PAGE TRANSACTIONAL=0 +insert into t1 select seq,seq from seq_1_to_100000; +create or replace table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=FIXED MAX_ROWS=10; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` int(10) unsigned DEFAULT NULL, + `c2` char(80) DEFAULT NULL +) ENGINE=Aria DEFAULT CHARSET=latin1 MAX_ROWS=10 PAGE_CHECKSUM=1 ROW_FORMAT=FIXED +insert into t1 select seq,seq from seq_1_to_100000; +ERROR HY000: The table 't1' is full +select count(*) from t1; +count(*) +65535 +create or replace table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=DYNAMIC MAX_ROWS=10; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` int(10) unsigned DEFAULT NULL, + `c2` char(80) DEFAULT NULL +) ENGINE=Aria DEFAULT CHARSET=latin1 MAX_ROWS=10 PAGE_CHECKSUM=1 ROW_FORMAT=DYNAMIC +insert into t1 select seq,seq from seq_1_to_100000; +ERROR HY000: The table 't1' is full +select count(*) from t1; +count(*) +3276 +check table t1; +Table Op Msg_type Msg_text +test.t1 check warning Datafile is almost full, 65520 of 65535 used +test.t1 check status OK +create or replace table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=PAGE MAX_ROWS=10; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` int(10) unsigned DEFAULT NULL, + `c2` char(80) DEFAULT NULL +) ENGINE=Aria DEFAULT CHARSET=latin1 MAX_ROWS=10 PAGE_CHECKSUM=1 ROW_FORMAT=PAGE +insert into t1 select seq,seq from 
seq_1_to_100000; +select count(*) from t1; +count(*) +100000 +check table t1; +Table Op Msg_type Msg_text +test.t1 check status OK +drop table t1; +create or replace table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=PAGE MAX_ROWS=10; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` int(10) unsigned DEFAULT NULL, + `c2` char(80) DEFAULT NULL +) ENGINE=Aria DEFAULT CHARSET=latin1 MAX_ROWS=10 PAGE_CHECKSUM=1 ROW_FORMAT=PAGE +insert into t1 select seq,seq from seq_1_to_10000000; +ERROR HY000: The table 't1' is full +select count(*) from t1; +count(*) +6189940 +check table t1; +Table Op Msg_type Msg_text +test.t1 check warning Datafile is almost full, 268320768 of 268320768 used +test.t1 check status OK +drop table t1; diff --git a/mysql-test/suite/maria/max_length.test b/mysql-test/suite/maria/max_length.test index 68ad1e22aa9..2be3da8e1b0 100644 --- a/mysql-test/suite/maria/max_length.test +++ b/mysql-test/suite/maria/max_length.test @@ -2,6 +2,7 @@ # This test will use around 1.3G of disk space! --source include/have_maria.inc +--source include/have_sequence.inc --source include/big_test.inc drop table if exists t1,t2; @@ -50,3 +51,74 @@ insert into t1 (v,b) select v,b from t2; check table t1; drop table t1,t2; + +# +# Check that we don't get table-is-full +# + +create table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=FIXED min_rows=1000000; +show create table t1; +insert into t1 select seq,seq from seq_1_to_100000; + +create or replace table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=FIXED; +show create table t1; +insert into t1 select seq,seq from seq_1_to_100000; + +create or replace table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=PAGE TRANSACTIONAL=0; +show create table t1; +insert into t1 select seq,seq from seq_1_to_100000; + +# +# For these we should get table is full error +# + +create or replace table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=FIXED MAX_ROWS=10; +show create table t1; +--error ER_RECORD_FILE_FULL +insert into t1 select seq,seq from seq_1_to_100000; +select count(*) from t1; + +create or replace table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=DYNAMIC MAX_ROWS=10; +show create table t1; +--error ER_RECORD_FILE_FULL +insert into t1 select seq,seq from seq_1_to_100000; +select count(*) from t1; +check table t1; + +# PAGE uses 3 byte pointers as minimum, which can handle up to 200M files + +create or replace table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=PAGE MAX_ROWS=10; +show create table t1; +insert into t1 select seq,seq from seq_1_to_100000; +select count(*) from t1; +check table t1; +drop table t1; + +create or replace table t1 ( +c1 int unsigned, +c2 char(80) +) Engine=ARIA ROW_FORMAT=PAGE MAX_ROWS=10; +show create table t1; +--error ER_RECORD_FILE_FULL +insert into t1 select seq,seq from seq_1_to_10000000; +select count(*) from t1; +check table t1; +drop table t1; diff --git a/storage/maria/ma_create.c b/storage/maria/ma_create.c index 2c67195b3f8..94c4c250bef 100644 --- a/storage/maria/ma_create.c +++ b/storage/maria/ma_create.c @@ -70,7 +70,7 @@ int maria_create(const char *name, enum data_file_type datafile_type, myf create_flag; uint length,max_key_length,packed,pack_bytes,pointer,real_length_diff, key_length,info_length,key_segs,options,min_key_length, - base_pos,long_varchar_count,varchar_length, + base_pos,long_varchar_count, unique_key_parts,fulltext_keys,offset, not_block_record_extra_length; uint 
max_field_lengths, extra_header_size, column_nr; uint internal_table= flags & HA_CREATE_INTERNAL_TABLE; @@ -144,9 +144,6 @@ int maria_create(const char *name, enum data_file_type datafile_type, datafile_type= BLOCK_RECORD; } - if (ci->reloc_rows > ci->max_rows) - ci->reloc_rows=ci->max_rows; /* Check if wrong parameter */ - if (!(rec_per_key_part= (double*) my_malloc((keys + uniques)*HA_MAX_KEY_SEG*sizeof(double) + (keys + uniques)*HA_MAX_KEY_SEG*sizeof(ulong) + @@ -160,7 +157,7 @@ int maria_create(const char *name, enum data_file_type datafile_type, /* Start by checking fields and field-types used */ - varchar_length=long_varchar_count=packed= not_block_record_extra_length= + long_varchar_count=packed= not_block_record_extra_length= pack_reclength= max_field_lengths= 0; reclength= min_pack_length= ci->null_bytes; forced_packed= 0; @@ -232,7 +229,6 @@ int maria_create(const char *name, enum data_file_type datafile_type, } else if (type == FIELD_VARCHAR) { - varchar_length+= column->length-1; /* Used for min_pack_length */ pack_reclength++; not_block_record_extra_length++; max_field_lengths++; @@ -368,6 +364,7 @@ int maria_create(const char *name, enum data_file_type datafile_type, pack_bytes); if (!ci->data_file_length && ci->max_rows) { + set_if_bigger(ci->max_rows, ci->reloc_rows); if (pack_reclength == INT_MAX32 || (~(ulonglong) 0)/ci->max_rows < (ulonglong) pack_reclength) ci->data_file_length= ~(ulonglong) 0; @@ -401,13 +398,14 @@ int maria_create(const char *name, enum data_file_type datafile_type, else ci->max_rows= data_file_length / (min_pack_length + extra_header_size + - DIR_ENTRY_SIZE)+1; + DIR_ENTRY_SIZE); } else ci->max_rows=(ha_rows) (ci->data_file_length/(min_pack_length + ((options & HA_OPTION_PACK_RECORD) ? - 3 : 0)))+1; + 3 : 0))); + set_if_smaller(ci->reloc_rows, ci->max_rows); } max_rows= (ulonglong) ci->max_rows; if (datafile_type == BLOCK_RECORD) @@ -800,6 +798,7 @@ int maria_create(const char *name, enum data_file_type datafile_type, share.state.state.data_file_length= maria_block_size; /* Add length of packed fields + length */ share.base.pack_reclength+= share.base.max_field_lengths+3; + share.base.max_pack_length= share.base.pack_reclength; /* Adjust max_pack_length, to be used if we have short rows */ if (share.base.max_pack_length < maria_block_size) From cc915cd59973a9eaccdeb2ca4c30ab4d8878ea43 Mon Sep 17 00:00:00 2001 From: Monty Date: Thu, 18 Jan 2018 01:42:51 +0200 Subject: [PATCH 02/17] Fixed some build scripts to work with gprof and gcov --- BUILD/SETUP.sh | 1 + BUILD/compile-pentium-gprof | 2 +- BUILD/compile-pentium64-gcov | 2 +- BUILD/compile-pentium64-gprof | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh index 2d6548dda0e..3a1a861f2f8 100755 --- a/BUILD/SETUP.sh +++ b/BUILD/SETUP.sh @@ -307,3 +307,4 @@ gprof_compile_flags="-O2 -pg -g" gprof_link_flags="--disable-shared $static_link" +disable_gprof_plugins="--with-zlib-dir=bundled --without-plugin-oqgraph --without-plugin-mroonga" diff --git a/BUILD/compile-pentium-gprof b/BUILD/compile-pentium-gprof index de014e3ae8b..498c964df74 100755 --- a/BUILD/compile-pentium-gprof +++ b/BUILD/compile-pentium-gprof @@ -19,6 +19,6 @@ path=`dirname $0` . "$path/SETUP.sh" extra_flags="$pentium_cflags $gprof_compile_flags" -extra_configs="$pentium_configs $debug_configs $gprof_link_flags $disable_64_bit_plugins" +extra_configs="$pentium_configs $debug_configs $gprof_link_flags $disable_64_bit_plugins $disable_gprof_plugins" . 
"$path/FINISH.sh" diff --git a/BUILD/compile-pentium64-gcov b/BUILD/compile-pentium64-gcov index 9587c51b4e0..6e3366c79bd 100755 --- a/BUILD/compile-pentium64-gcov +++ b/BUILD/compile-pentium64-gcov @@ -28,6 +28,6 @@ export LDFLAGS="$gcov_link_flags" extra_flags="$pentium64_cflags $max_cflags $gcov_compile_flags" c_warnings="$c_warnings $debug_extra_warnings" cxx_warnings="$cxx_warnings $debug_extra_warnings" -extra_configs="$pentium_configs $debug_configs $gcov_configs $max_configs" +extra_configs="$pentium_configs $debug_configs $gcov_configs $max_configs --without-oqgraph" . "$path/FINISH.sh" diff --git a/BUILD/compile-pentium64-gprof b/BUILD/compile-pentium64-gprof index b7821e06b6e..346777a4611 100755 --- a/BUILD/compile-pentium64-gprof +++ b/BUILD/compile-pentium64-gprof @@ -20,6 +20,6 @@ path=`dirname $0` . "$path/SETUP.sh" extra_flags="$pentium64_cflags $gprof_compile_flags" -extra_configs="$pentium_configs $max_configs $gprof_link_flags --with-zlib-dir=bundled" +extra_configs="$pentium_configs $max_configs $gprof_link_flags $disable_gprof_plugins" . "$path/FINISH.sh" From 6c09a6542e94d2bcaaa7d03abe6b6cab40083f99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 18 Jan 2018 16:13:50 +0200 Subject: [PATCH 03/17] MDEV-14985 innodb_undo_log_truncate may be blocked if transactions were recovered at startup The field trx_rseg_t::trx_ref_count that was added in WL#6965 in MySQL 5.7.5 is being incremented twice if a recovered transaction includes both undo log partitions insert_undo and update_undo. This reference count is being used in trx_purge(), which invokes trx_purge_initiate_truncate() to try to truncate an undo tablespace file. Because of the double-increment, the trx_ref_count would never reach 0. It is possible that after the failed truncation attempt, the undo tablespace would be disabled for logging any new transactions until the server is restarted (hopefully after committing or rolling back all transactions, so that no transactions would be recovered on the next startup). trx_resurrect_insert(), trx_resurrect_update(): Do not increment trx_ref_count. Instead, let the caller do that. trx_lists_init_at_db_start(): Increment rseg->trx_ref_count only once for each recovered transaction. Adjust comments. Finally, if innodb_force_recovery prevents the undo log scan, do not bother iterating the empty lists. --- storage/innobase/trx/trx0trx.cc | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index ed334207b4c..0e488d6379a 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -824,10 +824,6 @@ trx_resurrect_insert( ut_d(trx->start_line = __LINE__); trx->rsegs.m_redo.rseg = rseg; - /* For transactions with active data will not have rseg size = 1 - or will not qualify for purge limit criteria. So it is safe to increment - this trx_ref_count w/o mutex protection. */ - ++trx->rsegs.m_redo.rseg->trx_ref_count; *trx->xid = undo->xid; trx->id = undo->trx_id; trx->rsegs.m_redo.insert_undo = undo; @@ -934,10 +930,6 @@ trx_resurrect_update( trx_rseg_t* rseg) /*!< in/out: rollback segment */ { trx->rsegs.m_redo.rseg = rseg; - /* For transactions with active data will not have rseg size = 1 - or will not qualify for purge limit criteria. So it is safe to increment - this trx_ref_count w/o mutex protection. 
*/ - ++trx->rsegs.m_redo.rseg->trx_ref_count; *trx->xid = undo->xid; trx->id = undo->trx_id; trx->rsegs.m_redo.update_undo = undo; @@ -991,10 +983,12 @@ trx_lists_init_at_db_start() purge_sys = UT_NEW_NOKEY(purge_sys_t()); - if (srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN) { - trx_rseg_array_init(); + if (srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN) { + return; } + trx_rseg_array_init(); + /* Look from the rollback segments if there exist undo logs for transactions. */ @@ -1002,8 +996,9 @@ trx_lists_init_at_db_start() trx_undo_t* undo; trx_rseg_t* rseg = trx_sys->rseg_array[i]; - /* At this stage non-redo rseg slots are all NULL as they are - re-created on server start and existing slots are not read. */ + /* Some rollback segment may be unavailable, + especially if the server was previously run with a + non-default value of innodb_undo_logs. */ if (rseg == NULL) { continue; } @@ -1013,6 +1008,11 @@ trx_lists_init_at_db_start() undo != NULL; undo = UT_LIST_GET_NEXT(undo_list, undo)) { + /* trx_purge() will not run before we return, + so we can safely increment this without + holding rseg->mutex. */ + ++rseg->trx_ref_count; + trx_t* trx; trx = trx_resurrect_insert(undo, rseg); @@ -1037,6 +1037,7 @@ trx_lists_init_at_db_start() if (trx == NULL) { trx = trx_allocate_for_background(); + ++rseg->trx_ref_count; ut_d(trx->start_file = __FILE__); ut_d(trx->start_line = __LINE__); From 30289a2713807dbca9b2560634379a5bd9ea86e8 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Thu, 18 Jan 2018 15:56:28 -0800 Subject: [PATCH 04/17] Fixed mdev-14969 Non-recursive Common Table Expressions used in view caused an error The function subselect_single_select_engine::print() did not print the WITH clause attached to a subselect with single select engine. As a result views using suqueries with attached WITH clauses lost these clauses when saved in frm files. 
--- mysql-test/r/cte_nonrecursive.result | 74 ++++++++++++++++++++++++++++ mysql-test/t/cte_nonrecursive.test | 47 ++++++++++++++++++ sql/item_subselect.cc | 3 ++ 3 files changed, 124 insertions(+) diff --git a/mysql-test/r/cte_nonrecursive.result b/mysql-test/r/cte_nonrecursive.result index c1f4c9fd486..4f193384d0f 100644 --- a/mysql-test/r/cte_nonrecursive.result +++ b/mysql-test/r/cte_nonrecursive.result @@ -1295,3 +1295,77 @@ TERM03 TERM03 TERM01 NULL NULL TERM04 drop table t1,t2; +# +# MDEV-14969: view using subquery with attached CTE +# +create table region ( +r_regionkey int, +r_name char(25), +primary key (r_regionkey) +); +insert into region values +(0,'AFRICA'), (1,'AMERICA'), (2,'ASIA'), (3,'EUROPE'), (4,'MIDDLE EAST'); +create table nation ( +n_nationkey int, +n_name char(25), +n_regionkey int, +primary key (n_nationkey), +key i_n_regionkey (n_regionkey) +); +insert into nation values +(0,'ALGERIA',0), (1,'ARGENTINA',1), (2,'BRAZIL',1), (3,'CANADA',1), +(4,'EGYPT',4), (5,'ETHIOPIA',0), (6,'FRANCE',3), (7,'GERMANY',3), +(8,'INDIA',2), (9,'INDONESIA',2), (10,'IRAN',4), (11,'IRAQ',4), +(12,'JAPAN',2), (13,'JORDAN',4), (14,'KENYA',0), (15,'MOROCCO',0), +(16,'MOZAMBIQUE',0), (17,'PERU',1), (18,'CHINA',2), (19,'ROMANIA',3), +(20,'SAUDI ARABIA',4), (21,'VIETNAM',2), (22,'RUSSIA',3), +(23,'UNITED KINGDOM',3), (24,'UNITED STATES',1); +select * from nation n ,region r +where n.n_regionkey = r.r_regionkey and +r.r_regionkey in +(with t as (select * from region where r_regionkey <= 3 ) +select r_regionkey from t where r_name <> "ASIA"); +n_nationkey n_name n_regionkey r_regionkey r_name +0 ALGERIA 0 0 AFRICA +5 ETHIOPIA 0 0 AFRICA +14 KENYA 0 0 AFRICA +15 MOROCCO 0 0 AFRICA +16 MOZAMBIQUE 0 0 AFRICA +1 ARGENTINA 1 1 AMERICA +2 BRAZIL 1 1 AMERICA +3 CANADA 1 1 AMERICA +17 PERU 1 1 AMERICA +24 UNITED STATES 1 1 AMERICA +6 FRANCE 3 3 EUROPE +7 GERMANY 3 3 EUROPE +19 ROMANIA 3 3 EUROPE +22 RUSSIA 3 3 EUROPE +23 UNITED KINGDOM 3 3 EUROPE +create view v as +select * from nation n ,region r +where n.n_regionkey = r.r_regionkey and +r.r_regionkey in +(with t as (select * from region where r_regionkey <= 3) +select r_regionkey from t where r_name <> "ASIA"); +show create view v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select `n`.`n_nationkey` AS `n_nationkey`,`n`.`n_name` AS `n_name`,`n`.`n_regionkey` AS `n_regionkey`,`r`.`r_regionkey` AS `r_regionkey`,`r`.`r_name` AS `r_name` from (`nation` `n` join `region` `r`) where `n`.`n_regionkey` = `r`.`r_regionkey` and `r`.`r_regionkey` in (with t as (select `region`.`r_regionkey` AS `r_regionkey`,`region`.`r_name` AS `r_name` from `region` where `region`.`r_regionkey` <= 3)select `t`.`r_regionkey` from `t` where `t`.`r_name` <> 'ASIA') latin1 latin1_swedish_ci +select * from v; +n_nationkey n_name n_regionkey r_regionkey r_name +0 ALGERIA 0 0 AFRICA +5 ETHIOPIA 0 0 AFRICA +14 KENYA 0 0 AFRICA +15 MOROCCO 0 0 AFRICA +16 MOZAMBIQUE 0 0 AFRICA +1 ARGENTINA 1 1 AMERICA +2 BRAZIL 1 1 AMERICA +3 CANADA 1 1 AMERICA +17 PERU 1 1 AMERICA +24 UNITED STATES 1 1 AMERICA +6 FRANCE 3 3 EUROPE +7 GERMANY 3 3 EUROPE +19 ROMANIA 3 3 EUROPE +22 RUSSIA 3 3 EUROPE +23 UNITED KINGDOM 3 3 EUROPE +drop view v; +drop table region, nation; diff --git a/mysql-test/t/cte_nonrecursive.test b/mysql-test/t/cte_nonrecursive.test index 9436665bfee..a092a161277 100644 --- a/mysql-test/t/cte_nonrecursive.test +++ b/mysql-test/t/cte_nonrecursive.test @@ -882,3 +882,50 @@ union all 
where c1.term is null); drop table t1,t2; + +--echo # +--echo # MDEV-14969: view using subquery with attached CTE +--echo # + +create table region ( + r_regionkey int, + r_name char(25), + primary key (r_regionkey) +); +insert into region values +(0,'AFRICA'), (1,'AMERICA'), (2,'ASIA'), (3,'EUROPE'), (4,'MIDDLE EAST'); + +create table nation ( + n_nationkey int, + n_name char(25), + n_regionkey int, + primary key (n_nationkey), + key i_n_regionkey (n_regionkey) +); +insert into nation values +(0,'ALGERIA',0), (1,'ARGENTINA',1), (2,'BRAZIL',1), (3,'CANADA',1), +(4,'EGYPT',4), (5,'ETHIOPIA',0), (6,'FRANCE',3), (7,'GERMANY',3), +(8,'INDIA',2), (9,'INDONESIA',2), (10,'IRAN',4), (11,'IRAQ',4), +(12,'JAPAN',2), (13,'JORDAN',4), (14,'KENYA',0), (15,'MOROCCO',0), +(16,'MOZAMBIQUE',0), (17,'PERU',1), (18,'CHINA',2), (19,'ROMANIA',3), +(20,'SAUDI ARABIA',4), (21,'VIETNAM',2), (22,'RUSSIA',3), +(23,'UNITED KINGDOM',3), (24,'UNITED STATES',1); + +select * from nation n ,region r + where n.n_regionkey = r.r_regionkey and + r.r_regionkey in + (with t as (select * from region where r_regionkey <= 3 ) + select r_regionkey from t where r_name <> "ASIA"); + +create view v as +select * from nation n ,region r + where n.n_regionkey = r.r_regionkey and + r.r_regionkey in + (with t as (select * from region where r_regionkey <= 3) + select r_regionkey from t where r_name <> "ASIA"); + +show create view v; +select * from v; + +drop view v; +drop table region, nation; diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 96d3bea6685..74f11ca6e41 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -4373,6 +4373,9 @@ table_map subselect_union_engine::upper_select_const_tables() void subselect_single_select_engine::print(String *str, enum_query_type query_type) { + With_clause* with_clause= select_lex->get_with_clause(); + if (with_clause) + with_clause->print(str, query_type); select_lex->print(get_thd(), str, query_type); } From f67b8273c03b4802cb97e68b0c1baf5de330a2bf Mon Sep 17 00:00:00 2001 From: Monty Date: Sun, 21 Jan 2018 17:17:16 +0200 Subject: [PATCH 05/17] Fixed wrong arguments to printf in InnoDB --- storage/innobase/handler/ha_innodb.cc | 16 ++++++------ storage/innobase/handler/handler0alter.cc | 7 +++--- storage/innobase/row/row0import.cc | 30 +++++++++++------------ storage/innobase/row/row0merge.cc | 4 +-- storage/innobase/row/row0quiesce.cc | 28 ++++++++++----------- 5 files changed, 43 insertions(+), 42 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index b42da1c025e..198cfadc195 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -2166,7 +2166,7 @@ convert_error_code_to_mysql( case DB_TOO_BIG_INDEX_COL: my_error(ER_INDEX_COLUMN_TOO_LONG, MYF(0), - DICT_MAX_FIELD_LEN_BY_FORMAT_FLAG(flags)); + (ulong) DICT_MAX_FIELD_LEN_BY_FORMAT_FLAG(flags)); return(HA_ERR_INDEX_COL_TOO_LONG); case DB_NO_SAVEPOINT: @@ -5287,7 +5287,6 @@ innobase_close_connection( "MariaDB is closing a connection that has an active " "InnoDB transaction. " TRX_ID_FMT " row modifications " "will roll back.", - " row modifications will roll back.", trx->undo_no); ut_d(ib::warn() << "trx: " << trx << " started on: " @@ -12132,7 +12131,7 @@ create_table_info_t::create_options_are_invalid() ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: invalid KEY_BLOCK_SIZE = %u." 
" Valid values are [1, 2, 4, 8, 16]", - m_create_info->key_block_size); + (uint) m_create_info->key_block_size); ret = "KEY_BLOCK_SIZE"; break; } @@ -12623,7 +12622,7 @@ index_bad: m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: ignoring KEY_BLOCK_SIZE=%u.", - m_create_info->key_block_size); + (uint) m_create_info->key_block_size); } } @@ -12646,7 +12645,7 @@ index_bad: ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: ignoring KEY_BLOCK_SIZE=%u" " unless ROW_FORMAT=COMPRESSED.", - m_create_info->key_block_size); + (uint) m_create_info->key_block_size); zip_allowed = false; } } else { @@ -14199,7 +14198,8 @@ ha_innobase::records_in_range( push_warning_printf( ha_thd(), Sql_condition::WARN_LEVEL_WARN, ER_NO_DEFAULT, - "btr_estimate_n_rows_in_range(): %f", n_rows); + "btr_estimate_n_rows_in_range(): %lld", + (longlong) n_rows); ); func_exit: @@ -22963,7 +22963,7 @@ ib_push_frm_error( "installations? See " REFMAN "innodb-troubleshooting.html\n", - ib_table->name); + ib_table->name.m_name); if (push_warning) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, @@ -23007,7 +23007,7 @@ ib_push_frm_error( "installations? See " REFMAN "innodb-troubleshooting.html\n", - ib_table->name, n_keys, + ib_table->name.m_name, n_keys, table->s->keys); if (push_warning) { diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 44c40d05b4c..9e6d81e21e4 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -300,7 +300,7 @@ my_error_innodb( break; case DB_TOO_BIG_INDEX_COL: my_error(ER_INDEX_COLUMN_TOO_LONG, MYF(0), - DICT_MAX_FIELD_LEN_BY_FORMAT_FLAG(flags)); + (ulong) DICT_MAX_FIELD_LEN_BY_FORMAT_FLAG(flags)); break; case DB_TOO_MANY_CONCURRENT_TRXS: my_error(ER_TOO_MANY_CONCURRENT_TRXS, MYF(0)); @@ -1636,7 +1636,7 @@ innobase_get_foreign_key_info( /* Not possible to add a foreign key without a referenced column */ mutex_exit(&dict_sys->mutex); - my_error(ER_CANNOT_ADD_FOREIGN, MYF(0), tbl_namep); + my_error(ER_CANNOT_ADD_FOREIGN, MYF(0)); goto err_exit; } @@ -2050,7 +2050,8 @@ innobase_check_index_keys( } #endif /* MYSQL_RENAME_INDEX */ - my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key.name); + my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), + key.name); return(ER_WRONG_NAME_FOR_INDEX); } diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index a02f60c6800..473e0c25f86 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -2463,7 +2463,7 @@ row_import_cfg_read_index_fields( ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while reading index fields."); return(DB_IO_ERROR); @@ -2499,7 +2499,7 @@ row_import_cfg_read_index_fields( ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while parsing table name."); return(err); @@ -2569,7 +2569,7 @@ row_import_read_index_data( ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - errno, strerror(errno), msg); + (ulong) errno, strerror(errno), msg); ib::error() << "IO Error: " << msg; @@ -2644,7 +2644,7 @@ row_import_read_index_data( ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while parsing index name."); return(err); @@ -2683,7 +2683,7 @@ row_import_read_indexes( if (fread(row, 1, sizeof(row), file) != sizeof(row)) { ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - 
errno, strerror(errno), + (ulong) errno, strerror(errno), "while reading number of indexes."); return(DB_IO_ERROR); @@ -2769,7 +2769,7 @@ row_import_read_columns( if (fread(row, 1, sizeof(row), file) != sizeof(row)) { ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while reading table column meta-data."); return(DB_IO_ERROR); @@ -2833,7 +2833,7 @@ row_import_read_columns( ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while parsing table column name."); return(err); @@ -2864,7 +2864,7 @@ row_import_read_v1( if (fread(value, 1, sizeof(value), file) != sizeof(value)) { ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while reading meta-data export hostname length."); return(DB_IO_ERROR); @@ -2892,7 +2892,7 @@ row_import_read_v1( ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while parsing export hostname."); return(err); @@ -2906,7 +2906,7 @@ row_import_read_v1( if (fread(value, 1, sizeof(value), file) != sizeof(value)) { ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while reading meta-data table name length."); return(DB_IO_ERROR); @@ -2933,7 +2933,7 @@ row_import_read_v1( if (err != DB_SUCCESS) { ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while parsing table name."); return(err); @@ -2952,7 +2952,7 @@ row_import_read_v1( if (fread(row, 1, sizeof(ib_uint64_t), file) != sizeof(ib_uint64_t)) { ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while reading autoinc value."); return(DB_IO_ERROR); @@ -2968,7 +2968,7 @@ row_import_read_v1( if (fread(row, 1, sizeof(row), file) != sizeof(row)) { ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while reading meta-data header."); return(DB_IO_ERROR); @@ -3039,7 +3039,7 @@ row_import_read_meta_data( if (fread(&row, 1, sizeof(row), file) != sizeof(row)) { ib_senderrf( thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while reading meta-data version."); return(DB_IO_ERROR); @@ -3090,7 +3090,7 @@ row_import_read_cfg( ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_READ_ERROR, - errno, strerror(errno), msg); + (ulong) errno, strerror(errno), msg); cfg.m_missing = true; diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 017821fdf38..4776bcc3c25 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -4702,8 +4702,8 @@ row_merge_build_indexes( "Table %s is encrypted but encryption service or" " used key_id is not available. " " Can't continue reading table.", - !old_table->is_readable() ? old_table->name : - new_table->name); + !old_table->is_readable() ? 
old_table->name.m_name : + new_table->name.m_name); goto func_exit; } diff --git a/storage/innobase/row/row0quiesce.cc b/storage/innobase/row/row0quiesce.cc index 21cc67620f6..77cb35b8f21 100644 --- a/storage/innobase/row/row0quiesce.cc +++ b/storage/innobase/row/row0quiesce.cc @@ -67,7 +67,7 @@ row_quiesce_write_index_fields( ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while writing index fields."); return(DB_IO_ERROR); @@ -87,7 +87,7 @@ row_quiesce_write_index_fields( ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while writing index column."); return(DB_IO_ERROR); @@ -121,7 +121,7 @@ row_quiesce_write_indexes( if (fwrite(row, 1, sizeof(row), file) != sizeof(row)) { ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while writing index count."); return(DB_IO_ERROR); @@ -175,7 +175,7 @@ row_quiesce_write_indexes( ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while writing index meta-data."); return(DB_IO_ERROR); @@ -196,7 +196,7 @@ row_quiesce_write_indexes( ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while writing index name."); return(DB_IO_ERROR); @@ -256,7 +256,7 @@ row_quiesce_write_table( if (fwrite(row, 1, sizeof(row), file) != sizeof(row)) { ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while writing table column data."); return(DB_IO_ERROR); @@ -283,7 +283,7 @@ row_quiesce_write_table( ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while writing column name."); return(DB_IO_ERROR); @@ -315,7 +315,7 @@ row_quiesce_write_header( if (fwrite(&value, 1, sizeof(value), file) != sizeof(value)) { ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while writing meta-data version number."); return(DB_IO_ERROR); @@ -345,7 +345,7 @@ row_quiesce_write_header( ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while writing hostname."); return(DB_IO_ERROR); @@ -365,7 +365,7 @@ row_quiesce_write_header( ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while writing table name."); return(DB_IO_ERROR); @@ -381,7 +381,7 @@ row_quiesce_write_header( if (fwrite(row, 1, sizeof(ib_uint64_t), file) != sizeof(ib_uint64_t)) { ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while writing table autoinc value."); return(DB_IO_ERROR); @@ -405,7 +405,7 @@ row_quiesce_write_header( if (fwrite(row, 1, sizeof(row), file) != sizeof(row)) { ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), + (ulong) errno, strerror(errno), "while writing table meta-data."); return(DB_IO_ERROR); @@ -458,7 +458,7 @@ row_quiesce_write_cfg( ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), msg); + (ulong) errno, strerror(errno), msg); } if (fclose(file) != 0) { @@ -468,7 +468,7 @@ row_quiesce_write_cfg( ib_senderrf( thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR, - errno, strerror(errno), msg); + (ulong) errno, strerror(errno), 
msg); } } From 6b7dcefdc83c4444ac8a4623b46810ff940528db Mon Sep 17 00:00:00 2001 From: Monty Date: Sun, 21 Jan 2018 20:16:22 +0200 Subject: [PATCH 06/17] Reset thd->lex->current_select for SP current_select may point to data from old parser states when calling a stored procedure with CALL The failure happens in Item::Item when testing if we are in having. Fixed by explicitely reseting current_select in do_execute_sp() and in sp_rcontext::create(). The later is also needed for stored functions(). --- sql/sp_rcontext.cc | 8 +++++++- sql/sql_parse.cc | 6 ++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc index 08f942b7d6d..396f5b448fc 100644 --- a/sql/sp_rcontext.cc +++ b/sql/sp_rcontext.cc @@ -61,6 +61,7 @@ sp_rcontext *sp_rcontext::create(THD *thd, const sp_pcontext *root_parsing_ctx, Field *return_value_fld) { + SELECT_LEX *save_current_select; sp_rcontext *ctx= new (thd->mem_root) sp_rcontext(root_parsing_ctx, return_value_fld, thd->in_sub_stmt); @@ -68,14 +69,19 @@ sp_rcontext *sp_rcontext::create(THD *thd, if (!ctx) return NULL; + /* Reset current_select as it's checked in Item_ident::Item_ident */ + save_current_select= thd->lex->current_select; + thd->lex->current_select= 0; + if (ctx->alloc_arrays(thd) || ctx->init_var_table(thd) || ctx->init_var_items(thd)) { delete ctx; - return NULL; + ctx= 0; } + thd->lex->current_select= save_current_select; return ctx; } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 21abc1a248c..99c57fc7cfa 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2876,6 +2876,12 @@ static bool do_execute_sp(THD *thd, sp_head *sp) ha_rows select_limit= thd->variables.select_limit; thd->variables.select_limit= HA_POS_ERROR; + /* + Reset current_select as it may point to random data as a + result of previous parsing. + */ + thd->lex->current_select= NULL; + /* We never write CALL statements into binlog: - If the mode is non-prelocked, each statement will be logged From 4f8555f1f68a22f33db57c31547df4f0832d78d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Sun, 21 Jan 2018 18:23:28 +0200 Subject: [PATCH 07/17] MDEV-14941 Timeouts on persistent statistics tables caused by MDEV-14511 MDEV-14511 tried to avoid some consistency problems related to InnoDB persistent statistics. The persistent statistics are being written by an InnoDB internal SQL interpreter that requires the InnoDB data dictionary cache to be locked. Before MDEV-14511, the statistics were written during DDL in separate transactions, which could unnecessarily reduce performance (each commit would require a redo log flush) and break atomicity, because the statistics would be updated separately from the dictionary transaction. However, because it is unacceptable to hold the InnoDB data dictionary cache locked while suspending the execution for waiting for a transactional lock (in the mysql.innodb_index_stats or mysql.innodb_table_stats tables) to be released, any lock conflict was immediately be reported as "lock wait timeout". To fix MDEV-14941, an attempt to reduce these lock conflicts by acquiring transactional locks on the user tables in both the statistics and DDL operations was made, but it would still not entirely prevent lock conflicts on the mysql.innodb_index_stats and mysql.innodb_table_stats tables. Fixing the remaining problems would require a change that is too intrusive for a GA release series, such as MariaDB 10.2. Thefefore, we revert the change MDEV-14511. 
To silence the MDEV-13201 assertion, we use the pre-existing flag trx_t::internal. --- .../suite/innodb/r/innodb_stats_debug.result | 12 - .../suite/innodb/t/innodb_stats_debug.test | 13 - .../innodb/t/innodb_stats_drop_locked.test | 2 +- storage/innobase/btr/btr0defragment.cc | 35 +- storage/innobase/dict/dict0defrag_bg.cc | 140 ++-- storage/innobase/dict/dict0stats.cc | 597 ++++++++++-------- storage/innobase/dict/dict0stats_bg.cc | 23 +- storage/innobase/handler/ha_innodb.cc | 67 +- storage/innobase/handler/handler0alter.cc | 191 +++--- storage/innobase/include/dict0defrag_bg.h | 31 +- storage/innobase/include/dict0stats.h | 115 ++-- storage/innobase/include/dict0stats.ic | 11 +- storage/innobase/include/trx0trx.h | 3 - storage/innobase/lock/lock0lock.cc | 41 +- storage/innobase/row/row0mysql.cc | 55 +- storage/innobase/row/row0trunc.cc | 13 +- storage/innobase/trx/trx0purge.cc | 2 +- storage/innobase/trx/trx0trx.cc | 2 +- 18 files changed, 681 insertions(+), 672 deletions(-) delete mode 100644 mysql-test/suite/innodb/r/innodb_stats_debug.result delete mode 100644 mysql-test/suite/innodb/t/innodb_stats_debug.test diff --git a/mysql-test/suite/innodb/r/innodb_stats_debug.result b/mysql-test/suite/innodb/r/innodb_stats_debug.result deleted file mode 100644 index 8f599acc08c..00000000000 --- a/mysql-test/suite/innodb/r/innodb_stats_debug.result +++ /dev/null @@ -1,12 +0,0 @@ -call mtr.add_suppression("InnoDB: Cannot save (table|index) statistics for table `test`\\.`t1`.*: Persistent statistics do not exist"); -CREATE TABLE t1 (a INT, KEY(a)) ENGINE=INNODB STATS_PERSISTENT=1; -SET @save_debug= @@SESSION.debug_dbug; -SET debug_dbug= '+d,stats_index_error'; -ANALYZE TABLE t1; -Table Op Msg_type Msg_text -test.t1 analyze status Operation failed -SET debug_dbug= @save_debug; -ANALYZE TABLE t1; -Table Op Msg_type Msg_text -test.t1 analyze status OK -DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/innodb_stats_debug.test b/mysql-test/suite/innodb/t/innodb_stats_debug.test deleted file mode 100644 index cd41c0b8fb0..00000000000 --- a/mysql-test/suite/innodb/t/innodb_stats_debug.test +++ /dev/null @@ -1,13 +0,0 @@ ---source include/have_innodb.inc ---source include/have_debug.inc - -call mtr.add_suppression("InnoDB: Cannot save (table|index) statistics for table `test`\\.`t1`.*: Persistent statistics do not exist"); - -CREATE TABLE t1 (a INT, KEY(a)) ENGINE=INNODB STATS_PERSISTENT=1; -SET @save_debug= @@SESSION.debug_dbug; -SET debug_dbug= '+d,stats_index_error'; -ANALYZE TABLE t1; -SET debug_dbug= @save_debug; -ANALYZE TABLE t1; - -DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/innodb_stats_drop_locked.test b/mysql-test/suite/innodb/t/innodb_stats_drop_locked.test index 47f363a4bb6..26367b8e6ae 100644 --- a/mysql-test/suite/innodb/t/innodb_stats_drop_locked.test +++ b/mysql-test/suite/innodb/t/innodb_stats_drop_locked.test @@ -57,5 +57,5 @@ SELECT table_name FROM mysql.innodb_index_stats WHERE table_name='innodb_stats_drop_locked'; --disable_query_log -call mtr.add_suppression("Unable to delete statistics for table test\\.innodb_stats_drop_locked: Lock wait"); +call mtr.add_suppression("Unable to delete statistics for table test.innodb_stats_drop_locked: Lock wait timeout. 
They can be deleted later using DELETE FROM mysql.innodb_index_stats WHERE database_name"); --enable_query_log diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc index 86df5a077a8..70444ca1830 100644 --- a/storage/innobase/btr/btr0defragment.cc +++ b/storage/innobase/btr/btr0defragment.cc @@ -751,8 +751,6 @@ DECLARE_THREAD(btr_defragment_thread)(void*) buf_block_t* first_block; buf_block_t* last_block; - trx_t* trx = trx_allocate_for_background(); - while (srv_shutdown_state == SRV_SHUTDOWN_NONE) { ut_ad(btr_defragment_thread_active); @@ -828,36 +826,31 @@ DECLARE_THREAD(btr_defragment_thread)(void*) /* Update the last_processed time of this index. */ item->last_processed = now; } else { + dberr_t err = DB_SUCCESS; mtr_commit(&mtr); /* Reaching the end of the index. */ dict_stats_empty_defrag_stats(index); - trx->error_state = DB_SUCCESS; - ut_d(trx->persistent_stats = true); - ++trx->will_lock; - dberr_t err = dict_stats_save_defrag_stats(index, trx); - if (err == DB_SUCCESS) { - err = dict_stats_save_defrag_summary( - index, trx); - } - + err = dict_stats_save_defrag_stats(index); if (err != DB_SUCCESS) { - trx_rollback_to_savepoint(trx, NULL); ib::error() << "Saving defragmentation stats for table " - << index->table->name - << " index " << index->name - << " failed with error " - << ut_strerr(err); - } else if (trx->state != TRX_STATE_NOT_STARTED) { - trx_commit_for_mysql(trx); + << index->table->name.m_name + << " index " << index->name() + << " failed with error " << err; + } else { + err = dict_stats_save_defrag_summary(index); + + if (err != DB_SUCCESS) { + ib::error() << "Saving defragmentation summary for table " + << index->table->name.m_name + << " index " << index->name() + << " failed with error " << err; + } } - ut_d(trx->persistent_stats = false); btr_defragment_remove_item(item); } } - trx_free_for_background(trx); - btr_defragment_thread_active = false; os_thread_exit(); OS_THREAD_DUMMY_RETURN; diff --git a/storage/innobase/dict/dict0defrag_bg.cc b/storage/innobase/dict/dict0defrag_bg.cc index 34120f0bdc9..976e2ac3877 100644 --- a/storage/innobase/dict/dict0defrag_bg.cc +++ b/storage/innobase/dict/dict0defrag_bg.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2016, 2017, MariaDB Corporation. +Copyright (c) 2016, MariaDB Corporation. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -29,7 +29,6 @@ Created 25/08/2016 Jan Lindström #include "dict0defrag_bg.h" #include "row0mysql.h" #include "srv0start.h" -#include "trx0roll.h" #include "ut0new.h" #include @@ -202,18 +201,17 @@ dict_stats_defrag_pool_del( mutex_exit(&defrag_pool_mutex); } -/** Get the first index that has been added for updating persistent defrag -stats and eventually save its stats. -@param[in,out] trx transaction that will be started and committed */ +/*****************************************************************//** +Get the first index that has been added for updating persistent defrag +stats and eventually save its stats. 
*/ static void -dict_stats_process_entry_from_defrag_pool(trx_t* trx) +dict_stats_process_entry_from_defrag_pool() { table_id_t table_id; index_id_t index_id; ut_ad(!srv_read_only_mode); - ut_ad(trx->persistent_stats); /* pop the first index from the auto defrag pool */ if (!dict_stats_defrag_pool_get(&table_id, &index_id)) { @@ -242,64 +240,62 @@ dict_stats_process_entry_from_defrag_pool(trx_t* trx) return; } - mutex_exit(&dict_sys->mutex); - trx->error_state = DB_SUCCESS; - ++trx->will_lock; - dberr_t err = dict_stats_save_defrag_stats(index, trx); - - if (err != DB_SUCCESS) { - trx_rollback_to_savepoint(trx, NULL); - ib::error() << "Saving defragmentation status for table " - << index->table->name - << " index " << index->name - << " failed " << err; - } else if (trx->state != TRX_STATE_NOT_STARTED) { - trx_commit_for_mysql(trx); - } - + dict_stats_save_defrag_stats(index); dict_table_close(table, FALSE, FALSE); } -/** Process indexes that have been scheduled for defragmenting. -@param[in,out] trx transaction that will be started and committed */ +/*****************************************************************//** +Get the first index that has been added for updating persistent defrag +stats and eventually save its stats. */ void -dict_defrag_process_entries_from_defrag_pool(trx_t* trx) +dict_defrag_process_entries_from_defrag_pool() +/*==========================================*/ { while (defrag_pool->size() && !dict_stats_start_shutdown) { - dict_stats_process_entry_from_defrag_pool(trx); + dict_stats_process_entry_from_defrag_pool(); } } -/** Save defragmentation result. -@param[in] index index that was defragmented -@param[in,out] trx transaction +/*********************************************************************//** +Save defragmentation result. @return DB_SUCCESS or error code */ dberr_t -dict_stats_save_defrag_summary(dict_index_t* index, trx_t* trx) +dict_stats_save_defrag_summary( +/*============================*/ + dict_index_t* index) /*!< in: index */ { - ut_ad(trx->persistent_stats); + dberr_t ret=DB_SUCCESS; + lint now = (lint) ut_time(); if (dict_index_is_ibuf(index)) { return DB_SUCCESS; } - return dict_stats_save_index_stat(index, ut_time(), "n_pages_freed", - index->stat_defrag_n_pages_freed, - NULL, - "Number of pages freed during" - " last defragmentation run.", - trx); + rw_lock_x_lock(dict_operation_lock); + mutex_enter(&dict_sys->mutex); + + ret = dict_stats_save_index_stat(index, now, "n_pages_freed", + index->stat_defrag_n_pages_freed, + NULL, + "Number of pages freed during" + " last defragmentation run.", + NULL); + + mutex_exit(&dict_sys->mutex); + rw_lock_x_unlock(dict_operation_lock); + + return (ret); } -/** Save defragmentation stats for a given index. -@param[in] index index that is being defragmented -@param[in,out] trx transaction +/*********************************************************************//** +Save defragmentation stats for a given index. 
@return DB_SUCCESS or error code */ dberr_t -dict_stats_save_defrag_stats(dict_index_t* index, trx_t* trx) +dict_stats_save_defrag_stats( +/*============================*/ + dict_index_t* index) /*!< in: index */ { - ut_ad(trx->error_state == DB_SUCCESS); - ut_ad(trx->persistent_stats); + dberr_t ret; if (dict_index_is_ibuf(index)) { return DB_SUCCESS; @@ -309,6 +305,7 @@ dict_stats_save_defrag_stats(dict_index_t* index, trx_t* trx) return dict_stats_report_error(index->table, true); } + lint now = (lint) ut_time(); mtr_t mtr; ulint n_leaf_pages; ulint n_leaf_reserved; @@ -325,33 +322,40 @@ dict_stats_save_defrag_stats(dict_index_t* index, trx_t* trx) return DB_SUCCESS; } - ib_time_t now = ut_time(); - dberr_t err = dict_stats_save_index_stat( - index, now, "n_page_split", - index->stat_defrag_n_page_split, + rw_lock_x_lock(dict_operation_lock); + + mutex_enter(&dict_sys->mutex); + ret = dict_stats_save_index_stat(index, now, "n_page_split", + index->stat_defrag_n_page_split, + NULL, + "Number of new page splits on leaves" + " since last defragmentation.", + NULL); + if (ret != DB_SUCCESS) { + goto end; + } + + ret = dict_stats_save_index_stat( + index, now, "n_leaf_pages_defrag", + n_leaf_pages, NULL, - "Number of new page splits on leaves" - " since last defragmentation.", - trx); - if (err == DB_SUCCESS) { - err = dict_stats_save_index_stat( - index, now, "n_leaf_pages_defrag", - n_leaf_pages, - NULL, - "Number of leaf pages when this stat is saved to disk", - trx); + "Number of leaf pages when this stat is saved to disk", + NULL); + if (ret != DB_SUCCESS) { + goto end; } - if (err == DB_SUCCESS) { - err = dict_stats_save_index_stat( - index, now, "n_leaf_pages_reserved", - n_leaf_reserved, - NULL, - "Number of pages reserved for this " - "index leaves when this stat " - "is saved to disk", - trx); - } + ret = dict_stats_save_index_stat( + index, now, "n_leaf_pages_reserved", + n_leaf_reserved, + NULL, + "Number of pages reserved for this index leaves when this stat " + "is saved to disk", + NULL); - return err; +end: + mutex_exit(&dict_sys->mutex); + rw_lock_x_unlock(dict_operation_lock); + + return (ret); } diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index 662ea959b9e..67991814540 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2009, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2017, MariaDB Corporation. +Copyright (c) 2015, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -276,7 +276,9 @@ This function will free the pinfo object. @param[in,out] pinfo pinfo to pass to que_eval_sql() must already have any literals bound to it @param[in] sql SQL string to execute -@param[in,out] trx transaction +@param[in,out] trx in case of NULL the function will allocate and +free the trx object. If it is not NULL then it will be rolled back +only in the case of error, but not freed. 
@return DB_SUCCESS or error code */ static dberr_t @@ -286,12 +288,53 @@ dict_stats_exec_sql( trx_t* trx) { dberr_t err; + bool trx_started = false; + + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(mutex_own(&dict_sys->mutex)); + + if (!dict_stats_persistent_storage_check(true)) { + pars_info_free(pinfo); + return(DB_STATS_DO_NOT_EXIST); + } + + if (trx == NULL) { + trx = trx_allocate_for_background(); + trx_started = true; + + if (srv_read_only_mode) { + trx_start_internal_read_only(trx); + } else { + trx_start_internal(trx); + } + } err = que_eval_sql(pinfo, sql, FALSE, trx); /* pinfo is freed here */ DBUG_EXECUTE_IF("stats_index_error", + if (!trx_started) { err = DB_STATS_DO_NOT_EXIST; - trx->error_state = DB_STATS_DO_NOT_EXIST;); + trx->error_state = DB_STATS_DO_NOT_EXIST; + }); + + if (!trx_started && err == DB_SUCCESS) { + return(DB_SUCCESS); + } + + if (err == DB_SUCCESS) { + trx_commit_for_mysql(trx); + } else { + trx->op_info = "rollback of internal trx on stats tables"; + trx->dict_operation_lock_mode = RW_X_LATCH; + trx_rollback_to_savepoint(trx, NULL); + trx->dict_operation_lock_mode = 0; + trx->op_info = ""; + ut_a(trx->error_state == DB_SUCCESS); + } + + if (trx_started) { + trx_free_for_background(trx); + } return(err); } @@ -500,12 +543,16 @@ dict_stats_empty_index( } } -/** Reset the table and index statsistics, corresponding to an empty table. -@param[in,out] table table whose statistics are to be reset -@param[in] empty_defrag_stats whether to empty the defrag statistics -*/ +/*********************************************************************//** +Write all zeros (or 1 where it makes sense) into a table and its indexes' +statistics members. The resulting stats correspond to an empty table. */ +static void -dict_stats_empty_table(dict_table_t* table, bool empty_defrag_stats) +dict_stats_empty_table( +/*===================*/ + dict_table_t* table, /*!< in/out: table */ + bool empty_defrag_stats) + /*!< in: whether to empty defrag stats */ { /* Zero the stats members */ @@ -2251,7 +2298,9 @@ storage. @param[in] stat_value value of the stat @param[in] sample_size n pages sampled or NULL @param[in] stat_description description of the stat -@param[in,out] trx transaction +@param[in,out] trx in case of NULL the function will +allocate and free the trx object. If it is not NULL then it will be +rolled back only in the case of error, but not freed. @return DB_SUCCESS or error code */ dberr_t dict_stats_save_index_stat( @@ -2268,7 +2317,9 @@ dict_stats_save_index_stat( char db_utf8[MAX_DB_UTF8_LEN]; char table_utf8[MAX_TABLE_UTF8_LEN]; - ut_ad(trx->persistent_stats || trx->in_mysql_trx_list); + ut_ad(!trx || trx->internal || trx->in_mysql_trx_list); + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(mutex_own(&dict_sys->mutex)); dict_fs2utf8(index->table->name.m_name, db_utf8, sizeof(db_utf8), table_utf8, sizeof(table_utf8)); @@ -2294,8 +2345,6 @@ dict_stats_save_index_stat( pars_info_add_str_literal(pinfo, "stat_description", stat_description); - mutex_enter(&dict_sys->mutex); - ret = dict_stats_exec_sql( pinfo, "PROCEDURE INDEX_STATS_SAVE () IS\n" @@ -2322,8 +2371,6 @@ dict_stats_save_index_stat( ");\n" "END;", trx); - mutex_exit(&dict_sys->mutex); - if (ret != DB_SUCCESS) { if (innodb_index_stats_not_found == false && index->stats_error_printed == false) { @@ -2376,7 +2423,6 @@ dict_stats_report_error(dict_table_t* table, bool defragment) /** Save the table's statistics into the persistent statistics storage. 
@param[in] table_orig table whose stats to save -@param[in,out] trx transaction @param[in] only_for_index if this is non-NULL, then stats for indexes that are not equal to it will not be saved, if NULL, then all indexes' stats are saved @@ -2385,8 +2431,7 @@ static dberr_t dict_stats_save( dict_table_t* table_orig, - trx_t* trx, - const index_id_t* only_for_index = NULL) + const index_id_t* only_for_index) { pars_info_t* pinfo; ib_time_t now; @@ -2395,10 +2440,11 @@ dict_stats_save( char db_utf8[MAX_DB_UTF8_LEN]; char table_utf8[MAX_TABLE_UTF8_LEN]; - ut_ad(trx->persistent_stats || trx->in_mysql_trx_list); + if (high_level_read_only) { + return DB_READ_ONLY; + } - if (table_orig->is_readable()) { - } else { + if (!table_orig->is_readable()) { return (dict_stats_report_error(table_orig)); } @@ -2407,8 +2453,9 @@ dict_stats_save( dict_fs2utf8(table->name.m_name, db_utf8, sizeof(db_utf8), table_utf8, sizeof(table_utf8)); - now = ut_time(); + rw_lock_x_lock(dict_operation_lock); + mutex_enter(&dict_sys->mutex); pinfo = pars_info_create(); @@ -2421,8 +2468,6 @@ dict_stats_save( pars_info_add_ull_literal(pinfo, "sum_of_other_index_sizes", table->stat_sum_of_other_index_sizes); - mutex_enter(&dict_sys->mutex); - ret = dict_stats_exec_sql( pinfo, "PROCEDURE TABLE_STATS_SAVE () IS\n" @@ -2443,18 +2488,28 @@ dict_stats_save( ":clustered_index_size,\n" ":sum_of_other_index_sizes\n" ");\n" - "END;", trx); + "END;", NULL); - mutex_exit(&dict_sys->mutex); + if (ret != DB_SUCCESS) { + ib::error() << "Cannot save table statistics for table " + << table->name << ": " << ut_strerr(ret); + mutex_exit(&dict_sys->mutex); + rw_lock_x_unlock(dict_operation_lock); + + dict_stats_snapshot_free(table); + + return(ret); + } + + trx_t* trx = trx_allocate_for_background(); + trx_start_internal(trx); + + dict_index_t* index; index_map_t indexes( (ut_strcmp_functor()), index_map_t_allocator(mem_key_dict_stats_index_map_t)); - if (ret != DB_SUCCESS) { - goto end; - } - /* Below we do all the modifications in innodb_index_stats in a single transaction for performance reasons. Modifying more than one row in a single transaction may deadlock with other transactions if they @@ -2467,17 +2522,18 @@ dict_stats_save( stat_name). This is why below we sort the indexes by name and then for each index, do the mods ordered by stat_name. */ - for (dict_index_t* index = dict_table_get_first_index(table); + for (index = dict_table_get_first_index(table); index != NULL; index = dict_table_get_next_index(index)) { indexes[index->name] = index; } - for (index_map_t::const_iterator it = indexes.begin(); - it != indexes.end(); ++it) { + index_map_t::const_iterator it; - dict_index_t* index = it->second; + for (it = indexes.begin(); it != indexes.end(); ++it) { + + index = it->second; if (only_for_index != NULL && index->id != *only_for_index) { continue; @@ -2540,11 +2596,13 @@ dict_stats_save( } } - if (ret != DB_SUCCESS) { + trx_commit_for_mysql(trx); + end: - ib::error() << "Cannot save table statistics for table " - << table->name << ": " << ut_strerr(ret); - } + trx_free_for_background(trx); + + mutex_exit(&dict_sys->mutex); + rw_lock_x_unlock(dict_operation_lock); dict_stats_snapshot_free(table); @@ -3042,13 +3100,12 @@ dict_stats_empty_defrag_modified_counter( } } -/** Calculate index statistics. 
-@param[in,out] index index tree -@param[in,out] trx transaction (for persistent statistics) -@return DB_SUCCESS or error code */ -UNIV_INTERN -dberr_t -dict_stats_update_for_index(dict_index_t* index, trx_t* trx) +/*********************************************************************//** +Fetches or calculates new estimates for index statistics. */ +void +dict_stats_update_for_index( +/*========================*/ + dict_index_t* index) /*!< in/out: index */ { DBUG_ENTER("dict_stats_update_for_index"); @@ -3060,8 +3117,8 @@ dict_stats_update_for_index(dict_index_t* index, trx_t* trx) dict_table_stats_lock(index->table, RW_X_LATCH); dict_stats_analyze_index(index); dict_table_stats_unlock(index->table, RW_X_LATCH); - DBUG_RETURN(dict_stats_save(index->table, trx, - &index->id)); + dict_stats_save(index->table, &index->id); + DBUG_VOID_RETURN; } /* else */ @@ -3084,20 +3141,22 @@ dict_stats_update_for_index(dict_index_t* index, trx_t* trx) dict_stats_update_transient_for_index(index); dict_table_stats_unlock(index->table, RW_X_LATCH); - DBUG_RETURN(DB_SUCCESS); + DBUG_VOID_RETURN; } -/** Calculate new estimates for table and index statistics. -@param[in,out] table table -@param[in] stats_upd_option how to update statistics -@param[in,out] trx transaction -@return DB_* error code or DB_SUCCESS */ -UNIV_INTERN +/*********************************************************************//** +Calculates new estimates for table and index statistics. The statistics +are used in query optimization. +@return DB_SUCCESS or error code */ dberr_t dict_stats_update( - dict_table_t* table, - dict_stats_upd_option_t stats_upd_option, - trx_t* trx) +/*==============*/ + dict_table_t* table, /*!< in/out: table */ + dict_stats_upd_option_t stats_upd_option) + /*!< in: whether to (re) calc + the stats or to fetch them from + the persistent statistics + storage */ { ut_ad(!mutex_own(&dict_sys->mutex)); @@ -3142,7 +3201,7 @@ dict_stats_update( return(err); } - err = dict_stats_save(table, trx); + err = dict_stats_save(table, NULL); return(err); } @@ -3178,7 +3237,7 @@ dict_stats_update( if (dict_stats_persistent_storage_check(false)) { - return(dict_stats_save(table, trx)); + return(dict_stats_save(table, NULL)); } return(DB_STATS_DO_NOT_EXIST); @@ -3257,9 +3316,9 @@ dict_stats_update( } if (dict_stats_auto_recalc_is_enabled(table)) { - return dict_stats_update( - table, DICT_STATS_RECALC_PERSISTENT, - trx); + return(dict_stats_update( + table, + DICT_STATS_RECALC_PERSISTENT)); } ib::info() << "Trying to use table " << table->name @@ -3307,20 +3366,25 @@ transient: return(DB_SUCCESS); } -/** Remove the persistent statistics for an index. -@param[in] db_and_table schema and table name, e.g., 'db/table' -@param[in] iname index name -@param[out] errstr error message (when not returning DB_SUCCESS) -@param[in] errstr_sz sizeof errstr -@param[in,out] trx transaction +/*********************************************************************//** +Removes the information for a particular index's stats from the persistent +storage if it exists and if there is data stored for this index. +This function creates its own trx and commits it. +A note from Marko why we cannot edit user and sys_* tables in one trx: +marko: The problem is that ibuf merges should be disabled while we are +rolling back dict transactions. +marko: If ibuf merges are not disabled, we need to scan the *.ibd files. +But we shouldn't open *.ibd files before we have rolled back dict +transactions and opened the SYS_* records for the *.ibd files. 
@return DB_SUCCESS or error code */ dberr_t dict_stats_drop_index( - const char* db_and_table, - const char* iname, - char* errstr, - size_t errstr_sz, - trx_t* trx) +/*==================*/ + const char* db_and_table,/*!< in: db and table, e.g. 'db/table' */ + const char* iname, /*!< in: index name */ + char* errstr, /*!< out: error message if != DB_SUCCESS + is returned */ + ulint errstr_sz)/*!< in: size of the errstr buffer */ { char db_utf8[MAX_DB_UTF8_LEN]; char table_utf8[MAX_TABLE_UTF8_LEN]; @@ -3336,11 +3400,6 @@ dict_stats_drop_index( return(DB_SUCCESS); } - if (!dict_stats_persistent_storage_check(false)) { - /* Statistics tables do not exist. */ - return(DB_SUCCESS); - } - dict_fs2utf8(db_and_table, db_utf8, sizeof(db_utf8), table_utf8, sizeof(table_utf8)); @@ -3352,6 +3411,7 @@ dict_stats_drop_index( pars_info_add_str_literal(pinfo, "index_name", iname); + rw_lock_x_lock(dict_operation_lock); mutex_enter(&dict_sys->mutex); ret = dict_stats_exec_sql( @@ -3362,18 +3422,16 @@ dict_stats_drop_index( "database_name = :database_name AND\n" "table_name = :table_name AND\n" "index_name = :index_name;\n" - "END;\n", trx); + "END;\n", NULL); mutex_exit(&dict_sys->mutex); + rw_lock_x_unlock(dict_operation_lock); - switch (ret) { - case DB_STATS_DO_NOT_EXIST: - case DB_SUCCESS: - return(DB_SUCCESS); - case DB_QUE_THR_SUSPENDED: - ret = DB_LOCK_WAIT; - /* fall through */ - default: + if (ret == DB_STATS_DO_NOT_EXIST) { + ret = DB_SUCCESS; + } + + if (ret != DB_SUCCESS) { snprintf(errstr, errstr_sz, "Unable to delete statistics for index %s" " from %s%s: %s. They can be deleted later using" @@ -3399,71 +3457,98 @@ dict_stats_drop_index( return(ret); } -/** Delete table statistics. -@param[in] db schema name -@param[in] t table name -@param[in,out] trx transaction +/*********************************************************************//** +Executes +DELETE FROM mysql.innodb_table_stats +WHERE database_name = '...' AND table_name = '...'; +Creates its own transaction and commits it. @return DB_SUCCESS or error code */ UNIV_INLINE dberr_t -dict_stats_delete_from_table_stats(const char* db, const char* t, trx_t* trx) +dict_stats_delete_from_table_stats( +/*===============================*/ + const char* database_name, /*!< in: database name, e.g. 'db' */ + const char* table_name) /*!< in: table name, e.g. 'table' */ { - pars_info_t* pinfo = pars_info_create(); + pars_info_t* pinfo; + dberr_t ret; - pars_info_add_str_literal(pinfo, "database_name", db); - pars_info_add_str_literal(pinfo, "table_name", t); + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(mutex_own(&dict_sys->mutex)); - return dict_stats_exec_sql( + pinfo = pars_info_create(); + + pars_info_add_str_literal(pinfo, "database_name", database_name); + pars_info_add_str_literal(pinfo, "table_name", table_name); + + ret = dict_stats_exec_sql( pinfo, "PROCEDURE DELETE_FROM_TABLE_STATS () IS\n" "BEGIN\n" "DELETE FROM \"" TABLE_STATS_NAME "\" WHERE\n" "database_name = :database_name AND\n" "table_name = :table_name;\n" - "END;\n", trx); + "END;\n", NULL); + + return(ret); } -/** Delete index statistics. -@param[in] db schema name -@param[in] t table name -@param[in,out] trx transaction +/*********************************************************************//** +Executes +DELETE FROM mysql.innodb_index_stats +WHERE database_name = '...' AND table_name = '...'; +Creates its own transaction and commits it. 
@return DB_SUCCESS or error code */ UNIV_INLINE dberr_t -dict_stats_delete_from_index_stats(const char* db, const char* t, trx_t* trx) +dict_stats_delete_from_index_stats( +/*===============================*/ + const char* database_name, /*!< in: database name, e.g. 'db' */ + const char* table_name) /*!< in: table name, e.g. 'table' */ { - pars_info_t* pinfo = pars_info_create(); + pars_info_t* pinfo; + dberr_t ret; - pars_info_add_str_literal(pinfo, "database_name", db); - pars_info_add_str_literal(pinfo, "table_name", t); + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(mutex_own(&dict_sys->mutex)); - return dict_stats_exec_sql( + pinfo = pars_info_create(); + + pars_info_add_str_literal(pinfo, "database_name", database_name); + pars_info_add_str_literal(pinfo, "table_name", table_name); + + ret = dict_stats_exec_sql( pinfo, "PROCEDURE DELETE_FROM_INDEX_STATS () IS\n" "BEGIN\n" "DELETE FROM \"" INDEX_STATS_NAME "\" WHERE\n" "database_name = :database_name AND\n" "table_name = :table_name;\n" - "END;\n", trx); + "END;\n", NULL); + + return(ret); } -/** Remove the persistent statistics for a table and all of its indexes. -@param[in] db_and_table schema and table name, e.g., 'db/table' -@param[out] errstr error message (when not returning DB_SUCCESS) -@param[in] errstr_sz sizeof errstr -@param[in,out] trx transaction +/*********************************************************************//** +Removes the statistics for a table and all of its indexes from the +persistent statistics storage if it exists and if there is data stored for +the table. This function creates its own transaction and commits it. @return DB_SUCCESS or error code */ dberr_t dict_stats_drop_table( - const char* db_and_table, - char* errstr, - size_t errstr_sz, - trx_t* trx) +/*==================*/ + const char* db_and_table, /*!< in: db and table, e.g. 'db/table' */ + char* errstr, /*!< out: error message + if != DB_SUCCESS is returned */ + ulint errstr_sz) /*!< in: size of errstr buffer */ { char db_utf8[MAX_DB_UTF8_LEN]; char table_utf8[MAX_TABLE_UTF8_LEN]; dberr_t ret; + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(mutex_own(&dict_sys->mutex)); + /* skip tables that do not contain a database name e.g. if we are dropping SYS_TABLES */ if (strchr(db_and_table, '/') == NULL) { @@ -3478,32 +3563,24 @@ dict_stats_drop_table( return(DB_SUCCESS); } - if (!dict_stats_persistent_storage_check(true)) { - /* Statistics tables do not exist. */ - return(DB_SUCCESS); - } - dict_fs2utf8(db_and_table, db_utf8, sizeof(db_utf8), table_utf8, sizeof(table_utf8)); - ret = dict_stats_delete_from_table_stats(db_utf8, table_utf8, trx); + ret = dict_stats_delete_from_table_stats(db_utf8, table_utf8); if (ret == DB_SUCCESS) { - ret = dict_stats_delete_from_index_stats( - db_utf8, table_utf8, trx); + ret = dict_stats_delete_from_index_stats(db_utf8, table_utf8); } - switch (ret) { - case DB_SUCCESS: - case DB_STATS_DO_NOT_EXIST: - return(DB_SUCCESS); - case DB_QUE_THR_SUSPENDED: - ret = DB_LOCK_WAIT; - /* fall through */ - default: + if (ret == DB_STATS_DO_NOT_EXIST) { + ret = DB_SUCCESS; + } + + if (ret != DB_SUCCESS) { + snprintf(errstr, errstr_sz, - "Unable to delete statistics for table %s.%s: %s. " - "They can be deleted later using " + "Unable to delete statistics for table %s.%s: %s." + " They can be deleted later using" " DELETE FROM %s WHERE" " database_name = '%s' AND" @@ -3526,30 +3603,36 @@ dict_stats_drop_table( return(ret); } -/** Rename table statistics. 
-@param[in] old_dbname_utf8 old schema name -@param[in] old_tablename_utf8 old table name -@param[in] new_dbname_utf8 new schema name -@param[in] new_tablename_utf8 new schema name -@param[in,out] trx transaction +/*********************************************************************//** +Executes +UPDATE mysql.innodb_table_stats SET +database_name = '...', table_name = '...' +WHERE database_name = '...' AND table_name = '...'; +Creates its own transaction and commits it. @return DB_SUCCESS or error code */ UNIV_INLINE dberr_t -dict_stats_rename_in_table_stats( - const char* old_dbname_utf8, - const char* old_tablename_utf8, - const char* new_dbname_utf8, - const char* new_tablename_utf8, - trx_t* trx) +dict_stats_rename_table_in_table_stats( +/*===================================*/ + const char* old_dbname_utf8,/*!< in: database name, e.g. 'olddb' */ + const char* old_tablename_utf8,/*!< in: table name, e.g. 'oldtable' */ + const char* new_dbname_utf8,/*!< in: database name, e.g. 'newdb' */ + const char* new_tablename_utf8)/*!< in: table name, e.g. 'newtable' */ { - pars_info_t* pinfo = pars_info_create(); + pars_info_t* pinfo; + dberr_t ret; + + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(mutex_own(&dict_sys->mutex)); + + pinfo = pars_info_create(); pars_info_add_str_literal(pinfo, "old_dbname_utf8", old_dbname_utf8); pars_info_add_str_literal(pinfo, "old_tablename_utf8", old_tablename_utf8); pars_info_add_str_literal(pinfo, "new_dbname_utf8", new_dbname_utf8); pars_info_add_str_literal(pinfo, "new_tablename_utf8", new_tablename_utf8); - return dict_stats_exec_sql( + ret = dict_stats_exec_sql( pinfo, "PROCEDURE RENAME_TABLE_IN_TABLE_STATS () IS\n" "BEGIN\n" @@ -3559,33 +3642,41 @@ dict_stats_rename_in_table_stats( "WHERE\n" "database_name = :old_dbname_utf8 AND\n" "table_name = :old_tablename_utf8;\n" - "END;\n", trx); + "END;\n", NULL); + + return(ret); } -/** Rename index statistics. -@param[in] old_dbname_utf8 old schema name -@param[in] old_tablename_utf8 old table name -@param[in] new_dbname_utf8 new schema name -@param[in] new_tablename_utf8 new schema name -@param[in,out] trx transaction +/*********************************************************************//** +Executes +UPDATE mysql.innodb_index_stats SET +database_name = '...', table_name = '...' +WHERE database_name = '...' AND table_name = '...'; +Creates its own transaction and commits it. @return DB_SUCCESS or error code */ UNIV_INLINE dberr_t -dict_stats_rename_in_index_stats( - const char* old_dbname_utf8, - const char* old_tablename_utf8, - const char* new_dbname_utf8, - const char* new_tablename_utf8, - trx_t* trx) +dict_stats_rename_table_in_index_stats( +/*===================================*/ + const char* old_dbname_utf8,/*!< in: database name, e.g. 'olddb' */ + const char* old_tablename_utf8,/*!< in: table name, e.g. 'oldtable' */ + const char* new_dbname_utf8,/*!< in: database name, e.g. 'newdb' */ + const char* new_tablename_utf8)/*!< in: table name, e.g. 
'newtable' */ { - pars_info_t* pinfo = pars_info_create(); + pars_info_t* pinfo; + dberr_t ret; + + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(mutex_own(&dict_sys->mutex)); + + pinfo = pars_info_create(); pars_info_add_str_literal(pinfo, "old_dbname_utf8", old_dbname_utf8); pars_info_add_str_literal(pinfo, "old_tablename_utf8", old_tablename_utf8); pars_info_add_str_literal(pinfo, "new_dbname_utf8", new_dbname_utf8); pars_info_add_str_literal(pinfo, "new_tablename_utf8", new_tablename_utf8); - return dict_stats_exec_sql( + ret = dict_stats_exec_sql( pinfo, "PROCEDURE RENAME_TABLE_IN_INDEX_STATS () IS\n" "BEGIN\n" @@ -3595,23 +3686,23 @@ dict_stats_rename_in_index_stats( "WHERE\n" "database_name = :old_dbname_utf8 AND\n" "table_name = :old_tablename_utf8;\n" - "END;\n", trx); + "END;\n", NULL); + + return(ret); } -/** Rename a table in the InnoDB persistent statistics storage. -@param[in] old_name old schema and table name, e.g., 'db/table' -@param[in] new_name new schema and table name, e.g., 'db/table' -@param[out] errstr error message (when not returning DB_SUCCESS) -@param[in] errstr_sz sizeof errstr -@param[in,out] trx transaction +/*********************************************************************//** +Renames a table in InnoDB persistent stats storage. +This function creates its own transaction and commits it. @return DB_SUCCESS or error code */ dberr_t dict_stats_rename_table( - const char* old_name, - const char* new_name, - char* errstr, - size_t errstr_sz, - trx_t* trx) +/*====================*/ + const char* old_name, /*!< in: old name, e.g. 'db/table' */ + const char* new_name, /*!< in: new name, e.g. 'db/table' */ + char* errstr, /*!< out: error string if != DB_SUCCESS + is returned */ + size_t errstr_sz) /*!< in: errstr size */ { char old_db_utf8[MAX_DB_UTF8_LEN]; char new_db_utf8[MAX_DB_UTF8_LEN]; @@ -3619,6 +3710,9 @@ dict_stats_rename_table( char new_table_utf8[MAX_TABLE_UTF8_LEN]; dberr_t ret; + ut_ad(!rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(!mutex_own(&dict_sys->mutex)); + /* skip innodb_table_stats and innodb_index_stats themselves */ if (strcmp(old_name, TABLE_STATS_NAME) == 0 || strcmp(old_name, INDEX_STATS_NAME) == 0 @@ -3628,95 +3722,45 @@ dict_stats_rename_table( return(DB_SUCCESS); } - if (!dict_stats_persistent_storage_check(false)) { - /* Statistics tables do not exist. 
*/ - return(DB_SUCCESS); - } - dict_fs2utf8(old_name, old_db_utf8, sizeof(old_db_utf8), old_table_utf8, sizeof(old_table_utf8)); dict_fs2utf8(new_name, new_db_utf8, sizeof(new_db_utf8), new_table_utf8, sizeof(new_table_utf8)); + rw_lock_x_lock(dict_operation_lock); + mutex_enter(&dict_sys->mutex); + ulint n_attempts = 0; do { - trx_savept_t savept = trx_savept_take(trx); + n_attempts++; - mutex_enter(&dict_sys->mutex); - - ret = dict_stats_rename_in_table_stats( + ret = dict_stats_rename_table_in_table_stats( old_db_utf8, old_table_utf8, - new_db_utf8, new_table_utf8, trx); + new_db_utf8, new_table_utf8); - mutex_exit(&dict_sys->mutex); - - switch (ret) { - case DB_DUPLICATE_KEY: - trx_rollback_to_savepoint(trx, &savept); - mutex_enter(&dict_sys->mutex); + if (ret == DB_DUPLICATE_KEY) { dict_stats_delete_from_table_stats( - new_db_utf8, new_table_utf8, trx); - mutex_exit(&dict_sys->mutex); - /* fall through */ - case DB_LOCK_WAIT_TIMEOUT: - trx->error_state = DB_SUCCESS; - os_thread_sleep(200000 /* 0.2 sec */); - continue; - case DB_STATS_DO_NOT_EXIST: - ret = DB_SUCCESS; - break; - default: - break; + new_db_utf8, new_table_utf8); } - break; - } while (++n_attempts < 5); + if (ret == DB_STATS_DO_NOT_EXIST) { + ret = DB_SUCCESS; + } - const char* table_name = TABLE_STATS_NAME_PRINT; - - if (ret != DB_SUCCESS) { - goto err_exit; - } - - table_name = INDEX_STATS_NAME_PRINT; - - n_attempts = 0; - do { - trx_savept_t savept = trx_savept_take(trx); - - mutex_enter(&dict_sys->mutex); - - ret = dict_stats_rename_in_index_stats( - old_db_utf8, old_table_utf8, - new_db_utf8, new_table_utf8, trx); - - mutex_exit(&dict_sys->mutex); - - switch (ret) { - case DB_DUPLICATE_KEY: - trx_rollback_to_savepoint(trx, &savept); + if (ret != DB_SUCCESS) { + mutex_exit(&dict_sys->mutex); + rw_lock_x_unlock(dict_operation_lock); + os_thread_sleep(200000 /* 0.2 sec */); + rw_lock_x_lock(dict_operation_lock); mutex_enter(&dict_sys->mutex); - dict_stats_delete_from_index_stats( - new_db_utf8, new_table_utf8, trx); - mutex_exit(&dict_sys->mutex); - /* fall through */ - case DB_LOCK_WAIT_TIMEOUT: - trx->error_state = DB_SUCCESS; - os_thread_sleep(200000 /* 0.2 sec */); - continue; - case DB_STATS_DO_NOT_EXIST: - ret = DB_SUCCESS; - break; - default: - break; } - - break; - } while (++n_attempts < 5); + } while ((ret == DB_DEADLOCK + || ret == DB_DUPLICATE_KEY + || ret == DB_LOCK_WAIT_TIMEOUT) + && n_attempts < 5); if (ret != DB_SUCCESS) { -err_exit: snprintf(errstr, errstr_sz, "Unable to rename statistics from" " %s.%s to %s.%s in %s: %s." 
@@ -3731,10 +3775,69 @@ err_exit: old_db_utf8, old_table_utf8, new_db_utf8, new_table_utf8, - table_name, + TABLE_STATS_NAME_PRINT, ut_strerr(ret), - table_name, + TABLE_STATS_NAME_PRINT, + new_db_utf8, new_table_utf8, + old_db_utf8, old_table_utf8); + mutex_exit(&dict_sys->mutex); + rw_lock_x_unlock(dict_operation_lock); + return(ret); + } + /* else */ + + n_attempts = 0; + do { + n_attempts++; + + ret = dict_stats_rename_table_in_index_stats( + old_db_utf8, old_table_utf8, + new_db_utf8, new_table_utf8); + + if (ret == DB_DUPLICATE_KEY) { + dict_stats_delete_from_index_stats( + new_db_utf8, new_table_utf8); + } + + if (ret == DB_STATS_DO_NOT_EXIST) { + ret = DB_SUCCESS; + } + + if (ret != DB_SUCCESS) { + mutex_exit(&dict_sys->mutex); + rw_lock_x_unlock(dict_operation_lock); + os_thread_sleep(200000 /* 0.2 sec */); + rw_lock_x_lock(dict_operation_lock); + mutex_enter(&dict_sys->mutex); + } + } while ((ret == DB_DEADLOCK + || ret == DB_DUPLICATE_KEY + || ret == DB_LOCK_WAIT_TIMEOUT) + && n_attempts < 5); + + mutex_exit(&dict_sys->mutex); + rw_lock_x_unlock(dict_operation_lock); + + if (ret != DB_SUCCESS) { + snprintf(errstr, errstr_sz, + "Unable to rename statistics from" + " %s.%s to %s.%s in %s: %s." + " They can be renamed later using" + + " UPDATE %s SET" + " database_name = '%s'," + " table_name = '%s'" + " WHERE" + " database_name = '%s' AND" + " table_name = '%s';", + + old_db_utf8, old_table_utf8, + new_db_utf8, new_table_utf8, + INDEX_STATS_NAME_PRINT, + ut_strerr(ret), + + INDEX_STATS_NAME_PRINT, new_db_utf8, new_table_utf8, old_db_utf8, old_table_utf8); } @@ -3742,6 +3845,7 @@ err_exit: return(ret); } +#ifdef MYSQL_RENAME_INDEX /*********************************************************************//** Renames an index in InnoDB persistent stats storage. This function creates its own transaction and commits it. @@ -3798,6 +3902,7 @@ dict_stats_rename_index( return(ret); } +#endif /* MYSQL_RENAME_INDEX */ /* tests @{ */ #ifdef UNIV_ENABLE_UNIT_TEST_DICT_STATS diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc index 3f5419a0751..6c9a17c8a7c 100644 --- a/storage/innobase/dict/dict0stats_bg.cc +++ b/storage/innobase/dict/dict0stats_bg.cc @@ -32,7 +32,6 @@ Created Apr 25, 2012 Vasil Dimov #include "srv0start.h" #include "ut0new.h" #include "fil0fil.h" -#include "trx0trx.h" #include @@ -181,7 +180,7 @@ dict_stats_update_if_needed(dict_table_t* table) if (counter > threshold) { /* this will reset table->stat_modified_counter to 0 */ - dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT, NULL); + dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT); } } @@ -324,7 +323,8 @@ Get the first table that has been added for auto recalc and eventually update its stats. 
*/ static void -dict_stats_process_entry_from_recalc_pool(trx_t* trx) +dict_stats_process_entry_from_recalc_pool() +/*=======================================*/ { table_id_t table_id; @@ -378,12 +378,8 @@ dict_stats_process_entry_from_recalc_pool(trx_t* trx) dict_stats_recalc_pool_add(table); } else { - trx->error_state = DB_SUCCESS; - ++trx->will_lock; - dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT, trx); - if (trx->state != TRX_STATE_NOT_STARTED) { - trx_commit_for_mysql(trx); - } + + dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT); } mutex_enter(&dict_sys->mutex); @@ -444,9 +440,6 @@ DECLARE_THREAD(dict_stats_thread)(void*) */ #endif /* UNIV_PFS_THREAD */ - trx_t* trx = trx_allocate_for_background(); - ut_d(trx->persistent_stats = true); - while (!dict_stats_start_shutdown) { /* Wake up periodically even if not signaled. This is @@ -472,14 +465,12 @@ DECLARE_THREAD(dict_stats_thread)(void*) break; } - dict_stats_process_entry_from_recalc_pool(trx); - dict_defrag_process_entries_from_defrag_pool(trx); + dict_stats_process_entry_from_recalc_pool(); + dict_defrag_process_entries_from_defrag_pool(); os_event_reset(dict_stats_event); } - ut_d(trx->persistent_stats = false); - trx_free_for_background(trx); srv_dict_stats_thread_active = false; os_event_set(dict_stats_shutdown_event); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 198cfadc195..bfcf8dfe741 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -6470,22 +6470,7 @@ no_such_table: /* No point to init any statistics if tablespace is still encrypted. */ if (ib_table->is_readable()) { - trx_t* trx = check_trx_exists(thd); - bool alloc = !trx_state_eq(trx, TRX_STATE_NOT_STARTED); - - if (alloc) { - trx = trx_allocate_for_background(); - } - ut_ad(!trx->persistent_stats); - ut_d(trx->persistent_stats = true); - trx->error_state = DB_SUCCESS; - ++trx->will_lock; - dict_stats_init(ib_table, trx); - innobase_commit_low(trx); - ut_d(trx->persistent_stats = false); - if (alloc) { - trx_free_for_background(trx); - } + dict_stats_init(ib_table); } else { ib_table->stat_initialized = 1; } @@ -13274,10 +13259,7 @@ create_table_info_t::create_table_update_dict() innobase_copy_frm_flags_from_create_info(innobase_table, m_create_info); - ++m_trx->will_lock; - m_trx->error_state = DB_SUCCESS; - dict_stats_update(innobase_table, DICT_STATS_EMPTY_TABLE, m_trx); - innobase_commit_low(m_trx); + dict_stats_update(innobase_table, DICT_STATS_EMPTY_TABLE); if (innobase_table) { /* We update the highest file format in the system table @@ -13539,8 +13521,7 @@ ha_innobase::discard_or_import_tablespace( /* Adjust the persistent statistics. 
*/ ret = dict_stats_update(dict_table, - DICT_STATS_RECALC_PERSISTENT, - m_prebuilt->trx); + DICT_STATS_RECALC_PERSISTENT); if (ret != DB_SUCCESS) { push_warning_printf( @@ -13548,11 +13529,8 @@ ha_innobase::discard_or_import_tablespace( Sql_condition::WARN_LEVEL_WARN, ER_ALTER_INFO, "Error updating stats for table '%s'" - " after table import: %s", + " after table rebuild: %s", dict_table->name.m_name, ut_strerr(ret)); - trx_rollback_to_savepoint(m_prebuilt->trx, NULL); - } else { - trx_commit_for_mysql(m_prebuilt->trx); } } @@ -14031,6 +14009,8 @@ ha_innobase::rename_table( innobase_commit_low(trx); + trx_free_for_mysql(trx); + if (error == DB_SUCCESS) { char norm_from[MAX_FULL_NAME_LEN]; char norm_to[MAX_FULL_NAME_LEN]; @@ -14040,24 +14020,17 @@ ha_innobase::rename_table( normalize_table_name(norm_from, from); normalize_table_name(norm_to, to); - trx->error_state = DB_SUCCESS; - ++trx->will_lock; ret = dict_stats_rename_table(norm_from, norm_to, - errstr, sizeof errstr, trx); + errstr, sizeof(errstr)); if (ret != DB_SUCCESS) { - trx_rollback_to_savepoint(trx, NULL); ib::error() << errstr; push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_LOCK_WAIT_TIMEOUT, errstr); - } else { - innobase_commit_low(trx); } } - trx_free_for_mysql(trx); - /* Add a special case to handle the Duplicated Key error and return DB_ERROR instead. This is to avoid a possible SIGSEGV error from mysql error @@ -14595,34 +14568,18 @@ ha_innobase::info_low( } ut_ad(!mutex_own(&dict_sys->mutex)); - /* Do not use prebuilt->trx in case this is - called in the middle of a transaction. We - should commit the transaction after - dict_stats_update() in order not to hog locks - on the mysql.innodb_table_stats, - mysql.innodb_index_stats tables. */ - trx_t* trx = trx_allocate_for_background(); - ut_d(trx->persistent_stats = true); - ++trx->will_lock; - ret = dict_stats_update(ib_table, opt, trx); + ret = dict_stats_update(ib_table, opt); if (ret != DB_SUCCESS) { m_prebuilt->trx->op_info = ""; - trx_rollback_to_savepoint(trx, NULL); - } else { - m_prebuilt->trx->op_info = - "returning various info to MySQL"; - trx_commit_for_mysql(trx); - } - - ut_d(trx->persistent_stats = false); - trx_free_for_background(trx); - - if (ret != DB_SUCCESS) { DBUG_RETURN(HA_ERR_GENERIC); } + + m_prebuilt->trx->op_info = + "returning various info to MariaDB"; } + stats.update_time = (ulong) ib_table->update_time; } diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 9e6d81e21e4..94b36e3e7d6 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -8214,27 +8214,29 @@ commit_cache_norebuild( /** Adjust the persistent statistics after non-rebuilding ALTER TABLE. Remove statistics for dropped indexes, add statistics for created indexes and rename statistics for renamed indexes. 
-@param ha_alter_info Data used during in-place alter -@param ctx In-place ALTER TABLE context -@param table_name Table name in MySQL -@param trx transaction -@return error code */ +@param ha_alter_info Data used during in-place alter +@param ctx In-place ALTER TABLE context +@param altered_table MySQL table that is being altered +@param table_name Table name in MySQL +@param thd MySQL connection +*/ static -dberr_t +void alter_stats_norebuild( +/*==================*/ Alter_inplace_info* ha_alter_info, ha_innobase_inplace_ctx* ctx, + TABLE* altered_table, const char* table_name, - trx_t* trx) + THD* thd) { - dberr_t err = DB_SUCCESS; ulint i; DBUG_ENTER("alter_stats_norebuild"); DBUG_ASSERT(!ctx->need_rebuild()); if (!dict_stats_is_persistent_enabled(ctx->new_table)) { - DBUG_RETURN(err); + DBUG_VOID_RETURN; } /* Delete corresponding rows from the stats table. We do this @@ -8263,13 +8265,10 @@ alter_stats_norebuild( char errstr[1024]; - dberr_t err2 = dict_stats_drop_index( - ctx->new_table->name.m_name, key->name, - errstr, sizeof errstr, trx); - - if (err2 != DB_SUCCESS) { - err = err2; - push_warning(trx->mysql_thd, + if (dict_stats_drop_index( + ctx->new_table->name.m_name, key->name, + errstr, sizeof errstr) != DB_SUCCESS) { + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_LOCK_WAIT_TIMEOUT, errstr); } @@ -8305,36 +8304,34 @@ alter_stats_norebuild( DBUG_ASSERT(index->table == ctx->new_table); if (!(index->type & DICT_FTS)) { - dict_stats_init(ctx->new_table, trx); - dberr_t err2 = dict_stats_update_for_index(index, trx); - if (err2 != DB_SUCCESS) { - err = err2; - } + dict_stats_init(ctx->new_table); + dict_stats_update_for_index(index); } } - DBUG_RETURN(err); + DBUG_VOID_RETURN; } /** Adjust the persistent statistics after rebuilding ALTER TABLE. Remove statistics for dropped indexes, add statistics for created indexes and rename statistics for renamed indexes. 
-@param table InnoDB table that was rebuilt by ALTER TABLE -@param table_name Table name in MySQL -@param trx transaction -@return error code */ +@param table InnoDB table that was rebuilt by ALTER TABLE +@param table_name Table name in MySQL +@param thd MySQL connection +*/ static -dberr_t +void alter_stats_rebuild( +/*================*/ dict_table_t* table, const char* table_name, - trx_t* trx) + THD* thd) { DBUG_ENTER("alter_stats_rebuild"); if (dict_table_is_discarded(table) || !dict_stats_is_persistent_enabled(table)) { - DBUG_RETURN(DB_SUCCESS); + DBUG_VOID_RETURN; } #ifndef DBUG_OFF @@ -8347,24 +8344,7 @@ alter_stats_rebuild( table->file_unreadable = true; ); - char errstr[1024]; - mutex_enter(&dict_sys->mutex); - dberr_t ret = dict_stats_drop_table(table->name.m_name, - errstr, sizeof errstr, trx); - mutex_exit(&dict_sys->mutex); - if (ret != DB_SUCCESS) { - push_warning_printf( - trx->mysql_thd, - Sql_condition::WARN_LEVEL_WARN, - ER_ALTER_INFO, - "Deleting persistent statistics" - " for rebuilt table '%s' in" - " InnoDB failed: %s", - table_name, errstr); - DBUG_RETURN(ret); - } - - ret = dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT, trx); + dberr_t ret = dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT); DBUG_EXECUTE_IF( "ib_rename_index_fail2", @@ -8373,7 +8353,7 @@ alter_stats_rebuild( if (ret != DB_SUCCESS) { push_warning_printf( - trx->mysql_thd, + thd, Sql_condition::WARN_LEVEL_WARN, ER_ALTER_INFO, "Error updating stats for table '%s'" @@ -8381,7 +8361,7 @@ alter_stats_rebuild( table_name, ut_strerr(ret)); } - DBUG_RETURN(ret); + DBUG_VOID_RETURN; } #ifndef DBUG_OFF @@ -8688,36 +8668,6 @@ ha_innobase::commit_inplace_alter_table( if (fail) { trx_rollback_for_mysql(trx); } else if (!new_clustered) { - if (ctx0->num_to_drop_vcol || ctx0->num_to_add_vcol) { - DBUG_ASSERT(ctx0->old_table->get_ref_count() == 1); - bool warned = false; - - for (inplace_alter_handler_ctx** pctx = ctx_array; - *pctx; pctx++) { - ha_innobase_inplace_ctx* ctx - = static_cast - (*pctx); - - DBUG_ASSERT(!ctx->need_rebuild()); - char errstr[1024]; - if (dict_stats_drop_table( - ctx->old_table->name.m_name, - errstr, sizeof errstr, trx) - != DB_SUCCESS && !warned) { - warned = true; - push_warning_printf( - m_user_thd, - Sql_condition::WARN_LEVEL_WARN, - ER_ALTER_INFO, - "Deleting persistent " - "statistics for table '%s' in" - " InnoDB failed: %s", - table_share->table_name.str, - errstr); - } - } - } - trx_commit_for_mysql(trx); } else { mtr_t mtr; @@ -8974,6 +8924,23 @@ foreign_fail: m_prebuilt->table = dict_table_open_on_name( tb_name, TRUE, TRUE, DICT_ERR_IGNORE_NONE); + /* Drop outdated table stats. */ + char errstr[1024]; + if (dict_stats_drop_table( + m_prebuilt->table->name.m_name, + errstr, sizeof(errstr)) + != DB_SUCCESS) { + push_warning_printf( + m_user_thd, + Sql_condition::WARN_LEVEL_WARN, + ER_ALTER_INFO, + "Deleting persistent statistics" + " for table '%s' in" + " InnoDB failed: %s", + table->s->table_name.str, + errstr); + } + row_mysql_unlock_data_dictionary(trx); trx_free_for_mysql(trx); MONITOR_ATOMIC_DEC(MONITOR_PENDING_ALTER_TABLE); @@ -9029,6 +8996,41 @@ foreign_fail: } #endif if (new_clustered) { + /* Since the table has been rebuilt, we remove + all persistent statistics corresponding to the + old copy of the table (which was renamed to + ctx->tmp_name). 
*/ + + char errstr[1024]; + + DBUG_ASSERT(0 == strcmp(ctx->old_table->name.m_name, + ctx->tmp_name)); + + DBUG_EXECUTE_IF( + "ib_rename_index_fail3", + DBUG_SET("+d,innodb_report_deadlock"); + ); + + if (dict_stats_drop_table( + ctx->new_table->name.m_name, + errstr, sizeof(errstr)) + != DB_SUCCESS) { + push_warning_printf( + m_user_thd, + Sql_condition::WARN_LEVEL_WARN, + ER_ALTER_INFO, + "Deleting persistent statistics" + " for rebuilt table '%s' in" + " InnoDB failed: %s", + table->s->table_name.str, + errstr); + } + + DBUG_EXECUTE_IF( + "ib_rename_index_fail3", + DBUG_SET("-d,innodb_report_deadlock"); + ); + DBUG_EXECUTE_IF("ib_ddl_crash_before_commit", DBUG_SUICIDE();); @@ -9074,14 +9076,11 @@ foreign_fail: } row_mysql_unlock_data_dictionary(trx); - trx->error_state = DB_SUCCESS; - ++trx->will_lock; + trx_free_for_mysql(trx); /* TODO: The following code could be executed while allowing concurrent access to the table (MDL downgrade). */ - trx->mysql_thd = m_user_thd; - dberr_t stats_err = DB_SUCCESS; if (new_clustered) { for (inplace_alter_handler_ctx** pctx = ctx_array; @@ -9090,11 +9089,10 @@ foreign_fail: = static_cast (*pctx); DBUG_ASSERT(ctx->need_rebuild()); - stats_err = alter_stats_rebuild( - ctx->new_table, table->s->table_name.str, trx); - if (stats_err != DB_SUCCESS) { - break; - } + + alter_stats_rebuild( + ctx->new_table, table->s->table_name.str, + m_user_thd); DBUG_INJECT_CRASH("ib_commit_inplace_crash", crash_inject_count++); } @@ -9106,25 +9104,14 @@ foreign_fail: (*pctx); DBUG_ASSERT(!ctx->need_rebuild()); - stats_err = alter_stats_norebuild( - ha_alter_info, ctx, - table->s->table_name.str, trx); - if (stats_err != DB_SUCCESS) { - break; - } + alter_stats_norebuild( + ha_alter_info, ctx, altered_table, + table->s->table_name.str, m_user_thd); DBUG_INJECT_CRASH("ib_commit_inplace_crash", crash_inject_count++); } } - if (stats_err != DB_SUCCESS) { - trx_rollback_to_savepoint(trx, NULL); - } else { - trx_commit_for_mysql(trx); - } - - trx_free_for_mysql(trx); - innobase_parse_hint_from_comment( m_user_thd, m_prebuilt->table, altered_table->s); diff --git a/storage/innobase/include/dict0defrag_bg.h b/storage/innobase/include/dict0defrag_bg.h index 6dd73f1d481..8d77a461dc9 100644 --- a/storage/innobase/include/dict0defrag_bg.h +++ b/storage/innobase/include/dict0defrag_bg.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2016, 2017, MariaDB Corporation. +Copyright (c) 2016, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -66,24 +66,27 @@ dict_stats_defrag_pool_del( all entries for the table */ const dict_index_t* index); /*!< in: index to remove */ -/** Process indexes that have been scheduled for defragmenting. -@param[in,out] trx transaction that will be started and committed */ +/*****************************************************************//** +Get the first index that has been added for updating persistent defrag +stats and eventually save its stats. */ void -dict_defrag_process_entries_from_defrag_pool(trx_t* trx); +dict_defrag_process_entries_from_defrag_pool(); +/*===========================================*/ -/** Save defragmentation result. -@param[in] index index that was defragmented -@param[in,out] trx transaction +/*********************************************************************//** +Save defragmentation result. 
@return DB_SUCCESS or error code */ dberr_t -dict_stats_save_defrag_summary(dict_index_t* index, trx_t* trx) - MY_ATTRIBUTE((nonnull, warn_unused_result)); +dict_stats_save_defrag_summary( +/*============================*/ + dict_index_t* index) /*!< in: index */ + MY_ATTRIBUTE((warn_unused_result)); -/** Save defragmentation stats for a given index. -@param[in] index index that is being defragmented -@param[in,out] trx transaction +/*********************************************************************//** +Save defragmentation stats for a given index. @return DB_SUCCESS or error code */ dberr_t -dict_stats_save_defrag_stats(dict_index_t* index, trx_t* trx) - MY_ATTRIBUTE((nonnull, warn_unused_result)); +dict_stats_save_defrag_stats( +/*============================*/ + dict_index_t* index); /*!< in: index */ #endif /* dict0defrag_bg_h */ diff --git a/storage/innobase/include/dict0stats.h b/storage/innobase/include/dict0stats.h index 357d26b5557..5dd53c46d1b 100644 --- a/storage/innobase/include/dict0stats.h +++ b/storage/innobase/include/dict0stats.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2009, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -92,12 +92,13 @@ bool dict_stats_auto_recalc_is_enabled(const dict_table_t* table) MY_ATTRIBUTE((nonnull, warn_unused_result)); -/** Initialize table statistics for the first time when opening a table. -@param[in,out] table freshly opened table -@param[in,out] trx transaction */ +/*********************************************************************//** +Initialize table's stats for the first time when opening a table. */ UNIV_INLINE void -dict_stats_init(dict_table_t* table, trx_t* trx); +dict_stats_init( +/*============*/ + dict_table_t* table); /*!< in/out: table */ /*********************************************************************//** Deinitialize table's stats after the last close of the table. This is @@ -116,68 +117,67 @@ void dict_stats_update_if_needed(dict_table_t* table) MY_ATTRIBUTE((nonnull)); -/** Calculate new estimates for table and index statistics. -@param[in,out] table table -@param[in] stats_upd_option how to update statistics -@param[in,out] trx transaction +/*********************************************************************//** +Calculates new estimates for table and index statistics. The statistics +are used in query optimization. @return DB_* error code or DB_SUCCESS */ dberr_t dict_stats_update( - dict_table_t* table, - dict_stats_upd_option_t stats_upd_option, - trx_t* trx); +/*==============*/ + dict_table_t* table, /*!< in/out: table */ + dict_stats_upd_option_t stats_upd_option); + /*!< in: whether to (re) calc + the stats or to fetch them from + the persistent storage */ -/** Remove the persistent statistics for an index. -@param[in] db_and_table schema and table name, e.g., 'db/table' -@param[in] iname index name -@param[out] errstr error message (when not returning DB_SUCCESS) -@param[in] errstr_sz sizeof errstr -@param[in,out] trx transaction +/*********************************************************************//** +Removes the information for a particular index's stats from the persistent +storage if it exists and if there is data stored for this index. 
+This function creates its own trx and commits it. @return DB_SUCCESS or error code */ dberr_t dict_stats_drop_index( - const char* db_and_table, - const char* iname, - char* errstr, - size_t errstr_sz, - trx_t* trx); +/*==================*/ + const char* tname, /*!< in: table name */ + const char* iname, /*!< in: index name */ + char* errstr, /*!< out: error message if != DB_SUCCESS + is returned */ + ulint errstr_sz);/*!< in: size of the errstr buffer */ -/** Remove the persistent statistics for a table and all of its indexes. -@param[in] db_and_table schema and table name, e.g., 'db/table' -@param[out] errstr error message (when not returning DB_SUCCESS) -@param[in] errstr_sz sizeof errstr -@param[in,out] trx transaction +/*********************************************************************//** +Removes the statistics for a table and all of its indexes from the +persistent storage if it exists and if there is data stored for the table. +This function creates its own transaction and commits it. @return DB_SUCCESS or error code */ dberr_t dict_stats_drop_table( - const char* db_and_table, - char* errstr, - size_t errstr_sz, - trx_t* trx); +/*==================*/ + const char* table_name, /*!< in: table name */ + char* errstr, /*!< out: error message + if != DB_SUCCESS is returned */ + ulint errstr_sz); /*!< in: size of errstr buffer */ -/** Calculate index statistics. -@param[in,out] index index tree -@param[in,out] trx transaction (for persistent statistics) -@return DB_SUCCESS or error code */ -UNIV_INTERN -dberr_t -dict_stats_update_for_index(dict_index_t* index, trx_t* trx) +/*********************************************************************//** +Fetches or calculates new estimates for index statistics. */ +void +dict_stats_update_for_index( +/*========================*/ + dict_index_t* index) /*!< in/out: index */ MY_ATTRIBUTE((nonnull)); -/** Rename a table in the InnoDB persistent statistics storage. -@param[in] old_name old schema and table name, e.g., 'db/table' -@param[in] new_name new schema and table name, e.g., 'db/table' -@param[out] errstr error message (when not returning DB_SUCCESS) -@param[in] errstr_sz sizeof errstr -@param[in,out] trx transaction +/*********************************************************************//** +Renames a table in InnoDB persistent stats storage. +This function creates its own transaction and commits it. @return DB_SUCCESS or error code */ dberr_t dict_stats_rename_table( - const char* old_name, - const char* new_name, - char* errstr, - size_t errstr_sz, - trx_t* trx); +/*====================*/ + const char* old_name, /*!< in: old table name */ + const char* new_name, /*!< in: new table name */ + char* errstr, /*!< out: error string if != DB_SUCCESS + is returned */ + size_t errstr_sz); /*!< in: errstr size */ +#ifdef MYSQL_RENAME_INDEX /*********************************************************************//** Renames an index in InnoDB persistent stats storage. This function creates its own transaction and commits it. @@ -191,13 +191,7 @@ dict_stats_rename_index( const char* old_index_name, /*!< in: old index name */ const char* new_index_name) /*!< in: new index name */ __attribute__((warn_unused_result)); - -/** Reset the table and index statsistics, corresponding to an empty table. 
-@param[in,out] table table whose statistics are to be reset -@param[in] empty_defrag_stats whether to empty the defrag statistics -*/ -void -dict_stats_empty_table(dict_table_t* table, bool empty_defrag_stats = true); +#endif /* MYSQL_RENAME_INDEX */ /** Save an individual index's statistic into the persistent statistics storage. @@ -207,7 +201,9 @@ storage. @param[in] stat_value value of the stat @param[in] sample_size n pages sampled or NULL @param[in] stat_description description of the stat -@param[in,out] trx dictionary transaction +@param[in,out] trx in case of NULL the function will +allocate and free the trx object. If it is not NULL then it will be +rolled back only in the case of error, but not freed. @return DB_SUCCESS or error code */ dberr_t dict_stats_save_index_stat( @@ -217,8 +213,7 @@ dict_stats_save_index_stat( ib_uint64_t stat_value, ib_uint64_t* sample_size, const char* stat_description, - trx_t* trx) - MY_ATTRIBUTE((nonnull(1,3,7), warn_unused_result)); + trx_t* trx); /** Report an error if updating table statistics failed because .ibd file is missing, table decryption failed or table is corrupted. diff --git a/storage/innobase/include/dict0stats.ic b/storage/innobase/include/dict0stats.ic index e0784f63038..0d187ed90c7 100644 --- a/storage/innobase/include/dict0stats.ic +++ b/storage/innobase/include/dict0stats.ic @@ -141,12 +141,13 @@ dict_stats_auto_recalc_is_enabled(const dict_table_t* table) } } -/** Initialize table statistics for the first time when opening a table. -@param[in,out] table freshly opened table -@param[in,out] trx transaction */ +/*********************************************************************//** +Initialize table's stats for the first time when opening a table. */ UNIV_INLINE void -dict_stats_init(dict_table_t* table, trx_t* trx) +dict_stats_init( +/*============*/ + dict_table_t* table) /*!< in/out: table */ { ut_ad(!mutex_own(&dict_sys->mutex)); @@ -162,7 +163,7 @@ dict_stats_init(dict_table_t* table, trx_t* trx) opt = DICT_STATS_RECALC_TRANSIENT; } - dict_stats_update(table, opt, trx); + dict_stats_update(table, opt); } /*********************************************************************//** diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 001d3650bfe..b2d4952318c 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -1123,9 +1123,6 @@ struct trx_t { mysql_trx_list; /*!< list of transactions created for MySQL; protected by trx_sys->mutex */ #ifdef UNIV_DEBUG - /** whether this transaction is updating persistent statistics - (used for silencing a debug assertion at shutdown) */ - bool persistent_stats; bool in_mysql_trx_list; /*!< true if in trx_sys->mysql_trx_list */ diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 7a1afec820c..41cf2f15ba4 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -1683,7 +1683,18 @@ RecLock::prepare() const ut_error; } - ut_ad(trx_get_dict_operation(m_trx) == TRX_DICT_OP_NONE); + switch (trx_get_dict_operation(m_trx)) { + case TRX_DICT_OP_NONE: + break; + case TRX_DICT_OP_TABLE: + case TRX_DICT_OP_INDEX: + ib::error() << "A record lock wait happens in a dictionary" + " operation. index " << m_index->name + << " of table " << m_index->table->name + << ". 
" << BUG_REPORT_MSG; + ut_ad(0); + } + ut_ad(m_index->table->n_ref_count > 0 || !m_index->table->can_be_evicted); } @@ -2246,24 +2257,6 @@ RecLock::add_to_waitq(lock_t* wait_for, const lock_prdt_t* prdt) /* Do the preliminary checks, and set query thread state */ - switch (UNIV_EXPECT(trx_get_dict_operation(m_trx), TRX_DICT_OP_NONE)) { - case TRX_DICT_OP_NONE: - break; - case TRX_DICT_OP_TABLE: - case TRX_DICT_OP_INDEX: - ut_ad(!prdt); - - if (m_trx->dict_operation_lock_mode != RW_X_LATCH) { - } else if (!strcmp(m_index->table->name.m_name, - "mysql/innodb_table_stats") - || !strcmp(m_index->table->name.m_name, - "mysql/innodb_index_stats")) { - /* Statistics can be updated as part of a DDL - transaction, but only as the very last operation. */ - return(DB_QUE_THR_SUSPENDED); - } - } - prepare(); bool high_priority = trx_is_high_priority(m_trx); @@ -4664,16 +4657,6 @@ lock_table_enqueue_waiting( break; case TRX_DICT_OP_TABLE: case TRX_DICT_OP_INDEX: - if (trx->dict_operation_lock_mode != RW_X_LATCH) { - } else if (!strcmp(table->name.m_name, - "mysql/innodb_table_stats") - || !strcmp(table->name.m_name, - "mysql/innodb_index_stats")) { - /* Statistics can be updated as part of a DDL - transaction, but only as the very last operation. */ - return(DB_QUE_THR_SUSPENDED); - } - ib::error() << "A table lock wait happens in a dictionary" " operation. Table " << table->name << ". " << BUG_REPORT_MSG; diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 7db855244ab..50be659ca50 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -3289,9 +3289,31 @@ run_again: que_thr_stop_for_mysql_no_error(thr, trx); } else { que_thr_stop_for_mysql(thr); - ut_ad(err != DB_QUE_THR_SUSPENDED); - if (row_mysql_handle_errors(&err, trx, thr, NULL)) { + if (err != DB_QUE_THR_SUSPENDED) { + ibool was_lock_wait; + + was_lock_wait = row_mysql_handle_errors( + &err, trx, thr, NULL); + + if (was_lock_wait) { + goto run_again; + } + } else { + que_thr_t* run_thr; + que_node_t* parent; + + parent = que_node_get_parent(thr); + + run_thr = que_fork_start_command( + static_cast(parent)); + + ut_a(run_thr == thr); + + /* There was a lock wait but the thread was not + in a ready to run or running state. */ + trx->error_state = DB_LOCK_WAIT; + goto run_again; } } @@ -3569,6 +3591,7 @@ row_drop_table_for_mysql( if (!dict_table_is_temporary(table)) { + dict_stats_recalc_pool_del(table); dict_stats_defrag_pool_del(table, NULL); if (btr_defragment_thread_active) { /* During fts_drop_orphaned_tables() in @@ -3577,6 +3600,17 @@ row_drop_table_for_mysql( initialized by btr_defragment_init(). */ btr_defragment_remove_table(table); } + + /* Remove stats for this table and all of its indexes from the + persistent storage if it exists and if there are stats for this + table in there. This function creates its own trx and commits + it. 
*/ + char errstr[1024]; + err = dict_stats_drop_table(name, errstr, sizeof(errstr)); + + if (err != DB_SUCCESS) { + ib::warn() << errstr; + } } dict_table_prevent_eviction(table); @@ -3894,20 +3928,9 @@ row_drop_table_for_mysql( table_flags = table->flags; ut_ad(!dict_table_is_temporary(table)); -#if MYSQL_VERSION_ID >= 100300 - if (!table->no_rollback()) -#endif - { - char errstr[1024]; - if (dict_stats_drop_table(name, errstr, sizeof errstr, - trx) != DB_SUCCESS) { - ib::warn() << errstr; - } - - err = row_drop_ancillary_fts_tables(table, trx); - if (err != DB_SUCCESS) { - break; - } + err = row_drop_ancillary_fts_tables(table, trx); + if (err != DB_SUCCESS) { + break; } /* Determine the tablespace filename before we drop diff --git a/storage/innobase/row/row0trunc.cc b/storage/innobase/row/row0trunc.cc index 5d4ab80f71c..068b4d96ed2 100644 --- a/storage/innobase/row/row0trunc.cc +++ b/storage/innobase/row/row0trunc.cc @@ -1271,6 +1271,10 @@ row_truncate_complete( } } + if (err == DB_SUCCESS) { + dict_stats_update(table, DICT_STATS_EMPTY_TABLE); + } + trx->op_info = ""; /* For temporary tables or if there was an error, we need to reset @@ -2103,17 +2107,8 @@ row_truncate_table_for_mysql( dict_table_autoinc_unlock(table); if (trx_is_started(trx)) { - char errstr[1024]; - if (dict_stats_drop_table(table->name.m_name, errstr, - sizeof errstr, trx) != DB_SUCCESS) { - ib::warn() << "Deleting persistent " - "statistics for table " << table->name - << " failed: " << errstr; - } trx_commit_for_mysql(trx); - - dict_stats_empty_table(table); } return(row_truncate_complete(table, trx, fsp_flags, logger, err)); diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index 07150577d22..a1f16df2304 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -302,7 +302,7 @@ trx_purge_add_update_undo_to_history( || (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND && purge_sys->state == PURGE_STATE_DISABLED) || ((trx->undo_no == 0 || trx->in_mysql_trx_list - || trx->persistent_stats) + || trx->internal) && srv_fast_shutdown)); /* Add the log as the first in the history list */ diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index 0e488d6379a..cc79a499c02 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -553,7 +553,7 @@ trx_validate_state_before_free(trx_t* trx) ut_ad(!trx->declared_to_be_inside_innodb); ut_ad(!trx->n_mysql_tables_in_use); ut_ad(!trx->mysql_n_tables_locked); - ut_ad(!trx->persistent_stats); + ut_ad(!trx->internal); if (trx->declared_to_be_inside_innodb) { From 9b4dfdaa5a1e1ca84a2e9e65dd3066b382f65ae7 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 18 Jan 2018 15:25:40 +0300 Subject: [PATCH 08/17] MDEV-13352: Server crashes in st_join_table::remove_duplicates join_tab->distinct=true means "Before doing record read with this join_tab, call join_tab->remove_duplicates() to eliminate duplicates". remove_duplicates() assumes that - there is a temporary table $T with rows that are to be de-duplicated - there is a previous join_tab (e.g. with join_tab->fields) which was used to populate the temp.table $T. When the query has "Impossible WHERE" and window function, then the above conditions are not met (but we still might need a window function computation step when the query has implicit grouping). The fix is to not add remove_duplicates step if the select execution is degenerate (and we'll have at most one row in the output anyway). 
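A minimal reproducer (it is the test case added to mysql-test/t/win.test by
this patch):

  CREATE TABLE t1 (i INT);
  INSERT INTO t1 VALUES (1),(2);
  SELECT DISTINCT ROW_NUMBER() OVER(), i FROM t1 WHERE 0;

With WHERE 0 the join is degenerate: no preceding join_tab ever populates a
temporary table before the window function step, so requesting the DISTINCT
(remove_duplicates) pass would operate on state that was never set up.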
--- mysql-test/r/win.result | 10 ++++++++++ mysql-test/t/win.test | 10 ++++++++++ sql/sql_select.cc | 10 +++++++++- 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/win.result b/mysql-test/r/win.result index 7e0c86b1668..e3cb40e8343 100644 --- a/mysql-test/r/win.result +++ b/mysql-test/r/win.result @@ -3289,3 +3289,13 @@ SELECT id, window FROM door as window; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'window' at line 2 DROP TABLE door; +# +# MDEV-13352: Server crashes in st_join_table::remove_duplicates +# +CREATE TABLE t1 (i INT); +INSERT INTO t1 VALUES (1),(2); +SELECT DISTINCT ROW_NUMBER() OVER(), i FROM t1 WHERE 0; +ROW_NUMBER() OVER() i +SELECT ROW_NUMBER() OVER(), i FROM t1 WHERE 0; +ROW_NUMBER() OVER() i +DROP TABLE t1; diff --git a/mysql-test/t/win.test b/mysql-test/t/win.test index 1e747e59a1a..95ffb6d9909 100644 --- a/mysql-test/t/win.test +++ b/mysql-test/t/win.test @@ -2057,3 +2057,13 @@ SELECT id, window FROM door as window; DROP TABLE door; + +--echo # +--echo # MDEV-13352: Server crashes in st_join_table::remove_duplicates +--echo # +CREATE TABLE t1 (i INT); +INSERT INTO t1 VALUES (1),(2); +SELECT DISTINCT ROW_NUMBER() OVER(), i FROM t1 WHERE 0; +SELECT ROW_NUMBER() OVER(), i FROM t1 WHERE 0; +DROP TABLE t1; + diff --git a/sql/sql_select.cc b/sql/sql_select.cc index e80c02b52b4..6b1d406bf8a 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -2670,7 +2670,15 @@ bool JOIN::make_aggr_tables_info() curr_tab->having= having; having->update_used_tables(); } - curr_tab->distinct= true; + /* + We only need DISTINCT operation if the join is not degenerate. + If it is, we must not request DISTINCT processing, because + remove_duplicates() assumes there is a preceding computation step (and + in the degenerate join, there's none) + */ + if (top_join_tab_count) + curr_tab->distinct= true; + having= NULL; select_distinct= false; } From d04e1d4bdc956e6c8768df895b9d7607b543fff8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 22 Jan 2018 15:27:24 +0200 Subject: [PATCH 09/17] MDEV-15029 XA COMMIT and XA ROLLBACK operate on freed transaction object innobase_commit_by_xid(), innobase_rollback_by_xid(): Decrement the reference count before freeing the transaction object to the pool. Failure to do so might corrupt the transaction bookkeeping if trx_create_low() returns the same object to another thread before we are done with it. trx_sys_close(): Detach the recovered XA PREPARE transactions from trx_sys->rw_trx_list before freeing them. 
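In outline, the previous code in innobase_commit_by_xid() (and similarly in
innobase_rollback_by_xid()) was, condensed:

  if (trx_t* trx = trx_get_trx_by_xid(xid)) {
          TrxInInnoDB trx_in_innodb(trx);   /* takes a reference on trx */
          innobase_commit_low(trx);
          trx_deregister_from_2pc(trx);
          trx_free_for_background(trx);     /* trx is returned to the pool */
  }                                         /* the guard is only destroyed
                                               here, after the free */

The change below introduces an inner scope so that the TrxInInnoDB guard is
destroyed (releasing its reference) before trx_free_for_background() hands
the object back to the pool.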
--- storage/innobase/handler/ha_innodb.cc | 26 +++++++++++++++----------- storage/innobase/trx/trx0sys.cc | 10 +++------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index bfcf8dfe741..3ed43155e70 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -17881,12 +17881,14 @@ innobase_commit_by_xid( } if (trx_t* trx = trx_get_trx_by_xid(xid)) { - TrxInInnoDB trx_in_innodb(trx); - - innobase_commit_low(trx); - ut_ad(trx->mysql_thd == NULL); + ut_ad(trx->in_innodb & TRX_FORCE_ROLLBACK_DISABLE); /* use cases are: disconnected xa, slave xa, recovery */ - trx_deregister_from_2pc(trx); + { + TrxInInnoDB trx_in_innodb(trx); + innobase_commit_low(trx); + ut_ad(trx->mysql_thd == NULL); + trx_deregister_from_2pc(trx); + } ut_ad(!trx->will_lock); /* trx cache requirement */ trx_free_for_background(trx); @@ -17915,12 +17917,14 @@ innobase_rollback_by_xid( } if (trx_t* trx = trx_get_trx_by_xid(xid)) { - TrxInInnoDB trx_in_innodb(trx); - - int ret = innobase_rollback_trx(trx); - - trx_deregister_from_2pc(trx); - ut_ad(!trx->will_lock); + int ret; + ut_ad(trx->in_innodb & TRX_FORCE_ROLLBACK_DISABLE); + { + TrxInInnoDB trx_in_innodb(trx); + ret = innobase_rollback_trx(trx); + trx_deregister_from_2pc(trx); + ut_ad(!trx->will_lock); + } trx_free_for_background(trx); return(ret); diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc index 3a798a504fc..393dff8c026 100644 --- a/storage/innobase/trx/trx0sys.cc +++ b/storage/innobase/trx/trx0sys.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -939,13 +939,9 @@ trx_sys_close(void) || srv_read_only_mode || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO); - for (trx_t* trx = UT_LIST_GET_FIRST(trx_sys->rw_trx_list); - trx != NULL; - trx = UT_LIST_GET_FIRST(trx_sys->rw_trx_list)) { - - trx_free_prepared(trx); - + while (trx_t* trx = UT_LIST_GET_FIRST(trx_sys->rw_trx_list)) { UT_LIST_REMOVE(trx_sys->rw_trx_list, trx); + trx_free_prepared(trx); } /* There can't be any active transactions. */ From 30f1d2f642a0f9702e799b0153b28f0d402a8073 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 22 Jan 2018 16:29:43 +0200 Subject: [PATCH 10/17] Remove useless method LatchCounter::sum_deregister() --- storage/innobase/include/sync0policy.h | 9 +-------- storage/innobase/include/sync0types.h | 10 +--------- 2 files changed, 2 insertions(+), 17 deletions(-) diff --git a/storage/innobase/include/sync0policy.h b/storage/innobase/include/sync0policy.h index 1b86d2633bf..de27c87816c 100644 --- a/storage/innobase/include/sync0policy.h +++ b/storage/innobase/include/sync0policy.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2013, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -454,14 +454,7 @@ public: void destroy() UNIV_NOTHROW { - latch_meta_t& meta = sync_latch_get_meta(m_id); - - ut_ad(meta.get_id() == m_id); - - meta.get_counter()->sum_deregister(m_count); - m_count = NULL; - ut_d(MutexDebug::destroy()); } diff --git a/storage/innobase/include/sync0types.h b/storage/innobase/include/sync0types.h index d7583f87f3b..711225041bc 100644 --- a/storage/innobase/include/sync0types.h +++ b/storage/innobase/include/sync0types.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -640,14 +640,6 @@ public: return(count); } - /** Deregister the count. We don't do anything - @param[in] count The count instance to deregister */ - void sum_deregister(Count* count) - UNIV_NOTHROW - { - /* Do nothing */ - } - /** Register a single instance counter */ void single_register(Count* count) UNIV_NOTHROW From 89ae5d7f2fca3efdea69bf13abb1e2785af7fe10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 22 Jan 2018 16:30:38 +0200 Subject: [PATCH 11/17] Allocate mutex_monitor, create_tracker statically --- storage/innobase/handler/ha_innodb.cc | 10 +++++----- storage/innobase/include/ut0mutex.h | 2 +- storage/innobase/sync/sync0debug.cc | 24 +++++------------------- storage/innobase/sync/sync0sync.cc | 2 +- 4 files changed, 12 insertions(+), 26 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 3ed43155e70..a17abf199fe 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -16740,7 +16740,7 @@ innodb_show_mutex_status( DBUG_ASSERT(hton == innodb_hton_ptr); - mutex_monitor->iterate(collector); + mutex_monitor.iterate(collector); if (!collector.to_string(hton, thd, stat_print)) { DBUG_RETURN(1); @@ -18907,7 +18907,7 @@ innodb_monitor_set_option( if (MONITOR_IS_ON(MONITOR_LATCHES)) { - mutex_monitor->enable(); + mutex_monitor.enable(); } break; @@ -18922,7 +18922,7 @@ innodb_monitor_set_option( if (!MONITOR_IS_ON(MONITOR_LATCHES)) { - mutex_monitor->disable(); + mutex_monitor.disable(); } break; @@ -18931,13 +18931,13 @@ innodb_monitor_set_option( if (monitor_id == (MONITOR_LATCHES)) { - mutex_monitor->reset(); + mutex_monitor.reset(); } break; case MONITOR_RESET_ALL_VALUE: srv_mon_reset_all(monitor_id); - mutex_monitor->reset(); + mutex_monitor.reset(); break; default: diff --git a/storage/innobase/include/ut0mutex.h b/storage/innobase/include/ut0mutex.h index bd3603ad4d0..dc387dadbdc 100644 --- a/storage/innobase/include/ut0mutex.h +++ b/storage/innobase/include/ut0mutex.h @@ -164,7 +164,7 @@ public: }; /** Defined in sync0sync.cc */ -extern MutexMonitor* mutex_monitor; +extern MutexMonitor mutex_monitor; /** Creates, or rather, initializes a mutex object in a specified memory diff --git a/storage/innobase/sync/sync0debug.cc b/storage/innobase/sync/sync0debug.cc index fa7a99ea4c0..793c6a59e33 100644 --- a/storage/innobase/sync/sync0debug.cc +++ b/storage/innobase/sync/sync0debug.cc @@ -1,7 +1,7 @@ 
/***************************************************************************** Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -1705,7 +1705,7 @@ private: }; /** Track latch creation location. For reducing the size of the latches */ -static CreateTracker* create_tracker; +static CreateTracker create_tracker; /** Register a latch, called when it is created @param[in] ptr Latch instance that was created @@ -1717,7 +1717,7 @@ sync_file_created_register( const char* filename, uint16_t line) { - create_tracker->register_latch(ptr, filename, line); + create_tracker.register_latch(ptr, filename, line); } /** Deregister a latch, called when it is destroyed @@ -1725,7 +1725,7 @@ sync_file_created_register( void sync_file_created_deregister(const void* ptr) { - create_tracker->deregister_latch(ptr); + create_tracker.deregister_latch(ptr); } /** Get the string where the file was created. Its format is "name:line" @@ -1734,7 +1734,7 @@ sync_file_created_deregister(const void* ptr) std::string sync_file_created_get(const void* ptr) { - return(create_tracker->get(ptr)); + return(create_tracker.get(ptr)); } /** Initializes the synchronization data structures. */ @@ -1744,12 +1744,6 @@ sync_check_init() ut_ad(!LatchDebug::s_initialized); ut_d(LatchDebug::s_initialized = true); - /** For collecting latch statistic - SHOW ... MUTEX */ - mutex_monitor = UT_NEW_NOKEY(MutexMonitor()); - - /** For trcking mutex creation location */ - create_tracker = UT_NEW_NOKEY(CreateTracker()); - sync_latch_meta_init(); /* Init the rw-lock & mutex list and create the mutex to protect it. */ @@ -1773,14 +1767,6 @@ sync_check_close() sync_array_close(); - UT_DELETE(mutex_monitor); - - mutex_monitor = NULL; - - UT_DELETE(create_tracker); - - create_tracker = NULL; - sync_latch_meta_destroy(); } diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc index 4be7162f631..16dd90cd879 100644 --- a/storage/innobase/sync/sync0sync.cc +++ b/storage/innobase/sync/sync0sync.cc @@ -118,7 +118,7 @@ mysql_pfs_key_t trx_purge_latch_key; #endif /* UNIV_PFS_RWLOCK */ /** For monitoring active mutexes */ -MutexMonitor* mutex_monitor; +MutexMonitor mutex_monitor; /** Prints wait info of the sync system. From 29eeb527fd4496eeb852f4aec80a43e60a1bc10c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 22 Jan 2018 16:53:33 +0200 Subject: [PATCH 12/17] MDEV-12173 "[Warning] Trying to access missing tablespace" ibuf_merge_or_delete_for_page(): Invoke fil_space_acquire_silent() instead of fil_space_acquire() in order to avoid displaying a useless message. We know perfectly well that a tablespace can be dropped while a change buffer merge is pending, because change buffer merges skip any transactional locks. --- storage/innobase/ibuf/ibuf0ibuf.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index d26391a80f4..7041ad53f0d 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2016, 2017, MariaDB Corporation. 
+Copyright (c) 2016, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -4492,7 +4492,7 @@ ibuf_merge_or_delete_for_page( return; } - space = fil_space_acquire(page_id.space()); + space = fil_space_acquire_silent(page_id.space()); if (UNIV_UNLIKELY(!space)) { /* Do not try to read the bitmap page from the From 87db5eb8130a58bd7556bda8a5637dfef982d51a Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 23 Jan 2018 09:12:25 +0000 Subject: [PATCH 13/17] MDEV-13825 mariabackup --lock-ddl-per-table does not properly lock FULLTEXT auxiliary tables Change the logic to take mdl lock on all tables before tablespaces are copied, rather than lock every single tablespace just before it is copied. --- extra/mariabackup/xtrabackup.cc | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 375d8845a2a..7eee64bd6ef 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -433,6 +433,22 @@ datafiles_iter_free(datafiles_iter_t *it) free(it); } +void mdl_lock_all() +{ + mdl_lock_init(); + datafiles_iter_t *it = datafiles_iter_new(fil_system); + if (!it) + return; + + while (fil_node_t *node = datafiles_iter_next(it)){ + if (fil_is_user_tablespace_id(node->space->id) + && check_if_skip_table(node->space->name)) + continue; + + mdl_lock_table(node->space->id); + } + datafiles_iter_free(it); +} /* ======== Date copying thread context ======== */ typedef struct { @@ -2199,10 +2215,6 @@ xtrabackup_copy_datafile(fil_node_t* node, uint thread_n) return(FALSE); } - if (opt_lock_ddl_per_table) { - mdl_lock_table(node->space->id); - } - if (!changed_page_bitmap) { read_filter = &rf_pass_through; } @@ -3562,9 +3574,7 @@ xtrabackup_backup_func() "or RENAME TABLE during the backup, inconsistent backup will be " "produced.\n"); - if (opt_lock_ddl_per_table) { - mdl_lock_init(); - } + /* initialize components */ if(innodb_init_param()) { @@ -3879,6 +3889,10 @@ reread_log_header: "files transfer\n", xtrabackup_parallel); } + if (opt_lock_ddl_per_table) { + mdl_lock_all(); + } + it = datafiles_iter_new(fil_system); if (it == NULL) { msg("mariabackup: Error: datafiles_iter_new() failed.\n"); From 7cc507f22e6eaec5ec83e24cd45275656bc7962f Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Tue, 23 Jan 2018 17:12:29 +0400 Subject: [PATCH 14/17] MDEV-14603 signal 11 with short stacktrace --- mysql-test/r/ps.result | 42 +++++++++++++++++++++++++++++ mysql-test/t/ps.test | 57 +++++++++++++++++++++++++++++++++++++++ sql/sql_class.h | 19 +++++++++++++ sql/sql_prepare.cc | 61 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 179 insertions(+) diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result index 1958cafca1e..96d4b3d2f70 100644 --- a/mysql-test/r/ps.result +++ b/mysql-test/r/ps.result @@ -5121,3 +5121,45 @@ END; $$ ERROR 22007: Incorrect datetime value: '10' for column 'a' at row 1 DROP PROCEDURE p1; +# +# MDEV-14603 signal 11 with short stacktrace +# +SET NAMES utf8; +CREATE TABLE t1(i INT); +CREATE PROCEDURE p1(tn VARCHAR(32)) +EXECUTE IMMEDIATE CONCAT('ANALYZE TABLE ',tn); +CALL p1('t1'); +Table Op Msg_type Msg_text +test.t1 analyze status Table is already up to date +DROP PROCEDURE p1; +DROP TABLE t1; +SET NAMES utf8; +CREATE PROCEDURE p1() +EXECUTE IMMEDIATE CONCAT('SELECT ',CONVERT(RAND() USING latin1)); +CALL p1(); +DROP 
PROCEDURE p1; +SET NAMES utf8; +CREATE PROCEDURE p1() +BEGIN +PREPARE stmt FROM CONCAT('SELECT ',CONVERT(RAND() USING latin1)); +EXECUTE stmt; +DEALLOCATE PREPARE stmt; +END; +$$ +CALL p1(); +DROP PROCEDURE p1; +SET NAMES utf8; +CREATE PROCEDURE p1(a VARCHAR(10) CHARACTER SET utf8) +EXECUTE IMMEDIATE 'SELECT ?' USING CONCAT(a, CONVERT(RAND() USING latin1)); +CALL p1('x'); +DROP PROCEDURE p1; +SET NAMES utf8; +CREATE PROCEDURE p1(a VARCHAR(10) CHARACTER SET utf8) +BEGIN +PREPARE stmt FROM 'SELECT ?'; +EXECUTE stmt USING CONCAT(a, CONVERT(RAND() USING latin1)); +DEALLOCATE PREPARE stmt; +END; +$$ +CALL p1('x'); +DROP PROCEDURE p1; diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test index 327a94cdace..565f831efdd 100644 --- a/mysql-test/t/ps.test +++ b/mysql-test/t/ps.test @@ -4578,3 +4578,60 @@ END; $$ DELIMITER ;$$ DROP PROCEDURE p1; + +--echo # +--echo # MDEV-14603 signal 11 with short stacktrace +--echo # + +SET NAMES utf8; +CREATE TABLE t1(i INT); +CREATE PROCEDURE p1(tn VARCHAR(32)) + EXECUTE IMMEDIATE CONCAT('ANALYZE TABLE ',tn); +CALL p1('t1'); +DROP PROCEDURE p1; +DROP TABLE t1; + +SET NAMES utf8; +CREATE PROCEDURE p1() + EXECUTE IMMEDIATE CONCAT('SELECT ',CONVERT(RAND() USING latin1)); +--disable_result_log +CALL p1(); +--enable_result_log +DROP PROCEDURE p1; + +SET NAMES utf8; +DELIMITER $$; +CREATE PROCEDURE p1() +BEGIN + PREPARE stmt FROM CONCAT('SELECT ',CONVERT(RAND() USING latin1)); + EXECUTE stmt; + DEALLOCATE PREPARE stmt; +END; +$$ +DELIMITER ;$$ +--disable_result_log +CALL p1(); +--enable_result_log +DROP PROCEDURE p1; + +SET NAMES utf8; +CREATE PROCEDURE p1(a VARCHAR(10) CHARACTER SET utf8) + EXECUTE IMMEDIATE 'SELECT ?' USING CONCAT(a, CONVERT(RAND() USING latin1)); +--disable_result_log +CALL p1('x'); +--enable_result_log +DROP PROCEDURE p1; + +SET NAMES utf8; +DELIMITER $$; +CREATE PROCEDURE p1(a VARCHAR(10) CHARACTER SET utf8) +BEGIN + PREPARE stmt FROM 'SELECT ?'; + EXECUTE stmt USING CONCAT(a, CONVERT(RAND() USING latin1)); + DEALLOCATE PREPARE stmt; +END; +$$ +DELIMITER ;$$ +--disable_result_log +CALL p1('x'); +DROP PROCEDURE p1; diff --git a/sql/sql_class.h b/sql/sql_class.h index bcd43cd62cd..5a409ec8268 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1296,6 +1296,25 @@ public: }; +class Item_change_list_savepoint: public Item_change_list +{ +public: + Item_change_list_savepoint(Item_change_list *list) + { + list->move_elements_to(this); + } + void rollback(Item_change_list *list) + { + list->rollback_item_tree_changes(); + move_elements_to(list); + } + ~Item_change_list_savepoint() + { + DBUG_ASSERT(is_empty()); + } +}; + + /** Type of locked tables mode. See comment for THD::locked_tables_mode for complete description. diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 4e847fb9ff3..16c386e5e8f 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -2823,6 +2823,25 @@ void mysql_sql_stmt_prepare(THD *thd) DBUG_VOID_RETURN; } + /* + Make sure we call Prepared_statement::prepare() with an empty + THD::change_list. It can be non-empty as LEX::get_dynamic_sql_string() + calls fix_fields() for the Item containing the PS source, + e.g. 
on character set conversion: + + SET NAMES utf8; + DELIMITER $$ + CREATE PROCEDURE p1() + BEGIN + PREPARE stmt FROM CONCAT('SELECT ',CONVERT(RAND() USING latin1)); + EXECUTE stmt; + END; + $$ + DELIMITER ; + CALL p1(); + */ + Item_change_list_savepoint change_list_savepoint(thd); + if (stmt->prepare(query.str, (uint) query.length)) { /* Statement map deletes the statement on erase */ @@ -2833,6 +2852,7 @@ void mysql_sql_stmt_prepare(THD *thd) SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL); my_ok(thd, 0L, 0L, "Statement prepared"); } + change_list_savepoint.rollback(thd); DBUG_VOID_RETURN; } @@ -2864,7 +2884,28 @@ void mysql_sql_stmt_execute_immediate(THD *thd) // See comments on thd->free_list in mysql_sql_stmt_execute() Item *free_list_backup= thd->free_list; thd->free_list= NULL; + /* + Make sure we call Prepared_statement::execute_immediate() + with an empty THD::change_list. It can be non empty as the above + LEX::prepared_stmt_params_fix_fields() and LEX::get_dynamic_str_string() + call fix_fields() for the PS source and PS parameter Items and + can do Item tree changes, e.g. on character set conversion: + + - Example #1: Item tree changes in get_dynamic_str_string() + SET NAMES utf8; + CREATE PROCEDURE p1() + EXECUTE IMMEDIATE CONCAT('SELECT ',CONVERT(RAND() USING latin1)); + CALL p1(); + + - Example #2: Item tree changes in prepared_stmt_param_fix_fields(): + SET NAMES utf8; + CREATE PROCEDURE p1(a VARCHAR(10) CHARACTER SET utf8) + EXECUTE IMMEDIATE 'SELECT ?' USING CONCAT(a, CONVERT(RAND() USING latin1)); + CALL p1('x'); + */ + Item_change_list_savepoint change_list_savepoint(thd); (void) stmt->execute_immediate(query.str, (uint) query.length); + change_list_savepoint.rollback(thd); thd->free_items(); thd->free_list= free_list_backup; @@ -3262,7 +3303,27 @@ void mysql_sql_stmt_execute(THD *thd) */ Item *free_list_backup= thd->free_list; thd->free_list= NULL; // Hide the external (e.g. "SET STATEMENT") Items + /* + Make sure we call Prepared_statement::execute_loop() with an empty + THD::change_list. It can be non-empty because the above + LEX::prepared_stmt_params_fix_fields() calls fix_fields() for + the PS parameter Items and can do some Item tree changes, + e.g. on character set conversion: + + SET NAMES utf8; + DELIMITER $$ + CREATE PROCEDURE p1(a VARCHAR(10) CHARACTER SET utf8) + BEGIN + PREPARE stmt FROM 'SELECT ?'; + EXECUTE stmt USING CONCAT(a, CONVERT(RAND() USING latin1)); + END; + $$ + DELIMITER ; + CALL p1('x'); + */ + Item_change_list_savepoint change_list_savepoint(thd); (void) stmt->execute_loop(&expanded_query, FALSE, NULL, NULL); + change_list_savepoint.rollback(thd); thd->free_items(); // Free items created by execute_loop() /* Now restore the "external" (e.g. "SET STATEMENT") Item list. 
From 12f900228fbbc49032a4f22935de049ad6cf74b2 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Tue, 16 Jan 2018 14:24:27 +1100 Subject: [PATCH 15/17] mariadbbackup: use defaults-group-suffix even if no --defaults-file Signed-off-by: Daniel Black --- extra/mariabackup/xtrabackup.cc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 7eee64bd6ef..016ddb6c60f 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -1223,8 +1223,8 @@ static int prepare_export() if (strncmp(orig_argv1,"--defaults-file=",16) == 0) { sprintf(cmdline, - IF_WIN("\"","") "\"%s\" --mysqld \"%s\" --defaults-group-suffix=%s" - " --defaults-extra-file=./backup-my.cnf --datadir=." + IF_WIN("\"","") "\"%s\" --mysqld \"%s\" " + " --defaults-extra-file=./backup-my.cnf --defaults-group-suffix=%s --datadir=." " --innodb --innodb-fast-shutdown=0" " --innodb_purge_rseg_truncate_frequency=1 --innodb-buffer-pool-size=%llu" " --console --skip-log-error --bootstrap < " BOOTSTRAP_FILENAME IF_WIN("\"",""), @@ -1236,11 +1236,12 @@ static int prepare_export() { sprintf(cmdline, IF_WIN("\"","") "\"%s\" --mysqld" - " --defaults-file=./backup-my.cnf --datadir=." + " --defaults-file=./backup-my.cnf --defaults-group-suffix=%s --datadir=." " --innodb --innodb-fast-shutdown=0" " --innodb_purge_rseg_truncate_frequency=1 --innodb-buffer-pool-size=%llu" " --console --log-error= --bootstrap < " BOOTSTRAP_FILENAME IF_WIN("\"",""), mariabackup_exe, + (my_defaults_group_suffix?my_defaults_group_suffix:""), xtrabackup_use_memory); } From ac3e7f788eced87ea4780584d65f815020f9b31f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 24 Jan 2018 10:23:52 +0200 Subject: [PATCH 16/17] MDEV-15016: multiple page cleaner threads use a lot of CPU While the bug was reported as a regression of MDEV-11025 Make number of page cleaner threads variable dynamic in MariaDB Server 10.3, the code that MariaDB Server 10.2 inherited from MySQL 5.7.4 (WL#6642) looks prone to similar errors. pc_flush_slot(): If there is no work to do, reset the is_requested signal, to avoid potential busy-waiting in buf_flush_page_cleaner_worker(). If the coordinator thread has shut down, avoid resetting the is_requested event, to avoid a potential hang at shutdown if there are multiple worker threads. --- storage/innobase/buf/buf0flu.cc | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 9426d140e71..9807a9696e9 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2013, 2017, MariaDB Corporation. +Copyright (c) 2013, 2018, MariaDB Corporation. 
Copyright (c) 2013, 2014, Fusion-io This program is free software; you can redistribute it and/or modify it under @@ -2843,7 +2843,9 @@ pc_flush_slot(void) mutex_enter(&page_cleaner->mutex); - if (page_cleaner->n_slots_requested > 0) { + if (!page_cleaner->n_slots_requested) { + os_event_reset(page_cleaner->is_requested); + } else { page_cleaner_slot_t* slot = NULL; ulint i; @@ -2865,16 +2867,16 @@ pc_flush_slot(void) page_cleaner->n_slots_flushing++; slot->state = PAGE_CLEANER_STATE_FLUSHING; - if (page_cleaner->n_slots_requested == 0) { - os_event_reset(page_cleaner->is_requested); - } - - if (!page_cleaner->is_running) { + if (UNIV_UNLIKELY(!page_cleaner->is_running)) { slot->n_flushed_lru = 0; slot->n_flushed_list = 0; goto finish_mutex; } + if (page_cleaner->n_slots_requested == 0) { + os_event_reset(page_cleaner->is_requested); + } + mutex_exit(&page_cleaner->mutex); lru_tm = ut_time_ms(); @@ -2885,7 +2887,7 @@ pc_flush_slot(void) lru_tm = ut_time_ms() - lru_tm; lru_pass++; - if (!page_cleaner->is_running) { + if (UNIV_UNLIKELY(!page_cleaner->is_running)) { slot->n_flushed_list = 0; goto finish; } From c269f1d6feb2e5ac1aeef96fbd3a64d0085c2f7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 24 Jan 2018 10:46:04 +0200 Subject: [PATCH 17/17] Allocate page_cleaner and page_cleaner.slot[] statically --- storage/innobase/buf/buf0flu.cc | 249 +++++++++++++++----------------- 1 file changed, 118 insertions(+), 131 deletions(-) diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 9807a9696e9..e938c27cfec 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -174,7 +174,7 @@ struct page_cleaner_t { requests for all slots */ ulint flush_pass; /*!< count to finish to flush requests for all slots */ - page_cleaner_slot_t* slots; /*!< pointer to the slots */ + page_cleaner_slot_t slots[MAX_BUFFER_POOLS]; bool is_running; /*!< false if attempt to shutdown */ @@ -185,7 +185,7 @@ struct page_cleaner_t { #endif /* UNIV_DEBUG */ }; -static page_cleaner_t* page_cleaner = NULL; +static page_cleaner_t page_cleaner; #ifdef UNIV_DEBUG my_bool innodb_page_cleaner_disabled_debug; @@ -2514,23 +2514,23 @@ page_cleaner_flush_pages_recommendation( lsn_avg_rate = (lsn_avg_rate + lsn_rate) / 2; /* aggregate stats of all slots */ - mutex_enter(&page_cleaner->mutex); + mutex_enter(&page_cleaner.mutex); - ulint flush_tm = page_cleaner->flush_time; - ulint flush_pass = page_cleaner->flush_pass; + ulint flush_tm = page_cleaner.flush_time; + ulint flush_pass = page_cleaner.flush_pass; - page_cleaner->flush_time = 0; - page_cleaner->flush_pass = 0; + page_cleaner.flush_time = 0; + page_cleaner.flush_pass = 0; ulint lru_tm = 0; ulint list_tm = 0; ulint lru_pass = 0; ulint list_pass = 0; - for (ulint i = 0; i < page_cleaner->n_slots; i++) { + for (ulint i = 0; i < page_cleaner.n_slots; i++) { page_cleaner_slot_t* slot; - slot = &page_cleaner->slots[i]; + slot = &page_cleaner.slots[i]; lru_tm += slot->flush_lru_time; lru_pass += slot->flush_lru_pass; @@ -2543,7 +2543,7 @@ page_cleaner_flush_pages_recommendation( slot->flush_list_pass = 0; } - mutex_exit(&page_cleaner->mutex); + mutex_exit(&page_cleaner.mutex); /* minimum values are 1, to avoid dividing by zero. 
*/ if (lru_tm < 1) { @@ -2584,9 +2584,9 @@ page_cleaner_flush_pages_recommendation( MONITOR_SET(MONITOR_FLUSH_AVG_TIME, flush_tm / flush_pass); MONITOR_SET(MONITOR_FLUSH_ADAPTIVE_AVG_PASS, - list_pass / page_cleaner->n_slots); + list_pass / page_cleaner.n_slots); MONITOR_SET(MONITOR_LRU_BATCH_FLUSH_AVG_PASS, - lru_pass / page_cleaner->n_slots); + lru_pass / page_cleaner.n_slots); MONITOR_SET(MONITOR_FLUSH_AVG_PASS, flush_pass); prev_lsn = cur_lsn; @@ -2630,12 +2630,12 @@ page_cleaner_flush_pages_recommendation( sum_pages_for_lsn += pages_for_lsn; - mutex_enter(&page_cleaner->mutex); - ut_ad(page_cleaner->slots[i].state + mutex_enter(&page_cleaner.mutex); + ut_ad(page_cleaner.slots[i].state == PAGE_CLEANER_STATE_NONE); - page_cleaner->slots[i].n_pages_requested + page_cleaner.slots[i].n_pages_requested = pages_for_lsn / buf_flush_lsn_scan_factor + 1; - mutex_exit(&page_cleaner->mutex); + mutex_exit(&page_cleaner.mutex); } sum_pages_for_lsn /= buf_flush_lsn_scan_factor; @@ -2655,20 +2655,20 @@ page_cleaner_flush_pages_recommendation( } /* Normalize request for each instance */ - mutex_enter(&page_cleaner->mutex); - ut_ad(page_cleaner->n_slots_requested == 0); - ut_ad(page_cleaner->n_slots_flushing == 0); - ut_ad(page_cleaner->n_slots_finished == 0); + mutex_enter(&page_cleaner.mutex); + ut_ad(page_cleaner.n_slots_requested == 0); + ut_ad(page_cleaner.n_slots_flushing == 0); + ut_ad(page_cleaner.n_slots_finished == 0); for (ulint i = 0; i < srv_buf_pool_instances; i++) { /* if REDO has enough of free space, don't care about age distribution of pages */ - page_cleaner->slots[i].n_pages_requested = pct_for_lsn > 30 ? - page_cleaner->slots[i].n_pages_requested + page_cleaner.slots[i].n_pages_requested = pct_for_lsn > 30 ? + page_cleaner.slots[i].n_pages_requested * n_pages / sum_pages_for_lsn + 1 : n_pages / srv_buf_pool_instances; } - mutex_exit(&page_cleaner->mutex); + mutex_exit(&page_cleaner.mutex); MONITOR_SET(MONITOR_FLUSH_N_TO_FLUSH_REQUESTED, n_pages); @@ -2727,25 +2727,18 @@ void buf_flush_page_cleaner_init(void) /*=============================*/ { - ut_ad(page_cleaner == NULL); + ut_ad(!page_cleaner.is_running); - page_cleaner = static_cast( - ut_zalloc_nokey(sizeof(*page_cleaner))); + mutex_create(LATCH_ID_PAGE_CLEANER, &page_cleaner.mutex); - mutex_create(LATCH_ID_PAGE_CLEANER, &page_cleaner->mutex); + page_cleaner.is_requested = os_event_create("pc_is_requested"); + page_cleaner.is_finished = os_event_create("pc_is_finished"); - page_cleaner->is_requested = os_event_create("pc_is_requested"); - page_cleaner->is_finished = os_event_create("pc_is_finished"); + page_cleaner.n_slots = static_cast(srv_buf_pool_instances); - page_cleaner->n_slots = static_cast(srv_buf_pool_instances); + ut_d(page_cleaner.n_disabled_debug = 0); - page_cleaner->slots = static_cast( - ut_zalloc_nokey(page_cleaner->n_slots - * sizeof(*page_cleaner->slots))); - - ut_d(page_cleaner->n_disabled_debug = 0); - - page_cleaner->is_running = true; + page_cleaner.is_running = true; } /** @@ -2754,21 +2747,17 @@ static void buf_flush_page_cleaner_close(void) { + ut_ad(!page_cleaner.is_running); + /* waiting for all worker threads exit */ - while (page_cleaner->n_workers > 0) { + while (page_cleaner.n_workers) { os_thread_sleep(10000); } - mutex_destroy(&page_cleaner->mutex); + mutex_destroy(&page_cleaner.mutex); - ut_free(page_cleaner->slots); - - os_event_destroy(page_cleaner->is_finished); - os_event_destroy(page_cleaner->is_requested); - - ut_free(page_cleaner); - - page_cleaner = NULL; + 
os_event_destroy(page_cleaner.is_finished); + os_event_destroy(page_cleaner.is_requested); } /** @@ -2794,17 +2783,17 @@ pc_request( / srv_buf_pool_instances; } - mutex_enter(&page_cleaner->mutex); + mutex_enter(&page_cleaner.mutex); - ut_ad(page_cleaner->n_slots_requested == 0); - ut_ad(page_cleaner->n_slots_flushing == 0); - ut_ad(page_cleaner->n_slots_finished == 0); + ut_ad(page_cleaner.n_slots_requested == 0); + ut_ad(page_cleaner.n_slots_flushing == 0); + ut_ad(page_cleaner.n_slots_finished == 0); - page_cleaner->requested = (min_n > 0); - page_cleaner->lsn_limit = lsn_limit; + page_cleaner.requested = (min_n > 0); + page_cleaner.lsn_limit = lsn_limit; - for (ulint i = 0; i < page_cleaner->n_slots; i++) { - page_cleaner_slot_t* slot = &page_cleaner->slots[i]; + for (ulint i = 0; i < page_cleaner.n_slots; i++) { + page_cleaner_slot_t* slot = &page_cleaner.slots[i]; ut_ad(slot->state == PAGE_CLEANER_STATE_NONE); @@ -2820,13 +2809,13 @@ pc_request( slot->state = PAGE_CLEANER_STATE_REQUESTED; } - page_cleaner->n_slots_requested = page_cleaner->n_slots; - page_cleaner->n_slots_flushing = 0; - page_cleaner->n_slots_finished = 0; + page_cleaner.n_slots_requested = page_cleaner.n_slots; + page_cleaner.n_slots_flushing = 0; + page_cleaner.n_slots_finished = 0; - os_event_set(page_cleaner->is_requested); + os_event_set(page_cleaner.is_requested); - mutex_exit(&page_cleaner->mutex); + mutex_exit(&page_cleaner.mutex); } /** @@ -2841,16 +2830,16 @@ pc_flush_slot(void) int lru_pass = 0; int list_pass = 0; - mutex_enter(&page_cleaner->mutex); + mutex_enter(&page_cleaner.mutex); - if (!page_cleaner->n_slots_requested) { - os_event_reset(page_cleaner->is_requested); + if (!page_cleaner.n_slots_requested) { + os_event_reset(page_cleaner.is_requested); } else { page_cleaner_slot_t* slot = NULL; ulint i; - for (i = 0; i < page_cleaner->n_slots; i++) { - slot = &page_cleaner->slots[i]; + for (i = 0; i < page_cleaner.n_slots; i++) { + slot = &page_cleaner.slots[i]; if (slot->state == PAGE_CLEANER_STATE_REQUESTED) { break; @@ -2858,26 +2847,26 @@ pc_flush_slot(void) } /* slot should be found because - page_cleaner->n_slots_requested > 0 */ - ut_a(i < page_cleaner->n_slots); + page_cleaner.n_slots_requested > 0 */ + ut_a(i < page_cleaner.n_slots); buf_pool_t* buf_pool = buf_pool_from_array(i); - page_cleaner->n_slots_requested--; - page_cleaner->n_slots_flushing++; + page_cleaner.n_slots_requested--; + page_cleaner.n_slots_flushing++; slot->state = PAGE_CLEANER_STATE_FLUSHING; - if (UNIV_UNLIKELY(!page_cleaner->is_running)) { + if (UNIV_UNLIKELY(!page_cleaner.is_running)) { slot->n_flushed_lru = 0; slot->n_flushed_list = 0; goto finish_mutex; } - if (page_cleaner->n_slots_requested == 0) { - os_event_reset(page_cleaner->is_requested); + if (page_cleaner.n_slots_requested == 0) { + os_event_reset(page_cleaner.is_requested); } - mutex_exit(&page_cleaner->mutex); + mutex_exit(&page_cleaner.mutex); lru_tm = ut_time_ms(); @@ -2887,13 +2876,13 @@ pc_flush_slot(void) lru_tm = ut_time_ms() - lru_tm; lru_pass++; - if (UNIV_UNLIKELY(!page_cleaner->is_running)) { + if (UNIV_UNLIKELY(!page_cleaner.is_running)) { slot->n_flushed_list = 0; goto finish; } /* Flush pages from flush_list if required */ - if (page_cleaner->requested) { + if (page_cleaner.requested) { flush_counters_t n; memset(&n, 0, sizeof(flush_counters_t)); list_tm = ut_time_ms(); @@ -2901,7 +2890,7 @@ pc_flush_slot(void) slot->succeeded_list = buf_flush_do_batch( buf_pool, BUF_FLUSH_LIST, slot->n_pages_requested, - page_cleaner->lsn_limit, + 
page_cleaner.lsn_limit, &n); slot->n_flushed_list = n.flushed; @@ -2913,10 +2902,10 @@ pc_flush_slot(void) slot->succeeded_list = true; } finish: - mutex_enter(&page_cleaner->mutex); + mutex_enter(&page_cleaner.mutex); finish_mutex: - page_cleaner->n_slots_flushing--; - page_cleaner->n_slots_finished++; + page_cleaner.n_slots_flushing--; + page_cleaner.n_slots_finished++; slot->state = PAGE_CLEANER_STATE_FINISHED; slot->flush_lru_time += lru_tm; @@ -2924,15 +2913,15 @@ finish_mutex: slot->flush_lru_pass += lru_pass; slot->flush_list_pass += list_pass; - if (page_cleaner->n_slots_requested == 0 - && page_cleaner->n_slots_flushing == 0) { - os_event_set(page_cleaner->is_finished); + if (page_cleaner.n_slots_requested == 0 + && page_cleaner.n_slots_flushing == 0) { + os_event_set(page_cleaner.is_finished); } } - ulint ret = page_cleaner->n_slots_requested; + ulint ret = page_cleaner.n_slots_requested; - mutex_exit(&page_cleaner->mutex); + mutex_exit(&page_cleaner.mutex); return(ret); } @@ -2954,16 +2943,16 @@ pc_wait_finished( *n_flushed_lru = 0; *n_flushed_list = 0; - os_event_wait(page_cleaner->is_finished); + os_event_wait(page_cleaner.is_finished); - mutex_enter(&page_cleaner->mutex); + mutex_enter(&page_cleaner.mutex); - ut_ad(page_cleaner->n_slots_requested == 0); - ut_ad(page_cleaner->n_slots_flushing == 0); - ut_ad(page_cleaner->n_slots_finished == page_cleaner->n_slots); + ut_ad(page_cleaner.n_slots_requested == 0); + ut_ad(page_cleaner.n_slots_flushing == 0); + ut_ad(page_cleaner.n_slots_finished == page_cleaner.n_slots); - for (ulint i = 0; i < page_cleaner->n_slots; i++) { - page_cleaner_slot_t* slot = &page_cleaner->slots[i]; + for (ulint i = 0; i < page_cleaner.n_slots; i++) { + page_cleaner_slot_t* slot = &page_cleaner.slots[i]; ut_ad(slot->state == PAGE_CLEANER_STATE_FINISHED); @@ -2976,11 +2965,11 @@ pc_wait_finished( slot->n_pages_requested = 0; } - page_cleaner->n_slots_finished = 0; + page_cleaner.n_slots_finished = 0; - os_event_reset(page_cleaner->is_finished); + os_event_reset(page_cleaner.is_finished); - mutex_exit(&page_cleaner->mutex); + mutex_exit(&page_cleaner.mutex); return(all_succeeded); } @@ -3008,20 +2997,18 @@ static void buf_flush_page_cleaner_disabled_loop(void) { - ut_ad(page_cleaner != NULL); - if (!innodb_page_cleaner_disabled_debug) { /* We return to avoid entering and exiting mutex. */ return; } - mutex_enter(&page_cleaner->mutex); - page_cleaner->n_disabled_debug++; - mutex_exit(&page_cleaner->mutex); + mutex_enter(&page_cleaner.mutex); + page_cleaner.n_disabled_debug++; + mutex_exit(&page_cleaner.mutex); while (innodb_page_cleaner_disabled_debug && srv_shutdown_state == SRV_SHUTDOWN_NONE - && page_cleaner->is_running) { + && page_cleaner.is_running) { os_thread_sleep(100000); /* [A] */ } @@ -3039,9 +3026,9 @@ buf_flush_page_cleaner_disabled_loop(void) Therefore we are waiting in step 2 for this thread exiting here. */ - mutex_enter(&page_cleaner->mutex); - page_cleaner->n_disabled_debug--; - mutex_exit(&page_cleaner->mutex); + mutex_enter(&page_cleaner.mutex); + page_cleaner.n_disabled_debug--; + mutex_exit(&page_cleaner.mutex); } /** Disables page cleaner threads (coordinator and workers). @@ -3057,7 +3044,7 @@ buf_flush_page_cleaner_disabled_debug_update( void* var_ptr, const void* save) { - if (page_cleaner == NULL) { + if (!page_cleaner.is_running) { return; } @@ -3070,9 +3057,9 @@ buf_flush_page_cleaner_disabled_debug_update( /* Enable page cleaner threads. 
*/ while (srv_shutdown_state == SRV_SHUTDOWN_NONE) { - mutex_enter(&page_cleaner->mutex); - const ulint n = page_cleaner->n_disabled_debug; - mutex_exit(&page_cleaner->mutex); + mutex_enter(&page_cleaner.mutex); + const ulint n = page_cleaner.n_disabled_debug; + mutex_exit(&page_cleaner.mutex); /* Check if all threads have been enabled, to avoid problem when we decide to re-disable them soon. */ if (n == 0) { @@ -3097,21 +3084,21 @@ buf_flush_page_cleaner_disabled_debug_update( That's why we have sleep-loop instead of simply waiting on some disabled_debug_event. */ - os_event_set(page_cleaner->is_requested); + os_event_set(page_cleaner.is_requested); - mutex_enter(&page_cleaner->mutex); + mutex_enter(&page_cleaner.mutex); - ut_ad(page_cleaner->n_disabled_debug + ut_ad(page_cleaner.n_disabled_debug <= srv_n_page_cleaners); - if (page_cleaner->n_disabled_debug + if (page_cleaner.n_disabled_debug == srv_n_page_cleaners) { - mutex_exit(&page_cleaner->mutex); + mutex_exit(&page_cleaner.mutex); break; } - mutex_exit(&page_cleaner->mutex); + mutex_exit(&page_cleaner.mutex); os_thread_sleep(100000); } @@ -3265,10 +3252,10 @@ DECLARE_THREAD(buf_flush_page_cleaner_coordinator)(void*) && srv_flush_sync && buf_flush_sync_lsn > 0) { /* woke up for flush_sync */ - mutex_enter(&page_cleaner->mutex); + mutex_enter(&page_cleaner.mutex); lsn_t lsn_limit = buf_flush_sync_lsn; buf_flush_sync_lsn = 0; - mutex_exit(&page_cleaner->mutex); + mutex_exit(&page_cleaner.mutex); /* Request flushing for threads */ pc_request(ULINT_MAX, lsn_limit); @@ -3280,8 +3267,8 @@ DECLARE_THREAD(buf_flush_page_cleaner_coordinator)(void*) /* only coordinator is using these counters, so no need to protect by lock. */ - page_cleaner->flush_time += ut_time_ms() - tm; - page_cleaner->flush_pass++; + page_cleaner.flush_time += ut_time_ms() - tm; + page_cleaner.flush_pass++; /* Wait for all slots to be finished */ ulint n_flushed_lru = 0; @@ -3326,8 +3313,8 @@ DECLARE_THREAD(buf_flush_page_cleaner_coordinator)(void*) /* only coordinator is using these counters, so no need to protect by lock. */ - page_cleaner->flush_time += ut_time_ms() - tm; - page_cleaner->flush_pass++ ; + page_cleaner.flush_time += ut_time_ms() - tm; + page_cleaner.flush_pass++ ; /* Wait for all slots to be finished */ ulint n_flushed_lru = 0; @@ -3473,8 +3460,8 @@ thread_exit: /* All worker threads are waiting for the event here, and no more access to page_cleaner structure by them. Wakes worker threads up just to make them exit. 
*/ - page_cleaner->is_running = false; - os_event_set(page_cleaner->is_requested); + page_cleaner.is_running = false; + os_event_set(page_cleaner.is_requested); buf_flush_page_cleaner_close(); @@ -3501,9 +3488,9 @@ DECLARE_THREAD(buf_flush_page_cleaner_worker)( { my_thread_init(); - mutex_enter(&page_cleaner->mutex); - page_cleaner->n_workers++; - mutex_exit(&page_cleaner->mutex); + mutex_enter(&page_cleaner.mutex); + page_cleaner.n_workers++; + mutex_exit(&page_cleaner.mutex); #ifdef UNIV_LINUX /* linux might be able to set different setting for each thread @@ -3517,20 +3504,20 @@ DECLARE_THREAD(buf_flush_page_cleaner_worker)( #endif /* UNIV_LINUX */ while (true) { - os_event_wait(page_cleaner->is_requested); + os_event_wait(page_cleaner.is_requested); ut_d(buf_flush_page_cleaner_disabled_loop()); - if (!page_cleaner->is_running) { + if (!page_cleaner.is_running) { break; } pc_flush_slot(); } - mutex_enter(&page_cleaner->mutex); - page_cleaner->n_workers--; - mutex_exit(&page_cleaner->mutex); + mutex_enter(&page_cleaner.mutex); + page_cleaner.n_workers--; + mutex_exit(&page_cleaner.mutex); my_thread_end(); @@ -3565,11 +3552,11 @@ buf_flush_request_force( /* adjust based on lsn_avg_rate not to get old */ lsn_t lsn_target = lsn_limit + lsn_avg_rate * 3; - mutex_enter(&page_cleaner->mutex); + mutex_enter(&page_cleaner.mutex); if (lsn_target > buf_flush_sync_lsn) { buf_flush_sync_lsn = lsn_target; } - mutex_exit(&page_cleaner->mutex); + mutex_exit(&page_cleaner.mutex); os_event_set(buf_flush_event); }