diff --git a/mysql-test/suite/innodb/r/innodb-stats-sample.result b/mysql-test/suite/innodb/r/innodb-stats-sample.result
new file mode 100644
index 00000000000..a049a1d82c1
--- /dev/null
+++ b/mysql-test/suite/innodb/r/innodb-stats-sample.result
@@ -0,0 +1,4 @@
+Variable_name	Value
+innodb_stats_sample_pages	1
+Variable_name	Value
+innodb_stats_traditional	OFF
diff --git a/mysql-test/suite/innodb/t/innodb-stats-sample.test b/mysql-test/suite/innodb/t/innodb-stats-sample.test
new file mode 100644
index 00000000000..35d35bfa382
--- /dev/null
+++ b/mysql-test/suite/innodb/t/innodb-stats-sample.test
@@ -0,0 +1,78 @@
+--source include/have_innodb.inc
+#
+# Test that mysqld does not crash when running ANALYZE TABLE with
+# different values of the parameter innodb_stats_sample_pages.
+#
+
+# we care only that the following SQL commands do not produce errors
+# and do not crash the server
+-- disable_query_log
+-- disable_result_log
+-- enable_warnings
+
+let $sample_pages=`select @@innodb_stats_sample_pages`;
+let $traditional=`select @@innodb_stats_traditional`;
+SET GLOBAL innodb_stats_sample_pages=0;
+# use the new (non-traditional) method to calculate statistics
+SET GLOBAL innodb_stats_traditional=0;
+
+# check that the invalid value 0 has been adjusted to the minimum, 1
+-- enable_result_log
+SHOW VARIABLES LIKE 'innodb_stats_sample_pages';
+SHOW VARIABLES LIKE 'innodb_stats_traditional';
+-- disable_result_log
+
+CREATE TABLE innodb_analyze (
+	a INT,
+	b INT,
+	c char(50),
+	KEY(a),
+	KEY(b,a)
+) ENGINE=InnoDB;
+
+# test with empty table
+ANALYZE TABLE innodb_analyze;
+
+SET GLOBAL innodb_stats_sample_pages=2;
+ANALYZE TABLE innodb_analyze;
+
+SET GLOBAL innodb_stats_sample_pages=1;
+ANALYZE TABLE innodb_analyze;
+
+SET GLOBAL innodb_stats_sample_pages=8000;
+ANALYZE TABLE innodb_analyze;
+
+delimiter //;
+create procedure innodb_insert_proc (repeat_count int)
+begin
+  declare current_num int;
+  set current_num = 0;
+  while current_num < repeat_count do
+    insert into innodb_analyze values(current_num, current_num*100, MD5(RAND()));
+    set current_num = current_num + 1;
+  end while;
+end//
+delimiter ;//
+commit;
+
+set autocommit=0;
+call innodb_insert_proc(7000);
+commit;
+set autocommit=1;
+
+SET GLOBAL innodb_stats_sample_pages=1;
+ANALYZE TABLE innodb_analyze;
+
+SET GLOBAL innodb_stats_sample_pages=8;
+ANALYZE TABLE innodb_analyze;
+
+SET GLOBAL innodb_stats_sample_pages=16;
+ANALYZE TABLE innodb_analyze;
+
+SET GLOBAL innodb_stats_sample_pages=8000;
+ANALYZE TABLE innodb_analyze;
+
+DROP PROCEDURE innodb_insert_proc;
+DROP TABLE innodb_analyze;
+EVAL SET GLOBAL innodb_stats_sample_pages=$sample_pages;
+EVAL SET GLOBAL innodb_stats_traditional=$traditional;
diff --git a/storage/innobase/btr/btr0cur.c b/storage/innobase/btr/btr0cur.c
index 4e10ea54126..a0fddd7212d 100644
--- a/storage/innobase/btr/btr0cur.c
+++ b/storage/innobase/btr/btr0cur.c
@@ -3777,18 +3777,24 @@ btr_estimate_number_of_different_key_vals(
 			n_sample_pages = srv_stats_sample_pages;
 		}
 	} else {
-		/* New logaritmic number of pages that are estimated. We
-		first pick minimun from srv_stats_sample_pages and number of
-		pages on index. Then we pick maximum from previous number of
-		pages and log2(number of index pages) * srv_stats_sample_pages. */
+		/* New logarithmic number of pages that are estimated.
+		The result must be between 1 and index->stat_index_size:
+		take log2(index->stat_index_size) * srv_stats_sample_pages,
+		but at least min(srv_stats_sample_pages,
+		index->stat_index_size) and at most index->stat_index_size. */
 		if (index->stat_index_size > 0) {
-			n_sample_pages = ut_max(ut_min(srv_stats_sample_pages, index->stat_index_size),
-				log2(index->stat_index_size)*srv_stats_sample_pages);
+			n_sample_pages = ut_min(index->stat_index_size,
+				ut_max(ut_min(srv_stats_sample_pages,
+				index->stat_index_size),
+				log2(index->stat_index_size)*srv_stats_sample_pages));
 		} else {
 			n_sample_pages = 1;
 		}
 	}
 
+	/* Sanity check: guaranteed by the clamping above */
+	ut_ad(n_sample_pages > 0 && n_sample_pages <= (index->stat_index_size < 1 ? 1 : index->stat_index_size));
+
 	/* We sample some pages in the index to get an estimate */
 
 	for (i = 0; i < n_sample_pages; i++) {
diff --git a/storage/xtradb/btr/btr0cur.c b/storage/xtradb/btr/btr0cur.c
index b7bafd5b5a8..703b0cf6043 100644
--- a/storage/xtradb/btr/btr0cur.c
+++ b/storage/xtradb/btr/btr0cur.c
@@ -3957,18 +3957,24 @@ btr_estimate_number_of_different_key_vals(
 			n_sample_pages = srv_stats_sample_pages;
 		}
 	} else {
-		/* New logaritmic number of pages that are estimated. We
-		first pick minimun from srv_stats_sample_pages and number of
-		pages on index. Then we pick maximum from previous number of
-		pages and log2(number of index pages) * srv_stats_sample_pages. */
+		/* New logarithmic number of pages that are estimated.
+		The result must be between 1 and index->stat_index_size:
+		take log2(index->stat_index_size) * srv_stats_sample_pages,
+		but at least min(srv_stats_sample_pages,
+		index->stat_index_size) and at most index->stat_index_size. */
 		if (index->stat_index_size > 0) {
-			n_sample_pages = ut_max(ut_min(srv_stats_sample_pages, index->stat_index_size),
-				log2(index->stat_index_size)*srv_stats_sample_pages);
+			n_sample_pages = ut_min(index->stat_index_size,
+				ut_max(ut_min(srv_stats_sample_pages,
+				index->stat_index_size),
+				log2(index->stat_index_size)*srv_stats_sample_pages));
 		} else {
 			n_sample_pages = 1;
 		}
 	}
 
+	/* Sanity check: guaranteed by the clamping above */
+	ut_ad(n_sample_pages > 0 && n_sample_pages <= (index->stat_index_size < 1 ? 1 : index->stat_index_size));
+
 	/* We sample some pages in the index to get an estimate */
 
 	for (i = 0; i < n_sample_pages; i++) {