From 36ab6cc80cc8d8bf332852ecdf5fac3adf8e8888 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 12 May 2024 19:10:47 +0300 Subject: [PATCH] MDEV-34125: ANALYZE FORMAT=JSON: r_engine_stats.pages_read_time_ms has wrong scale - Change the comments in class ha_handler_stats to say the members are in ticks, not milliseconds. - In sql_explain.cc, adjust the scale to print milliseconds. --- mysql-test/main/analyze_engine_stats2.opt | 1 + mysql-test/main/analyze_engine_stats2.result | 57 +++++++++++++++ mysql-test/main/analyze_engine_stats2.test | 73 ++++++++++++++++++++ sql/ha_handler_stats.h | 9 ++- sql/sql_explain.cc | 5 +- 5 files changed, 142 insertions(+), 3 deletions(-) create mode 100644 mysql-test/main/analyze_engine_stats2.opt create mode 100644 mysql-test/main/analyze_engine_stats2.result create mode 100644 mysql-test/main/analyze_engine_stats2.test diff --git a/mysql-test/main/analyze_engine_stats2.opt b/mysql-test/main/analyze_engine_stats2.opt new file mode 100644 index 00000000000..a5bb9a93636 --- /dev/null +++ b/mysql-test/main/analyze_engine_stats2.opt @@ -0,0 +1 @@ +--innodb_buffer_pool_dump_at_shutdown=off --innodb_buffer_pool_load_at_startup=off --innodb-stats-persistent=1 --innodb-stats-auto-recalc=off diff --git a/mysql-test/main/analyze_engine_stats2.result b/mysql-test/main/analyze_engine_stats2.result new file mode 100644 index 00000000000..3ba9895dd39 --- /dev/null +++ b/mysql-test/main/analyze_engine_stats2.result @@ -0,0 +1,57 @@ +# +# MDEV-34125: ANALYZE FORMAT=JSON: r_engine_stats.pages_read_time_ms has wrong scale +# +create table t1 ( +a varchar(255), +b varchar(255), +c varchar(255), +d varchar(255), +primary key(a,b,c,d) +) engine=innodb; +insert into t1 select +repeat(uuid(), 7), +repeat(uuid(), 7), +repeat(uuid(), 7), +repeat(uuid(), 7) +from seq_1_to_16384; +# restart +set log_slow_verbosity='engine'; +set long_query_time=0.0; +set @js='$analyze_output'; +select @js; +@js +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + 
"r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "index", + "key": "PRIMARY", + "key_length": "1028", + "used_key_parts": ["a", "b", "c", "d"], + "r_loops": 1, + "rows": 1, + "r_rows": 16384, + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_engine_stats": { + "pages_accessed": "REPLACED", + "pages_read_count": "REPLACED", + "pages_read_time_ms": "REPLACED" + }, + "filtered": 100, + "r_filtered": 100, + "using_index": true + } + } +} +set @pages_read_time_ms= +(select json_value(@js,'$.query_block.table.r_engine_stats.pages_read_time_ms')); + + + OK: pages_read_time is same in slow log and ANALYZE + +set long_query_time=default; +drop table t1; diff --git a/mysql-test/main/analyze_engine_stats2.test b/mysql-test/main/analyze_engine_stats2.test new file mode 100644 index 00000000000..1a811f6d9e0 --- /dev/null +++ b/mysql-test/main/analyze_engine_stats2.test @@ -0,0 +1,73 @@ +# +# r_engine_stats tests that require slow query log. +# +--source include/analyze-format.inc +--source include/have_sequence.inc +--source include/have_innodb.inc + +--echo # +--echo # MDEV-34125: ANALYZE FORMAT=JSON: r_engine_stats.pages_read_time_ms has wrong scale +--echo # + +# Each row is 1K. +create table t1 ( + a varchar(255), + b varchar(255), + c varchar(255), + d varchar(255), + primary key(a,b,c,d) +) engine=innodb; + +# The data size is 16K * 1K = 16M +# 16M / (page_size=16K) = 1K pages. 
+insert into t1 select + repeat(uuid(), 7), + repeat(uuid(), 7), + repeat(uuid(), 7), + repeat(uuid(), 7) +from seq_1_to_16384; + +source include/restart_mysqld.inc; +set log_slow_verbosity='engine'; +set long_query_time=0.0; + +let $analyze_output= `analyze format=json +select * from t1 force index (PRIMARY) order by a desc, b desc, c desc, d desc`; +evalp set @js='$analyze_output'; + +# Print it out for user-friendliness +--replace_regex /("(r_[a-z_]*_time_ms|pages[^"]*)": )[^, \n]*/\1"REPLACED"/ +select @js; + +set @pages_read_time_ms= + (select json_value(@js,'$.query_block.table.r_engine_stats.pages_read_time_ms')); + +let ANALYZE_PAGES=`select @pages_read_time_ms`; +let SLOW_LOG_FILE= `select @@slow_query_log_file`; + +perl; + my $slow_log_file= $ENV{'SLOW_LOG_FILE'} or die "SLOW_LOG_FILE not set"; + my $analyze_pages=$ENV{'ANALYZE_PAGES'}; + open(FILE, $slow_log_file) or die "Failed to open $slow_log_file"; + # We didn't run any queries touching a storage engine after the query of + # interest, so we will be fine here if we just get the last occurrence of + # Pages_read_time: NNNN in the file + while(<FILE>) { + $slow_log_pages=$1 if (/Pages_read_time: ([0-9.]+)/); + } + close(FILE); + + if ( $slow_log_pages > $analyze_pages * 0.95 && + $slow_log_pages < $analyze_pages * 1.05) { + print "\n\n OK: pages_read_time is same in slow log and ANALYZE\n\n"; + } else { + print "\n\n FAIL: $slow_log_pages not equal to $analyze_pages\n"; + } + +EOF + + +set long_query_time=default; +drop table t1; + + diff --git a/sql/ha_handler_stats.h b/sql/ha_handler_stats.h index 726ba6041dc..60ae5deedbe 100644 --- a/sql/ha_handler_stats.h +++ b/sql/ha_handler_stats.h @@ -26,9 +26,14 @@ public: ulonglong pages_accessed; /* Pages accessed from page cache */ ulonglong pages_updated; /* Pages changed in page cache */ ulonglong pages_read_count; /* Pages read from disk */ - ulonglong pages_read_time; /* Time reading pages, in microsec. 
*/ + + /* Time spent reading pages, in timer_tracker_frequency() units */ + ulonglong pages_read_time; + ulonglong undo_records_read; - ulonglong engine_time; /* Time spent in engine in microsec */ + + /* Time spent in engine, in timer_tracker_frequency() units */ + ulonglong engine_time; uint active; /* <> 0 if status has to be updated */ #define first_stat pages_accessed #define last_stat engine_time diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index 774eacb58a5..c84a3a00a8f 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -207,6 +207,9 @@ int Explain_query::send_explain(THD *thd, bool extended) int Explain_query::print_explain(select_result_sink *output, uint8 explain_flags, bool is_analyze) { + /* A sanity check for ANALYZE: */ + DBUG_ASSERT(timer_tracker_frequency() != 0.0); + if (upd_del_plan) { upd_del_plan->print_explain(this, output, explain_flags, is_analyze); @@ -1723,7 +1726,7 @@ static void trace_engine_stats(handler *file, Json_writer *writer) writer->add_member("pages_read_count").add_ull(hs->pages_read_count); if (hs->pages_read_time) writer->add_member("pages_read_time_ms"). - add_double(hs->pages_read_time / 1000.0); + add_double(hs->pages_read_time * 1000. / timer_tracker_frequency()); if (hs->undo_records_read) writer->add_member("old_rows_read").add_ull(hs->undo_records_read); writer->end_object();