diff --git a/mysql-test/include/log_slow_grep.inc b/mysql-test/include/log_slow_grep.inc
index db82f3cb8fb..28717e18123 100644
--- a/mysql-test/include/log_slow_grep.inc
+++ b/mysql-test/include/log_slow_grep.inc
@@ -22,7 +22,7 @@
 
 # InnoDB/Engines
 --let log_expected_matches = $log_slow_verbosity_innodb_expected_matches
---let grep_pattern = ^# Pages_accessed: \d+ Pages_read: \d+ Pages_updated: \d+ Old_rows_read: \d+\$
+--let grep_pattern = ^# Pages_accessed: \d+ Pages_read: \d+ Pages_prefetched: \d+ Pages_updated: \d+ Old_rows_read: \d+\$
 --source include/log_grep.inc
 
 --let grep_pattern = ^# Pages_read_time: \d+\.\d+ Engine_time: \d+\.\d+\$
diff --git a/mysql-test/main/analyze_stmt_prefetch_count.opt b/mysql-test/main/analyze_stmt_prefetch_count.opt
new file mode 100644
index 00000000000..c33354be63a
--- /dev/null
+++ b/mysql-test/main/analyze_stmt_prefetch_count.opt
@@ -0,0 +1 @@
+--innodb-buffer-pool-size=32M --innodb_buffer_pool_dump_at_shutdown=off --innodb_buffer_pool_load_at_startup=off --innodb-stats-persistent=1 --innodb-stats-auto-recalc=off
diff --git a/mysql-test/main/analyze_stmt_prefetch_count.result b/mysql-test/main/analyze_stmt_prefetch_count.result
new file mode 100644
index 00000000000..34c9085b49e
--- /dev/null
+++ b/mysql-test/main/analyze_stmt_prefetch_count.result
@@ -0,0 +1,60 @@
+create table t1 (
+a varchar(255),
+b varchar(255),
+c varchar(255),
+d varchar(255),
+primary key(a,b,c,d)
+) engine=innodb;
+SET unique_checks=0, foreign_key_checks= 0;
+begin;
+insert into t1 select
+repeat(uuid(), 7),
+repeat(uuid(), 7),
+repeat(uuid(), 7),
+repeat(uuid(), 7)
+from seq_1_to_16384;
+insert into t1 values ('z','z','z','z');
+commit;
+# Restart the server to make sure we have an empty InnoDB Buffer Pool
+# (in the test's .opt file we've disabled buffer pool saving/loading
+# and also tried to disable any background activity)
+SET GLOBAL innodb_fast_shutdown=0;
+# restart
+set @innodb_pages_read0=
+(select variable_value
+from information_schema.session_status
+where variable_name like 'innodb_pages_read');
+set @js='$analyze_output';
+set @js=json_extract(@js, '$.query_block.table.r_engine_stats');
+set @pages_accessed= cast(json_value(@js,'$.pages_accessed') as INT);
+set @pages_read_count= cast(json_value(@js,'$.pages_read_count') as INT);
+set @pages_prefetch_read_count= cast(json_value(@js,'$.pages_prefetch_read_count') as INT);
+select @pages_accessed > 1000 and @pages_accessed < 1500;
+@pages_accessed > 1000 and @pages_accessed < 1500
+1
+set @total_read = (@pages_read_count + @pages_prefetch_read_count);
+select @pages_accessed*0.75 < @total_read, @total_read < @pages_accessed*1.25;
+@pages_accessed*0.75 < @total_read	@total_read < @pages_accessed*1.25
+1	1
+set @innodb_pages_read1=
+(select variable_value
+from information_schema.session_status
+where variable_name like 'innodb_pages_read');
+set @innodb_pages_read_incr= (@innodb_pages_read1 - @innodb_pages_read0);
+select @innodb_pages_read_incr > 1000, @innodb_pages_read_incr < 1500;
+@innodb_pages_read_incr > 1000	@innodb_pages_read_incr < 1500
+1	1
+set @js='$analyze_output';
+set @js=json_extract(@js, '$.query_block.table.r_engine_stats');
+# This must just print pages_accessed. No page reads or prefetch reads,
+# because the previous query has read all the needed pages into the
+# buffer pool, which is set to be large enough to accommodate the whole
+# table.
+select @js;
+@js
+{"pages_accessed": NUMBER}
+set @pages_accessed2= cast(json_value(@js,'$.pages_accessed') as INT);
+select @pages_accessed2 = @pages_accessed;
+@pages_accessed2 = @pages_accessed
+1
+drop table t1;
diff --git a/mysql-test/main/analyze_stmt_prefetch_count.test b/mysql-test/main/analyze_stmt_prefetch_count.test
new file mode 100644
index 00000000000..ad6bac4763c
--- /dev/null
+++ b/mysql-test/main/analyze_stmt_prefetch_count.test
@@ -0,0 +1,77 @@
+--source include/have_innodb.inc
+--source include/have_sequence.inc
+
+
+# Each row is 1K.
+create table t1 (
+  a varchar(255),
+  b varchar(255),
+  c varchar(255),
+  d varchar(255),
+  primary key(a,b,c,d)
+) engine=innodb;
+
+# The data size is 16K * 1K = 16M
+# 16M / (page_size=16K) = 1K pages.
+SET unique_checks=0, foreign_key_checks= 0;
+begin;
+insert into t1 select
+  repeat(uuid(), 7),
+  repeat(uuid(), 7),
+  repeat(uuid(), 7),
+  repeat(uuid(), 7)
+from seq_1_to_16384;
+insert into t1 values ('z','z','z','z');
+commit;
+
+--echo # Restart the server to make sure we have an empty InnoDB Buffer Pool
+--echo # (in the test's .opt file we've disabled buffer pool saving/loading
+--echo # and also tried to disable any background activity)
+SET GLOBAL innodb_fast_shutdown=0;
+--source include/restart_mysqld.inc
+
+set @innodb_pages_read0=
+  (select variable_value
+   from information_schema.session_status
+   where variable_name like 'innodb_pages_read');
+
+let $analyze_output= `analyze format=json
+select * from t1 force index (PRIMARY) order by a,b,c,d`;
+evalp set @js='$analyze_output';
+
+set @js=json_extract(@js, '$.query_block.table.r_engine_stats');
+#select @js;
+set @pages_accessed= cast(json_value(@js,'$.pages_accessed') as INT);
+set @pages_read_count= cast(json_value(@js,'$.pages_read_count') as INT);
+set @pages_prefetch_read_count= cast(json_value(@js,'$.pages_prefetch_read_count') as INT);
+
+select @pages_accessed > 1000 and @pages_accessed < 1500;
+
+set @total_read = (@pages_read_count + @pages_prefetch_read_count);
+
+select @pages_accessed*0.75 < @total_read, @total_read < @pages_accessed*1.25;
+
+set @innodb_pages_read1=
+  (select variable_value
+   from information_schema.session_status
+   where variable_name like 'innodb_pages_read');
+
+set @innodb_pages_read_incr= (@innodb_pages_read1 - @innodb_pages_read0);
+
+select @innodb_pages_read_incr > 1000, @innodb_pages_read_incr < 1500;
+
+let $analyze_output= `analyze format=json
+select * from t1 force index (PRIMARY) order by a,b,c,d`;
+evalp set @js='$analyze_output';
+set @js=json_extract(@js, '$.query_block.table.r_engine_stats');
+
+--echo # This must just print pages_accessed. No page reads or prefetch reads,
+--echo # because the previous query has read all the needed pages into the
+--echo # buffer pool, which is set to be large enough to accommodate the whole
+--echo # table.
+--replace_regex /[0-9]+/NUMBER/
+select @js;
+set @pages_accessed2= cast(json_value(@js,'$.pages_accessed') as INT);
+
+select @pages_accessed2 = @pages_accessed;
+drop table t1;
diff --git a/mysql-test/main/log_slow_innodb.result b/mysql-test/main/log_slow_innodb.result
index 11a6f2c32c0..9cf898c73fa 100644
--- a/mysql-test/main/log_slow_innodb.result
+++ b/mysql-test/main/log_slow_innodb.result
@@ -23,7 +23,7 @@ UPDATE t1 set b=b+1 where a=1 or a=999;
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Tmp_tables: \d+ Tmp_disk_tables: \d+$
 [log_grep.inc] lines: 0
-[log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_updated: \d+ Old_rows_read: \d+$ expected_matches: 2
+[log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_prefetched: \d+ Pages_updated: \d+ Old_rows_read: \d+$ expected_matches: 2
 [log_grep.inc] found expected match count: 2
 [log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Pages_read_time: \d+\.\d+ Engine_time: \d+\.\d+$ expected_matches: 2
 [log_grep.inc] found expected match count: 2
@@ -47,7 +47,7 @@ SELECT 1;
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Tmp_tables: \d+ Tmp_disk_tables: \d+$
 [log_grep.inc] lines: 0
-[log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_updated: \d+ Old_rows_read: \d+$
+[log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_prefetched: \d+ Pages_updated: \d+ Old_rows_read: \d+$
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Pages_read_time: \d+\.\d+ Engine_time: \d+\.\d+$
 [log_grep.inc] lines: 0
@@ -85,7 +85,7 @@ INSERT INTO t1 VALUE(1000) pattern: ^# Tmp_tables: \d+ Tmp_disk_tables: \d+$
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_3
 --source include/log_slow_start.inc
-INSERT INTO t1 VALUE(1000) pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_updated: \d+ Old_rows_read: \d+$
+INSERT INTO t1 VALUE(1000) pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_prefetched: \d+ Pages_updated: \d+ Old_rows_read: \d+$
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_3
 --source include/log_slow_start.inc
diff --git a/sql/ha_handler_stats.h b/sql/ha_handler_stats.h
index 60ae5deedbe..362a19dab0b 100644
--- a/sql/ha_handler_stats.h
+++ b/sql/ha_handler_stats.h
@@ -30,6 +30,15 @@ public:
   /* Time spent reading pages, in timer_tracker_frequency() units */
   ulonglong pages_read_time;
 
+  /*
+    Number of pages that we've requested to prefetch while running the query.
+    Note that we don't know:
+    - how much time was spent reading these pages (nor how to count the time
+      if reading was done in parallel)
+    - whether the pages were read by "us" or somebody else...
+  */
+  ulonglong pages_prefetched;
+
   ulonglong undo_records_read;
 
   /* Time spent in engine, in timer_tracker_frequency() units */
diff --git a/sql/log.cc b/sql/log.cc
index 9b2249825a5..28954992a9a 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -3283,10 +3283,12 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
 
       if (my_b_printf(&log_file,
                       "# Pages_accessed: %lu Pages_read: %lu "
+                      "Pages_prefetched: %lu "
                       "Pages_updated: %lu Old_rows_read: %lu\n"
                       "# Pages_read_time: %s Engine_time: %s\n",
                       (ulong) stats->pages_accessed,
                       (ulong) stats->pages_read_count,
+                      (ulong) stats->pages_prefetched,
                       (ulong) stats->pages_updated,
                       (ulong) stats->undo_records_read,
                       query_time_buff, lock_time_buff))
diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc
index c84a3a00a8f..764d5dd8cef 100644
--- a/sql/sql_explain.cc
+++ b/sql/sql_explain.cc
@@ -1727,6 +1727,8 @@ static void trace_engine_stats(handler *file, Json_writer *writer)
   if (hs->pages_read_time)
     writer->add_member("pages_read_time_ms").
       add_double(hs->pages_read_time * 1000. / timer_tracker_frequency());
+  if (hs->pages_prefetched)
+    writer->add_member("pages_prefetch_read_count").add_ull(hs->pages_prefetched);
   if (hs->undo_records_read)
     writer->add_member("old_rows_read").add_ull(hs->undo_records_read);
   writer->end_object();
diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc
index 1c1e1150635..7279497e14c 100644
--- a/storage/innobase/buf/buf0rea.cc
+++ b/storage/innobase/buf/buf0rea.cc
@@ -427,6 +427,7 @@ read_ahead:
 
   if (count)
   {
+    mariadb_increment_pages_prefetched(count);
     DBUG_PRINT("ib_buf", ("random read-ahead %zu pages from %s: %u",
                           count, space->chain.start->name,
                           low.page_no()));
@@ -671,6 +672,7 @@ failed:
 
   if (count)
   {
+    mariadb_increment_pages_prefetched(count);
     DBUG_PRINT("ib_buf", ("linear read-ahead %zu pages from %s: %u",
                           count, space->chain.start->name,
                           new_low.page_no()));
diff --git a/storage/innobase/include/mariadb_stats.h b/storage/innobase/include/mariadb_stats.h
index 3a2790d02f0..838955461e8 100644
--- a/storage/innobase/include/mariadb_stats.h
+++ b/storage/innobase/include/mariadb_stats.h
@@ -79,6 +79,12 @@ inline void mariadb_increment_undo_records_read()
     stats->undo_records_read++;
 }
 
+inline void mariadb_increment_pages_prefetched(ulint n_pages)
+{
+  if (ha_handler_stats *stats= mariadb_stats)
+    stats->pages_prefetched += n_pages;
+}
+
 /* The following has to be identical code as measure() in sql_analyze_stmt.h