MDEV-34190: r_engine_stats.pages_read_count is unrealistically low
The symptoms were: take a server with no activity and a table that's not in
the buffer pool. Run a query that reads the whole table and observe that
r_engine_stats.pages_read_count shows about 2% of the table was read. Who
reads the rest?

The cause was that page prefetching done inside InnoDB was not counted.

This patch counts the page prefetch requests made in buf_read_ahead_random()
and buf_read_ahead_linear() and makes them visible in:

- ANALYZE: r_engine_stats.pages_prefetch_read_count
- Slow Query Log: Pages_prefetched:

The patch intentionally doesn't attempt to count the time spent reading the
prefetched pages:

* there's no obvious place where one could do it
* prefetch reads may be done in parallel, and it is not clear how to count
  the time in that case.
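For illustration, here is roughly how the new counter surfaces to a user
(a sketch: the numbers are invented, but the query comes from the new test
below and the JSON member names match what trace_engine_stats() emits after
this patch):

    ANALYZE FORMAT=JSON SELECT * FROM t1 FORCE INDEX (PRIMARY) ORDER BY a,b,c,d;
    ...
    "r_engine_stats": {
      "pages_accessed": 1154,
      "pages_read_count": 28,
      "pages_prefetch_read_count": 1126
    }
    ...

On a cold buffer pool, pages_read_count plus pages_prefetch_read_count should
now roughly add up to pages_accessed; the new test asserts exactly that,
within a 25% margin.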
@@ -22,7 +22,7 @@
 # InnoDB/Engines
 --let log_expected_matches = $log_slow_verbosity_innodb_expected_matches
---let grep_pattern = ^# Pages_accessed: \d+ Pages_read: \d+ Pages_updated: \d+ Old_rows_read: \d+\$
+--let grep_pattern = ^# Pages_accessed: \d+ Pages_read: \d+ Pages_prefetched: \d+ Pages_updated: \d+ Old_rows_read: \d+\$
 --source include/log_grep.inc

 --let grep_pattern = ^# Pages_read_time: \d+\.\d+ Engine_time: \d+\.\d+\$
mysql-test/main/analyze_stmt_prefetch_count.opt (new file, 1 line)
@@ -0,0 +1 @@
+--innodb-buffer-pool-size=32M --innodb_buffer_pool_dump_at_shutdown=off --innodb_buffer_pool_load_at_startup=off --innodb-stats-persistent=1 --innodb-stats-auto-recalc=off
mysql-test/main/analyze_stmt_prefetch_count.result (new file, 60 lines)
@@ -0,0 +1,60 @@
+create table t1 (
+a varchar(255),
+b varchar(255),
+c varchar(255),
+d varchar(255),
+primary key(a,b,c,d)
+) engine=innodb;
+SET unique_checks=0, foreign_key_checks= 0;
+begin;
+insert into t1 select
+repeat(uuid(), 7),
+repeat(uuid(), 7),
+repeat(uuid(), 7),
+repeat(uuid(), 7)
+from seq_1_to_16384;
+insert into t1 values ('z','z','z','z');
+commit;
+# Restart the server to make sure we have an empty InnoDB Buffer Pool
+# (in the test's .opt file we've disabled buffer pool saving/loading
+# and also tried to disable any background activity)
+SET GLOBAL innodb_fast_shutdown=0;
+# restart
+set @innodb_pages_read0=
+(select variable_value
+from information_schema.session_status
+where variable_name like 'innodb_pages_read');
+set @js='$analyze_output';
+set @js=json_extract(@js, '$.query_block.table.r_engine_stats');
+set @pages_accessed= cast(json_value(@js,'$.pages_accessed') as INT);
+set @pages_read_count= cast(json_value(@js,'$.pages_read_count') as INT);
+set @pages_prefetch_read_count= cast(json_value(@js,'$.pages_prefetch_read_count') as INT);
+select @pages_accessed > 1000 and @pages_accessed < 1500;
+@pages_accessed > 1000 and @pages_accessed < 1500
+1
+set @total_read = (@pages_read_count + @pages_prefetch_read_count);
+select @pages_accessed*0.75 < @total_read, @total_read < @pages_accessed*1.25;
+@pages_accessed*0.75 < @total_read	@total_read < @pages_accessed*1.25
+1	1
+set @innodb_pages_read1=
+(select variable_value
+from information_schema.session_status
+where variable_name like 'innodb_pages_read');
+set @innodb_pages_read_incr= (@innodb_pages_read1 - @innodb_pages_read0);
+select @innodb_pages_read_incr > 1000, @innodb_pages_read_incr < 1500;
+@innodb_pages_read_incr > 1000	@innodb_pages_read_incr < 1500
+1	1
+set @js='$analyze_output';
+set @js=json_extract(@js, '$.query_block.table.r_engine_stats');
+# This must just print pages_accessed. No page reads or prefetch reads,
+# because the previous query has read all the needed pages into the
+# buffer pool, which is set to be large enough to accommodate the whole
+# table.
+select @js;
+@js
+{"pages_accessed": NUMBER}
+set @pages_accessed2= cast(json_value(@js,'$.pages_accessed') as INT);
+select @pages_accessed2 = @pages_accessed;
+@pages_accessed2 = @pages_accessed
+1
+drop table t1;
mysql-test/main/analyze_stmt_prefetch_count.test (new file, 77 lines)
@@ -0,0 +1,77 @@
+--source include/have_innodb.inc
+--source include/have_sequence.inc
+
+
+# Each row is 1K.
+create table t1 (
+a varchar(255),
+b varchar(255),
+c varchar(255),
+d varchar(255),
+primary key(a,b,c,d)
+) engine=innodb;
+
+# The data size is 16K * 1K = 16M
+# 16M / (page_size=16K) = 1K pages.
+SET unique_checks=0, foreign_key_checks= 0;
+begin;
+insert into t1 select
+repeat(uuid(), 7),
+repeat(uuid(), 7),
+repeat(uuid(), 7),
+repeat(uuid(), 7)
+from seq_1_to_16384;
+insert into t1 values ('z','z','z','z');
+commit;
+
+--echo # Restart the server to make sure we have an empty InnoDB Buffer Pool
+--echo # (in the test's .opt file we've disabled buffer pool saving/loading
+--echo # and also tried to disable any background activity)
+SET GLOBAL innodb_fast_shutdown=0;
+--source include/restart_mysqld.inc
+
+set @innodb_pages_read0=
+(select variable_value
+from information_schema.session_status
+where variable_name like 'innodb_pages_read');
+
+let $analyze_output= `analyze format=json
+select * from t1 force index (PRIMARY) order by a,b,c,d`;
+evalp set @js='$analyze_output';
+
+set @js=json_extract(@js, '$.query_block.table.r_engine_stats');
+#select @js;
+set @pages_accessed= cast(json_value(@js,'$.pages_accessed') as INT);
+set @pages_read_count= cast(json_value(@js,'$.pages_read_count') as INT);
+set @pages_prefetch_read_count= cast(json_value(@js,'$.pages_prefetch_read_count') as INT);
+
+select @pages_accessed > 1000 and @pages_accessed < 1500;
+
+set @total_read = (@pages_read_count + @pages_prefetch_read_count);
+
+select @pages_accessed*0.75 < @total_read, @total_read < @pages_accessed*1.25;
+
+set @innodb_pages_read1=
+(select variable_value
+from information_schema.session_status
+where variable_name like 'innodb_pages_read');
+
+set @innodb_pages_read_incr= (@innodb_pages_read1 - @innodb_pages_read0);
+
+select @innodb_pages_read_incr > 1000, @innodb_pages_read_incr < 1500;
+
+let $analyze_output= `analyze format=json
+select * from t1 force index (PRIMARY) order by a,b,c,d`;
+evalp set @js='$analyze_output';
+set @js=json_extract(@js, '$.query_block.table.r_engine_stats');
+
+--echo # This must just print pages_accessed. No page reads or prefetch reads,
+--echo # because the previous query has read all the needed pages into the
+--echo # buffer pool, which is set to be large enough to accommodate the whole
+--echo # table.
+--replace_regex /[0-9]+/NUMBER/
+select @js;
+set @pages_accessed2= cast(json_value(@js,'$.pages_accessed') as INT);
+
+select @pages_accessed2 = @pages_accessed;
+drop table t1;
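Outside the test suite, the same counter can be observed by hand in the slow
query log (a sketch; it assumes the slow log is enabled and that
log_slow_verbosity includes the InnoDB/engine statistics, as in the
log_slow_innodb test above):

    SET SESSION long_query_time= 0;
    SET SESSION log_slow_verbosity= 'innodb';
    SELECT * FROM t1 FORCE INDEX (PRIMARY) ORDER BY a,b,c,d;

The slow-log entry for the SELECT should then carry a "Pages_prefetched:"
value next to Pages_accessed and Pages_read.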
@@ -23,7 +23,7 @@ UPDATE t1 set b=b+1 where a=1 or a=999;
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Tmp_tables: \d+ Tmp_disk_tables: \d+$
 [log_grep.inc] lines: 0
-[log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_updated: \d+ Old_rows_read: \d+$ expected_matches: 2
+[log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_prefetched: \d+ Pages_updated: \d+ Old_rows_read: \d+$ expected_matches: 2
 [log_grep.inc] found expected match count: 2
 [log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Pages_read_time: \d+\.\d+ Engine_time: \d+\.\d+$ expected_matches: 2
 [log_grep.inc] found expected match count: 2
@@ -47,7 +47,7 @@ SELECT 1;
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Tmp_tables: \d+ Tmp_disk_tables: \d+$
 [log_grep.inc] lines: 0
-[log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_updated: \d+ Old_rows_read: \d+$
+[log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_prefetched: \d+ Pages_updated: \d+ Old_rows_read: \d+$
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Pages_read_time: \d+\.\d+ Engine_time: \d+\.\d+$
 [log_grep.inc] lines: 0
@@ -85,7 +85,7 @@ INSERT INTO t1 VALUE(1000) pattern: ^# Tmp_tables: \d+ Tmp_disk_tables: \d+$
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_3
 --source include/log_slow_start.inc
-INSERT INTO t1 VALUE(1000) pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_updated: \d+ Old_rows_read: \d+$
+INSERT INTO t1 VALUE(1000) pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_prefetched: \d+ Pages_updated: \d+ Old_rows_read: \d+$
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_3
 --source include/log_slow_start.inc
@@ -30,6 +30,15 @@ public:
   /* Time spent reading pages, in timer_tracker_frequency() units */
   ulonglong pages_read_time;

+  /*
+    Number of pages that we've requested to prefetch while running the query.
+    Note that we don't know:
+    - how much time was spent reading these pages (and how to count the time
+      if reading was done in parallel)
+    - whether the pages were read by "us" or somebody else...
+  */
+  ulonglong pages_prefetched;
+
   ulonglong undo_records_read;

   /* Time spent in engine, in timer_tracker_frequency() units */
@@ -3283,10 +3283,12 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,

     if (my_b_printf(&log_file,
                     "# Pages_accessed: %lu Pages_read: %lu "
+                    "Pages_prefetched: %lu "
                     "Pages_updated: %lu Old_rows_read: %lu\n"
                     "# Pages_read_time: %s Engine_time: %s\n",
                     (ulong) stats->pages_accessed,
                     (ulong) stats->pages_read_count,
+                    (ulong) stats->pages_prefetched,
                     (ulong) stats->pages_updated,
                     (ulong) stats->undo_records_read,
                     query_time_buff, lock_time_buff))
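For reference, a slow-log entry produced by this format string would look
roughly as follows (a sketch; the numbers are invented):

    # Pages_accessed: 1154 Pages_read: 28 Pages_prefetched: 1126 Pages_updated: 0 Old_rows_read: 0
    # Pages_read_time: 35.2998 Engine_time: 478.1557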
@@ -1727,6 +1727,8 @@ static void trace_engine_stats(handler *file, Json_writer *writer)
   if (hs->pages_read_time)
     writer->add_member("pages_read_time_ms").
       add_double(hs->pages_read_time * 1000. / timer_tracker_frequency());
+  if (hs->pages_prefetched)
+    writer->add_member("pages_prefetch_read_count").add_ull(hs->pages_prefetched);
   if (hs->undo_records_read)
     writer->add_member("old_rows_read").add_ull(hs->undo_records_read);
   writer->end_object();
@@ -427,6 +427,7 @@ read_ahead:

   if (count)
   {
+    mariadb_increment_pages_prefetched(count);
     DBUG_PRINT("ib_buf", ("random read-ahead %zu pages from %s: %u",
                           count, space->chain.start->name,
                           low.page_no()));
@@ -671,6 +672,7 @@ failed:

   if (count)
   {
+    mariadb_increment_pages_prefetched(count);
     DBUG_PRINT("ib_buf", ("linear read-ahead %zu pages from %s: %u",
                           count, space->chain.start->name,
                           new_low.page_no()));
@@ -79,6 +79,12 @@ inline void mariadb_increment_undo_records_read()
     stats->undo_records_read++;
 }

+inline void mariadb_increment_pages_prefetched(ulint n_pages)
+{
+  if (ha_handler_stats *stats= mariadb_stats)
+    stats->pages_prefetched += n_pages;
+}
+
 /*
   The following has to be identical code as measure() in sql_analyze_stmt.h