mirror of
https://github.com/MariaDB/server.git
synced 2025-04-28 06:45:23 +03:00
WITH LARGE BUFFER POOL (Note: this a backport of revno:3472 from mysql-trunk) rb://845 approved by: Marko When dropping a table (with an .ibd file i.e.: with innodb_file_per_table set) we scan entire LRU to invalidate pages from that table. This can be painful in case of large buffer pools as we hold the buf_pool->mutex for the scan. Note that gravity of the problem does not depend on the size of the table. Even with an empty table but a large and filled up buffer pool we'll end up scanning a very long LRU list. The fix is to scan flush_list and just remove the blocks belonging to the table from the flush_list, marking them as non-dirty. The blocks are left in the LRU list for eventual eviction due to aging. The flush_list is typically much smaller than the LRU list but for cases where it is very long we have the solution of releasing the buf_pool->mutex after scanning 1K pages. buf_page_[set|unset]_sticky(): Use new IO-state BUF_IO_PIN to ensure that a block stays in the flush_list and LRU list when we release buf_pool->mutex. Previously we have been abusing BUF_IO_READ to achieve this.
60 lines
1.3 KiB
Plaintext
#
# Test lazy eviction of compressed (8K) pages at DROP TABLE.
# After DROP TABLE the blocks of the dropped table are only removed from
# the flush_list (marked non-dirty) and left on the LRU for eventual
# eviction due to aging, so some compressed pages should still be
# reported as used until other workload evicts them.
#
-- source include/have_innodb.inc

# Save the settings we change so they can be restored at the end.
let $per_table=`select @@innodb_file_per_table`;
let $format=`select @@innodb_file_format`;

# Query reporting compressed buffer-pool pages currently in use, by size.
-- let $query_i_s = SELECT page_size FROM information_schema.innodb_cmpmem WHERE pages_used > 0

set global innodb_file_per_table=on;
set global innodb_file_format=`1`;

create table t1(a text) engine=innodb key_block_size=8;

-- disable_query_log

# insert some rows so we are using compressed pages
-- let $i = 10
while ($i)
{
  insert into t1 values(repeat('abcdefghijklmnopqrstuvwxyz',100));
  dec $i;
}
-- enable_query_log

# we should be using some 8K pages
-- eval $query_i_s

drop table t1;

# because of lazy eviction at drop table in 5.5 there should be some
# used 8K pages
-- eval $query_i_s

# create a non-compressed table and insert enough into it to evict
# compressed pages
create table t2(a text) engine=innodb;

-- disable_query_log

-- let $i = 400
while ($i)
{
  insert into t2 values(repeat('abcdefghijklmnopqrstuvwxyz',1000));
  dec $i;
}

-- enable_query_log

# now there should be no 8K pages in the buffer pool
-- eval $query_i_s

drop table t2;

#
# restore environment to the state it was before this test execution
#

-- disable_query_log
eval set global innodb_file_format=$format;
eval set global innodb_file_per_table=$per_table;