
MDEV-30545 Remove innodb_defragment and related parameters

The following deprecated parameters will be removed:

    innodb_defragment
    innodb_defragment_n_pages
    innodb_defragment_stats_accuracy
    innodb_defragment_fill_factor_n_recs
    innodb_defragment_fill_factor
    innodb_defragment_frequency

The mysql.innodb_index_stats.stat_name values 'n_page_split' and
'n_pages_freed' will lose their special meaning.
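
For reference (illustrative query, not part of the patch): these defragmentation
counters were ordinary rows in the statistics table and could be read with a
plain query, as the removed tests below do; the stat names are taken from those
tests.

    -- stat names written only by the removed defragmentation code
    SELECT table_name, index_name, stat_name, stat_value
      FROM mysql.innodb_index_stats
     WHERE stat_name IN ('n_page_split', 'n_pages_freed', 'n_leaf_pages_defrag');

After this change, InnoDB gives these names no special treatment.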

The related changes to OPTIMIZE TABLE in InnoDB will be removed as well.
The parameter innodb_optimize_fulltext_only will retain its special
meaning in OPTIMIZE TABLE.
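
A minimal sketch of the expected user-visible behaviour after this change
(illustrative only; the exact error and note texts are assumptions, not taken
from the patch):

    -- the removed variable should now be rejected as unknown:
    SET GLOBAL innodb_defragment = 1;              -- expected: Unknown system variable

    CREATE TABLE t (a INT PRIMARY KEY, b TEXT, FULLTEXT KEY(b)) ENGINE=InnoDB;
    OPTIMIZE TABLE t;                              -- expected: "doing recreate + analyze instead"

    SET GLOBAL innodb_optimize_fulltext_only = 1;
    OPTIMIZE TABLE t;                              -- still only optimizes the FULLTEXT index data
    SET GLOBAL innodb_optimize_fulltext_only = DEFAULT;
    DROP TABLE t;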

Tested by: Matthias Leich
Marko Mäkelä
2023-03-11 10:45:35 +02:00
parent b314f7b642
commit 7ca89af6f8
65 changed files with 29 additions and 3804 deletions


@@ -30,7 +30,6 @@ handler.interface
 heap.heap
 innodb.innodb
 innodb.autoinc_persist
-innodb.innodb_defrag_binlog
 innodb.innodb_mysql
 innodb.monitor
 innodb.purge


@@ -1,25 +0,0 @@
call mtr.add_suppression("InnoDB: Table `test`\\.`t1` (has an unreadable root page)");
call mtr.add_suppression("InnoDB: The page \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\] in file '.*test.t1\\.ibd' cannot be decrypted; key_version=1");
call mtr.add_suppression("InnoDB: Recovery failed to read page");
call mtr.add_suppression("Couldn't load plugins from 'file_key_management");
call mtr.add_suppression("Table .*t1.* is corrupted");
call mtr.add_suppression("\\[ERROR\\] InnoDB: Cannot decrypt \\[page id: space=");
# restart: --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
CREATE TABLE t1 (pk INT PRIMARY KEY, f VARCHAR(8)) ENGINE=InnoDB
ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
# restart: --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize Error Table 'test.t1' doesn't exist in engine
test.t1 optimize status Operation failed
SHOW WARNINGS;
Level Code Message
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check Error Table test/t1 is corrupted. Please drop the table and recreate.
test.t1 check error Corrupt
SHOW WARNINGS;
Level Code Message
# restart: --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
DROP TABLE t1;


@@ -1,5 +0,0 @@
[strict_crc32]
--innodb-checksum-algorithm=strict_crc32
[strict_full_crc32]
--innodb-checksum-algorithm=strict_full_crc32


@@ -1,6 +0,0 @@
--loose-innodb-buffer-pool-stats
--loose-innodb-buffer-page
--loose-innodb-buffer-page-lru
--innodb-defragment=1
--innodb-purge-rseg-truncate-frequency=1
--skip-innodb-fast-shutdown


@@ -1,42 +0,0 @@
--source include/have_innodb.inc
# embedded does not support restart
-- source include/not_embedded.inc
-- source filekeys_plugin_exists.inc
#
# MDEV-8769: Server crash at file btr0btr.ic line 122 when defragmenting encrypted table using incorrect keys
# MDEV-8768: Server crash at file btr0btr.ic line 122 when checking encrypted table using incorrect keys
#
call mtr.add_suppression("InnoDB: Table `test`\\.`t1` (has an unreadable root page)");
call mtr.add_suppression("InnoDB: The page \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\] in file '.*test.t1\\.ibd' cannot be decrypted; key_version=1");
call mtr.add_suppression("InnoDB: Recovery failed to read page");
# Suppression for builds where file_key_management plugin is linked statically
call mtr.add_suppression("Couldn't load plugins from 'file_key_management");
call mtr.add_suppression("Table .*t1.* is corrupted");
# for innodb_checksum_algorithm=full_crc32 only
call mtr.add_suppression("\\[ERROR\\] InnoDB: Cannot decrypt \\[page id: space=");
--let $restart_parameters=--plugin-load-add=file_key_management --file-key-management --file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt
--source include/restart_mysqld.inc
CREATE TABLE t1 (pk INT PRIMARY KEY, f VARCHAR(8)) ENGINE=InnoDB
ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
--let $restart_parameters=--plugin-load-add=file_key_management --file-key-management --file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys3.txt
--source include/restart_mysqld.inc
--replace_regex /key_id [1-9][0-9]*/\1 /
OPTIMIZE TABLE t1;
--replace_regex /key_id [1-9][0-9]*/\1 /
SHOW WARNINGS;
--replace_regex /key_id [1-9][0-9]*/\1 /
CHECK TABLE t1;
--replace_regex /key_id [1-9][0-9]*/\1 /
SHOW WARNINGS;
--let $restart_parameters=--plugin-load-add=file_key_management --file-key-management --file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt
--source include/restart_mysqld.inc
DROP TABLE t1;


@@ -1,22 +0,0 @@
set global innodb_defragment=1;
Warnings:
Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
create table t1 (a int not null primary key auto_increment, b varchar(256), key second(a, b)) engine=innodb;
insert t1 select null, repeat('a', 256) from seq_1_to_100;
select count(*) from t1;
count(*)
100
connect con1,localhost,root;
start transaction;
select count(*) from t1;
count(*)
100
connection default;
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
connection con1;
drop table t1;
set global innodb_defragment=default;
Warnings:
Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release


@@ -1,31 +0,0 @@
include/master-slave.inc
[connection master]
drop table if exists t1;
create table t1(a int not null primary key auto_increment, b varchar(256), key second(b)) engine=innodb;
insert into t1 values (1, REPEAT("a", 256));
insert into t1 values (2, REPEAT("a", 256));
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
drop table t1;
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS `t1` /* generated by server */
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1(a int not null primary key auto_increment, b varchar(256), key second(b)) engine=innodb
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Annotate_rows # # insert into t1 values (1, REPEAT("a", 256))
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Annotate_rows # # insert into t1 values (2, REPEAT("a", 256))
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; optimize table t1
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
include/rpl_end.inc


@@ -1,110 +0,0 @@
SET @n_pages= @@GLOBAL.innodb_defragment_n_pages;
SET @accuracy= @@GLOBAL.innodb_defragment_stats_accuracy;
SET @sp= @@GLOBAL.innodb_stats_persistent;
SET GLOBAL innodb_stats_persistent = 0;
set global innodb_defragment_stats_accuracy = 80;
Warnings:
Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b VARCHAR(256),
c INT,
g GEOMETRY NOT NULL,
t VARCHAR(256),
KEY second(a, b),
KEY third(c),
SPATIAL gk(g),
FULLTEXT INDEX fti(t)) ENGINE=INNODB;
connect con1,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK;
connect con2,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK;
connect con3,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK;
connect con4,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK;
connection default;
SET @@global.innodb_defragment_n_pages = 20;
Warnings:
Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
CREATE TEMPORARY TABLE tt (a INT, KEY(a)) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
INSERT INTO tt SELECT 0 FROM seq_1_to_180;
INSERT INTO tt SELECT 5 FROM seq_1_to_160;
INSERT INTO tt SELECT 1 FROM seq_1_to_1000;
OPTIMIZE TABLE tt;
Table Op Msg_type Msg_text
test.tt optimize note Table does not support optimize, doing recreate + analyze instead
test.tt optimize status OK
select count(*) from t1;
count(*)
20000
select count(*) from t1 force index (second);
count(*)
20000
select count(*) from t1 force index (third);
count(*)
20000
select count(*) from t1;
count(*)
15800
select count(*) from t1 force index (second);
count(*)
15800
select count(*) from t1 force index (third);
count(*)
15800
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
count(stat_value) > 0
0
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
count(stat_value) > 0
1
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
count(stat_value) > 0
1
connection con1;
optimize table t1;;
connection default;
INSERT INTO t1 VALUES (400000, REPEAT('A', 256),300000, Point(1,1),'More like a test but different.');;
connection con2;
INSERT INTO t1 VALUES (500000, REPEAT('A', 256),400000, Point(1,1),'Totally different text book.');;
connection con3;
DELETE FROM t1 where a between 1 and 100;;
connection con4;
UPDATE t1 SET c = c + 1 where c between 2000 and 8000;;
connection con1;
connection con2;
connection con3;
connection con4;
connection default;
disconnect con1;
disconnect con2;
disconnect con3;
disconnect con4;
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
check table t1 extended;
Table Op Msg_type Msg_text
test.t1 check status OK
select count(*) from t1;
count(*)
15723
select count(*) from t1 force index (second);
count(*)
15723
select count(*) from t1 force index (third);
count(*)
15723
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
count(stat_value) > 0
1
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
count(stat_value) > 0
1
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
count(stat_value) > 0
1
drop table t1;
SET GLOBAL innodb_defragment_n_pages = @n_pages;
Warnings:
Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
SET GLOBAL innodb_defragment_stats_accuracy = @accuracy;
Warnings:
Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
SET GLOBAL innodb_stats_persistent = @sp;


@@ -1,139 +0,0 @@
SET GLOBAL innodb_defragment_stats_accuracy = 20;
Warnings:
Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
DELETE FROM mysql.innodb_index_stats;
# Create table.
CREATE TABLE t1 (a INT PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256),
KEY SECOND(a, b)) ENGINE=INNODB STATS_PERSISTENT=0;
INSERT INTO t1 SELECT 100*FLOOR(seq/70)+seq%70, REPEAT('A', 256)
FROM seq_1_to_1024;
# Not enough page splits to trigger persistent stats write yet.
SELECT * FROM mysql.innodb_index_stats;
database_name table_name index_name last_update stat_name stat_value sample_size stat_description
INSERT INTO t1 SELECT 100*FLOOR(seq/70)+seq%70, REPEAT('A', 256)
FROM seq_1025_to_1433;
BEGIN;
INSERT INTO t1 SELECT 100*20+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*19+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*18+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*17+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*16+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*15+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*14+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*13+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*12+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*11+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*10+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*9+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*8+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*7+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*6+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*5+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*4+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*3+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*2+seq, REPEAT('A', 256)
FROM seq_70_to_99;
INSERT INTO t1 SELECT 100*1+seq, REPEAT('A', 256)
FROM seq_70_to_99;
ROLLBACK;
SELECT @@GLOBAL.innodb_force_recovery<2 "have background defragmentation";
have background defragmentation
1
SELECT table_name, index_name, stat_name FROM mysql.innodb_index_stats;
table_name index_name stat_name
t1 PRIMARY n_leaf_pages_defrag
t1 PRIMARY n_leaf_pages_reserved
t1 PRIMARY n_page_split
t1 SECOND n_leaf_pages_defrag
t1 SECOND n_leaf_pages_reserved
t1 SECOND n_page_split
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
SELECT table_name, index_name, stat_name FROM mysql.innodb_index_stats;
table_name index_name stat_name
t1 PRIMARY n_leaf_pages_defrag
t1 PRIMARY n_leaf_pages_reserved
t1 PRIMARY n_page_split
t1 PRIMARY n_pages_freed
t1 SECOND n_leaf_pages_defrag
t1 SECOND n_leaf_pages_reserved
t1 SECOND n_page_split
t1 SECOND n_pages_freed
set global innodb_defragment_stats_accuracy = 40;
Warnings:
Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
INSERT INTO t1 (b) SELECT b from t1;
SELECT table_name, index_name, stat_name FROM mysql.innodb_index_stats;
table_name index_name stat_name
t1 PRIMARY n_leaf_pages_defrag
t1 PRIMARY n_leaf_pages_reserved
t1 PRIMARY n_page_split
t1 PRIMARY n_pages_freed
t1 SECOND n_leaf_pages_defrag
t1 SECOND n_leaf_pages_reserved
t1 SECOND n_page_split
t1 SECOND n_pages_freed
INSERT INTO t1 (b) SELECT b from t1;
SELECT stat_name FROM mysql.innodb_index_stats WHERE table_name='t1';
stat_name
n_leaf_pages_defrag
n_leaf_pages_defrag
n_leaf_pages_reserved
n_leaf_pages_reserved
n_page_split
n_page_split
n_pages_freed
n_pages_freed
# Table rename should cause stats rename.
rename table t1 to t2;
SELECT table_name, index_name, stat_name FROM mysql.innodb_index_stats;
table_name index_name stat_name
t2 PRIMARY n_leaf_pages_defrag
t2 PRIMARY n_leaf_pages_reserved
t2 PRIMARY n_page_split
t2 PRIMARY n_pages_freed
t2 SECOND n_leaf_pages_defrag
t2 SECOND n_leaf_pages_reserved
t2 SECOND n_page_split
t2 SECOND n_pages_freed
drop index SECOND on t2;
#
# MDEV-26636: Statistics must not be written for temporary tables
#
SET GLOBAL innodb_defragment_stats_accuracy = 1;
Warnings:
Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
CREATE TEMPORARY TABLE t (a INT PRIMARY KEY, c CHAR(255) NOT NULL)
ENGINE=InnoDB;
INSERT INTO t SELECT seq, '' FROM seq_1_to_100;
# restart
SELECT table_name, index_name, stat_name FROM mysql.innodb_index_stats;
table_name index_name stat_name
t2 PRIMARY n_leaf_pages_defrag
t2 PRIMARY n_leaf_pages_reserved
t2 PRIMARY n_page_split
t2 PRIMARY n_pages_freed
# Clean up
ALTER TABLE t2 STATS_PERSISTENT=1;
DROP TABLE t2;
SELECT * FROM mysql.innodb_index_stats;
database_name table_name index_name last_update stat_name stat_value sample_size stat_description


@@ -1,42 +0,0 @@
DROP TABLE if exists t1;
SET @start_table_definition_cache = @@global.table_definition_cache;
SET @@global.table_definition_cache = 400;
SET @start_flush_log_at_trx_commit = @@global.innodb_flush_log_at_trx_commit;
SET @@global.innodb_flush_log_at_trx_commit=2;
SET @start_innodb_defragment_stats_accuracy = @@global.innodb_defragment_stats_accuracy;
SET @@global.innodb_defragment_stats_accuracy = 80;
Warnings:
Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), KEY SECOND(a, b)) ENGINE=INNODB;
INSERT INTO t1 VALUES(1, REPEAT('A', 256));
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
select stat_value > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name = 'n_page_split';
stat_value > 0
Create 505 table to overflow the table cache.
Sleep for a while to make sure t1 is evicted.
select sleep(15);
sleep(15)
0
Reload t1 to get defrag stats from persistent storage
INSERT INTO t1 (b) SELECT b from t1;
make sure the stats thread will wake up and do the write even if there's a race condition between set and reset.
select sleep(15);
sleep(15)
0
select stat_value > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name = 'n_page_split';
stat_value > 0
SET @@global.innodb_defragment_stats_accuracy = @start_innodb_defragment_stats_accuracy;
Warnings:
Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
SET @@global.table_definition_cache = @start_table_definition_cache;
DROP TABLE t1;


@@ -1,139 +0,0 @@
set global innodb_defragment_stats_accuracy = 80;
Warnings:
Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), KEY SECOND(a, b)) ENGINE=INNODB;
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
INSERT INTO t1 VALUES (100000, REPEAT('A', 256));
INSERT INTO t1 VALUES (200000, REPEAT('A', 256));
INSERT INTO t1 VALUES (300000, REPEAT('A', 256));
INSERT INTO t1 VALUES (400000, REPEAT('A', 256));
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
create procedure defragment()
begin
set @i = 0;
repeat
set @i = @i + 1;
optimize table t1;
until @i = 3 end repeat;
end //
select count(stat_value) from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
count(stat_value)
0
select count(stat_value) from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
count(stat_value)
2
select count(stat_value) from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
count(stat_value)
2
select count(*) from t1;
count(*)
10004
connect con1,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK;
connection con1;
call defragment();
connection default;
connection con1;
connection default;
disconnect con1;
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
# restart
select count(*) from t1;
count(*)
7904
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
count(stat_value) = 0
0
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
count(stat_value) > 0
1
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
count(stat_value) > 0
1
select count(*) from t1 force index (second);
count(*)
7904
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_pages_freed');
count(stat_value) = 0
1
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_page_split');
count(stat_value) = 0
1
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_leaf_pages_defrag');
count(stat_value) = 0
1
SET @@global.innodb_defragment_n_pages = 3;
Warnings:
Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
# restart
select count(stat_value) < 3 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
count(stat_value) < 3
1
select count(stat_value) < 3 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
count(stat_value) < 3
1
select count(stat_value) < 3 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
count(stat_value) < 3
1
select count(*) from t1;
count(*)
6904
select count(stat_value) < 3 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
count(stat_value) < 3
1
select count(stat_value) < 3 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
count(stat_value) < 3
1
select count(stat_value) < 3 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
count(stat_value) < 3
1
select count(*) from t1 force index (second);
count(*)
6904
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_pages_freed');
count(stat_value) = 0
1
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_page_split');
count(stat_value) = 0
1
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_leaf_pages_defrag');
count(stat_value) = 0
1
SET @@global.innodb_defragment_n_pages = 10;
Warnings:
Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
# restart
select count(stat_value) > 1 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
count(stat_value) > 1
1
select count(stat_value) > 1 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
count(stat_value) > 1
1
select count(stat_value) > 1 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
count(stat_value) > 1
1
select count(*) from t1 force index (second);
count(*)
6904
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_pages_freed');
count(stat_value) = 0
1
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_page_split');
count(stat_value) = 0
1
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_leaf_pages_defrag');
count(stat_value) = 0
1
DROP PROCEDURE defragment;
DROP TABLE t1;


@@ -1,118 +0,0 @@
Testing tables with large records
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), c VARCHAR(256), KEY SECOND(a, b,c)) ENGINE=INNODB;
INSERT INTO t1 VALUES (1, REPEAT('A', 256), REPEAT('B', 256));
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
SET GLOBAL innodb_fast_shutdown = 0;
# restart
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
select count(*) from t1;
count(*)
927
select count(*) from t1 force index (second);
count(*)
927
# A few more insertions on the page should not cause a page split.
insert into t1 values (81, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (83, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (87, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (82, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (86, REPEAT('A', 256), REPEAT('B', 256));
# Insert more rows to cause a page split
insert into t1 values (180, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (181, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (182, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (183, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (184, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (185, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (186, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (187, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (188, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (189, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (190, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (191, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (192, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (193, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (194, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (195, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (196, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (197, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (198, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (199, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (200, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (201, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (202, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (203, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (204, REPEAT('A', 256), REPEAT('B', 256));
DROP TABLE t1;
Testing table with small records
CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(16), c VARCHAR(32), KEY SECOND(a,b,c)) ENGINE=INNODB;
SET GLOBAL innodb_fast_shutdown = 0;
# restart
optimize table t2;
Table Op Msg_type Msg_text
test.t2 optimize status OK
select count(*) from t2;
count(*)
3701
select count(*) from t2 force index(second);
count(*)
3701
The page should have room for about 20 insertions
insert into t2 values(1181, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1191, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1182, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1192, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1183, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1193, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1184, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1194, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1185, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1195, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1186, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1196, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1187, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1197, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1188, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1198, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1189, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1199, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1190, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1180, REPEAT('A', 16), REPEAT('B',32));
# Insert more rows to cause a page split
insert into t2 values (180, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (181, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (182, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (183, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (184, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (185, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (186, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (187, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (188, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (189, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (190, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (191, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (192, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (193, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (194, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (195, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (196, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (197, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (198, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (199, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (200, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (201, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (202, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (203, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (204, REPEAT('A', 16), REPEAT('B', 32));
DROP TABLE t2;


@@ -1,46 +0,0 @@
SET @innodb_defragment_orig=@@GLOBAL.innodb_defragment;
SET @innodb_optimize_fulltext_orig=@@GLOBAL.innodb_optimize_fulltext_only;
SET GLOBAL innodb_defragment = 1;
Warnings:
Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
SET GLOBAL innodb_optimize_fulltext_only = 0;
#
# MDEV-12198 innodb_defragment=1 crashes server on
# OPTIMIZE TABLE when FULLTEXT index exists
#
CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(256),
KEY(a, b), FULLTEXT KEY(b)) ENGINE=INNODB;
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
INSERT INTO t1 VALUES (100000, REPEAT('A', 256));
INSERT INTO t1 VALUES (200000, REPEAT('A', 256));
INSERT INTO t1 VALUES (300000, REPEAT('A', 256));
INSERT INTO t1 VALUES (400000, REPEAT('A', 256));
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
#
# MDEV-15824 innodb_defragment=ON trumps
# innodb_optimize_fulltext_only=ON in OPTIMIZE TABLE
#
SET GLOBAL innodb_optimize_fulltext_only = 1;
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
SET GLOBAL innodb_defragment = 0;
Warnings:
Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
DROP TABLE t1;
CREATE TABLE t1 (c POINT PRIMARY KEY, SPATIAL INDEX(c)) ENGINE=InnoDB;
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
DROP TABLE t1;
SET GLOBAL innodb_defragment = @innodb_defragment_orig;
Warnings:
Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
SET GLOBAL innodb_optimize_fulltext_only = @innodb_optimize_fulltext_orig;


@@ -81,9 +81,6 @@ INNODB_HAVE_LZMA
 INNODB_HAVE_BZIP2
 INNODB_HAVE_SNAPPY
 INNODB_HAVE_PUNCH_HOLE
-INNODB_DEFRAGMENT_COMPRESSION_FAILURES
-INNODB_DEFRAGMENT_FAILURES
-INNODB_DEFRAGMENT_COUNT
 INNODB_INSTANT_ALTER_COLUMN
 INNODB_ONLINEDDL_ROWLOG_ROWS
 INNODB_ONLINEDDL_ROWLOG_PCT_USED


@@ -277,16 +277,6 @@ SET GLOBAL innodb_limit_optimistic_insert_debug = 2;
 INSERT INTO t1 VALUES (1),(5),(4),(3),(2);
 SET GLOBAL innodb_limit_optimistic_insert_debug = @old_limit;
 ALTER TABLE t1 ADD COLUMN b INT, ALGORITHM=INSTANT;
-SET @old_defragment = @@innodb_defragment;
-SET GLOBAL innodb_defragment = 1;
-Warnings:
-Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
-OPTIMIZE TABLE t1;
-Table Op Msg_type Msg_text
-test.t1 optimize status OK
-SET GLOBAL innodb_defragment = @old_defragment;
-Warnings:
-Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
 ALTER TABLE t1 ADD vb INT AS (b) VIRTUAL;
 CHECK TABLE t1;
 Table Op Msg_type Msg_text


@@ -1,22 +0,0 @@
#
# MDEV-9155 Enabling Defragmenting in 10.1.8 still causes OPTIMIZE TABLE to take metadatalocks
#
source include/have_innodb.inc;
source include/have_sequence.inc;
set global innodb_defragment=1;
create table t1 (a int not null primary key auto_increment, b varchar(256), key second(a, b)) engine=innodb;
insert t1 select null, repeat('a', 256) from seq_1_to_100;
select count(*) from t1;
connect (con1,localhost,root);
start transaction;
select count(*) from t1;
connection default;
optimize table t1;
connection con1;
drop table t1;
set global innodb_defragment=default;


@@ -1,5 +0,0 @@
--loose-innodb-buffer-pool-stats
--loose-innodb-buffer-page
--loose-innodb-buffer-page-lru
--binlog-format=row
--innodb-defragment=1


@@ -1,21 +0,0 @@
--source include/have_innodb.inc
--source include/master-slave.inc
--source include/big_test.inc
--source include/not_valgrind.inc
--source include/not_embedded.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1(a int not null primary key auto_increment, b varchar(256), key second(b)) engine=innodb;
insert into t1 values (1, REPEAT("a", 256));
insert into t1 values (2, REPEAT("a", 256));
optimize table t1;
drop table t1;
--source include/show_binlog_events.inc
--source include/rpl_end.inc


@@ -1,5 +0,0 @@
--loose-innodb-buffer-pool-stats
--loose-innodb-buffer-page
--loose-innodb-buffer-page-lru
--innodb-log-buffer-size=3m
--innodb-defragment=1


@@ -1,143 +0,0 @@
--source include/have_innodb.inc
--source include/big_test.inc
--source include/not_valgrind.inc
--source include/not_embedded.inc
--source include/have_sequence.inc
SET @n_pages= @@GLOBAL.innodb_defragment_n_pages;
SET @accuracy= @@GLOBAL.innodb_defragment_stats_accuracy;
SET @sp= @@GLOBAL.innodb_stats_persistent;
SET GLOBAL innodb_stats_persistent = 0;
set global innodb_defragment_stats_accuracy = 80;
# Create table.
#
# TODO: Currently we do not defragment spatial indexes,
# because doing it properly would require
# appropriate logic around the SSN (split
# sequence number).
#
# Also do not defragment auxiliary tables related to FULLTEXT INDEX.
#
# Both types added to this test to make sure they do not cause
# problems.
#
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b VARCHAR(256),
c INT,
g GEOMETRY NOT NULL,
t VARCHAR(256),
KEY second(a, b),
KEY third(c),
SPATIAL gk(g),
FULLTEXT INDEX fti(t)) ENGINE=INNODB;
connect (con1,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK);
connect (con2,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK);
connect (con3,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK);
connect (con4,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK);
connection default;
SET @@global.innodb_defragment_n_pages = 20;
CREATE TEMPORARY TABLE tt (a INT, KEY(a)) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
INSERT INTO tt SELECT 0 FROM seq_1_to_180;
INSERT INTO tt SELECT 5 FROM seq_1_to_160;
INSERT INTO tt SELECT 1 FROM seq_1_to_1000;
OPTIMIZE TABLE tt;
let $data_size = 20000;
let $delete_size = 2000;
# Populate table.
let $i = $data_size;
--disable_query_log
while ($i)
{
eval
INSERT INTO t1 VALUES ($data_size + 1 - $i, REPEAT('A', 256), $i, Point($i,$i), 'This is a test message.');
dec $i;
}
--enable_query_log
select count(*) from t1;
select count(*) from t1 force index (second);
select count(*) from t1 force index (third);
# Delete some data
--disable_query_log
let $size = $delete_size;
while ($size)
{
let $j = 100 * $size;
eval delete from t1 where a between $j - 20 and $j;
dec $size;
}
--enable_query_log
select count(*) from t1;
select count(*) from t1 force index (second);
select count(*) from t1 force index (third);
# Above delete will free some pages and insert causes page split and these could cause defrag
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
connection con1;
--send optimize table t1;
connection default;
--send INSERT INTO t1 VALUES (400000, REPEAT('A', 256),300000, Point(1,1),'More like a test but different.');
connection con2;
--send INSERT INTO t1 VALUES (500000, REPEAT('A', 256),400000, Point(1,1),'Totally different text book.');
connection con3;
--send DELETE FROM t1 where a between 1 and 100;
connection con4;
--send UPDATE t1 SET c = c + 1 where c between 2000 and 8000;
connection con1;
--disable_result_log
--reap
--enable_result_log
connection con2;
--reap
connection con3;
--reap
connection con4;
--reap
connection default;
--reap
disconnect con1;
disconnect con2;
disconnect con3;
disconnect con4;
optimize table t1;
check table t1 extended;
select count(*) from t1;
select count(*) from t1 force index (second);
select count(*) from t1 force index (third);
# Now pages are freed
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
drop table t1;
# reset system
SET GLOBAL innodb_defragment_n_pages = @n_pages;
SET GLOBAL innodb_defragment_stats_accuracy = @accuracy;
SET GLOBAL innodb_stats_persistent = @sp;


@@ -1 +0,0 @@
--innodb-defragment=1


@@ -1,88 +0,0 @@
--source include/have_innodb.inc
--source include/not_valgrind.inc
--source include/not_embedded.inc
--source include/have_sequence.inc
SET GLOBAL innodb_defragment_stats_accuracy = 20;
DELETE FROM mysql.innodb_index_stats;
--echo # Create table.
CREATE TABLE t1 (a INT PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256),
KEY SECOND(a, b)) ENGINE=INNODB STATS_PERSISTENT=0;
INSERT INTO t1 SELECT 100*FLOOR(seq/70)+seq%70, REPEAT('A', 256)
FROM seq_1_to_1024;
--echo # Not enough page splits to trigger persistent stats write yet.
SELECT * FROM mysql.innodb_index_stats;
INSERT INTO t1 SELECT 100*FLOOR(seq/70)+seq%70, REPEAT('A', 256)
FROM seq_1025_to_1433;
BEGIN;
let $num_delete = 20;
while ($num_delete)
{
eval INSERT INTO t1 SELECT 100*$num_delete+seq, REPEAT('A', 256)
FROM seq_70_to_99;
dec $num_delete;
}
ROLLBACK;
SELECT @@GLOBAL.innodb_force_recovery<2 "have background defragmentation";
# Wait for defrag_pool to be processed.
let $wait_timeout=30;
let $wait_condition = SELECT COUNT(*)>0 FROM mysql.innodb_index_stats;
--source include/wait_condition.inc
--sorted_result
SELECT table_name, index_name, stat_name FROM mysql.innodb_index_stats;
optimize table t1;
--sorted_result
SELECT table_name, index_name, stat_name FROM mysql.innodb_index_stats;
set global innodb_defragment_stats_accuracy = 40;
INSERT INTO t1 (b) SELECT b from t1;
--sorted_result
SELECT table_name, index_name, stat_name FROM mysql.innodb_index_stats;
INSERT INTO t1 (b) SELECT b from t1;
--sorted_result
SELECT stat_name FROM mysql.innodb_index_stats WHERE table_name='t1';
--echo # Table rename should cause stats rename.
rename table t1 to t2;
--sorted_result
SELECT table_name, index_name, stat_name FROM mysql.innodb_index_stats;
drop index SECOND on t2;
--echo #
--echo # MDEV-26636: Statistics must not be written for temporary tables
--echo #
SET GLOBAL innodb_defragment_stats_accuracy = 1;
CREATE TEMPORARY TABLE t (a INT PRIMARY KEY, c CHAR(255) NOT NULL)
ENGINE=InnoDB;
INSERT INTO t SELECT seq, '' FROM seq_1_to_100;
--source include/restart_mysqld.inc
--sorted_result
SELECT table_name, index_name, stat_name FROM mysql.innodb_index_stats;
--echo # Clean up
# Starting with 10.6, DROP TABLE will not touch persistent statistics
# (not defragmentation statistics either) if the table has none!
ALTER TABLE t2 STATS_PERSISTENT=1;
DROP TABLE t2;
SELECT * FROM mysql.innodb_index_stats;


@@ -1 +0,0 @@
--innodb-defragment=1


@@ -1,77 +0,0 @@
--source include/have_innodb.inc
--source include/big_test.inc
--source include/not_valgrind.inc
--source include/not_embedded.inc
--disable_warnings
DROP TABLE if exists t1;
--enable_warnings
let $num_tables = 505;
SET @start_table_definition_cache = @@global.table_definition_cache;
SET @@global.table_definition_cache = 400;
SET @start_flush_log_at_trx_commit = @@global.innodb_flush_log_at_trx_commit;
SET @@global.innodb_flush_log_at_trx_commit=2;
# set stats accuracy to be pretty high so stats sync is easily triggered.
SET @start_innodb_defragment_stats_accuracy = @@global.innodb_defragment_stats_accuracy;
SET @@global.innodb_defragment_stats_accuracy = 80;
# Create table.
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), KEY SECOND(a, b)) ENGINE=INNODB;
# Populate data
INSERT INTO t1 VALUES(1, REPEAT('A', 256));
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
select stat_value > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name = 'n_page_split';
# Create many tables to over flow the table definition cache
--echo Create $num_tables table to overflow the table cache.
--disable_query_log
let $count = $num_tables;
while ($count)
{
EVAL CREATE TABLE t_$count (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT) ENGINE=INNODB;
EVAL INSERT INTO t_$count VALUES (1), (2);
dec $count;
}
--enable_query_log
--echo Sleep for a while to make sure t1 is evicted.
select sleep(15);
--echo Reload t1 to get defrag stats from persistent storage
INSERT INTO t1 (b) SELECT b from t1;
--echo make sure the stats thread will wake up and do the write even if there's a race condition between set and reset.
select sleep(15);
select stat_value > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name = 'n_page_split';
# Clean up
SET @@global.innodb_defragment_stats_accuracy = @start_innodb_defragment_stats_accuracy;
SET @@global.table_definition_cache = @start_table_definition_cache;
--disable_query_log
let $count = $num_tables;
while ($count)
{
EVAL DROP TABLE t_$count;
dec $count;
}
set @@global.innodb_flush_log_at_trx_commit = @start_flush_log_at_trx_commit;
--enable_query_log
DROP TABLE t1;


@@ -1,4 +0,0 @@
--loose-innodb-buffer-pool-stats
--loose-innodb-buffer-page
--loose-innodb-buffer-page-lru
--innodb-defragment=1


@@ -1,157 +0,0 @@
--source include/have_innodb.inc
--source include/big_test.inc
--source include/not_embedded.inc
# Valgrind is to slow for this test
--source include/not_valgrind.inc
set global innodb_defragment_stats_accuracy = 80;
# Create table.
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), KEY SECOND(a, b)) ENGINE=INNODB;
## Test-1 defragment an empty table
optimize table t1;
## Test-2 defragment a single page table
INSERT INTO t1 VALUES (100000, REPEAT('A', 256));
INSERT INTO t1 VALUES (200000, REPEAT('A', 256));
INSERT INTO t1 VALUES (300000, REPEAT('A', 256));
INSERT INTO t1 VALUES (400000, REPEAT('A', 256));
optimize table t1;
## Test-3 defragment (somewhat) in parallel with delete queries
let $data_size = 10000;
let $delete_size = 100;
delimiter //;
create procedure defragment()
begin
set @i = 0;
repeat
set @i = @i + 1;
optimize table t1;
until @i = 3 end repeat;
end //
delimiter ;//
# Populate table.
let $i = $data_size;
--disable_query_log
BEGIN;
while ($i)
{
eval
INSERT INTO t1 VALUES ($data_size + 1 - $i, REPEAT('A', 256));
dec $i;
}
COMMIT;
--enable_query_log
select count(stat_value) from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
select count(stat_value) from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
select count(stat_value) from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
select count(*) from t1;
connect (con1,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK);
connection con1;
--send call defragment()
connection default;
--disable_query_log
let $size = $delete_size;
while ($size)
{
let $j = 100 * $size;
eval delete from t1 where a between $j - 20 and $j;
dec $size;
}
--enable_query_log
connection con1;
--disable_result_log
--reap
--enable_result_log
connection default;
disconnect con1;
optimize table t1;
--source include/restart_mysqld.inc
select count(*) from t1;
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
select count(*) from t1 force index (second);
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_pages_freed');
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_page_split');
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_leaf_pages_defrag');
## Test-4 defragment with larger n_pages
# delete some more records
--disable_query_log
let $size = $delete_size;
while ($size)
{
let $j = 100 * $size;
eval delete from t1 where a between $j - 30 and $j - 20;
dec $size;
}
--enable_query_log
SET @@global.innodb_defragment_n_pages = 3;
# This will not reduce number of pages by a lot
optimize table t1;
--source include/restart_mysqld.inc
select count(stat_value) < 3 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
select count(stat_value) < 3 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
select count(stat_value) < 3 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
select count(*) from t1;
# We didn't create large wholes with the previous deletion, so if innodb_defragment_n_pages = 3, we won't be able to free up many pages.
select count(stat_value) < 3 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
select count(stat_value) < 3 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
select count(stat_value) < 3 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
select count(*) from t1 force index (second);
# Same holds for secondary index, not many pages are released.
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_pages_freed');
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_page_split');
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_leaf_pages_defrag');
SET @@global.innodb_defragment_n_pages = 10;
optimize table t1;
--source include/restart_mysqld.inc
select count(stat_value) > 1 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
select count(stat_value) > 1 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
select count(stat_value) > 1 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_leaf_pages_defrag');
select count(*) from t1 force index (second);
# Same holds for secondary index, not many pages are released.
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_pages_freed');
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_page_split');
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and index_name = 'second' and stat_name in ('n_leaf_pages_defrag');
DROP PROCEDURE defragment;
DROP TABLE t1;


@@ -1,4 +0,0 @@
--loose-innodb-buffer-pool-stats
--loose-innodb-buffer-page
--loose-innodb-buffer-page-lru
--innodb-defragment=1


@@ -1,203 +0,0 @@
--source include/have_innodb.inc
--source include/big_test.inc
--source include/not_valgrind.inc
--source include/not_embedded.inc
--source include/have_innodb_16k.inc
--echo Testing tables with large records
# Create table.
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), c VARCHAR(256), KEY SECOND(a, b,c)) ENGINE=INNODB;
# Populate table.
INSERT INTO t1 VALUES (1, REPEAT('A', 256), REPEAT('B', 256));
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
--disable_query_log
let $size = 10;
while ($size)
{
let $j = 100 * $size;
eval delete from t1 where a between $j - 20 and $j + 5;
dec $size;
}
--enable_query_log
SET GLOBAL innodb_fast_shutdown = 0;
--source include/restart_mysqld.inc
optimize table t1;
select count(*) from t1;
--let $primary_before = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'PRIMARY', Value, 1)
select count(*) from t1 force index (second);
--let $second_before = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'second', Value, 1)
--echo # A few more insertions on the page should not cause a page split.
insert into t1 values (81, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (83, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (87, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (82, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (86, REPEAT('A', 256), REPEAT('B', 256));
--let $primary_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'PRIMARY', Value, 1)
--let $second_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'second', Value, 1)
if ($primary_before != $primary_after) {
--echo Insertion caused page split on primary, which should be avoided by innodb_defragment_fill_factor.
}
if ($second_before != $second_after) {
--echo Insertion caused page split on second, which should be avoided by innodb_defragment_fill_factor.
}
--echo # Insert more rows to cause a page split
insert into t1 values (180, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (181, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (182, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (183, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (184, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (185, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (186, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (187, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (188, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (189, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (190, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (191, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (192, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (193, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (194, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (195, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (196, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (197, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (198, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (199, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (200, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (201, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (202, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (203, REPEAT('A', 256), REPEAT('B', 256));
insert into t1 values (204, REPEAT('A', 256), REPEAT('B', 256));
--let $primary_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'PRIMARY', Value, 1)
--let $second_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t1%' and index_name = 'second', Value, 1)
if ($primary_before == $primary_after) {
--echo Too little space is reserved on primary index.
}
if ($second_before == $second_after) {
--echo Too little space is reserved on second index.
}
DROP TABLE t1;
--echo Testing table with small records
CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(16), c VARCHAR(32), KEY SECOND(a,b,c)) ENGINE=INNODB;
# Populate table.
--disable_query_log
INSERT INTO t2 VALUES (1, REPEAT('A', 16), REPEAT('B', 32));
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
INSERT INTO t2 (b) SELECT b from t2;
--enable_query_log
--disable_query_log
let $size = 40;
while ($size)
{
let $j = 100 * $size;
eval delete from t2 where a between $j - 20 and $j;
dec $size;
}
--enable_query_log
SET GLOBAL innodb_fast_shutdown = 0;
--source include/restart_mysqld.inc
optimize table t2;
select count(*) from t2;
select count(*) from t2 force index(second);
--let $second_before = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t2%' and index_name = 'second', Value, 1)
--echo The page should have room for about 20 insertions
insert into t2 values(1181, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1191, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1182, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1192, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1183, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1193, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1184, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1194, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1185, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1195, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1186, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1196, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1187, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1197, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1188, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1198, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1189, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1199, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1190, REPEAT('A', 16), REPEAT('B',32));
insert into t2 values(1180, REPEAT('A', 16), REPEAT('B',32));
--let $second_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t2%' and index_name = 'second', Value, 1)
if ($second_before != $second_after) {
--echo Insertion caused page split on second, which should be avoided by innodb_defragment_fill_factor.
}
--echo # Insert more rows to cause a page split
insert into t2 values (180, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (181, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (182, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (183, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (184, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (185, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (186, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (187, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (188, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (189, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (190, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (191, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (192, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (193, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (194, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (195, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (196, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (197, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (198, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (199, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (200, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (201, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (202, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (203, REPEAT('A', 16), REPEAT('B', 32));
insert into t2 values (204, REPEAT('A', 16), REPEAT('B', 32));
--let $second_after = query_get_value(select count(*) as Value from information_schema.innodb_buffer_page where table_name like '%t2%' and index_name = 'second', Value, 1)
if ($second_before == $second_after) {
--echo Too little space is reserved on second index.
}
DROP TABLE t2;

View File

@@ -1,41 +0,0 @@
--source include/have_innodb.inc
SET @innodb_defragment_orig=@@GLOBAL.innodb_defragment;
SET @innodb_optimize_fulltext_orig=@@GLOBAL.innodb_optimize_fulltext_only;
SET GLOBAL innodb_defragment = 1;
SET GLOBAL innodb_optimize_fulltext_only = 0;
--echo #
--echo # MDEV-12198 innodb_defragment=1 crashes server on
--echo # OPTIMIZE TABLE when FULLTEXT index exists
--echo #
CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(256),
KEY(a, b), FULLTEXT KEY(b)) ENGINE=INNODB;
OPTIMIZE TABLE t1;
INSERT INTO t1 VALUES (100000, REPEAT('A', 256));
INSERT INTO t1 VALUES (200000, REPEAT('A', 256));
INSERT INTO t1 VALUES (300000, REPEAT('A', 256));
INSERT INTO t1 VALUES (400000, REPEAT('A', 256));
OPTIMIZE TABLE t1;
--echo #
--echo # MDEV-15824 innodb_defragment=ON trumps
--echo # innodb_optimize_fulltext_only=ON in OPTIMIZE TABLE
--echo #
SET GLOBAL innodb_optimize_fulltext_only = 1;
OPTIMIZE TABLE t1;
SET GLOBAL innodb_defragment = 0;
OPTIMIZE TABLE t1;
DROP TABLE t1;
CREATE TABLE t1 (c POINT PRIMARY KEY, SPATIAL INDEX(c)) ENGINE=InnoDB;
OPTIMIZE TABLE t1;
DROP TABLE t1;
SET GLOBAL innodb_defragment = @innodb_defragment_orig;
SET GLOBAL innodb_optimize_fulltext_only = @innodb_optimize_fulltext_orig;

View File

@@ -325,11 +325,6 @@ SET GLOBAL innodb_limit_optimistic_insert_debug = @old_limit;
ALTER TABLE t1 ADD COLUMN b INT, ALGORITHM=INSTANT;
SET @old_defragment = @@innodb_defragment;
SET GLOBAL innodb_defragment = 1;
OPTIMIZE TABLE t1;
SET GLOBAL innodb_defragment = @old_defragment;
# Exploit MDEV-17468 to force the table definition to be reloaded
ALTER TABLE t1 ADD vb INT AS (b) VIRTUAL;
CHECK TABLE t1;

View File

@@ -1,24 +0,0 @@
SET @orig = @@global.innodb_defragment;
SELECT @orig;
@orig
0
SET GLOBAL innodb_defragment = OFF;
Warnings:
Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment;
@@global.innodb_defragment
0
SET GLOBAL innodb_defragment = ON;
Warnings:
Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment;
@@global.innodb_defragment
1
SET GLOBAL innodb_defragment = 100;
ERROR 42000: Variable 'innodb_defragment' can't be set to the value of '100'
SELECT @@global.innodb_defragment;
@@global.innodb_defragment
1
SET GLOBAL innodb_defragment = @orig;
Warnings:
Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release

View File

@@ -1,47 +0,0 @@
SET @start_innodb_defragment_fill_factor = @@global.innodb_defragment_fill_factor;
SELECT @start_innodb_defragment_fill_factor;
@start_innodb_defragment_fill_factor
0.9
SELECT COUNT(@@global.innodb_defragment_fill_factor);
COUNT(@@global.innodb_defragment_fill_factor)
1
SET @@global.innodb_defragment_fill_factor = 0.77777777777777;
Warnings:
Warning 1287 '@@innodb_defragment_fill_factor' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_fill_factor;
@@global.innodb_defragment_fill_factor
0.777778
SET @@global.innodb_defragment_fill_factor = 1;
Warnings:
Warning 1287 '@@innodb_defragment_fill_factor' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_fill_factor;
@@global.innodb_defragment_fill_factor
1.000000
SET @@global.innodb_defragment_fill_factor = 0.7;
Warnings:
Warning 1287 '@@innodb_defragment_fill_factor' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_fill_factor;
@@global.innodb_defragment_fill_factor
0.700000
SET @@global.innodb_defragment_fill_factor = -1;
Warnings:
Warning 1287 '@@innodb_defragment_fill_factor' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_fill_factor value: '-1'
SELECT @@global.innodb_defragment_fill_factor;
@@global.innodb_defragment_fill_factor
0.700000
SET @@global.innodb_defragment_fill_factor = 2;
Warnings:
Warning 1287 '@@innodb_defragment_fill_factor' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_fill_factor value: '2'
SELECT @@global.innodb_defragment_fill_factor;
@@global.innodb_defragment_fill_factor
1.000000
SET @@global.innodb_defragment_fill_factor = "abc";
ERROR 42000: Incorrect argument type to variable 'innodb_defragment_fill_factor'
SELECT @@global.innodb_defragment_fill_factor;
@@global.innodb_defragment_fill_factor
1.000000
SET @@global.innodb_defragment_fill_factor = @start_innodb_defragment_fill_factor;
Warnings:
Warning 1287 '@@innodb_defragment_fill_factor' is deprecated and will be removed in a future release

View File

@@ -1,52 +0,0 @@
SET @start_innodb_defragment_fill_factor_n_recs = @@global.innodb_defragment_fill_factor_n_recs;
SELECT @start_innodb_defragment_fill_factor_n_recs;
@start_innodb_defragment_fill_factor_n_recs
20
SELECT COUNT(@@global.innodb_defragment_fill_factor_n_recs);
COUNT(@@global.innodb_defragment_fill_factor_n_recs)
1
SET @@global.innodb_defragment_fill_factor_n_recs = 50;
Warnings:
Warning 1287 '@@innodb_defragment_fill_factor_n_recs' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_fill_factor_n_recs;
@@global.innodb_defragment_fill_factor_n_recs
50
SET @@global.innodb_defragment_fill_factor_n_recs = 100;
Warnings:
Warning 1287 '@@innodb_defragment_fill_factor_n_recs' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_fill_factor_n_recs;
@@global.innodb_defragment_fill_factor_n_recs
100
SET @@global.innodb_defragment_fill_factor_n_recs = 1;
Warnings:
Warning 1287 '@@innodb_defragment_fill_factor_n_recs' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_fill_factor_n_recs;
@@global.innodb_defragment_fill_factor_n_recs
1
SET @@global.innodb_defragment_fill_factor_n_recs = -1;
Warnings:
Warning 1287 '@@innodb_defragment_fill_factor_n_recs' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_fill_factor... value: '-1'
SELECT @@global.innodb_defragment_fill_factor_n_recs;
@@global.innodb_defragment_fill_factor_n_recs
1
SET @@global.innodb_defragment_fill_factor_n_recs = 10000;
Warnings:
Warning 1287 '@@innodb_defragment_fill_factor_n_recs' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_fill_factor... value: '10000'
SELECT @@global.innodb_defragment_fill_factor_n_recs;
@@global.innodb_defragment_fill_factor_n_recs
100
SET @@global.innodb_defragment_fill_factor_n_recs = 10.5;
ERROR 42000: Incorrect argument type to variable 'innodb_defragment_fill_factor_n_recs'
SELECT @@global.innodb_defragment_fill_factor_n_recs;
@@global.innodb_defragment_fill_factor_n_recs
100
SET @@global.innodb_defragment_fill_factor_n_recs = "abc";
ERROR 42000: Incorrect argument type to variable 'innodb_defragment_fill_factor_n_recs'
SELECT @@global.innodb_defragment_fill_factor_n_recs;
@@global.innodb_defragment_fill_factor_n_recs
100
SET @@global.innodb_defragment_fill_factor_n_recs = @start_innodb_defragment_fill_factor_n_recs;
Warnings:
Warning 1287 '@@innodb_defragment_fill_factor_n_recs' is deprecated and will be removed in a future release

View File

@@ -1,52 +0,0 @@
SET @start_innodb_defragment_frequency = @@global.innodb_defragment_frequency;
SELECT @start_innodb_defragment_frequency;
@start_innodb_defragment_frequency
40
SELECT COUNT(@@global.innodb_defragment_frequency);
COUNT(@@global.innodb_defragment_frequency)
1
SET @@global.innodb_defragment_frequency = 200;
Warnings:
Warning 1287 '@@innodb_defragment_frequency' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_frequency;
@@global.innodb_defragment_frequency
200
SET @@global.innodb_defragment_frequency = 1;
Warnings:
Warning 1287 '@@innodb_defragment_frequency' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_frequency;
@@global.innodb_defragment_frequency
1
SET @@global.innodb_defragment_frequency = 1000;
Warnings:
Warning 1287 '@@innodb_defragment_frequency' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_frequency;
@@global.innodb_defragment_frequency
1000
SET @@global.innodb_defragment_frequency = -1;
Warnings:
Warning 1287 '@@innodb_defragment_frequency' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_frequency value: '-1'
SELECT @@global.innodb_defragment_frequency;
@@global.innodb_defragment_frequency
1
SET @@global.innodb_defragment_frequency = 10000;
Warnings:
Warning 1287 '@@innodb_defragment_frequency' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_frequency value: '10000'
SELECT @@global.innodb_defragment_frequency;
@@global.innodb_defragment_frequency
1000
SET @@global.innodb_defragment_frequency = 10.5;
ERROR 42000: Incorrect argument type to variable 'innodb_defragment_frequency'
SELECT @@global.innodb_defragment_frequency;
@@global.innodb_defragment_frequency
1000
SET @@global.innodb_defragment_frequency = "abc";
ERROR 42000: Incorrect argument type to variable 'innodb_defragment_frequency'
SELECT @@global.innodb_defragment_frequency;
@@global.innodb_defragment_frequency
1000
SET @@global.innodb_defragment_frequency = @start_innodb_defragment_frequency;
Warnings:
Warning 1287 '@@innodb_defragment_frequency' is deprecated and will be removed in a future release

View File

@@ -1,36 +0,0 @@
SET @start_innodb_defragment_n_pages = @@global.innodb_defragment_n_pages;
SELECT @start_innodb_defragment_n_pages;
@start_innodb_defragment_n_pages
7
SELECT COUNT(@@global.innodb_defragment_n_pages);
COUNT(@@global.innodb_defragment_n_pages)
1
SET @@global.innodb_defragment_n_pages = 1;
Warnings:
Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_n_pages value: '1'
SELECT @@global.innodb_defragment_n_pages;
@@global.innodb_defragment_n_pages
2
SET @@global.innodb_defragment_n_pages = 2;
Warnings:
Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_n_pages;
@@global.innodb_defragment_n_pages
2
SET @@global.innodb_defragment_n_pages = 32;
Warnings:
Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_n_pages;
@@global.innodb_defragment_n_pages
32
SET @@global.innodb_defragment_n_pages = 64;
Warnings:
Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_n_pages value: '64'
SELECT @@global.innodb_defragment_n_pages;
@@global.innodb_defragment_n_pages
32
SET @@global.innodb_defragment_n_pages = @start_innodb_defragment_n_pages;
Warnings:
Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release

View File

@@ -1,41 +0,0 @@
SET @start_innodb_defragment_stats_accuracy = @@global.innodb_defragment_stats_accuracy;
SELECT @start_innodb_defragment_stats_accuracy;
@start_innodb_defragment_stats_accuracy
0
SELECT COUNT(@@global.innodb_defragment_stats_accuracy);
COUNT(@@global.innodb_defragment_stats_accuracy)
1
SET @@global.innodb_defragment_stats_accuracy = 1;
Warnings:
Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_stats_accuracy;
@@global.innodb_defragment_stats_accuracy
1
SET @@global.innodb_defragment_stats_accuracy = 1000;
Warnings:
Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_stats_accuracy;
@@global.innodb_defragment_stats_accuracy
1000
SET @@global.innodb_defragment_stats_accuracy = -1;
Warnings:
Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_stats_accuracy value: '-1'
SELECT @@global.innodb_defragment_stats_accuracy;
@@global.innodb_defragment_stats_accuracy
0
SET @@global.innodb_defragment_stats_accuracy = 1000000000000;
Warnings:
Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_stats_accuracy value: '1000000000000'
SELECT @@global.innodb_defragment_stats_accuracy;
@@global.innodb_defragment_stats_accuracy
4294967295
SET @@global.innodb_defragment_stats_accuracy = "abc";
ERROR 42000: Incorrect argument type to variable 'innodb_defragment_stats_accuracy'
SELECT @@global.innodb_defragment_stats_accuracy;
@@global.innodb_defragment_stats_accuracy
4294967295
SET @@global.innodb_defragment_stats_accuracy = @start_innodb_defragment_stats_accuracy;
Warnings:
Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release

View File

@@ -415,78 +415,6 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST redundant,compact,dynamic
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME INNODB_DEFRAGMENT
SESSION_VALUE NULL
DEFAULT_VALUE OFF
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BOOLEAN
VARIABLE_COMMENT Enable/disable InnoDB defragmentation (default FALSE). When set to FALSE, all existing defragmentation will be paused. And new defragmentation command will fail.Paused defragmentation commands will resume when this variable is set to true again.
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME INNODB_DEFRAGMENT_FILL_FACTOR
SESSION_VALUE NULL
DEFAULT_VALUE 0.900000
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE DOUBLE
VARIABLE_COMMENT A number between [0.7, 1] that tells defragmentation how full it should fill a page. Default is 0.9. Number below 0.7 won't make much sense.This variable, together with innodb_defragment_fill_factor_n_recs, is introduced so defragmentation won't pack the page too full and cause page split on the next insert on every page. The variable indicating more defragmentation gain is the one effective.
NUMERIC_MIN_VALUE 0.7
NUMERIC_MAX_VALUE 1
NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME INNODB_DEFRAGMENT_FILL_FACTOR_N_RECS
SESSION_VALUE NULL
DEFAULT_VALUE 20
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT How many records of space defragmentation should leave on the page. This variable, together with innodb_defragment_fill_factor, is introduced so defragmentation won't pack the page too full and cause page split on the next insert on every page. The variable indicating more defragmentation gain is the one effective.
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 100
NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME INNODB_DEFRAGMENT_FREQUENCY
SESSION_VALUE NULL
DEFAULT_VALUE 40
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT Do not defragment a single index more than this number of time per second.This controls the number of time defragmentation thread can request X_LOCK on an index. Defragmentation thread will check whether 1/defragment_frequency (s) has passed since it worked on this index last time, and put the index back to the queue if not enough time has passed. The actual frequency can only be lower than this given number.
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 1000
NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME INNODB_DEFRAGMENT_N_PAGES
SESSION_VALUE NULL
DEFAULT_VALUE 7
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT Number of pages considered at once when merging multiple pages to defragment
NUMERIC_MIN_VALUE 2
NUMERIC_MAX_VALUE 32
NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME INNODB_DEFRAGMENT_STATS_ACCURACY
SESSION_VALUE NULL
DEFAULT_VALUE 0
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT How many defragment stats changes there are before the stats are written to persistent storage. Set to 0 meaning disable defragment stats tracking.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME INNODB_DISABLE_SORT_FILE_CACHE
SESSION_VALUE NULL
DEFAULT_VALUE OFF

View File

@@ -1,20 +0,0 @@
-- source include/have_innodb.inc
# Check the default value
SET @orig = @@global.innodb_defragment;
SELECT @orig;
# Turn off
SET GLOBAL innodb_defragment = OFF;
SELECT @@global.innodb_defragment;
# Turn on
SET GLOBAL innodb_defragment = ON;
SELECT @@global.innodb_defragment;
# Wrong value
--error ER_WRONG_VALUE_FOR_VAR
SET GLOBAL innodb_defragment = 100;
SELECT @@global.innodb_defragment;
SET GLOBAL innodb_defragment = @orig;

View File

@@ -1,27 +0,0 @@
--source include/have_innodb.inc
SET @start_innodb_defragment_fill_factor = @@global.innodb_defragment_fill_factor;
SELECT @start_innodb_defragment_fill_factor;
SELECT COUNT(@@global.innodb_defragment_fill_factor);
SET @@global.innodb_defragment_fill_factor = 0.77777777777777;
SELECT @@global.innodb_defragment_fill_factor;
SET @@global.innodb_defragment_fill_factor = 1;
SELECT @@global.innodb_defragment_fill_factor;
SET @@global.innodb_defragment_fill_factor = 0.7;
SELECT @@global.innodb_defragment_fill_factor;
SET @@global.innodb_defragment_fill_factor = -1;
SELECT @@global.innodb_defragment_fill_factor;
SET @@global.innodb_defragment_fill_factor = 2;
SELECT @@global.innodb_defragment_fill_factor;
--Error ER_WRONG_TYPE_FOR_VAR
SET @@global.innodb_defragment_fill_factor = "abc";
SELECT @@global.innodb_defragment_fill_factor;
SET @@global.innodb_defragment_fill_factor = @start_innodb_defragment_fill_factor;

View File

@@ -1,31 +0,0 @@
--source include/have_innodb.inc
SET @start_innodb_defragment_fill_factor_n_recs = @@global.innodb_defragment_fill_factor_n_recs;
SELECT @start_innodb_defragment_fill_factor_n_recs;
SELECT COUNT(@@global.innodb_defragment_fill_factor_n_recs);
SET @@global.innodb_defragment_fill_factor_n_recs = 50;
SELECT @@global.innodb_defragment_fill_factor_n_recs;
SET @@global.innodb_defragment_fill_factor_n_recs = 100;
SELECT @@global.innodb_defragment_fill_factor_n_recs;
SET @@global.innodb_defragment_fill_factor_n_recs = 1;
SELECT @@global.innodb_defragment_fill_factor_n_recs;
SET @@global.innodb_defragment_fill_factor_n_recs = -1;
SELECT @@global.innodb_defragment_fill_factor_n_recs;
SET @@global.innodb_defragment_fill_factor_n_recs = 10000;
SELECT @@global.innodb_defragment_fill_factor_n_recs;
--Error ER_WRONG_TYPE_FOR_VAR
SET @@global.innodb_defragment_fill_factor_n_recs = 10.5;
SELECT @@global.innodb_defragment_fill_factor_n_recs;
--Error ER_WRONG_TYPE_FOR_VAR
SET @@global.innodb_defragment_fill_factor_n_recs = "abc";
SELECT @@global.innodb_defragment_fill_factor_n_recs;
SET @@global.innodb_defragment_fill_factor_n_recs = @start_innodb_defragment_fill_factor_n_recs;

View File

@@ -1,37 +0,0 @@
--source include/have_innodb.inc
SET @start_innodb_defragment_frequency = @@global.innodb_defragment_frequency;
SELECT @start_innodb_defragment_frequency;
SELECT COUNT(@@global.innodb_defragment_frequency);
# test valid value
SET @@global.innodb_defragment_frequency = 200;
SELECT @@global.innodb_defragment_frequency;
# test valid min
SET @@global.innodb_defragment_frequency = 1;
SELECT @@global.innodb_defragment_frequency;
# test valid max
SET @@global.innodb_defragment_frequency = 1000;
SELECT @@global.innodb_defragment_frequency;
# test invalid value < min
SET @@global.innodb_defragment_frequency = -1;
SELECT @@global.innodb_defragment_frequency;
# test invalid value > max
SET @@global.innodb_defragment_frequency = 10000;
SELECT @@global.innodb_defragment_frequency;
# test wrong type
--Error ER_WRONG_TYPE_FOR_VAR
SET @@global.innodb_defragment_frequency = 10.5;
SELECT @@global.innodb_defragment_frequency;
--Error ER_WRONG_TYPE_FOR_VAR
SET @@global.innodb_defragment_frequency = "abc";
SELECT @@global.innodb_defragment_frequency;
SET @@global.innodb_defragment_frequency = @start_innodb_defragment_frequency;

View File

@@ -1,22 +0,0 @@
--source include/have_innodb.inc
SET @start_innodb_defragment_n_pages = @@global.innodb_defragment_n_pages;
SELECT @start_innodb_defragment_n_pages;
SELECT COUNT(@@global.innodb_defragment_n_pages);
SET @@global.innodb_defragment_n_pages = 1;
SELECT @@global.innodb_defragment_n_pages;
SET @@global.innodb_defragment_n_pages = 2;
SELECT @@global.innodb_defragment_n_pages;
SET @@global.innodb_defragment_n_pages = 32;
SELECT @@global.innodb_defragment_n_pages;
SET @@global.innodb_defragment_n_pages = 64;
SELECT @@global.innodb_defragment_n_pages;
SET @@global.innodb_defragment_n_pages = @start_innodb_defragment_n_pages;

View File

@@ -1,24 +0,0 @@
--source include/have_innodb.inc
SET @start_innodb_defragment_stats_accuracy = @@global.innodb_defragment_stats_accuracy;
SELECT @start_innodb_defragment_stats_accuracy;
SELECT COUNT(@@global.innodb_defragment_stats_accuracy);
SET @@global.innodb_defragment_stats_accuracy = 1;
SELECT @@global.innodb_defragment_stats_accuracy;
SET @@global.innodb_defragment_stats_accuracy = 1000;
SELECT @@global.innodb_defragment_stats_accuracy;
SET @@global.innodb_defragment_stats_accuracy = -1;
SELECT @@global.innodb_defragment_stats_accuracy;
SET @@global.innodb_defragment_stats_accuracy = 1000000000000;
SELECT @@global.innodb_defragment_stats_accuracy;
--Error ER_WRONG_TYPE_FOR_VAR
SET @@global.innodb_defragment_stats_accuracy = "abc";
SELECT @@global.innodb_defragment_stats_accuracy;
SET @@global.innodb_defragment_stats_accuracy = @start_innodb_defragment_stats_accuracy;

View File

@@ -5302,6 +5302,12 @@ static int init_server_components()
#if defined(__linux__)
MARIADB_REMOVED_OPTION("super-large-pages"),
#endif
MARIADB_REMOVED_OPTION("innodb-defragment"),
MARIADB_REMOVED_OPTION("innodb-defragment-n-pages"),
MARIADB_REMOVED_OPTION("innodb-defragment-stats-accuracy"),
MARIADB_REMOVED_OPTION("innodb-defragment-fill-factor"),
MARIADB_REMOVED_OPTION("innodb-defragment-fill-factor-n-recs"),
MARIADB_REMOVED_OPTION("innodb-defragment-frequency"),
MARIADB_REMOVED_OPTION("innodb-idle-flush-pct"),
MARIADB_REMOVED_OPTION("innodb-locks-unsafe-for-binlog"),
MARIADB_REMOVED_OPTION("innodb-rollback-segments"),

View File

@@ -132,7 +132,6 @@ SET(INNOBASE_SOURCES
btr/btr0cur.cc
btr/btr0pcur.cc
btr/btr0sea.cc
btr/btr0defragment.cc
buf/buf0block_hint.cc
buf/buf0buddy.cc
buf/buf0buf.cc
@@ -151,7 +150,6 @@ SET(INNOBASE_SOURCES
dict/dict0mem.cc
dict/dict0stats.cc
dict/dict0stats_bg.cc
dict/dict0defrag_bg.cc
dict/drop.cc
eval/eval0eval.cc
eval/eval0proc.cc
@@ -186,7 +184,6 @@ SET(INNOBASE_SOURCES
include/btr0bulk.h
include/btr0cur.h
include/btr0cur.inl
include/btr0defragment.h
include/btr0pcur.h
include/btr0pcur.inl
include/btr0sea.h
@@ -211,7 +208,6 @@ SET(INNOBASE_SOURCES
include/dict0boot.h
include/dict0crea.h
include/dict0crea.inl
include/dict0defrag_bg.h
include/dict0dict.h
include/dict0dict.inl
include/dict0load.h

View File

@@ -34,7 +34,6 @@ Created 6/2/1994 Heikki Tuuri
#include "btr0cur.h"
#include "btr0sea.h"
#include "btr0pcur.h"
#include "btr0defragment.h"
#include "rem0cmp.h"
#include "lock0lock.h"
#include "trx0trx.h"
@@ -2848,12 +2847,6 @@ got_split_rec:
}
btr_page_create(new_block, new_page_zip, cursor->index(),
page_level, mtr);
/* Only record the leaf level page splits. */
if (!page_level) {
cursor->index()->stat_defrag_n_page_split ++;
cursor->index()->stat_defrag_modified_counter ++;
btr_defragment_save_defrag_stats_if_needed(cursor->index());
}
/* 3. Calculate the first record on the upper half-page, and the
first record (move_limit) on original page which ends up on the

View File

@@ -1,802 +0,0 @@
/*****************************************************************************
Copyright (C) 2012, 2014 Facebook, Inc. All Rights Reserved.
Copyright (C) 2014, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
/**************************************************//**
@file btr/btr0defragment.cc
Index defragmentation.
Created 05/29/2014 Rongrong Zhong
Modified 16/07/2014 Sunguck Lee
Modified 30/07/2014 Jan Lindström jan.lindstrom@mariadb.com
*******************************************************/
#include "btr0defragment.h"
#include "btr0btr.h"
#include "btr0cur.h"
#include "btr0sea.h"
#include "btr0pcur.h"
#include "dict0stats.h"
#include "dict0stats_bg.h"
#include "dict0defrag_bg.h"
#include "lock0lock.h"
#include "srv0start.h"
#include "mysqld.h"
#include <list>
/* When there's no work, either because defragmentation is disabled or because no
query has been submitted, the thread checks its state every BTR_DEFRAGMENT_SLEEP_IN_USECS. */
#define BTR_DEFRAGMENT_SLEEP_IN_USECS 1000000
/* Reduce the target page size by this amount when a compression failure happens
during defragmentation. 512 is chosen because it is a power of 2 and about
3% of the page size. When there are compression failures in defragmentation,
our goal is to get a decent defragmentation ratio with as few compression failures as
possible. From experimentation it seems that reducing the target size by 512 every
time will make sure the page is compressible within a couple of iterations. */
#define BTR_DEFRAGMENT_PAGE_REDUCTION_STEP_SIZE 512
/** Item in the work queue for the defragmentation thread. */
struct btr_defragment_item_t
{
/** persistent cursor where btr_defragment_n_pages should start */
btr_pcur_t * const pcur;
/** completion signal */
pthread_cond_t *cond;
/** timestamp of last time this index is processed by defragment thread */
ulonglong last_processed= 0;
btr_defragment_item_t(btr_pcur_t *pcur, pthread_cond_t *cond)
: pcur(pcur), cond(cond) {}
};
/* Work queue for defragmentation. */
typedef std::list<btr_defragment_item_t*> btr_defragment_wq_t;
static btr_defragment_wq_t btr_defragment_wq;
/* Mutex protecting the defragmentation work queue.*/
static mysql_mutex_t btr_defragment_mutex;
#ifdef UNIV_PFS_MUTEX
mysql_pfs_key_t btr_defragment_mutex_key;
#endif /* UNIV_PFS_MUTEX */
/* Number of compression failures caused by defragmentation since server
start. */
Atomic_counter<ulint> btr_defragment_compression_failures;
/* Number of btr_defragment_n_pages calls that altered page but didn't
manage to release any page. */
Atomic_counter<ulint> btr_defragment_failures;
/* Total number of btr_defragment_n_pages calls that altered page.
The difference between btr_defragment_count and btr_defragment_failures shows
the amount of effort wasted. */
Atomic_counter<ulint> btr_defragment_count;
bool btr_defragment_active;
static void btr_defragment_chunk(void*);
static tpool::timer* btr_defragment_timer;
static tpool::task_group task_group(1);
static tpool::task btr_defragment_task(btr_defragment_chunk, 0, &task_group);
static void btr_defragment_start();
static void submit_defragment_task(void*arg=0)
{
srv_thread_pool->submit_task(&btr_defragment_task);
}
/******************************************************************//**
Initialize defragmentation. */
void
btr_defragment_init()
{
srv_defragment_interval = 1000000000ULL / srv_defragment_frequency;
mysql_mutex_init(btr_defragment_mutex_key, &btr_defragment_mutex,
nullptr);
btr_defragment_timer = srv_thread_pool->create_timer(submit_defragment_task);
btr_defragment_active = true;
}
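/* A worked illustration of the interval arithmetic above (the numbers are the
documented default and limits of innodb_defragment_frequency, not values read
from this file): with the default srv_defragment_frequency of 40,
srv_defragment_interval becomes 1000000000 / 40 = 25,000,000 ns, i.e. the same
index is revisited at most every 25 ms; with the maximum of 1000 the spacing
shrinks to 1 ms, and with the minimum of 1 it grows to a full second.
btr_defragment_chunk() later converts this nanosecond interval to milliseconds
when it reschedules the timer. */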
/******************************************************************//**
Shutdown defragmentation. Release all resources. */
void
btr_defragment_shutdown()
{
if (!btr_defragment_timer)
return;
delete btr_defragment_timer;
btr_defragment_timer = 0;
task_group.cancel_pending(&btr_defragment_task);
mysql_mutex_lock(&btr_defragment_mutex);
std::list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin();
while(iter != btr_defragment_wq.end()) {
btr_defragment_item_t* item = *iter;
iter = btr_defragment_wq.erase(iter);
if (item->cond) {
pthread_cond_signal(item->cond);
}
}
mysql_mutex_unlock(&btr_defragment_mutex);
mysql_mutex_destroy(&btr_defragment_mutex);
btr_defragment_active = false;
}
/******************************************************************//**
Functions used by the query threads: btr_defragment_xxx_index
Query threads find/add/remove index. */
/******************************************************************//**
Check whether the given index is in btr_defragment_wq. We use index->id
to identify indices. */
bool
btr_defragment_find_index(
dict_index_t* index) /*!< Index to find. */
{
mysql_mutex_lock(&btr_defragment_mutex);
for (std::list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin();
iter != btr_defragment_wq.end();
++iter) {
btr_defragment_item_t* item = *iter;
btr_pcur_t* pcur = item->pcur;
btr_cur_t* cursor = btr_pcur_get_btr_cur(pcur);
dict_index_t* idx = btr_cur_get_index(cursor);
if (index->id == idx->id) {
mysql_mutex_unlock(&btr_defragment_mutex);
return true;
}
}
mysql_mutex_unlock(&btr_defragment_mutex);
return false;
}
/** Defragment an index.
@param pcur persistent cursor
@param thd current session, for checking thd_killed()
@return whether the operation was interrupted */
bool btr_defragment_add_index(btr_pcur_t *pcur, THD *thd)
{
dict_stats_empty_defrag_summary(pcur->index());
pthread_cond_t cond;
pthread_cond_init(&cond, nullptr);
btr_defragment_item_t item(pcur, &cond);
mysql_mutex_lock(&btr_defragment_mutex);
btr_defragment_wq.push_back(&item);
if (btr_defragment_wq.size() == 1)
/* Kick off defragmentation work */
btr_defragment_start();
bool interrupted= false;
for (;;)
{
timespec abstime;
set_timespec(abstime, 1);
if (!my_cond_timedwait(&cond, &btr_defragment_mutex.m_mutex, &abstime))
break;
if (thd_killed(thd))
{
item.cond= nullptr;
interrupted= true;
break;
}
}
pthread_cond_destroy(&cond);
mysql_mutex_unlock(&btr_defragment_mutex);
return interrupted;
}
/******************************************************************//**
When a table is dropped, this function is called to mark the table as removed in
btr_defragment_wq. The difference between this function and the remove_index
function is that this one will not NULL the event. */
void
btr_defragment_remove_table(
dict_table_t* table) /*!< Index to be removed. */
{
mysql_mutex_lock(&btr_defragment_mutex);
for (auto item : btr_defragment_wq)
{
if (item->cond && table == item->pcur->index()->table)
{
pthread_cond_signal(item->cond);
item->cond= nullptr;
}
}
mysql_mutex_unlock(&btr_defragment_mutex);
}
/*********************************************************************//**
Check whether we should save defragmentation statistics to persistent storage.
The stats are saved to persistent storage once an index has accumulated
srv_defragment_stats_accuracy modifications. */
void btr_defragment_save_defrag_stats_if_needed(dict_index_t *index)
{
if (srv_defragment_stats_accuracy != 0 // stats tracking disabled
&& index->table->space_id != 0 // do not track system tables
&& !index->table->is_temporary()
&& index->stat_defrag_modified_counter
>= srv_defragment_stats_accuracy) {
dict_stats_defrag_pool_add(index);
index->stat_defrag_modified_counter = 0;
}
}
/*********************************************************************//**
Main defragment functionalities used by defragment thread.*/
/*************************************************************//**
Calculate number of records from beginning of block that can
fit into size_limit
@return number of records */
static
ulint
btr_defragment_calc_n_recs_for_size(
buf_block_t* block, /*!< in: B-tree page */
dict_index_t* index, /*!< in: index of the page */
ulint size_limit, /*!< in: size limit to fit records in */
ulint* n_recs_size) /*!< out: actual size of the records that fit
in size_limit. */
{
page_t* page = buf_block_get_frame(block);
ulint n_recs = 0;
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs* offsets = offsets_;
rec_offs_init(offsets_);
mem_heap_t* heap = NULL;
ulint size = 0;
page_cur_t cur;
const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;
page_cur_set_before_first(block, &cur);
while (rec_t* cur_rec = page_cur_move_to_next(&cur)) {
if (page_rec_is_supremum(cur_rec)) {
break;
}
offsets = rec_get_offsets(cur_rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
ulint rec_size = rec_offs_size(offsets);
size += rec_size;
if (size > size_limit) {
size = size - rec_size;
break;
}
n_recs ++;
}
*n_recs_size = size;
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
return n_recs;
}
MY_ATTRIBUTE((nonnull(2,3,4), warn_unused_result))
/************************************************************//**
Returns the upper level node pointer to a page. It is assumed that mtr holds
an sx-latch on the tree.
@return rec_get_offsets() of the node pointer record */
static
rec_offs*
btr_page_search_father_node_ptr(
rec_offs* offsets,/*!< in: work area for the return value */
mem_heap_t* heap, /*!< in: memory heap to use */
btr_cur_t* cursor, /*!< in: cursor pointing to user record,
out: cursor on node pointer record,
its page x-latched */
mtr_t* mtr) /*!< in: mtr */
{
const uint32_t page_no = btr_cur_get_block(cursor)->page.id().page_no();
dict_index_t* index = btr_cur_get_index(cursor);
ut_ad(!index->is_spatial());
ut_ad(mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
ut_ad(dict_index_get_page(index) != page_no);
const auto level = btr_page_get_level(btr_cur_get_page(cursor));
const rec_t* user_rec = btr_cur_get_rec(cursor);
ut_a(page_rec_is_user_rec(user_rec));
if (btr_cur_search_to_nth_level(level + 1,
dict_index_build_node_ptr(index,
user_rec, 0,
heap, level),
RW_X_LATCH,
cursor, mtr) != DB_SUCCESS) {
return nullptr;
}
const rec_t* node_ptr = btr_cur_get_rec(cursor);
ut_ad(!btr_cur_get_block(cursor)->page.lock.not_recursive()
|| mtr->memo_contains(index->lock, MTR_MEMO_X_LOCK));
offsets = rec_get_offsets(node_ptr, index, offsets, 0,
ULINT_UNDEFINED, &heap);
if (btr_node_ptr_get_child_page_no(node_ptr, offsets) != page_no) {
offsets = nullptr;
}
return(offsets);
}
static bool btr_page_search_father(mtr_t *mtr, btr_cur_t *cursor)
{
rec_t *rec=
page_rec_get_next(page_get_infimum_rec(cursor->block()->page.frame));
if (UNIV_UNLIKELY(!rec))
return false;
cursor->page_cur.rec= rec;
mem_heap_t *heap= mem_heap_create(100);
const bool got= btr_page_search_father_node_ptr(nullptr, heap, cursor, mtr);
mem_heap_free(heap);
return got;
}
/*************************************************************//**
Merge as many records as possible from the from_block to the to_block. Delete
the from_block if all records are successfully merged to to_block.
@return the to_block to target for next merge operation.
@retval nullptr if corruption was noticed */
static
buf_block_t*
btr_defragment_merge_pages(
dict_index_t* index, /*!< in: index tree */
buf_block_t* from_block, /*!< in: origin of merge */
buf_block_t* to_block, /*!< in: destination of merge */
ulint zip_size, /*!< in: ROW_FORMAT=COMPRESSED size */
ulint reserved_space, /*!< in: space reserved for future
insert to avoid immediate page split */
ulint* max_data_size, /*!< in/out: max data size to
fit in a single compressed page. */
mem_heap_t* heap, /*!< in/out: pointer to memory heap */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
page_t* from_page = buf_block_get_frame(from_block);
page_t* to_page = buf_block_get_frame(to_block);
ulint level = btr_page_get_level(from_page);
ulint n_recs = page_get_n_recs(from_page);
ulint new_data_size = page_get_data_size(to_page);
ulint max_ins_size =
page_get_max_insert_size(to_page, n_recs);
ulint max_ins_size_reorg =
page_get_max_insert_size_after_reorganize(
to_page, n_recs);
ulint max_ins_size_to_use = max_ins_size_reorg > reserved_space
? max_ins_size_reorg - reserved_space : 0;
ulint move_size = 0;
ulint n_recs_to_move = 0;
rec_t* rec = NULL;
ulint target_n_recs = 0;
rec_t* orig_pred;
// Estimate how many records can be moved from the from_page to
// the to_page.
if (zip_size) {
ulint page_diff = srv_page_size - *max_data_size;
max_ins_size_to_use = (max_ins_size_to_use > page_diff)
? max_ins_size_to_use - page_diff : 0;
}
n_recs_to_move = btr_defragment_calc_n_recs_for_size(
from_block, index, max_ins_size_to_use, &move_size);
// If max_ins_size >= move_size, we can move the records without
// reorganizing the page, otherwise we need to reorganize the page
// first to release more space.
if (move_size <= max_ins_size) {
} else if (dberr_t err = btr_page_reorganize_block(page_zip_level,
to_block, index,
mtr)) {
// If reorganization fails, that means the page is
// not compressible. There is no point in trying to
// merge into this page. Continue to the
// next page.
return err == DB_FAIL ? from_block : nullptr;
} else {
ut_ad(page_validate(to_page, index));
max_ins_size = page_get_max_insert_size(to_page, n_recs);
if (max_ins_size < move_size) {
return nullptr;
}
}
// Move records to pack to_page as full as possible.
orig_pred = NULL;
target_n_recs = n_recs_to_move;
dberr_t err;
while (n_recs_to_move > 0) {
if (!(rec = page_rec_get_nth(from_page, n_recs_to_move + 1))) {
return nullptr;
}
orig_pred = page_copy_rec_list_start(
to_block, from_block, rec, index, mtr, &err);
if (orig_pred)
break;
if (err != DB_FAIL) {
return nullptr;
}
// If we reach here, that means compression failed after packing
// n_recs_to_move number of records to to_page. We try to reduce
// the targeted data size on the to_page by
// BTR_DEFRAGMENT_PAGE_REDUCTION_STEP_SIZE and try again.
btr_defragment_compression_failures++;
max_ins_size_to_use =
move_size > BTR_DEFRAGMENT_PAGE_REDUCTION_STEP_SIZE
? move_size - BTR_DEFRAGMENT_PAGE_REDUCTION_STEP_SIZE
: 0;
if (max_ins_size_to_use == 0) {
n_recs_to_move = 0;
move_size = 0;
break;
}
n_recs_to_move = btr_defragment_calc_n_recs_for_size(
from_block, index, max_ins_size_to_use, &move_size);
}
// If less than target_n_recs are moved, it means there are
// compression failures during page_copy_rec_list_start. Adjust
// the max_data_size estimation to reduce compression failures
// in the following runs.
if (target_n_recs > n_recs_to_move
&& *max_data_size > new_data_size + move_size) {
*max_data_size = new_data_size + move_size;
}
btr_cur_t parent;
parent.page_cur.index = index;
parent.page_cur.block = from_block;
if (!btr_page_search_father(mtr, &parent)) {
to_block = nullptr;
} else if (n_recs_to_move == n_recs) {
/* The whole page is merged with the previous page,
free it. */
lock_update_merge_left(*to_block, orig_pred,
from_block->page.id());
btr_search_drop_page_hash_index(from_block, false);
if (btr_level_list_remove(*from_block, *index, mtr)
!= DB_SUCCESS
|| btr_cur_node_ptr_delete(&parent, mtr) != DB_SUCCESS
|| btr_page_free(index, from_block, mtr) != DB_SUCCESS) {
return nullptr;
}
} else {
// There are still records left on the page, so
// increment n_defragmented. Node pointer will be changed
// so remove the old node pointer.
if (n_recs_to_move > 0) {
// Part of the page is merged to left, remove
// the merged records, update record locks and
// node pointer.
dtuple_t* node_ptr;
page_delete_rec_list_start(rec, from_block,
index, mtr);
lock_update_split_and_merge(to_block,
orig_pred,
from_block);
// FIXME: reuse the node_ptr!
if (btr_cur_node_ptr_delete(&parent, mtr)
!= DB_SUCCESS) {
return nullptr;
}
rec = page_rec_get_next(
page_get_infimum_rec(from_page));
if (!rec) {
return nullptr;
}
node_ptr = dict_index_build_node_ptr(
index, rec, page_get_page_no(from_page),
heap, level);
if (btr_insert_on_non_leaf_level(0, index, level+1,
node_ptr, mtr)
!= DB_SUCCESS) {
return nullptr;
}
}
to_block = from_block;
}
return to_block;
}
/*************************************************************//**
Tries to merge N consecutive pages, starting from the page pointed to by the
cursor. Skip space 0. Only consider leaf pages.
This function first loads all N pages into memory, then for each of
the pages other than the first page, it tries to move as many records
as possible to the left sibling to keep the left sibling full. During
the process, if any page becomes empty, that page will be removed from
the level list. Record locks, hash, and node pointers are updated after
page reorganization.
@return pointer to the last block processed, or NULL if reaching end of index */
static
buf_block_t*
btr_defragment_n_pages(
buf_block_t* block, /*!< in: starting block for defragmentation */
dict_index_t* index, /*!< in: index tree */
uint n_pages,/*!< in: number of pages to defragment */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
/* We will need to load the n+1 block, because if the last page is freed
we need to modify the prev_page_no of that next block. */
buf_block_t* blocks[BTR_DEFRAGMENT_MAX_N_PAGES + 1];
page_t* first_page;
buf_block_t* current_block;
ulint total_data_size = 0;
ulint total_n_recs = 0;
ulint data_size_per_rec;
ulint optimal_page_size;
ulint reserved_space;
ulint max_data_size = 0;
uint n_defragmented = 0;
uint n_new_slots;
mem_heap_t* heap;
ibool end_of_index = FALSE;
/* It doesn't make sense to call this function with n_pages = 1. */
ut_ad(n_pages > 1);
if (!page_is_leaf(block->page.frame)) {
return NULL;
}
if (!index->table->space || !index->table->space_id) {
/* Ignore space 0. */
return NULL;
}
if (n_pages > BTR_DEFRAGMENT_MAX_N_PAGES) {
n_pages = BTR_DEFRAGMENT_MAX_N_PAGES;
}
first_page = buf_block_get_frame(block);
const ulint zip_size = index->table->space->zip_size();
/* 1. Load the pages and calculate the total data size. */
blocks[0] = block;
for (uint i = 1; i <= n_pages; i++) {
page_t* page = buf_block_get_frame(blocks[i-1]);
uint32_t page_no = btr_page_get_next(page);
total_data_size += page_get_data_size(page);
total_n_recs += page_get_n_recs(page);
if (page_no == FIL_NULL) {
n_pages = i;
end_of_index = TRUE;
break;
}
blocks[i] = btr_block_get(*index, page_no, RW_X_LATCH, mtr);
if (!blocks[i]) {
return nullptr;
}
}
if (n_pages == 1) {
if (!page_has_prev(first_page)) {
/* last page in the index */
if (dict_index_get_page(index)
== page_get_page_no(first_page))
return NULL;
/* given page is the last page.
Lift the records to father. */
dberr_t err;
btr_lift_page_up(index, block, nullptr, mtr, &err);
}
return NULL;
}
/* 2. Calculate how many pages the data can fit in. If not compressible,
return early. */
ut_a(total_n_recs != 0);
data_size_per_rec = total_data_size / total_n_recs;
// For uncompressed pages, the optimal data size is the free space of an
// empty page.
optimal_page_size = page_get_free_space_of_empty(
page_is_comp(first_page));
// For compressed pages, we take compression failures into account.
if (zip_size) {
ulint size = 0;
uint i = 0;
// We estimate the optimal data size of the index using samples of
// data size. These samples are taken when pages fail to
// compress due to insertion on the page. We use the average
// of all samples we have as the estimate. Different pages of
// the same index vary in compressibility. The average gives a good
// enough estimate.
for (;i < STAT_DEFRAG_DATA_SIZE_N_SAMPLE; i++) {
if (index->stat_defrag_data_size_sample[i] == 0) {
break;
}
size += index->stat_defrag_data_size_sample[i];
}
if (i != 0) {
size /= i;
optimal_page_size = ut_min(optimal_page_size, size);
}
max_data_size = optimal_page_size;
}
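/* A clarifying note on the reservation below (wording based on the
innodb_defragment_fill_factor and innodb_defragment_fill_factor_n_recs help
texts): reserved_space is the smaller of (1 - srv_defragment_fill_factor) of
an optimal page and srv_defragment_fill_factor_n_recs average-sized records,
so whichever setting lets the page be packed fuller is the one that actually
takes effect, and the reservation keeps the next insert from immediately
splitting the freshly packed page. */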
reserved_space = ut_min(static_cast<ulint>(
static_cast<double>(optimal_page_size)
* (1 - srv_defragment_fill_factor)),
(data_size_per_rec
* srv_defragment_fill_factor_n_recs));
optimal_page_size -= reserved_space;
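/* The expression below is a ceiling division: the number of pages the data
would need if each page were filled to optimal_page_size. As an illustration
with made-up numbers, total_data_size = 40000 and optimal_page_size = 15000
give (40000 + 15000 - 1) / 15000 = 3 new slots, so merging 4 or more input
pages is worthwhile while 3 or fewer is not, which is exactly the
n_new_slots >= n_pages check that follows. */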
n_new_slots = uint((total_data_size + optimal_page_size - 1)
/ optimal_page_size);
if (n_new_slots >= n_pages) {
/* Can't defragment. */
if (end_of_index)
return NULL;
return blocks[n_pages-1];
}
/* 3. Defragment pages. */
heap = mem_heap_create(256);
// First defragmented page will be the first page.
current_block = blocks[0];
// Start from the second page.
for (uint i = 1; i < n_pages; i ++) {
buf_block_t* new_block = btr_defragment_merge_pages(
index, blocks[i], current_block, zip_size,
reserved_space, &max_data_size, heap, mtr);
if (new_block != current_block) {
n_defragmented ++;
current_block = new_block;
if (!new_block) {
break;
}
}
}
mem_heap_free(heap);
n_defragmented ++;
btr_defragment_count++;
if (n_pages == n_defragmented) {
btr_defragment_failures++;
} else {
index->stat_defrag_n_pages_freed += (n_pages - n_defragmented);
}
if (end_of_index)
return NULL;
return current_block;
}
void btr_defragment_start() {
if (!srv_defragment)
return;
ut_ad(!btr_defragment_wq.empty());
submit_defragment_task();
}
/**
Callback used by the defragment timer.
Throttling ("sleep") is implemented by rescheduling the
thread pool timer, which, when fired, resumes the work
where it left off.
The state (the current item) is stored in the function parameter.
*/
static void btr_defragment_chunk(void*)
{
THD *thd = innobase_create_background_thd("InnoDB defragment");
set_current_thd(thd);
btr_defragment_item_t* item = nullptr;
mtr_t mtr;
mysql_mutex_lock(&btr_defragment_mutex);
while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
if (!item) {
if (btr_defragment_wq.empty()) {
release_and_exit:
mysql_mutex_unlock(&btr_defragment_mutex);
func_exit:
set_current_thd(nullptr);
destroy_background_thd(thd);
return;
}
item = *btr_defragment_wq.begin();
ut_ad(item);
}
if (!item->cond) {
processed:
btr_defragment_wq.remove(item);
item = nullptr;
continue;
}
mysql_mutex_unlock(&btr_defragment_mutex);
ulonglong now = my_interval_timer();
ulonglong elapsed = now - item->last_processed;
if (elapsed < srv_defragment_interval) {
/* If we see an index again before the interval
determined by the configured frequency has passed,
we just sleep until the interval passes. Since
defragmentation of all indices queues up on a single
thread, it's likely that other indices that follow this one
won't need to sleep again. */
int sleep_ms = (int)((srv_defragment_interval - elapsed) / 1000 / 1000);
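/* Both srv_defragment_interval and elapsed are in nanoseconds
(my_interval_timer() resolution), so dividing by 1000 twice yields the
millisecond delay expected by the timer. */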
if (sleep_ms) {
btr_defragment_timer->set_time(sleep_ms, 0);
goto func_exit;
}
}
log_free_check();
mtr_start(&mtr);
dict_index_t *index = item->pcur->index();
index->set_modified(mtr);
/* To follow the latching order defined in WL#6326,
acquire index->lock X-latch. This entitles us to
acquire page latches in any order for the index. */
mtr_x_lock_index(index, &mtr);
if (buf_block_t *last_block =
item->pcur->restore_position(
BTR_PURGE_TREE_ALREADY_LATCHED, &mtr)
== btr_pcur_t::CORRUPTED
? nullptr
: btr_defragment_n_pages(btr_pcur_get_block(item->pcur),
index, srv_defragment_n_pages,
&mtr)) {
/* If we haven't reached the end of the index,
place the cursor on the last record of last page,
store the cursor position, and put back in queue. */
page_t* last_page = buf_block_get_frame(last_block);
rec_t* rec = page_rec_get_prev(
page_get_supremum_rec(last_page));
if (rec && page_rec_is_user_rec(rec)) {
page_cur_position(rec, last_block,
btr_pcur_get_page_cur(
item->pcur));
}
btr_pcur_store_position(item->pcur, &mtr);
mtr_commit(&mtr);
/* Update the last_processed time of this index. */
item->last_processed = now;
mysql_mutex_lock(&btr_defragment_mutex);
} else {
mtr_commit(&mtr);
/* Reaching the end of the index. */
dict_stats_empty_defrag_stats(index);
if (dberr_t err= dict_stats_save_defrag_stats(index)) {
ib::error() << "Saving defragmentation stats for table "
<< index->table->name
<< " index " << index->name()
<< " failed with error " << err;
} else {
err = dict_stats_save_defrag_summary(index,
thd);
if (err != DB_SUCCESS) {
ib::error() << "Saving defragmentation summary for table "
<< index->table->name
<< " index " << index->name()
<< " failed with error " << err;
}
}
mysql_mutex_lock(&btr_defragment_mutex);
if (item->cond) {
pthread_cond_signal(item->cond);
}
goto processed;
}
}
goto release_and_exit;
}

View File

@@ -1,429 +0,0 @@
/*****************************************************************************
Copyright (c) 2016, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
/**************************************************//**
@file dict/dict0defrag_bg.cc
Defragmentation routines.
Created 25/08/2016 Jan Lindström
*******************************************************/
#include "dict0dict.h"
#include "dict0stats.h"
#include "dict0stats_bg.h"
#include "dict0defrag_bg.h"
#include "btr0btr.h"
#include "srv0start.h"
#include "trx0trx.h"
#include "lock0lock.h"
#include "row0mysql.h"
static mysql_mutex_t defrag_pool_mutex;
/** Iterator type for iterating over the elements of objects of type
defrag_pool_t. */
typedef defrag_pool_t::iterator defrag_pool_iterator_t;
/** Pool where we store information on which tables are to be processed
by background defragmentation. */
defrag_pool_t defrag_pool;
/*****************************************************************//**
Initialize the defrag pool, called once during thread initialization. */
void
dict_defrag_pool_init(void)
/*=======================*/
{
ut_ad(!srv_read_only_mode);
mysql_mutex_init(0, &defrag_pool_mutex, nullptr);
}
/*****************************************************************//**
Free the resources occupied by the defrag pool, called once during
thread de-initialization. */
void
dict_defrag_pool_deinit(void)
/*=========================*/
{
ut_ad(!srv_read_only_mode);
mysql_mutex_destroy(&defrag_pool_mutex);
}
/*****************************************************************//**
Get an index from the auto defrag pool. The returned index id is removed
from the pool.
@return true if the pool was non-empty and "id" was set, false otherwise */
static
bool
dict_stats_defrag_pool_get(
/*=======================*/
table_id_t* table_id, /*!< out: table id, or unmodified if
list is empty */
index_id_t* index_id) /*!< out: index id, or unmodified if
list is empty */
{
ut_ad(!srv_read_only_mode);
mysql_mutex_lock(&defrag_pool_mutex);
if (defrag_pool.empty()) {
mysql_mutex_unlock(&defrag_pool_mutex);
return(false);
}
defrag_pool_item_t& item = defrag_pool.back();
*table_id = item.table_id;
*index_id = item.index_id;
defrag_pool.pop_back();
mysql_mutex_unlock(&defrag_pool_mutex);
return(true);
}
/*****************************************************************//**
Add an index in a table to the defrag pool, which is processed by the
background stats gathering thread. Only the table id and index id are
added to the list, so the table can be closed after being enqueued and
it will be opened when needed. If the table or index does not exist later
(has been DROPped), then it will be removed from the pool and skipped. */
void
dict_stats_defrag_pool_add(
/*=======================*/
const dict_index_t* index) /*!< in: table to add */
{
defrag_pool_item_t item;
ut_ad(!srv_read_only_mode);
mysql_mutex_lock(&defrag_pool_mutex);
/* quit if already in the list */
for (defrag_pool_iterator_t iter = defrag_pool.begin();
iter != defrag_pool.end();
++iter) {
if ((*iter).table_id == index->table->id
&& (*iter).index_id == index->id) {
mysql_mutex_unlock(&defrag_pool_mutex);
return;
}
}
item.table_id = index->table->id;
item.index_id = index->id;
defrag_pool.push_back(item);
if (defrag_pool.size() == 1) {
/* Kick off dict stats optimizer work */
dict_stats_schedule_now();
}
mysql_mutex_unlock(&defrag_pool_mutex);
}
/*****************************************************************//**
Delete a given index from the auto defrag pool. */
void
dict_stats_defrag_pool_del(
/*=======================*/
const dict_table_t* table, /*!<in: if given, remove
all entries for the table */
const dict_index_t* index) /*!< in: if given, remove this index */
{
ut_a((table && !index) || (!table && index));
ut_ad(!srv_read_only_mode);
ut_ad(dict_sys.frozen());
mysql_mutex_lock(&defrag_pool_mutex);
defrag_pool_iterator_t iter = defrag_pool.begin();
while (iter != defrag_pool.end()) {
if ((table && (*iter).table_id == table->id)
|| (index
&& (*iter).table_id == index->table->id
&& (*iter).index_id == index->id)) {
/* erase() invalidates the iterator */
iter = defrag_pool.erase(iter);
if (index)
break;
} else {
iter++;
}
}
mysql_mutex_unlock(&defrag_pool_mutex);
}
/*****************************************************************//**
Get the first index that has been added for updating persistent defrag
stats and save its stats if the index is still available. */
static void dict_stats_process_entry_from_defrag_pool(THD *thd)
{
table_id_t table_id;
index_id_t index_id;
ut_ad(!srv_read_only_mode);
/* pop the first index from the auto defrag pool */
if (!dict_stats_defrag_pool_get(&table_id, &index_id))
/* no index in defrag pool */
return;
/* If the table is no longer cached, we've already lost the in
memory stats so there's nothing really to write to disk. */
MDL_ticket *mdl= nullptr;
if (dict_table_t *table=
dict_table_open_on_id(table_id, false, DICT_TABLE_OP_OPEN_ONLY_IF_CACHED,
thd, &mdl))
{
if (dict_index_t *index= !table->corrupted
? dict_table_find_index_on_id(table, index_id) : nullptr)
if (index->is_btree())
dict_stats_save_defrag_stats(index);
dict_table_close(table, false, thd, mdl);
}
}
/**
Process each index that has been added for updating persistent defrag
stats, saving its stats if the index is still available. */
void dict_defrag_process_entries_from_defrag_pool(THD *thd)
{
while (!defrag_pool.empty())
dict_stats_process_entry_from_defrag_pool(thd);
}
/*********************************************************************//**
Save defragmentation result.
@return DB_SUCCESS or error code */
dberr_t dict_stats_save_defrag_summary(dict_index_t *index, THD *thd)
{
MDL_ticket *mdl_table= nullptr, *mdl_index= nullptr;
dict_table_t *table_stats= dict_table_open_on_name(TABLE_STATS_NAME, false,
DICT_ERR_IGNORE_NONE);
if (table_stats)
{
dict_sys.freeze(SRW_LOCK_CALL);
table_stats= dict_acquire_mdl_shared<false>(table_stats, thd, &mdl_table);
dict_sys.unfreeze();
}
if (!table_stats || strcmp(table_stats->name.m_name, TABLE_STATS_NAME))
{
release_and_exit:
if (table_stats)
dict_table_close(table_stats, false, thd, mdl_table);
return DB_STATS_DO_NOT_EXIST;
}
dict_table_t *index_stats= dict_table_open_on_name(INDEX_STATS_NAME, false,
DICT_ERR_IGNORE_NONE);
if (index_stats)
{
dict_sys.freeze(SRW_LOCK_CALL);
index_stats= dict_acquire_mdl_shared<false>(index_stats, thd, &mdl_index);
dict_sys.unfreeze();
}
if (!index_stats)
goto release_and_exit;
if (strcmp(index_stats->name.m_name, INDEX_STATS_NAME))
{
dict_table_close(index_stats, false, thd, mdl_index);
goto release_and_exit;
}
trx_t *trx= trx_create();
trx->mysql_thd= thd;
trx_start_internal(trx);
dberr_t ret= trx->read_only
? DB_READ_ONLY
: lock_table_for_trx(table_stats, trx, LOCK_X);
if (ret == DB_SUCCESS)
ret= lock_table_for_trx(index_stats, trx, LOCK_X);
row_mysql_lock_data_dictionary(trx);
if (ret == DB_SUCCESS)
ret= dict_stats_save_index_stat(index, time(nullptr), "n_pages_freed",
index->stat_defrag_n_pages_freed,
nullptr,
"Number of pages freed during"
" last defragmentation run.",
trx);
if (ret == DB_SUCCESS)
trx->commit();
else
trx->rollback();
if (table_stats)
dict_table_close(table_stats, true, thd, mdl_table);
if (index_stats)
dict_table_close(index_stats, true, thd, mdl_index);
row_mysql_unlock_data_dictionary(trx);
trx->free();
return ret;
}
/**************************************************************//**
Gets the number of reserved and used pages in a B-tree.
@return number of pages reserved, or ULINT_UNDEFINED if the index
is unavailable */
static
ulint
btr_get_size_and_reserved(
dict_index_t* index, /*!< in: index */
ulint flag, /*!< in: BTR_N_LEAF_PAGES or BTR_TOTAL_SIZE */
ulint* used, /*!< out: number of pages used (<= reserved) */
mtr_t* mtr) /*!< in/out: mini-transaction where index
is s-latched */
{
ulint dummy;
ut_ad(mtr->memo_contains(index->lock, MTR_MEMO_SX_LOCK));
ut_a(flag == BTR_N_LEAF_PAGES || flag == BTR_TOTAL_SIZE);
if (index->page == FIL_NULL
|| dict_index_is_online_ddl(index)
|| !index->is_committed()
|| !index->table->space) {
return(ULINT_UNDEFINED);
}
dberr_t err;
buf_block_t* root = btr_root_block_get(index, RW_SX_LATCH, mtr, &err);
*used = 0;
if (!root) {
return ULINT_UNDEFINED;
}
mtr->x_lock_space(index->table->space);
ulint n = fseg_n_reserved_pages(*root, PAGE_HEADER + PAGE_BTR_SEG_LEAF
+ root->page.frame, used, mtr);
if (flag == BTR_TOTAL_SIZE) {
n += fseg_n_reserved_pages(*root,
PAGE_HEADER + PAGE_BTR_SEG_TOP
+ root->page.frame, &dummy, mtr);
*used += dummy;
}
return(n);
}
/*********************************************************************//**
Save defragmentation stats for a given index.
@return DB_SUCCESS or error code */
dberr_t
dict_stats_save_defrag_stats(
/*============================*/
dict_index_t* index) /*!< in: index */
{
if (!index->is_readable())
return dict_stats_report_error(index->table, true);
const time_t now= time(nullptr);
mtr_t mtr;
ulint n_leaf_pages;
mtr.start();
mtr_sx_lock_index(index, &mtr);
ulint n_leaf_reserved= btr_get_size_and_reserved(index, BTR_N_LEAF_PAGES,
&n_leaf_pages, &mtr);
mtr.commit();
if (n_leaf_reserved == ULINT_UNDEFINED)
return DB_SUCCESS;
THD *thd= current_thd;
MDL_ticket *mdl_table= nullptr, *mdl_index= nullptr;
dict_table_t* table_stats= dict_table_open_on_name(TABLE_STATS_NAME, false,
DICT_ERR_IGNORE_NONE);
if (table_stats)
{
dict_sys.freeze(SRW_LOCK_CALL);
table_stats= dict_acquire_mdl_shared<false>(table_stats, thd, &mdl_table);
dict_sys.unfreeze();
}
if (!table_stats || strcmp(table_stats->name.m_name, TABLE_STATS_NAME))
{
release_and_exit:
if (table_stats)
dict_table_close(table_stats, false, thd, mdl_table);
return DB_STATS_DO_NOT_EXIST;
}
dict_table_t *index_stats= dict_table_open_on_name(INDEX_STATS_NAME, false,
DICT_ERR_IGNORE_NONE);
if (index_stats)
{
dict_sys.freeze(SRW_LOCK_CALL);
index_stats= dict_acquire_mdl_shared<false>(index_stats, thd, &mdl_index);
dict_sys.unfreeze();
}
if (!index_stats)
goto release_and_exit;
if (strcmp(index_stats->name.m_name, INDEX_STATS_NAME))
{
dict_table_close(index_stats, false, thd, mdl_index);
goto release_and_exit;
}
trx_t *trx= trx_create();
trx->mysql_thd= thd;
trx_start_internal(trx);
dberr_t ret= trx->read_only
? DB_READ_ONLY
: lock_table_for_trx(table_stats, trx, LOCK_X);
if (ret == DB_SUCCESS)
ret= lock_table_for_trx(index_stats, trx, LOCK_X);
row_mysql_lock_data_dictionary(trx);
if (ret == DB_SUCCESS)
ret= dict_stats_save_index_stat(index, now, "n_page_split",
index->stat_defrag_n_page_split, nullptr,
"Number of new page splits on leaves"
" since last defragmentation.", trx);
if (ret == DB_SUCCESS)
ret= dict_stats_save_index_stat(index, now, "n_leaf_pages_defrag",
n_leaf_pages, nullptr,
"Number of leaf pages when"
" this stat is saved to disk", trx);
if (ret == DB_SUCCESS)
ret= dict_stats_save_index_stat(index, now, "n_leaf_pages_reserved",
n_leaf_reserved, nullptr,
"Number of pages reserved for"
" this index leaves"
" when this stat is saved to disk", trx);
if (ret == DB_SUCCESS)
trx->commit();
else
trx->rollback();
if (table_stats)
dict_table_close(table_stats, true, thd, mdl_table);
if (index_stats)
dict_table_close(index_stats, true, thd, mdl_index);
row_mysql_unlock_data_dictionary(trx);
trx->free();
return ret;
}
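Note: the summary and per-index defragmentation statistics written above are stored as ordinary rows in mysql.innodb_index_stats, under the stat_name values used in the calls above ('n_pages_freed', 'n_page_split', 'n_leaf_pages_defrag', 'n_leaf_pages_reserved'). A minimal, illustrative query for inspecting such rows; the database and table names are placeholders:

-- Illustrative only: list defragmentation-related rows for one table.
SELECT index_name, stat_name, stat_value, last_update, stat_description
  FROM mysql.innodb_index_stats
 WHERE database_name = 'test' AND table_name = 't1'
   AND stat_name IN ('n_pages_freed', 'n_page_split',
                     'n_leaf_pages_defrag', 'n_leaf_pages_reserved');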

View File

@@ -2086,13 +2086,6 @@ dict_index_add_to_cache(
new_index->stat_index_size = 1; new_index->stat_index_size = 1;
new_index->stat_n_leaf_pages = 1; new_index->stat_n_leaf_pages = 1;
new_index->stat_defrag_n_pages_freed = 0;
new_index->stat_defrag_n_page_split = 0;
new_index->stat_defrag_sample_next_slot = 0;
memset(&new_index->stat_defrag_data_size_sample,
0x0, sizeof(ulint) * STAT_DEFRAG_DATA_SIZE_N_SAMPLE);
/* Add the new index as the last index for the table */ /* Add the new index as the last index for the table */
UT_LIST_ADD_LAST(new_index->table->indexes, new_index); UT_LIST_ADD_LAST(new_index->table->indexes, new_index);

View File

@@ -675,9 +675,6 @@ dict_stats_table_clone_create(
heap, heap,
idx->n_uniq * sizeof(idx->stat_n_non_null_key_vals[0])); idx->n_uniq * sizeof(idx->stat_n_non_null_key_vals[0]));
ut_d(idx->magic_n = DICT_INDEX_MAGIC_N); ut_d(idx->magic_n = DICT_INDEX_MAGIC_N);
idx->stat_defrag_n_page_split = 0;
idx->stat_defrag_n_pages_freed = 0;
} }
ut_d(t->magic_n = DICT_TABLE_MAGIC_N); ut_d(t->magic_n = DICT_TABLE_MAGIC_N);
@@ -701,13 +698,7 @@ dict_stats_table_clone_free(
/*********************************************************************//** /*********************************************************************//**
Write all zeros (or 1 where it makes sense) into an index Write all zeros (or 1 where it makes sense) into an index
statistics members. The resulting stats correspond to an empty index. */ statistics members. The resulting stats correspond to an empty index. */
static static void dict_stats_empty_index(dict_index_t *index)
void
dict_stats_empty_index(
/*===================*/
dict_index_t* index, /*!< in/out: index */
bool empty_defrag_stats)
/*!< in: whether to empty defrag stats */
{ {
ut_ad(!(index->type & DICT_FTS)); ut_ad(!(index->type & DICT_FTS));
ut_ad(index->table->stats_mutex_is_owner()); ut_ad(index->table->stats_mutex_is_owner());
@@ -722,23 +713,12 @@ dict_stats_empty_index(
index->stat_index_size = 1; index->stat_index_size = 1;
index->stat_n_leaf_pages = 1; index->stat_n_leaf_pages = 1;
if (empty_defrag_stats) {
dict_stats_empty_defrag_stats(index);
dict_stats_empty_defrag_summary(index);
}
} }
/*********************************************************************//** /*********************************************************************//**
Write all zeros (or 1 where it makes sense) into a table and its indexes' Write all zeros (or 1 where it makes sense) into a table and its indexes'
statistics members. The resulting stats correspond to an empty table. */ statistics members. The resulting stats correspond to an empty table. */
static static void dict_stats_empty_table(dict_table_t *table)
void
dict_stats_empty_table(
/*===================*/
dict_table_t* table, /*!< in/out: table */
bool empty_defrag_stats)
/*!< in: whether to empty defrag stats */
{ {
/* Initialize table/index level stats is now protected by /* Initialize table/index level stats is now protected by
table level lock_mutex.*/ table level lock_mutex.*/
@@ -762,7 +742,7 @@ dict_stats_empty_table(
continue; continue;
} }
dict_stats_empty_index(index, empty_defrag_stats); dict_stats_empty_index(index);
} }
table->stat_initialized = TRUE; table->stat_initialized = TRUE;
@@ -888,7 +868,7 @@ dict_stats_copy(
if (dst_idx->type & DICT_FTS) { if (dst_idx->type & DICT_FTS) {
continue; continue;
} }
dict_stats_empty_index(dst_idx, true); dict_stats_empty_index(dst_idx);
} else { } else {
continue; continue;
} }
@@ -906,7 +886,7 @@ dict_stats_copy(
} }
if (!INDEX_EQ(src_idx, dst_idx)) { if (!INDEX_EQ(src_idx, dst_idx)) {
dict_stats_empty_index(dst_idx, true); dict_stats_empty_index(dst_idx);
continue; continue;
} }
@@ -917,7 +897,7 @@ dict_stats_copy(
/* Since src is smaller some elements in dst /* Since src is smaller some elements in dst
will remain untouched by the following memmove(), will remain untouched by the following memmove(),
thus we init all of them here. */ thus we init all of them here. */
dict_stats_empty_index(dst_idx, true); dict_stats_empty_index(dst_idx);
} else { } else {
n_copy_el = dst_idx->n_uniq; n_copy_el = dst_idx->n_uniq;
} }
@@ -937,13 +917,6 @@ dict_stats_copy(
dst_idx->stat_index_size = src_idx->stat_index_size; dst_idx->stat_index_size = src_idx->stat_index_size;
dst_idx->stat_n_leaf_pages = src_idx->stat_n_leaf_pages; dst_idx->stat_n_leaf_pages = src_idx->stat_n_leaf_pages;
dst_idx->stat_defrag_modified_counter =
src_idx->stat_defrag_modified_counter;
dst_idx->stat_defrag_n_pages_freed =
src_idx->stat_defrag_n_pages_freed;
dst_idx->stat_defrag_n_page_split =
src_idx->stat_defrag_n_page_split;
} }
dst->stat_initialized = TRUE; dst->stat_initialized = TRUE;
@@ -966,9 +939,6 @@ dict_index_t::stat_n_sample_sizes[]
dict_index_t::stat_n_non_null_key_vals[] dict_index_t::stat_n_non_null_key_vals[]
dict_index_t::stat_index_size dict_index_t::stat_index_size
dict_index_t::stat_n_leaf_pages dict_index_t::stat_n_leaf_pages
dict_index_t::stat_defrag_modified_counter
dict_index_t::stat_defrag_n_pages_freed
dict_index_t::stat_defrag_n_page_split
The returned object should be freed with dict_stats_snapshot_free() The returned object should be freed with dict_stats_snapshot_free()
when no longer needed. when no longer needed.
@param[in] table table whose stats to copy @param[in] table table whose stats to copy
@@ -1426,8 +1396,6 @@ Calculates new estimates for index statistics. This function is
relatively quick and is used to calculate transient statistics that relatively quick and is used to calculate transient statistics that
are not saved on disk. This was the only way to calculate statistics are not saved on disk. This was the only way to calculate statistics
before the Persistent Statistics feature was introduced. before the Persistent Statistics feature was introduced.
This function doesn't update the defragmentation related stats.
Only persistent statistics supports defragmentation stats.
@return error code @return error code
@retval DB_SUCCESS_LOCKED_REC if the table under bulk insert operation */ @retval DB_SUCCESS_LOCKED_REC if the table under bulk insert operation */
static static
@@ -1448,7 +1416,7 @@ dict_stats_update_transient_for_index(
various means, also via secondary indexes. */ various means, also via secondary indexes. */
dummy_empty: dummy_empty:
index->table->stats_mutex_lock(); index->table->stats_mutex_lock();
dict_stats_empty_index(index, false); dict_stats_empty_index(index);
index->table->stats_mutex_unlock(); index->table->stats_mutex_unlock();
return err; return err;
} else if (dict_index_is_online_ddl(index) || !index->is_committed() } else if (dict_index_is_online_ddl(index) || !index->is_committed()
@@ -1545,7 +1513,7 @@ dict_stats_update_transient(
if (!table->space) { if (!table->space) {
/* Nothing to do. */ /* Nothing to do. */
empty_table: empty_table:
dict_stats_empty_table(table, true); dict_stats_empty_table(table);
return err; return err;
} else if (index == NULL) { } else if (index == NULL) {
/* Table definition is corrupt */ /* Table definition is corrupt */
@@ -1564,7 +1532,7 @@ empty_table:
|| !index->is_readable() || !index->is_readable()
|| err == DB_SUCCESS_LOCKED_REC) { || err == DB_SUCCESS_LOCKED_REC) {
index->table->stats_mutex_lock(); index->table->stats_mutex_lock();
dict_stats_empty_index(index, false); dict_stats_empty_index(index);
index->table->stats_mutex_unlock(); index->table->stats_mutex_unlock();
continue; continue;
} }
@@ -2972,19 +2940,19 @@ dict_stats_update_persistent(
|| (index->type | DICT_UNIQUE) != (DICT_CLUSTERED | DICT_UNIQUE)) { || (index->type | DICT_UNIQUE) != (DICT_CLUSTERED | DICT_UNIQUE)) {
/* Table definition is corrupt */ /* Table definition is corrupt */
dict_stats_empty_table(table, true); dict_stats_empty_table(table);
return(DB_CORRUPTION); return(DB_CORRUPTION);
} }
table->stats_mutex_lock(); table->stats_mutex_lock();
dict_stats_empty_index(index, false); dict_stats_empty_index(index);
table->stats_mutex_unlock(); table->stats_mutex_unlock();
index_stats_t stats = dict_stats_analyze_index(index); index_stats_t stats = dict_stats_analyze_index(index);
if (stats.is_bulk_operation()) { if (stats.is_bulk_operation()) {
dict_stats_empty_table(table, false); dict_stats_empty_table(table);
return DB_SUCCESS_LOCKED_REC; return DB_SUCCESS_LOCKED_REC;
} }
@@ -3015,7 +2983,7 @@ dict_stats_update_persistent(
continue; continue;
} }
dict_stats_empty_index(index, false); dict_stats_empty_index(index);
if (dict_stats_should_ignore_index(index)) { if (dict_stats_should_ignore_index(index)) {
continue; continue;
@@ -3027,7 +2995,7 @@ dict_stats_update_persistent(
if (stats.is_bulk_operation()) { if (stats.is_bulk_operation()) {
table->stats_mutex_unlock(); table->stats_mutex_unlock();
dict_stats_empty_table(table, false); dict_stats_empty_table(table);
return DB_SUCCESS_LOCKED_REC; return DB_SUCCESS_LOCKED_REC;
} }
@@ -3155,25 +3123,21 @@ dict_stats_save_index_stat(
/** Report an error if updating table statistics failed because /** Report an error if updating table statistics failed because
.ibd file is missing, table decryption failed or table is corrupted. .ibd file is missing, table decryption failed or table is corrupted.
@param[in,out] table Table @param[in,out] table Table
@param[in] defragment true if statistics is for defragment
@retval DB_DECRYPTION_FAILED if decryption of the table failed @retval DB_DECRYPTION_FAILED if decryption of the table failed
@retval DB_TABLESPACE_DELETED if .ibd file is missing @retval DB_TABLESPACE_DELETED if .ibd file is missing
@retval DB_CORRUPTION if table is marked as corrupted */ @retval DB_CORRUPTION if table is marked as corrupted */
dberr_t static dberr_t dict_stats_report_error(dict_table_t* table)
dict_stats_report_error(dict_table_t* table, bool defragment)
{ {
dberr_t err; dberr_t err;
const char* df = defragment ? " defragment" : "";
if (!table->space) { if (!table->space) {
ib::warn() << "Cannot save" << df << " statistics for table " ib::warn() << "Cannot save statistics for table "
<< table->name << table->name
<< " because the .ibd file is missing. " << " because the .ibd file is missing. "
<< TROUBLESHOOTING_MSG; << TROUBLESHOOTING_MSG;
err = DB_TABLESPACE_DELETED; err = DB_TABLESPACE_DELETED;
} else { } else {
ib::warn() << "Cannot save" << df << " statistics for table " ib::warn() << "Cannot save statistics for table "
<< table->name << table->name
<< " because file " << " because file "
<< table->space->chain.start->name << table->space->chain.start->name
@@ -3183,7 +3147,7 @@ dict_stats_report_error(dict_table_t* table, bool defragment)
err = table->corrupted ? DB_CORRUPTION : DB_DECRYPTION_FAILED; err = table->corrupted ? DB_CORRUPTION : DB_DECRYPTION_FAILED;
} }
dict_stats_empty_table(table, defragment); dict_stats_empty_table(table);
return err; return err;
} }
@@ -3668,16 +3632,6 @@ dict_stats_fetch_index_stats_step(
== 0) { == 0) {
index->stat_n_leaf_pages = (ulint) stat_value; index->stat_n_leaf_pages = (ulint) stat_value;
arg->stats_were_modified = true; arg->stats_were_modified = true;
} else if (stat_name_len == 12 /* strlen("n_page_split") */
&& strncasecmp("n_page_split", stat_name, stat_name_len)
== 0) {
index->stat_defrag_n_page_split = (ulint) stat_value;
arg->stats_were_modified = true;
} else if (stat_name_len == 13 /* strlen("n_pages_freed") */
&& strncasecmp("n_pages_freed", stat_name, stat_name_len)
== 0) {
index->stat_defrag_n_pages_freed = (ulint) stat_value;
arg->stats_were_modified = true;
} else if (stat_name_len > PFX_LEN /* e.g. stat_name=="n_diff_pfx01" */ } else if (stat_name_len > PFX_LEN /* e.g. stat_name=="n_diff_pfx01" */
&& strncasecmp(PFX, stat_name, PFX_LEN) == 0) { && strncasecmp(PFX, stat_name, PFX_LEN) == 0) {
@@ -3785,7 +3739,7 @@ dict_stats_fetch_from_ps(
the persistent storage contains incomplete stats (e.g. missing stats the persistent storage contains incomplete stats (e.g. missing stats
for some index) then we would end up with (partially) uninitialized for some index) then we would end up with (partially) uninitialized
stats. */ stats. */
dict_stats_empty_table(table, true); dict_stats_empty_table(table);
THD* thd = current_thd; THD* thd = current_thd;
MDL_ticket *mdl_table = nullptr, *mdl_index = nullptr; MDL_ticket *mdl_table = nullptr, *mdl_index = nullptr;
@@ -3918,22 +3872,6 @@ release_and_exit:
return(ret); return(ret);
} }
/*********************************************************************//**
Clear defragmentation stats modified counter for all indices in table. */
static
void
dict_stats_empty_defrag_modified_counter(
dict_table_t* table) /*!< in: table */
{
dict_index_t* index;
ut_a(table);
for (index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {
index->stat_defrag_modified_counter = 0;
}
}
/*********************************************************************//** /*********************************************************************//**
Fetches or calculates new estimates for index statistics. */ Fetches or calculates new estimates for index statistics. */
void void
@@ -4010,13 +3948,13 @@ dict_stats_update(
/* If we have set a high innodb_force_recovery level, do /* If we have set a high innodb_force_recovery level, do
not calculate statistics, as a badly corrupted index can not calculate statistics, as a badly corrupted index can
cause a crash in it. */ cause a crash in it. */
dict_stats_empty_table(table, false); dict_stats_empty_table(table);
return(DB_SUCCESS); return(DB_SUCCESS);
} }
if (trx_id_t bulk_trx_id = table->bulk_trx_id) { if (trx_id_t bulk_trx_id = table->bulk_trx_id) {
if (trx_sys.find(nullptr, bulk_trx_id, false)) { if (trx_sys.find(nullptr, bulk_trx_id, false)) {
dict_stats_empty_table(table, false); dict_stats_empty_table(table);
return DB_SUCCESS_LOCKED_REC; return DB_SUCCESS_LOCKED_REC;
} }
} }
@@ -4078,8 +4016,7 @@ dict_stats_update(
goto transient; goto transient;
case DICT_STATS_EMPTY_TABLE: case DICT_STATS_EMPTY_TABLE:
dict_stats_empty_table(table);
dict_stats_empty_table(table, true);
/* If table is using persistent stats, /* If table is using persistent stats,
then save the stats on disk */ then save the stats on disk */
@@ -4140,7 +4077,6 @@ dict_stats_update(
t->stats_last_recalc = table->stats_last_recalc; t->stats_last_recalc = table->stats_last_recalc;
t->stat_modified_counter = 0; t->stat_modified_counter = 0;
dict_stats_empty_defrag_modified_counter(t);
switch (err) { switch (err) {
case DB_SUCCESS: case DB_SUCCESS:

View File

@@ -27,7 +27,6 @@ Created Apr 25, 2012 Vasil Dimov
#include "dict0dict.h" #include "dict0dict.h"
#include "dict0stats.h" #include "dict0stats.h"
#include "dict0stats_bg.h" #include "dict0stats_bg.h"
#include "dict0defrag_bg.h"
#include "row0mysql.h" #include "row0mysql.h"
#include "srv0start.h" #include "srv0start.h"
#include "fil0fil.h" #include "fil0fil.h"
@@ -77,7 +76,6 @@ static void dict_stats_recalc_pool_deinit()
ut_ad(!srv_read_only_mode); ut_ad(!srv_read_only_mode);
recalc_pool.clear(); recalc_pool.clear();
defrag_pool.clear();
/* /*
recalc_pool may still have its buffer allocated. It will free it when recalc_pool may still have its buffer allocated. It will free it when
its destructor is called. its destructor is called.
@@ -87,9 +85,7 @@ static void dict_stats_recalc_pool_deinit()
to empty_pool object, which will free it when leaving this function: to empty_pool object, which will free it when leaving this function:
*/ */
recalc_pool_t recalc_empty_pool; recalc_pool_t recalc_empty_pool;
defrag_pool_t defrag_empty_pool;
recalc_pool.swap(recalc_empty_pool); recalc_pool.swap(recalc_empty_pool);
defrag_pool.swap(defrag_empty_pool);
} }
/*****************************************************************//** /*****************************************************************//**
@@ -255,7 +251,6 @@ void dict_stats_init()
ut_ad(!srv_read_only_mode); ut_ad(!srv_read_only_mode);
mysql_mutex_init(recalc_pool_mutex_key, &recalc_pool_mutex, nullptr); mysql_mutex_init(recalc_pool_mutex_key, &recalc_pool_mutex, nullptr);
pthread_cond_init(&recalc_pool_cond, nullptr); pthread_cond_init(&recalc_pool_cond, nullptr);
dict_defrag_pool_init();
stats_initialised= true; stats_initialised= true;
} }
@@ -272,7 +267,6 @@ void dict_stats_deinit()
stats_initialised = false; stats_initialised = false;
dict_stats_recalc_pool_deinit(); dict_stats_recalc_pool_deinit();
dict_defrag_pool_deinit();
mysql_mutex_destroy(&recalc_pool_mutex); mysql_mutex_destroy(&recalc_pool_mutex);
pthread_cond_destroy(&recalc_pool_cond); pthread_cond_destroy(&recalc_pool_cond);
@@ -380,7 +374,6 @@ static void dict_stats_func(void*)
THD *thd= innobase_create_background_thd("InnoDB statistics"); THD *thd= innobase_create_background_thd("InnoDB statistics");
set_current_thd(thd); set_current_thd(thd);
while (dict_stats_process_entry_from_recalc_pool(thd)) {} while (dict_stats_process_entry_from_recalc_pool(thd)) {}
dict_defrag_process_entries_from_defrag_pool(thd);
set_current_thd(nullptr); set_current_thd(nullptr);
destroy_background_thd(thd); destroy_background_thd(thd);
} }

View File

@@ -66,8 +66,6 @@ before transaction commit and must be rolled back explicitly are as follows:
#include "dict0stats.h" #include "dict0stats.h"
#include "dict0stats_bg.h" #include "dict0stats_bg.h"
#include "dict0defrag_bg.h"
#include "btr0defragment.h"
#include "lock0lock.h" #include "lock0lock.h"
#include "que0que.h" #include "que0que.h"
@@ -263,9 +261,6 @@ void trx_t::commit(std::vector<pfs_os_file_t> &deleted)
{ {
dict_table_t *table= p.first; dict_table_t *table= p.first;
dict_stats_recalc_pool_del(table->id, true); dict_stats_recalc_pool_del(table->id, true);
dict_stats_defrag_pool_del(table, nullptr);
if (btr_defragment_active)
btr_defragment_remove_table(table);
const fil_space_t *space= table->space; const fil_space_t *space= table->space;
ut_ad(!p.second.is_aux_table() || purge_sys.must_wait_FTS()); ut_ad(!p.second.is_aux_table() || purge_sys.must_wait_FTS());
dict_sys.remove(table); dict_sys.remove(table);

View File

@@ -77,7 +77,6 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include "buf0lru.h" #include "buf0lru.h"
#include "dict0boot.h" #include "dict0boot.h"
#include "dict0load.h" #include "dict0load.h"
#include "btr0defragment.h"
#include "dict0crea.h" #include "dict0crea.h"
#include "dict0stats.h" #include "dict0stats.h"
#include "dict0stats_bg.h" #include "dict0stats_bg.h"
@@ -1000,12 +999,6 @@ static SHOW_VAR innodb_status_variables[]= {
{"have_snappy", &(provider_service_snappy->is_loaded), SHOW_BOOL}, {"have_snappy", &(provider_service_snappy->is_loaded), SHOW_BOOL},
{"have_punch_hole", &innodb_have_punch_hole, SHOW_BOOL}, {"have_punch_hole", &innodb_have_punch_hole, SHOW_BOOL},
/* Defragmentation */
{"defragment_compression_failures",
&export_vars.innodb_defragment_compression_failures, SHOW_SIZE_T},
{"defragment_failures", &export_vars.innodb_defragment_failures,SHOW_SIZE_T},
{"defragment_count", &export_vars.innodb_defragment_count, SHOW_SIZE_T},
{"instant_alter_column", {"instant_alter_column",
&export_vars.innodb_instant_alter_column, SHOW_ULONG}, &export_vars.innodb_instant_alter_column, SHOW_ULONG},
@@ -14966,58 +14959,6 @@ ha_innobase::analyze(THD*, HA_CHECK_OPT*)
return(HA_ADMIN_OK); return(HA_ADMIN_OK);
} }
/*****************************************************************//**
Defragment table.
@return error number */
inline int ha_innobase::defragment_table()
{
for (dict_index_t *index= dict_table_get_first_index(m_prebuilt->table);
index; index= dict_table_get_next_index(index))
{
if (!index->is_btree())
continue;
if (btr_defragment_find_index(index))
{
// We borrow this error code. When the same index is already in
// the defragmentation queue, issuing another defragmentation
// only introduces overhead. We return an error here to let the
// user know this is not necessary. Note that this will fail a
// query that is trying to defragment a whole table if one of the
// indices in that table is already being defragmented. We
// choose this behavior so the user is aware of it rather than
// silently defragmenting the other indices of that table.
return ER_SP_ALREADY_EXISTS;
}
btr_pcur_t pcur;
mtr_t mtr;
mtr.start();
if (dberr_t err= pcur.open_leaf(true, index, BTR_SEARCH_LEAF, &mtr))
{
mtr.commit();
return convert_error_code_to_mysql(err, 0, m_user_thd);
}
else if (btr_pcur_get_block(&pcur)->page.id().page_no() == index->page)
{
mtr.commit();
continue;
}
btr_pcur_move_to_next(&pcur, &mtr);
btr_pcur_store_position(&pcur, &mtr);
mtr.commit();
ut_ad(pcur.index() == index);
const bool interrupted= btr_defragment_add_index(&pcur, m_user_thd);
ut_free(pcur.old_rec_buf);
if (interrupted)
return ER_QUERY_INTERRUPTED;
}
return 0;
}
/**********************************************************************//** /**********************************************************************//**
This is mapped to "ALTER TABLE tablename ENGINE=InnoDB", which rebuilds This is mapped to "ALTER TABLE tablename ENGINE=InnoDB", which rebuilds
the table in MySQL. */ the table in MySQL. */
@@ -15039,25 +14980,6 @@ ha_innobase::optimize(
calls to OPTIMIZE, which is undesirable. */ calls to OPTIMIZE, which is undesirable. */
bool try_alter = true; bool try_alter = true;
if (!m_prebuilt->table->is_temporary()
&& m_prebuilt->table->is_readable()
&& srv_defragment) {
int err = defragment_table();
if (err == 0) {
try_alter = false;
} else {
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
uint(err),
"InnoDB: Cannot defragment table %s: returned error code %d\n",
m_prebuilt->table->name.m_name, err);
if(err == ER_SP_ALREADY_EXISTS) {
try_alter = false;
}
}
}
if (innodb_optimize_fulltext_only) { if (innodb_optimize_fulltext_only) {
if (m_prebuilt->table->fts && m_prebuilt->table->fts->cache if (m_prebuilt->table->fts && m_prebuilt->table->fts->cache
&& m_prebuilt->table->space) { && m_prebuilt->table->space) {
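Note: with the defragmentation branch above removed, OPTIMIZE TABLE on an InnoDB table takes the try_alter path (recreate + analyze) unless innodb_optimize_fulltext_only is enabled. A minimal, illustrative sketch of the two remaining modes; test.t1 is a placeholder table:

-- Illustrative only: remaining OPTIMIZE TABLE behavior for InnoDB.
SET GLOBAL innodb_optimize_fulltext_only = OFF;
OPTIMIZE TABLE test.t1;  -- rebuilds the table (recreate + analyze)
SET GLOBAL innodb_optimize_fulltext_only = ON;
OPTIMIZE TABLE test.t1;  -- only optimizes FULLTEXT index data, if any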
@@ -18057,15 +17979,6 @@ innodb_reset_all_monitor_update(
TRUE); TRUE);
} }
static
void
innodb_defragment_frequency_update(THD*, st_mysql_sys_var*, void*,
const void* save)
{
srv_defragment_frequency = (*static_cast<const uint*>(save));
srv_defragment_interval = 1000000000ULL / srv_defragment_frequency;
}
static inline char *my_strtok_r(char *str, const char *delim, char **saveptr) static inline char *my_strtok_r(char *str, const char *delim, char **saveptr)
{ {
#if defined _WIN32 #if defined _WIN32
@@ -19091,60 +19004,6 @@ static MYSQL_SYSVAR_BOOL(buffer_pool_load_at_startup, srv_buffer_pool_load_at_st
"Load the buffer pool from a file named @@innodb_buffer_pool_filename", "Load the buffer pool from a file named @@innodb_buffer_pool_filename",
NULL, NULL, TRUE); NULL, NULL, TRUE);
static MYSQL_SYSVAR_BOOL(defragment, srv_defragment,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_DEPRECATED,
"Enable/disable InnoDB defragmentation (default FALSE). When set to FALSE, all existing "
"defragmentation will be paused. And new defragmentation command will fail."
"Paused defragmentation commands will resume when this variable is set to "
"true again.",
NULL, NULL, FALSE);
static MYSQL_SYSVAR_UINT(defragment_n_pages, srv_defragment_n_pages,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_DEPRECATED,
"Number of pages considered at once when merging multiple pages to "
"defragment",
NULL, NULL, 7, 2, 32, 0);
static MYSQL_SYSVAR_UINT(defragment_stats_accuracy,
srv_defragment_stats_accuracy,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_DEPRECATED,
"How many defragment stats changes there are before the stats "
"are written to persistent storage. Set to 0 meaning disable "
"defragment stats tracking.",
NULL, NULL, 0, 0, ~0U, 0);
static MYSQL_SYSVAR_UINT(defragment_fill_factor_n_recs,
srv_defragment_fill_factor_n_recs,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_DEPRECATED,
"How many records of space defragmentation should leave on the page. "
"This variable, together with innodb_defragment_fill_factor, is introduced "
"so defragmentation won't pack the page too full and cause page split on "
"the next insert on every page. The variable indicating more defragmentation"
" gain is the one effective.",
NULL, NULL, 20, 1, 100, 0);
static MYSQL_SYSVAR_DOUBLE(defragment_fill_factor, srv_defragment_fill_factor,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_DEPRECATED,
"A number between [0.7, 1] that tells defragmentation how full it should "
"fill a page. Default is 0.9. Number below 0.7 won't make much sense."
"This variable, together with innodb_defragment_fill_factor_n_recs, is "
"introduced so defragmentation won't pack the page too full and cause "
"page split on the next insert on every page. The variable indicating more "
"defragmentation gain is the one effective.",
NULL, NULL, 0.9, 0.7, 1, 0);
static MYSQL_SYSVAR_UINT(defragment_frequency, srv_defragment_frequency,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_DEPRECATED,
"Do not defragment a single index more than this number of time per second."
"This controls the number of time defragmentation thread can request X_LOCK "
"on an index. Defragmentation thread will check whether "
"1/defragment_frequency (s) has passed since it worked on this index last "
"time, and put the index back to the queue if not enough time has passed. "
"The actual frequency can only be lower than this given number.",
NULL, innodb_defragment_frequency_update,
SRV_DEFRAGMENT_FREQUENCY_DEFAULT, 1, 1000, 0);
static MYSQL_SYSVAR_ULONG(lru_scan_depth, srv_LRU_scan_depth, static MYSQL_SYSVAR_ULONG(lru_scan_depth, srv_LRU_scan_depth,
PLUGIN_VAR_RQCMDARG, PLUGIN_VAR_RQCMDARG,
"How deep to scan LRU to keep it clean", "How deep to scan LRU to keep it clean",
@@ -19719,12 +19578,6 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(buffer_pool_load_pages_abort), MYSQL_SYSVAR(buffer_pool_load_pages_abort),
#endif /* UNIV_DEBUG */ #endif /* UNIV_DEBUG */
MYSQL_SYSVAR(buffer_pool_load_at_startup), MYSQL_SYSVAR(buffer_pool_load_at_startup),
MYSQL_SYSVAR(defragment),
MYSQL_SYSVAR(defragment_n_pages),
MYSQL_SYSVAR(defragment_stats_accuracy),
MYSQL_SYSVAR(defragment_fill_factor),
MYSQL_SYSVAR(defragment_fill_factor_n_recs),
MYSQL_SYSVAR(defragment_frequency),
MYSQL_SYSVAR(lru_scan_depth), MYSQL_SYSVAR(lru_scan_depth),
MYSQL_SYSVAR(lru_flush_size), MYSQL_SYSVAR(lru_flush_size),
MYSQL_SYSVAR(flush_neighbors), MYSQL_SYSVAR(flush_neighbors),

View File

@@ -207,7 +207,6 @@ public:
int delete_table(const char *name) override; int delete_table(const char *name) override;
int rename_table(const char* from, const char* to) override; int rename_table(const char* from, const char* to) override;
inline int defragment_table();
int check(THD* thd, HA_CHECK_OPT* check_opt) override; int check(THD* thd, HA_CHECK_OPT* check_opt) override;
inline void reload_statistics(); inline void reload_statistics();

View File

@@ -1,65 +0,0 @@
/*****************************************************************************
Copyright (C) 2013, 2014 Facebook, Inc. All Rights Reserved.
Copyright (C) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
#ifndef btr0defragment_h
#define btr0defragment_h
#include "btr0pcur.h"
/* Max number of pages to consider at once during defragmentation. */
#define BTR_DEFRAGMENT_MAX_N_PAGES 32
/** stats in btr_defragment */
extern Atomic_counter<ulint> btr_defragment_compression_failures;
extern Atomic_counter<ulint> btr_defragment_failures;
extern Atomic_counter<ulint> btr_defragment_count;
/******************************************************************//**
Initialize defragmentation. */
void
btr_defragment_init(void);
/******************************************************************//**
Shutdown defragmentation. */
void
btr_defragment_shutdown();
/******************************************************************//**
Check whether the given index is in btr_defragment_wq. */
bool
btr_defragment_find_index(
dict_index_t* index); /*!< Index to find. */
/** Defragment an index.
@param pcur persistent cursor
@param thd current session, for checking thd_killed()
@return whether the operation was interrupted */
bool btr_defragment_add_index(btr_pcur_t *pcur, THD *thd);
/******************************************************************//**
When a table is dropped, this function is called to mark the table as removed
in btr_defragment_wq. The difference between this function and the
remove_index function is that this one will not NULL the event. */
void
btr_defragment_remove_table(
dict_table_t* table); /*!< table to be removed. */
/*********************************************************************//**
Check whether we should save defragmentation statistics to persistent storage.*/
void btr_defragment_save_defrag_stats_if_needed(dict_index_t *index);
/* Stop defragmentation.*/
void btr_defragment_end();
extern bool btr_defragment_active;
#endif

View File

@@ -1,101 +0,0 @@
/*****************************************************************************
Copyright (c) 2016, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
/**************************************************//**
@file include/dict0defrag_bg.h
Code used for background table and index
defragmentation
Created 25/08/2016 Jan Lindström
*******************************************************/
#ifndef dict0defrag_bg_h
#define dict0defrag_bg_h
#include "dict0types.h"
/** Indices whose defrag stats need to be saved to persistent storage.*/
struct defrag_pool_item_t {
table_id_t table_id;
index_id_t index_id;
};
/** Allocator type, used by std::vector */
typedef ut_allocator<defrag_pool_item_t>
defrag_pool_allocator_t;
/** The set of tables to be defragmented: an STL vector */
typedef std::vector<defrag_pool_item_t, defrag_pool_allocator_t>
defrag_pool_t;
/** Pool where we store information on which tables are to be processed
by background defragmentation. */
extern defrag_pool_t defrag_pool;
/*****************************************************************//**
Initialize the defrag pool, called once during thread initialization. */
void
dict_defrag_pool_init(void);
/*========================*/
/*****************************************************************//**
Free the resources occupied by the defrag pool, called once during
thread de-initialization. */
void
dict_defrag_pool_deinit(void);
/*==========================*/
/*****************************************************************//**
Add an index in a table to the defrag pool, which is processed by the
background stats gathering thread. Only the table id and index id are
added to the list, so the table can be closed after being enqueued and
it will be opened when needed. If the table or index does not exist later
(has been DROPped), then it will be removed from the pool and skipped. */
void
dict_stats_defrag_pool_add(
/*=======================*/
const dict_index_t* index); /*!< in: index to add */
/*****************************************************************//**
Delete a given index from the auto defrag pool. */
void
dict_stats_defrag_pool_del(
/*=======================*/
const dict_table_t* table, /*!<in: if given, remove
all entries for the table */
const dict_index_t* index); /*!< in: index to remove */
/**
Process each index that has been added for updating persistent defrag
stats, saving its stats if the index is still available. */
void dict_defrag_process_entries_from_defrag_pool(THD *thd);
/*********************************************************************//**
Save defragmentation result.
@return DB_SUCCESS or error code */
dberr_t dict_stats_save_defrag_summary(dict_index_t *index, THD *thd)
MY_ATTRIBUTE((nonnull, warn_unused_result));
/*********************************************************************//**
Save defragmentation stats for a given index.
@return DB_SUCCESS or error code */
dberr_t
dict_stats_save_defrag_stats(
/*============================*/
dict_index_t* index); /*!< in: index */
#endif /* dict0defrag_bg_h */

View File

@@ -945,10 +945,6 @@ struct zip_pad_info_t {
rounds */ rounds */
}; };
/** Number of samples of data size kept when page compression fails for
a certain index.*/
#define STAT_DEFRAG_DATA_SIZE_N_SAMPLE 10
/** "GEN_CLUST_INDEX" is the name reserved for InnoDB default /** "GEN_CLUST_INDEX" is the name reserved for InnoDB default
system clustered index when there is no primary key. */ system clustered index when there is no primary key. */
const char innobase_index_reserve_name[] = "GEN_CLUST_INDEX"; const char innobase_index_reserve_name[] = "GEN_CLUST_INDEX";
@@ -1115,23 +1111,6 @@ struct dict_index_t {
/*!< has persistent statistics error printed /*!< has persistent statistics error printed
for this index ? */ for this index ? */
/* @} */ /* @} */
/** Statistics for defragmentation; these numbers are estimates and
can be very inaccurate at certain times, e.g. right after restart,
during defragmentation, etc. */
/* @{ */
ulint stat_defrag_modified_counter;
ulint stat_defrag_n_pages_freed;
/* number of pages freed by defragmentation. */
ulint stat_defrag_n_page_split;
/* number of page splits since last full index
defragmentation. */
ulint stat_defrag_data_size_sample[STAT_DEFRAG_DATA_SIZE_N_SAMPLE];
/* data size when compression failure happened
the most recent 10 times. */
ulint stat_defrag_sample_next_slot;
/* in which slot the next sample should be
saved. */
/* @} */
private: private:
/** R-tree split sequence number */ /** R-tree split sequence number */
Atomic_relaxed<node_seq_t> rtr_ssn; Atomic_relaxed<node_seq_t> rtr_ssn;
@@ -2623,19 +2602,6 @@ dict_col_get_spatial_status(
return(spatial_status); return(spatial_status);
} }
/** Clear defragmentation summary. */
inline void dict_stats_empty_defrag_summary(dict_index_t* index)
{
index->stat_defrag_n_pages_freed = 0;
}
/** Clear defragmentation related index stats. */
inline void dict_stats_empty_defrag_stats(dict_index_t* index)
{
index->stat_defrag_modified_counter = 0;
index->stat_defrag_n_page_split = 0;
}
#include "dict0mem.inl" #include "dict0mem.inl"
#endif /* dict0mem_h */ #endif /* dict0mem_h */

View File

@@ -218,17 +218,6 @@ dict_stats_save_index_stat(
trx_t* trx) trx_t* trx)
MY_ATTRIBUTE((nonnull(1, 3, 6, 7))); MY_ATTRIBUTE((nonnull(1, 3, 6, 7)));
/** Report an error if updating table statistics failed because
.ibd file is missing, table decryption failed or table is corrupted.
@param[in,out] table Table
@param[in] defragment true if statistics is for defragment
@retval DB_DECRYPTION_FAILED if decryption of the table failed
@retval DB_TABLESPACE_DELETED if .ibd file is missing
@retval DB_CORRUPTION if table is marked as corrupted */
dberr_t
dict_stats_report_error(dict_table_t* table, bool defragment = false)
MY_ATTRIBUTE((nonnull, warn_unused_result));
#include "dict0stats.inl" #include "dict0stats.inl"
#ifdef UNIV_ENABLE_UNIT_TEST_DICT_STATS #ifdef UNIV_ENABLE_UNIT_TEST_DICT_STATS

View File

@@ -185,13 +185,6 @@ lock_update_split_left(
void lock_update_merge_left(const buf_block_t& left, const rec_t *orig_pred, void lock_update_merge_left(const buf_block_t& left, const rec_t *orig_pred,
const page_id_t right); const page_id_t right);
/** Update the locks when a page is split and merged to two pages,
in defragmentation. */
void lock_update_split_and_merge(
const buf_block_t* left_block, /*!< in: left page to which merged */
const rec_t* orig_pred, /*!< in: original predecessor of
supremum on the left page before merge*/
const buf_block_t* right_block);/*!< in: right page from which merged */
/*************************************************************//** /*************************************************************//**
Resets the original locks on heir and replaces them with gap type locks Resets the original locks on heir and replaces them with gap type locks
inherited from rec. */ inherited from rec. */

View File

@@ -265,16 +265,6 @@ extern ulong srv_read_ahead_threshold;
extern uint srv_n_read_io_threads; extern uint srv_n_read_io_threads;
extern uint srv_n_write_io_threads; extern uint srv_n_write_io_threads;
/* Defragmentation. Originally the Facebook default value was 100, but it is too high. */
#define SRV_DEFRAGMENT_FREQUENCY_DEFAULT 40
extern my_bool srv_defragment;
extern uint srv_defragment_n_pages;
extern uint srv_defragment_stats_accuracy;
extern uint srv_defragment_fill_factor_n_recs;
extern double srv_defragment_fill_factor;
extern uint srv_defragment_frequency;
extern ulonglong srv_defragment_interval;
/* Number of IO operations per second the server can do */ /* Number of IO operations per second the server can do */
extern ulong srv_io_capacity; extern ulong srv_io_capacity;
@@ -658,14 +648,6 @@ struct export_var_t{
/** Number of undo tablespace truncation operations */ /** Number of undo tablespace truncation operations */
ulong innodb_undo_truncations; ulong innodb_undo_truncations;
ulint innodb_defragment_compression_failures; /*!< Number of
defragment re-compression
failures */
ulint innodb_defragment_failures; /*!< Number of defragment
failures*/
ulint innodb_defragment_count; /*!< Number of defragment
operations*/
/** Number of instant ALTER TABLE operations that affect columns */ /** Number of instant ALTER TABLE operations that affect columns */
ulong innodb_instant_alter_column; ulong innodb_instant_alter_column;

View File

@@ -589,8 +589,7 @@ public:
So we take more expensive approach: get trx through current_thd()->ha_data. So we take more expensive approach: get trx through current_thd()->ha_data.
Some threads don't have trx attached to THD, and at least server Some threads don't have trx attached to THD, and at least server
initialisation thread, fts_optimize_thread, srv_master_thread, initialisation thread doesn't even
dict_stats_thread, srv_monitor_thread, btr_defragment_thread don't even
have THD at all. For such cases we allocate pins only for duration of have THD at all. For such cases we allocate pins only for duration of
search and free them immediately. search and free them immediately.

View File

@@ -6483,45 +6483,3 @@ void lock_sys_t::deadlock_check()
if (acquired) if (acquired)
wr_unlock(); wr_unlock();
} }
/** Update the locks when a page is split and merged to two pages,
in defragmentation. */
void lock_update_split_and_merge(
const buf_block_t* left_block, /*!< in: left page to which merged */
const rec_t* orig_pred, /*!< in: original predecessor of
supremum on the left page before merge*/
const buf_block_t* right_block) /*!< in: right page from which merged */
{
ut_ad(page_is_leaf(left_block->page.frame));
ut_ad(page_is_leaf(right_block->page.frame));
ut_ad(page_align(orig_pred) == left_block->page.frame);
const page_id_t l{left_block->page.id()};
const page_id_t r{right_block->page.id()};
const rec_t *left_next_rec= page_rec_get_next_const(orig_pred);
if (UNIV_UNLIKELY(!left_next_rec))
{
ut_ad("corrupted page" == 0);
return;
}
ut_ad(!page_rec_is_metadata(left_next_rec));
/* This would likely be too large for a memory transaction. */
LockMultiGuard g{lock_sys.rec_hash, l, r};
/* Inherit the locks on the supremum of the left page to the
first record which was moved from the right page */
lock_rec_inherit_to_gap(g.cell1(), l, g.cell1(), l, left_block->page.frame,
page_rec_get_heap_no(left_next_rec),
PAGE_HEAP_NO_SUPREMUM);
/* Reset the locks on the supremum of the left page,
releasing waiting transactions */
lock_rec_reset_and_release_wait(g.cell1(), l, PAGE_HEAP_NO_SUPREMUM);
/* Inherit the locks to the supremum of the left page from the
successor of the infimum on the right page */
lock_rec_inherit_to_gap(g.cell1(), l, g.cell2(), r, left_block->page.frame,
PAGE_HEAP_NO_SUPREMUM,
lock_get_min_heap_no(right_block));
}

View File

@@ -43,7 +43,6 @@ Created 12/9/1995 Heikki Tuuri
#include "log0recv.h" #include "log0recv.h"
#include "fil0fil.h" #include "fil0fil.h"
#include "dict0stats_bg.h" #include "dict0stats_bg.h"
#include "btr0defragment.h"
#include "srv0srv.h" #include "srv0srv.h"
#include "srv0start.h" #include "srv0start.h"
#include "trx0sys.h" #include "trx0sys.h"
@@ -1080,7 +1079,6 @@ ATTRIBUTE_COLD void logs_empty_and_mark_files_at_shutdown()
/* Wait for the end of the buffer resize task.*/ /* Wait for the end of the buffer resize task.*/
buf_resize_shutdown(); buf_resize_shutdown();
dict_stats_shutdown(); dict_stats_shutdown();
btr_defragment_shutdown();
srv_shutdown_state = SRV_SHUTDOWN_CLEANUP; srv_shutdown_state = SRV_SHUTDOWN_CLEANUP;

View File

@@ -60,7 +60,6 @@ Created 10/8/1995 Heikki Tuuri
#include "srv0start.h" #include "srv0start.h"
#include "trx0i_s.h" #include "trx0i_s.h"
#include "trx0purge.h" #include "trx0purge.h"
#include "btr0defragment.h"
#include "ut0mem.h" #include "ut0mem.h"
#include "fil0fil.h" #include "fil0fil.h"
#include "fil0crypt.h" #include "fil0crypt.h"
@@ -318,21 +317,6 @@ uint srv_spin_wait_delay;
/** Number of initialized rollback segments for persistent undo log */ /** Number of initialized rollback segments for persistent undo log */
ulong srv_available_undo_logs; ulong srv_available_undo_logs;
/* Defragmentation */
my_bool srv_defragment;
/** innodb_defragment_n_pages */
uint srv_defragment_n_pages;
uint srv_defragment_stats_accuracy;
/** innodb_defragment_fill_factor_n_recs */
uint srv_defragment_fill_factor_n_recs;
/** innodb_defragment_fill_factor */
double srv_defragment_fill_factor;
/** innodb_defragment_frequency */
uint srv_defragment_frequency;
/** derived from innodb_defragment_frequency;
@see innodb_defragment_frequency_update() */
ulonglong srv_defragment_interval;
/** Current mode of operation */ /** Current mode of operation */
enum srv_operation_mode srv_operation; enum srv_operation_mode srv_operation;
@@ -994,11 +978,6 @@ srv_export_innodb_status(void)
export_vars.innodb_n_temp_blocks_decrypted = export_vars.innodb_n_temp_blocks_decrypted =
srv_stats.n_temp_blocks_decrypted; srv_stats.n_temp_blocks_decrypted;
export_vars.innodb_defragment_compression_failures =
btr_defragment_compression_failures;
export_vars.innodb_defragment_failures = btr_defragment_failures;
export_vars.innodb_defragment_count = btr_defragment_count;
export_vars.innodb_onlineddl_rowlog_rows = onlineddl_rowlog_rows; export_vars.innodb_onlineddl_rowlog_rows = onlineddl_rowlog_rows;
export_vars.innodb_onlineddl_rowlog_pct_used = onlineddl_rowlog_pct_used; export_vars.innodb_onlineddl_rowlog_pct_used = onlineddl_rowlog_pct_used;
export_vars.innodb_onlineddl_pct_progress = onlineddl_pct_progress; export_vars.innodb_onlineddl_pct_progress = onlineddl_pct_progress;

View File

@@ -71,7 +71,6 @@ Created 2/16/1996 Heikki Tuuri
#include "rem0rec.h" #include "rem0rec.h"
#include "srv0start.h" #include "srv0start.h"
#include "srv0srv.h" #include "srv0srv.h"
#include "btr0defragment.h"
#include "mysql/service_wsrep.h" /* wsrep_recovery */ #include "mysql/service_wsrep.h" /* wsrep_recovery */
#include "trx0rseg.h" #include "trx0rseg.h"
#include "buf0flu.h" #include "buf0flu.h"
@@ -1902,9 +1901,6 @@ skip_monitors:
fil_crypt_threads_cond. */ fil_crypt_threads_cond. */
fil_crypt_threads_init(); fil_crypt_threads_init();
/* Initialize online defragmentation. */
btr_defragment_init();
srv_started_redo = true; srv_started_redo = true;
} }
@@ -2000,7 +1996,6 @@ void innodb_shutdown()
fts_optimize_shutdown(); dict_stats_shutdown(); */ fts_optimize_shutdown(); dict_stats_shutdown(); */
fil_crypt_threads_cleanup(); fil_crypt_threads_cleanup();
btr_defragment_shutdown();
} }
/* This must be disabled before closing the buffer pool /* This must be disabled before closing the buffer pool