1
0
mirror of https://github.com/MariaDB/server.git synced 2025-08-01 03:47:19 +03:00

MDEV-29693 ANALYZE TABLE still flushes table definition cache when engine-independent statistics is used

This commits enables reloading of engine-independent statistics
without flushing the table from table definition cache.

This is achieved by allowing multiple versions of the
TABLE_STATISTICS_CB object and having independent pointers to it in
TABLE and TABLE_SHARE.  The TABLE_STATISTICS_CB objects are reference
counted and are freed when no one is pointing to them anymore.

TABLE's TABLE_STATISTICS_CB pointer is updated to use the
TABLE_SHARE's pointer when read_statistics_for_tables() is called at
the beginning of a query.

Main changes:
- read_statistics_for_table() will allocate a new TABLE_STATISTICS_CB
  object.
- All get_stat_values() functions have a new parameter that tells
  where collected data should be stored. get_stat_values() are not
  using the table_field object anymore to store data.
- All get_stat_values() functions return 1 if they found any
  data in the statistics tables.

Other things:
- Fixed INSERT DELAYED to not read statistics tables.
- Removed Statistics_state from TABLE_STATISTICS_CB as this is not
  needed anymore as we are not changing TABLE_SHARE->stats_cb while
  calculating or loading statistics.
- Store values used with store_from_statistical_minmax_field() in
  TABLE_STATISTICS_CB::mem_root. This allowed me to remove the function
  delete_stat_values_for_table_share().
  - Field_blob::store_from_statistical_minmax_field() is implemented
    but is not normally used as we do not yet support EIS statistics
    for blobs. For example Field_blob::update_min() and
    Field_blob::update_max() are not implemented.
    Note that the function can be called if there is a concurrent
    "ALTER TABLE MODIFY field BLOB" running because of a bug in
    ALTER TABLE where it deletes entries from column_stats
    before it has an exclusive lock on the table.
- Use result of field->val_str(&val) as a pointer to the result
  instead of val (safety fix).
- Allocate memory for collected statistics in THD::mem_root, not
  in TABLE::mem_root. This could cause the TABLE object to grow if an
  ANALYZE TABLE was run many times on the same table.
  This was done in allocate_statistics_for_table(),
  create_min_max_statistical_fields_for_table() and
  create_min_max_statistical_fields_for_table_share().
- Store in TABLE_STATISTICS_CB::stats_available which statistics were
  found in the statistics tables.
- Removed index_table from class Index_prefix_calc as it was not used.
- Added TABLE_SHARE::LOCK_statistics to ensure we don't load EITS
  in parallel. First thread will load it, others will reuse the
  loaded data.
- Eliminate read_histograms_for_table(). The loading happens within
  read_statistics_for_tables() if histograms are needed.
  One downside is that if we have read statistics without histograms
  before and someone requires histograms, we have to read all statistics
  again (once) from the statistics tables.
  A smaller downside is the need to call alloc_root() for each
  individual histogram. Before we could allocate all the space for
  histograms with a single alloc_root.
- Fixed bug in MyISAM and Aria where they did not properly notice
  that the table had changed after ANALYZE TABLE. This was not a problem
  before this patch as then the MyISAM and Aria tables were flushed
  as part of ANALYZE TABLE, which hid this issue.
- Fixed a bug in ANALYZE table where table->records could be seen as 0
  in collect_statistics_for_table(). The effect of this unlikely bug
  was that a full table scan could be done even if
  analyze_sample_percentage was not set to 1.
- Changed multiple mallocs in a row to use multi_alloc_root().
- Added a mutex protection in update_statistics_for_table() to ensure
  that several tables are not updating the statistics at the same time.

Some of the changes in sql_statistics.cc are based on a patch from
Oleg Smirnov <olernov@gmail.com>

Co-authored-by: Oleg Smirnov <olernov@gmail.com>
Co-authored-by: Vicentiu Ciorbaru <cvicentiu@gmail.com>
Reviewer: Sergei Petrunia <sergey@mariadb.com>
This commit is contained in:
Monty
2023-08-05 01:08:05 +03:00
parent 88dd50b80a
commit a6bf4b5807
26 changed files with 1030 additions and 568 deletions

View File

@ -71,3 +71,31 @@ optimize table t1 extended;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'extended' at line 1
drop table t1;
End of 5.0 tests
#
# Test analyze of text column (not yet supported)
#
set optimizer_use_condition_selectivity=4;
set histogram_type='single_prec_hb';
set histogram_size=255;
create table t1 (a int not null, t tinytext, tx text);
insert into t1 select seq+1, repeat('X',seq*5), repeat('X',seq*10) from seq_0_to_50;
insert into t1 select seq+100, repeat('X',5), "" from seq_1_to_10;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze Warning Engine-independent statistics are not collected for column 't'
test.t1 analyze Warning Engine-independent statistics are not collected for column 'tx'
test.t1 analyze status OK
explain select count(*) from t1 where t='XXXXXX';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 61 Using where
select column_name, min_value, max_value, hist_size from mysql.column_stats where table_name='t1';
column_name min_value max_value hist_size
a 1 110 255
drop table t1;
set use_stat_tables=default;
set histogram_type=default;
set histogram_size=default;
#
# End of 10.6 tests
#

View File

@ -1,3 +1,5 @@
--source include/have_sequence.inc
#
# Bug #10901 Analyze Table on new table destroys table
# This is minimal test case to get error
@ -87,3 +89,28 @@ optimize table t1 extended;
drop table t1;
--echo End of 5.0 tests
--echo #
--echo # Test analyze of text column (not yet supported)
--echo #
set optimizer_use_condition_selectivity=4;
set histogram_type='single_prec_hb';
set histogram_size=255;
create table t1 (a int not null, t tinytext, tx text);
insert into t1 select seq+1, repeat('X',seq*5), repeat('X',seq*10) from seq_0_to_50;
insert into t1 select seq+100, repeat('X',5), "" from seq_1_to_10;
analyze table t1;
explain select count(*) from t1 where t='XXXXXX';
select column_name, min_value, max_value, hist_size from mysql.column_stats where table_name='t1';
drop table t1;
set use_stat_tables=default;
set histogram_type=default;
set histogram_size=default;
--echo #
--echo # End of 10.6 tests
--echo #

View File

@ -1280,7 +1280,7 @@ pk v pk v
SHOW STATUS LIKE 'Handler_read_%';
Variable_name Value
Handler_read_first 0
Handler_read_key 14
Handler_read_key 1
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0

View File

@ -1804,7 +1804,7 @@ sum(t3.b)
show status like "handler_read%";
Variable_name Value
Handler_read_first 0
Handler_read_key 13
Handler_read_key 4
Handler_read_last 0
Handler_read_next 5
Handler_read_prev 0
@ -1819,7 +1819,7 @@ sum(t3.b)
show status like "handler_read%";
Variable_name Value
Handler_read_first 0
Handler_read_key 7
Handler_read_key 4
Handler_read_last 0
Handler_read_next 5
Handler_read_prev 0

View File

@ -1811,7 +1811,7 @@ sum(t3.b)
show status like "handler_read%";
Variable_name Value
Handler_read_first 0
Handler_read_key 13
Handler_read_key 4
Handler_read_last 0
Handler_read_next 5
Handler_read_prev 0
@ -1826,7 +1826,7 @@ sum(t3.b)
show status like "handler_read%";
Variable_name Value
Handler_read_first 0
Handler_read_key 7
Handler_read_key 4
Handler_read_last 0
Handler_read_next 5
Handler_read_prev 0

View File

@ -350,7 +350,6 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
VARIABLE_NAME VARIABLE_VALUE
HANDLER_COMMIT 1
HANDLER_READ_FIRST 1
HANDLER_READ_KEY 8
HANDLER_TMP_WRITE 24
# Should be 1 commit
# 4 locks (1 ha_partition + 1 ha_innobase) x 2 (lock/unlock)
@ -777,7 +776,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
VARIABLE_NAME VARIABLE_VALUE
HANDLER_COMMIT 1
HANDLER_READ_KEY 8
HANDLER_READ_KEY 6
HANDLER_READ_RND_NEXT 2
HANDLER_TMP_WRITE 24
HANDLER_UPDATE 2

View File

@ -903,4 +903,82 @@ id select_type table type possible_keys key key_len ref rows r_rows filtered r_f
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 2.78 10.00 Using where
drop table t1;
set @@global.histogram_size=@save_histogram_size;
#
# End of 10.4 tests
#
#
# MDEV-29693 ANALYZE TABLE still flushes table definition cache
# when engine-independent statistics is used
#
create table t1 (a int);
insert into t1 select seq from seq_0_to_99;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
connect con1, localhost, root,,;
connection con1;
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
connection default;
update t1 set a= a +100;
# Explain shows outdated statistics:
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
connection con1;
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
connection default;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Now explain shows updated statistics:
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 1.00 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
connection con1;
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 1.00 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
connection con1;
# Run update and analyze in con1:
update t1 set a= a - 150;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
connection default;
# Explain shows updated statistics:
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 99.22 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
disconnect con1;
drop table t1;
#
# End of 10.6 tests
#

View File

@ -1,9 +1,9 @@
# Tests will be skipped for the view protocol because the view protocol creates
# an additional util connection and other statistics data
-- source include/no_view_protocol.inc
--source include/no_view_protocol.inc
--source include/have_stat_tables.inc
--source include/have_partition.inc
--source include/have_sequence.inc
select @@global.use_stat_tables;
select @@session.use_stat_tables;
@ -640,4 +640,64 @@ drop table t1;
set @@global.histogram_size=@save_histogram_size;
--echo #
--echo # End of 10.4 tests
--echo #
--echo #
--echo # MDEV-29693 ANALYZE TABLE still flushes table definition cache
--echo # when engine-independent statistics is used
--echo #
create table t1 (a int);
insert into t1 select seq from seq_0_to_99;
analyze table t1 persistent for all;
analyze table t1 persistent for all;
explain extended select count(*) from t1 where a < 50;
connect (con1, localhost, root,,);
--connection con1
explain extended select count(*) from t1 where a < 50;
let $open_tables=`select variable_value from information_schema.global_status where variable_name="OPENED_TABLES"`;
--connection default
update t1 set a= a +100;
--echo # Explain shows outdated statistics:
explain extended select count(*) from t1 where a < 50;
--connection con1
explain extended select count(*) from t1 where a < 50;
--connection default
analyze table t1 persistent for all;
--echo # Now explain shows updated statistics:
explain extended select count(*) from t1 where a < 50;
--connection con1
explain extended select count(*) from t1 where a < 50;
--connection con1
--echo # Run update and analyze in con1:
update t1 set a= a - 150;
analyze table t1 persistent for all;
--connection default
--echo # Explain shows updated statistics:
explain extended select count(*) from t1 where a < 50;
disconnect con1;
let $new_open_tables=`select variable_value from information_schema.global_status where variable_name="OPENED_TABLES"`;
if ($open_tables != $new_open_tables)
{
--let $diff=`select $new_open_tables - $open_tables`
--echo "Fail: Test opened $diff new tables, 0 was expected"
}
drop table t1;
--echo #
--echo # End of 10.6 tests
--echo #

View File

@ -0,0 +1,91 @@
#
# Check that ANALYZE TABLE is remembered by MyISAM and Aria
#
create table t1 (a int) engine=myisam;
insert into t1 select seq from seq_0_to_99;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
flush tables;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
update t1 set a=100 where a=1;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
update t1 set a=100 where a=2;
flush tables;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Aria transactional=0
ALTER TABLE t1 ENGINE=aria transactional=0;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
update t1 set a=100 where a=10;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
flush tables;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
update t1 set a=100 where a=11;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
update t1 set a=100 where a=12;
flush tables;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
# Aria transactional=1
ALTER TABLE t1 ENGINE=aria transactional=1;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
update t1 set a=100 where a=20;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
flush tables;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
update t1 set a=100 where a=21;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
update t1 set a=100 where a=22;
flush tables;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
drop table t1;
#
# End of 10.5 tests
#

View File

@ -0,0 +1,50 @@
--source include/have_sequence.inc
--echo #
--echo # Check that ANALYZE TABLE is remembered by MyISAM and Aria
--echo #
create table t1 (a int) engine=myisam;
insert into t1 select seq from seq_0_to_99;
analyze table t1 persistent for all;
flush tables;
analyze table t1 persistent for all;
update t1 set a=100 where a=1;
analyze table t1 persistent for all;
update t1 set a=100 where a=2;
flush tables;
analyze table t1 persistent for all;
--echo # Aria transactional=0
ALTER TABLE t1 ENGINE=aria transactional=0;
analyze table t1 persistent for all;
update t1 set a=100 where a=10;
analyze table t1 persistent for all;
analyze table t1 persistent for all;
flush tables;
analyze table t1 persistent for all;
update t1 set a=100 where a=11;
analyze table t1 persistent for all;
update t1 set a=100 where a=12;
flush tables;
analyze table t1 persistent for all;
--echo # Aria transactional=1
ALTER TABLE t1 ENGINE=aria transactional=1;
analyze table t1 persistent for all;
update t1 set a=100 where a=20;
analyze table t1 persistent for all;
analyze table t1 persistent for all;
flush tables;
analyze table t1 persistent for all;
update t1 set a=100 where a=21;
analyze table t1 persistent for all;
update t1 set a=100 where a=22;
flush tables;
analyze table t1 persistent for all;
drop table t1;
--echo #
--echo # End of 10.5 tests
--echo #

View File

@ -935,7 +935,85 @@ id select_type table type possible_keys key key_len ref rows r_rows filtered r_f
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 2.78 10.00 Using where
drop table t1;
set @@global.histogram_size=@save_histogram_size;
#
# End of 10.4 tests
#
#
# MDEV-29693 ANALYZE TABLE still flushes table definition cache
# when engine-independent statistics is used
#
create table t1 (a int);
insert into t1 select seq from seq_0_to_99;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
connect con1, localhost, root,,;
connection con1;
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
connection default;
update t1 set a= a +100;
# Explain shows outdated statistics:
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
connection con1;
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 50.51 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
connection default;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Now explain shows updated statistics:
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 1.00 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
connection con1;
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 1.00 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
connection con1;
# Run update and analyze in con1:
update t1 set a= a - 150;
analyze table t1 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
connection default;
# Explain shows updated statistics:
explain extended select count(*) from t1 where a < 50;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100 99.22 Using where
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`a` < 50
disconnect con1;
drop table t1;
#
# End of 10.6 tests
#
set global innodb_stats_persistent= @innodb_stats_persistent_save;
set global innodb_stats_persistent_sample_pages=
@innodb_stats_persistent_sample_pages_save;

View File

@ -1997,13 +1997,35 @@ int Field::store_to_statistical_minmax_field(Field *field, String *val)
}
int Field::store_from_statistical_minmax_field(Field *stat_field, String *str)
int Field::store_from_statistical_minmax_field(Field *stat_field, String *str,
MEM_ROOT *mem)
{
stat_field->val_str(str);
return store_text(str->ptr(), str->length(), &my_charset_bin);
}
/*
  Store a min/max value read from a statistics table into this blob field.

  Same as Field::store_from_statistical_minmax_field(), but the value
  bytes are copied into 'mem' (the statistics mem_root) to make it easy
  to free everything by just freeing the mem_root.

  @param stat_field  Field of the statistics table holding the value
  @param str         String buffer used to fetch the value
  @param mem         mem_root the blob data is duplicated into

  @retval 0  ok
  @retval 1  out of memory; the blob is reset to a zero-length NULL pointer
*/
int Field_blob::store_from_statistical_minmax_field(Field *stat_field,
String *str,
MEM_ROOT *mem)
{
/* Use the String returned by val_str(); it may not be 'str' itself */
String *tmp= stat_field->val_str(str);
uchar *ptr;
if (!(ptr= (uchar*) memdup_root(mem, tmp->ptr(), tmp->length())))
{
set_ptr((uint32) 0, NULL);
return 1;
}
set_ptr(tmp->length(), ptr);
return 0;
}
/**
Pack the field into a format suitable for storage and transfer.

View File

@ -1011,7 +1011,8 @@ public:
field statistical table field
str value buffer
*/
virtual int store_from_statistical_minmax_field(Field *field, String *str);
virtual int store_from_statistical_minmax_field(Field *field, String *str,
MEM_ROOT *mem);
#ifdef HAVE_MEM_CHECK
/**
@ -4469,6 +4470,8 @@ public:
}
bool make_empty_rec_store_default_value(THD *thd, Item *item) override;
int store(const char *to, size_t length, CHARSET_INFO *charset) override;
int store_from_statistical_minmax_field(Field *stat_field, String *str,
MEM_ROOT *mem) override;
using Field_str::store;
void hash_not_null(Hasher *hasher) override;
double val_real() override;

View File

@ -919,6 +919,7 @@ PSI_mutex_key key_LOCK_gtid_waiting;
PSI_mutex_key key_LOCK_after_binlog_sync;
PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered;
PSI_mutex_key key_TABLE_SHARE_LOCK_share;
PSI_mutex_key key_TABLE_SHARE_LOCK_statistics;
PSI_mutex_key key_LOCK_ack_receiver;
PSI_mutex_key key_TABLE_SHARE_LOCK_rotation;
@ -986,6 +987,7 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_structure_guard_mutex, "Query_cache::structure_guard_mutex", 0},
{ &key_TABLE_SHARE_LOCK_ha_data, "TABLE_SHARE::LOCK_ha_data", 0},
{ &key_TABLE_SHARE_LOCK_share, "TABLE_SHARE::LOCK_share", 0},
{ &key_TABLE_SHARE_LOCK_statistics, "TABLE_SHARE::LOCK_statistics", 0},
{ &key_TABLE_SHARE_LOCK_rotation, "TABLE_SHARE::LOCK_rotation", 0},
{ &key_LOCK_error_messages, "LOCK_error_messages", PSI_FLAG_GLOBAL},
{ &key_LOCK_prepare_ordered, "LOCK_prepare_ordered", PSI_FLAG_GLOBAL},

View File

@ -334,6 +334,7 @@ extern PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
key_relay_log_info_log_space_lock, key_relay_log_info_run_lock,
key_rpl_group_info_sleep_lock,
key_structure_guard_mutex, key_TABLE_SHARE_LOCK_ha_data,
key_TABLE_SHARE_LOCK_statistics,
key_LOCK_start_thread,
key_LOCK_error_messages,
key_PARTITION_LOCK_auto_inc;

View File

@ -923,8 +923,14 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
bitmap_clear_all(tab->read_set);
for (uint fields= 0; *field_ptr; field_ptr++, fields++)
{
/*
Note that type() always returns MYSQL_TYPE_BLOB for
all blob types. Another function needs to be added
if we in the future want to distinguish between blob
types here.
*/
enum enum_field_types type= (*field_ptr)->type();
if (type < MYSQL_TYPE_MEDIUM_BLOB ||
if (type < MYSQL_TYPE_TINY_BLOB ||
type > MYSQL_TYPE_BLOB)
tab->field[fields]->register_field_in_read_map();
else
@ -952,7 +958,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
}
pos--;
enum enum_field_types type= tab->field[pos]->type();
if (type < MYSQL_TYPE_MEDIUM_BLOB ||
if (type < MYSQL_TYPE_TINY_BLOB ||
type > MYSQL_TYPE_BLOB)
tab->field[pos]->register_field_in_read_map();
else
@ -984,6 +990,8 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
tab->keys_in_use_for_query.set_bit(--pos);
}
}
/* Ensure that number of records are updated */
table->table->file->info(HA_STATUS_VARIABLE);
if (!(compl_result_code=
alloc_statistics_for_table(thd, table->table)) &&
!(compl_result_code=
@ -1279,13 +1287,8 @@ send_result_message:
if (table->table && !table->view)
{
/*
Don't skip flushing if we are collecting EITS statistics.
*/
const bool skip_flush=
(operator_func == &handler::ha_analyze) &&
(table->table->file->ha_table_flags() & HA_ONLINE_ANALYZE) &&
!collect_eis;
/* Skip FLUSH TABLES if we are doing analyze */
const bool skip_flush= (operator_func == &handler::ha_analyze);
if (table->table->s->tmp_table)
{
/*
@ -1305,6 +1308,13 @@ send_result_message:
table->table= 0; // For query cache
query_cache_invalidate3(thd, table, 0);
}
else if (collect_eis && skip_flush && compl_result_code == HA_ADMIN_OK)
{
TABLE_LIST *save_next_global= table->next_global;
table->next_global= 0;
read_statistics_for_tables(thd, table, true /* force_reload */);
table->next_global= save_next_global;
}
}
/* Error path, a admin command failed. */
if (thd->transaction_rollback_request || fatal_error)

View File

@ -5323,7 +5323,8 @@ bool open_and_lock_tables(THD *thd, const DDL_options_st &options,
goto err;
/* Don't read statistics tables when opening internal tables */
if (!(flags & MYSQL_OPEN_IGNORE_LOGGING_FORMAT))
if (!(flags & (MYSQL_OPEN_IGNORE_LOGGING_FORMAT |
MYSQL_OPEN_IGNORE_ENGINE_STATS)))
(void) read_statistics_for_tables_if_needed(thd, tables);
if (derived)

View File

@ -130,6 +130,9 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update,
*/
#define MYSQL_OPEN_IGNORE_LOGGING_FORMAT 0x20000
/* Don't use statistics tables */
#define MYSQL_OPEN_IGNORE_ENGINE_STATS 0x40000
/** Please refer to the internals manual. */
#define MYSQL_OPEN_REOPEN (MYSQL_OPEN_IGNORE_FLUSH |\
MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK |\

View File

@ -580,7 +580,8 @@ bool open_and_lock_for_insert_delayed(THD *thd, TABLE_LIST *table_list)
Open tables used for sub-selects or in stored functions, will also
cache these functions.
*/
if (open_and_lock_tables(thd, table_list->next_global, TRUE, 0))
if (open_and_lock_tables(thd, table_list->next_global, TRUE,
MYSQL_OPEN_IGNORE_ENGINE_STATS))
{
end_delayed_insert(thd);
error= TRUE;
@ -2751,6 +2752,9 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
/* Ensure we don't use the table list of the original table */
copy->pos_in_table_list= 0;
/* We don't need statistics for insert delayed */
copy->stats_cb= 0;
/*
Make a copy of all fields.
The copied fields need to point into the copied record. This is done

View File

@ -6750,7 +6750,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
KEY *key_info=show_table->s->key_info;
if (show_table->file)
{
(void) read_statistics_for_tables(thd, tables);
(void) read_statistics_for_tables(thd, tables, false);
show_table->file->info(HA_STATUS_VARIABLE |
HA_STATUS_NO_LOCK |
HA_STATUS_CONST |

File diff suppressed because it is too large Load Diff

View File

@ -116,9 +116,9 @@ bool check_eits_preferred(THD *thd)
}
int read_statistics_for_tables_if_needed(THD *thd, TABLE_LIST *tables);
int read_statistics_for_tables(THD *thd, TABLE_LIST *tables);
int read_statistics_for_tables(THD *thd, TABLE_LIST *tables,
bool force_reload);
int collect_statistics_for_table(THD *thd, TABLE *table);
void delete_stat_values_for_table_share(TABLE_SHARE *table_share);
int alloc_statistics_for_table(THD *thd, TABLE *table);
int update_statistics_for_table(THD *thd, TABLE *table);
int delete_statistics_for_table(THD *thd, const LEX_CSTRING *db, const LEX_CSTRING *tab);
@ -308,7 +308,7 @@ public:
/* Array of records per key for index prefixes */
ulonglong *idx_avg_frequency;
uchar *histograms; /* Sequence of histograms */
uchar *histograms; /* Sequence of histograms */
};

View File

@ -373,14 +373,13 @@ TABLE_SHARE *alloc_table_share(const char *db, const char *table_name,
table_alias_charset->strnncoll(key, 6, "mysql", 6) == 0)
share->not_usable_by_query_cache= 1;
init_sql_alloc(PSI_INSTRUMENT_ME, &share->stats_cb.mem_root,
TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));
memcpy((char*) &share->mem_root, (char*) &mem_root, sizeof(mem_root));
mysql_mutex_init(key_TABLE_SHARE_LOCK_share,
&share->LOCK_share, MY_MUTEX_INIT_SLOW);
mysql_mutex_init(key_TABLE_SHARE_LOCK_ha_data,
&share->LOCK_ha_data, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_TABLE_SHARE_LOCK_statistics,
&share->LOCK_statistics, MY_MUTEX_INIT_SLOW);
DBUG_EXECUTE_IF("simulate_big_table_id",
if (last_table_id < UINT_MAX32)
@ -481,15 +480,19 @@ void TABLE_SHARE::destroy()
ha_share= NULL; // Safety
}
delete_stat_values_for_table_share(this);
if (stats_cb)
{
stats_cb->usage_count--;
delete stats_cb;
}
delete sequence;
free_root(&stats_cb.mem_root, MYF(0));
/* The mutexes are initialized only for shares that are part of the TDC */
if (tmp_table == NO_TMP_TABLE)
{
mysql_mutex_destroy(&LOCK_share);
mysql_mutex_destroy(&LOCK_ha_data);
mysql_mutex_destroy(&LOCK_statistics);
}
my_hash_free(&name_hash);
@ -4527,6 +4530,72 @@ partititon_err:
}
/**
  Free engine-independent statistics attached to this TABLE

  Decrements the reference count of the TABLE_STATISTICS_CB under
  s->LOCK_share and deletes the object if this was the last reference.
  This is only called from closefrm() when the TABLE object is destroyed
**/
void TABLE::free_engine_stats()
{
bool free_stats= 0;
TABLE_STATISTICS_CB *stats= stats_cb;
/* s->LOCK_share protects usage_count, shared with the TABLE_SHARE */
mysql_mutex_lock(&s->LOCK_share);
free_stats= --stats->usage_count == 0;
mysql_mutex_unlock(&s->LOCK_share);
/* Delete outside the mutex to keep the critical section short */
if (free_stats)
delete stats;
}
/*
  Use engine stats from table_share if table_share has been updated

  Switches this TABLE to the TABLE_STATISTICS_CB currently held by the
  TABLE_SHARE: releases the reference to the old statistics object
  (deleting it if this was the last user) and takes a reference on
  s->stats_cb.  The reference counting is done under s->LOCK_share.
*/
void TABLE::update_engine_independent_stats()
{
bool free_stats= 0;
TABLE_STATISTICS_CB *org_stats= stats_cb;
/* Caller is expected to call us only when the share has newer stats */
DBUG_ASSERT(stats_cb != s->stats_cb);
if (stats_cb != s->stats_cb)
{
mysql_mutex_lock(&s->LOCK_share);
if (org_stats)
free_stats= --org_stats->usage_count == 0;
/* The share may have no stats; stats_cb then becomes NULL */
if ((stats_cb= s->stats_cb))
stats_cb->usage_count++;
mysql_mutex_unlock(&s->LOCK_share);
/* Delete outside the mutex to keep the critical section short */
if (free_stats)
delete org_stats;
}
}
/*
  Update engine stats in table share to use new stats

  Installs new_stats as the share's TABLE_STATISTICS_CB and releases the
  share's reference to the previous one, deleting it if no TABLE object
  is still using it.  Done under LOCK_share.
*/
void
TABLE_SHARE::update_engine_independent_stats(TABLE_STATISTICS_CB *new_stats)
{
TABLE_STATISTICS_CB *free_stats= 0;
/* new_stats must be freshly created and not yet shared */
DBUG_ASSERT(new_stats->usage_count == 0);
mysql_mutex_lock(&LOCK_share);
if (stats_cb)
{
if (!--stats_cb->usage_count)
free_stats= stats_cb;
}
stats_cb= new_stats;
new_stats->usage_count++;
mysql_mutex_unlock(&LOCK_share);
/* Delete outside the mutex to keep the critical section short */
if (free_stats)
delete free_stats;
}
/*
Free information allocated by openfrm
@ -4565,6 +4634,12 @@ int closefrm(TABLE *table)
table->part_info= 0;
}
#endif
if (table->stats_cb)
{
DBUG_ASSERT(table->s->tmp_table == NO_TMP_TABLE);
table->free_engine_stats();
}
free_root(&table->mem_root, MYF(0));
DBUG_RETURN(error);
}

View File

@ -624,94 +624,55 @@ enum open_frm_error {
from persistent statistical tables
*/
#define TABLE_STAT_NO_STATS 0
#define TABLE_STAT_TABLE 1
#define TABLE_STAT_COLUMN 2
#define TABLE_STAT_INDEX 4
#define TABLE_STAT_HISTOGRAM 8
/*
EITS statistics information for a table.
This data is loaded from mysql.{table|index|column}_stats tables and
then most of the time is owned by table's TABLE_SHARE object.
Individual TABLE objects also have a pointer to this object, and we do
reference counting to know when to free it. See
TABLE::update_engine_independent_stats(), TABLE::free_engine_stats(),
TABLE_SHARE::update_engine_independent_stats(), TABLE_SHARE::destroy().
These implement "shared pointer"-like functionality.
When new statistics are loaded, we create a new TABLE_STATISTICS_CB and
make the TABLE_SHARE point to it. Some TABLE objects may still be using
older TABLE_STATISTICS_CB objects. Reference counting allows us to free a
TABLE_STATISTICS_CB once it is no longer used.
*/
class TABLE_STATISTICS_CB
{
class Statistics_state
{
enum state_codes
{
EMPTY, /** data is not loaded */
LOADING, /** data is being loaded in some connection */
READY /** data is loaded and available for use */
};
int32 state;
public:
/** No state copy */
Statistics_state &operator=(const Statistics_state &) { return *this; }
/** Checks if data loading have been completed */
bool is_ready() const
{
return my_atomic_load32_explicit(const_cast<int32*>(&state),
MY_MEMORY_ORDER_ACQUIRE) == READY;
}
/**
Sets mutual exclusion for data loading
If stats are in LOADING state, waits until state change.
@return
@retval true atomic EMPTY -> LOADING transfer completed, ok to load
@retval false stats are in READY state, no need to load
*/
bool start_load()
{
for (;;)
{
int32 expected= EMPTY;
if (my_atomic_cas32_weak_explicit(&state, &expected, LOADING,
MY_MEMORY_ORDER_RELAXED,
MY_MEMORY_ORDER_RELAXED))
return true;
if (expected == READY)
return false;
(void) LF_BACKOFF();
}
}
/** Marks data available for subsequent use */
void end_load()
{
DBUG_ASSERT(my_atomic_load32_explicit(&state, MY_MEMORY_ORDER_RELAXED) ==
LOADING);
my_atomic_store32_explicit(&state, READY, MY_MEMORY_ORDER_RELEASE);
}
/** Restores empty state on error (e.g. OOM) */
void abort_load()
{
DBUG_ASSERT(my_atomic_load32_explicit(&state, MY_MEMORY_ORDER_RELAXED) ==
LOADING);
my_atomic_store32_explicit(&state, EMPTY, MY_MEMORY_ORDER_RELAXED);
}
};
class Statistics_state stats_state;
class Statistics_state hist_state;
uint usage_count; // Instances of this stat
public:
TABLE_STATISTICS_CB();
~TABLE_STATISTICS_CB();
MEM_ROOT mem_root; /* MEM_ROOT to allocate statistical data for the table */
Table_statistics *table_stats; /* Structure to access the statistical data */
ulong total_hist_size; /* Total size of all histograms */
uint stats_available;
bool histograms_are_ready() const
bool histograms_exists() const
{
return !total_hist_size || hist_state.is_ready();
return total_hist_size != 0;
}
bool start_histograms_load()
bool unused()
{
return total_hist_size && hist_state.start_load();
return usage_count == 0;
}
void end_histograms_load() { hist_state.end_load(); }
void abort_histograms_load() { hist_state.abort_load(); }
bool stats_are_ready() const { return stats_state.is_ready(); }
bool start_stats_load() { return stats_state.start_load(); }
void end_stats_load() { stats_state.end_load(); }
void abort_stats_load() { stats_state.abort_load(); }
/* Copy (latest) state from TABLE_SHARE to TABLE */
void update_stats_in_table(TABLE *table);
friend struct TABLE;
friend struct TABLE_SHARE;
};
/**
@ -734,6 +695,7 @@ struct TABLE_SHARE
TYPELIB *intervals; /* pointer to interval info */
mysql_mutex_t LOCK_ha_data; /* To protect access to ha_data */
mysql_mutex_t LOCK_share; /* To protect TABLE_SHARE */
mysql_mutex_t LOCK_statistics; /* To protect against concurrent load */
TDC_element *tdc;
@ -750,7 +712,17 @@ struct TABLE_SHARE
uint *blob_field; /* Index to blobs in Field arrray*/
LEX_CUSTRING vcol_defs; /* definitions of generated columns */
TABLE_STATISTICS_CB stats_cb;
/*
EITS statistics data from the last time the table was opened or ANALYZE
TABLE was run.
This is typically the same as any related TABLE::stats_cb until ANALYZE
TABLE is run.
This pointer must only be de-referenced while holding LOCK_share, as the
pointer can be changed by another thread running ANALYZE TABLE.
Without holding LOCK_share, one can still check whether the statistics
have been updated by testing TABLE::stats_cb != TABLE_SHARE::stats_cb.
*/
TABLE_STATISTICS_CB *stats_cb;
uchar *default_values; /* row with default values */
LEX_CSTRING comment; /* Comment about table */
@ -1174,7 +1146,6 @@ struct TABLE_SHARE
void set_overlapped_keys();
void set_ignored_indexes();
key_map usable_indexes(THD *thd);
bool old_long_hash_function() const
{
return mysql_version < 100428 ||
@ -1189,6 +1160,7 @@ struct TABLE_SHARE
Item_func_hash *make_long_hash_func(THD *thd,
MEM_ROOT *mem_root,
List<Item> *field_list) const;
void update_engine_independent_stats(TABLE_STATISTICS_CB *stat);
};
/* not NULL, but cannot be dereferenced */
@ -1577,6 +1549,7 @@ public:
and can be useful for range optimizer.
*/
Item *notnull_cond;
TABLE_STATISTICS_CB *stats_cb;
inline void reset() { bzero((void*)this, sizeof(*this)); }
void init(THD *thd, TABLE_LIST *tl);
@ -1606,6 +1579,8 @@ public:
void mark_columns_used_by_virtual_fields(void);
void mark_check_constraint_columns_for_read(void);
int verify_constraints(bool ignore_failure);
void free_engine_stats();
void update_engine_independent_stats();
inline void column_bitmaps_set(MY_BITMAP *read_set_arg)
{
read_set= read_set_arg;

View File

@ -395,7 +395,15 @@ int _ma_mark_file_changed(register MARIA_SHARE *share)
if (!share->base.born_transactional)
{
if (!_MA_ALREADY_MARKED_FILE_CHANGED)
return _ma_mark_file_changed_now(share);
{
int res= _ma_mark_file_changed_now(share);
/*
Ensure that STATE_NOT_ANALYZED is reset on table changes
*/
share->state.changed|= (STATE_CHANGED | STATE_NOT_ANALYZED |
STATE_NOT_OPTIMIZED_KEYS);
return res;
}
}
else
{
@ -409,10 +417,10 @@ int _ma_mark_file_changed(register MARIA_SHARE *share)
(STATE_CHANGED | STATE_NOT_ANALYZED |
STATE_NOT_OPTIMIZED_KEYS)))
{
mysql_mutex_lock(&share->intern_lock);
mysql_mutex_lock(&share->intern_lock);
share->state.changed|=(STATE_CHANGED | STATE_NOT_ANALYZED |
STATE_NOT_OPTIMIZED_KEYS);
mysql_mutex_unlock(&share->intern_lock);
STATE_NOT_OPTIMIZED_KEYS);
mysql_mutex_unlock(&share->intern_lock);
}
}
return 0;
@ -430,7 +438,7 @@ int _ma_mark_file_changed_now(register MARIA_SHARE *share)
if (! _MA_ALREADY_MARKED_FILE_CHANGED)
{
share->state.changed|=(STATE_CHANGED | STATE_NOT_ANALYZED |
STATE_NOT_OPTIMIZED_KEYS);
STATE_NOT_OPTIMIZED_KEYS);
if (!share->global_changed)
{
share->changed= share->global_changed= 1;

View File

@ -603,12 +603,15 @@ int _mi_mark_file_changed(MI_INFO *info)
{
uchar buff[3];
register MYISAM_SHARE *share=info->s;
uint32 state;
DBUG_ENTER("_mi_mark_file_changed");
if (!(share->state.changed & STATE_CHANGED) || ! share->global_changed)
state= share->state.changed;
share->state.changed|= (STATE_CHANGED | STATE_NOT_ANALYZED |
STATE_NOT_OPTIMIZED_KEYS);
if (!(state & STATE_CHANGED) || ! share->global_changed)
{
share->state.changed|=(STATE_CHANGED | STATE_NOT_ANALYZED |
STATE_NOT_OPTIMIZED_KEYS);
if (!share->global_changed)
{
share->global_changed=1;