
MDEV-31067: selectivity_from_histogram >1.0 for a DOUBLE_PREC_HB histogram

Variant #2.

When Histogram::point_selectivity() sees that the point value of interest
falls into one bucket, it tries to guess whether the bucket holds many
different (unpopular) values or a few popular ones. (The number of rows
per bucket is fixed, as it's a height-balanced histogram.)
The basis for this guess is the "width" of the value range the bucket
covers: buckets covering wider value ranges are assumed to contain
values with proportionally lower frequencies.

This is just [brave] guesswork. For a very narrow bucket, it may
produce an estimate that's larger than the total number of rows in the
bucket, or even in the whole table.
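
A sketch of the old guess, with hypothetical names (avg_sel,
avg_bucket_width, bucket_width are assumptions for illustration, not the
server's actual code):

  // Hypothetical sketch of the width-based guess, not the actual
  // Histogram::point_selectivity() code. Widths are fractions of the
  // column's value domain, normalized so the whole domain has width 1.0.
  static double point_selectivity_old(double avg_sel,          // avg. selectivity of col=const
                                      double avg_bucket_width, // 1.0 / n_buckets
                                      double bucket_width)     // width of the bucket that was hit
  {
    // A bucket N times narrower than average is assumed to hold values
    // that are each N times more frequent than average. The ratio is
    // unbounded as bucket_width approaches 0.
    return avg_sel * avg_bucket_width / bucket_width;
  }

For example, with avg_sel= 0.01 and a bucket 1000 times narrower than
average, this yields 0.01 * 1000 = 10.0, i.e. "1000% of the table".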

Remove the guesswork and replace it with basic logic: return either
the per-table average selectivity of col=const or the selectivity of
one bucket, whichever is lower.
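
A sketch of the replacement logic under the same assumed names (an
outline of the idea, not the committed code):

  #include <algorithm>

  // With a height-balanced histogram, each bucket holds an equal share
  // of the table's rows, so a value confined to one bucket cannot match
  // more than 1.0/n_buckets of the rows.
  static double point_selectivity_new(double avg_sel, unsigned n_buckets)
  {
    double bucket_sel= 1.0 / n_buckets;
    return std::min(avg_sel, bucket_sel);
  }

The estimate is now capped by one bucket's share of the rows, which by
construction also keeps it at or below 1.0.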
Author: Sergei Petrunia
Date: 2023-04-19 15:15:27 +03:00
parent bc970573b3
commit 85cc831880
5 changed files with 234 additions and 53 deletions


@@ -1319,14 +1319,93 @@ EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2;
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
set histogram_size=@save_histogram_size;
set use_stat_tables= @save_use_stat_tables;
DROP TABLE t1;
--echo # End of 10.2 tests
--echo #
--echo # MDEV-31067: selectivity_from_histogram >1.0 for a DOUBLE_PREC_HB histogram
--echo #
create table t0(a int); # t0 holds one bucket's worth of rows (78)
insert into t0 select 1 from seq_1_to_78;
create table t1(a int); # one-third of a bucket
insert into t1 select 1 from seq_1_to_26;
create table t10 (a int);
insert into t10 select 0 from t0, seq_1_to_4;
insert into t10 select 8693 from t1;
insert into t10 select 8694 from t1;
insert into t10 select 8695 from t1;
insert into t10 select 34783 from t1;
insert into t10 select 34784 from t1;
insert into t10 select 34785 from t1;
insert into t10 select 34785 from t0, seq_1_to_8;
insert into t10 select 65214 from t1;
insert into t10 select 65215 from t1;
insert into t10 select 65216 from t1;
insert into t10 select 65216 from t0, seq_1_to_52;
insert into t10 select 65217 from t1;
insert into t10 select 65218 from t1;
insert into t10 select 65219 from t1;
insert into t10 select 65219 from t0;
insert into t10 select 73913 from t1;
insert into t10 select 73914 from t1;
insert into t10 select 73915 from t1;
insert into t10 select 73915 from t0, seq_1_to_40;
insert into t10 select 78257 from t1;
insert into t10 select 78258 from t1;
insert into t10 select 78259 from t1;
insert into t10 select 91300 from t1;
insert into t10 select 91301 from t1;
insert into t10 select 91302 from t1;
insert into t10 select 91302 from t0, seq_1_to_6;
insert into t10 select 91303 from t1; # Only one-third of the bucket matches the search tuple
insert into t10 select 91304 from t1;
insert into t10 select 91305 from t1;
insert into t10 select 91305 from t0, seq_1_to_8;
insert into t10 select 99998 from t1;
insert into t10 select 99999 from t1;
insert into t10 select 100000 from t1;
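# Sanity arithmetic: the inserts above give t10 9984 rows in total,
# i.e. 128 buckets of 78 rows each. The value 91303 occurs 26 times
# (one-third of a bucket), so its true selectivity is 26/9984 ~= 0.0026,
# below one bucket's share of 78/9984 ~= 0.0078.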
set use_stat_tables=preferably;
analyze table t10 persistent for all;
flush tables;
set @tmp=@@optimizer_trace;
set optimizer_trace=1;
explain select * from t10 where a in (91303);
--echo # Must have selectivity_from_histogram <= 1.0:
select json_detailed(json_extract(trace, '$**.selectivity_for_columns'))
from information_schema.optimizer_trace;
set optimizer_trace=@tmp;
drop table t0,t1,t10;
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
set histogram_size=@save_histogram_size;
set use_stat_tables= @save_use_stat_tables;
#
# Clean up
#