MDEV-18741: Optimizer trace: multi-part key ranges are printed incorrectly
Changed the function append_range_all_keyparts to use sel_arg_range_seq_init / sel_arg_range_seq_next to produce the ranges. Also adjusted the print format for the ranges; they are now printed as: (keypart1_min, keypart2_min, ..) OP (keypart1_name, keypart2_name, ..) OP (keypart1_max, keypart2_max, ..). Also added more optimizer trace tests for range and index merge access.
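For example (taken from the test results updated by this commit), with an index a_b(a,b) the condition a=1 AND b=2 was previously traced as
  "ranges": ["1 <= a <= 1 AND 2 <= b <= 2"]
and is now traced as
  "ranges": ["(1,2) <= (a,b) <= (1,2)"]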
@ -1248,7 +1248,7 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
|
||||
{
|
||||
"index": "a",
|
||||
"covering": true,
|
||||
"ranges": ["2 <= b <= 2 AND 3 <= c <= 3"],
|
||||
"ranges": ["(2,3) <= (b,c) <= (2,3)"],
|
||||
"rows": 8,
|
||||
"cost": 2.2
|
||||
}
|
||||
@ -1264,7 +1264,7 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
|
||||
"rows": 8,
|
||||
"cost": 2.2,
|
||||
"key_parts_used_for_access": ["a", "b", "c"],
|
||||
"ranges": ["2 <= b <= 2 AND 3 <= c <= 3"],
|
||||
"ranges": ["(2,3) <= (b,c) <= (2,3)"],
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
},
|
||||
@ -1446,7 +1446,7 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
|
||||
{
|
||||
"index": "id",
|
||||
"covering": true,
|
||||
"ranges": ["0x24a20f <= a"],
|
||||
"ranges": ["(0x24a20f) <= (a)"],
|
||||
"rows": 9,
|
||||
"cost": 2.35
|
||||
}
|
||||
@ -1462,7 +1462,7 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
|
||||
"rows": 9,
|
||||
"cost": 2.35,
|
||||
"key_parts_used_for_access": ["id"],
|
||||
"ranges": ["0x24a20f <= a"],
|
||||
"ranges": ["(0x24a20f) <= (a)"],
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
},
|
||||
@ -1624,7 +1624,7 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
|
||||
{
|
||||
"index": "id",
|
||||
"covering": true,
|
||||
"ranges": ["0x24a20f <= a <= 0x24a20f"],
|
||||
"ranges": ["(0x24a20f) <= (a) <= (0x24a20f)"],
|
||||
"rows": 9,
|
||||
"cost": 2.35
|
||||
}
|
||||
@ -1640,7 +1640,7 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
|
||||
"rows": 9,
|
||||
"cost": 2.35,
|
||||
"key_parts_used_for_access": ["id", "a"],
|
||||
"ranges": ["0x24a20f <= a <= 0x24a20f"],
|
||||
"ranges": ["(0x24a20f) <= (a) <= (0x24a20f)"],
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
},
|
||||
@ -1856,7 +1856,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
|
||||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "a_c",
|
||||
"ranges": ["1 <= a <= 1"],
|
||||
"ranges": ["(1) <= (a) <= (1)"],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
@ -1866,7 +1866,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
|
||||
},
|
||||
{
|
||||
"index": "a_b",
|
||||
"ranges": ["1 <= a <= 1 AND 2 <= b <= 2"],
|
||||
"ranges": ["(1,2) <= (a,b) <= (1,2)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
@ -1885,7 +1885,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
|
||||
"type": "range_scan",
|
||||
"index": "a_b",
|
||||
"rows": 21,
|
||||
"ranges": ["1 <= a <= 1 AND 2 <= b <= 2"]
|
||||
"ranges": ["(1,2) <= (a,b) <= (1,2)"]
|
||||
},
|
||||
"rows_for_plan": 21,
|
||||
"cost_for_plan": 27.445,
|
||||
@ -2025,7 +2025,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
|
||||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "a_c",
|
||||
"ranges": ["1 <= a <= 1"],
|
||||
"ranges": ["(1) <= (a) <= (1)"],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
@ -2044,7 +2044,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
|
||||
"type": "range_scan",
|
||||
"index": "a_c",
|
||||
"rows": 180,
|
||||
"ranges": ["1 <= a <= 1"]
|
||||
"ranges": ["(1) <= (a) <= (1)"]
|
||||
},
|
||||
"rows_for_plan": 180,
|
||||
"cost_for_plan": 231.72,
|
||||
@ -2895,7 +2895,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
|
||||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "pk",
|
||||
"ranges": ["2 <= pk <= 2"],
|
||||
"ranges": ["(2) <= (pk) <= (2)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
@ -2906,7 +2906,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
|
||||
},
|
||||
{
|
||||
"index": "pk_a",
|
||||
"ranges": ["2 <= pk <= 2 AND 5 <= a <= 5"],
|
||||
"ranges": ["(2,5) <= (pk,a) <= (2,5)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
@ -2917,7 +2917,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
|
||||
},
|
||||
{
|
||||
"index": "pk_a_b",
|
||||
"ranges": ["2 <= pk <= 2 AND 5 <= a <= 5 AND 1 <= b <= 1"],
|
||||
"ranges": ["(2,5,1) <= (pk,a,b) <= (2,5,1)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
@ -2964,7 +2964,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
|
||||
"type": "range_scan",
|
||||
"index": "pk_a_b",
|
||||
"rows": 1,
|
||||
"ranges": ["2 <= pk <= 2 AND 5 <= a <= 5 AND 1 <= b <= 1"]
|
||||
"ranges": ["(2,5,1) <= (pk,a,b) <= (2,5,1)"]
|
||||
},
|
||||
"rows_for_plan": 1,
|
||||
"cost_for_plan": 1.1793,
|
||||
@ -3338,7 +3338,7 @@ explain delete from t0 where t0.a<3 {
|
||||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "a",
|
||||
"ranges": ["NULL < a < 3"],
|
||||
"ranges": ["(NULL) < (a) < (3)"],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
@ -3354,7 +3354,7 @@ explain delete from t0 where t0.a<3 {
|
||||
"type": "range_scan",
|
||||
"index": "a",
|
||||
"rows": 3,
|
||||
"ranges": ["NULL < a < 3"]
|
||||
"ranges": ["(NULL) < (a) < (3)"]
|
||||
},
|
||||
"rows_for_plan": 3,
|
||||
"cost_for_plan": 5.007,
|
||||
@ -3481,7 +3481,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
|
||||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "a",
|
||||
"ranges": ["NULL < a < 3"],
|
||||
"ranges": ["(NULL) < (a) < (3)"],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
@ -3500,7 +3500,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
|
||||
"type": "range_scan",
|
||||
"index": "a",
|
||||
"rows": 3,
|
||||
"ranges": ["NULL < a < 3"]
|
||||
"ranges": ["(NULL) < (a) < (3)"]
|
||||
},
|
||||
"rows_for_plan": 3,
|
||||
"cost_for_plan": 1.407,
|
||||
@ -3546,7 +3546,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
|
||||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "a",
|
||||
"ranges": ["NULL < a < 3"],
|
||||
"ranges": ["(NULL) < (a) < (3)"],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
@ -3565,7 +3565,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
|
||||
"type": "range_scan",
|
||||
"index": "a",
|
||||
"rows": 3,
|
||||
"ranges": ["NULL < a < 3"]
|
||||
"ranges": ["(NULL) < (a) < (3)"]
|
||||
},
|
||||
"rows_for_plan": 3,
|
||||
"cost_for_plan": 1.407,
|
||||
@ -6034,4 +6034,238 @@ COUNT(*)
|
||||
1
|
||||
DROP VIEW v1;
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-18741: Optimizer trace: multi-part key ranges are printed incorrectly.
|
||||
#
|
||||
create table t0(a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table one_k (a int);
|
||||
insert into one_k select A.a + B.a*10 + C.a*100 from t0 A, t0 B, t0 C;
|
||||
create table t1 ( a int, b int, key a_b(a,b));
|
||||
insert into t1 select a,a from one_k;
|
||||
set optimizer_trace='enabled=on';
|
||||
explain select * from t1 force index (a_b) where a=2 and b=4;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref a_b a_b 10 const,const 1 Using index
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "a_b",
|
||||
"ranges":
|
||||
[
|
||||
"(2,4) <= (a,b) <= (2,4)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
"rows": 1,
|
||||
"cost": 1.1783,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
explain select * from t1 where a >= 900 and b between 10 and 20;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range a_b a_b 10 NULL 107 Using where; Using index
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "a_b",
|
||||
"ranges":
|
||||
[
|
||||
"(900,10) <= (a,b)"
|
||||
],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
"rows": 107,
|
||||
"cost": 10.955,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
drop table t0,t1;
|
||||
create table t1 (start_date date, end_date date, filler char(100), key(start_date, end_date)) ;
|
||||
insert into t1 select date_add(now(), interval a day), date_add(now(), interval (a+7) day), 'data' from one_k;
|
||||
explain select * from t1 force index(start_date) where start_date >= '2019-02-10' and end_date <'2019-04-01';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range start_date start_date 8 NULL 1000 Using index condition
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "start_date",
|
||||
"ranges":
|
||||
[
|
||||
"(0x4ac60f,NULL) < (start_date,end_date)"
|
||||
],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 1000,
|
||||
"cost": 1282.2,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
drop table t1,one_k;
|
||||
create table ten(a int);
|
||||
insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table t1 (
|
||||
a int not null,
|
||||
b int not null,
|
||||
c int not null,
|
||||
d int not null,
|
||||
key a_b_c(a,b,c)
|
||||
);
|
||||
insert into t1 select a,a, a,a from ten;
|
||||
explain select * from t1 force index(a_b_c) where a between 1 and 4 and b < 50;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range a_b_c a_b_c 8 NULL 4 Using index condition
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "a_b_c",
|
||||
"ranges":
|
||||
[
|
||||
"(1) <= (a,b) < (4,50)"
|
||||
],
|
||||
"rowid_ordered": false,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 4,
|
||||
"cost": 6.2648,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
drop table ten,t1;
|
||||
# Ported test from MYSQL for ranges involving Binary column
|
||||
CREATE TABLE t1(i INT PRIMARY KEY, b BINARY(16), INDEX i_b(b));
|
||||
INSERT INTO t1 VALUES (1, x'D95B94336A9946A39CF5B58CFE772D8C');
|
||||
INSERT INTO t1 VALUES (2, NULL);
|
||||
EXPLAIN SELECT * FROM t1 WHERE b IN (0xD95B94336A9946A39CF5B58CFE772D8C);
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref i_b i_b 17 const 1 Using index condition
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "i_b",
|
||||
"ranges":
|
||||
[
|
||||
"(0xd95b94336a9946a39cf5b58cfe772d8c) <= (b) <= (0xd95b94336a9946a39cf5b58cfe772d8c)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 1,
|
||||
"cost": 2.3797,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
EXPLAIN SELECT * FROM t1 WHERE b IS NULL;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref i_b i_b 17 const 1 Using index condition
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "i_b",
|
||||
"ranges":
|
||||
[
|
||||
"(NULL) <= (b) <= (NULL)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 1,
|
||||
"cost": 2.3797,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
drop table t1;
|
||||
set optimizer_trace='enabled=off';
|
||||
|
@ -387,4 +387,61 @@ SELECT COUNT(*) FROM v1 WHERE MATCH (f) AGAINST ('fooba');
|
||||
DROP VIEW v1;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-18741: Optimizer trace: multi-part key ranges are printed incorrectly.
|
||||
--echo #
|
||||
|
||||
create table t0(a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table one_k (a int);
|
||||
insert into one_k select A.a + B.a*10 + C.a*100 from t0 A, t0 B, t0 C;
|
||||
create table t1 ( a int, b int, key a_b(a,b));
|
||||
insert into t1 select a,a from one_k;
|
||||
set optimizer_trace='enabled=on';
|
||||
|
||||
explain select * from t1 force index (a_b) where a=2 and b=4;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
|
||||
explain select * from t1 where a >= 900 and b between 10 and 20;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
|
||||
drop table t0,t1;
|
||||
|
||||
create table t1 (start_date date, end_date date, filler char(100), key(start_date, end_date)) ;
|
||||
--disable_warnings
|
||||
insert into t1 select date_add(now(), interval a day), date_add(now(), interval (a+7) day), 'data' from one_k;
|
||||
--enable_warnings
|
||||
explain select * from t1 force index(start_date) where start_date >= '2019-02-10' and end_date <'2019-04-01';
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
drop table t1,one_k;
|
||||
|
||||
create table ten(a int);
|
||||
insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table t1 (
|
||||
a int not null,
|
||||
b int not null,
|
||||
c int not null,
|
||||
d int not null,
|
||||
key a_b_c(a,b,c)
|
||||
);
|
||||
|
||||
insert into t1 select a,a, a,a from ten;
|
||||
explain select * from t1 force index(a_b_c) where a between 1 and 4 and b < 50;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
drop table ten,t1;
|
||||
|
||||
--echo # Ported test from MYSQL for ranges involving Binary column
|
||||
|
||||
CREATE TABLE t1(i INT PRIMARY KEY, b BINARY(16), INDEX i_b(b));
|
||||
INSERT INTO t1 VALUES (1, x'D95B94336A9946A39CF5B58CFE772D8C');
|
||||
INSERT INTO t1 VALUES (2, NULL);
|
||||
|
||||
EXPLAIN SELECT * FROM t1 WHERE b IN (0xD95B94336A9946A39CF5B58CFE772D8C);
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
|
||||
EXPLAIN SELECT * FROM t1 WHERE b IS NULL;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
|
||||
drop table t1;
|
||||
|
||||
set optimizer_trace='enabled=off';
|
||||
|
@ -110,7 +110,7 @@ explain select * from t1 where a=1 or b=1 {
|
||||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "a",
|
||||
"ranges": ["1 <= a <= 1"],
|
||||
"ranges": ["(1) <= (a) <= (1)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
@ -126,7 +126,7 @@ explain select * from t1 where a=1 or b=1 {
|
||||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "b",
|
||||
"ranges": ["1 <= b <= 1"],
|
||||
"ranges": ["(1) <= (b) <= (1)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
@ -147,7 +147,7 @@ explain select * from t1 where a=1 or b=1 {
|
||||
"type": "range_scan",
|
||||
"index": "a",
|
||||
"rows": 1,
|
||||
"ranges": ["1 <= a <= 1"],
|
||||
"ranges": ["(1) <= (a) <= (1)"],
|
||||
"analyzing_roworder_intersect": {
|
||||
"cause": "too few roworder scans"
|
||||
}
|
||||
@ -156,7 +156,7 @@ explain select * from t1 where a=1 or b=1 {
|
||||
"type": "range_scan",
|
||||
"index": "b",
|
||||
"rows": 1,
|
||||
"ranges": ["1 <= b <= 1"],
|
||||
"ranges": ["(1) <= (b) <= (1)"],
|
||||
"analyzing_roworder_intersect": {
|
||||
"cause": "too few roworder scans"
|
||||
}
|
||||
@ -176,13 +176,13 @@ explain select * from t1 where a=1 or b=1 {
|
||||
"type": "range_scan",
|
||||
"index": "a",
|
||||
"rows": 1,
|
||||
"ranges": ["1 <= a <= 1"]
|
||||
"ranges": ["(1) <= (a) <= (1)"]
|
||||
},
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "b",
|
||||
"rows": 1,
|
||||
"ranges": ["1 <= b <= 1"]
|
||||
"ranges": ["(1) <= (b) <= (1)"]
|
||||
}
|
||||
]
|
||||
},
|
||||
@ -243,3 +243,500 @@ explain select * from t1 where a=1 or b=1 {
|
||||
drop table t0,t1;
|
||||
set optimizer_trace="enabled=off";
|
||||
set @@optimizer_switch= @tmp_opt_switch;
|
||||
# More tests added index_merge access
|
||||
create table t1
|
||||
(
|
||||
/* Field names reflect value(rowid) distribution, st=STairs, swt= SaWTooth */
|
||||
st_a int not null default 0,
|
||||
swt1a int not null default 0,
|
||||
swt2a int not null default 0,
|
||||
st_b int not null default 0,
|
||||
swt1b int not null default 0,
|
||||
swt2b int not null default 0,
|
||||
/* fields/keys for row retrieval tests */
|
||||
key1 int,
|
||||
key2 int,
|
||||
key3 int,
|
||||
key4 int,
|
||||
/* make rows much bigger then keys */
|
||||
filler1 char (200),
|
||||
filler2 char (200),
|
||||
filler3 char (200),
|
||||
filler4 char (200),
|
||||
filler5 char (200),
|
||||
filler6 char (200),
|
||||
/* order of keys is important */
|
||||
key sta_swt12a(st_a,swt1a,swt2a),
|
||||
key sta_swt1a(st_a,swt1a),
|
||||
key sta_swt2a(st_a,swt2a),
|
||||
key sta_swt21a(st_a,swt2a,swt1a),
|
||||
key st_a(st_a),
|
||||
key stb_swt1a_2b(st_b,swt1b,swt2a),
|
||||
key stb_swt1b(st_b,swt1b),
|
||||
key st_b(st_b),
|
||||
key(key1),
|
||||
key(key2),
|
||||
key(key3),
|
||||
key(key4)
|
||||
) ;
|
||||
create table t0 as select * from t1;
|
||||
# Printing of many insert into t0 values (....) disabled.
|
||||
alter table t1 disable keys;
|
||||
# Printing of many insert into t1 select .... from t0 disabled.
|
||||
# Printing of many insert into t1 (...) values (....) disabled.
|
||||
alter table t1 enable keys;
|
||||
insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, -1, -1, 'key1-key2');
|
||||
insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 100, 100, 'key4-key3');
|
||||
set optimizer_trace='enabled=on';
|
||||
# 3-way ROR-intersection
|
||||
explain select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 index_merge key1,key2,key3 key1,key2,key3 5,5,5 NULL 2 Using intersect(key1,key2,key3); Using where; Using index
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "key1",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key1) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 2243,
|
||||
"cost": 2862.1,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key2",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key2) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 2243,
|
||||
"cost": 2862.1,
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key3",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key3) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
"rows": 2243,
|
||||
"cost": 2862.1,
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
}
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"intersecting_indexes":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "key1",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 58.252,
|
||||
"disk_sweep_cost": 1923.1,
|
||||
"cumulative_total_cost": 1981.4,
|
||||
"usable": true,
|
||||
"matching_rows_now": 2243,
|
||||
"intersect_covering_with_this_index": false,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key2",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 116.5,
|
||||
"disk_sweep_cost": 84.518,
|
||||
"cumulative_total_cost": 201.02,
|
||||
"usable": true,
|
||||
"matching_rows_now": 77.636,
|
||||
"intersect_covering_with_this_index": false,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key3",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 174.76,
|
||||
"disk_sweep_cost": 0,
|
||||
"cumulative_total_cost": 174.76,
|
||||
"usable": true,
|
||||
"matching_rows_now": 2.6872,
|
||||
"intersect_covering_with_this_index": true,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"clustered_pk":
|
||||
{
|
||||
"clustered_pk_added_to_intersect": false,
|
||||
"cause": "no clustered pk index"
|
||||
},
|
||||
"rows": 2,
|
||||
"cost": 174.76,
|
||||
"covering": true,
|
||||
"chosen": true
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
]
|
||||
}
|
||||
]
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_access_plan":
|
||||
{
|
||||
"type": "index_roworder_intersect",
|
||||
"rows": 2,
|
||||
"cost": 174.76,
|
||||
"covering": true,
|
||||
"clustered_pk_scan": false,
|
||||
"intersect_of":
|
||||
[
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key1",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key1) <= (100)"
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key2",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key2) <= (100)"
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key3",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key3) <= (100)"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"rows_for_plan": 2,
|
||||
"cost_for_plan": 174.76,
|
||||
"chosen": true
|
||||
}
|
||||
]
|
||||
# ROR-union(ROR-intersection, ROR-range)
|
||||
explain select key1,key2,key3,key4 from t1 where key1=100 and key2=100 or key3=100 and key4=100;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 154 Using union(intersect(key1,key2),intersect(key3,key4)); Using where
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"cause": "too few roworder scans"
|
||||
},
|
||||
"analyzing_index_merge_union":
|
||||
[
|
||||
|
||||
{
|
||||
"indexes_to_merge":
|
||||
[
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "key1",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key1) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
"rows": 2243,
|
||||
"cost": 170.53,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key2",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key2) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
"rows": 2243,
|
||||
"cost": 170.53,
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
}
|
||||
],
|
||||
"index_to_merge": "key1",
|
||||
"cumulated_cost": 170.53
|
||||
},
|
||||
|
||||
{
|
||||
"range_scan_alternatives":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "key3",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key3) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
"rows": 2243,
|
||||
"cost": 170.53,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key4",
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key4) <= (100)"
|
||||
],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": true,
|
||||
"rows": 2243,
|
||||
"cost": 170.53,
|
||||
"chosen": false,
|
||||
"cause": "cost"
|
||||
}
|
||||
],
|
||||
"index_to_merge": "key3",
|
||||
"cumulated_cost": 341.05
|
||||
}
|
||||
],
|
||||
"cost_of_reading_ranges": 341.05,
|
||||
"use_roworder_union": true,
|
||||
"cause": "always cheaper than non roworder retrieval",
|
||||
"analyzing_roworder_scans":
|
||||
[
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key1",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key1) <= (100)"
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"intersecting_indexes":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "key1",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 58.252,
|
||||
"disk_sweep_cost": 1923.1,
|
||||
"cumulative_total_cost": 1981.4,
|
||||
"usable": true,
|
||||
"matching_rows_now": 2243,
|
||||
"intersect_covering_with_this_index": false,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key2",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 116.5,
|
||||
"disk_sweep_cost": 84.518,
|
||||
"cumulative_total_cost": 201.02,
|
||||
"usable": true,
|
||||
"matching_rows_now": 77.636,
|
||||
"intersect_covering_with_this_index": false,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"clustered_pk":
|
||||
{
|
||||
"clustered_pk_added_to_intersect": false,
|
||||
"cause": "no clustered pk index"
|
||||
},
|
||||
"rows": 77,
|
||||
"cost": 201.02,
|
||||
"covering": false,
|
||||
"chosen": true
|
||||
}
|
||||
},
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key3",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key3) <= (100)"
|
||||
],
|
||||
"analyzing_roworder_intersect":
|
||||
{
|
||||
"intersecting_indexes":
|
||||
[
|
||||
|
||||
{
|
||||
"index": "key3",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 58.252,
|
||||
"disk_sweep_cost": 1923.1,
|
||||
"cumulative_total_cost": 1981.4,
|
||||
"usable": true,
|
||||
"matching_rows_now": 2243,
|
||||
"intersect_covering_with_this_index": false,
|
||||
"chosen": true
|
||||
},
|
||||
|
||||
{
|
||||
"index": "key4",
|
||||
"index_scan_cost": 58.252,
|
||||
"cumulated_index_scan_cost": 116.5,
|
||||
"disk_sweep_cost": 84.518,
|
||||
"cumulative_total_cost": 201.02,
|
||||
"usable": true,
|
||||
"matching_rows_now": 77.636,
|
||||
"intersect_covering_with_this_index": false,
|
||||
"chosen": true
|
||||
}
|
||||
],
|
||||
"clustered_pk":
|
||||
{
|
||||
"clustered_pk_added_to_intersect": false,
|
||||
"cause": "no clustered pk index"
|
||||
},
|
||||
"rows": 77,
|
||||
"cost": 201.02,
|
||||
"covering": false,
|
||||
"chosen": true
|
||||
}
|
||||
}
|
||||
],
|
||||
"index_roworder_union_cost": 386.73,
|
||||
"members": 2,
|
||||
"chosen": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
|
||||
[
|
||||
|
||||
{
|
||||
"range_access_plan":
|
||||
{
|
||||
"type": "index_roworder_union",
|
||||
"union_of":
|
||||
[
|
||||
|
||||
{
|
||||
"type": "index_roworder_intersect",
|
||||
"rows": 77,
|
||||
"cost": 201.02,
|
||||
"covering": false,
|
||||
"clustered_pk_scan": false,
|
||||
"intersect_of":
|
||||
[
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key1",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key1) <= (100)"
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key2",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key2) <= (100)"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"type": "index_roworder_intersect",
|
||||
"rows": 77,
|
||||
"cost": 201.02,
|
||||
"covering": false,
|
||||
"clustered_pk_scan": false,
|
||||
"intersect_of":
|
||||
[
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key3",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key3) <= (100)"
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"type": "range_scan",
|
||||
"index": "key4",
|
||||
"rows": 2243,
|
||||
"ranges":
|
||||
[
|
||||
"(100) <= (key4) <= (100)"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"rows_for_plan": 154,
|
||||
"cost_for_plan": 386.73,
|
||||
"chosen": true
|
||||
}
|
||||
]
|
||||
drop table t0,t1;
|
||||
set optimizer_trace="enabled=off";
|
||||
|
@ -19,3 +19,115 @@ select * from information_schema.OPTIMIZER_TRACE;
|
||||
drop table t0,t1;
|
||||
set optimizer_trace="enabled=off";
|
||||
set @@optimizer_switch= @tmp_opt_switch;
|
||||
|
||||
--echo # More tests added index_merge access
|
||||
|
||||
--enable_warnings
|
||||
create table t1
|
||||
(
|
||||
/* Field names reflect value(rowid) distribution, st=STairs, swt= SaWTooth */
|
||||
st_a int not null default 0,
|
||||
swt1a int not null default 0,
|
||||
swt2a int not null default 0,
|
||||
|
||||
st_b int not null default 0,
|
||||
swt1b int not null default 0,
|
||||
swt2b int not null default 0,
|
||||
|
||||
/* fields/keys for row retrieval tests */
|
||||
key1 int,
|
||||
key2 int,
|
||||
key3 int,
|
||||
key4 int,
|
||||
|
||||
/* make rows much bigger then keys */
|
||||
filler1 char (200),
|
||||
filler2 char (200),
|
||||
filler3 char (200),
|
||||
filler4 char (200),
|
||||
filler5 char (200),
|
||||
filler6 char (200),
|
||||
|
||||
/* order of keys is important */
|
||||
key sta_swt12a(st_a,swt1a,swt2a),
|
||||
key sta_swt1a(st_a,swt1a),
|
||||
key sta_swt2a(st_a,swt2a),
|
||||
key sta_swt21a(st_a,swt2a,swt1a),
|
||||
key st_a(st_a),
|
||||
key stb_swt1a_2b(st_b,swt1b,swt2a),
|
||||
key stb_swt1b(st_b,swt1b),
|
||||
key st_b(st_b),
|
||||
|
||||
key(key1),
|
||||
key(key2),
|
||||
key(key3),
|
||||
key(key4)
|
||||
) ;
|
||||
# Fill table
|
||||
create table t0 as select * from t1;
|
||||
--disable_query_log
|
||||
--echo # Printing of many insert into t0 values (....) disabled.
|
||||
let $cnt=1000;
|
||||
while ($cnt)
|
||||
{
|
||||
eval insert into t0 values (1, 2, 3, 1, 2, 3, 0, 0, 0, 0, 'data1', 'data2', 'data3', 'data4', 'data5', 'data6');
|
||||
dec $cnt;
|
||||
}
|
||||
--enable_query_log
|
||||
|
||||
alter table t1 disable keys;
|
||||
--disable_query_log
|
||||
--echo # Printing of many insert into t1 select .... from t0 disabled.
|
||||
let $1=4;
|
||||
while ($1)
|
||||
{
|
||||
let $2=4;
|
||||
while ($2)
|
||||
{
|
||||
let $3=4;
|
||||
while ($3)
|
||||
{
|
||||
eval insert into t1 select $1, $2, $3, $1 ,$2, $3, key1, key2, key3, key4, filler1, filler2, filler3, filler4, filler5, filler6 from t0;
|
||||
dec $3;
|
||||
}
|
||||
dec $2;
|
||||
}
|
||||
dec $1;
|
||||
}
|
||||
|
||||
--echo # Printing of many insert into t1 (...) values (....) disabled.
|
||||
# Row retrieval tests
|
||||
# -1 is used for values 'out of any range we are using'
|
||||
# insert enough rows for index intersection to be used for (key1,key2)
|
||||
insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 100, 100,'key1-key2-key3-key4');
|
||||
let $cnt=400;
|
||||
while ($cnt)
|
||||
{
|
||||
eval insert into t1 (key1, key2, key3, key4, filler1) values (100, -1, 100, -1,'key1-key3');
|
||||
dec $cnt;
|
||||
}
|
||||
let $cnt=400;
|
||||
while ($cnt)
|
||||
{
|
||||
eval insert into t1 (key1, key2, key3, key4, filler1) values (-1, 100, -1, 100,'key2-key4');
|
||||
dec $cnt;
|
||||
}
|
||||
--enable_query_log
|
||||
alter table t1 enable keys;
|
||||
|
||||
insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, -1, -1, 'key1-key2');
|
||||
insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 100, 100, 'key4-key3');
|
||||
set optimizer_trace='enabled=on';
|
||||
|
||||
--echo # 3-way ROR-intersection
|
||||
explain select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
|
||||
--echo # ROR-union(ROR-intersection, ROR-range)
|
||||
explain select key1,key2,key3,key4 from t1 where key1=100 and key2=100 or key3=100 and key4=100;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
|
||||
drop table t0,t1;
|
||||
set optimizer_trace="enabled=off";
|
||||
|
@ -116,7 +116,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
|
||||
"range_scan_alternatives": [
|
||||
{
|
||||
"index": "PRIMARY",
|
||||
"ranges": ["pk1 < 0", "0 < pk1"],
|
||||
"ranges": ["(pk1) < (0)", "(0) < (pk1)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
@ -127,7 +127,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
|
||||
},
|
||||
{
|
||||
"index": "key1",
|
||||
"ranges": ["1 <= key1 <= 1"],
|
||||
"ranges": ["(1) <= (key1) <= (1)"],
|
||||
"rowid_ordered": true,
|
||||
"using_mrr": false,
|
||||
"index_only": false,
|
||||
@ -164,7 +164,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
|
||||
"type": "range_scan",
|
||||
"index": "key1",
|
||||
"rows": 1,
|
||||
"ranges": ["1 <= key1 <= 1"]
|
||||
"ranges": ["(1) <= (key1) <= (1)"]
|
||||
},
|
||||
"rows_for_plan": 1,
|
||||
"cost_for_plan": 2.3751,
|
||||
sql/opt_range.cc (281 changed lines)
@ -328,8 +328,6 @@ public:
uint *imerge_cost_buff; /* buffer for index_merge cost estimates */
uint imerge_cost_buff_size; /* size of the buffer */

/* TRUE if last checked tree->key can be used for ROR-scan */
bool is_ror_scan;
/* Number of ranges in the last checked tree->key */
uint n_ranges;
uint8 first_null_comp; /* first null component if any, 0 - otherwise */
@ -351,7 +349,7 @@ static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts);
|
||||
static ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
|
||||
SEL_ARG *tree, bool update_tbl_stats,
|
||||
uint *mrr_flags, uint *bufsize,
|
||||
Cost_estimate *cost);
|
||||
Cost_estimate *cost, bool *is_ror_scan);
|
||||
|
||||
QUICK_RANGE_SELECT *get_quick_select(PARAM *param,uint index,
|
||||
SEL_ARG *key_tree, uint mrr_flags,
|
||||
@ -431,16 +429,18 @@ static int and_range_trees(RANGE_OPT_PARAM *param,
|
||||
static bool remove_nonrange_trees(RANGE_OPT_PARAM *param, SEL_TREE *tree);
|
||||
|
||||
static void print_key_value(String *out, const KEY_PART_INFO *key_part,
|
||||
const uchar *key);
|
||||
const uchar* key, uint length);
|
||||
static void print_keyparts_name(String *out, const KEY_PART_INFO *key_part,
|
||||
uint n_keypart, key_part_map keypart_map);
|
||||
|
||||
static void append_range_all_keyparts(Json_writer_array *range_trace,
|
||||
String *range_string,
|
||||
String *range_so_far, const SEL_ARG *keypart,
|
||||
static void trace_ranges(Json_writer_array *range_trace,
|
||||
PARAM *param, uint idx,
|
||||
SEL_ARG *keypart,
|
||||
const KEY_PART_INFO *key_parts);
|
||||
|
||||
static
|
||||
void append_range(String *out, const KEY_PART_INFO *key_parts,
|
||||
const uchar *min_key, const uchar *max_key, const uint flag);
|
||||
void print_range(String *out, const KEY_PART_INFO *key_part,
|
||||
KEY_MULTI_RANGE *range, uint n_key_parts);
|
||||
|
||||
|
||||
/*
|
||||
@ -2208,7 +2208,7 @@ public:
|
||||
@param param Parameters for range analysis of this table
|
||||
@param trace_object The optimizer trace object the info is appended to
|
||||
*/
|
||||
virtual void trace_basic_info(const PARAM *param,
|
||||
virtual void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const= 0;
|
||||
|
||||
};
|
||||
@ -2251,11 +2251,11 @@ public:
|
||||
}
|
||||
DBUG_RETURN(quick);
|
||||
}
|
||||
void trace_basic_info(const PARAM *param,
|
||||
void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const;
|
||||
};
|
||||
|
||||
void TRP_RANGE::trace_basic_info(const PARAM *param,
|
||||
void TRP_RANGE::trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const
|
||||
{
|
||||
DBUG_ASSERT(param->using_real_indexes);
|
||||
@ -2273,10 +2273,7 @@ void TRP_RANGE::trace_basic_info(const PARAM *param,
|
||||
// TRP_RANGE should not be created if there are no range intervals
|
||||
DBUG_ASSERT(key);
|
||||
|
||||
String range_info;
|
||||
range_info.length(0);
|
||||
range_info.set_charset(system_charset_info);
|
||||
append_range_all_keyparts(&trace_range, NULL, &range_info, key, key_part);
|
||||
trace_ranges(&trace_range, param, key_idx, key, key_part);
|
||||
}
|
||||
|
||||
|
||||
@ -2296,7 +2293,7 @@ public:
|
||||
struct st_ror_scan_info *cpk_scan; /* Clustered PK scan, if there is one */
|
||||
bool is_covering; /* TRUE if no row retrieval phase is necessary */
|
||||
double index_scan_costs; /* SUM(cost(index_scan)) */
|
||||
void trace_basic_info(const PARAM *param,
|
||||
void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const;
|
||||
};
|
||||
|
||||
@ -2317,11 +2314,11 @@ public:
|
||||
MEM_ROOT *parent_alloc);
|
||||
TABLE_READ_PLAN **first_ror; /* array of ptrs to plans for merged scans */
|
||||
TABLE_READ_PLAN **last_ror; /* end of the above array */
|
||||
void trace_basic_info(const PARAM *param,
|
||||
void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const;
|
||||
};
|
||||
|
||||
void TRP_ROR_UNION::trace_basic_info(const PARAM *param,
|
||||
void TRP_ROR_UNION::trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const
|
||||
{
|
||||
THD *thd= param->thd;
|
||||
@ -2351,12 +2348,12 @@ public:
|
||||
TRP_RANGE **range_scans_end; /* end of the array */
|
||||
/* keys whose scans are to be filtered by cpk conditions */
|
||||
key_map filtered_scans;
|
||||
void trace_basic_info(const PARAM *param,
|
||||
void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const;
|
||||
|
||||
};
|
||||
|
||||
void TRP_INDEX_INTERSECT::trace_basic_info(const PARAM *param,
|
||||
void TRP_INDEX_INTERSECT::trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const
|
||||
{
|
||||
THD *thd= param->thd;
|
||||
@ -2385,11 +2382,11 @@ public:
|
||||
MEM_ROOT *parent_alloc);
|
||||
TRP_RANGE **range_scans; /* array of ptrs to plans of merged scans */
|
||||
TRP_RANGE **range_scans_end; /* end of the array */
|
||||
void trace_basic_info(const PARAM *param,
|
||||
void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const;
|
||||
};
|
||||
|
||||
void TRP_INDEX_MERGE::trace_basic_info(const PARAM *param,
|
||||
void TRP_INDEX_MERGE::trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const
|
||||
{
|
||||
THD *thd= param->thd;
|
||||
@ -2452,12 +2449,12 @@ public:
|
||||
QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows,
|
||||
MEM_ROOT *parent_alloc);
|
||||
void use_index_scan() { is_index_scan= TRUE; }
|
||||
void trace_basic_info(const PARAM *param,
|
||||
void trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const;
|
||||
};
|
||||
|
||||
|
||||
void TRP_GROUP_MIN_MAX::trace_basic_info(const PARAM *param,
|
||||
void TRP_GROUP_MIN_MAX::trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const
|
||||
{
|
||||
THD *thd= param->thd;
|
||||
@ -2489,10 +2486,8 @@ void TRP_GROUP_MIN_MAX::trace_basic_info(const PARAM *param,
|
||||
// can have group quick without ranges
|
||||
if (index_tree)
|
||||
{
|
||||
String range_info;
|
||||
range_info.set_charset(system_charset_info);
|
||||
append_range_all_keyparts(&trace_range, NULL, &range_info, index_tree,
|
||||
key_part);
|
||||
trace_ranges(&trace_range, param, param_idx,
|
||||
index_tree, key_part);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3176,6 +3171,7 @@ double records_in_column_ranges(PARAM *param, uint idx,
|
||||
seq.real_keyno= MAX_KEY;
|
||||
seq.param= param;
|
||||
seq.start= tree;
|
||||
seq.is_ror_scan= FALSE;
|
||||
|
||||
seq_it= seq_if.init((void *) &seq, 0, flags);
|
||||
|
||||
@ -3395,7 +3391,6 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
|
||||
param.mem_root= &alloc;
|
||||
param.old_root= thd->mem_root;
|
||||
param.table= table;
|
||||
param.is_ror_scan= FALSE;
|
||||
param.remove_false_where_parts= true;
|
||||
|
||||
if (create_key_parts_for_pseudo_indexes(¶m, used_fields))
|
||||
@ -6374,7 +6369,7 @@ typedef struct st_ror_scan_info : INDEX_SCAN_INFO
|
||||
{
|
||||
} ROR_SCAN_INFO;
|
||||
|
||||
void TRP_ROR_INTERSECT::trace_basic_info(const PARAM *param,
|
||||
void TRP_ROR_INTERSECT::trace_basic_info(PARAM *param,
|
||||
Json_writer_object *trace_object) const
|
||||
{
|
||||
THD *thd= param->thd;
|
||||
@ -6397,20 +6392,9 @@ void TRP_ROR_INTERSECT::trace_basic_info(const PARAM *param,
|
||||
trace_isect_idx.add("rows", (*cur_scan)->records);
|
||||
|
||||
Json_writer_array trace_range(thd, "ranges");
|
||||
for (const SEL_ARG *current= (*cur_scan)->sel_arg->first(); current;
|
||||
current= current->next)
|
||||
{
|
||||
String range_info;
|
||||
range_info.set_charset(system_charset_info);
|
||||
for (const SEL_ARG *part= current; part;
|
||||
part= part->next_key_part ? part->next_key_part : nullptr)
|
||||
{
|
||||
const KEY_PART_INFO *cur_key_part= key_part + part->part;
|
||||
append_range(&range_info, cur_key_part, part->min_value,
|
||||
part->max_value, part->min_flag | part->max_flag);
|
||||
}
|
||||
trace_range.add(range_info.ptr(), range_info.length());
|
||||
}
|
||||
|
||||
trace_ranges(&trace_range, param, (*cur_scan)->idx,
|
||||
(*cur_scan)->sel_arg, key_part);
|
||||
}
|
||||
}
|
||||
|
||||
@ -7363,6 +7347,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
|
||||
Cost_estimate cost;
|
||||
double found_read_time;
|
||||
uint mrr_flags, buf_size;
|
||||
bool is_ror_scan= FALSE;
|
||||
INDEX_SCAN_INFO *index_scan;
|
||||
uint keynr= param->real_keynr[idx];
|
||||
if (key->type == SEL_ARG::MAYBE_KEY ||
|
||||
@ -7377,7 +7362,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
|
||||
|
||||
found_records= check_quick_select(param, idx, read_index_only, key,
|
||||
update_tbl_stats, &mrr_flags,
|
||||
&buf_size, &cost);
|
||||
&buf_size, &cost, &is_ror_scan);
|
||||
|
||||
if (found_records != HA_POS_ERROR && tree->index_scans &&
|
||||
(index_scan= (INDEX_SCAN_INFO *)alloc_root(param->mem_root,
|
||||
@ -7388,9 +7373,6 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
|
||||
const KEY &cur_key= param->table->key_info[keynr];
|
||||
const KEY_PART_INFO *key_part= cur_key.key_part;
|
||||
|
||||
String range_info;
|
||||
range_info.set_charset(system_charset_info);
|
||||
|
||||
index_scan->idx= idx;
|
||||
index_scan->keynr= keynr;
|
||||
index_scan->key_info= ¶m->table->key_info[keynr];
|
||||
@ -7401,17 +7383,16 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
|
||||
*tree->index_scans_end++= index_scan;
|
||||
|
||||
if (unlikely(thd->trace_started()))
|
||||
append_range_all_keyparts(&trace_range, NULL, &range_info, key,
|
||||
key_part);
|
||||
trace_ranges(&trace_range, param, idx, key, key_part);
|
||||
trace_range.end();
|
||||
|
||||
trace_idx.add("rowid_ordered", param->is_ror_scan)
|
||||
trace_idx.add("rowid_ordered", is_ror_scan)
|
||||
.add("using_mrr", !(mrr_flags & HA_MRR_USE_DEFAULT_IMPL))
|
||||
.add("index_only", read_index_only)
|
||||
.add("rows", found_records)
|
||||
.add("cost", cost.total_cost());
|
||||
}
|
||||
if ((found_records != HA_POS_ERROR) && param->is_ror_scan)
|
||||
if ((found_records != HA_POS_ERROR) && is_ror_scan)
|
||||
{
|
||||
tree->n_ror_scans++;
|
||||
tree->ror_scans_map.set_bit(idx);
|
||||
@ -11026,7 +11007,8 @@ void SEL_ARG::test_use_count(SEL_ARG *root)
|
||||
static
|
||||
ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
|
||||
SEL_ARG *tree, bool update_tbl_stats,
|
||||
uint *mrr_flags, uint *bufsize, Cost_estimate *cost)
|
||||
uint *mrr_flags, uint *bufsize, Cost_estimate *cost,
|
||||
bool *is_ror_scan)
|
||||
{
|
||||
SEL_ARG_RANGE_SEQ seq;
|
||||
RANGE_SEQ_IF seq_if = {NULL, sel_arg_range_seq_init, sel_arg_range_seq_next, 0, 0};
|
||||
@ -11051,9 +11033,9 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
|
||||
param->range_count=0;
|
||||
param->max_key_part=0;
|
||||
|
||||
param->is_ror_scan= TRUE;
|
||||
seq.is_ror_scan= TRUE;
|
||||
if (file->index_flags(keynr, 0, TRUE) & HA_KEY_SCAN_NOT_ROR)
|
||||
param->is_ror_scan= FALSE;
|
||||
seq.is_ror_scan= FALSE;
|
||||
|
||||
*mrr_flags= param->force_default_mrr? HA_MRR_USE_DEFAULT_IMPL: 0;
|
||||
/*
|
||||
@ -11106,12 +11088,12 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
|
||||
TODO: Don't have this logic here, make table engines return
|
||||
appropriate flags instead.
|
||||
*/
|
||||
param->is_ror_scan= FALSE;
|
||||
seq.is_ror_scan= FALSE;
|
||||
}
|
||||
else if (param->table->s->primary_key == keynr && pk_is_clustered)
|
||||
{
|
||||
/* Clustered PK scan is always a ROR scan (TODO: same as above) */
|
||||
param->is_ror_scan= TRUE;
|
||||
seq.is_ror_scan= TRUE;
|
||||
}
|
||||
else if (param->range_count > 1)
|
||||
{
|
||||
@ -11121,8 +11103,9 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
|
||||
(1,3)" returns ROR order for all records with x=1, then ROR
|
||||
order for records with x=3
|
||||
*/
|
||||
param->is_ror_scan= FALSE;
|
||||
seq.is_ror_scan= FALSE;
|
||||
}
|
||||
*is_ror_scan= seq.is_ror_scan;
|
||||
|
||||
DBUG_PRINT("exit", ("Records: %lu", (ulong) rows));
|
||||
DBUG_RETURN(rows); //psergey-merge:todo: maintain first_null_comp.
|
||||
@ -13547,20 +13530,18 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
|
||||
Cost_estimate dummy_cost;
|
||||
uint mrr_flags= HA_MRR_USE_DEFAULT_IMPL;
|
||||
uint mrr_bufsize=0;
|
||||
bool is_ror_scan= FALSE;
|
||||
cur_quick_prefix_records= check_quick_select(param, cur_param_idx,
|
||||
FALSE /*don't care*/,
|
||||
cur_index_tree, TRUE,
|
||||
&mrr_flags, &mrr_bufsize,
|
||||
&dummy_cost);
|
||||
&dummy_cost, &is_ror_scan);
|
||||
if (unlikely(cur_index_tree && thd->trace_started()))
|
||||
{
|
||||
Json_writer_array trace_range(thd, "ranges");
|
||||
|
||||
const KEY_PART_INFO *key_part= cur_index_info->key_part;
|
||||
|
||||
String range_info;
|
||||
range_info.set_charset(system_charset_info);
|
||||
append_range_all_keyparts(&trace_range, NULL, &range_info,
|
||||
trace_ranges(&trace_range, param, cur_param_idx,
|
||||
cur_index_tree, key_part);
|
||||
}
|
||||
}
|
||||
@ -15733,12 +15714,16 @@ void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose)
|
||||
}
|
||||
|
||||
#endif /* !DBUG_OFF */
|
||||
|
||||
static
|
||||
void append_range(String *out, const KEY_PART_INFO *key_part,
|
||||
const uchar *min_key, const uchar *max_key, const uint flag)
|
||||
void print_range(String *out, const KEY_PART_INFO *key_part,
|
||||
KEY_MULTI_RANGE *range, uint n_key_parts)
|
||||
{
|
||||
if (out->length() > 0)
|
||||
out->append(STRING_WITH_LEN(" AND "));
|
||||
uint flag= range->range_flag;
|
||||
String key_name;
|
||||
key_name.set_charset(system_charset_info);
|
||||
key_part_map keypart_map= range->start_key.keypart_map |
|
||||
range->end_key.keypart_map;
|
||||
|
||||
if (flag & GEOM_FLAG)
|
||||
{
|
||||
@ -15747,22 +15732,24 @@ void append_range(String *out, const KEY_PART_INFO *key_part,
|
||||
range types, so printing "col < some_geom" doesn't make sense.
|
||||
Just print the column name, not operator.
|
||||
*/
|
||||
out->append(key_part->field->field_name);
|
||||
print_keyparts_name(out, key_part, n_key_parts, keypart_map);
|
||||
out->append(STRING_WITH_LEN(" "));
|
||||
print_key_value(out, key_part, min_key);
|
||||
print_key_value(out, key_part, range->start_key.key,
|
||||
range->start_key.length);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!(flag & NO_MIN_RANGE))
|
||||
{
|
||||
print_key_value(out, key_part, min_key);
|
||||
print_key_value(out, key_part, range->start_key.key,
|
||||
range->start_key.length);
|
||||
if (flag & NEAR_MIN)
|
||||
out->append(STRING_WITH_LEN(" < "));
|
||||
else
|
||||
out->append(STRING_WITH_LEN(" <= "));
|
||||
}
|
||||
|
||||
out->append(key_part->field->field_name);
|
||||
print_keyparts_name(out, key_part, n_key_parts, keypart_map);
|
||||
|
||||
if (!(flag & NO_MAX_RANGE))
|
||||
{
|
||||
@ -15770,7 +15757,8 @@ void append_range(String *out, const KEY_PART_INFO *key_part,
|
||||
out->append(STRING_WITH_LEN(" < "));
|
||||
else
|
||||
out->append(STRING_WITH_LEN(" <= "));
|
||||
print_key_value(out, key_part, max_key);
|
||||
print_key_value(out, key_part, range->end_key.key,
|
||||
range->end_key.length);
|
||||
}
|
||||
}
|
||||
|
||||
@ -15778,60 +15766,43 @@ void append_range(String *out, const KEY_PART_INFO *key_part,
|
||||
|
||||
Add ranges to the trace
|
||||
For ex:
|
||||
query: select * from t1 where a=2 ;
|
||||
and we have an index on a , so we create a range
|
||||
2 <= a <= 2
|
||||
lets say we have an index a_b(a,b)
|
||||
query: select * from t1 where a=2 and b=4 ;
|
||||
so we create a range:
|
||||
(2,4) <= (a,b) <= (2,4)
|
||||
this is added to the trace
|
||||
*/
|
||||
|
||||
static void append_range_all_keyparts(Json_writer_array *range_trace,
|
||||
String *range_string,
|
||||
String *range_so_far, const SEL_ARG *keypart,
|
||||
static void trace_ranges(Json_writer_array *range_trace,
|
||||
PARAM *param, uint idx,
|
||||
SEL_ARG *keypart,
|
||||
const KEY_PART_INFO *key_parts)
|
||||
{
|
||||
|
||||
DBUG_ASSERT(keypart);
|
||||
DBUG_ASSERT(keypart && keypart != &null_element);
|
||||
|
||||
// Navigate to first interval in red-black tree
|
||||
SEL_ARG_RANGE_SEQ seq;
|
||||
KEY_MULTI_RANGE range;
|
||||
range_seq_t seq_it;
|
||||
uint flags= 0;
|
||||
RANGE_SEQ_IF seq_if = {NULL, sel_arg_range_seq_init,
|
||||
sel_arg_range_seq_next, 0, 0};
|
||||
KEY *keyinfo= param->table->key_info + param->real_keynr[idx];
|
||||
uint n_key_parts= param->table->actual_n_key_parts(keyinfo);
|
||||
seq.keyno= idx;
|
||||
seq.real_keyno= param->real_keynr[idx];
|
||||
seq.param= param;
|
||||
seq.start= keypart;
|
||||
/*
|
||||
is_ror_scan is set to FALSE here, because we are only interested
|
||||
in iterating over all the ranges and printing them.
|
||||
*/
|
||||
seq.is_ror_scan= FALSE;
|
||||
const KEY_PART_INFO *cur_key_part= key_parts + keypart->part;
|
||||
const SEL_ARG *keypart_range= keypart->first();
|
||||
const size_t save_range_so_far_length= range_so_far->length();
|
||||
seq_it= seq_if.init((void *) &seq, 0, flags);
|
||||
|
||||
|
||||
while (keypart_range)
|
||||
while (!seq_if.next(seq_it, &range))
|
||||
{
|
||||
// Append the current range predicate to the range String
|
||||
switch (keypart->type)
|
||||
{
|
||||
case SEL_ARG::Type::KEY_RANGE:
|
||||
append_range(range_so_far, cur_key_part, keypart_range->min_value,
|
||||
keypart_range->max_value,
|
||||
keypart_range->min_flag | keypart_range->max_flag);
|
||||
break;
|
||||
case SEL_ARG::Type::MAYBE_KEY:
|
||||
range_so_far->append("MAYBE_KEY");
|
||||
break;
|
||||
case SEL_ARG::Type::IMPOSSIBLE:
|
||||
range_so_far->append("IMPOSSIBLE");
|
||||
break;
|
||||
default:
|
||||
DBUG_ASSERT(false);
|
||||
break;
|
||||
}
|
||||
|
||||
if (keypart_range->next_key_part &&
|
||||
keypart_range->next_key_part->part ==
|
||||
keypart_range->part + 1 &&
|
||||
keypart_range->is_singlepoint())
|
||||
{
|
||||
append_range_all_keyparts(range_trace, range_string, range_so_far,
|
||||
keypart_range->next_key_part, key_parts);
|
||||
}
|
||||
else
|
||||
range_trace->add(range_so_far->c_ptr_safe(), range_so_far->length());
|
||||
keypart_range= keypart_range->next;
|
||||
range_so_far->length(save_range_so_far_length);
|
||||
StringBuffer<128> range_info(system_charset_info);
|
||||
print_range(&range_info, cur_key_part, &range, n_key_parts);
|
||||
range_trace->add(range_info.c_ptr_safe(), range_info.length());
|
||||
}
|
||||
}
|
||||
|
||||
@ -15841,12 +15812,25 @@ static void append_range_all_keyparts(Json_writer_array *range_trace,
|
||||
@param[out] out String the key is appended to
|
||||
@param[in] key_part Index components description
|
||||
@param[in] key Key tuple
|
||||
@param[in] used_length length of the key tuple
|
||||
*/
|
||||
static void print_key_value(String *out, const KEY_PART_INFO *key_part,
|
||||
const uchar *key)
|
||||
{
|
||||
Field *field= key_part->field;
|
||||
|
||||
static void print_key_value(String *out, const KEY_PART_INFO *key_part,
|
||||
const uchar* key, uint used_length)
|
||||
{
|
||||
out->append(STRING_WITH_LEN("("));
|
||||
Field *field= key_part->field;
|
||||
StringBuffer<128> tmp(system_charset_info);
|
||||
TABLE *table= field->table;
|
||||
uint store_length;
|
||||
my_bitmap_map *old_sets[2];
|
||||
dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
|
||||
const uchar *key_end= key+used_length;
|
||||
|
||||
for (; key < key_end; key+=store_length, key_part++)
|
||||
{
|
||||
field= key_part->field;
|
||||
store_length= key_part->store_length;
|
||||
if (field->flags & BLOB_FLAG)
|
||||
{
|
||||
// Byte 0 of a nullable key is the null-byte. If set, key is NULL.
|
||||
@ -15856,11 +15840,9 @@ static void print_key_value(String *out, const KEY_PART_INFO *key_part,
|
||||
(field->type() == MYSQL_TYPE_GEOMETRY)
|
||||
? out->append(STRING_WITH_LEN("unprintable_geometry_value"))
|
||||
: out->append(STRING_WITH_LEN("unprintable_blob_value"));
|
||||
return;
|
||||
goto next;
|
||||
}
|
||||
|
||||
uint store_length= key_part->store_length;
|
||||
|
||||
if (field->real_maybe_null())
|
||||
{
|
||||
/*
|
||||
@ -15871,7 +15853,7 @@ static void print_key_value(String *out, const KEY_PART_INFO *key_part,
|
||||
if (*key)
|
||||
{
|
||||
out->append(STRING_WITH_LEN("NULL"));
|
||||
return;
|
||||
goto next;
|
||||
}
|
||||
key++; // Skip null byte
|
||||
store_length--;
|
||||
@ -15890,15 +15872,9 @@ static void print_key_value(String *out, const KEY_PART_INFO *key_part,
|
||||
out->append(_dig_vec_lower[*(key + i) >> 4]);
|
||||
out->append(_dig_vec_lower[*(key + i) & 0x0F]);
|
||||
}
|
||||
return;
|
||||
goto next;
|
||||
}
|
||||
|
||||
StringBuffer<128> tmp(system_charset_info);
|
||||
TABLE *table= field->table;
|
||||
my_bitmap_map *old_sets[2];
|
||||
|
||||
dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
|
||||
|
||||
field->set_key_image(key, key_part->length);
|
||||
if (field->type() == MYSQL_TYPE_BIT)
|
||||
(void)field->val_int_as_str(&tmp, 1); // may change tmp's charset
|
||||
@ -15906,5 +15882,40 @@ static void print_key_value(String *out, const KEY_PART_INFO *key_part,
|
||||
field->val_str(&tmp); // may change tmp's charset
|
||||
out->append(tmp.ptr(), tmp.length(), tmp.charset());
|
||||
|
||||
dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
|
||||
next:
|
||||
if (key + store_length < key_end)
|
||||
out->append(STRING_WITH_LEN(","));
|
||||
}
|
||||
dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
|
||||
out->append(STRING_WITH_LEN(")"));
|
||||
}
|
||||
|
||||
/**
|
||||
Print key parts involved in a range
|
||||
@param[out] out String the key is appended to
|
||||
@param[in] key_part Index components description
|
||||
@param[in] n_keypart Number of keyparts in index
|
||||
@param[in] keypart_map map for keyparts involved in the range
|
||||
*/
|
||||
|
||||
void print_keyparts_name(String *out, const KEY_PART_INFO *key_part,
|
||||
uint n_keypart, key_part_map keypart_map)
|
||||
{
|
||||
uint i;
|
||||
out->append(STRING_WITH_LEN("("));
|
||||
bool first_keypart= TRUE;
|
||||
for (i=0; i < n_keypart; key_part++, i++)
|
||||
{
|
||||
if (keypart_map & (1 << i))
|
||||
{
|
||||
if (first_keypart)
|
||||
first_keypart= FALSE;
|
||||
else
|
||||
out->append(STRING_WITH_LEN(","));
|
||||
out->append(key_part->field->field_name);
|
||||
}
|
||||
else
|
||||
break;
|
||||
}
|
||||
out->append(STRING_WITH_LEN(")"));
|
||||
}
|
||||
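As a rough illustration (editorial sketch, not part of the patch): for an index on (a,b,c) where a range uses only the first two key parts, a call such as print_keyparts_name(&out, key_part, 3, 3) appends "(a,b)" to out, the column-tuple form seen in the updated traces above (for example "(2,4) <= (a,b) <= (2,4)").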
|
@ -458,6 +458,8 @@ public:
|
||||
SEL_ARG *key_tree= first();
|
||||
uint res= key_tree->store_min(key[key_tree->part].store_length,
|
||||
range_key, *range_key_flag);
|
||||
// add flags only if a key_part is written to the buffer
|
||||
if (res)
|
||||
*range_key_flag|= key_tree->min_flag;
|
||||
if (key_tree->next_key_part &&
|
||||
key_tree->next_key_part->type == SEL_ARG::KEY_RANGE &&
|
||||
@ -480,6 +482,7 @@ public:
|
||||
SEL_ARG *key_tree= last();
|
||||
uint res=key_tree->store_max(key[key_tree->part].store_length,
|
||||
range_key, *range_key_flag);
|
||||
if (res)
|
||||
(*range_key_flag)|= key_tree->max_flag;
|
||||
if (key_tree->next_key_part &&
|
||||
key_tree->next_key_part->type == SEL_ARG::KEY_RANGE &&
|
||||
|
@ -53,6 +53,11 @@ typedef struct st_sel_arg_range_seq
int i; /* Index of last used element in the above array */

bool at_start; /* TRUE <=> The traversal has just started */
/*
  Iteration functions will set this to FALSE
  if the ranges being traversed do not allow constructing a ROR-scan
*/
bool is_ror_scan;
} SEL_ARG_RANGE_SEQ;
|
||||
|
||||
@ -165,7 +170,7 @@ bool sel_arg_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range)
|
||||
seq->i--;
|
||||
step_down_to(seq, key_tree->next);
|
||||
key_tree= key_tree->next;
|
||||
seq->param->is_ror_scan= FALSE;
|
||||
seq->is_ror_scan= FALSE;
|
||||
goto walk_right_n_up;
|
||||
}
|
||||
|
||||
@ -207,7 +212,7 @@ walk_right_n_up:
|
||||
!memcmp(cur[-1].min_key, cur[-1].max_key, len) &&
|
||||
!key_tree->min_flag && !key_tree->max_flag))
|
||||
{
|
||||
seq->param->is_ror_scan= FALSE;
|
||||
seq->is_ror_scan= FALSE;
|
||||
if (!key_tree->min_flag)
|
||||
cur->min_key_parts +=
|
||||
key_tree->next_key_part->store_min_key(seq->param->key[seq->keyno],
|
||||
@ -312,7 +317,7 @@ walk_up_n_right:
|
||||
range->range_flag |= UNIQUE_RANGE | (cur->min_key_flag & NULL_RANGE);
|
||||
}
|
||||
|
||||
if (seq->param->is_ror_scan)
|
||||
if (seq->is_ror_scan)
|
||||
{
|
||||
/*
|
||||
If we get here, the condition on the key was converted to form
|
||||
@ -327,7 +332,7 @@ walk_up_n_right:
|
||||
(range->start_key.length == range->end_key.length) &&
|
||||
!memcmp(range->start_key.key, range->end_key.key, range->start_key.length) &&
|
||||
is_key_scan_ror(seq->param, seq->real_keyno, key_tree->part + 1)))
|
||||
seq->param->is_ror_scan= FALSE;
|
||||
seq->is_ror_scan= FALSE;
|
||||
}
|
||||
}
|
||||
seq->param->range_count++;
|
||||