mirror of
https://github.com/MariaDB/server.git
synced 2025-07-30 16:24:05 +03:00
Added test cases for preceding test
This includes all test changes from "Changing all cost calculation to be given in milliseconds" and forwards. Some of the things that caused changes in the result files: - As part of fixing tests, I added 'echo' to some comments to be able to more easily find out where things were wrong. - MATERIALIZED now has a higher cost compared to X than before. Because of this some MATERIALIZED types have changed to DEPENDENT SUBQUERY. - Some test cases that required MATERIALIZED to repeat a bug were changed by adding more rows to force MATERIALIZED to happen. - 'Filtered' in SHOW EXPLAIN has in many cases changed from 100.00 to something smaller. This is because filtered now also takes into account the smallest possible ref access and filters, even if they were not used. Another reason for 'Filtered' being smaller is that we now also take into account implicit filtering done for subqueries using FIRSTMATCH. (main.subselect_no_exists_to_in) This is calculated in best_access_path() and stored in records_out. - Table orders have changed because of more accurate costs. - 'index' and 'ALL' for small tables have changed to use 'range' or 'ref' because of optimizer_scan_setup_cost. - 'index' can be changed to 'range' as the range optimizer assumes we don't have to read from disk the blocks that the range optimizer has already read. This can be confusing in the case where there is no obvious WHERE clause but instead there is a hidden 'key_column > NULL' added by the optimizer. (main.subselect_no_exists_to_in) - A scan on a clustered primary key does not report 'Using Index' anymore (it's a table scan, not an index scan). - For derived tables, the number of rows is now 100 instead of 2, which can be seen in EXPLAIN. - More tests have "Using index for group by" as the cost of this optimization is now more correct (lower). 
- A primary key could be preferred over a normal key, even if it would access more rows, as it's faster to do 1 lookup and 3 'index_next' calls on a clustered primary key than one lookup through a secondary key. (main.stat_tables_innodb) Notes: - There were 4.7% more calls to best_extension_by_limited_search() in the main.greedy_optimizer test. However, examining the test results, it looked like the plans were slightly better (eq_ref were more often chained together), so I assume this is ok. - I have verified a few test cases where there were notable/unexpected changes in the plan, and in all cases the new optimizer plans were faster. (main.greedy_optimizer and some others)
This commit is contained in:
@ -3,6 +3,8 @@
|
||||
# Problem with range optimizer
|
||||
#
|
||||
--source include/have_innodb.inc
|
||||
--source include/have_sequence.inc
|
||||
|
||||
SET optimizer_use_condition_selectivity=4;
|
||||
|
||||
set @innodb_stats_persistent_save= @@innodb_stats_persistent;
|
||||
@ -263,9 +265,9 @@ WHERE
|
||||
);
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Test of problem with IN on many different keyparts. (Bug #4157)
|
||||
#
|
||||
--echo #
|
||||
--echo # Test of problem with IN on many different keyparts. (Bug #4157)
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (
|
||||
id int( 11 ) unsigned NOT NULL AUTO_INCREMENT ,
|
||||
@ -284,7 +286,10 @@ KEY recount( owner, line )
|
||||
) ENGINE = MYISAM;
|
||||
|
||||
INSERT into t1 (owner,id,columnid,line) values (11,15,15,1),(11,13,13,5);
|
||||
INSERT into t1 (owner,id,columnid,line) select 11,seq+20,seq,seq from seq_1_to_100;
|
||||
|
||||
explain SELECT id, columnid, tableid, content, showid, line, ordinal FROM t1 WHERE owner=11 AND ((columnid IN ( 15, 13, 14 ) AND line IN ( 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 31 )) OR (columnid IN ( 13, 14 ) AND line IN ( 15 ))) LIMIT 0 , 30;
|
||||
--sorted_result
|
||||
SELECT id, columnid, tableid, content, showid, line, ordinal FROM t1 WHERE owner=11 AND ((columnid IN ( 15, 13, 14 ) AND line IN ( 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 31 )) OR (columnid IN ( 13, 14 ) AND line IN ( 15 ))) LIMIT 0 , 30;
|
||||
drop table t1;
|
||||
|
||||
@ -1025,7 +1030,8 @@ create table t2 (a int, b int, filler char(100));
|
||||
insert into t2 select A.a + 10 * (B.a + 10 * C.a), 10, 'filler' from t1 A,
|
||||
t1 B, t1 C where A.a < 5;
|
||||
|
||||
insert into t2 select 1000, b, 'filler' from t2 limit 250;
|
||||
insert into t2 select 1000, b, 'filler' from t2 limit 50;
|
||||
select count(*) from t2;
|
||||
alter table t2 add index (a,b);
|
||||
# t2 values
|
||||
# ( 1 , 10, 'filler')
|
||||
@ -1033,11 +1039,11 @@ alter table t2 add index (a,b);
|
||||
# ( 3 , 10, 'filler')
|
||||
# (... , 10, 'filler')
|
||||
# ...
|
||||
# (1000, 10, 'filler') - 250 times
|
||||
# (1000, 10, 'filler') - 100 times
|
||||
|
||||
# 250 rows, 1 row
|
||||
# 50 rows, 1 row
|
||||
|
||||
--echo # In following EXPLAIN the access method should be ref, #rows~=250
|
||||
--echo # In following EXPLAIN the access method should be ref, #rows~=50
|
||||
--echo # (and not 2) when we are not using rowid-ordered scans
|
||||
|
||||
explain select * from t2 where a=1000 and b<11;
|
||||
@ -1984,7 +1990,9 @@ eval explain format=json $q5;
|
||||
--sorted_result
|
||||
eval $q5;
|
||||
eval prepare stmt from "$q5";
|
||||
--sorted_result
|
||||
execute stmt;
|
||||
--sorted_result
|
||||
execute stmt;
|
||||
deallocate prepare stmt;
|
||||
|
||||
|
Reference in New Issue
Block a user