diff --git a/include/heap.h b/include/heap.h index 985b20f9dc9..69d1e441a4b 100644 --- a/include/heap.h +++ b/include/heap.h @@ -225,6 +225,7 @@ extern int heap_indexes_are_disabled(HP_INFO *info); extern void heap_update_auto_increment(HP_INFO *info, const byte *record); ha_rows hp_rb_records_in_range(HP_INFO *info, int inx, key_range *min_key, key_range *max_key); +int hp_panic(enum ha_panic_function flag); int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key, uint key_len, enum ha_rkey_function find_flag); extern gptr heap_find(HP_INFO *info,int inx,const byte *key); diff --git a/include/mysql/plugin.h b/include/mysql/plugin.h index ae952ccab1e..174c2b85739 100644 --- a/include/mysql/plugin.h +++ b/include/mysql/plugin.h @@ -301,7 +301,6 @@ struct st_mysql_ftparser struct st_mysql_storage_engine { int interface_version; - struct handlerton *handlerton; }; #endif diff --git a/mysql-test/r/ctype_ucs.result b/mysql-test/r/ctype_ucs.result index 0d8975c94c7..4f08b97492f 100644 --- a/mysql-test/r/ctype_ucs.result +++ b/mysql-test/r/ctype_ucs.result @@ -755,6 +755,27 @@ select export_set(5, name, upper(name), ",", 5) from bug20536; export_set(5, name, upper(name), ",", 5) test1,TEST1,test1,TEST1,TEST1 'test\_2','TEST\_2','test\_2','TEST\_2','TEST\_2' +CREATE TABLE t1 ( +status enum('active','passive') collate latin1_general_ci +NOT NULL default 'passive' +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `status` enum('active','passive') CHARACTER SET latin1 COLLATE latin1_general_ci NOT NULL DEFAULT 'passive' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +ALTER TABLE t1 ADD a int NOT NULL AFTER status; +CREATE TABLE t2 ( +status enum('active','passive') collate ucs2_turkish_ci +NOT NULL default 'passive' +); +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `status` enum('active','passive') CHARACTER SET ucs2 COLLATE ucs2_turkish_ci NOT NULL DEFAULT 'passive' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +ALTER TABLE t2 ADD a int NOT NULL AFTER status; +DROP TABLE t1,t2; select password(name) from bug20536; password(name) ???????????????????? 
diff --git a/mysql-test/r/func_gconcat.result b/mysql-test/r/func_gconcat.result index aef29ef9525..43646bd3d0e 100644 --- a/mysql-test/r/func_gconcat.result +++ b/mysql-test/r/func_gconcat.result @@ -660,3 +660,12 @@ CHAR_LENGTH( GROUP_CONCAT(b) ) 240001 SET GROUP_CONCAT_MAX_LEN = 1024; DROP TABLE t1; +CREATE TABLE t1 (a int, b int); +INSERT INTO t1 VALUES (2,1), (1,2), (2,2), (1,3); +SELECT GROUP_CONCAT(a), x +FROM (SELECT a, GROUP_CONCAT(b) x FROM t1 GROUP BY a) AS s +GROUP BY x; +GROUP_CONCAT(a) x +2 1,2 +1 2,3 +DROP TABLE t1; diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index 3838f04e20d..40ca0a38db2 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -851,8 +851,8 @@ CREATE TABLE t1 (a INT, b INT); INSERT INTO t1 VALUES (1,1),(1,2),(2,3); SELECT (SELECT COUNT(DISTINCT t1.b)) FROM t1 GROUP BY t1.a; (SELECT COUNT(DISTINCT t1.b)) -0 2 +1 SELECT (SELECT COUNT(DISTINCT 12)) FROM t1 GROUP BY t1.a; (SELECT COUNT(DISTINCT 12)) 1 @@ -1004,7 +1004,7 @@ SELECT SQL_NO_CACHE WHERE ttt.a = ccc.b AND ttt.a = t.a GROUP BY ttt.a) AS minid FROM t1 t, t2 c WHERE t.a = c.b; minid -NULL +1 DROP TABLE t1,t2; create table t1 select variance(0); show create table t1; diff --git a/mysql-test/r/func_in.result b/mysql-test/r/func_in.result index 0236cbfe26f..b88e5a66f96 100644 --- a/mysql-test/r/func_in.result +++ b/mysql-test/r/func_in.result @@ -343,3 +343,71 @@ some_id 1 2 drop table t1; +create table t1(f1 char(1)); +insert into t1 values ('a'),('b'),('1'); +select f1 from t1 where f1 in ('a',1); +f1 +a +1 +select f1, case f1 when 'a' then '+' when 1 then '-' end from t1; +f1 case f1 when 'a' then '+' when 1 then '-' end +a + +b NULL +1 - +create index t1f1_idx on t1(f1); +select f1 from t1 where f1 in ('a',1); +f1 +1 +a +explain select f1 from t1 where f1 in ('a',1); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL t1f1_idx 2 NULL 3 Using where; Using index +select f1 from t1 where f1 in ('a','b'); +f1 +a +b +explain select f1 from t1 where f1 in ('a','b'); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index t1f1_idx t1f1_idx 2 NULL 3 Using where; Using index +select f1 from t1 where f1 in (2,1); +f1 +1 +explain select f1 from t1 where f1 in (2,1); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index t1f1_idx t1f1_idx 2 NULL 3 Using where; Using index +create table t2(f2 int, index t2f2(f2)); +insert into t2 values(0),(1),(2); +select f2 from t2 where f2 in ('a',2); +f2 +0 +2 +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'a' +Warning 1292 Truncated incorrect DOUBLE value: 'a' +Warning 1292 Truncated incorrect DOUBLE value: 'a' +explain select f2 from t2 where f2 in ('a',2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index NULL t2f2 5 NULL 3 Using where; Using index +select f2 from t2 where f2 in ('a','b'); +f2 +0 +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'a' +Warning 1292 Truncated incorrect DOUBLE value: 'b' +explain select f2 from t2 where f2 in ('a','b'); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index t2f2 t2f2 5 NULL 3 Using where; Using index +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'a' +Warning 1292 Truncated incorrect DOUBLE value: 'b' +select f2 from t2 where f2 in (1,'b'); +f2 +0 +1 +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'b' +Warning 1292 Truncated incorrect DOUBLE value: 'b' 
+explain select f2 from t2 where f2 in (1,'b'); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index NULL t2f2 5 NULL 3 Using where; Using index +drop table t1, t2; diff --git a/mysql-test/r/greedy_optimizer.result b/mysql-test/r/greedy_optimizer.result index 1da49fbedb0..b02ff04780b 100644 --- a/mysql-test/r/greedy_optimizer.result +++ b/mysql-test/r/greedy_optimizer.result @@ -233,7 +233,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 6 @@ -245,7 +245,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 6 @@ -257,7 +257,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 6 @@ -269,7 +269,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 set optimizer_search_depth=1; select @@optimizer_search_depth; @@optimizer_search_depth @@ -385,7 +385,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 6 @@ -397,7 +397,7 
@@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 6 @@ -409,7 +409,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 6 @@ -421,7 +421,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where show status like 'Last_query_cost'; Variable_name Value -Last_query_cost 274.418727 +Last_query_cost 289.418727 set optimizer_prune_level=1; select @@optimizer_prune_level; @@optimizer_prune_level diff --git a/mysql-test/r/join.result b/mysql-test/r/join.result index 75daa0fd46d..897ec4f7a6a 100644 --- a/mysql-test/r/join.result +++ b/mysql-test/r/join.result @@ -776,3 +776,31 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL a,b NULL NULL NULL 1000 Using where 1 SIMPLE t3 ref b b 5 test.t2.b 1 Using where drop table t1, t2, t3; +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t2 (a int, b int, primary key(a)); +insert into t2 select @v:=A.a+10*B.a, @v from t1 A, t1 B; +explain select * from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 +show status like '%cost%'; +Variable_name Value +Last_query_cost 4.016090 +select 'The cost of accessing t1 (dont care if it changes' '^'; +The cost of accessing t1 (dont care if it changes +The cost of accessing t1 (dont care if it changes^ +select 'vv: Following query must use ALL(t1), eq_ref(A), eq_ref(B): vv' Z; +Z +vv: Following query must use ALL(t1), eq_ref(A), eq_ref(B): vv +explain select * from t1, t2 A, t2 B where A.a = t1.a and B.a=A.b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 +1 SIMPLE A eq_ref PRIMARY PRIMARY 4 test.t1.a 1 +1 SIMPLE B eq_ref PRIMARY PRIMARY 4 test.A.b 1 +show status like '%cost%'; +Variable_name Value +Last_query_cost 24.016090 +select '^^: The above should be ~= 20 + cost(select * from t1). 
Value less than 20 is an error' Z; +Z +^^: The above should be ~= 20 + cost(select * from t1). Value less than 20 is an error +drop table t1, t2; diff --git a/mysql-test/r/order_by.result b/mysql-test/r/order_by.result index 64653de5e9c..6461696b0d0 100644 --- a/mysql-test/r/order_by.result +++ b/mysql-test/r/order_by.result @@ -495,17 +495,17 @@ gid sid uid 103853 5 250 EXPLAIN select t1.gid, t2.sid, t3.uid from t3, t2, t1 where t2.gid = t1.gid and t2.uid = t3.uid order by t1.gid, t3.uid; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index PRIMARY PRIMARY 4 NULL 6 Using index -1 SIMPLE t2 eq_ref PRIMARY,uid PRIMARY 4 test.t1.gid 1 +1 SIMPLE t2 ALL PRIMARY,uid NULL NULL NULL 6 Using temporary; Using filesort 1 SIMPLE t3 eq_ref PRIMARY PRIMARY 2 test.t2.uid 1 Using where; Using index +1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.gid 1 Using index EXPLAIN SELECT t1.gid, t3.uid from t1, t3 where t1.gid = t3.uid order by t1.gid,t3.skr; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t3 ALL PRIMARY NULL NULL NULL 6 Using temporary; Using filesort 1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t3.uid 1 Using where; Using index EXPLAIN SELECT t1.gid, t2.sid, t3.uid from t2, t1, t3 where t2.gid = t1.gid and t2.uid = t3.uid order by t3.uid, t1.gid; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index PRIMARY PRIMARY 4 NULL 6 Using index; Using temporary; Using filesort -1 SIMPLE t2 eq_ref PRIMARY,uid PRIMARY 4 test.t1.gid 1 +1 SIMPLE t2 ALL PRIMARY,uid NULL NULL NULL 6 Using temporary; Using filesort +1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.gid 1 Using index 1 SIMPLE t3 eq_ref PRIMARY PRIMARY 2 test.t2.uid 1 Using where; Using index EXPLAIN SELECT t1.gid, t3.uid from t1, t3 where t1.gid = t3.uid order by t3.skr,t1.gid; id select_type table type possible_keys key key_len ref rows Extra diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result index d8d433ef216..6c37f0834e9 100644 --- a/mysql-test/r/partition.result +++ b/mysql-test/r/partition.result @@ -68,7 +68,7 @@ create table t1 (a int) engine = csv partition by list (a) (partition p0 values in (null)); -ERROR HY000: CSV handler cannot be used in partitioned tables +ERROR HY000: Engine cannot be used in partitioned tables create table t1 (a bigint) partition by range (a) (partition p0 values less than (0xFFFFFFFFFFFFFFFF), @@ -102,7 +102,7 @@ create table t1 (a int) engine = csv partition by list (a) (partition p0 values in (null)); -ERROR HY000: CSV handler cannot be used in partitioned tables +ERROR HY000: Engine cannot be used in partitioned tables create table t1 (a int) partition by key(a) (partition p0 engine = MEMORY); @@ -1054,7 +1054,7 @@ drop table t1; create table t1 (a int) partition by key (a) (partition p0 engine = MERGE); -ERROR HY000: MyISAM Merge handler cannot be used in partitioned tables +ERROR HY000: Engine cannot be used in partitioned tables create table t1 (a varchar(1)) partition by key (a) as select 'a'; diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result index a7074b9a9ed..cfdee6e630e 100644 --- a/mysql-test/r/range.result +++ b/mysql-test/r/range.result @@ -941,3 +941,17 @@ item started price A1 2005-11-01 08:00:00 1000.000 A1 2005-11-15 00:00:00 2000.000 DROP TABLE t1; +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t2 (a int, b int, filler char(100)); +insert into t2 select A.a + 10 * (B.a + 10 * C.a), 10, 'filler' from t1 A, +t1 B, t1 
C where A.a < 5; +insert into t2 select 1000, b, 'filler' from t2; +alter table t2 add index (a,b); +select 'In following EXPLAIN the access method should be ref, #rows~=500 (and not 2)' Z; +Z +In following EXPLAIN the access method should be ref, #rows~=500 (and not 2) +explain select * from t2 where a=1000 and b<11; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref a a 5 const 502 Using where +drop table t1, t2; diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index ba16bb9410b..a1463bc2b3c 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -1160,7 +1160,7 @@ Code2 char(2) NOT NULL default '', PRIMARY KEY (Code) ) ENGINE=MyISAM; INSERT INTO t2 VALUES ('AUS','Australia','Oceania','Australia and New Zealand',7741220.00,1901,18886000,79.8,351182.00,392911.00,'Australia','Constitutional Monarchy, Federation','Elisabeth II',135,'AU'); -INSERT INTO t2 VALUES ('AZE','Azerbaijan','Asia','Middle East',86600.00,1991,7734000,62.9,4127.00,4100.00,'Azärbaycan','Federal Republic','Heydär Äliyev',144,'AZ'); +INSERT INTO t2 VALUES ('AZE','Azerbaijan','Asia','Middle East',86600.00,1991,7734000,62.9,4127.00,4100.00,'Azärbaycan','Federal Republic','Heydär Äliyev',144,'AZ'); select t2.Continent, t1.Name, t1.Population from t2 LEFT JOIN t1 ON t2.Code = t1.t2 where t1.Population IN (select max(t1.Population) AS Population from t1, t2 where t1.t2 = t2.Code group by Continent); Continent Name Population Oceania Sydney 3276207 @@ -2512,7 +2512,7 @@ Code2 char(2) NOT NULL default '' ) ENGINE=MyISAM; INSERT INTO t1 VALUES ('XXX','Xxxxx','Oceania','Xxxxxx',26.00,0,0,0,0,0,'Xxxxx','Xxxxx','Xxxxx',NULL,'XX'); INSERT INTO t1 VALUES ('ASM','American Samoa','Oceania','Polynesia',199.00,0,68000,75.1,334.00,NULL,'Amerika Samoa','US Territory','George W. Bush',54,'AS'); -INSERT INTO t1 VALUES ('ATF','French Southern territories','Antarctica','Antarctica',7780.00,0,0,NULL,0.00,NULL,'Terres australes françaises','Nonmetropolitan Territory of France','Jacques Chirac',NULL,'TF'); +INSERT INTO t1 VALUES ('ATF','French Southern territories','Antarctica','Antarctica',7780.00,0,0,NULL,0.00,NULL,'Terres australes françaises','Nonmetropolitan Territory of France','Jacques Chirac',NULL,'TF'); INSERT INTO t1 VALUES ('UMI','United States Minor Outlying Islands','Oceania','Micronesia/Caribbean',16.00,0,0,NULL,0.00,NULL,'United States Minor Outlying Islands','Dependent Territory of the US','George W. 
Bush',NULL,'UM'); /*!40000 ALTER TABLE t1 ENABLE KEYS */; SELECT DISTINCT Continent AS c FROM t1 WHERE Code <> SOME ( SELECT Code FROM t1 WHERE Continent = c AND Population < 200); @@ -2966,6 +2966,42 @@ ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10; a a b 10 1 359 drop table t1,t2; +CREATE TABLE t1 ( +field1 int NOT NULL, +field2 int NOT NULL, +field3 int NOT NULL, +PRIMARY KEY (field1,field2,field3) +); +CREATE TABLE t2 ( +fieldA int NOT NULL, +fieldB int NOT NULL, +PRIMARY KEY (fieldA,fieldB) +); +INSERT INTO t1 VALUES +(1,1,1), (1,1,2), (1,2,1), (1,2,2), (1,2,3), (1,3,1); +INSERT INTO t2 VALUES (1,1), (1,2), (1,3); +SELECT field1, field2, COUNT(*) +FROM t1 GROUP BY field1, field2; +field1 field2 COUNT(*) +1 1 2 +1 2 3 +1 3 1 +SELECT field1, field2 +FROM t1 +GROUP BY field1, field2 +HAVING COUNT(*) >= ALL (SELECT fieldB +FROM t2 WHERE fieldA = field1); +field1 field2 +1 2 +SELECT field1, field2 +FROM t1 +GROUP BY field1, field2 +HAVING COUNT(*) < ANY (SELECT fieldB +FROM t2 WHERE fieldA = field1); +field1 field2 +1 1 +1 3 +DROP TABLE t1, t2; create table t1 (df decimal(5,1)); insert into t1 values(1.1); insert into t1 values(2.2); @@ -3422,3 +3458,64 @@ id select_type table type possible_keys key key_len ref rows Extra 4 UNION t12 system NULL NULL NULL NULL 0 const row not found NULL UNION RESULT ALL NULL NULL NULL NULL NULL DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT); +CREATE TABLE t2 (a INT); +INSERT INTO t2 values (1); +INSERT INTO t1 VALUES (1,1),(1,2),(2,3),(3,4); +SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; +(SELECT COUNT(DISTINCT t1.b) from t2) +2 +1 +1 +SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +FROM t1 GROUP BY t1.a; +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +2 +1 +1 +SELECT COUNT(DISTINCT t1.b), (SELECT COUNT(DISTINCT t1.b)) FROM t1 GROUP BY t1.a; +COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) +2 2 +1 1 +1 1 +SELECT COUNT(DISTINCT t1.b), +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +FROM t1 GROUP BY t1.a; +COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +2 2 +1 1 +1 1 +SELECT ( +SELECT ( +SELECT COUNT(DISTINCT t1.b) +) +) +FROM t1 GROUP BY t1.a; +( +SELECT ( +SELECT COUNT(DISTINCT t1.b) +) +) +2 +1 +1 +SELECT ( +SELECT ( +SELECT ( +SELECT COUNT(DISTINCT t1.b) +) +) +FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 t2 +GROUP BY t2.a; +( +SELECT ( +SELECT ( +SELECT COUNT(DISTINCT t1.b) +) +) +FROM t1 GROUP BY t1.a LIMIT 1) +2 +2 +2 +DROP TABLE t1,t2; diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 5d009ed7840..7a76ae1db03 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -472,11 +472,11 @@ create view v3 (x,y,z) as select b, a, b from t1; create view v4 (x,y,z) as select c+1, b, a from t1; create algorithm=temptable view v5 (x,y,z) as select c, b, a from t1; insert into v3 values (-60,4,30); -ERROR HY000: The target table v3 of the INSERT is not updatable +ERROR HY000: The target table v3 of the INSERT is not insertable-into insert into v4 values (-60,4,30); -ERROR HY000: The target table v4 of the INSERT is not updatable +ERROR HY000: The target table v4 of the INSERT is not insertable-into insert into v5 values (-60,4,30); -ERROR HY000: The target table v5 of the INSERT is not updatable +ERROR HY000: The target table v5 of the INSERT is not insertable-into insert into v1 values (-60,4,30); insert into v1 (z,y,x) values (50,6,-100); insert into v2 values (5,40); @@ -499,11 
+499,11 @@ create view v3 (x,y,z) as select b, a, b from t1; create view v4 (x,y,z) as select c+1, b, a from t1; create algorithm=temptable view v5 (x,y,z) as select c, b, a from t1; insert into v3 select c, b, a from t2; -ERROR HY000: The target table v3 of the INSERT is not updatable +ERROR HY000: The target table v3 of the INSERT is not insertable-into insert into v4 select c, b, a from t2; -ERROR HY000: The target table v4 of the INSERT is not updatable +ERROR HY000: The target table v4 of the INSERT is not insertable-into insert into v5 select c, b, a from t2; -ERROR HY000: The target table v5 of the INSERT is not updatable +ERROR HY000: The target table v5 of the INSERT is not insertable-into insert into v1 select c, b, a from t2; insert into v1 (z,y,x) select a+20,b+2,-100 from t2; insert into v2 select b+1, a+10 from t2; @@ -1352,14 +1352,14 @@ drop table t1; create table t1 (s1 smallint); create view v1 as select * from t1 where 20 < (select (s1) from t1); insert into v1 values (30); -ERROR HY000: The target table v1 of the INSERT is not updatable +ERROR HY000: The target table v1 of the INSERT is not insertable-into create view v2 as select * from t1; create view v3 as select * from t1 where 20 < (select (s1) from v2); insert into v3 values (30); -ERROR HY000: The target table v3 of the INSERT is not updatable +ERROR HY000: The target table v3 of the INSERT is not insertable-into create view v4 as select * from v2 where 20 < (select (s1) from t1); insert into v4 values (30); -ERROR HY000: The target table v4 of the INSERT is not updatable +ERROR HY000: The target table v4 of the INSERT is not insertable-into drop view v4, v3, v2, v1; drop table t1; create table t1 (a int); @@ -2911,7 +2911,7 @@ INSERT INTO v2 VALUES (0); RETURN 0; END | SELECT f2(); -ERROR HY000: The target table v2 of the INSERT is not updatable +ERROR HY000: The target table v2 of the INSERT is not insertable-into DROP FUNCTION f1; DROP FUNCTION f2; DROP VIEW v1, v2; @@ -2935,4 +2935,25 @@ id select_type table type possible_keys key key_len ref rows Extra 2 SUBQUERY t1 ALL NULL NULL NULL NULL 3 DROP VIEW v1; DROP TABLE t1; +create table t1 (s1 int); +create view v1 as select s1 as a, s1 as b from t1; +insert into v1 values (1,1); +ERROR HY000: The target table v1 of the INSERT is not insertable-into +update v1 set a = 5; +drop view v1; +drop table t1; +CREATE TABLE t1(pk int PRIMARY KEY); +CREATE TABLE t2(pk int PRIMARY KEY, fk int, ver int, org int); +CREATE ALGORITHM=MERGE VIEW v1 AS +SELECT t1.* +FROM t1 JOIN t2 +ON t2.fk = t1.pk AND +t2.ver = (SELECT MAX(t.ver) FROM t2 t WHERE t.org = t2.org); +SHOW WARNINGS; +Level Code Message +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=MERGE DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`pk` AS `pk` from (`t1` join `t2` on(((`t2`.`fk` = `t1`.`pk`) and (`t2`.`ver` = (select max(`t`.`ver`) AS `MAX(t.ver)` from `t2` `t` where (`t`.`org` = `t2`.`org`)))))) +DROP VIEW v1; +DROP TABLE t1, t2; End of 5.0 tests. 
diff --git a/mysql-test/t/ctype_ucs.test b/mysql-test/t/ctype_ucs.test index 8116d39e3db..6c814368c88 100644 --- a/mysql-test/t/ctype_ucs.test +++ b/mysql-test/t/ctype_ucs.test @@ -484,6 +484,27 @@ select make_set(3, name, upper(name)) from bug20536; select export_set(5, name, upper(name)) from bug20536; select export_set(5, name, upper(name), ",", 5) from bug20536; +# +# Bug #20108: corrupted default enum value for a ucs2 field +# + +CREATE TABLE t1 ( + status enum('active','passive') collate latin1_general_ci + NOT NULL default 'passive' +); +SHOW CREATE TABLE t1; +ALTER TABLE t1 ADD a int NOT NULL AFTER status; + +CREATE TABLE t2 ( + status enum('active','passive') collate ucs2_turkish_ci + NOT NULL default 'passive' +); +SHOW CREATE TABLE t2; +ALTER TABLE t2 ADD a int NOT NULL AFTER status; + +DROP TABLE t1,t2; + + # Some broken functions: add these tests just to document current behavior. # PASSWORD and OLD_PASSWORD don't work with UCS2 strings, but to fix it would diff --git a/mysql-test/t/func_gconcat.test b/mysql-test/t/func_gconcat.test index 7a0d90961e4..19753430dde 100644 --- a/mysql-test/t/func_gconcat.test +++ b/mysql-test/t/func_gconcat.test @@ -447,3 +447,18 @@ SELECT a, CHAR_LENGTH(b) FROM t1; SELECT CHAR_LENGTH( GROUP_CONCAT(b) ) FROM t1; SET GROUP_CONCAT_MAX_LEN = 1024; DROP TABLE t1; + +# +# Bug #22015: crash with GROUP_CONCAT over a derived table that +# returns the results of aggregation by GROUP_CONCAT +# + +CREATE TABLE t1 (a int, b int); + +INSERT INTO t1 VALUES (2,1), (1,2), (2,2), (1,3); + +SELECT GROUP_CONCAT(a), x + FROM (SELECT a, GROUP_CONCAT(b) x FROM t1 GROUP BY a) AS s + GROUP BY x; + +DROP TABLE t1; diff --git a/mysql-test/t/func_in.test b/mysql-test/t/func_in.test index 8ddf1fbe314..906747c2f78 100644 --- a/mysql-test/t/func_in.test +++ b/mysql-test/t/func_in.test @@ -232,3 +232,27 @@ select some_id from t1 where some_id not in(2,-1); select some_id from t1 where some_id not in(-4,-1,-4); select some_id from t1 where some_id not in(-4,-1,3423534,2342342); drop table t1; + +# +# Bug#18360: Type aggregation for IN and CASE may lead to a wrong result +# +create table t1(f1 char(1)); +insert into t1 values ('a'),('b'),('1'); +select f1 from t1 where f1 in ('a',1); +select f1, case f1 when 'a' then '+' when 1 then '-' end from t1; +create index t1f1_idx on t1(f1); +select f1 from t1 where f1 in ('a',1); +explain select f1 from t1 where f1 in ('a',1); +select f1 from t1 where f1 in ('a','b'); +explain select f1 from t1 where f1 in ('a','b'); +select f1 from t1 where f1 in (2,1); +explain select f1 from t1 where f1 in (2,1); +create table t2(f2 int, index t2f2(f2)); +insert into t2 values(0),(1),(2); +select f2 from t2 where f2 in ('a',2); +explain select f2 from t2 where f2 in ('a',2); +select f2 from t2 where f2 in ('a','b'); +explain select f2 from t2 where f2 in ('a','b'); +select f2 from t2 where f2 in (1,'b'); +explain select f2 from t2 where f2 in (1,'b'); +drop table t1, t2; diff --git a/mysql-test/t/join.test b/mysql-test/t/join.test index 98bfb33b1e6..d0005f3b8f7 100644 --- a/mysql-test/t/join.test +++ b/mysql-test/t/join.test @@ -609,3 +609,24 @@ explain select * from t2,t3 where t2.a < 200 and t2.b=t3.b; drop table t1, t2, t3; +# BUG#14940 {Wrong query plan is chosen because of odd results of +# prev_record_reads() function } +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t2 (a int, b int, primary key(a)); +insert into t2 select @v:=A.a+10*B.a, @v from t1 A, t1 B; + +explain select * from t1; 
+show status like '%cost%'; +select 'The cost of accessing t1 (dont care if it changes' '^'; + +select 'vv: Following query must use ALL(t1), eq_ref(A), eq_ref(B): vv' Z; + +explain select * from t1, t2 A, t2 B where A.a = t1.a and B.a=A.b; +show status like '%cost%'; +select '^^: The above should be ~= 20 + cost(select * from t1). Value less than 20 is an error' Z; + + + +drop table t1, t2; diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test index 3b5d48f8178..ac79dbc3434 100644 --- a/mysql-test/t/ps.test +++ b/mysql-test/t/ps.test @@ -988,6 +988,58 @@ execute stmt; drop temporary table t1; deallocate prepare stmt; +# +# BUG#22085: Crash on the execution of a prepared statement that +# uses an IN subquery with aggregate functions in HAVING +# + +CREATE TABLE t1( + ID int(10) unsigned NOT NULL auto_increment, + Member_ID varchar(15) NOT NULL default '', + Action varchar(12) NOT NULL, + Action_Date datetime NOT NULL, + Track varchar(15) default NULL, + User varchar(12) default NULL, + Date_Updated timestamp NOT NULL default CURRENT_TIMESTAMP on update + CURRENT_TIMESTAMP, + PRIMARY KEY (ID), + KEY Action (Action), + KEY Action_Date (Action_Date) +); + +INSERT INTO t1(Member_ID, Action, Action_Date, Track) VALUES + ('111111', 'Disenrolled', '2006-03-01', 'CAD' ), + ('111111', 'Enrolled', '2006-03-01', 'CAD' ), + ('111111', 'Disenrolled', '2006-07-03', 'CAD' ), + ('222222', 'Enrolled', '2006-03-07', 'CAD' ), + ('222222', 'Enrolled', '2006-03-07', 'CHF' ), + ('222222', 'Disenrolled', '2006-08-02', 'CHF' ), + ('333333', 'Enrolled', '2006-03-01', 'CAD' ), + ('333333', 'Disenrolled', '2006-03-01', 'CAD' ), + ('444444', 'Enrolled', '2006-03-01', 'CAD' ), + ('555555', 'Disenrolled', '2006-03-01', 'CAD' ), + ('555555', 'Enrolled', '2006-07-21', 'CAD' ), + ('555555', 'Disenrolled', '2006-03-01', 'CHF' ), + ('666666', 'Enrolled', '2006-02-09', 'CAD' ), + ('666666', 'Enrolled', '2006-05-12', 'CHF' ), + ('666666', 'Disenrolled', '2006-06-01', 'CAD' ); + +PREPARE STMT FROM +"SELECT GROUP_CONCAT(Track SEPARATOR ', ') FROM t1 + WHERE Member_ID=? AND Action='Enrolled' AND + (Track,Action_Date) IN (SELECT Track, MAX(Action_Date) FROM t1 + WHERE Member_ID=? + GROUP BY Track + HAVING Track>='CAD' AND + MAX(Action_Date)>'2006-03-01')"; +SET @id='111111'; +EXECUTE STMT USING @id,@id; +SET @id='222222'; +EXECUTE STMT USING @id,@id; + +DEALLOCATE PREPARE STMT; +DROP TABLE t1; + --echo End of 4.1 tests ############################# 5.0 tests start ################################ # diff --git a/mysql-test/t/range.test b/mysql-test/t/range.test index 89508f468a7..1a80234e485 100644 --- a/mysql-test/t/range.test +++ b/mysql-test/t/range.test @@ -740,3 +740,30 @@ SELECT * FROM t1 WHERE item='A1' AND started<='2005-12-02 00:00:00'; DROP TABLE t1; # End of 5.0 tests + +# BUG#22393 fix: Adjust 'ref' estimate if we have 'range' estimate for +# a smaller scan interval +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t2 (a int, b int, filler char(100)); +insert into t2 select A.a + 10 * (B.a + 10 * C.a), 10, 'filler' from t1 A, +t1 B, t1 C where A.a < 5; + +insert into t2 select 1000, b, 'filler' from t2; +alter table t2 add index (a,b); +# t2 values +# ( 1 , 10, 'filler') +# ( 2 , 10, 'filler') +# ( 3 , 10, 'filler') +# (... , 10, 'filler') +# ... 
+# (1000, 10, 'filler') - 500 times + +# 500 rows, 1 row + +select 'In following EXPLAIN the access method should be ref, #rows~=500 (and not 2)' Z; +explain select * from t2 where a=1000 and b<11; + +drop table t1, t2; + diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index a4e535ac418..7811301a9bc 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -665,7 +665,7 @@ CREATE TABLE t2 ( ) ENGINE=MyISAM; INSERT INTO t2 VALUES ('AUS','Australia','Oceania','Australia and New Zealand',7741220.00,1901,18886000,79.8,351182.00,392911.00,'Australia','Constitutional Monarchy, Federation','Elisabeth II',135,'AU'); -INSERT INTO t2 VALUES ('AZE','Azerbaijan','Asia','Middle East',86600.00,1991,7734000,62.9,4127.00,4100.00,'Azärbaycan','Federal Republic','Heydär Äliyev',144,'AZ'); +INSERT INTO t2 VALUES ('AZE','Azerbaijan','Asia','Middle East',86600.00,1991,7734000,62.9,4127.00,4100.00,'Azärbaycan','Federal Republic','Heydär Äliyev',144,'AZ'); select t2.Continent, t1.Name, t1.Population from t2 LEFT JOIN t1 ON t2.Code = t1.t2 where t1.Population IN (select max(t1.Population) AS Population from t1, t2 where t1.t2 = t2.Code group by Continent); @@ -1526,7 +1526,7 @@ CREATE TABLE t1 ( ) ENGINE=MyISAM; INSERT INTO t1 VALUES ('XXX','Xxxxx','Oceania','Xxxxxx',26.00,0,0,0,0,0,'Xxxxx','Xxxxx','Xxxxx',NULL,'XX'); INSERT INTO t1 VALUES ('ASM','American Samoa','Oceania','Polynesia',199.00,0,68000,75.1,334.00,NULL,'Amerika Samoa','US Territory','George W. Bush',54,'AS'); -INSERT INTO t1 VALUES ('ATF','French Southern territories','Antarctica','Antarctica',7780.00,0,0,NULL,0.00,NULL,'Terres australes françaises','Nonmetropolitan Territory of France','Jacques Chirac',NULL,'TF'); +INSERT INTO t1 VALUES ('ATF','French Southern territories','Antarctica','Antarctica',7780.00,0,0,NULL,0.00,NULL,'Terres australes françaises','Nonmetropolitan Territory of France','Jacques Chirac',NULL,'TF'); INSERT INTO t1 VALUES ('UMI','United States Minor Outlying Islands','Oceania','Micronesia/Caribbean',16.00,0,0,NULL,0.00,NULL,'United States Minor Outlying Islands','Dependent Territory of the US','George W. 
Bush',NULL,'UM'); /*!40000 ALTER TABLE t1 ENABLE KEYS */; SELECT DISTINCT Continent AS c FROM t1 WHERE Code <> SOME ( SELECT Code FROM t1 WHERE Continent = c AND Population < 200); @@ -1918,6 +1918,43 @@ SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r drop table t1,t2; +# +# Bug #21853: assert failure for a grouping query with +# an ALL/ANY quantified subquery in HAVING +# + +CREATE TABLE t1 ( + field1 int NOT NULL, + field2 int NOT NULL, + field3 int NOT NULL, + PRIMARY KEY (field1,field2,field3) +); +CREATE TABLE t2 ( + fieldA int NOT NULL, + fieldB int NOT NULL, + PRIMARY KEY (fieldA,fieldB) +); + +INSERT INTO t1 VALUES + (1,1,1), (1,1,2), (1,2,1), (1,2,2), (1,2,3), (1,3,1); +INSERT INTO t2 VALUES (1,1), (1,2), (1,3); + +SELECT field1, field2, COUNT(*) + FROM t1 GROUP BY field1, field2; + +SELECT field1, field2 + FROM t1 + GROUP BY field1, field2 + HAVING COUNT(*) >= ALL (SELECT fieldB + FROM t2 WHERE fieldA = field1); +SELECT field1, field2 + FROM t1 + GROUP BY field1, field2 + HAVING COUNT(*) < ANY (SELECT fieldB + FROM t2 WHERE fieldA = field1); + +DROP TABLE t1, t2; + # End of 4.1 tests # @@ -2332,3 +2369,34 @@ explain select * from t1 where not exists ((select t11.i from t1 t11) union (select t12.i from t1 t12)); DROP TABLE t1; + +# +# Bug #21540: Subqueries with no from and aggregate functions return +# wrong results +CREATE TABLE t1 (a INT, b INT); +CREATE TABLE t2 (a INT); +INSERT INTO t2 values (1); +INSERT INTO t1 VALUES (1,1),(1,2),(2,3),(3,4); +SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; +SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) + FROM t1 GROUP BY t1.a; +SELECT COUNT(DISTINCT t1.b), (SELECT COUNT(DISTINCT t1.b)) FROM t1 GROUP BY t1.a; +SELECT COUNT(DISTINCT t1.b), + (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) + FROM t1 GROUP BY t1.a; +SELECT ( + SELECT ( + SELECT COUNT(DISTINCT t1.b) + ) +) +FROM t1 GROUP BY t1.a; +SELECT ( + SELECT ( + SELECT ( + SELECT COUNT(DISTINCT t1.b) + ) + ) + FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 t2 +GROUP BY t2.a; +DROP TABLE t1,t2; diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index 7d1ddccba83..934e0624fd6 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -347,13 +347,13 @@ create view v3 (x,y,z) as select b, a, b from t1; create view v4 (x,y,z) as select c+1, b, a from t1; create algorithm=temptable view v5 (x,y,z) as select c, b, a from t1; # try insert to VIEW with fields duplicate --- error 1288 +-- error 1573 insert into v3 values (-60,4,30); # try insert to VIEW with expression in SELECT list --- error 1288 +-- error 1573 insert into v4 values (-60,4,30); # try insert to VIEW using temporary table algorithm --- error 1288 +-- error 1573 insert into v5 values (-60,4,30); insert into v1 values (-60,4,30); insert into v1 (z,y,x) values (50,6,-100); @@ -375,13 +375,13 @@ create view v3 (x,y,z) as select b, a, b from t1; create view v4 (x,y,z) as select c+1, b, a from t1; create algorithm=temptable view v5 (x,y,z) as select c, b, a from t1; # try insert to VIEW with fields duplicate --- error 1288 +-- error 1573 insert into v3 select c, b, a from t2; # try insert to VIEW with expression in SELECT list --- error 1288 +-- error 1573 insert into v4 select c, b, a from t2; # try insert to VIEW using temporary table algorithm --- error 1288 +-- error 1573 insert into v5 select c, b, a from t2; insert into v1 select c, b, a from t2; insert into v1 (z,y,x) select a+20,b+2,-100 from t2; @@ -1249,14 +1249,14 @@ drop table 
t1; # create table t1 (s1 smallint); create view v1 as select * from t1 where 20 < (select (s1) from t1); --- error 1288 +-- error 1573 insert into v1 values (30); create view v2 as select * from t1; create view v3 as select * from t1 where 20 < (select (s1) from v2); --- error 1288 +-- error 1573 insert into v3 values (30); create view v4 as select * from v2 where 20 < (select (s1) from t1); --- error 1288 +-- error 1573 insert into v4 values (30); drop view v4, v3, v2, v1; drop table t1; @@ -2443,7 +2443,7 @@ DROP TABLE t1, t2; # # Bug #16069: VIEW does return the same results as underlying SELECT # with WHERE condition containing BETWEEN over dates - +# Dates as strings should be casted to date type CREATE TABLE t1 (id int NOT NULL PRIMARY KEY, td date DEFAULT NULL, KEY idx(td)); @@ -2830,7 +2830,7 @@ BEGIN END | delimiter ;| ---error ER_NON_UPDATABLE_TABLE +--error ER_NON_INSERTABLE_TABLE SELECT f2(); DROP FUNCTION f1; @@ -2855,4 +2855,33 @@ EXPLAIN SELECT * FROM v1 t WHERE t.s1+1 < (SELECT MAX(t1.s1) FROM t1); DROP VIEW v1; DROP TABLE t1; + +# +# Bug #5505: Wrong error message on INSERT into a view +# +create table t1 (s1 int); +create view v1 as select s1 as a, s1 as b from t1; +--error 1573 +insert into v1 values (1,1); +update v1 set a = 5; +drop view v1; +drop table t1; + +# +# Bug #21646: view qith a subquery in ON expression +# + +CREATE TABLE t1(pk int PRIMARY KEY); +CREATE TABLE t2(pk int PRIMARY KEY, fk int, ver int, org int); + +CREATE ALGORITHM=MERGE VIEW v1 AS +SELECT t1.* + FROM t1 JOIN t2 + ON t2.fk = t1.pk AND + t2.ver = (SELECT MAX(t.ver) FROM t2 t WHERE t.org = t2.org); +SHOW WARNINGS; +SHOW CREATE VIEW v1; + +DROP VIEW v1; +DROP TABLE t1, t2; --echo End of 5.0 tests. diff --git a/mysys/my_chsize.c b/mysys/my_chsize.c index 9760de29a08..fe0d0ffa607 100644 --- a/mysys/my_chsize.c +++ b/mysys/my_chsize.c @@ -46,7 +46,9 @@ int my_chsize(File fd, my_off_t newlength, int filler, myf MyFlags) DBUG_PRINT("my",("fd: %d length: %lu MyFlags: %d",fd,(ulong) newlength, MyFlags)); - oldsize = my_seek(fd, 0L, MY_SEEK_END, MYF(MY_WME+MY_FAE)); + if ((oldsize = my_seek(fd, 0L, MY_SEEK_END, MYF(MY_WME+MY_FAE))) == newlength) + DBUG_RETURN(0); + DBUG_PRINT("info",("old_size: %ld", (ulong) oldsize)); if (oldsize > newlength) diff --git a/sql/field.h b/sql/field.h index 8a6bda500d3..9b81931d416 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1522,6 +1522,8 @@ public: uint decimals, flags, pack_length, key_length; Field::utype unireg_check; TYPELIB *interval; // Which interval to use + TYPELIB *save_interval; // Temporary copy for the above + // Used only for UCS2 intervals List interval_list; CHARSET_INFO *charset; Field::geometry_type geom_type; diff --git a/sql/filesort.cc b/sql/filesort.cc index eb2960a0458..01f3bb97557 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -1345,6 +1345,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length, switch ((sortorder->result_type=sortorder->item->result_type())) { case STRING_RESULT: sortorder->length=sortorder->item->max_length; + set_if_smaller(sortorder->length, thd->variables.max_sort_length); if (use_strnxfrm((cs=sortorder->item->collation.collation))) { sortorder->length= cs->coll->strnxfrmlen(cs, sortorder->length); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 0739b704462..0d57f3decec 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -75,17 +75,25 @@ static const int max_transactions= 3; // should really be 2 but there is a trans static uint ndbcluster_partition_flags(); static uint 
ndbcluster_alter_table_flags(uint flags); static int ndbcluster_init(void *); -static int ndbcluster_end(ha_panic_function flag); -static bool ndbcluster_show_status(THD*,stat_print_fn *,enum ha_stat_type); -static int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info); -static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond); +static int ndbcluster_end(handlerton *hton, ha_panic_function flag); +static bool ndbcluster_show_status(handlerton *hton, THD*, + stat_print_fn *, + enum ha_stat_type); +static int ndbcluster_alter_tablespace(handlerton *hton, + THD* thd, + st_alter_tablespace *info); +static int ndbcluster_fill_files_table(handlerton *hton, + THD *thd, + TABLE_LIST *tables, + COND *cond); handlerton *ndbcluster_hton; -static handler *ndbcluster_create_handler(TABLE_SHARE *table, +static handler *ndbcluster_create_handler(handlerton *hton, + TABLE_SHARE *table, MEM_ROOT *mem_root) { - return new (mem_root) ha_ndbcluster(table); + return new (mem_root) ha_ndbcluster(hton, table); } static uint ndbcluster_partition_flags() @@ -4201,7 +4209,7 @@ int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type) Commit a transaction started in NDB */ -static int ndbcluster_commit(THD *thd, bool all) +static int ndbcluster_commit(handlerton *hton, THD *thd, bool all) { int res= 0; Thd_ndb *thd_ndb= get_thd_ndb(thd); @@ -4252,7 +4260,7 @@ static int ndbcluster_commit(THD *thd, bool all) Rollback a transaction started in NDB */ -static int ndbcluster_rollback(THD *thd, bool all) +static int ndbcluster_rollback(handlerton *hton, THD *thd, bool all) { int res= 0; Thd_ndb *thd_ndb= get_thd_ndb(thd); @@ -5552,8 +5560,8 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment, HA_HAS_OWN_BINLOGGING | \ HA_HAS_RECORDS -ha_ndbcluster::ha_ndbcluster(TABLE_SHARE *table_arg): - handler(ndbcluster_hton, table_arg), +ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg): + handler(hton, table_arg), m_active_trans(NULL), m_active_cursor(NULL), m_table(NULL), @@ -5816,7 +5824,7 @@ int ha_ndbcluster::check_ndb_connection(THD* thd) } -static int ndbcluster_close_connection(THD *thd) +static int ndbcluster_close_connection(handlerton *hton, THD *thd) { Thd_ndb *thd_ndb= get_thd_ndb(thd); DBUG_ENTER("ndbcluster_close_connection"); @@ -5833,8 +5841,10 @@ static int ndbcluster_close_connection(THD *thd) Try to discover one table from NDB */ -int ndbcluster_discover(THD* thd, const char *db, const char *name, - const void** frmblob, uint* frmlen) +int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, + const char *name, + const void** frmblob, + uint* frmlen) { int error= 0; NdbError ndb_error; @@ -5914,7 +5924,8 @@ err: */ -int ndbcluster_table_exists_in_engine(THD* thd, const char *db, +int ndbcluster_table_exists_in_engine(handlerton *hton, THD* thd, + const char *db, const char *name) { Ndb* ndb; @@ -6014,7 +6025,7 @@ int ndbcluster_drop_database_impl(const char *path) DBUG_RETURN(ret); } -static void ndbcluster_drop_database(char *path) +static void ndbcluster_drop_database(handlerton *hton, char *path) { THD *thd= current_thd; DBUG_ENTER("ndbcluster_drop_database"); @@ -6175,7 +6186,9 @@ int ndbcluster_find_all_files(THD *thd) DBUG_RETURN(-(skipped + unhandled)); } -int ndbcluster_find_files(THD *thd,const char *db,const char *path, +int ndbcluster_find_files(handlerton *hton, THD *thd, + const char *db, + const char *path, const char *wild, bool dir, List *files) { DBUG_ENTER("ndbcluster_find_files"); @@ -6285,7 
+6298,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, DBUG_PRINT("info", ("%s existed on disk", name)); // The .ndb file exists on disk, but it's not in list of tables in ndb // Verify that handler agrees table is gone. - if (ndbcluster_table_exists_in_engine(thd, db, file_name) == 0) + if (ndbcluster_table_exists_in_engine(hton, thd, db, file_name) == 0) { DBUG_PRINT("info", ("NDB says %s does not exists", file_name)); it.remove(); @@ -6539,7 +6552,7 @@ ndbcluster_init_error: DBUG_RETURN(TRUE); } -static int ndbcluster_end(ha_panic_function type) +static int ndbcluster_end(handlerton *hton, ha_panic_function type) { DBUG_ENTER("ndbcluster_end"); @@ -6623,7 +6636,7 @@ void ndbcluster_print_error(int error, const NdbOperation *error_op) share.db.length= 0; share.table_name.str= (char *) tab_name; share.table_name.length= strlen(tab_name); - ha_ndbcluster error_handler(&share); + ha_ndbcluster error_handler(ndbcluster_hton, &share); error_handler.print_error(error, MYF(0)); DBUG_VOID_RETURN; } @@ -9703,7 +9716,7 @@ err: Implements the SHOW NDB STATUS command. */ bool -ndbcluster_show_status(THD* thd, stat_print_fn *stat_print, +ndbcluster_show_status(handlerton *hton, THD* thd, stat_print_fn *stat_print, enum ha_stat_type stat_type) { char buf[IO_SIZE]; @@ -10167,7 +10180,7 @@ bool set_up_undofile(st_alter_tablespace *info, return false; } -int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info) +int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace *info) { DBUG_ENTER("ha_ndbcluster::alter_tablespace"); @@ -10428,7 +10441,9 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts) DBUG_RETURN(TRUE); } -static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, +static int ndbcluster_fill_files_table(handlerton *hton, + THD *thd, + TABLE_LIST *tables, COND *cond) { TABLE* table= tables->table; @@ -10749,7 +10764,7 @@ SHOW_VAR ndb_status_variables_export[]= { }; struct st_mysql_storage_engine ndbcluster_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, ndbcluster_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(ndbcluster) { diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 5cd5bb2c716..19ff513f9b1 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -627,7 +627,7 @@ class Thd_ndb class ha_ndbcluster: public handler { public: - ha_ndbcluster(TABLE_SHARE *table); + ha_ndbcluster(handlerton *hton, TABLE_SHARE *table); ~ha_ndbcluster(); int ha_initialise(); diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 3fc84ad1b66..5f5c8bcb221 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -506,7 +506,7 @@ ndbcluster_binlog_index_purge_file(THD *thd, const char *file) } static void -ndbcluster_binlog_log_query(THD *thd, enum_binlog_command binlog_command, +ndbcluster_binlog_log_query(handlerton *hton, THD *thd, enum_binlog_command binlog_command, const char *query, uint query_length, const char *db, const char *table_name) { @@ -637,7 +637,9 @@ static void ndbcluster_reset_slave(THD *thd) /* Initialize the binlog part of the ndb handlerton */ -static int ndbcluster_binlog_func(THD *thd, enum_binlog_func fn, void *arg) +static int ndbcluster_binlog_func(handlerton *hton, THD *thd, + enum_binlog_func fn, + void *arg) { switch(fn) { diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h index 822ebf3e358..233d1a58aaa 100644 --- a/sql/ha_ndbcluster_binlog.h +++ b/sql/ha_ndbcluster_binlog.h @@ -31,6 +31,8 @@ 
extern ulong ndb_extra_logging; #define NDB_INVALID_SCHEMA_OBJECT 241 +extern handlerton *ndbcluster_hton; + /* The numbers below must not change as they are passed between mysql servers, and if changed @@ -103,7 +105,6 @@ extern pthread_mutex_t injector_mutex; extern pthread_cond_t injector_cond; extern unsigned char g_node_id_map[max_ndb_nodes]; -extern handlerton *ndbcluster_hton; extern pthread_t ndb_util_thread; extern pthread_mutex_t LOCK_ndb_util_thread; extern pthread_cond_t COND_ndb_util_thread; diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index e435b356def..0da0094d92e 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -69,16 +69,17 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE * table); MODULE create/delete handler object ****************************************************************************/ -static handler *partition_create_handler(TABLE_SHARE *share, +static handler *partition_create_handler(handlerton *hton, + TABLE_SHARE *share, MEM_ROOT *mem_root); static uint partition_flags(); static uint alter_table_flags(uint flags); -handlerton *partition_hton; static int partition_initialize(void *p) { + handlerton *partition_hton; partition_hton= (handlerton *)p; partition_hton->state= SHOW_OPTION_YES; @@ -102,10 +103,11 @@ static int partition_initialize(void *p) New partition object */ -static handler *partition_create_handler(TABLE_SHARE *share, +static handler *partition_create_handler(handlerton *hton, + TABLE_SHARE *share, MEM_ROOT *mem_root) { - ha_partition *file= new (mem_root) ha_partition(share); + ha_partition *file= new (mem_root) ha_partition(hton, share); if (file && file->initialise_partition(mem_root)) { delete file; @@ -155,8 +157,8 @@ static uint alter_table_flags(uint flags __attribute__((unused))) NONE */ -ha_partition::ha_partition(TABLE_SHARE *share) - :handler(partition_hton, share), m_part_info(NULL), m_create_handler(FALSE), +ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) + :handler(hton, share), m_part_info(NULL), m_create_handler(FALSE), m_is_sub_partitioned(0) { DBUG_ENTER("ha_partition::ha_partition(table)"); @@ -176,8 +178,8 @@ ha_partition::ha_partition(TABLE_SHARE *share) NONE */ -ha_partition::ha_partition(partition_info *part_info) - :handler(partition_hton, NULL), m_part_info(part_info), +ha_partition::ha_partition(handlerton *hton, partition_info *part_info) + :handler(hton, NULL), m_part_info(part_info), m_create_handler(TRUE), m_is_sub_partitioned(m_part_info->is_sub_partitioned()) @@ -5632,7 +5634,7 @@ static int free_share(PARTITION_SHARE *share) #endif /* NOT_USED */ struct st_mysql_storage_engine partition_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, partition_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(partition) { diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 97086d7b632..b66db205549 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -149,8 +149,8 @@ public: partition handler. ------------------------------------------------------------------------- */ - ha_partition(TABLE_SHARE * table); - ha_partition(partition_info * part_info); + ha_partition(handlerton *hton, TABLE_SHARE * table); + ha_partition(handlerton *hton, partition_info * part_info); ~ha_partition(); /* A partition handler has no characteristics in itself. 
It only inherits diff --git a/sql/handler.cc b/sql/handler.cc index 7848552f3de..ccf1a1ef8d9 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -170,7 +170,7 @@ const char *ha_get_storage_engine(enum legacy_db_type db_type) static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root) { handlerton *hton= ha_default_handlerton(current_thd); - return (hton && hton->create) ? hton->create(table, mem_root) : NULL; + return (hton && hton->create) ? hton->create(hton, table, mem_root) : NULL; } @@ -232,7 +232,7 @@ handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc, if (db_type && db_type->state == SHOW_OPTION_YES && db_type->create) { - if ((file= db_type->create(share, alloc))) + if ((file= db_type->create(db_type, share, alloc))) file->init(); DBUG_RETURN(file); } @@ -251,7 +251,7 @@ handler *get_ha_partition(partition_info *part_info) { ha_partition *partition; DBUG_ENTER("get_ha_partition"); - if ((partition= new ha_partition(part_info))) + if ((partition= new ha_partition(partition_hton, part_info))) { if (partition->initialise_partition(current_thd->mem_root)) { @@ -376,7 +376,7 @@ int ha_finalize_handlerton(st_plugin_int *plugin) case SHOW_OPTION_YES: if (installed_htons[hton->db_type] == hton) installed_htons[hton->db_type]= NULL; - if (hton->panic && hton->panic(HA_PANIC_CLOSE)) + if (hton->panic && hton->panic(hton, HA_PANIC_CLOSE)) DBUG_RETURN(1); break; }; @@ -465,6 +465,26 @@ int ha_initialize_handlerton(st_plugin_int *plugin) hton->state= SHOW_OPTION_DISABLED; break; } + + /* + This is entirely for legacy. We will create a new "disk based" hton and a + "memory" hton which will be configurable longterm. We should be able to + remove partition and myisammrg. + */ + switch (hton->db_type) { + case DB_TYPE_HEAP: + heap_hton= hton; + break; + case DB_TYPE_MYISAM: + myisam_hton= hton; + break; + case DB_TYPE_PARTITION_DB: + partition_hton= hton; + break; + default: + break; + }; + DBUG_RETURN(0); err: DBUG_RETURN(1); @@ -498,7 +518,7 @@ static my_bool panic_handlerton(THD *unused1, st_plugin_int *plugin, void *arg) { handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->panic) - ((int*)arg)[0]|= hton->panic((enum ha_panic_function)((int*)arg)[1]); + ((int*)arg)[0]|= hton->panic(hton, (enum ha_panic_function)((int*)arg)[1]); return FALSE; } @@ -520,7 +540,7 @@ static my_bool dropdb_handlerton(THD *unused1, st_plugin_int *plugin, { handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->drop_database) - hton->drop_database((char *)path); + hton->drop_database(hton, (char *)path); return FALSE; } @@ -541,7 +561,7 @@ static my_bool closecon_handlerton(THD *thd, st_plugin_int *plugin, */ if (hton->state == SHOW_OPTION_YES && hton->close_connection && thd->ha_data[hton->slot]) - hton->close_connection(thd); + hton->close_connection(hton, thd); return FALSE; } @@ -617,7 +637,7 @@ int ha_prepare(THD *thd) statistic_increment(thd->status_var.ha_prepare_count,&LOCK_status); if ((*ht)->prepare) { - if ((err= (*(*ht)->prepare)(thd, all))) + if ((err= (*(*ht)->prepare)(*ht, thd, all))) { my_error(ER_ERROR_DURING_COMMIT, MYF(0), err); ha_rollback_trans(thd, all); @@ -691,7 +711,7 @@ int ha_commit_trans(THD *thd, bool all) for (; *ht && !error; ht++) { int err; - if ((err= (*(*ht)->prepare)(thd, all))) + if ((err= (*(*ht)->prepare)(*ht, thd, all))) { my_error(ER_ERROR_DURING_COMMIT, MYF(0), err); error= 1; @@ -738,7 +758,7 @@ int ha_commit_one_phase(THD *thd, bool all) for (ht=trans->ht; *ht; ht++) { int err; - if 
((err= (*(*ht)->commit)(thd, all))) + if ((err= (*(*ht)->commit)(*ht, thd, all))) { my_error(ER_ERROR_DURING_COMMIT, MYF(0), err); error=1; @@ -794,7 +814,7 @@ int ha_rollback_trans(THD *thd, bool all) for (handlerton **ht=trans->ht; *ht; ht++) { int err; - if ((err= (*(*ht)->rollback)(thd, all))) + if ((err= (*(*ht)->rollback)(*ht, thd, all))) { // cannot happen my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err); error=1; @@ -871,7 +891,7 @@ static my_bool xacommit_handlerton(THD *unused1, st_plugin_int *plugin, handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->recover) { - hton->commit_by_xid(((struct xahton_st *)arg)->xid); + hton->commit_by_xid(hton, ((struct xahton_st *)arg)->xid); ((struct xahton_st *)arg)->result= 0; } return FALSE; @@ -883,7 +903,7 @@ static my_bool xarollback_handlerton(THD *unused1, st_plugin_int *plugin, handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->recover) { - hton->rollback_by_xid(((struct xahton_st *)arg)->xid); + hton->rollback_by_xid(hton, ((struct xahton_st *)arg)->xid); ((struct xahton_st *)arg)->result= 0; } return FALSE; @@ -993,7 +1013,7 @@ static my_bool xarecover_handlerton(THD *unused, st_plugin_int *plugin, if (hton->state == SHOW_OPTION_YES && hton->recover) { - while ((got= hton->recover(info->list, info->len)) > 0 ) + while ((got= hton->recover(hton, info->list, info->len)) > 0 ) { sql_print_information("Found %d prepared transaction(s) in %s", got, hton2plugin[hton->slot]->name.str); @@ -1024,7 +1044,7 @@ static my_bool xarecover_handlerton(THD *unused, st_plugin_int *plugin, char buf[XIDDATASIZE*4+6]; // see xid_to_str sql_print_information("commit xid %s", xid_to_str(buf, info->list+i)); #endif - hton->commit_by_xid(info->list+i); + hton->commit_by_xid(hton, info->list+i); } else { @@ -1033,7 +1053,7 @@ static my_bool xarecover_handlerton(THD *unused, st_plugin_int *plugin, sql_print_information("rollback xid %s", xid_to_str(buf, info->list+i)); #endif - hton->rollback_by_xid(info->list+i); + hton->rollback_by_xid(hton, info->list+i); } } if (got < info->len) @@ -1179,7 +1199,7 @@ static my_bool release_temporary_latches(THD *thd, st_plugin_int *plugin, handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->release_temporary_latches) - hton->release_temporary_latches(thd); + hton->release_temporary_latches(hton, thd); return FALSE; } @@ -1212,7 +1232,7 @@ int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv) { int err; DBUG_ASSERT((*ht)->savepoint_set != 0); - if ((err= (*(*ht)->savepoint_rollback)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset))) + if ((err= (*(*ht)->savepoint_rollback)(*ht, thd, (byte *)(sv+1)+(*ht)->savepoint_offset))) { // cannot happen my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err); error=1; @@ -1228,7 +1248,7 @@ int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv) for (; *ht ; ht++) { int err; - if ((err= (*(*ht)->rollback)(thd, !thd->in_sub_stmt))) + if ((err= (*(*ht)->rollback)(*ht, thd, !thd->in_sub_stmt))) { // cannot happen my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err); error=1; @@ -1262,7 +1282,7 @@ int ha_savepoint(THD *thd, SAVEPOINT *sv) error=1; break; } - if ((err= (*(*ht)->savepoint_set)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset))) + if ((err= (*(*ht)->savepoint_set)(*ht, thd, (byte *)(sv+1)+(*ht)->savepoint_offset))) { // cannot happen my_error(ER_GET_ERRNO, MYF(0), err); error=1; @@ -1288,7 +1308,9 @@ int ha_release_savepoint(THD *thd, SAVEPOINT *sv) int err; if 
(!(*ht)->savepoint_release) continue; - if ((err= (*(*ht)->savepoint_release)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset))) + if ((err= (*(*ht)->savepoint_release)(*ht, thd, + (byte *)(sv+1)+ + (*ht)->savepoint_offset))) { // cannot happen my_error(ER_GET_ERRNO, MYF(0), err); error=1; @@ -1305,7 +1327,7 @@ static my_bool snapshot_handlerton(THD *thd, st_plugin_int *plugin, if (hton->state == SHOW_OPTION_YES && hton->start_consistent_snapshot) { - hton->start_consistent_snapshot(thd); + hton->start_consistent_snapshot(hton, thd); *((bool *)arg)= false; } return FALSE; @@ -1333,7 +1355,8 @@ static my_bool flush_handlerton(THD *thd, st_plugin_int *plugin, void *arg) { handlerton *hton= (handlerton *)plugin->data; - if (hton->state == SHOW_OPTION_YES && hton->flush_logs && hton->flush_logs()) + if (hton->state == SHOW_OPTION_YES && hton->flush_logs && + hton->flush_logs(hton)) return TRUE; return FALSE; } @@ -1350,7 +1373,7 @@ bool ha_flush_logs(handlerton *db_type) else { if (db_type->state != SHOW_OPTION_YES || - (db_type->flush_logs && db_type->flush_logs())) + (db_type->flush_logs && db_type->flush_logs(db_type))) return TRUE; } return FALSE; @@ -2765,7 +2788,9 @@ static my_bool discover_handlerton(THD *thd, st_plugin_int *plugin, st_discover_args *vargs= (st_discover_args *)arg; handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->discover && - (!(hton->discover(thd, vargs->db, vargs->name, vargs->frmblob, vargs->frmlen)))) + (!(hton->discover(hton, thd, vargs->db, vargs->name, + vargs->frmblob, + vargs->frmlen)))) return TRUE; return FALSE; @@ -2814,7 +2839,7 @@ static my_bool find_files_handlerton(THD *thd, st_plugin_int *plugin, if (hton->state == SHOW_OPTION_YES && hton->find_files) - if (hton->find_files(thd, vargs->db, vargs->path, vargs->wild, + if (hton->find_files(hton, thd, vargs->db, vargs->path, vargs->wild, vargs->dir, vargs->files)) return TRUE; @@ -2861,7 +2886,7 @@ static my_bool table_exists_in_engine_handlerton(THD *thd, st_plugin_int *plugin handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->table_exists_in_engine) - if ((hton->table_exists_in_engine(thd, vargs->db, vargs->name)) == 1) + if ((hton->table_exists_in_engine(hton, thd, vargs->db, vargs->name)) == 1) return TRUE; return FALSE; @@ -2930,7 +2955,7 @@ static my_bool binlog_func_foreach(THD *thd, binlog_func_st *bfn) uint i= 0, sz= hton_list.sz; while(i < sz) - hton_list.hton[i++]->binlog_func(thd, bfn->fn, bfn->arg); + hton_list.hton[i++]->binlog_func(hton, thd, bfn->fn, bfn->arg); return FALSE; } @@ -2977,12 +3002,12 @@ struct binlog_log_query_st }; static my_bool binlog_log_query_handlerton2(THD *thd, - const handlerton *hton, + handlerton *hton, void *args) { struct binlog_log_query_st *b= (struct binlog_log_query_st*)args; if (hton->state == SHOW_OPTION_YES && hton->binlog_log_query) - hton->binlog_log_query(thd, + hton->binlog_log_query(hton, thd, b->binlog_command, b->query, b->query_length, @@ -2995,10 +3020,10 @@ static my_bool binlog_log_query_handlerton(THD *thd, st_plugin_int *plugin, void *args) { - return binlog_log_query_handlerton2(thd, (const handlerton *)plugin->data, args); + return binlog_log_query_handlerton2(thd, (handlerton *)plugin->data, args); } -void ha_binlog_log_query(THD *thd, const handlerton *hton, +void ha_binlog_log_query(THD *thd, handlerton *hton, enum_binlog_command binlog_command, const char *query, uint query_length, const char *db, const char *table_name) @@ -3296,7 +3321,7 @@ static my_bool 
exts_handlerton(THD *unused, st_plugin_int *plugin, handlerton *hton= (handlerton *)plugin->data; handler *file; if (hton->state == SHOW_OPTION_YES && hton->create && - (file= hton->create((TABLE_SHARE*) 0, current_thd->mem_root))) + (file= hton->create(hton, (TABLE_SHARE*) 0, current_thd->mem_root))) { List_iterator_fast it(*found_exts); const char **ext, *old_ext; @@ -3371,7 +3396,7 @@ static my_bool showstat_handlerton(THD *thd, st_plugin_int *plugin, enum ha_stat_type stat= *(enum ha_stat_type *) arg; handlerton *hton= (handlerton *)plugin->data; if (hton->state == SHOW_OPTION_YES && hton->show_status && - hton->show_status(thd, stat_print, stat)) + hton->show_status(hton, thd, stat_print, stat)) return TRUE; return FALSE; } @@ -3405,7 +3430,7 @@ bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat) } else result= db_type->show_status && - db_type->show_status(thd, stat_print, stat) ? 1 : 0; + db_type->show_status(db_type, thd, stat_print, stat) ? 1 : 0; } if (!result) @@ -3726,7 +3751,7 @@ int example_of_iterator_using_for_logs_cleanup(handlerton *hton) if (!hton->create_iterator) return 1; /* iterator creator is not supported */ - if ((*hton->create_iterator)(HA_TRANSACTLOG_ITERATOR, &iterator) != + if ((*hton->create_iterator)(hton, HA_TRANSACTLOG_ITERATOR, &iterator) != HA_ITERATOR_OK) { /* error during creation of log iterator or iterator is not supported */ diff --git a/sql/handler.h b/sql/handler.h index 2efe9f3ce6a..e0827fec5c8 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -449,7 +449,7 @@ class st_alter_tablespace : public Sql_alloc ulonglong autoextend_size; ulonglong max_size; uint nodegroup_id; - const handlerton *storage_engine; + handlerton *storage_engine; bool wait_until_completed; const char *ts_comment; enum tablespace_access_mode ts_access_mode; @@ -605,18 +605,18 @@ struct handlerton this storage area - set it to something, so that MySQL would know this storage engine was accessed in this connection */ - int (*close_connection)(THD *thd); + int (*close_connection)(handlerton *hton, THD *thd); /* sv points to an uninitialized storage area of requested size (see savepoint_offset description) */ - int (*savepoint_set)(THD *thd, void *sv); + int (*savepoint_set)(handlerton *hton, THD *thd, void *sv); /* sv points to a storage area, that was earlier passed to the savepoint_set call */ - int (*savepoint_rollback)(THD *thd, void *sv); - int (*savepoint_release)(THD *thd, void *sv); + int (*savepoint_rollback)(handlerton *hton, THD *thd, void *sv); + int (*savepoint_release)(handlerton *hton, THD *thd, void *sv); /* 'all' is true if it's a real commit, that makes persistent changes 'all' is false if it's not in fact a commit but an end of the @@ -624,25 +624,25 @@ struct handlerton NOTE 'all' is also false in auto-commit mode where 'end of statement' and 'real commit' mean the same event. 
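A minimal sketch, not part of the patch itself, of what an engine callback looks like under the new calling convention introduced below: every handlerton method now receives its own handlerton as the first argument, so per-connection state is reached through the passed-in pointer rather than through a file-scope global. The name example_commit is hypothetical.

  static int example_commit(handlerton *hton, THD *thd, bool all)
  {
    /*
      Per-connection engine data still lives in the slot assigned to this
      handlerton at plugin initialization; only the way the handlerton is
      obtained changes (argument instead of engine-global variable).
    */
    void *trx= thd->ha_data[hton->slot];
    if (!trx)
      return 0;                     /* nothing registered for this engine */
    /* all == TRUE: real commit; all == FALSE: end of statement only */
    return 0;
  }

Passing the handlerton explicitly is also what lets the engine-local globals (archive_hton, tina_hton, blackhole_hton) become local to their init functions later in this patch.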
*/ - int (*commit)(THD *thd, bool all); - int (*rollback)(THD *thd, bool all); - int (*prepare)(THD *thd, bool all); - int (*recover)(XID *xid_list, uint len); - int (*commit_by_xid)(XID *xid); - int (*rollback_by_xid)(XID *xid); - void *(*create_cursor_read_view)(); - void (*set_cursor_read_view)(void *); - void (*close_cursor_read_view)(void *); - handler *(*create)(TABLE_SHARE *table, MEM_ROOT *mem_root); - void (*drop_database)(char* path); - int (*panic)(enum ha_panic_function flag); - int (*start_consistent_snapshot)(THD *thd); - bool (*flush_logs)(); - bool (*show_status)(THD *thd, stat_print_fn *print, enum ha_stat_type stat); + int (*commit)(handlerton *hton, THD *thd, bool all); + int (*rollback)(handlerton *hton, THD *thd, bool all); + int (*prepare)(handlerton *hton, THD *thd, bool all); + int (*recover)(handlerton *hton, XID *xid_list, uint len); + int (*commit_by_xid)(handlerton *hton, XID *xid); + int (*rollback_by_xid)(handlerton *hton, XID *xid); + void *(*create_cursor_read_view)(handlerton *hton, THD *thd); + void (*set_cursor_read_view)(handlerton *hton, THD *thd, void *read_view); + void (*close_cursor_read_view)(handlerton *hton, THD *thd, void *read_view); + handler *(*create)(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root); + void (*drop_database)(handlerton *hton, char* path); + int (*panic)(handlerton *hton, enum ha_panic_function flag); + int (*start_consistent_snapshot)(handlerton *hton, THD *thd); + bool (*flush_logs)(handlerton *hton); + bool (*show_status)(handlerton *hton, THD *thd, stat_print_fn *print, enum ha_stat_type stat); uint (*partition_flags)(); uint (*alter_table_flags)(uint flags); - int (*alter_tablespace)(THD *thd, st_alter_tablespace *ts_info); - int (*fill_files_table)(THD *thd, + int (*alter_tablespace)(handlerton *hton, THD *thd, st_alter_tablespace *ts_info); + int (*fill_files_table)(handlerton *hton, THD *thd, struct st_table_list *tables, class Item *cond); uint32 flags; /* global handler flags */ @@ -650,11 +650,12 @@ struct handlerton Those handlerton functions below are properly initialized at handler init. */ - int (*binlog_func)(THD *thd, enum_binlog_func fn, void *arg); - void (*binlog_log_query)(THD *thd, enum_binlog_command binlog_command, + int (*binlog_func)(handlerton *hton, THD *thd, enum_binlog_func fn, void *arg); + void (*binlog_log_query)(handlerton *hton, THD *thd, + enum_binlog_command binlog_command, const char *query, uint query_length, const char *db, const char *table_name); - int (*release_temporary_latches)(THD *thd); + int (*release_temporary_latches)(handlerton *hton, THD *thd); /* Get log status. @@ -663,20 +664,24 @@ struct handlerton (see example of implementation in handler.cc, TRANS_LOG_MGM_EXAMPLE_CODE) */ - enum log_status (*get_log_status)(char *log); + enum log_status (*get_log_status)(handlerton *hton, char *log); /* Iterators creator. 
Presence of the pointer should be checked before using */ enum handler_create_iterator_result - (*create_iterator)(enum handler_iterator_type type, + (*create_iterator)(handlerton *hton, enum handler_iterator_type type, struct handler_iterator *fill_this_in); - int (*discover)(THD* thd, const char *db, const char *name, - const void** frmblob, uint* frmlen); - int (*find_files)(THD *thd,const char *db,const char *path, + int (*discover)(handlerton *hton, THD* thd, const char *db, + const char *name, + const void** frmblob, + uint* frmlen); + int (*find_files)(handlerton *hton, THD *thd, + const char *db, + const char *path, const char *wild, bool dir, List *files); - int (*table_exists_in_engine)(THD* thd, const char *db, + int (*table_exists_in_engine)(handlerton *hton, THD* thd, const char *db, const char *name); }; @@ -691,6 +696,7 @@ struct handlerton #define HTON_NOT_USER_SELECTABLE (1 << 5) #define HTON_TEMPORARY_NOT_SUPPORTED (1 << 6) //Having temporary tables not supported #define HTON_SUPPORT_LOG_TABLES (1 << 7) //Engine supports log tables +#define HTON_NO_PARTITION (1 << 8) //You can not partition these tables typedef struct st_thd_trans { @@ -893,7 +899,7 @@ class handler :public Sql_alloc virtual void start_bulk_insert(ha_rows rows) {} virtual int end_bulk_insert() {return 0; } public: - const handlerton *ht; /* storage engine of this handler */ + handlerton *ht; /* storage engine of this handler */ byte *ref; /* Pointer to current row */ byte *dup_ref; /* Pointer to duplicate row */ @@ -943,7 +949,7 @@ public: */ Discrete_interval auto_inc_interval_for_cur_row; - handler(const handlerton *ht_arg, TABLE_SHARE *share_arg) + handler(handlerton *ht_arg, TABLE_SHARE *share_arg) :table_share(share_arg), estimation_rows_to_insert(0), ht(ht_arg), ref(0), key_used_on_scan(MAX_KEY), active_index(MAX_KEY), ref_length(sizeof(my_off_t)), @@ -1716,7 +1722,7 @@ void trans_register_ha(THD *thd, bool all, handlerton *ht); int ha_reset_logs(THD *thd); int ha_binlog_index_purge_file(THD *thd, const char *file); void ha_reset_slave(THD *thd); -void ha_binlog_log_query(THD *thd, const handlerton *db_type, +void ha_binlog_log_query(THD *thd, handlerton *db_type, enum_binlog_command binlog_command, const char *query, uint query_length, const char *db, const char *table_name); diff --git a/sql/item.cc b/sql/item.cc index 6f57977e9d6..f8a8b4a6272 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1210,6 +1210,7 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, split_sum_func(thd, ref_pointer_array, fields); } else if ((type() == SUM_FUNC_ITEM || (used_tables() & ~PARAM_TABLE_BIT)) && + type() != SUBSELECT_ITEM && (type() != REF_ITEM || ((Item_ref*)this)->ref_type() == Item_ref::VIEW_REF)) { @@ -5711,11 +5712,6 @@ void Item_trigger_field::cleanup() } -/* - If item is a const function, calculate it and return a const item - The original item is freed if not returned -*/ - Item_result item_cmp_type(Item_result a,Item_result b) { if (a == STRING_RESULT && b == STRING_RESULT) diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 0aa6d432966..135e4596996 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -66,12 +66,10 @@ static void agg_result_type(Item_result *type, Item **items, uint nitems) /* Aggregates result types from the array of items. 
- SYNOPSIS: + SYNOPSIS agg_cmp_type() - thd thread handle - type [out] the aggregated type - items array of items to aggregate the type from - nitems number of items in the array + items array of items to aggregate the type from + nitems number of items in the array DESCRIPTION This function aggregates result types from the array of items. Found type @@ -79,12 +77,43 @@ static void agg_result_type(Item_result *type, Item **items, uint nitems) Aggregation itself is performed by the item_cmp_type() function. */ -static void agg_cmp_type(THD *thd, Item_result *type, Item **items, uint nitems) +static Item_result agg_cmp_type(Item **items, uint nitems) { uint i; - type[0]= items[0]->result_type(); + Item_result type= items[0]->result_type(); for (i= 1 ; i < nitems ; i++) - type[0]= item_cmp_type(type[0], items[i]->result_type()); + type= item_cmp_type(type, items[i]->result_type()); + return type; +} + + +/* + Collects different types for comparison of first item with each other items + + SYNOPSIS + collect_cmp_types() + items Array of items to collect types from + nitems Number of items in the array + + DESCRIPTION + This function collects different result types for comparison of the first + item in the list with each of the remaining items in the 'items' array. + + RETURN + Bitmap of collected types +*/ + +static uint collect_cmp_types(Item **items, uint nitems) +{ + uint i; + uint found_types; + Item_result left_result= items[0]->result_type(); + DBUG_ASSERT(nitems > 1); + found_types= 0; + for (i= 1; i < nitems ; i++) + found_types|= 1<< (uint)item_cmp_type(left_result, + items[i]->result_type()); + return found_types; } @@ -1117,7 +1146,7 @@ void Item_func_between::fix_length_and_dec() */ if (!args[0] || !args[1] || !args[2]) return; - agg_cmp_type(thd, &cmp_type, args, 3); + cmp_type= agg_cmp_type(args, 3); if (cmp_type == STRING_RESULT && agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV, 1)) return; @@ -1597,94 +1626,65 @@ Item_func_nullif::is_null() return (null_value= (!cmp.compare() ? 1 : args[0]->null_value)); } + /* - CASE expression Return the matching ITEM or NULL if all compares (including else) failed + + SYNOPSIS + find_item() + str Buffer string + + DESCRIPTION + Find and return matching items for CASE or ELSE item if all compares + are failed or NULL if ELSE item isn't defined. + + IMPLEMENTATION + In order to do correct comparisons of the CASE expression (the expression + between CASE and the first WHEN) with each WHEN expression several + comparators are used. One for each result type. CASE expression can be + evaluated up to # of different result types are used. To check whether + the CASE expression already was evaluated for a particular result type + a bit mapped variable value_added_map is used. Result types are mapped + to it according to their int values i.e. STRING_RESULT is mapped to bit + 0, REAL_RESULT to bit 1, so on. 
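A small sketch, not part of the patch, of the bit mapping that value_added_map (and collect_cmp_types() above) relies on: each Item_result enumerator selects one bit of a plain uint, so "which comparison types occur" and "which types were already evaluated" are both cheap bitmask tests.

  uint value_added_map= 0;
  Item_result cur_type= STRING_RESULT;     /* comparison type of one WHEN */
  if (!(value_added_map & (1U << (uint) cur_type)))
  {
    /* first WHEN of this type: evaluate the CASE expression and cache it */
    value_added_map|= 1U << (uint) cur_type;
  }
  /* later WHEN branches of the same type reuse the cached value */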
+ + RETURN + NULL - Nothing found and there is no ELSE expression defined + item - Found item or ELSE item if defined and all comparisons are + failed */ Item *Item_func_case::find_item(String *str) { - String *first_expr_str, *tmp; - my_decimal *first_expr_dec, first_expr_dec_val; - longlong first_expr_int; - double first_expr_real; - char buff[MAX_FIELD_WIDTH]; - String buff_str(buff,sizeof(buff),default_charset()); + uint value_added_map= 0; - /* These will be initialized later */ - LINT_INIT(first_expr_str); - LINT_INIT(first_expr_int); - LINT_INIT(first_expr_real); - LINT_INIT(first_expr_dec); - - if (first_expr_num != -1) + if (first_expr_num == -1) { - switch (cmp_type) - { - case STRING_RESULT: - // We can't use 'str' here as this may be overwritten - if (!(first_expr_str= args[first_expr_num]->val_str(&buff_str))) - return else_expr_num != -1 ? args[else_expr_num] : 0; // Impossible - break; - case INT_RESULT: - first_expr_int= args[first_expr_num]->val_int(); - if (args[first_expr_num]->null_value) - return else_expr_num != -1 ? args[else_expr_num] : 0; - break; - case REAL_RESULT: - first_expr_real= args[first_expr_num]->val_real(); - if (args[first_expr_num]->null_value) - return else_expr_num != -1 ? args[else_expr_num] : 0; - break; - case DECIMAL_RESULT: - first_expr_dec= args[first_expr_num]->val_decimal(&first_expr_dec_val); - if (args[first_expr_num]->null_value) - return else_expr_num != -1 ? args[else_expr_num] : 0; - break; - case ROW_RESULT: - default: - // This case should never be chosen - DBUG_ASSERT(0); - break; - } - } - - // Compare every WHEN argument with it and return the first match - for (uint i=0 ; i < ncases ; i+=2) - { - if (first_expr_num == -1) + for (uint i=0 ; i < ncases ; i+=2) { // No expression between CASE and the first WHEN if (args[i]->val_bool()) return args[i+1]; continue; } - switch (cmp_type) { - case STRING_RESULT: - if ((tmp=args[i]->val_str(str))) // If not null - if (sortcmp(tmp,first_expr_str,cmp_collation.collation)==0) - return args[i+1]; - break; - case INT_RESULT: - if (args[i]->val_int()==first_expr_int && !args[i]->null_value) - return args[i+1]; - break; - case REAL_RESULT: - if (args[i]->val_real() == first_expr_real && !args[i]->null_value) - return args[i+1]; - break; - case DECIMAL_RESULT: + } + else + { + /* Compare every WHEN argument with it and return the first match */ + for (uint i=0 ; i < ncases ; i+=2) { - my_decimal value; - if (my_decimal_cmp(args[i]->val_decimal(&value), first_expr_dec) == 0) - return args[i+1]; - break; - } - case ROW_RESULT: - default: - // This case should never be chosen - DBUG_ASSERT(0); - break; + cmp_type= item_cmp_type(left_result_type, args[i]->result_type()); + DBUG_ASSERT(cmp_type != ROW_RESULT); + DBUG_ASSERT(cmp_items[(uint)cmp_type]); + if (!(value_added_map & (1<<(uint)cmp_type))) + { + cmp_items[(uint)cmp_type]->store_value(args[first_expr_num]); + if ((null_value=args[first_expr_num]->null_value)) + return else_expr_num != -1 ? 
args[else_expr_num] : 0; + value_added_map|= 1<<(uint)cmp_type; + } + if (!cmp_items[(uint)cmp_type]->cmp(args[i]) && !args[i]->null_value) + return args[i + 1]; } } // No, WHEN clauses all missed, return ELSE expression @@ -1791,7 +1791,7 @@ void Item_func_case::fix_length_and_dec() Item **agg; uint nagg; THD *thd= current_thd; - + uint found_types= 0; if (!(agg= (Item**) sql_alloc(sizeof(Item*)*(ncases+1)))) return; @@ -1818,16 +1818,31 @@ void Item_func_case::fix_length_and_dec() */ if (first_expr_num != -1) { + uint i; agg[0]= args[first_expr_num]; + left_result_type= agg[0]->result_type(); + for (nagg= 0; nagg < ncases/2 ; nagg++) agg[nagg+1]= args[nagg*2]; nagg++; - agg_cmp_type(thd, &cmp_type, agg, nagg); - if ((cmp_type == STRING_RESULT) && - agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV, 1)) - return; + found_types= collect_cmp_types(agg, nagg); + + for (i= 0; i <= (uint)DECIMAL_RESULT; i++) + { + if (found_types & (1 << i) && !cmp_items[i]) + { + DBUG_ASSERT((Item_result)i != ROW_RESULT); + if ((Item_result)i == STRING_RESULT && + agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV, 1)) + return; + if (!(cmp_items[i]= + cmp_item::get_comparator((Item_result)i, + cmp_collation.collation))) + return; + } + } } - + if (else_expr_num == -1 || args[else_expr_num]->maybe_null) maybe_null=1; @@ -2412,16 +2427,14 @@ static int srtcmp_in(CHARSET_INFO *cs, const String *x,const String *y) void Item_func_in::fix_length_and_dec() { Item **arg, **arg_end; - uint const_itm= 1; + bool const_itm= 1; THD *thd= current_thd; + uint found_types= 0; + uint type_cnt= 0, i; + left_result_type= args[0]->result_type(); + found_types= collect_cmp_types(args, arg_count); - agg_cmp_type(thd, &cmp_type, args, arg_count); - - if (cmp_type == STRING_RESULT && - agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1)) - return; - - for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++) + for (arg= args + 1, arg_end= args + arg_count; arg != arg_end ; arg++) { if (!arg[0]->const_item()) { @@ -2429,26 +2442,39 @@ void Item_func_in::fix_length_and_dec() break; } } - + for (i= 0; i <= (uint)DECIMAL_RESULT; i++) + { + if (found_types & 1 << i) + (type_cnt)++; + } /* - Row item with NULLs inside can return NULL or FALSE => + Row item with NULLs inside can return NULL or FALSE => they can't be processed as static */ - if (const_itm && !nulls_in_row()) + if (type_cnt == 1 && const_itm && !nulls_in_row()) { + uint tmp_type; + Item_result cmp_type; + /* Only one cmp type was found. 
Extract it here */ + for (tmp_type= 0; found_types - 1; found_types>>= 1) + tmp_type++; + cmp_type= (Item_result)tmp_type; + switch (cmp_type) { case STRING_RESULT: - array=new in_string(arg_count-1,(qsort2_cmp) srtcmp_in, + if (agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1)) + return; + array=new in_string(arg_count - 1,(qsort2_cmp) srtcmp_in, cmp_collation.collation); break; case INT_RESULT: - array= new in_longlong(arg_count-1); + array= new in_longlong(arg_count - 1); break; case REAL_RESULT: - array= new in_double(arg_count-1); + array= new in_double(arg_count - 1); break; case ROW_RESULT: - array= new in_row(arg_count-1, args[0]); + array= new in_row(arg_count - 1, args[0]); break; case DECIMAL_RESULT: array= new in_decimal(arg_count - 1); @@ -2468,15 +2494,25 @@ void Item_func_in::fix_length_and_dec() else have_null= 1; } - if ((array->used_count=j)) + if ((array->used_count= j)) array->sort(); } } else { - in_item= cmp_item::get_comparator(cmp_type, cmp_collation.collation); - if (cmp_type == STRING_RESULT) - in_item->cmp_charset= cmp_collation.collation; + for (i= 0; i <= (uint) DECIMAL_RESULT; i++) + { + if (found_types & (1 << i) && !cmp_items[i]) + { + if ((Item_result)i == STRING_RESULT && + agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1)) + return; + if (!(cmp_items[i]= + cmp_item::get_comparator((Item_result)i, + cmp_collation.collation))) + return; + } + } } maybe_null= args[0]->maybe_null; max_length= 1; @@ -2495,25 +2531,61 @@ void Item_func_in::print(String *str) } +/* + Evaluate the function and return its value. + + SYNOPSIS + val_int() + + DESCRIPTION + Evaluate the function and return its value. + + IMPLEMENTATION + If the array object is defined then the value of the function is + calculated by means of this array. + Otherwise several cmp_item objects are used in order to do correct + comparison of left expression and an expression from the values list. + One cmp_item object correspond to one used comparison type. Left + expression can be evaluated up to number of different used comparison + types. A bit mapped variable value_added_map is used to check whether + the left expression already was evaluated for a particular result type. + Result types are mapped to it according to their integer values i.e. + STRING_RESULT is mapped to bit 0, REAL_RESULT to bit 1, so on. 
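A hedged example of the dispatch just described, assuming item_cmp_type(STRING_RESULT, INT_RESULT) resolves to REAL_RESULT (which the DOUBLE-based comparison of mixed string/number lists suggests): for a CHAR column f and the predicate f IN ('a', 1), collect_cmp_types() reports two types, so even though all values are constants no in_vector array is built and val_int() falls back to one cmp_item per type.

  uint found_types= (1 << (uint) STRING_RESULT) | (1 << (uint) REAL_RESULT);
  uint type_cnt= 0;
  for (uint i= 0; i <= (uint) DECIMAL_RESULT; i++)
    if (found_types & (1 << i))
      type_cnt++;
  /*
    type_cnt == 2 here, so Item_func_in::fix_length_and_dec() creates
    cmp_items[STRING_RESULT] and cmp_items[REAL_RESULT] instead of the
    sorted array used for the single-type, all-constant case.
  */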
+ + RETURN + Value of the function +*/ + longlong Item_func_in::val_int() { + cmp_item *in_item; DBUG_ASSERT(fixed == 1); + uint value_added_map= 0; if (array) { int tmp=array->find(args[0]); null_value=args[0]->null_value || (!tmp && have_null); return (longlong) (!null_value && tmp != negated); } - in_item->store_value(args[0]); - if ((null_value=args[0]->null_value)) - return 0; - have_null= 0; - for (uint i=1 ; i < arg_count ; i++) + + for (uint i= 1 ; i < arg_count ; i++) { + Item_result cmp_type= item_cmp_type(left_result_type, args[i]->result_type()); + in_item= cmp_items[(uint)cmp_type]; + DBUG_ASSERT(in_item); + if (!(value_added_map & (1 << (uint)cmp_type))) + { + in_item->store_value(args[0]); + if ((null_value=args[0]->null_value)) + return 0; + have_null= 0; + value_added_map|= 1 << (uint)cmp_type; + } if (!in_item->cmp(args[i]) && !args[i]->null_value) return (longlong) (!negated); have_null|= args[i]->null_value; } + null_value= have_null; return (longlong) (!null_value && negated); } diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index a9149ade097..f2a8fd7db63 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -589,49 +589,6 @@ public: }; -class Item_func_case :public Item_func -{ - int first_expr_num, else_expr_num; - enum Item_result cached_result_type; - String tmp_value; - uint ncases; - Item_result cmp_type; - DTCollation cmp_collation; -public: - Item_func_case(List &list, Item *first_expr_arg, Item *else_expr_arg) - :Item_func(), first_expr_num(-1), else_expr_num(-1), - cached_result_type(INT_RESULT) - { - ncases= list.elements; - if (first_expr_arg) - { - first_expr_num= list.elements; - list.push_back(first_expr_arg); - } - if (else_expr_arg) - { - else_expr_num= list.elements; - list.push_back(else_expr_arg); - } - set_arguments(list); - } - double val_real(); - longlong val_int(); - String *val_str(String *); - my_decimal *val_decimal(my_decimal *); - bool fix_fields(THD *thd, Item **ref); - void fix_length_and_dec(); - uint decimal_precision() const; - table_map not_null_tables() const { return 0; } - enum Item_result result_type () const { return cached_result_type; } - const char *func_name() const { return "case"; } - void print(String *str); - Item *find_item(String *str); - CHARSET_INFO *compare_collation() { return cmp_collation.collation; } - bool check_partition_func_processor(byte *bool_arg) { return 0;} -}; - - /* Functions to handle the optimized IN */ @@ -686,6 +643,7 @@ public: { return test(compare(collation, base + pos1*size, base + pos2*size)); } + virtual Item_result result_type()= 0; }; class in_string :public in_vector @@ -707,6 +665,7 @@ public: Item_string *to= (Item_string*)item; to->str_value= *str; } + Item_result result_type() { return STRING_RESULT; } }; class in_longlong :public in_vector @@ -729,6 +688,7 @@ public: { ((Item_int*)item)->value= ((longlong*)base)[pos]; } + Item_result result_type() { return INT_RESULT; } }; class in_double :public in_vector @@ -746,6 +706,7 @@ public: { ((Item_float*)item)->value= ((double*) base)[pos]; } + Item_result result_type() { return REAL_RESULT; } }; @@ -766,6 +727,8 @@ public: Item_decimal *item_dec= (Item_decimal*)item; item_dec->set_decimal_value(dec); } + Item_result result_type() { return DECIMAL_RESULT; } + }; @@ -796,7 +759,9 @@ class cmp_item_string :public cmp_item protected: String *value_res; public: + cmp_item_string () {} cmp_item_string (CHARSET_INFO *cs) { cmp_charset= cs; } + void set_charset(CHARSET_INFO *cs) { cmp_charset= cs; } friend class cmp_item_sort_string; 
friend class cmp_item_sort_string_in_static; }; @@ -807,6 +772,8 @@ protected: char value_buff[STRING_BUFFER_USUAL_SIZE]; String value; public: + cmp_item_sort_string(): + cmp_item_string() {} cmp_item_sort_string(CHARSET_INFO *cs): cmp_item_string(cs), value(value_buff, sizeof(value_buff), cs) {} @@ -828,6 +795,11 @@ public: return sortcmp(value_res, cmp->value_res, cmp_charset); } cmp_item *make_same(); + void set_charset(CHARSET_INFO *cs) + { + cmp_charset= cs; + value.set_quick(value_buff, sizeof(value_buff), cs); + } }; class cmp_item_int :public cmp_item @@ -908,6 +880,7 @@ public: ~in_row(); void set(uint pos,Item *item); byte *get_value(Item *item); + Item_result result_type() { return ROW_RESULT; } }; /* @@ -943,18 +916,109 @@ public: } }; + +/* + The class Item_func_case is the CASE ... WHEN ... THEN ... END function + implementation. + + When there is no expression between CASE and the first WHEN + (the CASE expression) then this function simple checks all WHEN expressions + one after another. When some WHEN expression evaluated to TRUE then the + value of the corresponding THEN expression is returned. + + When the CASE expression is specified then it is compared to each WHEN + expression individually. When an equal WHEN expression is found + corresponding THEN expression is returned. + In order to do correct comparisons several comparators are used. One for + each result type. Different result types that are used in particular + CASE ... END expression are collected in the fix_length_and_dec() member + function and only comparators for there result types are used. +*/ + +class Item_func_case :public Item_func +{ + int first_expr_num, else_expr_num; + enum Item_result cached_result_type, left_result_type; + String tmp_value; + uint ncases; + Item_result cmp_type; + DTCollation cmp_collation; + cmp_item *cmp_items[5]; /* For all result types */ + cmp_item *case_item; +public: + Item_func_case(List &list, Item *first_expr_arg, Item *else_expr_arg) + :Item_func(), first_expr_num(-1), else_expr_num(-1), + cached_result_type(INT_RESULT), left_result_type(INT_RESULT), case_item(0) + { + ncases= list.elements; + if (first_expr_arg) + { + first_expr_num= list.elements; + list.push_back(first_expr_arg); + } + if (else_expr_arg) + { + else_expr_num= list.elements; + list.push_back(else_expr_arg); + } + set_arguments(list); + bzero(&cmp_items, sizeof(cmp_items)); + } + double val_real(); + longlong val_int(); + String *val_str(String *); + my_decimal *val_decimal(my_decimal *); + bool fix_fields(THD *thd, Item **ref); + void fix_length_and_dec(); + uint decimal_precision() const; + table_map not_null_tables() const { return 0; } + enum Item_result result_type () const { return cached_result_type; } + const char *func_name() const { return "case"; } + void print(String *str); + Item *find_item(String *str); + CHARSET_INFO *compare_collation() { return cmp_collation.collation; } + bool check_partition_func_processor(byte *bool_arg) { return 0;} + void cleanup() + { + uint i; + DBUG_ENTER("Item_func_case::cleanup"); + Item_func::cleanup(); + for (i= 0; i <= (uint)DECIMAL_RESULT; i++) + { + delete cmp_items[i]; + cmp_items[i]= 0; + } + DBUG_VOID_RETURN; + } +}; + +/* + The Item_func_in class implements the in_expr IN(values_list) function. + + The current implementation distinguishes 2 cases: + 1) all items in the value_list are constants and have the same + result type. This case is handled by in_vector class. 
+ 2) items in the value_list have different result types or there is some + non-constant items. + In this case Item_func_in employs several cmp_item objects to performs + comparisons of in_expr and an item from the values_list. One cmp_item + object for each result type. Different result types are collected in the + fix_length_and_dec() member function by means of collect_cmp_types() + function. +*/ class Item_func_in :public Item_func_opt_neg { public: - Item_result cmp_type; in_vector *array; - cmp_item *in_item; bool have_null; + Item_result left_result_type; + cmp_item *cmp_items[5]; /* One cmp_item for each result type */ DTCollation cmp_collation; Item_func_in(List &list) - :Item_func_opt_neg(list), array(0), in_item(0), have_null(0) + :Item_func_opt_neg(list), array(0), have_null(0) { + bzero(&cmp_items, sizeof(cmp_items)); allowed_arg_cols= 0; // Fetch this value from first argument } longlong val_int(); @@ -963,12 +1027,16 @@ public: uint decimal_precision() const { return 1; } void cleanup() { + uint i; DBUG_ENTER("Item_func_in::cleanup"); Item_int_func::cleanup(); delete array; - delete in_item; array= 0; - in_item= 0; + for (i= 0; i <= (uint)DECIMAL_RESULT; i++) + { + delete cmp_items[i]; + cmp_items[i]= 0; + } DBUG_VOID_RETURN; } optimize_type select_optimize() const diff --git a/sql/item_sum.cc b/sql/item_sum.cc index cf122f565ff..73e2c5e6935 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -246,7 +246,27 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) aggr_sl->inner_sum_func_list->next= this; } aggr_sl->inner_sum_func_list= this; - + aggr_sl->with_sum_func= 1; + + /* + Mark Item_subselect(s) as containing aggregate function all the way up + to aggregate function's calculation context. + Note that we must not mark the Item of calculation context itself + because with_sum_func on the calculation context st_select_lex is + already set above. + + with_sum_func being set for an Item means that this Item refers + (somewhere in it, e.g. one of its arguments if it's a function) directly + or through intermediate items to an aggregate function that is calculated + in a context "outside" of the Item (e.g. in the current or outer select). + + with_sum_func being set for an st_select_lex means that this st_select_lex + has aggregate functions directly referenced (i.e. not through a sub-select). 
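For orientation, the propagation loop from the hunk above repeated with explanatory comments, together with a hypothetical query: in SELECT a, (SELECT t2.x FROM t2 WHERE t2.y = MAX(t1.b)) FROM t1 GROUP BY a, the MAX(t1.b) appears inside the subquery but is aggregated in the outer select, so the Item_subselect wrapping the inner select must be marked.

  for (sl= thd->lex->current_select;                 /* select containing the
                                                        aggregate syntactically */
       sl && sl != aggr_sl && sl->master_unit()->item;   /* stop at the
                                                        aggregation select or at
                                                        the top-level unit      */
       sl= sl->master_unit()->outer_select())        /* step one nesting level out */
    sl->master_unit()->item->with_sum_func= 1;       /* flag the Item_subselect */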
+ */ + for (sl= thd->lex->current_select; + sl && sl != aggr_sl && sl->master_unit()->item; + sl= sl->master_unit()->outer_select() ) + sl->master_unit()->item->with_sum_func= 1; } return FALSE; } diff --git a/sql/log.cc b/sql/log.cc index 9875b16f8e5..05758fd6e7d 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -46,13 +46,13 @@ static Muted_query_log_event invisible_commit; static bool test_if_number(const char *str, long *res, bool allow_wildcards); -static int binlog_init(); -static int binlog_close_connection(THD *thd); -static int binlog_savepoint_set(THD *thd, void *sv); -static int binlog_savepoint_rollback(THD *thd, void *sv); -static int binlog_commit(THD *thd, bool all); -static int binlog_rollback(THD *thd, bool all); -static int binlog_prepare(THD *thd, bool all); +static int binlog_init(void *p); +static int binlog_close_connection(handlerton *hton, THD *thd); +static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv); +static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv); +static int binlog_commit(handlerton *hton, THD *thd, bool all); +static int binlog_rollback(handlerton *hton, THD *thd, bool all); +static int binlog_prepare(handlerton *hton, THD *thd, bool all); sql_print_message_func sql_print_message_handlers[3] = { @@ -1171,7 +1171,7 @@ int binlog_init(void *p) return 0; } -static int binlog_close_connection(THD *thd) +static int binlog_close_connection(handlerton *hton, THD *thd) { binlog_trx_data *const trx_data= (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; @@ -1184,7 +1184,8 @@ static int binlog_close_connection(THD *thd) } static int -binlog_end_trans(THD *thd, binlog_trx_data *trx_data, Log_event *end_ev) +binlog_end_trans(THD *thd, binlog_trx_data *trx_data, + Log_event *end_ev) { DBUG_ENTER("binlog_end_trans"); int error=0; @@ -1238,7 +1239,7 @@ binlog_end_trans(THD *thd, binlog_trx_data *trx_data, Log_event *end_ev) DBUG_RETURN(error); } -static int binlog_prepare(THD *thd, bool all) +static int binlog_prepare(handlerton *hton, THD *thd, bool all) { /* do nothing. @@ -1249,7 +1250,7 @@ static int binlog_prepare(THD *thd, bool all) return 0; } -static int binlog_commit(THD *thd, bool all) +static int binlog_commit(handlerton *hton, THD *thd, bool all) { DBUG_ENTER("binlog_commit"); binlog_trx_data *const trx_data= @@ -1273,7 +1274,7 @@ static int binlog_commit(THD *thd, bool all) DBUG_RETURN(binlog_end_trans(thd, trx_data, &invisible_commit)); } -static int binlog_rollback(THD *thd, bool all) +static int binlog_rollback(handlerton *hton, THD *thd, bool all) { DBUG_ENTER("binlog_rollback"); int error=0; @@ -1326,7 +1327,7 @@ static int binlog_rollback(THD *thd, bool all) that case there is no need to have it in the binlog). 
*/ -static int binlog_savepoint_set(THD *thd, void *sv) +static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv) { DBUG_ENTER("binlog_savepoint_set"); binlog_trx_data *const trx_data= @@ -1342,7 +1343,7 @@ static int binlog_savepoint_set(THD *thd, void *sv) DBUG_RETURN(error); } -static int binlog_savepoint_rollback(THD *thd, void *sv) +static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv) { DBUG_ENTER("binlog_savepoint_rollback"); binlog_trx_data *const trx_data= @@ -4678,7 +4679,7 @@ err1: } struct st_mysql_storage_engine binlog_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, binlog_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(binlog) { diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index fe42531c35c..6103e8b07de 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -458,7 +458,8 @@ enum enum_parsing_place NO_MATTER, IN_HAVING, SELECT_LIST, - IN_WHERE + IN_WHERE, + IN_ON }; struct st_table; @@ -1637,19 +1638,7 @@ extern SHOW_COMP_OPTION have_ndbcluster; extern SHOW_COMP_OPTION have_partition_db; extern SHOW_COMP_OPTION have_merge_db; -#ifdef WITH_CSV_STORAGE_ENGINE -extern handlerton *tina_hton; -#endif -#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE -extern handlerton *ndbcluster_hton; -#endif -#ifdef WITH_PARTITION_STORAGE_ENGINE extern handlerton *partition_hton; -#endif -#ifdef WITH_MYISAMMRG_STORAGE_ENGINE -extern handlerton *myisammrg_hton; -#endif - extern handlerton *myisam_hton; extern handlerton *heap_hton; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index a3844ea8fc0..ba7db6aaf82 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -354,6 +354,14 @@ my_bool opt_safe_user_create = 0, opt_no_mix_types = 0; my_bool opt_show_slave_auth_info, opt_sql_bin_update = 0; my_bool opt_log_slave_updates= 0; my_bool opt_innodb; + +/* + Legacy global handlerton. These will be removed (please do not add more). +*/ +handlerton *heap_hton; +handlerton *myisam_hton; +handlerton *partition_hton; + #ifdef WITH_INNOBASE_STORAGE_ENGINE extern ulong innobase_fast_shutdown; extern ulong innobase_large_page_size; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 05f0341dbe7..1059818373b 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -4913,9 +4913,17 @@ static SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, Item_func *cond_func, { Item_func_in *func=(Item_func_in*) cond_func; + /* + Array for IN() is constructed when all values have the same result + type. Tree won't be built for values with different result types, + so we check it here to avoid unnecessary work. + */ + if (!func->array) + break; + if (inv) { - if (func->array && func->cmp_type != ROW_RESULT) + if (func->array->result_type() != ROW_RESULT) { /* We get here for conditions in form "t.key NOT IN (c1, c2, ...)", diff --git a/sql/partition_info.cc b/sql/partition_info.cc index 3e8ad9bb78b..b3c7c1b80e7 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -443,11 +443,9 @@ bool partition_info::check_engine_mix(handlerton **engine_array, uint no_parts) DBUG_RETURN(TRUE); } } while (++i < no_parts); - if (engine_array[0] == myisammrg_hton || - engine_array[0] == tina_hton) + if (engine_array[0]->flags & HTON_NO_PARTITION) { - my_error(ER_PARTITION_MERGE_ERROR, MYF(0), - engine_array[0] == myisammrg_hton ? 
"MyISAM Merge" : "CSV"); + my_error(ER_PARTITION_MERGE_ERROR, MYF(0)); DBUG_RETURN(TRUE); } DBUG_RETURN(FALSE); diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index c90e771fd40..a73b3b97f1b 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -5960,9 +5960,9 @@ ER_EVENT_SET_VAR_ERROR eng "Error during starting/stopping of the scheduler. Error code %u" ger "Fehler während des Startens oder Anhalten des Schedulers. Fehlercode %u" ER_PARTITION_MERGE_ERROR - eng "%s handler cannot be used in partitioned tables" - ger "%s-Handler kann in partitionierten Tabellen nicht verwendet werden" - swe "%s kan inte användas i en partitionerad tabell" + eng "Engine cannot be used in partitioned tables" + ger "Engine kann in partitionierten Tabellen nicht verwendet werden" + swe "Engine inte användas i en partitionerad tabell" ER_CANT_ACTIVATE_LOG eng "Cannot activate '%-.64s' log" ger "Kann Logdatei '%-.64s' nicht aktivieren" @@ -5990,4 +5990,6 @@ ER_UNSUPORTED_LOG_ENGINE eng "This storage engine cannot be used for log tables"" ER_BAD_LOG_STATEMENT eng "You cannot '%s' a log table if logging is enabled" +ER_NON_INSERTABLE_TABLE + eng "The target table %-.100s of the %s is not insertable-into" diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 9e3049d433b..a530c7f7fdc 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1461,8 +1461,11 @@ void update_non_unique_table_error(TABLE_LIST *update, */ if (update->view) { + /* Issue the ER_NON_INSERTABLE_TABLE error for an INSERT */ if (update->view == duplicate->view) - my_error(ER_NON_UPDATABLE_TABLE, MYF(0), update->alias, operation); + my_error(!strncmp(operation, "INSERT", 6) ? + ER_NON_INSERTABLE_TABLE : ER_NON_UPDATABLE_TABLE, MYF(0), + update->alias, operation); else my_error(ER_VIEW_PREVENT_UPDATE, MYF(0), (duplicate->view ? 
duplicate->alias : update->alias), diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index e0831b0f8ee..ee3b8aa79fe 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -2389,7 +2389,7 @@ Query_cache::register_tables_from_list(TABLE_LIST *tables_used, tables_used->engine_data)) DBUG_RETURN(0); - if (tables_used->table->s->db_type == myisammrg_hton) + if (tables_used->table->s->db_type->db_type == DB_TYPE_MRG_MYISAM) { ha_myisammrg *handler = (ha_myisammrg *) tables_used->table->file; MYRG_INFO *file = handler->myrg_info(); @@ -3013,7 +3013,7 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used, "other non-cacheable table(s)")); DBUG_RETURN(0); } - if (tables_used->table->s->db_type == myisammrg_hton) + if (tables_used->table->s->db_type->db_type == DB_TYPE_MRG_MYISAM) { ha_myisammrg *handler = (ha_myisammrg *)tables_used->table->file; MYRG_INFO *file = handler->myrg_info(); diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc index 2784e71ccae..46fae3ffb99 100644 --- a/sql/sql_cursor.cc +++ b/sql/sql_cursor.cc @@ -45,7 +45,7 @@ class Sensitive_cursor: public Server_side_cursor query_id_t query_id; struct Engine_info { - const handlerton *ht; + handlerton *ht; void *read_view; }; Engine_info ht_info[MAX_HA]; @@ -318,12 +318,12 @@ Sensitive_cursor::post_open(THD *thd) info= &ht_info[0]; for (handlerton **pht= thd->transaction.stmt.ht; *pht; pht++) { - const handlerton *ht= *pht; + handlerton *ht= *pht; close_at_commit|= test(ht->flags & HTON_CLOSE_CURSORS_AT_COMMIT); if (ht->create_cursor_read_view) { info->ht= ht; - info->read_view= (ht->create_cursor_read_view)(); + info->read_view= (ht->create_cursor_read_view)(ht, thd); ++info; } } @@ -433,7 +433,7 @@ Sensitive_cursor::fetch(ulong num_rows) thd->set_n_backup_active_arena(this, &backup_arena); for (info= ht_info; info->read_view ; info++) - (info->ht->set_cursor_read_view)(info->read_view); + (info->ht->set_cursor_read_view)(info->ht, thd, info->read_view); join->fetch_limit+= num_rows; @@ -454,7 +454,7 @@ Sensitive_cursor::fetch(ulong num_rows) reset_thd(thd); for (info= ht_info; info->read_view; info++) - (info->ht->set_cursor_read_view)(0); + (info->ht->set_cursor_read_view)(info->ht, thd, 0); if (error == NESTED_LOOP_CURSOR_LIMIT) { @@ -487,7 +487,7 @@ Sensitive_cursor::close() for (Engine_info *info= ht_info; info->read_view; info++) { - (info->ht->close_cursor_read_view)(info->read_view); + (info->ht->close_cursor_read_view)(info->ht, thd, info->read_view); info->read_view= 0; info->ht= 0; } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 946a6233bc2..c0bc6628754 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -112,7 +112,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, if (!table_list->updatable) { - my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "INSERT"); + my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, "INSERT"); return -1; } @@ -228,7 +228,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, (table_list->view && check_view_insertability(thd, table_list))) { - my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "INSERT"); + my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, "INSERT"); return -1; } diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 3f3fde03bce..d94c45c4bdd 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1722,7 +1722,8 @@ bool st_lex::can_be_merged() unit= unit->next_unit()) { if (unit->first_select()->parent_lex == this && - (unit->item == 0 || unit->item->place() != IN_WHERE)) + 
(unit->item == 0 || + (unit->item->place() != IN_WHERE && unit->item->place() != IN_ON))) { selects_allow_merge= 0; break; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index e195b2b71ac..ed91719fd46 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -70,7 +70,7 @@ static int join_tab_cmp_straight(const void* ptr1, const void* ptr2); static void find_best(JOIN *join,table_map rest_tables,uint index, double record_count,double read_time); static uint cache_record_length(JOIN *join,uint index); -static double prev_record_reads(JOIN *join,table_map found_ref); +static double prev_record_reads(JOIN *join, uint idx, table_map found_ref); static bool get_best_combination(JOIN *join); static store_key *get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables, @@ -3437,6 +3437,7 @@ set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key) join->positions[idx].table= table; join->positions[idx].key=key; join->positions[idx].records_read=1.0; /* This is a const table */ + join->positions[idx].ref_depend_map= 0; /* Move the const table as down as possible in best_ref */ JOIN_TAB **pos=join->best_ref+idx+1; @@ -3494,6 +3495,7 @@ best_access_path(JOIN *join, double best= DBL_MAX; double best_time= DBL_MAX; double records= DBL_MAX; + table_map best_ref_depends_map; double tmp; ha_rows rec; @@ -3522,13 +3524,20 @@ best_access_path(JOIN *join, /* Calculate how many key segments of the current key we can use */ start_key= keyuse; - do - { /* for each keypart */ + + do /* For each keypart */ + { uint keypart= keyuse->keypart; table_map best_part_found_ref= 0; double best_prev_record_reads= DBL_MAX; - do + + do /* For each way to access the keypart */ { + + /* + if 1. expression doesn't refer to forward tables + 2. we won't get two ref-or-null's + */ if (!(remaining_tables & keyuse->used_tables) && !(ref_or_null_part && (keyuse->optimize & KEY_OPTIMIZE_REF_OR_NULL))) @@ -3536,8 +3545,9 @@ best_access_path(JOIN *join, found_part|= keyuse->keypart_map; if (!(keyuse->used_tables & ~join->const_table_map)) const_part|= keyuse->keypart_map; - double tmp= prev_record_reads(join, (found_ref | - keyuse->used_tables)); + + double tmp= prev_record_reads(join, idx, (found_ref | + keyuse->used_tables)); if (tmp < best_prev_record_reads) { best_part_found_ref= keyuse->used_tables & ~join->const_table_map; @@ -3576,7 +3586,7 @@ best_access_path(JOIN *join, Really, there should be records=0.0 (yes!) but 1.0 would be probably safer */ - tmp= prev_record_reads(join, found_ref); + tmp= prev_record_reads(join, idx, found_ref); records= 1.0; } else @@ -3591,7 +3601,7 @@ best_access_path(JOIN *join, max_key_part= (uint) ~0; if ((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME) { - tmp = prev_record_reads(join, found_ref); + tmp = prev_record_reads(join, idx, found_ref); records=1.0; } else @@ -3728,7 +3738,30 @@ best_access_path(JOIN *join, { /* Check if we have statistic about the distribution */ if ((records= keyinfo->rec_per_key[max_key_part-1])) + { + /* + Fix for the case where the index statistics is too + optimistic: If + (1) We're considering ref(const) and there is quick select + on the same index, + (2) and that quick select uses more keyparts (i.e. it will + scan equal/smaller interval then this ref(const)) + (3) and E(#rows) for quick select is higher then our + estimate, + Then + We'll use E(#rows) from quick select. + + Q: Why do we choose to use 'ref'? Won't quick select be + cheaper in some cases ? + TODO: figure this out and adjust the plan choice if needed. 
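A worked example with hypothetical numbers for the (1)-(3) adjustment above: if the index statistics promise one row per ref(const) lookup on the first keypart, but a quick range select over more keyparts of the same index estimates 12 rows, the quick select scans an equal or narrower interval, so 12 is the safer fanout.

  double records=        1.0;    /* rec_per_key estimate for ref(const)      */
  double quick_rows_est= 12.0;   /* table->quick_rows[key], same index       */
  bool   ref_is_const=   true;   /* (1) !found_ref: no dependency on other
                                        tables' current rows                 */
  bool   more_keyparts=  true;   /* (2) quick_key_parts[key] > max_key_part  */

  if (ref_is_const && more_keyparts && records < quick_rows_est)   /* (3) */
    records= quick_rows_est;     /* trust the less optimistic estimate       */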
+ */ + if (!found_ref && table->quick_keys.is_set(key) && // (1) + table->quick_key_parts[key] > max_key_part && // (2) + records < (double)table->quick_rows[key]) // (3) + records= (double)table->quick_rows[key]; + tmp= records; + } else { /* @@ -3821,6 +3854,7 @@ best_access_path(JOIN *join, best_records= records; best_key= start_key; best_max_key_part= max_key_part; + best_ref_depends_map= found_ref; } } records= best_records; @@ -3949,6 +3983,8 @@ best_access_path(JOIN *join, best= tmp; records= rows2double(rnd_records); best_key= 0; + /* range/index_merge/ALL/index access method are "independent", so: */ + best_ref_depends_map= 0; } } @@ -3957,6 +3993,7 @@ best_access_path(JOIN *join, join->positions[idx].read_time= best; join->positions[idx].key= best_key; join->positions[idx].table= s; + join->positions[idx].ref_depend_map= best_ref_depends_map; if (!best_key && idx == join->const_tables && @@ -4724,17 +4761,85 @@ cache_record_length(JOIN *join,uint idx) } +/* + Get the number of different row combinations for subset of partial join + + SYNOPSIS + prev_record_reads() + join The join structure + idx Number of tables in the partial join order (i.e. the + partial join order is in join->positions[0..idx-1]) + found_ref Bitmap of tables for which we need to find # of distinct + row combinations. + + DESCRIPTION + Given a partial join order (in join->positions[0..idx-1]) and a subset of + tables within that join order (specified in found_ref), find out how many + distinct row combinations of subset tables will be in the result of the + partial join order. + + This is used as follows: Suppose we have a table accessed with a ref-based + method. The ref access depends on current rows of tables in found_ref. + We want to count # of different ref accesses. We assume two ref accesses + will be different if at least one of access parameters is different. + Example: consider a query + + SELECT * FROM t1, t2, t3 WHERE t1.key=c1 AND t2.key=c2 AND t3.key=t1.field + + and a join order: + t1, ref access on t1.key=c1 + t2, ref access on t2.key=c2 + t3, ref access on t3.key=t1.field + + For t1: n_ref_scans = 1, n_distinct_ref_scans = 1 + For t2: n_ref_scans = records_read(t1), n_distinct_ref_scans=1 + For t3: n_ref_scans = records_read(t1)*records_read(t2) + n_distinct_ref_scans = #records_read(t1) + + The reason for having this function (at least the latest version of it) + is that we need to account for buffering in join execution. + + An edge-case example: if we have a non-first table in join accessed via + ref(const) or ref(param) where there is a small number of different + values of param, then the access will likely hit the disk cache and will + not require any disk seeks. + + The proper solution would be to assume an LRU disk cache of some size, + calculate probability of cache hits, etc. For now we just count + identical ref accesses as one. + + RETURN + Expected number of row combinations +*/ + static double -prev_record_reads(JOIN *join,table_map found_ref) +prev_record_reads(JOIN *join, uint idx, table_map found_ref) { double found=1.0; - found_ref&= ~OUTER_REF_TABLE_BIT; - for (POSITION *pos=join->positions ; found_ref ; pos++) + POSITION *pos_end= join->positions - 1; + for (POSITION *pos= join->positions + idx - 1; pos != pos_end; pos--) { if (pos->table->table->map & found_ref) { - found_ref&= ~pos->table->table->map; - found*=pos->records_read; + found_ref|= pos->ref_depend_map; + /* + For the case of "t1 LEFT JOIN t2 ON ..." 
where t2 is a const table + with no matching row we will get position[t2].records_read==0. + Actually the size of output is one null-complemented row, therefore + we will use value of 1 whenever we get records_read==0. + + Note + - the above case can't occur if inner part of outer join has more + than one table: table with no matches will not be marked as const. + + - Ideally we should add 1 to records_read for every possible null- + complemented row. We're not doing it because: 1. it will require + non-trivial code and add overhead. 2. The value of records_read + is an inprecise estimate and adding 1 (or, in the worst case, + #max_nested_outer_joins=64-1) will not make it any more precise. + */ + if (pos->records_read) + found*= pos->records_read; } } return found; @@ -10478,6 +10583,7 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) tab->info="const row not found"; /* Mark for EXPLAIN that the row was not found */ pos->records_read=0.0; + pos->ref_depend_map= 0; if (!table->maybe_null || error > 0) DBUG_RETURN(error); } @@ -10503,6 +10609,7 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) tab->info="unique row not found"; /* Mark for EXPLAIN that the row was not found */ pos->records_read=0.0; + pos->ref_depend_map= 0; if (!table->maybe_null || error > 0) DBUG_RETURN(error); } diff --git a/sql/sql_select.h b/sql/sql_select.h index 3b0c312757d..eb6d2d5d34f 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -176,7 +176,13 @@ enum_nested_loop_state sub_select(JOIN *join,JOIN_TAB *join_tab, bool */ typedef struct st_position { + /* + The "fanout": number of output rows that will be produced (after + pushed down selection condition is applied) per each row combination of + previous tables. + */ double records_read; + /* Cost accessing the table in course of the entire complete join execution, i.e. cost of one access method use (e.g. 'range' or 'ref' scan ) times @@ -184,7 +190,15 @@ typedef struct st_position */ double read_time; JOIN_TAB *table; + + /* + NULL - 'index' or 'range' or 'index_merge' or 'ALL' access is used. + Other - [eq_]ref[_or_null] access is used. 
Pointer to {t.keypart1 = expr} + */ KEYUSE *key; + + /* If ref-based access is used: bitmap of tables this table depends on */ + table_map ref_depend_map; } POSITION; diff --git a/sql/sql_show.cc b/sql/sql_show.cc index ce7832569fe..45865adddcd 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -4999,7 +4999,7 @@ static my_bool run_hton_fill_schema_files(THD *thd, st_plugin_int *plugin, (run_hton_fill_schema_files_args *) arg; handlerton *hton= (handlerton *)plugin->data; if(hton->fill_files_table && hton->state == SHOW_OPTION_YES) - hton->fill_files_table(thd, args->tables, args->cond); + hton->fill_files_table(hton, thd, args->tables, args->cond); return false; } diff --git a/sql/sql_tablespace.cc b/sql/sql_tablespace.cc index 13dfb491af4..470fa5bc862 100644 --- a/sql/sql_tablespace.cc +++ b/sql/sql_tablespace.cc @@ -21,7 +21,7 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info) { int error= HA_ADMIN_NOT_IMPLEMENTED; - const handlerton *hton= ts_info->storage_engine; + handlerton *hton= ts_info->storage_engine; DBUG_ENTER("mysql_alter_tablespace"); /* @@ -42,7 +42,7 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info) if (hton->alter_tablespace) { - if ((error= hton->alter_tablespace(thd, ts_info))) + if ((error= hton->alter_tablespace(hton, thd, ts_info))) { if (error == HA_ADMIN_NOT_IMPLEMENTED) { diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 187d289cb16..7f6d935ff5e 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -1603,7 +1603,7 @@ bool insert_view_fields(THD *thd, List *list, TABLE_LIST *view) list->push_back(fld); else { - my_error(ER_NON_UPDATABLE_TABLE, MYF(0), view->alias, "INSERT"); + my_error(ER_NON_INSERTABLE_TABLE, MYF(0), view->alias, "INSERT"); DBUG_RETURN(TRUE); } } diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index bfe23d359ca..d55e32a2c42 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -6847,11 +6847,13 @@ join_table: /* Change the current name resolution context to a local context. */ if (push_new_name_resolution_context(YYTHD, $1, $3)) YYABORT; + Select->parsing_place= IN_ON; } expr { add_join_on($3,$6); Lex->pop_context(); + Select->parsing_place= NO_MATTER; } | table_ref STRAIGHT_JOIN table_factor ON @@ -6860,12 +6862,14 @@ join_table: /* Change the current name resolution context to a local context. */ if (push_new_name_resolution_context(YYTHD, $1, $3)) YYABORT; + Select->parsing_place= IN_ON; } expr { $3->straight=1; add_join_on($3,$6); Lex->pop_context(); + Select->parsing_place= NO_MATTER; } | table_ref normal_join table_ref USING @@ -6889,6 +6893,7 @@ join_table: /* Change the current name resolution context to a local context. */ if (push_new_name_resolution_context(YYTHD, $1, $5)) YYABORT; + Select->parsing_place= IN_ON; } expr { @@ -6896,6 +6901,7 @@ join_table: Lex->pop_context(); $5->outer_join|=JOIN_TYPE_LEFT; $$=$5; + Select->parsing_place= NO_MATTER; } | table_ref LEFT opt_outer JOIN_SYM table_factor { @@ -6920,6 +6926,7 @@ join_table: /* Change the current name resolution context to a local context. 
*/ if (push_new_name_resolution_context(YYTHD, $1, $5)) YYABORT; + Select->parsing_place= IN_ON; } expr { @@ -6928,6 +6935,7 @@ join_table: YYABORT; add_join_on($$, $8); Lex->pop_context(); + Select->parsing_place= NO_MATTER; } | table_ref RIGHT opt_outer JOIN_SYM table_factor { diff --git a/sql/unireg.cc b/sql/unireg.cc index 396ff4fba27..10e21654939 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -292,13 +292,19 @@ bool mysql_create_frm(THD *thd, const char *file_name, goto err3; { - /* Unescape all UCS2 intervals: were escaped in pack_headers */ + /* + Restore all UCS2 intervals. + HEX representation of them is not needed anymore. + */ List_iterator it(create_fields); create_field *field; while ((field=it++)) { - if (field->interval && field->charset->mbminlen > 1) - unhex_type2(field->interval); + if (field->save_interval) + { + field->interval= field->save_interval; + field->save_interval= 0; + } } } DBUG_RETURN(0); @@ -589,18 +595,36 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type, reclength=(uint) (field->offset+ data_offset + length); n_length+= (ulong) strlen(field->field_name)+1; field->interval_id=0; + field->save_interval= 0; if (field->interval) { uint old_int_count=int_count; if (field->charset->mbminlen > 1) { - /* Escape UCS2 intervals using HEX notation */ + /* + Escape UCS2 intervals using HEX notation to avoid + problems with delimiters between enum elements. + As the original representation is still needed in + the function make_empty_rec to create a record of + filled with default values it is saved in save_interval + The HEX representation is created from this copy. + */ + field->save_interval= field->interval; + field->interval= (TYPELIB*) sql_alloc(sizeof(TYPELIB)); + *field->interval= *field->save_interval; + field->interval->type_names= + (const char **) sql_alloc(sizeof(char*) * + (field->interval->count+1)); + field->interval->type_names[field->interval->count]= 0; + field->interval->type_lengths= + (uint *) sql_alloc(sizeof(uint) * field->interval->count); + for (uint pos= 0; pos < field->interval->count; pos++) { char *dst; - uint length= field->interval->type_lengths[pos], hex_length; - const char *src= field->interval->type_names[pos]; + uint length= field->save_interval->type_lengths[pos], hex_length; + const char *src= field->save_interval->type_names[pos]; hex_length= length * 2; field->interval->type_lengths[pos]= hex_length; field->interval->type_names[pos]= dst= sql_alloc(hex_length + 1); @@ -842,18 +866,19 @@ static bool make_empty_rec(THD *thd, File file,enum legacy_db_type table_type, /* regfield don't have to be deleted as it's allocated with sql_alloc() */ - Field *regfield= make_field(&share, - (char*) buff+field->offset + data_offset, - field->length, - null_pos + null_count / 8, - null_count & 7, - field->pack_flag, - field->sql_type, - field->charset, - field->geom_type, - field->unireg_check, - field->interval, - field->field_name); + Field *regfield=make_field(&share, + (char*) buff+field->offset + data_offset, + field->length, + null_pos + null_count / 8, + null_count & 7, + field->pack_flag, + field->sql_type, + field->charset, + field->geom_type, + field->unireg_check, + field->save_interval ? 
field->save_interval : + field->interval, + field->field_name); if (!regfield) goto err; // End of memory diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index c31bbfd612e..0fd5f0ed099 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -139,17 +139,21 @@ static HASH archive_open_tables; #define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption /* Static declarations for handerton */ -static handler *archive_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root); +static handler *archive_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root); +int archive_db_end(handlerton *hton, ha_panic_function type); + /* Number of rows that will force a bulk insert. */ #define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2 -handlerton *archive_hton; - -static handler *archive_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) +static handler *archive_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root) { - return new (mem_root) ha_archive(table); + return new (mem_root) ha_archive(hton, table); } /* @@ -178,6 +182,7 @@ static byte* archive_get_key(ARCHIVE_SHARE *share,uint *length, int archive_db_init(void *p) { DBUG_ENTER("archive_db_init"); + handlerton *archive_hton; if (archive_inited) DBUG_RETURN(FALSE); archive_hton= (handlerton *)p; @@ -226,13 +231,13 @@ int archive_db_done(void *p) } -int archive_db_end(ha_panic_function type) +int archive_db_end(handlerton *hton, ha_panic_function type) { return archive_db_done(NULL); } -ha_archive::ha_archive(TABLE_SHARE *table_arg) - :handler(archive_hton, table_arg), delayed_insert(0), bulk_insert(0) +ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg) + :handler(hton, table_arg), delayed_insert(0), bulk_insert(0) { /* Set our original buffer from pre-allocated memory */ buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info); @@ -1571,7 +1576,7 @@ bool ha_archive::check_and_repair(THD *thd) } struct st_mysql_storage_engine archive_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, archive_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(archive) { diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h index d1069afd87f..1a601c8451a 100644 --- a/storage/archive/ha_archive.h +++ b/storage/archive/ha_archive.h @@ -67,7 +67,7 @@ class ha_archive: public handler uint current_k_offset; public: - ha_archive(TABLE_SHARE *table_arg); + ha_archive(handlerton *hton, TABLE_SHARE *table_arg); ~ha_archive() { } @@ -139,5 +139,3 @@ public: bool check_and_repair(THD *thd); }; -int archive_db_end(ha_panic_function type); - diff --git a/storage/blackhole/ha_blackhole.cc b/storage/blackhole/ha_blackhole.cc index bef5ee656fe..7ab32cccf84 100644 --- a/storage/blackhole/ha_blackhole.cc +++ b/storage/blackhole/ha_blackhole.cc @@ -24,11 +24,11 @@ /* Static declarations for handlerton */ -handlerton *blackhole_hton; -static handler *blackhole_create_handler(TABLE_SHARE *table, +static handler *blackhole_create_handler(handlerton *hton, + TABLE_SHARE *table, MEM_ROOT *mem_root) { - return new (mem_root) ha_blackhole(table); + return new (mem_root) ha_blackhole(hton, table); } @@ -36,8 +36,9 @@ static handler *blackhole_create_handler(TABLE_SHARE *table, ** BLACKHOLE tables *****************************************************************************/ -ha_blackhole::ha_blackhole(TABLE_SHARE *table_arg) - :handler(blackhole_hton, table_arg) +ha_blackhole::ha_blackhole(handlerton *hton, + TABLE_SHARE 
*table_arg) + :handler(hton, table_arg) {} @@ -203,6 +204,7 @@ int ha_blackhole::index_last(byte * buf) static int blackhole_init(void *p) { + handlerton *blackhole_hton; blackhole_hton= (handlerton *)p; blackhole_hton->state= SHOW_OPTION_YES; blackhole_hton->db_type= DB_TYPE_BLACKHOLE_DB; @@ -212,7 +214,7 @@ static int blackhole_init(void *p) } struct st_mysql_storage_engine blackhole_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, blackhole_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(blackhole) { diff --git a/storage/blackhole/ha_blackhole.h b/storage/blackhole/ha_blackhole.h index 55c26f6f02e..54cec41fd26 100644 --- a/storage/blackhole/ha_blackhole.h +++ b/storage/blackhole/ha_blackhole.h @@ -28,7 +28,7 @@ class ha_blackhole: public handler THR_LOCK thr_lock; public: - ha_blackhole(TABLE_SHARE *table_arg); + ha_blackhole(handlerton *hton, TABLE_SHARE *table_arg); ~ha_blackhole() { } diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index eaa11d5b061..8f090470f49 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -74,7 +74,11 @@ static int write_meta_file(File meta_file, ha_rows rows, bool dirty); pthread_mutex_t tina_mutex; static HASH tina_open_tables; static int tina_init= 0; -static handler *tina_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root); +static handler *tina_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root); +int tina_end(handlerton *hton, ha_panic_function type); + off_t Transparent_file::read_next() { @@ -123,7 +127,6 @@ char Transparent_file::get_value(off_t offset) return buff[0]; } } -handlerton *tina_hton; /***************************************************************************** ** TINA tables @@ -150,6 +153,8 @@ static byte* tina_get_key(TINA_SHARE *share,uint *length, static int tina_init_func(void *p) { + handlerton *tina_hton; + if (!tina_init) { tina_hton= (handlerton *)p; @@ -161,7 +166,8 @@ static int tina_init_func(void *p) tina_hton->db_type= DB_TYPE_CSV_DB; tina_hton->create= tina_create_handler; tina_hton->panic= tina_end; - tina_hton->flags= HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES; + tina_hton->flags= (HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES | + HTON_NO_PARTITION); } return 0; } @@ -449,7 +455,7 @@ static int free_share(TINA_SHARE *share) DBUG_RETURN(result_code); } -int tina_end(ha_panic_function type) +int tina_end(handlerton *hton, ha_panic_function type) { return tina_done_func(NULL); } @@ -493,14 +499,16 @@ off_t find_eoln_buff(Transparent_file *data_buff, off_t begin, } -static handler *tina_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) +static handler *tina_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root) { - return new (mem_root) ha_tina(table); + return new (mem_root) ha_tina(hton, table); } -ha_tina::ha_tina(TABLE_SHARE *table_arg) - :handler(tina_hton, table_arg), +ha_tina::ha_tina(handlerton *hton, TABLE_SHARE *table_arg) + :handler(hton, table_arg), /* These definitions are found in handler.h They are not probably completely right. 
@@ -1516,7 +1524,7 @@ bool ha_tina::check_if_incompatible_data(HA_CREATE_INFO *info, } struct st_mysql_storage_engine csv_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, tina_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(csv) { diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h index 14ca848c855..f408e8f4a7d 100644 --- a/storage/csv/ha_tina.h +++ b/storage/csv/ha_tina.h @@ -129,7 +129,7 @@ private: int init_tina_writer(); public: - ha_tina(TABLE_SHARE *table_arg); + ha_tina(handlerton *hton, TABLE_SHARE *table_arg); ~ha_tina() { if (chain_alloced) @@ -212,5 +212,3 @@ public: int chain_append(); }; -int tina_end(ha_panic_function type); - diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc index c4c2524a118..68840a41879 100644 --- a/storage/example/ha_example.cc +++ b/storage/example/ha_example.cc @@ -73,7 +73,9 @@ #include -static handler *example_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root); +static handler *example_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root); static int example_init_func(); static bool example_init_func_for_handlerton(); static int example_panic(enum ha_panic_function flag); @@ -201,14 +203,16 @@ static int free_share(EXAMPLE_SHARE *share) } -static handler* example_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) +static handler* example_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root) { - return new (mem_root) ha_example(table); + return new (mem_root) ha_example(hton, table); } -ha_example::ha_example(TABLE_SHARE *table_arg) - :handler(example_hton, table_arg) +ha_example::ha_example(handlerton *hton, TABLE_SHARE *table_arg) + :handler(hton, table_arg) {} /* @@ -703,7 +707,7 @@ int ha_example::create(const char *name, TABLE *table_arg, } struct st_mysql_storage_engine example_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, example_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(example) diff --git a/storage/example/ha_example.h b/storage/example/ha_example.h index 956dc62311c..2f5dbc29c56 100644 --- a/storage/example/ha_example.h +++ b/storage/example/ha_example.h @@ -45,7 +45,7 @@ class ha_example: public handler EXAMPLE_SHARE *share; /* Shared lock info */ public: - ha_example(TABLE_SHARE *table_arg); + ha_example(handlerton *hton, TABLE_SHARE *table_arg); ~ha_example() { } diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index 3bb34e5aacc..3c1cc722f02 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -359,19 +359,22 @@ static const uint sizeof_trailing_and= sizeof(" AND ") - 1; static const uint sizeof_trailing_where= sizeof(" WHERE ") - 1; /* Static declaration for handerton */ -static handler *federated_create_handler(TABLE_SHARE *table, +static handler *federated_create_handler(handlerton *hton, + TABLE_SHARE *table, MEM_ROOT *mem_root); -static int federated_commit(THD *thd, bool all); -static int federated_rollback(THD *thd, bool all); +static int federated_commit(handlerton *hton, THD *thd, bool all); +static int federated_rollback(handlerton *hton, THD *thd, bool all); +static int federated_db_init(void); +static int federated_db_end(handlerton *hton, ha_panic_function type); + /* Federated storage engine handlerton */ -handlerton *federated_hton; - -static handler *federated_create_handler(TABLE_SHARE *table, +static handler *federated_create_handler(handlerton *hton, + TABLE_SHARE *table, MEM_ROOT *mem_root) { - return 
new (mem_root) ha_federated(table); + return new (mem_root) ha_federated(hton, table); } @@ -399,7 +402,7 @@ static byte *federated_get_key(FEDERATED_SHARE *share, uint *length, int federated_db_init(void *p) { DBUG_ENTER("federated_db_init"); - federated_hton= (handlerton *)p; + handlerton *federated_hton= (handlerton *)p; federated_hton->state= SHOW_OPTION_YES; federated_hton->db_type= DB_TYPE_FEDERATED_DB; federated_hton->commit= federated_commit; @@ -434,7 +437,7 @@ error: FALSE OK */ -int federated_db_end(ha_panic_function type) +int federated_db_end(handlerton *hton, ha_panic_function type) { if (federated_init) { @@ -723,8 +726,9 @@ error: ** FEDERATED tables *****************************************************************************/ -ha_federated::ha_federated(TABLE_SHARE *table_arg) - :handler(federated_hton, table_arg), +ha_federated::ha_federated(handlerton *hton, + TABLE_SHARE *table_arg) + :handler(hton, table_arg), mysql(0), stored_result(0) { trx_next= 0; @@ -2736,7 +2740,7 @@ bool ha_federated::get_error_message(int error, String* buf) int ha_federated::external_lock(THD *thd, int lock_type) { int error= 0; - ha_federated *trx= (ha_federated *)thd->ha_data[federated_hton->slot]; + ha_federated *trx= (ha_federated *)thd->ha_data[ht->slot]; DBUG_ENTER("ha_federated::external_lock"); if (lock_type != F_UNLCK) @@ -2754,7 +2758,7 @@ int ha_federated::external_lock(THD *thd, int lock_type) DBUG_PRINT("info", ("error setting autocommit TRUE: %d", error)); DBUG_RETURN(error); } - trans_register_ha(thd, FALSE, federated_hton); + trans_register_ha(thd, FALSE, ht); } else { @@ -2770,8 +2774,8 @@ int ha_federated::external_lock(THD *thd, int lock_type) DBUG_PRINT("info", ("error setting autocommit FALSE: %d", error)); DBUG_RETURN(error); } - thd->ha_data[federated_hton->slot]= this; - trans_register_ha(thd, TRUE, federated_hton); + thd->ha_data[ht->slot]= this; + trans_register_ha(thd, TRUE, ht); /* Send a lock table to the remote end. 
We do not support this at the moment @@ -2796,10 +2800,10 @@ int ha_federated::external_lock(THD *thd, int lock_type) } -static int federated_commit(THD *thd, bool all) +static int federated_commit(handlerton *hton, THD *thd, bool all) { int return_val= 0; - ha_federated *trx= (ha_federated *)thd->ha_data[federated_hton->slot]; + ha_federated *trx= (ha_federated *)thd->ha_data[hton->slot]; DBUG_ENTER("federated_commit"); if (all) @@ -2814,7 +2818,7 @@ static int federated_commit(THD *thd, bool all) if (error && !return_val); return_val= error; } - thd->ha_data[federated_hton->slot]= NULL; + thd->ha_data[hton->slot]= NULL; } DBUG_PRINT("info", ("error val: %d", return_val)); @@ -2822,10 +2826,10 @@ static int federated_commit(THD *thd, bool all) } -static int federated_rollback(THD *thd, bool all) +static int federated_rollback(handlerton *hton, THD *thd, bool all) { int return_val= 0; - ha_federated *trx= (ha_federated *)thd->ha_data[federated_hton->slot]; + ha_federated *trx= (ha_federated *)thd->ha_data[hton->slot]; DBUG_ENTER("federated_rollback"); if (all) @@ -2840,7 +2844,7 @@ static int federated_rollback(THD *thd, bool all) if (error && !return_val) return_val= error; } - thd->ha_data[federated_hton->slot]= NULL; + thd->ha_data[hton->slot]= NULL; } DBUG_PRINT("info", ("error val: %d", return_val)); @@ -2882,7 +2886,7 @@ int ha_federated::execute_simple_query(const char *query, int len) } struct st_mysql_storage_engine federated_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, federated_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(federated) { diff --git a/storage/federated/ha_federated.h b/storage/federated/ha_federated.h index ebdc775d3bf..ade1e5b181e 100644 --- a/storage/federated/ha_federated.h +++ b/storage/federated/ha_federated.h @@ -94,7 +94,7 @@ private: int stash_remote_error(); public: - ha_federated(TABLE_SHARE *table_arg); + ha_federated(handlerton *hton, TABLE_SHARE *table_arg); ~ha_federated() {} /* The name that will be used for display purposes */ const char *table_type() const { return "FEDERATED"; } @@ -236,6 +236,3 @@ public: MYSQL_RES **result); }; -int federated_db_init(void); -int federated_db_end(ha_panic_function type); - diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc index d550442b0ec..4a83b2e12d8 100644 --- a/storage/heap/ha_heap.cc +++ b/storage/heap/ha_heap.cc @@ -25,12 +25,20 @@ #include "ha_heap.h" -static handler *heap_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root); +static handler *heap_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root); + +int heap_panic(handlerton *hton, ha_panic_function flag) +{ + return hp_panic(flag); +} -handlerton *heap_hton; int heap_init(void *p) { + handlerton *heap_hton; + heap_hton= (handlerton *)p; heap_hton->state= SHOW_OPTION_YES; heap_hton->db_type= DB_TYPE_HEAP; @@ -41,9 +49,11 @@ int heap_init(void *p) return 0; } -static handler *heap_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) +static handler *heap_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root) { - return new (mem_root) ha_heap(table); + return new (mem_root) ha_heap(hton, table); } @@ -51,8 +61,8 @@ static handler *heap_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) ** HEAP tables *****************************************************************************/ -ha_heap::ha_heap(TABLE_SHARE *table_arg) - :handler(heap_hton, table_arg), file(0), records_changed(0), +ha_heap::ha_heap(handlerton *hton, TABLE_SHARE *table_arg) + 
:handler(hton, table_arg), file(0), records_changed(0), key_stat_version(0) {} @@ -701,7 +711,7 @@ bool ha_heap::check_if_incompatible_data(HA_CREATE_INFO *info, } struct st_mysql_storage_engine heap_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, heap_hton}; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(heap) { diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h index 00e59856f26..0f6386655cb 100644 --- a/storage/heap/ha_heap.h +++ b/storage/heap/ha_heap.h @@ -31,7 +31,7 @@ class ha_heap: public handler uint records_changed; uint key_stat_version; public: - ha_heap(TABLE_SHARE *table); + ha_heap(handlerton *hton, TABLE_SHARE *table); ~ha_heap() {} const char *table_type() const { diff --git a/storage/heap/hp_panic.c b/storage/heap/hp_panic.c index 2b659cbfbb3..08c310bec3d 100644 --- a/storage/heap/hp_panic.c +++ b/storage/heap/hp_panic.c @@ -19,10 +19,10 @@ /* if flag == HA_PANIC_CLOSE then all files are removed for more memory */ -int heap_panic(enum ha_panic_function flag) +int hp_panic(enum ha_panic_function flag) { LIST *element,*next_open; - DBUG_ENTER("heap_panic"); + DBUG_ENTER("hp_panic"); pthread_mutex_lock(&THR_LOCK_heap); for (element=heap_open_list ; element ; element=next_open) @@ -54,4 +54,4 @@ int heap_panic(enum ha_panic_function flag) } pthread_mutex_unlock(&THR_LOCK_heap); DBUG_RETURN(0); -} /* heap_panic */ +} /* hp_panic */ diff --git a/storage/heap/hp_test1.c b/storage/heap/hp_test1.c index 703b39b1e2d..a7423effac8 100644 --- a/storage/heap/hp_test1.c +++ b/storage/heap/hp_test1.c @@ -155,7 +155,7 @@ int main(int argc, char **argv) } #endif - if (heap_close(file) || heap_panic(HA_PANIC_CLOSE)) + if (heap_close(file) || hp_panic(HA_PANIC_CLOSE)) goto err; my_end(MY_GIVE_INFO); return(0); diff --git a/storage/heap/hp_test2.c b/storage/heap/hp_test2.c index c1d987a3b5f..b4e8cf98f0b 100644 --- a/storage/heap/hp_test2.c +++ b/storage/heap/hp_test2.c @@ -603,7 +603,7 @@ end: if (heap_close(file) || (file2 && heap_close(file2))) goto err; heap_delete_table(filename2); - heap_panic(HA_PANIC_CLOSE); + hp_panic(HA_PANIC_CLOSE); my_end(MY_GIVE_INFO); return(0); err: @@ -669,7 +669,7 @@ static sig_handler endprog(int sig_number __attribute__((unused))) else #endif { - heap_panic(HA_PANIC_CLOSE); + hp_panic(HA_PANIC_CLOSE); my_end(1); exit(1); } diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 84533d6c814..68e2609cbf8 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -54,6 +54,12 @@ pthread_cond_t commit_cond; pthread_mutex_t commit_cond_m; bool innodb_inited= 0; +/* + This needs to exist until the query cache callback is removed + or learns to pass hton. +*/ +static handlerton *legacy_innodb_hton; + /*-----------------------------------------------------------------*/ /* These variables are used to implement (semi-)synchronous MySQL binlog replication for InnoDB tables. 
*/ @@ -197,22 +203,25 @@ static mysql_byte* innobase_get_key(INNOBASE_SHARE *share,uint *length, my_bool not_used __attribute__((unused))); static INNOBASE_SHARE *get_share(const char *table_name); static void free_share(INNOBASE_SHARE *share); -static int innobase_close_connection(THD* thd); -static int innobase_commit(THD* thd, bool all); -static int innobase_rollback(THD* thd, bool all); -static int innobase_rollback_to_savepoint(THD* thd, void *savepoint); -static int innobase_savepoint(THD* thd, void *savepoint); -static int innobase_release_savepoint(THD* thd, void *savepoint); -static handler *innobase_create_handler(TABLE_SHARE *table, +static int innobase_close_connection(handlerton *hton, THD* thd); +static int innobase_commit(handlerton *hton, THD* thd, bool all); +static int innobase_rollback(handlerton *hton, THD* thd, bool all); +static int innobase_rollback_to_savepoint(handlerton *hton, THD* thd, + void *savepoint); +static int innobase_savepoint(handlerton *hton, THD* thd, void *savepoint); +static int innobase_release_savepoint(handlerton *hton, THD* thd, + void *savepoint); +static handler *innobase_create_handler(handlerton *hton, + TABLE_SHARE *table, MEM_ROOT *mem_root); static const char innobase_hton_name[]= "InnoDB"; -handlerton *innobase_hton; - -static handler *innobase_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) +static handler *innobase_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root) { - return new (mem_root) ha_innobase(table); + return new (mem_root) ha_innobase(hton, table); } @@ -380,7 +389,8 @@ documentation, see handler.cc. */ int innobase_release_temporary_latches( /*===============================*/ - THD *thd) + handlerton *hton, + THD *thd) { trx_t* trx; @@ -389,7 +399,7 @@ innobase_release_temporary_latches( return 0; } - trx = (trx_t*) thd->ha_data[innobase_hton->slot]; + trx = (trx_t*) thd->ha_data[hton->slot]; if (trx) { innobase_release_stat_resources(trx); @@ -841,13 +851,14 @@ trx_t* check_trx_exists( /*=============*/ /* out: InnoDB transaction handle */ + handlerton* hton, /* in: handlerton for innodb */ THD* thd) /* in: user thread handle */ { trx_t* trx; ut_ad(thd == current_thd); - trx = (trx_t*) thd->ha_data[innobase_hton->slot]; + trx = (trx_t*) thd->ha_data[hton->slot]; if (trx == NULL) { DBUG_ASSERT(thd != NULL); @@ -861,7 +872,7 @@ check_trx_exists( CPU time */ trx->support_xa = (ibool)(thd->variables.innodb_support_xa); - thd->ha_data[innobase_hton->slot] = trx; + thd->ha_data[hton->slot] = trx; } else { if (trx->magic_n != TRX_MAGIC_N) { mem_analyze_corruption(trx); @@ -889,8 +900,8 @@ check_trx_exists( /************************************************************************* Construct ha_innobase handler. 
*/ -ha_innobase::ha_innobase(TABLE_SHARE *table_arg) - :handler(innobase_hton, table_arg), +ha_innobase::ha_innobase(handlerton *hton, TABLE_SHARE *table_arg) + :handler(hton, table_arg), int_table_flags(HA_REC_NOT_IN_SEQ | HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | @@ -917,7 +928,7 @@ ha_innobase::update_thd( row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; trx_t* trx; - trx = check_trx_exists(thd); + trx = check_trx_exists(ht, thd); if (prebuilt->trx != trx) { @@ -938,10 +949,11 @@ inline void innobase_register_stmt( /*===================*/ + handlerton* hton, /* in: Innobase hton */ THD* thd) /* in: MySQL thd (connection) object */ { /* Register the statement */ - trans_register_ha(thd, FALSE, innobase_hton); + trans_register_ha(thd, FALSE, hton); } /************************************************************************* @@ -955,17 +967,18 @@ inline void innobase_register_trx_and_stmt( /*===========================*/ + handlerton *hton, /* in: Innobase handlerton */ THD* thd) /* in: MySQL thd (connection) object */ { /* NOTE that actually innobase_register_stmt() registers also the transaction in the AUTOCOMMIT=1 mode. */ - innobase_register_stmt(thd); + innobase_register_stmt(hton, thd); if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { /* No autocommit mode, register for a transaction */ - trans_register_ha(thd, TRUE, innobase_hton); + trans_register_ha(thd, TRUE, hton); } } @@ -1061,7 +1074,7 @@ innobase_query_caching_of_table_permitted( return((my_bool)FALSE); } - trx = check_trx_exists(thd); + trx = check_trx_exists(legacy_innodb_hton, thd); if (trx->has_search_latch) { ut_print_timestamp(stderr); sql_print_error("The calling thread is holding the adaptive " @@ -1120,7 +1133,7 @@ innobase_query_caching_of_table_permitted( if (trx->active_trans == 0) { - innobase_register_trx_and_stmt(thd); + innobase_register_trx_and_stmt(legacy_innodb_hton, thd); trx->active_trans = 1; } @@ -1295,7 +1308,7 @@ ha_innobase::init_table_handle_for_HANDLER(void) if (prebuilt->trx->active_trans == 0) { - innobase_register_trx_and_stmt(current_thd); + innobase_register_trx_and_stmt(ht, current_thd); prebuilt->trx->active_trans = 1; } @@ -1338,7 +1351,8 @@ innobase_init(void *p) char *default_path; DBUG_ENTER("innobase_init"); - innobase_hton= (handlerton *)p; + handlerton *innobase_hton= (handlerton *)p; + legacy_innodb_hton= innobase_hton; innobase_hton->state=have_innodb; innobase_hton->db_type= DB_TYPE_INNODB; @@ -1613,7 +1627,7 @@ error: Closes an InnoDB database. */ int -innobase_end(ha_panic_function type) +innobase_end(handlerton *hton, ha_panic_function type) /*==============*/ /* out: TRUE if error */ { @@ -1651,7 +1665,7 @@ Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit flushes the logs, and the name of this function should be innobase_checkpoint. */ bool -innobase_flush_logs(void) +innobase_flush_logs(handlerton *hton) /*=====================*/ /* out: TRUE if error */ { @@ -1690,6 +1704,7 @@ int innobase_start_trx_and_assign_read_view( /*====================================*/ /* out: 0 */ + handlerton *hton, /* in: Innodb handlerton */ THD* thd) /* in: MySQL thread handle of the user for whom the transaction should be committed */ { @@ -1699,7 +1714,7 @@ innobase_start_trx_and_assign_read_view( /* Create a new trx struct for thd, if it does not yet have one */ - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); /* This is just to play safe: release a possible FIFO ticket and search latch. 
Since we will reserve the kernel mutex, we have to @@ -1718,9 +1733,7 @@ innobase_start_trx_and_assign_read_view( /* Set the MySQL flag to mark that there is an active transaction */ if (trx->active_trans == 0) { - - innobase_register_trx_and_stmt(current_thd); - + innobase_register_trx_and_stmt(hton, current_thd); trx->active_trans = 1; } @@ -1735,7 +1748,8 @@ int innobase_commit( /*============*/ /* out: 0 */ - THD* thd, /* in: MySQL thread handle of the user for whom + handlerton *hton, /* in: Innodb handlerton */ + THD* thd, /* in: MySQL thread handle of the user for whom the transaction should be committed */ bool all) /* in: TRUE - commit transaction FALSE - the current SQL statement ended */ @@ -1745,7 +1759,7 @@ innobase_commit( DBUG_ENTER("innobase_commit"); DBUG_PRINT("trans", ("ending transaction")); - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); /* Update the info whether we should skip XA steps that eat CPU time */ trx->support_xa = (ibool)(thd->variables.innodb_support_xa); @@ -1871,6 +1885,7 @@ int innobase_report_binlog_offset_and_commit( /*=====================================*/ /* out: 0 */ + handlerton *hton, /* in: Innodb handlerton */ THD* thd, /* in: user thread */ void* trx_handle, /* in: InnoDB trx handle */ char* log_file_name, /* in: latest binlog file name */ @@ -1888,7 +1903,7 @@ innobase_report_binlog_offset_and_commit( trx->flush_log_later = TRUE; - innobase_commit(thd, TRUE); + innobase_commit(hton, thd, TRUE); trx->flush_log_later = FALSE; @@ -1936,11 +1951,12 @@ int innobase_commit_complete( /*=====================*/ /* out: 0 */ + handlerton *hton, /* in: Innodb handlerton */ THD* thd) /* in: user thread */ { trx_t* trx; - trx = (trx_t*) thd->ha_data[innobase_hton->slot]; + trx = (trx_t*) thd->ha_data[hton->slot]; if (trx && trx->active_trans) { @@ -1964,6 +1980,7 @@ static int innobase_rollback( /*==============*/ /* out: 0 or error number */ + handlerton *hton, /* in: Innodb handlerton */ THD* thd, /* in: handle to the MySQL thread of the user whose transaction should be rolled back */ bool all) /* in: TRUE - commit transaction @@ -1975,7 +1992,7 @@ innobase_rollback( DBUG_ENTER("innobase_rollback"); DBUG_PRINT("trans", ("aborting transaction")); - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); /* Update the info whether we should skip XA steps that eat CPU time */ trx->support_xa = (ibool)(thd->variables.innodb_support_xa); @@ -2047,6 +2064,7 @@ innobase_rollback_to_savepoint( /*===========================*/ /* out: 0 if success, HA_ERR_NO_SAVEPOINT if no savepoint with the given name */ + handlerton *hton, /* in: Innodb handlerton */ THD* thd, /* in: handle to the MySQL thread of the user whose transaction should be rolled back */ void* savepoint) /* in: savepoint data */ @@ -2058,7 +2076,7 @@ innobase_rollback_to_savepoint( DBUG_ENTER("innobase_rollback_to_savepoint"); - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); /* Release a possible FIFO ticket and search latch. 
Since we will reserve the kernel mutex, we have to release the search system latch @@ -2083,6 +2101,7 @@ innobase_release_savepoint( /*=======================*/ /* out: 0 if success, HA_ERR_NO_SAVEPOINT if no savepoint with the given name */ + handlerton* hton, /* in: handlerton for Innodb */ THD* thd, /* in: handle to the MySQL thread of the user whose transaction should be rolled back */ void* savepoint) /* in: savepoint data */ @@ -2093,7 +2112,7 @@ innobase_release_savepoint( DBUG_ENTER("innobase_release_savepoint"); - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); /* TODO: use provided savepoint data area to store savepoint data */ @@ -2111,6 +2130,7 @@ int innobase_savepoint( /*===============*/ /* out: always 0, that is, always succeeds */ + handlerton* hton, /* in: handle to the Innodb handlerton */ THD* thd, /* in: handle to the MySQL thread */ void* savepoint) /* in: savepoint data */ { @@ -2127,7 +2147,7 @@ innobase_savepoint( DBUG_ASSERT(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) || thd->in_sub_stmt); - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); /* Release a possible FIFO ticket and search latch. Since we will reserve the kernel mutex, we have to release the search system latch @@ -2154,12 +2174,13 @@ int innobase_close_connection( /*======================*/ /* out: 0 or error number */ + handlerton* hton, /* in: innobase handlerton */ THD* thd) /* in: handle to the MySQL thread of the user whose resources should be free'd */ { trx_t* trx; - trx = (trx_t*)thd->ha_data[innobase_hton->slot]; + trx = (trx_t*)thd->ha_data[hton->slot]; ut_a(trx); @@ -3252,11 +3273,11 @@ ha_innobase::write_row( DBUG_ENTER("ha_innobase::write_row"); if (prebuilt->trx != - (trx_t*) current_thd->ha_data[innobase_hton->slot]) { + (trx_t*) current_thd->ha_data[ht->slot]) { sql_print_error("The transaction object for the table handle is at " "%p, but for the current thread it is at %p", prebuilt->trx, - (trx_t*) current_thd->ha_data[innobase_hton->slot]); + (trx_t*) current_thd->ha_data[ht->slot]); fputs("InnoDB: Dump of 200 bytes around prebuilt: ", stderr); ut_print_buf(stderr, ((const byte*)prebuilt) - 100, 200); @@ -3264,7 +3285,7 @@ ha_innobase::write_row( "InnoDB: Dump of 200 bytes around transaction.all: ", stderr); ut_print_buf(stderr, - ((byte*)(&(current_thd->ha_data[innobase_hton->slot]))) - 100, + ((byte*)(&(current_thd->ha_data[ht->slot]))) - 100, 200); putc('\n', stderr); ut_error; @@ -3318,7 +3339,7 @@ no_commit: no need to re-acquire locks on it. */ /* Altering to InnoDB format */ - innobase_commit(user_thd, 1); + innobase_commit(ht, user_thd, 1); /* Note that this transaction is still active. */ prebuilt->trx->active_trans = 1; /* We will need an IX lock on the destination table. */ @@ -3334,7 +3355,7 @@ no_commit: /* Commit the transaction. This will release the table locks, so they have to be acquired again. */ - innobase_commit(user_thd, 1); + innobase_commit(ht, user_thd, 1); /* Note that this transaction is still active. */ prebuilt->trx->active_trans = 1; /* Re-acquire the table lock on the source table. 
*/ @@ -3637,7 +3658,7 @@ ha_innobase::update_row( DBUG_ENTER("ha_innobase::update_row"); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton->slot]); + (trx_t*) current_thd->ha_data[ht->slot]); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); @@ -3698,7 +3719,7 @@ ha_innobase::delete_row( DBUG_ENTER("ha_innobase::delete_row"); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton->slot]); + (trx_t*) current_thd->ha_data[ht->slot]); if (last_query_id != user_thd->query_id) { prebuilt->sql_stat_start = TRUE; @@ -3796,7 +3817,7 @@ ha_innobase::try_semi_consistent_read(bool yes) row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton->slot]); + (trx_t*) current_thd->ha_data[ht->slot]); /* Row read type is set to semi consistent read if this was requested by the MySQL and either innodb_locks_unsafe_for_binlog @@ -3963,7 +3984,7 @@ ha_innobase::index_read( DBUG_ENTER("index_read"); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton->slot]); + (trx_t*) current_thd->ha_data[ht->slot]); statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); @@ -4078,7 +4099,7 @@ ha_innobase::change_active_index( ut_ad(user_thd == current_thd); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton->slot]); + (trx_t*) current_thd->ha_data[ht->slot]); active_index = keynr; @@ -4168,7 +4189,7 @@ ha_innobase::general_fetch( DBUG_ENTER("general_fetch"); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton->slot]); + (trx_t*) current_thd->ha_data[ht->slot]); innodb_srv_conc_enter_innodb(prebuilt->trx); @@ -4404,7 +4425,7 @@ ha_innobase::rnd_pos( &LOCK_status); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton->slot]); + (trx_t*) current_thd->ha_data[ht->slot]); if (prebuilt->clust_index_was_generated) { /* No primary key was defined for the table and we @@ -4454,7 +4475,7 @@ ha_innobase::position( uint len; ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton->slot]); + (trx_t*) current_thd->ha_data[ht->slot]); if (prebuilt->clust_index_was_generated) { /* No primary key was defined for the table and we @@ -4782,7 +4803,7 @@ ha_innobase::create( /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(thd); + parent_trx = check_trx_exists(ht, thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -4955,7 +4976,7 @@ ha_innobase::discard_or_import_tablespace( ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton->slot]); + (trx_t*) current_thd->ha_data[ht->slot]); dict_table = prebuilt->table; trx = prebuilt->trx; @@ -5035,7 +5056,7 @@ ha_innobase::delete_table( /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(thd); + parent_trx = check_trx_exists(ht, thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5102,6 +5123,7 @@ void innobase_drop_database( /*===================*/ /* out: error number */ + handlerton *hton, /* in: handlerton of Innodb */ char* path) /* in: database path; inside InnoDB the name of the last directory in the path is used as the database name: for example, in 
'mysql/data/test' @@ -5117,7 +5139,7 @@ innobase_drop_database( /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(current_thd); + parent_trx = check_trx_exists(hton, current_thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5196,7 +5218,7 @@ ha_innobase::rename_table( /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(current_thd); + parent_trx = check_trx_exists(ht, current_thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5283,7 +5305,7 @@ ha_innobase::records_in_range( DBUG_ENTER("records_in_range"); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton->slot]); + (trx_t*) current_thd->ha_data[ht->slot]); prebuilt->trx->op_info = (char*)"estimating records in index range"; @@ -5725,7 +5747,7 @@ ha_innobase::check( ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton->slot]); + (trx_t*) current_thd->ha_data[ht->slot]); if (prebuilt->mysql_template == NULL) { /* Build the template; we will use a dummy template @@ -6009,7 +6031,7 @@ ha_innobase::can_switch_engines(void) DBUG_ENTER("ha_innobase::can_switch_engines"); ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[innobase_hton->slot]); + (trx_t*) current_thd->ha_data[ht->slot]); prebuilt->trx->op_info = "determining if there are foreign key constraints"; @@ -6202,10 +6224,10 @@ ha_innobase::start_stmt( /* Set the MySQL flag to mark that there is an active transaction */ if (trx->active_trans == 0) { - innobase_register_trx_and_stmt(thd); + innobase_register_trx_and_stmt(ht, thd); trx->active_trans = 1; } else { - innobase_register_stmt(thd); + innobase_register_stmt(ht, thd); } return(0); @@ -6278,10 +6300,10 @@ ha_innobase::external_lock( transaction */ if (trx->active_trans == 0) { - innobase_register_trx_and_stmt(thd); + innobase_register_trx_and_stmt(ht, thd); trx->active_trans = 1; } else if (trx->n_mysql_tables_in_use == 0) { - innobase_register_stmt(thd); + innobase_register_stmt(ht, thd); } trx->n_mysql_tables_in_use++; @@ -6359,7 +6381,7 @@ ha_innobase::external_lock( if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { if (trx->active_trans != 0) { - innobase_commit(thd, TRUE); + innobase_commit(ht, thd, TRUE); } } else { if (trx->isolation_level <= TRX_ISO_READ_COMMITTED @@ -6440,7 +6462,7 @@ ha_innobase::transactional_table_lock( /* Set the MySQL flag to mark that there is an active transaction */ if (trx->active_trans == 0) { - innobase_register_trx_and_stmt(thd); + innobase_register_trx_and_stmt(ht, thd); trx->active_trans = 1; } @@ -6488,6 +6510,7 @@ Monitor to the client. */ bool innodb_show_status( /*===============*/ + handlerton* hton, /* in: the innodb handlerton */ THD* thd, /* in: the MySQL query thread of the caller */ stat_print_fn *stat_print) { @@ -6503,7 +6526,7 @@ innodb_show_status( DBUG_RETURN(FALSE); } - trx = check_trx_exists(thd); + trx = check_trx_exists(hton, thd); innobase_release_stat_resources(trx); @@ -6578,6 +6601,7 @@ Implements the SHOW MUTEX STATUS command. . 
*/ bool innodb_mutex_show_status( /*=====================*/ + handlerton* hton, /* in: the innodb handlerton */ THD* thd, /* in: the MySQL query thread of the caller */ stat_print_fn* stat_print) @@ -6659,14 +6683,15 @@ innodb_mutex_show_status( DBUG_RETURN(FALSE); } -bool innobase_show_status(THD* thd, stat_print_fn* stat_print, - enum ha_stat_type stat_type) +bool innobase_show_status(handlerton *hton, THD* thd, + stat_print_fn* stat_print, + enum ha_stat_type stat_type) { switch (stat_type) { case HA_ENGINE_STATUS: - return innodb_show_status(thd, stat_print); + return innodb_show_status(hton, thd, stat_print); case HA_ENGINE_MUTEX: - return innodb_mutex_show_status(thd, stat_print); + return innodb_mutex_show_status(hton, thd, stat_print); default: return FALSE; } @@ -6766,7 +6791,7 @@ ha_innobase::store_lock( because we call update_thd() later, in ::external_lock()! Failure to understand this caused a serious memory corruption bug in 5.1.11. */ - trx = check_trx_exists(thd); + trx = check_trx_exists(ht, thd); /* NOTE: MySQL can call this function with lock 'type' TL_IGNORE! Be careful to ignore TL_IGNORE if we are going to do something with @@ -7152,7 +7177,7 @@ ha_innobase::reset_auto_increment(ulonglong value) bool ha_innobase::get_error_message(int error, String *buf) { - trx_t* trx = check_trx_exists(current_thd); + trx_t* trx = check_trx_exists(ht, current_thd); buf->copy(trx->detailed_error, strlen(trx->detailed_error), system_charset_info); @@ -7371,13 +7396,14 @@ int innobase_xa_prepare( /*================*/ /* out: 0 or error number */ + handlerton *hton, THD* thd, /* in: handle to the MySQL thread of the user whose XA transaction should be prepared */ bool all) /* in: TRUE - commit transaction FALSE - the current SQL statement ended */ { int error = 0; - trx_t* trx = check_trx_exists(thd); + trx_t* trx = check_trx_exists(hton, thd); if (thd->lex->sql_command != SQLCOM_XA_PREPARE && (all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) @@ -7467,6 +7493,7 @@ innobase_xa_recover( /*================*/ /* out: number of prepared transactions stored in xid_list */ + handlerton *hton, XID* xid_list, /* in/out: prepared transactions */ uint len) /* in: number of slots in xid_list */ { @@ -7486,6 +7513,7 @@ int innobase_commit_by_xid( /*===================*/ /* out: 0 or error number */ + handlerton *hton, XID* xid) /* in: X/Open XA transaction identification */ { trx_t* trx; @@ -7509,6 +7537,7 @@ int innobase_rollback_by_xid( /*=====================*/ /* out: 0 or error number */ + handlerton *hton, XID *xid) /* in: X/Open XA transaction identification */ { trx_t* trx; @@ -7529,12 +7558,13 @@ This consistent view is then used inside of MySQL when accessing records using a cursor. */ void* -innobase_create_cursor_view(void) -/*=============================*/ - /* out: Pointer to cursor view or NULL */ +innobase_create_cursor_view( + /* out: pointer to cursor view or NULL */ + handlerton *hton, /* in: innobase hton */ + THD* thd) /* in: user thread handle */ { return(read_cursor_view_create_for_mysql( - check_trx_exists(current_thd))); + check_trx_exists(hton, thd))); } /*********************************************************************** @@ -7544,10 +7574,11 @@ corresponding MySQL thread still lacks one. 
*/ void innobase_close_cursor_view( -/*=======================*/ + handlerton *hton, + THD* thd, /* in: user thread handle */ void* curview)/* in: Consistent read view to be closed */ { - read_cursor_view_close_for_mysql(check_trx_exists(current_thd), + read_cursor_view_close_for_mysql(check_trx_exists(hton, current_thd), (cursor_view_t*) curview); } @@ -7560,9 +7591,11 @@ restored to a transaction read view. */ void innobase_set_cursor_view( /*=====================*/ + handlerton *hton, + THD* thd, /* in: user thread handle */ void* curview)/* in: Consistent cursor view to be set */ { - read_cursor_set_for_mysql(check_trx_exists(current_thd), + read_cursor_set_for_mysql(check_trx_exists(hton, current_thd), (cursor_view_t*) curview); } @@ -7607,7 +7640,7 @@ SHOW_VAR innodb_status_variables_export[]= { }; struct st_mysql_storage_engine innobase_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, innobase_hton}; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(innobase) { diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index 4d5dc6b52d6..aebbc3d6654 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -80,7 +80,7 @@ class ha_innobase: public handler /* Init values for the class: */ public: - ha_innobase(TABLE_SHARE *table_arg); + ha_innobase(handlerton *hton, TABLE_SHARE *table_arg); ~ha_innobase() {} /* Get the row type from the storage engine. If this method returns @@ -240,8 +240,8 @@ extern ulong srv_flush_log_at_trx_commit; } int innobase_init(void); -int innobase_end(ha_panic_function type); -bool innobase_flush_logs(void); +int innobase_end(handlerton *hton, ha_panic_function type); +bool innobase_flush_logs(handlerton *hton); uint innobase_get_free_space(void); /* @@ -258,14 +258,14 @@ int innobase_commit_complete(void* trx_handle); void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); #endif -void innobase_drop_database(char *path); -bool innobase_show_status(THD* thd, stat_print_fn*, enum ha_stat_type); +void innobase_drop_database(handlerton *hton, char *path); +bool innobase_show_status(handlerton *hton, THD* thd, stat_print_fn*, enum ha_stat_type); -int innobase_release_temporary_latches(THD *thd); +int innobase_release_temporary_latches(handlerton *hton, THD *thd); -void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); +void innobase_store_binlog_offset_and_flush_log(handlerton *hton, char *binlog_name,longlong offset); -int innobase_start_trx_and_assign_read_view(THD* thd); +int innobase_start_trx_and_assign_read_view(handlerton *hton, THD* thd); /*********************************************************************** This function is used to prepare X/Open XA distributed transaction */ @@ -273,6 +273,7 @@ This function is used to prepare X/Open XA distributed transaction */ int innobase_xa_prepare( /*====================*/ /* out: 0 or error number */ + handlerton *hton, /* in: innobase hton */ THD* thd, /* in: handle to the MySQL thread of the user whose XA transaction should be prepared */ bool all); /* in: TRUE - commit transaction @@ -285,6 +286,7 @@ int innobase_xa_recover( /*====================*/ /* out: number of prepared transactions stored in xid_list */ + handlerton *hton, /* in: innobase hton */ XID* xid_list, /* in/out: prepared transactions */ uint len); /* in: number of slots in xid_list */ @@ -295,6 +297,7 @@ which is in the prepared state */ int innobase_commit_by_xid( /*=======================*/ /* 
out: 0 or error number */ + handlerton *hton, /* in: innobase hton */ XID* xid); /* in : X/Open XA Transaction Identification */ /*********************************************************************** @@ -303,6 +306,7 @@ which is in the prepared state */ int innobase_rollback_by_xid( /* out: 0 or error number */ + handlerton *hton, /* in: innobase hton */ XID *xid); /* in : X/Open XA Transaction Identification */ @@ -313,9 +317,10 @@ This consistent view is then used inside of MySQL when accessing records using a cursor. */ void* -innobase_create_cursor_view(void); -/*=============================*/ - /* out: Pointer to cursor view or NULL */ +innobase_create_cursor_view( + /* out: Pointer to cursor view or NULL */ + handlerton *hton, /* in: innobase hton */ + THD* thd); /* in: user thread handle */ /*********************************************************************** Close the given consistent cursor view of a transaction and restore @@ -325,8 +330,11 @@ corresponding MySQL thread still lacks one. */ void innobase_close_cursor_view( /*=======================*/ + handlerton *hton, /* in: innobase hton */ + THD* thd, /* in: user thread handle */ void* curview); /* in: Consistent read view to be closed */ + /*********************************************************************** Set the given consistent cursor view to a transaction which is created if the corresponding MySQL thread still lacks one. If the given @@ -336,4 +344,6 @@ restored to a transaction read view. */ void innobase_set_cursor_view( /*=====================*/ + handlerton *hton, /* in: innobase hton */ + THD* thd, /* in: user thread handle */ void* curview); /* in: Consistent read view to be set */ diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 39cbc20fd4b..373e098a2b1 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -48,9 +48,11 @@ TYPELIB myisam_stats_method_typelib= { ** MyISAM tables *****************************************************************************/ -static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root) +static handler *myisam_create_handler(handlerton *hton, + TABLE_SHARE *table, + MEM_ROOT *mem_root) { - return new (mem_root) ha_myisam(table); + return new (mem_root) ha_myisam(hton, table); } // collect errors printed by mi_check routines @@ -133,8 +135,8 @@ void mi_check_print_warning(MI_CHECK *param, const char *fmt,...) 
} -ha_myisam::ha_myisam(TABLE_SHARE *table_arg) - :handler(myisam_hton, table_arg), file(0), +ha_myisam::ha_myisam(handlerton *hton, TABLE_SHARE *table_arg) + :handler(hton, table_arg), file(0), int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER | HA_DUPLICATE_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY | HA_FILE_BASED | HA_CAN_GEOMETRY | HA_NO_TRANSACTIONS | @@ -1787,21 +1789,27 @@ bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *info, return COMPATIBLE_DATA_YES; } -handlerton *myisam_hton; +extern int mi_panic(enum ha_panic_function flag); +int myisam_panic(handlerton *hton, ha_panic_function flag) +{ + return mi_panic(flag); +} static int myisam_init(void *p) { + handlerton *myisam_hton; + myisam_hton= (handlerton *)p; - myisam_hton->state=SHOW_OPTION_YES; - myisam_hton->db_type=DB_TYPE_MYISAM; - myisam_hton->create=myisam_create_handler; - myisam_hton->panic=mi_panic; + myisam_hton->state= SHOW_OPTION_YES; + myisam_hton->db_type= DB_TYPE_MYISAM; + myisam_hton->create= myisam_create_handler; + myisam_hton->panic= myisam_panic; myisam_hton->flags= HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES; return 0; } struct st_mysql_storage_engine myisam_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, myisam_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(myisam) { diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h index 511b796ebdb..85abcdca8b2 100644 --- a/storage/myisam/ha_myisam.h +++ b/storage/myisam/ha_myisam.h @@ -43,7 +43,7 @@ class ha_myisam: public handler int repair(THD *thd, MI_CHECK ¶m, bool optimize); public: - ha_myisam(TABLE_SHARE *table_arg); + ha_myisam(handlerton *hton, TABLE_SHARE *table_arg); ~ha_myisam() {} handler *clone(MEM_ROOT *mem_root); const char *table_type() const { return "MyISAM"; } diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index 91c04866b5a..a7685bf653d 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -1370,7 +1370,8 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info, param->temp_filename); goto err; } - if (filecopy(param,new_file,info->dfile,0L,new_header_length, + if (new_header_length && + filecopy(param,new_file,info->dfile,0L,new_header_length, "datafile-header")) goto err; info->s->state.dellink= HA_OFFSET_ERROR; @@ -2072,7 +2073,8 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info, param->temp_filename); goto err; } - if (filecopy(param, new_file,info->dfile,0L,new_header_length, + if (new_header_length && + filecopy(param, new_file,info->dfile,0L,new_header_length, "datafile-header")) goto err; if (param->testflag & T_UNPACK) @@ -2466,7 +2468,8 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info, param->temp_filename); goto err; } - if (filecopy(param, new_file,info->dfile,0L,new_header_length, + if (new_header_length && + filecopy(param, new_file,info->dfile,0L,new_header_length, "datafile-header")) goto err; if (param->testflag & T_UNPACK) diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc index 6ed07df1012..7f656c2e67c 100644 --- a/storage/myisammrg/ha_myisammrg.cc +++ b/storage/myisammrg/ha_myisammrg.cc @@ -34,19 +34,16 @@ static handler *myisammrg_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root); -/* MyISAM MERGE handlerton */ - -handlerton *myisammrg_hton; - -static handler *myisammrg_create_handler(TABLE_SHARE *table, +static handler *myisammrg_create_handler(handlerton *hton, + TABLE_SHARE *table, MEM_ROOT *mem_root) { - return new (mem_root) 
ha_myisammrg(table); + return new (mem_root) ha_myisammrg(hton, table); } -ha_myisammrg::ha_myisammrg(TABLE_SHARE *table_arg) - :handler(myisammrg_hton, table_arg), file(0) +ha_myisammrg::ha_myisammrg(handlerton *hton, TABLE_SHARE *table_arg) + :handler(hton, table_arg), file(0) {} static const char *ha_myisammrg_exts[] = { @@ -554,21 +551,29 @@ bool ha_myisammrg::check_if_incompatible_data(HA_CREATE_INFO *info, return COMPATIBLE_DATA_NO; } +extern int myrg_panic(enum ha_panic_function flag); +int myisammrg_panic(handlerton *hton, ha_panic_function flag) +{ + return myrg_panic(flag); +} + static int myisammrg_init(void *p) { + handlerton *myisammrg_hton; + myisammrg_hton= (handlerton *)p; - myisammrg_hton->state=have_merge_db; - myisammrg_hton->db_type=DB_TYPE_MRG_MYISAM; - myisammrg_hton->create=myisammrg_create_handler; - myisammrg_hton->panic=myrg_panic; - myisammrg_hton->flags= HTON_CAN_RECREATE; + myisammrg_hton->state= have_merge_db; + myisammrg_hton->db_type= DB_TYPE_MRG_MYISAM; + myisammrg_hton->create= myisammrg_create_handler; + myisammrg_hton->panic= myisammrg_panic; + myisammrg_hton->flags= HTON_CAN_RECREATE|HTON_NO_PARTITION; return 0; } struct st_mysql_storage_engine myisammrg_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION, myisammrg_hton }; +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; mysql_declare_plugin(myisammrg) { diff --git a/storage/myisammrg/ha_myisammrg.h b/storage/myisammrg/ha_myisammrg.h index d58a3523c26..faa3ae73c59 100644 --- a/storage/myisammrg/ha_myisammrg.h +++ b/storage/myisammrg/ha_myisammrg.h @@ -28,7 +28,7 @@ class ha_myisammrg: public handler MYRG_INFO *file; public: - ha_myisammrg(TABLE_SHARE *table_arg); + ha_myisammrg(handlerton *hton, TABLE_SHARE *table_arg); ~ha_myisammrg() {} const char *table_type() const { return "MRG_MyISAM"; } const char **bas_ext() const;
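
Reviewer note (not part of the patch): the change repeated across sql/ and storage/ above is that every handlerton callback and every handler constructor now receives the owning handlerton* explicitly. That is why the per-engine file-scope globals (e.g. "handlerton *archive_hton;") disappear, why code switches from thd->ha_data[xxx_hton->slot] to thd->ha_data[hton->slot] or the handler's ht member, and why the st_mysql_storage_engine initializers shrink to just the interface version. The sketch below is only an illustration of that calling convention under simplified assumptions: the types are toy stand-ins rather than the real server definitions, panic takes a plain int instead of enum ha_panic_function, placement new on a MEM_ROOT is omitted, and plugin registration is left out.

// Simplified stand-ins; NOT the real MySQL headers.
#include <cstdio>

struct handlerton;
struct TABLE_SHARE {};
struct MEM_ROOT {};

struct handler
{
  handlerton *ht;                          // each handler remembers its engine
  explicit handler(handlerton *hton) : ht(hton) {}
  virtual ~handler() {}
};

struct handlerton
{
  // Convention shown in the patch: the hton is passed back into every callback.
  handler *(*create)(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root);
  int (*panic)(handlerton *hton, int flag);  // flag simplified from ha_panic_function
};

struct ha_example : handler                  // hypothetical toy engine
{
  ha_example(handlerton *hton, TABLE_SHARE *) : handler(hton) {}
};

static handler *example_create_handler(handlerton *hton, TABLE_SHARE *table,
                                       MEM_ROOT *)
{
  return new ha_example(hton, table);        // hton comes from the caller, not a global
}

static int example_panic(handlerton *, int) { return 0; }

int main()
{
  handlerton example_hton = { example_create_handler, example_panic };
  TABLE_SHARE share;
  MEM_ROOT root;

  // The server would call through the plugin-registered handlerton:
  handler *h = example_hton.create(&example_hton, &share, &root);
  std::printf("handler bound to its hton: %s\n",
              h->ht == &example_hton ? "yes" : "no");
  delete h;
  return 0;
}

Written this way, an engine built as a loadable plugin does not need to know its handlerton address at static-initialization time; it simply uses whatever pointer the server hands to its callbacks, which is what lets the patch drop the handlerton member from each engine's st_mysql_storage_engine initializer.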